Example #1
0
static SLVAL
response_redirect(sl_vm_t* vm, SLVAL self, SLVAL url)
{
    response(vm)->status = 302;
    response_set_header(vm, self, sl_make_cstring(vm, "Location"), url);
    sl_exit(vm, sl_make_int(vm, 0));
    return self; /* never reached */
}
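For context, a handler like this would typically be exposed to Slash code by registering it on the response class during module initialization. The sketch below is an assumption, not part of the original source: it presumes a Response class object is available and that slash's sl_define_method(vm, class, name, arity, fn) helper is used for the binding.

/* Hypothetical registration sketch (assumed API usage, not from the original
 * source): bind response_redirect as Response#redirect taking one argument. */
static void
init_response_redirect(sl_vm_t* vm, SLVAL Response)
{
    sl_define_method(vm, Response, "redirect", 1, response_redirect);
}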
Example #2
0
void unmap_segments(dso *so) {
  /* Unmap text segment */
  if ((long)sl_munmap(so->text_addr, so->text_size) == -1) {
    sl_printf("Error unmap_segments: unmapping text segment failed.\n");
    sl_exit(1);
  }
  
  /* Unmap data segment */
  if ((long)sl_munmap(so->data_addr, so->data_size) == -1) {
    sl_printf("Error unmap_segments: unmapping data segment failed.\n");
    sl_exit(1);
  }
    
  /* Unmap bss segment */
  if (so->bss_size) {
    sl_munmap(so->bss_addr, so->bss_size);
  }
}
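unmap_segments only relies on the mapping bookkeeping that map_segments (Example #4) stores in the dso struct. The following is a partial sketch of those fields, reconstructed from usage; the real struct (see load_elf in Example #6) carries many more members.

/* Partial, assumed reconstruction of the mapping-related dso fields. */
struct dso_mapping_sketch {
  void *text_addr;          /* base of the text mapping */
  unsigned long text_size;  /* size of the text mapping in bytes */
  void *data_addr;          /* base of the data mapping */
  unsigned long data_size;  /* size of the data mapping in bytes */
  void *bss_addr;           /* base of the anonymous bss mapping */
  unsigned long bss_size;   /* size of the bss mapping, may be 0 */
};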
Example #3
0
void unload_elf(dso *so) {
  /* Are we allowed to unload? */
  if (so->flags_1 & DF_1_NODELETE) {
    sl_printf("Error unload_elf: NODELETE flag set on %s.\n", so->name);
    sl_exit(1);
  }

#ifdef D_RLOAD
  sl_printf("Unloading %s (%p-%p)\n", so->path, so->text_addr, so->end_addr);
#endif

#ifdef SL_STATISTIC
  curr_loaded_dsos--;
#endif
  
  /* Call fini functions */
  so_fini(so);
  
  /* Remove it from chain and global scope */
  chain_delete(so);
  gscope_remove(so);

#if defined(VERIFY_CFTX)  
  /* Remove it from libdetox dso_chain */
  remove_dso(so);
#endif

  /* Free name and path */
  sl_free((void *)so->name, MAX_LIB_NAME);
  sl_free((void *)so->path, MAX_PATH_LEN);
  
  /* Free dependency and lscope lists */
  if (so->deps)
    sl_free(so->deps, so->deps_count * sizeof(dso *));
  if (so->lscope)
    sl_free(so->lscope, so->lscope_num * sizeof(dso *));

  /* Unmap text and data segments */
  unmap_segments(so);

  /* Free struct */
  sl_free(so, sizeof(dso));
}
Example #4
0
void map_segments (long fd, Elf32_Phdr *segs[2], Elf32_Half type, dso *so) {

  /* Adjust text segment addresses to page size */
  Elf32_Off text_offset = TRUNC_PAGE(segs[0]->p_offset);
  Elf32_Addr text_vaddr = TRUNC_PAGE(segs[0]->p_vaddr);  
  Elf32_Addr text_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  unsigned long mapsize = text_vlimit - text_vaddr;

  /* Executable has to be loaded at constant address */
  void *base_addr = 0;
  if (type == ET_EXEC) {
    base_addr = (void *)text_vaddr;
  }

  /* TODO: what if base address lies in already mapped area? E.g. where the loader resides? */

  /* Map text segment into memory (this reserves the whole text+data address range) */
  char *mapbase = sl_mmap(base_addr, mapsize, convert_prot(segs[0]->p_flags),
                          MAP_PRIVATE, fd, text_offset);
  if ((long)mapbase == -1) {
    sl_close(fd);
    sl_printf("Error map_segments: mapping of text segment failed.\n");
    sl_exit(1);
  }

  /* Adjust data segment addresses to page size */
  Elf32_Off data_offset = TRUNC_PAGE(segs[1]->p_offset);
  Elf32_Addr data_vaddr = TRUNC_PAGE(segs[1]->p_vaddr);
  Elf32_Addr data_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_filesz);
  void *data_addr = mapbase + (data_vaddr - text_vaddr);
  long data_prot = convert_prot(segs[1]->p_flags);

  /* Map data segment into memory */
  if ((long)sl_mmap(data_addr, data_vlimit - data_vaddr, data_prot,
                    MAP_PRIVATE | MAP_FIXED, fd, data_offset) == -1) {
    sl_close(fd);
    sl_printf("Error map_segments: mapping of data segment failed.\n");
    sl_exit(1);
  }
   
  /* Clear BSS part */
  Elf32_Addr clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
  void *clear_addr = mapbase + (clear_vaddr - text_vaddr);
  void *clear_page = mapbase + (TRUNC_PAGE(clear_vaddr) - text_vaddr);
  unsigned long nclear = data_vlimit - clear_vaddr;

  if (nclear > 0) {
    /* Make sure the end of the segment is writable */
    if ((data_prot & PROT_WRITE) == 0 &&
        sl_mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE) == -1) {
      sl_printf("Error map_segments: mprotect on data segment failed.\n");
      sl_exit(1);
    }
    
    sl_memset(clear_addr, 0, nclear);

    /* Reset the data protection */
    if ((data_prot & PROT_WRITE) == 0) {
      sl_mprotect(clear_page, PAGE_SIZE, data_prot);
    }
  }
  
  /* Allocate remaining part of bss section */
  Elf32_Addr bss_vaddr = data_vlimit;
  Elf32_Addr bss_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  void *bss_addr = mapbase + (bss_vaddr - text_vaddr);
  if (bss_vlimit > bss_vaddr) {
    if ((long)sl_mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == -1) {
      sl_printf("Error map_segments: mmap of bss segment failed.\n");
      sl_exit(1);
    }
    
  }
  
  /* Save important information */
  so->base_addr = (type == ET_EXEC) ? 0 : mapbase;
  so->text_addr = mapbase;
  so->text_size = mapsize;  
  so->data_addr = data_addr;
  so->data_size = data_vlimit - data_vaddr;
  so->bss_addr = bss_addr;
  so->bss_size = bss_vlimit - bss_vaddr;
  so->end_addr = bss_addr + so->bss_size;
  so->text_prot = convert_prot(segs[0]->p_flags);
  so->data_prot = data_prot;
  so->bss_prot = data_prot;
}
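map_segments relies on a few helpers that are not shown in this listing. Their behaviour follows from how they are used above; here is a minimal sketch, assuming 4 KiB pages and the usual PF_* to PROT_* translation (the real definitions may differ):

#include <elf.h>
#include <sys/mman.h>

#define PAGE_SIZE 4096UL

/* Assumed semantics: round an address down/up to a page boundary. */
#define TRUNC_PAGE(addr) ((addr) & ~(PAGE_SIZE - 1))
#define ROUND_PAGE(addr) (((addr) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Assumed semantics: translate ELF segment flags into mmap protection bits. */
static long convert_prot(Elf32_Word p_flags) {
  long prot = 0;
  if (p_flags & PF_R) prot |= PROT_READ;
  if (p_flags & PF_W) prot |= PROT_WRITE;
  if (p_flags & PF_X) prot |= PROT_EXEC;
  return prot;
}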
Example #5
0
/* Verifies the kernel-provided program header PT_LOAD entries and does the
 * segment mappings only if required. As the kernel has already mapped the
 * PT_LOAD segments, our RTLD should not map them again.
 */
void map_segments_RTLD (long fd, Elf32_Phdr *segs[2], Elf32_Half type, dso *so, Elf32_Phdr *segs_auxv[2]) {

  /* TODO: improve error handling ;) */
  long seg;
  for (seg = 0; seg < 2; ++seg) {
    if (segs[seg]->p_offset != segs_auxv[seg]->p_offset
        || segs[seg]->p_vaddr != segs_auxv[seg]->p_vaddr
        || segs[seg]->p_memsz != segs_auxv[seg]->p_memsz) {
      sl_printf("map_segments_RTLD: difference in program headers found!\n");
      sl_exit(1);
    }
  }
  
  /* Adjust text segment addresses to page size */
  //Elf32_Off text_offset = TRUNC_PAGE(segs[0]->p_offset);
  Elf32_Addr text_vaddr = TRUNC_PAGE(segs[0]->p_vaddr);  
  Elf32_Addr text_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  unsigned long mapsize = text_vlimit - text_vaddr;
  
  /* Executable has to be loaded at constant address */
  void *base_addr = 0;
  if (type == ET_EXEC) {
    base_addr = (void *)text_vaddr;
  } else {
    sl_printf("map_segments_RTLD: ELF type is not ET_EXEC!\n");
    sl_exit(1);
  }

  /* TODO: what if base address lies in already mapped area? E.g. where the loader resides? */

  /* Text segment already mapped */
  char *mapbase = base_addr;

  /* Adjust data segment addresses to page size */
  //Elf32_Off data_offset = TRUNC_PAGE(segs[1]->p_offset);
  Elf32_Addr data_vaddr = TRUNC_PAGE(segs[1]->p_vaddr);
  Elf32_Addr data_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_filesz);
  void *data_addr = mapbase + (data_vaddr - text_vaddr);
  //long data_prot = convert_prot(segs[1]->p_flags);
  
  /* Clear BSS part */
  //Elf32_Addr clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
  //void *clear_addr = mapbase + (clear_vaddr - text_vaddr);
  //void *clear_page = mapbase + (TRUNC_PAGE(clear_vaddr) - text_vaddr);
  //unsigned long nclear = data_vlimit - clear_vaddr;
 
  /* Allocate remaining part of bss section */
  Elf32_Addr bss_vaddr = data_vlimit;
  Elf32_Addr bss_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  void *bss_addr = mapbase + (bss_vaddr - text_vaddr);
  
  /* Save important information */
  so->base_addr = (type == ET_EXEC) ? 0 : mapbase;
  so->text_addr = mapbase;
  so->text_size = mapsize;  
  so->data_addr = data_addr;
  so->data_size = data_vlimit - data_vaddr;
  so->bss_addr = bss_addr;
  so->bss_size = bss_vlimit - bss_vaddr;
  so->end_addr = bss_addr + so->bss_size;
  so->text_prot = convert_prot(segs[0]->p_flags);
  so->data_prot = convert_prot(segs[1]->p_flags);
  so->bss_prot = convert_prot(segs[1]->p_flags);
}
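map_segments_RTLD is called with the program headers the kernel reports through the auxiliary vector (AT_PHDR/AT_PHNUM), as the caller in Example #6 shows. get_aux_value is not part of this listing; a minimal sketch of the assumed lookup:

#include <elf.h>

/* Assumed behaviour of get_aux_value: walk the auxiliary vector until the
 * requested entry type is found and return its value (0 if absent). */
static unsigned long get_aux_value(Elf32_auxv_t *auxv, unsigned long type) {
  for (; auxv->a_type != AT_NULL; ++auxv) {
    if (auxv->a_type == type)
      return auxv->a_un.a_val;
  }
  return 0;
}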
Example #6
0
dso *load_elf(dso *loader, const char *name, const char *path, long fd,
              unsigned char rt_load, unsigned int rtld_mode, Elf32_auxv_t *auxv) {
#ifdef D_LOAD
  sl_printf("\nLoading elf file: %s (%s)\n", path, name);
#endif

#ifdef SL_STATISTIC
  /* Some statistics */
  loaded_dsos++;
  curr_loaded_dsos++;
  max_loaded_dsos = MAX(max_loaded_dsos, curr_loaded_dsos);    
#endif
  
  /* Get file information */
  struct kernel_stat file_info; 
  if(sl_fstat(fd, &file_info) == -1) {
    /* Close file */
    sl_close(fd);

    /* Signal error (longjmp) if we load at runtime */
    if(rt_load)
      signal_error(0, name, 0, "fstat failed");

    /* Not at runtime -> fail */
    sl_printf("Error load_elf: fstat failed while loading %s.\n", name);
    sl_exit(1);
  }
  
  /* Map entire file in memory */
  void *file_map = sl_mmap(0, file_info.st_size, PROT_READ,
                           MAP_PRIVATE, fd, 0);
  if ((long)file_map == -1) {
    /* Close file */
    sl_close(fd);

    /* Signal error (longjmp) if we load at runtime */
    if(rt_load)
      signal_error(0, name, 0, "mmap failed");

    /* Not at runtime -> fail */
    sl_printf("Error load_elf: mmap of file %s failed.\n", name);
    sl_exit(1);
  }
  
  /* Get ELF header and check file */
  Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *) file_map;
  long valid = check_elf(elf_hdr);
  if (valid != 0) {
    /* Invalid elf file */
    sl_close(fd);

    /* Signal error (longjmp) if we load at runtime */
    if(rt_load)
      signal_error(0, name, 0, "invalid ELF file");

    /* Not at runtime -> fail */
    sl_printf("Error load_elf: %s is not a valid ELF file (error: %d).\n",
              name, valid);
    sl_exit(1);
  }

  /* Get program and section header */
  Elf32_Phdr *program_hdr = (Elf32_Phdr *) (file_map + elf_hdr->e_phoff);
  Elf32_Shdr *shdr = (Elf32_Shdr *) (file_map + elf_hdr->e_shoff);
  /* Segments (text and data) which we have to map in memory */
  Elf32_Phdr *load_segments[2];
  long num_load = 0;
  
  /* Create new shared object */
  dso *so = sl_calloc(sizeof(dso), 1);

  /* Iterate over program headers */
  unsigned long i = 0;
  for(i = 0; i < elf_hdr->e_phnum; ++i) {
    switch (program_hdr[i].p_type) {
      
    case PT_DYNAMIC: /* Dynamic Header */
      so->dynamic_section = (Elf32_Dyn *)(program_hdr[i].p_vaddr);
      break;
      
    case PT_LOAD: /* Segment must be mapped in memory */
      if (num_load >= 2) {
        sl_printf("Error load_elf: more than two PT_LOAD segments!\n");
        sl_exit(1);
      }
      load_segments[num_load++] = program_hdr+i;
      break;

    case PT_TLS: /* Thread Local Storage information */
      if (program_hdr[i].p_memsz == 0)
        break;

      /* Initialize TLS information */
      so->tls_blocksize = program_hdr[i].p_memsz;
      so->tls_align = program_hdr[i].p_align;
      so->tls_initimage_size = program_hdr[i].p_filesz;

      /* TLS image (addr later adjusted) */
      so->tls_initimage = (void *) program_hdr[i].p_vaddr;
      
      /* Assign next module ID */
      so->tls_modid = ++GL(_dl_tls_max_dtv_idx);          
      break;

      /*
    case PT_GNU_STACK:
      if (program_hdr[i].p_flags & PF_X) {
        sl_printf("Warning: executable stack\n");
        sl_exit(1);
      }
      break;
      */
      
    case PT_GNU_RELRO: /* Sections to set readonly after relocation */
      /* Address is later adjusted */
      so->relro = program_hdr[i].p_vaddr;
      so->relro_size = program_hdr[i].p_memsz;
      break;
    }
  }

  /* Map segments into memory and initialize dso struct */
  if (rtld_mode == 1 && auxv != NULL) {

    Elf32_Phdr *program_hdr_auxv;
    Elf32_Phdr *load_segments_auxv[2];

    program_hdr_auxv = (Elf32_Phdr *)get_aux_value(auxv, AT_PHDR);
    uint32_t auxv_e_phnum = (uint32_t)get_aux_value(auxv, AT_PHNUM);
    unsigned long j = 0;
    unsigned int nr_load = 0;

    for (j = 0; j < auxv_e_phnum; j++) {
      switch (program_hdr_auxv[j].p_type) {

      case PT_LOAD: /* Segment must be mapped in memory */
        if (nr_load >= 2) {
          sl_printf("Error load_elf: more than two PT_LOAD segments!\n");
          sl_exit(1);
        }
        load_segments_auxv[nr_load++] = program_hdr_auxv + j;
        break;

      }
    }

    map_segments_RTLD(fd, load_segments, elf_hdr->e_type, so, load_segments_auxv);

  } else {
    map_segments(fd, load_segments, elf_hdr->e_type, so);
  }
  
  so->ref_count = 1;
  so->deps_count = 0;
  so->name = name;
  so->path = path;
  so->type = elf_hdr->e_type;
  so->entry = (void*) elf_hdr->e_entry;
  so->loader = loader;
  so->dynamic_section = (Elf32_Dyn *) BYTE_STEP(so->dynamic_section,
                                                so->base_addr);
  so->program_header = (Elf32_Phdr *) BYTE_STEP(elf_hdr->e_phoff,
                                                so->text_addr);
  so->program_header_num = elf_hdr->e_phnum;
   
  so->l_real = so;
  
  /* Adjust address of TLS init image and relro address */
  if (so->tls_initimage) {
    so->tls_initimage = (char *)so->tls_initimage + (long)so->base_addr;
  }
  if (so->relro) {
    so->relro = (Elf32_Addr) BYTE_STEP(so->relro, so->base_addr);
  }
  
  /* Iterate over section headers */
  char *strtab = (char *)file_map + shdr[elf_hdr->e_shstrndx].sh_offset;
  char *sname=0;
  for (i=0; i<elf_hdr->e_shnum; ++i) {
    sname = strtab + shdr[i].sh_name;

    /* Save important sections */
    if (sl_strncmp(sname, ".got", 5) == 0) {
      so->got = (char *) (so->base_addr + shdr[i].sh_addr);
      so->got_size = shdr[i].sh_size;
    }
    if (sl_strncmp(sname, ".plt", 5) == 0) {
      so->plt = (char *) (so->base_addr + shdr[i].sh_addr);
      so->plt_size = shdr[i].sh_size;
    }
    if (sl_strncmp(sname, ".got.plt", 9) == 0) {
      so->gotplt = (char *) (so->base_addr + shdr[i].sh_addr);
      so->gotplt_size = shdr[i].sh_size;
    }
  }

  /* Resolve */
  Elf32_Dyn *dyn;
  long rpath=-1;
  for (dyn = so->dynamic_section; dyn->d_tag != DT_NULL; ++dyn) {
    switch (dyn->d_tag) {
      
    case DT_INIT: /* Initialization function */
      so->init = (void (*)(int, char**, char**))
        BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;
      
    case DT_INIT_ARRAY: /* Array of initialization functions */
      so->init_array = (Elf32_Addr *)BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;
      
    case DT_INIT_ARRAYSZ: /* Size of init array */
      so->init_array_sz = (long)dyn->d_un.d_val / sizeof(Elf32_Addr);
      break;
      
    case DT_FINI: /* Finalization function */
      so->fini = (void (*)()) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;

    case DT_FINI_ARRAY: /* Array of finalization functions */
      so->fini_array = (Elf32_Addr *)BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;

    case DT_FINI_ARRAYSZ: /* Size of fini array */
      so->fini_array_sz = (long)dyn->d_un.d_val / sizeof(Elf32_Addr);
      break;
      
    case DT_RUNPATH: /* String with library search paths */
      rpath = dyn->d_un.d_val;
      break;	
      
    case DT_RPATH: /* String with library search paths */
      if (rpath == -1)
        rpath = dyn->d_un.d_val;
      break;

    case DT_PLTGOT: /* Plt part of the global offset table */
      so->gotplt = (char *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;
      
    case DT_REL: /* Relocation table */
      so->rel = (Elf32_Rel *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;

    case DT_RELSZ: /* Size of the relocation table */
      so->relsz = (long)dyn->d_un.d_val / sizeof(Elf32_Rel);
      break;
        
    case DT_JMPREL: /* Plt relocations part of relocation table */
      so->pltrel = (Elf32_Rel *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;
            
    case DT_PLTRELSZ: /* Size of plt relocations part of relocation table */
      so->pltrelsz = (long)dyn->d_un.d_val / sizeof(Elf32_Rel);
      break;

    case DT_HASH: /* ELF hash table */
      so->hash_table = (Elf32_Word *) BYTE_STEP(dyn->d_un.d_ptr,
                                                so->base_addr);
      break;

    case DT_GNU_HASH: /* GNU hash table */
      so->gnu_hash_table = (Elf32_Word *) BYTE_STEP(dyn->d_un.d_ptr,
                                                         so->base_addr);
      break;
      
    case DT_SYMTAB: /* Dynamic symbol table */
      so->symbol_table = (Elf32_Sym *) BYTE_STEP(dyn->d_un.d_ptr,
                                                 so->base_addr);
      break;

    case DT_VERDEF: /* Versions defined in this DSO */
      so->verdef = (Elf32_Verdef *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;

    case DT_VERDEFNUM: /* Number of versions defined in this DSO */
      so->verdef_num = (unsigned long) dyn->d_un.d_val;
      break;

    case DT_VERNEED: /* Versions needed by this DSO */
      so->verneed = (Elf32_Verneed *) BYTE_STEP(dyn->d_un.d_ptr,
                                                so->base_addr);
      break;

    case DT_VERNEEDNUM: /* Number of versions needed by this DSO */
      so->verneed_num = (unsigned long) dyn->d_un.d_val;
      break;

    case DT_VERSYM: /* Version symbol table */
      so->versym = (Elf32_Half *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
      break;
      
    case DT_STRTAB: /* Dynamic string table */
      so->string_table = (char *) so->base_addr + dyn->d_un.d_ptr;
      break;

    case DT_NEEDED: /* Dependencies on other DSOs */
      /* Count the number of direct dependencies */
      so->deps_count++;
      break;

    case DT_FLAGS: /* Flags */
      so->flags = dyn->d_un.d_val;
      if ((so->flags & DF_SYMBOLIC)
          || (so->flags & DF_TEXTREL)) {
        sl_printf("Error load_elf: not supported flag 0x%x in %s.\n",
                  so->flags, so->name);
        sl_exit(1);
      }
      break;

    case DT_FLAGS_1: /* Flags */
      so->flags_1 = dyn->d_un.d_val;
      if ((so->flags_1 & DF_1_GROUP)
          || (so->flags_1 & DF_1_LOADFLTR)
          || (so->flags_1 & DF_1_DIRECT)
          || (so->flags_1 & DF_1_INTERPOSE)
          || (so->flags_1 & DF_1_NODEFLIB)
          //          || (so->flags_1 & DF_1_NODUMP)
          || (so->flags_1 & DF_1_CONFALT)
          || (so->flags_1 & DF_1_ENDFILTEE)
          || (so->flags_1 & DF_1_DISPRELDNE)
          || (so->flags_1 & DF_1_DISPRELPND)) {
        sl_printf("Error load_elf: not supported flag_1 0x%x in %s.\n",
                  so->flags_1, so->name);
        sl_exit(1);
      }
          
      break;
    }
  }

  /* Initialize the versioning data */
  init_versions(so);
  
  /* Set library search paths */
  if (rpath != -1) 
    so->search_path = decompose_path(so->string_table+rpath,so->name, "RPATH");
  
  /* Allocate memory for deps */
  if (so->deps_count != 0)
    so->deps = sl_malloc(so->deps_count * sizeof(dso *));

  /* Add shared object to chain */
  chain_add(so);

  /* Now that we have the string table, iterate a second time over the dynamic
     section to get the names of the needed libraries. */
  char *lib_name = 0;
  long num = 0;
  for (dyn = so->dynamic_section; dyn->d_tag != DT_NULL; dyn++) { 
    switch (dyn->d_tag) {
      
    case DT_NEEDED:
      /* Get name of needed lib */
      lib_name = (char *)so->string_table + dyn->d_un.d_val;
      
#ifdef D_LOAD
      sl_printf("Found dependency in %s: %s\n", so->name, lib_name);
#endif

      /* Do not load the linux dynamic loader, because we replace it */
      if (sl_strncmp(lib_name, LINUX_LOADER,
                     sl_strnlen(LINUX_LOADER, MAX_LIB_NAME))==0) {
        so->deps_count--;
        continue;
      }

      /* Check if we already loaded it */
      dso *so_search = chain_search(lib_name); 
      if (so_search == 0) {
        /* Not already loaded, search for it */
        char *lib_path;
        long fd = search_lib(so, lib_name, &lib_path);
        if (fd == -1) {
          /* Not found, signal error (longjmp) if we load at runtime */
          if(rt_load)
            signal_error(0, lib_name, 0, "cannot open shared object file");

          /* Not at runtime -> fail */
          sl_printf("Error load_elf: lib %s not found.\n", lib_name);
          sl_exit(1);
        }

        /* Copy name */
        char *lname = sl_malloc(MAX_LIB_NAME);
        sl_strncpy(lname, lib_name, MAX_LIB_NAME);
        PROT_DATA(lname, MAX_LIB_NAME);

        /* Load it */
        dso *so_loaded = load_elf(so, lname, lib_path, fd, rt_load, 0, NULL);

        /* Increment local scope counter and add to direct deps */
        so->lscope_num += so_loaded->lscope_num;
        so->deps[num] = so_loaded;
        
      } else {
        
        /* Increment reference counter */
        UNPROT(so_search);
        so_search->ref_count++;
        PROT(so_search);

        /* Increment local scope counter and add to direct deps */
        so->lscope_num += so_search->lscope_num;
        so->deps[num] = so_search;        
      }
      
      num++;      
      so->lscope_num++;      
      break;
    }    
  }
  
  so->lscope_num++;
  
  /* Create local scope list. This has to be done in breadth-first order! */
  so->lscope = sl_malloc(so->lscope_num * sizeof(dso *));
  long j,k,l;
  i = 0;

  /* Add object itself */
  so->lscope[i++] = so;
  
  /* First add direct dependencies */
  for (l=0; l<so->deps_count; ++l) {
    so->lscope[i] = so->deps[l];
    ++i;
  }
  
  /* Now add deps recursively */
  for (l=0; l<so->deps_count; ++l) {
    for (k=0; k<so->deps[l]->lscope_num; ++k) {
      dso *dep = so->deps[l]->lscope[k];

      /* Check if already added */
      long found = 0;
      for (j=0; j<i; ++j) {
        if (so->lscope[j] == dep)
          found = 1;
      }

      if (found || !dep)
        continue;
      
      so->lscope[i] = dep;
      ++i;
    }
  }

  /* Initialize Global Offset Table */
  init_got(so);

#if defined(VERIFY_CFTX)
  /* Add object to dso chain if libdetox wants to check the control flow
     transfers */
  add_dso(so, (char *)file_map);
  
  #if defined(CALLBACK_MAIN_DETECTION) || defined(CALLBACK_DATA_SECTION_SEARCH)
  /* Right after loading the dso we need to detect the libc callbacks to main/__libc_csu_init */
  if (so->loader == NULL && dso_chain->next != 0) {
	  /* This is the main executable. Get the immediate values pushed on the
	     stack right before __libc_start_main is called. */

	  #if defined(CALLBACK_MAIN_DETECTION)
	  /* This is the main-function callback detection hack:
	   * - it tries to find the callback pointers passed to libc
	   * - and it adds them to the callback table so CFTX checks will pass. */
	  long count = 0;
	  unsigned char *iptr;
	  unsigned long ptr;
	  
	  if(so->type == ET_EXEC)
		iptr = (unsigned char *)so->entry;
	  else /* PIE executable */
	    iptr = (unsigned char *)((unsigned long)so->base_addr + (unsigned long)so->entry);
	  
	  /* TODO: 48? Remove hardcoded value. */
	  while(count<48) {
		
		/* is it a push instruction with a 32bit immediate value? */
		if(*iptr == 0x68) {
				/* add the immediate value pushed to the stack */
				ptr = *((unsigned long*)(++iptr));
				fbt_add_callback(dso_chain->next, (void*)ptr);
				iptr+=4;
				count+=5;
		} else {
			iptr++;
			count++;
		}
		/* in case we reached NOPs we can just stop looking for the pushes */
		if(*iptr == 0x90) break;
	  }
	  #endif /* CALLBACK_MAIN_DETECTION */
	  
	  #if defined(CALLBACK_DATA_SECTION_SEARCH)
	  /* Some x* applications have global function pointers in their .data section.
	   * These are probably widget class objects and their members. It seems that there is no
	   * other way to detect these potential callbacks than scanning through the .data section.
	   * This only needs to be done for prelinked executables, as we will detect the other
	   * callbacks during relocation. */
	   unsigned long *dptr = so->data_addr;
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->data_addr+(unsigned long)so->data_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }
	  
	   /* go through GOT */
	   dptr = (unsigned long*)(so->got);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->got+(unsigned long)so->got_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }
	   
	   /* go through GOT.PLT */
	   dptr = (unsigned long*)(so->gotplt);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->gotplt+(unsigned long)so->gotplt_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }

	   /* go through rodata */
	   dptr = (unsigned long*)(so->dso->rodata);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->dso->rodata+(unsigned long)so->dso->rodata_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }	
	   
	   /* go through reldyn */
	   dptr = (unsigned long*)(so->dso->reldyn);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->dso->reldyn+(unsigned long)so->dso->reldyn_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }	
	   	   	   
	   /* go through relplt */
	   dptr = (unsigned long*)(so->dso->relplt);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->dso->relplt+(unsigned long)so->dso->relplt_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }	   
	  #endif /* CALLBACK_DATA_SECTION_SEARCH */
	  
  }
  #endif /* defined(CALLBACK_MAIN_DETECTION) || defined(CALLBACK_DATA_SECTION_SEARCH) */

  #if defined(CALLBACK_LIBRARIES_DATA_SECTION_SEARCH)
  if(so->loader != NULL) { /* it's a library! */

	   unsigned long *dptr = so->data_addr;
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->data_addr+(unsigned long)so->data_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }
	  
	   /* go through GOT */
	   dptr = (unsigned long*)(so->got);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->got+(unsigned long)so->got_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }
	   
	   /* go through GOT.PLT */
	   dptr = (unsigned long*)(so->gotplt);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->gotplt+(unsigned long)so->gotplt_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }

	   /* go through rodata */
	   dptr = (unsigned long*)(so->dso->rodata);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->dso->rodata+(unsigned long)so->dso->rodata_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }	
	   
	   /* go through reldyn */
	   dptr = (unsigned long*)(so->dso->reldyn);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->dso->reldyn+(unsigned long)so->dso->reldyn_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }	
	   	   	   
	   /* go through relplt */
	   dptr = (unsigned long*)(so->dso->relplt);
	   if(dptr) {
		  while((unsigned long)dptr < ((unsigned long)so->dso->relplt+(unsigned long)so->dso->relplt_size)) {
	   			/* check if the obtained address points to executable memory (potential callback target) */
			if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
				|| PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
				|| PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {

				fbt_add_callback(so->dso, (void*)*dptr);
					
			 }
			 dptr++; /* advance by sizeof(unsigned long) bytes */
		 }
	   }	  
  }
  
  #endif /* CALLBACK_LIBRARIES_DATA_SECTION_SEARCH */

#endif /* VERIFY_CFTX */

  /* All necessary information in memory -> unmap file */
  sl_munmap(file_map, file_info.st_size);
  sl_close(fd);
  
  /* Protect dependencies and local search scope */
  PROT_DATA(so->deps, so->deps_count*sizeof(dso *));
  PROT_DATA(so->lscope, so->lscope_num*sizeof(dso *));
  
  return so;
}
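load_elf uses two helper macros, BYTE_STEP and PTR_IN_REGION, whose definitions are not part of this listing. The following reconstruction is an assumption based purely on how they are used above:

/* Assumed: step a file offset or unrelocated pointer by the load base. */
#define BYTE_STEP(val, base) ((unsigned long)(val) + (unsigned long)(base))

/* Assumed: does ptr point into the region [start, start + size)? */
#define PTR_IN_REGION(ptr, start, size)                      \
  ((unsigned long)(ptr) >= (unsigned long)(start) &&         \
   (unsigned long)(ptr) <  (unsigned long)(start) + (unsigned long)(size))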