/*
 * Map a shared object into memory.  The "buf" argument points to an
 * in-memory copy of the object, "size" bytes long.  The "path" argument
 * is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(const char *path, char *buf, ssize_t size)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Off base_offset;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;

    
    hdr = get_elf_header(path, buf, size);
    if (hdr == NULL)
	return (NULL);

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize  = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    while (phdr < phlimit) {
	switch (phdr->p_type) {

	case PT_INTERP:
	    phinterp = phdr;
	    break;

	case PT_LOAD:
	    segs[++nsegs] = phdr;
	    if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
		_rtld_error("%s: PT_LOAD segment %d not page-aligned",
		    path, nsegs);
		return NULL;
	    }
	    break;

	case PT_PHDR:
	    phdr_vaddr = phdr->p_vaddr;
	    phsize = phdr->p_memsz;
	    break;

	case PT_DYNAMIC:
	    phdyn = phdr;
	    break;

	case PT_TLS:
	    phtls = phdr;
	    break;
	}

	++phdr;
    }
    if (phdyn == NULL) {
	_rtld_error("%s: object is not dynamically-linked", path);
	return NULL;
    }

    if (nsegs < 0) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	return NULL;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_offset = trunc_page(segs[0]->p_offset);
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = hdr->e_type == ET_EXEC ? (caddr_t) base_vaddr : NULL;

    mapbase = mmap(base_addr, mapsize, PROT_NONE, MAP_ANON | MAP_PRIVATE |
      MAP_NOCORE, -1, 0);
    if (mapbase == (caddr_t) -1) {
	_rtld_error("%s: mmap of entire address space failed: %s",
	  path, strerror(errno));
	return NULL;
    }
    if (base_addr != NULL && mapbase != base_addr) {
	_rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
	  path, base_addr, mapbase);
	munmap(mapbase, mapsize);
	return NULL;
    }

    for (i = 0; i <= nsegs; i++) {
	size_t data_vsize;

	/* Overlay the segment onto the proper region. */
	data_offset = trunc_page(segs[i]->p_offset);
	data_vaddr = trunc_page(segs[i]->p_vaddr);
	data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
	data_addr = mapbase + (data_vaddr - base_vaddr);
	data_prot = convert_prot(segs[i]->p_flags) | PROT_WRITE;
	data_vsize = data_vlimit - data_vaddr;
	data_flags = convert_flags(segs[i]->p_flags) |
	    MAP_FIXED | MAP_ANON | MAP_PRIVATE;
	if (mmap(data_addr, data_vsize, data_prot, data_flags,
		-1, data_offset) == (caddr_t) -1) {
	    _rtld_error("%s: mmap of data failed: %s", path, strerror(errno));
	    return NULL;
	}
	bcopy(buf + data_offset, data_addr, MIN(data_vsize, (size - data_offset)));

	/* Do BSS setup */
	if (segs[i]->p_filesz != segs[i]->p_memsz) {

	    /* Clear any BSS in the last page of the segment. */
	    clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
	    clear_addr = mapbase + (clear_vaddr - base_vaddr);
	    clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

	    if ((nclear = data_vlimit - clear_vaddr) > 0) {
		/* Make sure the end of the segment is writable */
		if ((data_prot & PROT_WRITE) == 0 && -1 ==
		     mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
			_rtld_error("%s: mprotect failed: %s", path,
			    strerror(errno));
			return NULL;
		}

		memset(clear_addr, 0, nclear);

		/* Reset the data protection back */
		if ((data_prot & PROT_WRITE) == 0)
		    mprotect(clear_page, PAGE_SIZE, data_prot);
	    }

	    /* Overlay the BSS segment onto the proper region. */
	    bss_vaddr = data_vlimit;
	    bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
	    bss_addr = mapbase +  (bss_vaddr - base_vaddr);
	    if (bss_vlimit > bss_vaddr) {	/* There is something to do */
		if (mprotect(bss_addr, bss_vlimit - bss_vaddr, data_prot) == -1) {
		    _rtld_error("%s: mprotect of bss failed: %s", path,
			strerror(errno));
		    return NULL;
		}
	    }
	}

	if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
	  (data_vlimit - data_vaddr + data_offset) >=
	  (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
	    phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
	}
    }

    obj = obj_new();
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
	obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
	obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
	obj->phdr = malloc(phsize);
	if (obj->phdr == NULL) {
	    obj_free(obj);
	    _rtld_error("%s: cannot allocate program header", path);
	    munmap(mapbase, mapsize);
	    return NULL;
	}
	memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
	obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
	obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
	tls_dtv_generation++;
	obj->tlsindex = ++tls_max_index;
	obj->tlssize = phtls->p_memsz;
	obj->tlsalign = phtls->p_align;
	obj->tlsinitsize = phtls->p_filesz;
	obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    return obj;
}
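
A note on helpers: map_object() above (and the examples that follow) calls convert_prot() and convert_flags(), which are not shown here. A minimal sketch of how such helpers conventionally look in rtld-style loaders follows; the exact flag choice (e.g. MAP_NOCORE for read-only segments) is an assumption and varies between systems.

#include <sys/mman.h>
#include <elf.h>

/*
 * Translate ELF segment permission bits (PF_*) into mmap protection
 * bits (PROT_*).
 */
static int
convert_prot(int elfflags)
{
    int prot = 0;

    if (elfflags & PF_R)
	prot |= PROT_READ;
    if (elfflags & PF_W)
	prot |= PROT_WRITE;
    if (elfflags & PF_X)
	prot |= PROT_EXEC;
    return prot;
}

/*
 * Derive additional mmap flags from the segment permissions.  Marking
 * read-only segments MAP_NOCORE (omit them from core dumps) is one plausible
 * choice on BSD-style systems; adjust for platforms without that flag.
 */
static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE;		/* All mappings are private. */

    if ((elfflags & PF_W) == 0)
	flags |= MAP_NOCORE;
    return flags;
}
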
Example #2
void map_segments (long fd, Elf32_Phdr *segs[2], Elf32_Half type, dso *so) {

  /* Adjust text segment addresses to page size.  Note that text_vlimit
   * extends to the end of the data segment, so mapsize spans the object's
   * entire in-memory range. */
  Elf32_Off text_offset = TRUNC_PAGE(segs[0]->p_offset);
  Elf32_Addr text_vaddr = TRUNC_PAGE(segs[0]->p_vaddr);  
  Elf32_Addr text_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  unsigned long mapsize = text_vlimit - text_vaddr;

  /* Executable has to be loaded at constant address */
  void *base_addr = 0;
  if (type == ET_EXEC) {
    base_addr = (void *)text_vaddr;
  }

  /* TODO: what if base address lies in already mapped area? E.g. where the loader resides? */

  /* Map text segment into memory */
  char *mapbase = sl_mmap(base_addr, mapsize, convert_prot(segs[0]->p_flags),
                          MAP_PRIVATE, fd, text_offset);
  if ((long)mapbase == -1) {
    sl_close(fd);
    sl_printf("Error map_segments: mapping of text segment failed.\n");
    sl_exit(1);
  }

  /* Adjust data segment addresses to page size */
  Elf32_Off data_offset = TRUNC_PAGE(segs[1]->p_offset);
  Elf32_Addr data_vaddr = TRUNC_PAGE(segs[1]->p_vaddr);
  Elf32_Addr data_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_filesz);
  void *data_addr = mapbase + (data_vaddr - text_vaddr);
  long data_prot = convert_prot(segs[1]->p_flags);

  /* Map data segment into memory */
  if ((long)sl_mmap(data_addr, data_vlimit - data_vaddr, data_prot,
                    MAP_PRIVATE | MAP_FIXED, fd, data_offset) == -1) {
    sl_close(fd);
    sl_printf("Error map_segments: mapping of data segment failed.\n");
    sl_exit(1);
  }
   
  /* Clear BSS part */
  Elf32_Addr clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
  void *clear_addr = mapbase + (clear_vaddr - text_vaddr);
  void *clear_page = mapbase + (TRUNC_PAGE(clear_vaddr) - text_vaddr);
  unsigned long nclear = data_vlimit - clear_vaddr;

  if (nclear > 0) {
    /* Make sure the end of the segment is writable */
    if ((data_prot & PROT_WRITE) == 0 &&
        sl_mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE) == -1) {
      sl_printf("Error map_segments: mprotect on data segment failed.\n");
      sl_exit(1);
    }
    
    sl_memset(clear_addr, 0, nclear);

    /* Reset the data protection */
    if ((data_prot & PROT_WRITE) == 0) {
      sl_mprotect(clear_page, PAGE_SIZE, data_prot);
    }
  }
  
  /* Allocate remaining part of bss section */
  Elf32_Addr bss_vaddr = data_vlimit;
  Elf32_Addr bss_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  void *bss_addr = mapbase + (bss_vaddr - text_vaddr);
  if (bss_vlimit > bss_vaddr) {
    if ((long)sl_mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == -1) {
      sl_printf("Error map_segments: mmap of bss segment failed.\n");
      sl_exit(1);
    }
    
  }
  
  /* Save important information */
  so->base_addr = (type == ET_EXEC) ? 0 : mapbase;
  so->text_addr = mapbase;
  so->text_size = mapsize;  
  so->data_addr = data_addr;
  so->data_size = data_vlimit - data_vaddr;
  so->bss_addr = bss_addr;
  so->bss_size = bss_vlimit - bss_vaddr;
  so->end_addr = bss_addr + so->bss_size;
  so->text_prot = convert_prot(segs[0]->p_flags);
  so->data_prot = data_prot;
  so->bss_prot = data_prot;
}
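
Examples #2 and #4 round addresses with TRUNC_PAGE()/ROUND_PAGE() where the rtld code uses trunc_page()/round_page(). A minimal sketch of these page-rounding macros, assuming PAGE_SIZE is a power of two:

/* Round an address down (TRUNC_PAGE) or up (ROUND_PAGE) to a page boundary.
 * Assumes PAGE_SIZE is a power of two. */
#define TRUNC_PAGE(x)  ((x) & ~(PAGE_SIZE - 1))
#define ROUND_PAGE(x)  (((x) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
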
/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    caddr_t shlib_base;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;

    hdr = get_elf_header(fd, path);
    if (hdr == NULL)
	return (NULL);

    if (__ld_sharedlib_base) {
	shlib_base = (void *)(intptr_t)strtoul(__ld_sharedlib_base, NULL, 0);
    } else {
	shlib_base = NULL;
    }

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize  = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    while (phdr < phlimit) {
	switch (phdr->p_type) {

	case PT_INTERP:
	    phinterp = phdr;
	    break;

	case PT_LOAD:
	    segs[++nsegs] = phdr;
	    if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
		_rtld_error("%s: PT_LOAD segment %d not page-aligned",
		    path, nsegs);
		goto error;
	    }
	    break;

	case PT_PHDR:
	    phdr_vaddr = phdr->p_vaddr;
	    phsize = phdr->p_memsz;
	    break;

	case PT_DYNAMIC:
	    phdyn = phdr;
	    break;

	case PT_TLS:
	    phtls = phdr;
	    break;

	case PT_GNU_STACK:
	    stack_flags = phdr->p_flags;
	    break;

	case PT_GNU_RELRO:
	    relro_page = phdr->p_vaddr;
	    relro_size = phdr->p_memsz;
	    break;

	case PT_NOTE:
	    if (phdr->p_offset > PAGE_SIZE ||
	      phdr->p_offset + phdr->p_filesz > PAGE_SIZE)
		break;
	    note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
	    note_end = note_start + phdr->p_filesz;
	    break;
	}

	++phdr;
    }
    if (phdyn == NULL) {
	_rtld_error("%s: object is not dynamically-linked", path);
	goto error;
    }

    if (nsegs < 0) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t) base_vaddr;

    if (base_addr == NULL && shlib_base) {
	size_t limit = 1024 * 256 * 1024;
	size_t offset;

	for (offset = 0; offset < limit; offset += 256 * 1024) {
		mapbase = mmap(shlib_base + offset, mapsize,
			       PROT_NONE,
			       MAP_ANON | MAP_PRIVATE | MAP_NOCORE |
			       MAP_TRYFIXED,
			       -1, 0);
		if (mapbase != MAP_FAILED)
			break;
	}
    } else {
	mapbase = mmap(base_addr, mapsize,
		       PROT_NONE,
		       MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
		       -1, 0);
    }
    if (mapbase == (caddr_t) -1) {
	_rtld_error("%s: mmap of entire address space failed: %s",
	  path, rtld_strerror(errno));
	goto error;
    }
    if (base_addr != NULL && mapbase != base_addr) {
	_rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
	  path, base_addr, mapbase);
	goto error1;
    }

    for (i = 0; i <= nsegs; i++) {
	/* Overlay the segment onto the proper region. */
	data_offset = trunc_page(segs[i]->p_offset);
	data_vaddr = trunc_page(segs[i]->p_vaddr);
	data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
	data_addr = mapbase + (data_vaddr - base_vaddr);
	data_prot = convert_prot(segs[i]->p_flags);
	data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
	if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
	  data_flags, fd, data_offset) == (caddr_t) -1) {
	    _rtld_error("%s: mmap of data failed: %s", path,
		rtld_strerror(errno));
	    goto error1;
	}

	/* Do BSS setup */
	if (segs[i]->p_filesz != segs[i]->p_memsz) {

	    /* Clear any BSS in the last page of the segment. */
	    clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
	    clear_addr = mapbase + (clear_vaddr - base_vaddr);
	    clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

	    if ((nclear = data_vlimit - clear_vaddr) > 0) {
		/* Make sure the end of the segment is writable */
		if ((data_prot & PROT_WRITE) == 0 && -1 ==
		     mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
			_rtld_error("%s: mprotect failed: %s", path,
			    rtld_strerror(errno));
			goto error1;
		}

		memset(clear_addr, 0, nclear);

		/*
		 * reset the data protection back, enable the segment to be
		 * coredumped since we modified it.
		 */
		if ((data_prot & PROT_WRITE) == 0) {
		    madvise(clear_page, PAGE_SIZE, MADV_CORE);
		    mprotect(clear_page, PAGE_SIZE, data_prot);
		}
	    }

	    /* Overlay the BSS segment onto the proper region. */
	    bss_vaddr = data_vlimit;
	    bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
	    bss_addr = mapbase +  (bss_vaddr - base_vaddr);
	    if (bss_vlimit > bss_vaddr) {	/* There is something to do */
		if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
		    data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
		    _rtld_error("%s: mmap of bss failed: %s", path,
			rtld_strerror(errno));
		    goto error1;
		}
	    }
	}

	if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
	  (data_vlimit - data_vaddr + data_offset) >=
	  (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
	    phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
	}
    }

    obj = obj_new();
    if (sb != NULL) {
	obj->dev = sb->st_dev;
	obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
	obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
	obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
	obj->phdr = malloc(phsize);
	if (obj->phdr == NULL) {
	    obj_free(obj);
	    _rtld_error("%s: cannot allocate program header", path);
	    goto error1;
	}
	memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
	obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
	obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
	tls_dtv_generation++;
	obj->tlsindex = ++tls_max_index;
	obj->tlssize = phtls->p_memsz;
	obj->tlsalign = phtls->p_align;
	obj->tlsinitsize = phtls->p_filesz;
	obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    if (relro_size) {
        obj->relro_page = obj->relocbase + trunc_page(relro_page);
        obj->relro_size = round_page(relro_size);
    }
    if (note_start < note_end)
       digest_notes(obj, note_start, note_end);
    munmap(hdr, PAGE_SIZE);
    return (obj);

error1:
    munmap(mapbase, mapsize);
error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}
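
For context, a caller of this fd-based map_object() typically opens the file, stats it, and passes both in; the established mappings survive the subsequent close. The wrapper below is purely illustrative (load_object_file() is a hypothetical name, not part of the rtld sources) and assumes the rtld internals (Obj_Entry, _rtld_error) are declared elsewhere.

#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

static Obj_Entry *
load_object_file(const char *path)
{
    struct stat sb;
    Obj_Entry *obj;
    int fd;

    if ((fd = open(path, O_RDONLY)) == -1)
	return (NULL);
    if (fstat(fd, &sb) == -1) {
	close(fd);
	return (NULL);
    }
    obj = map_object(fd, path, &sb);	/* NULL on failure; details via _rtld_error() */
    close(fd);				/* the mappings persist after close */
    return (obj);
}
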
Example #4
/* Verifies the kernel-provided program header PT_LOAD entries and performs
 * the segment mappings only if required.  Since the kernel has already mapped
 * the PT_LOAD segments, our RTLD must not map them again.
 */
void map_segments_RTLD (long fd, Elf32_Phdr *segs[2], Elf32_Half type, dso *so, Elf32_Phdr *segs_auxv[2]) {

  /* TODO: improve error handling ;) */
  long j;
  for (j = 0; j < 2; ++j) {
    if (segs[j]->p_offset != segs_auxv[j]->p_offset
        || segs[j]->p_vaddr != segs_auxv[j]->p_vaddr
        || segs[j]->p_memsz != segs_auxv[j]->p_memsz) {
      sl_printf("map_segments_RTLD: difference in program headers found!\n");
      sl_exit(1);
    }
  }
  
  /* Adjust text segment addresses to page size */
  //Elf32_Off text_offset = TRUNC_PAGE(segs[0]->p_offset);
  Elf32_Addr text_vaddr = TRUNC_PAGE(segs[0]->p_vaddr);  
  Elf32_Addr text_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  unsigned long mapsize = text_vlimit - text_vaddr;
  
  /* Executable has to be loaded at constant address */
  void *base_addr = 0;
  if (type == ET_EXEC) {
    base_addr = (void *)text_vaddr;
  } else {
    sl_printf("map_segments_RTLD: object type is not ET_EXEC!\n");
    sl_exit(1);
  }

  /* TODO: what if base address lies in already mapped area? E.g. where the loader resides? */

  /* Text segment already mapped */
  char *mapbase = base_addr;

  /* Adjust data segment addresses to page size */
  //Elf32_Off data_offset = TRUNC_PAGE(segs[1]->p_offset);
  Elf32_Addr data_vaddr = TRUNC_PAGE(segs[1]->p_vaddr);
  Elf32_Addr data_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_filesz);
  void *data_addr = mapbase + (data_vaddr - text_vaddr);
  //long data_prot = convert_prot(segs[1]->p_flags);
  
  /* Clear BSS part */
  //Elf32_Addr clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
  //void *clear_addr = mapbase + (clear_vaddr - text_vaddr);
  //void *clear_page = mapbase + (TRUNC_PAGE(clear_vaddr) - text_vaddr);
  //unsigned long nclear = data_vlimit - clear_vaddr;
 
  /* Allocate remaining part of bss section */
  Elf32_Addr bss_vaddr = data_vlimit;
  Elf32_Addr bss_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  void *bss_addr = mapbase + (bss_vaddr - text_vaddr);
  
  /* Save important information */
  so->base_addr = (type == ET_EXEC) ? 0 : mapbase;
  so->text_addr = mapbase;
  so->text_size = mapsize;  
  so->data_addr = data_addr;
  so->data_size = data_vlimit - data_vaddr;
  so->bss_addr = bss_addr;
  so->bss_size = bss_vlimit - bss_vaddr;
  so->end_addr = bss_addr + so->bss_size;
  so->text_prot = convert_prot(segs[0]->p_flags);
  so->data_prot = convert_prot(segs[1]->p_flags);
  so->bss_prot = convert_prot(segs[1]->p_flags);
}
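
map_segments_RTLD() checks the file's PT_LOAD entries against segs_auxv, the program headers the kernel reported for the already-mapped executable. One way to obtain those kernel-provided headers is to walk the auxiliary vector for AT_PHDR/AT_PHNUM and collect the first two PT_LOAD entries; the helper below is a sketch under that assumption (get_load_segs_from_auxv is a hypothetical name, not part of the original loader).

#include <elf.h>

static void get_load_segs_from_auxv (Elf32_auxv_t *auxv, Elf32_Phdr *segs_auxv[2]) {
  Elf32_Phdr *phdr = 0;
  unsigned long phnum = 0, i, n = 0;

  /* Scan the auxiliary vector for the program header table and entry count */
  for (; auxv->a_type != AT_NULL; ++auxv) {
    if (auxv->a_type == AT_PHDR)
      phdr = (Elf32_Phdr *)auxv->a_un.a_val;
    else if (auxv->a_type == AT_PHNUM)
      phnum = auxv->a_un.a_val;
  }
  if (phdr == 0)
    return;

  /* Keep the first two loadable segments (text and data) */
  for (i = 0; i < phnum && n < 2; ++i) {
    if (phdr[i].p_type == PT_LOAD)
      segs_auxv[n++] = &phdr[i];
  }
}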