Example #1
static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
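
grow_heap relies on a reserve-then-commit pattern: the arena is originally mapped with PROT_NONE, and mprotect is what actually makes pages usable as the heap grows (see new_heap in Example #6). Below is a minimal standalone sketch of that pattern using the public mmap/mprotect interfaces; the 1 MiB reservation and 64 KiB commit sizes are illustrative assumptions, not values taken from the examples.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main (void)
{
  size_t reserve = 1024 * 1024;        /* illustrative reservation size */
  size_t commit = 64 * 1024;           /* illustrative committed prefix */

  /* Reserve address space without making any page accessible.  */
  char *base = mmap (NULL, reserve, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED)
    return 1;

  /* Commit the prefix by granting read/write access, as grow_heap does
     for the newly used tail of its arena.  */
  if (mprotect (base, commit, PROT_READ | PROT_WRITE) != 0)
    return 1;

  memset (base, 0, commit);            /* the committed pages are now usable */
  puts ("committed the first 64 KiB of the reservation");

  munmap (base, reserve);
  return 0;
}
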
Example #2
int
internal_function
_dl_make_stack_executable (void **stack_endp)
{
  /* This gives us the highest/lowest page that needs to be changed.  */
  uintptr_t page = ((uintptr_t) *stack_endp
		    & -(intptr_t) GLRO(dl_pagesize));
  int result = 0;

  /* Challenge the caller.  */
  if (__builtin_expect (__check_caller (RETURN_ADDRESS (0),
					allow_ldso|allow_libpthread) != 0, 0)
      || __builtin_expect (*stack_endp != __libc_stack_end, 0))
    return EPERM;

  if (__builtin_expect (__mprotect ((void *) page, GLRO(dl_pagesize),
				    __stack_prot) == 0, 1))
    goto return_success;
  result = errno;
  goto out;

 return_success:
  /* Clear the address.  */
  *stack_endp = NULL;

  /* Remember that we changed the permission.  */
  GL(dl_stack_flags) |= PF_X;

 out:
#ifdef check_consistency
  check_consistency ();
#endif

  return result;
}
Example #3
static void
_dl_unprotect_relro (struct link_map *l)
{
  ElfW(Addr) start = ((l->l_addr + l->l_relro_addr)
		      & ~(GLRO(dl_pagesize) - 1));
  ElfW(Addr) end = ((l->l_addr + l->l_relro_addr + l->l_relro_size)
		    & ~(GLRO(dl_pagesize) - 1));

  if (start != end)
    __mprotect ((void *) start, end - start, PROT_READ | PROT_WRITE);
}
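
This variant rounds both ends of the RELRO region down to a page boundary, so a region that does not end exactly on a page keeps its final partial page read-only. When the goal is to cover every byte of a range, the usual idiom is to round the start down and the end up. A hedged sketch of that alignment step follows; protect_range is a hypothetical helper, not part of glibc.

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical helper: change the protection of every page that overlaps
   [addr, addr + len).  The start is rounded down and the end rounded up
   to the system page size.  */
static int
protect_range (uintptr_t addr, size_t len, int prot)
{
  uintptr_t pagesize = (uintptr_t) sysconf (_SC_PAGESIZE);
  uintptr_t start = addr & ~(pagesize - 1);
  uintptr_t end = (addr + len + pagesize - 1) & ~(pagesize - 1);

  if (start == end)
    return 0;                          /* empty range: nothing to change */
  return mprotect ((void *) start, end - start, prot);
}
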
Example #4
static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
		   (unsigned long) new_size - h->mprotect_size,
		   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}
Example #5
int
internal_function
_dl_make_stack_executable (void **stack_endp)
{
  /* Challenge the caller.  */
  if (__builtin_expect (*stack_endp != __libc_stack_end, 0))
    return EPERM;
  *stack_endp = NULL;

#ifdef IS_IN_rtld
  if (__mprotect ((void *)_dl_hurd_data->stack_base, _dl_hurd_data->stack_size,
		  PROT_READ|PROT_WRITE|PROT_EXEC) != 0)
    return errno;

  /* Remember that we changed the permission.  */
  GL(dl_stack_flags) |= PF_X;

  return 0;
#else
  /* We don't bother to implement this for static linking.  */
  return ENOSYS;
#endif
}
Example #6
static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
		      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
		    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
	__munmap(p1, ul);
      else
	aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
	 is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
	return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
	__munmap(p2, HEAP_MAX_SIZE);
	return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
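
new_heap needs a mapping aligned to HEAP_MAX_SIZE, which plain mmap does not guarantee, so it reserves twice the size with PROT_NONE, unmaps the unaligned slack on both sides, and then commits only the pages it needs with mprotect. A standalone sketch of that over-map-and-trim trick follows, assuming a 4 MiB alignment and the hypothetical helper name map_aligned.

#include <stdint.h>
#include <sys/mman.h>

#define ALIGNMENT (4UL * 1024 * 1024)  /* illustrative, stands in for HEAP_MAX_SIZE */

/* Return an ALIGNMENT-aligned reservation with its first SIZE bytes
   committed, or NULL on failure.  SIZE is assumed to be at most ALIGNMENT.  */
static void *
map_aligned (size_t size)
{
  char *p1 = mmap (NULL, ALIGNMENT << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;

  /* Round up to the next ALIGNMENT boundary inside the reservation.  */
  char *p2 = (char *) (((uintptr_t) p1 + (ALIGNMENT - 1)) & ~(ALIGNMENT - 1));
  size_t front = p2 - p1;

  if (front != 0)
    munmap (p1, front);                        /* trim leading slack */
  munmap (p2 + ALIGNMENT, ALIGNMENT - front);  /* trim trailing slack */

  /* Commit only what is needed right now; the rest stays PROT_NONE.  */
  if (mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      munmap (p2, ALIGNMENT);
      return NULL;
    }
  return p2;
}
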
Example #7
void
_dl_relocate_object (struct link_map *l, struct link_map *scope[], int lazy)
{
  if (l->l_relocated)
    return;

  if (l->l_info[DT_TEXTREL])
    {
      /* Bletch.  We must make read-only segments writable
	 long enough to relocate them.  */
      const ElfW(Phdr) *ph;
      for (ph = l->l_phdr; ph < &l->l_phdr[l->l_phnum]; ++ph)
	if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0)
	  {
	    caddr_t mapstart = ((caddr_t) l->l_addr +
				(ph->p_vaddr & ~(_dl_pagesize - 1)));
	    caddr_t mapend = ((caddr_t) l->l_addr +
			      ((ph->p_vaddr + ph->p_memsz + _dl_pagesize - 1)
			       & ~(_dl_pagesize - 1)));
	    if (__mprotect (mapstart, mapend - mapstart,
			    PROT_READ|PROT_WRITE) < 0)
	      _dl_signal_error (errno, l->l_name,
				"cannot make segment writable for relocation");
	  }
    }

  {
    /* Do the actual relocation of the object's GOT and other data.  */

    const char *strtab		/* String table for the object's symbols.  */
      = ((void *) l->l_addr + l->l_info[DT_STRTAB]->d_un.d_ptr);

    /* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code.  */
#define RESOLVE(ref, flags) \
    (_dl_lookup_symbol (strtab + (*ref)->st_name, ref, scope, \
			l->l_name, flags))

#include "dynamic-link.h"
    ELF_DYNAMIC_RELOCATE (l, lazy);
  }

  /* Set up the PLT so its unrelocated entries will jump to
     _dl_runtime_resolve (dl-runtime.c), which will relocate them.  */
  elf_machine_runtime_setup (l, lazy);

  /* Mark the object so we know this work has been done.  */
  l->l_relocated = 1;

  if (l->l_info[DT_TEXTREL])
    {
      /* Undo the protection change we made before relocating.  */
      const ElfW(Phdr) *ph;
      for (ph = l->l_phdr; ph < &l->l_phdr[l->l_phnum]; ++ph)
	if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0)
	  {
	    caddr_t mapstart = ((caddr_t) l->l_addr +
				(ph->p_vaddr & ~(_dl_pagesize - 1)));
	    caddr_t mapend = ((caddr_t) l->l_addr +
			      ((ph->p_vaddr + ph->p_memsz + _dl_pagesize - 1)
			       & ~(_dl_pagesize - 1)));
	    int prot = 0;
	    if (ph->p_flags & PF_R)
	      prot |= PROT_READ;
	    if (ph->p_flags & PF_X)
	      prot |= PROT_EXEC;
	    if (__mprotect (mapstart, mapend - mapstart, prot) < 0)
	      _dl_signal_error (errno, l->l_name,
				"can't restore segment prot after reloc");
	  }
    }
}
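
The restore loop above rebuilds the segment protection from the ELF program-header flags, deliberately leaving PROT_WRITE out for segments that were not writable to begin with. A small hedged sketch of that flag translation (elf_flags_to_prot is a hypothetical name; the real loop does this inline):

#include <elf.h>
#include <sys/mman.h>

/* Hypothetical helper: translate ELF segment flags (PF_*) into mprotect
   protection bits, as the restore loop does for non-writable PT_LOAD
   segments after relocation.  */
static int
elf_flags_to_prot (Elf64_Word p_flags)
{
  int prot = 0;
  if (p_flags & PF_R)
    prot |= PROT_READ;
  if (p_flags & PF_W)
    prot |= PROT_WRITE;
  if (p_flags & PF_X)
    prot |= PROT_EXEC;
  return prot;
}
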
Example #8
int
internal_function
_dl_make_stack_executable (void **stack_endp)
{
  /* This gives us the highest/lowest page that needs to be changed.  */
  uintptr_t page = ((uintptr_t) *stack_endp
		    & -(intptr_t) GLRO(dl_pagesize));
  int result = 0;

  /* Challenge the caller.  */
  if (__builtin_expect (__check_caller (RETURN_ADDRESS (0),
					allow_ldso|allow_libpthread) != 0, 0)
      || __builtin_expect (*stack_endp != __libc_stack_end, 0))
    return EPERM;

  /* Newer Linux kernels support a flag to make our job easy.  */
#if defined PROT_GROWSDOWN || defined PROT_GROWSUP
# if __ASSUME_PROT_GROWSUPDOWN == 0
  static bool no_growsupdown;
  if (! no_growsupdown)
# endif
    {
      if (__builtin_expect (__mprotect ((void *) page, GLRO(dl_pagesize),
					__stack_prot) == 0, 1))
	goto return_success;
# if __ASSUME_PROT_GROWSUPDOWN == 0
      if (errno == EINVAL)
	no_growsupdown = true;
      else
# endif
	{
	  result = errno;
	  goto out;
	}
    }
#endif

  /* There is always a hole in the address space below the bottom of the
     stack.  So when we make an mprotect call that starts below the bottom
     of the stack, it will include the hole and fail with ENOMEM.

     We start with a random guess at how deep the stack might have gotten
     so as to have extended the GROWSDOWN mapping to lower pages.  */

#if __ASSUME_PROT_GROWSUPDOWN == 0
  size_t size = GLRO(dl_pagesize) * 8;

# if _STACK_GROWS_DOWN
  page = page + GLRO(dl_pagesize) - size;
  while (1)
    {
      if (__mprotect ((void *) page, size,
		      __stack_prot & ~PROT_GROWSDOWN) == 0)
	/* We got this chunk changed; loop to do another chunk below.  */
	page -= size;
      else
	{
	  if (errno != ENOMEM)	/* Unexpected failure mode.  */
	    {
	      result = errno;
	      goto out;
	    }

	  if (size == GLRO(dl_pagesize))
	    /* We just tried to mprotect the top hole page and failed.
	       We are done.  */
	    break;

	  /* Our mprotect call failed because it started below the lowest
	     stack page.  Try again on just the top half of that region.  */
	  size /= 2;
	  page += size;
	}
    }

# elif _STACK_GROWS_UP
  while (1)
    {
      if (__mprotect ((void *) page, size, __stack_prot & ~PROT_GROWSUP) == 0)
	/* We got this chunk changed; loop to do another chunk below.  */
	page += size;
      else
	{
	  if (errno != ENOMEM)	/* Unexpected failure mode.  */
	    {
	      result = errno;
	      goto out;
	    }

	  if (size == GLRO(dl_pagesize))
	    /* We just tried to mprotect the lowest hole page and failed.
	       We are done.  */
	    break;

	  /* Our mprotect call failed because it extended past the highest
	     stack page.  Try again on just the bottom half of that region.  */
	  size /= 2;
	}
    }

# else
#  error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
# endif
#endif

 return_success:
  /* Clear the address.  */
  *stack_endp = NULL;

  /* Remember that we changed the permission.  */
  GL(dl_stack_flags) |= PF_X;

 out:
#ifdef check_consistency
  check_consistency ();
#endif

  return result;
}
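
The probing loop is only the fallback for kernels without PROT_GROWSDOWN/PROT_GROWSUP. When the flag is available, a single call on the page containing __libc_stack_end is enough, because the kernel extends the protection change across the whole grows-down stack mapping. A minimal hedged sketch of that fast path on Linux (stack_page and pagesize are assumed to be supplied by the caller):

#include <stddef.h>
#include <sys/mman.h>

/* Sketch of the fast path: PROT_GROWSDOWN makes the kernel apply the new
   protection from the given page down to the lower end of the grows-down
   stack mapping, so one mprotect call covers the whole stack.  */
static int
make_stack_executable (void *stack_page, size_t pagesize)
{
  return mprotect (stack_page, pagesize,
                   PROT_READ | PROT_WRITE | PROT_EXEC | PROT_GROWSDOWN);
}
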