Example 1: realloc wrapper
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void *, realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  /* Step back over the leading crumple zone to recover the pointer
     actually returned by the underlying allocator.  */
  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  /* Pad the request with one crumple zone on each side; CLAMPADD
     guards the additions against overflow.  */
  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
			 __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      /* Advance past the leading crumple zone to the region handed
	 back to the caller, and re-register it at the new size.  */
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}
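
The wrapper over-allocates by one crumple zone on each side of the user region, and CLAMPADD keeps the padded size from wrapping around on overflow. Below is a minimal standalone sketch of that saturating addition; clamp_add is a hypothetical stand-in for the macro, and the constants are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for CLAMPADD: add two sizes, clamping at
   SIZE_MAX instead of wrapping on overflow.  */
static size_t
clamp_add (size_t a, size_t b)
{
  return (a > SIZE_MAX - b) ? SIZE_MAX : a + b;
}

int
main (void)
{
  size_t crumple_zone = 32;   /* illustrative; a runtime option in the real code */
  size_t request = 100;

  /* Total allocation = request plus one crumple zone on each side.  */
  size_t total = clamp_add (request, clamp_add (crumple_zone, crumple_zone));
  printf ("request %zu -> allocate %zu\n", request, total);

  /* A near-SIZE_MAX request saturates rather than wrapping to a tiny size.  */
  printf ("overflow case -> %zu\n", clamp_add (SIZE_MAX - 1, crumple_zone));
  return 0;
}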
Example 2: free wrapper with deferred-free queue
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  if (UNLIKELY(buf == NULL))
    return;

  BEGIN_PROTECT (free, buf);

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call. */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
  {
    VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
    return;
  }
#endif

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0, __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      /* Evict the oldest queued pointer, if any; its crumple zone is
	 stepped over here so the real free below gets the base.  */
      if (free_queue [free_ptr] != NULL)
	{
	  freeme = free_queue [free_ptr];
	  freeme -= __mf_opts.crumple_zone;
	}
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
	{
	  if (__mf_opts.trace_mf_calls)
	    {
	      VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
			     (void *) freeme,
			     __mf_opts.crumple_zone);
	    }
	  BEGIN_MALLOC_PROTECT ();
	  CALL_REAL (free, freeme);
	  END_MALLOC_PROTECT ();
	}
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
	{
	  VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
			 (void *) base,
			 (void *) buf,
			 __mf_opts.crumple_zone);
	}
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
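
The deferred-free path above is a fixed-size circular queue: each call parks its pointer in the ring and actually releases whatever pointer it displaces, so the most recent free_queue_length regions stay unrecycled and stale accesses to them remain detectable. Here is the same ring discipline in isolation; the names and the queue length are illustrative, not the libmudflap ones:

#include <stdlib.h>

#define QUEUE_LEN 4   /* illustrative; sized from free_queue_length in reality */

static void *queue[QUEUE_LEN];
static unsigned slot = 0;

/* Park BUF in the ring and release the pointer it displaces, which
   was queued QUEUE_LEN calls earlier.  */
static void
deferred_free (void *buf)
{
  void *victim = queue[slot];
  queue[slot] = buf;
  slot = (slot == QUEUE_LEN - 1) ? 0 : slot + 1;
  if (victim != NULL)
    free (victim);
}

int
main (void)
{
  /* Each call beyond the fourth frees the buffer queued four calls
     earlier; the last QUEUE_LEN buffers intentionally stay live.  */
  for (int i = 0; i < 10; i++)
    deferred_free (malloc (16));
  return 0;
}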
Example 3: free wrapper (variant without the PIC boot-buffer check)
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0, __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
	{
	  freeme = free_queue [free_ptr];
	  freeme -= __mf_opts.crumple_zone;
	}
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
	{
	  if (__mf_opts.trace_mf_calls)
	    {
	      VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
			     (void *) freeme,
			     __mf_opts.crumple_zone);
	    }
	  BEGIN_MALLOC_PROTECT ();
	  CALL_REAL (free, freeme);
	  END_MALLOC_PROTECT ();
	}
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
	{
	  VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
			 (void *) base,
			 (void *) buf,
			 __mf_opts.crumple_zone);
	}
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
Example 4: pthread_create wrapper
WRAPPER(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr, 
	 void * (*start) (void *), void *arg)
{
  DECLARE(int, munmap, void *p, size_t l);
  DECLARE(void *, mmap, void *p, size_t l, int prot, int flags, int fd, off_t of);
  DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr, 
	  void * (*start) (void *), void *arg);
  int result;
  pthread_attr_t override_attr;
  void *override_stack;
  size_t override_stacksize;
  void *override_stack_alloc = (void *) 0;
  size_t override_stacksize_alloc = 0;
  unsigned i;

  TRACE ("pthread_create\n");

  /* Garbage-collect dead threads' stacks.  */
  LOCKTH ();
  for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
    {
      struct pthread_info *pi = & __mf_pthread_info [i];
      if (! pi->used_p)
	continue;
      if (! pi->dead_p)
	continue;

      /* VERBOSE_TRACE ("thread %u pi %p stack cleanup deferred (%u)\n",
	 (unsigned) pi->self, pi, pi->dead_p); */
	      
      /* Delay actual deallocation by a few cycles, try to discourage the
	 race mentioned at the end of __mf_pthread_spawner().  */
      if (pi->dead_p)
	pi->dead_p ++;
      if (pi->dead_p >= 10 /* XXX */)
	{
	  if (pi->stack)
	    CALL_REAL (munmap, pi->stack_alloc, pi->stack_size_alloc);

	  VERBOSE_TRACE ("slot %u freed, stack %p\n", i, pi->stack_alloc);
	  memset (pi, 0, sizeof (*pi));

	  /* One round of garbage collection is enough.  */
	  break;
	}
    }
  UNLOCKTH ();

  /* Let's allocate a stack for this thread, if one is not already
     supplied by the caller.  We don't want to let e.g. the
     linuxthreads manager thread do this allocation.  */
  if (attr != NULL)
    override_attr = *attr;
  else
    pthread_attr_init (& override_attr);

  /* Get supplied attributes, if any.  */
  /* XXX: consider using POSIX2K attr_getstack() */
  if (pthread_attr_getstackaddr (& override_attr, & override_stack) != 0 ||
      pthread_attr_getstacksize (& override_attr, & override_stacksize) != 0)
    {
      override_stack = NULL;
      override_stacksize = 0;
    }

  /* Do we need to allocate the new thread's stack?  */
  if (__mf_opts.thread_stack && override_stack == NULL)
    {
      uintptr_t alignment = 256; /* power of two */

      /* Perturb the initial stack addresses slightly, to encourage
	 threads to have nonconflicting entries in the lookup cache
	 for their tracked stack objects.  */
      static unsigned perturb = 0;
      const unsigned perturb_delta = 32;
      const unsigned perturb_count = 16;
      perturb += perturb_delta;
      if (perturb > perturb_delta*perturb_count) perturb = 0;

      /* Use glibc x86 defaults */
/* Should have been defined in <limits.h> */
#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 65536
#endif
      override_stacksize = max (PTHREAD_STACK_MIN, __mf_opts.thread_stack * 1024);


#if defined(MAP_ANONYMOUS)
#define MF_MAP_ANON MAP_ANONYMOUS
#elif defined(MAP_ANON)
#define MF_MAP_ANON MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *) -1)
#endif

#ifdef MF_MAP_ANON
      override_stack = CALL_REAL (mmap, NULL, override_stacksize, 
				  PROT_READ|PROT_WRITE, 
				  MAP_PRIVATE|MF_MAP_ANON,
				  0, 0);
#else
      /* Try mapping /dev/zero instead.  */
      {
        static int zerofd = -1;
        if (zerofd == -1)
          zerofd = open ("/dev/zero", O_RDWR);
        if (zerofd == -1)
          override_stack = MAP_FAILED;
        else
          override_stack = CALL_REAL (mmap, NULL, override_stacksize, 
                                      PROT_READ|PROT_WRITE, 
                                      MAP_PRIVATE, zerofd, 0);
      }
#endif

      if (override_stack == 0 || override_stack == MAP_FAILED)
	{
	  errno = EAGAIN;
	  return -1;
	}

      VERBOSE_TRACE ("thread stack alloc %p size %lu\n", 
		     override_stack, (unsigned long) override_stacksize);

      /* Save the original allocated values for later deallocation.  */
      override_stack_alloc = override_stack;
      override_stacksize_alloc = override_stacksize;

      /* The stackaddr pthreads attribute is a candidate stack pointer.
	 It must point near the top or the bottom of this buffer, depending
	 on whether stack grows downward or upward, and suitably aligned.
	 On the x86, it grows down, so we set stackaddr near the top.  */
      /* XXX: port logic */
      override_stack = (void *)
	(((uintptr_t) override_stack + override_stacksize - alignment - perturb)
	 & (~(uintptr_t)(alignment-1)));
      
      /* XXX: consider using POSIX2K attr_setstack() */
      if (pthread_attr_setstackaddr (& override_attr, override_stack) != 0 ||
	  pthread_attr_setstacksize (& override_attr, 
				     override_stacksize - alignment - perturb) != 0)
	{
	  /* This should not happen.  */
	  CALL_REAL (munmap, override_stack, override_stacksize);
	  errno = EAGAIN;
	  return -1;
	}
    }

  /* Actually start the child thread.  */
  {
    struct pthread_start_info psi;
    struct pthread_info *pi = NULL;
    
    /* Fill in startup-control fields.  */
    psi.user_fn = start;
    psi.user_arg = arg;
    psi.thread_info = NULL;
    
    /* Actually create the thread.  */
    __mf_state = reentrant;
    result = CALL_REAL (pthread_create, thr, & override_attr,
			& __mf_pthread_spawner, (void *) & psi);
    __mf_state = active;
    /* We also hook pthread_join/pthread_exit to get into reentrant
       mode during thread shutdown/cleanup.  */

    /* Wait until child thread has progressed far enough into its
       __mf_pthread_spawner() call.  */
    while (1) /* XXX: timeout? */
      {
	volatile struct pthread_start_info *psip = & psi;
	pi = psip->thread_info;
	if (pi != NULL) 
	  break;
	sched_yield ();
      }

    /* Fill in remaining fields in pthread_info. */
    pi->stack = override_stack;
    pi->stack_size = override_stacksize;
    pi->stack_alloc = override_stack_alloc;
    pi->stack_size_alloc = override_stacksize_alloc;
    /* XXX: this might be too late for future heuristics that attempt
       to use thread stack bounds.  We may need to put the new thread
       to sleep. */
  }


  /* May need to clean up if we created a pthread_attr_t of our own.  */
  if (attr == NULL)
    pthread_attr_destroy (& override_attr); /* NB: this shouldn't deallocate stack */

  return result;
}
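
The stackaddr handed to pthreads above is not the raw mmap result: the wrapper points it near the top of the buffer (x86 stacks grow downward), backs off by the alignment slack and a small per-thread perturbation so different threads' stack objects land in different lookup-cache entries, and rounds down to a power-of-two boundary. The same address arithmetic in isolation, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uintptr_t base = 0x40001000u;   /* pretend mmap result */
  size_t stacksize = 65536;
  uintptr_t alignment = 256;      /* power of two, as in the wrapper */
  unsigned perturb = 64;          /* illustrative per-thread offset */

  /* Near the top of the buffer, minus alignment slack and the
     perturbation, rounded down to the alignment boundary.  */
  uintptr_t sp = (base + stacksize - alignment - perturb)
		 & ~(uintptr_t) (alignment - 1);

  printf ("candidate stack pointer: %#lx\n", (unsigned long) sp);
  return 0;
}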