static void *malloc_pages(size_t len, size_t alignment)
{
  void *r;
  size_t extra = 0;

  if (!page_size)
    page_size = getpagesize();

#ifndef MAP_ANON
  /* No anonymous mappings available, so map /dev/zero for zero-filled pages: */
  if (!fd_created) {
    fd_created = 1;
    fd = open("/dev/zero", O_RDWR);
  }
#endif

  CHECK_USED_AGAINST_MAX(len);

  /* Round up to nearest page: */
  if (len & (page_size - 1))
    len += page_size - (len & (page_size - 1));
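  /* e.g., with page_size = 4096, a request of len = 5000 keeps low bits
     5000 & 4095 = 904, so it grows by 4096 - 904 = 3192 up to 8192 */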

  /* Something from the cache, perhaps? */
  r = find_cached_pages(len, alignment);
  if (r)
    return r;

  extra = alignment; /* over-allocate so we can trim to an aligned start */

#ifdef MAP_ANON
  r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
  r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if (r == MAP_FAILED)
    return NULL;

  if (extra) {
    /* We allocated more than requested so that we can choose the alignment. */
    void *real_r;
    long pre_extra;

    real_r = (void *)(((unsigned long)r + (alignment - 1)) & ~(unsigned long)(alignment - 1));

    /* Unmap the slack before the aligned start... */
    pre_extra = (char *)real_r - (char *)r;
    if (pre_extra)
      if (munmap(r, pre_extra))
        GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)r, pre_extra, errno);
    /* ...and the slack after the aligned block's end: */
    if (pre_extra < (long)extra)
      if (munmap((char *)real_r + len, extra - pre_extra))
        GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n",
                (long)((char *)real_r + len), (long)(extra - pre_extra), errno);
    r = real_r;
  }

  ACTUALLY_ALLOCATING_PAGES(len);
  LOGICALLY_ALLOCATING_PAGES(len);

  return r;
}
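The over-allocate-and-trim pattern above is easiest to see in isolation. Below is a minimal, self-contained sketch of the same trick; the helper name aligned_map_pages is hypothetical, and it assumes MAP_ANONYMOUS is available, that len is already a multiple of the page size, and that alignment is a power of two no smaller than a page:

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* Hypothetical sketch, not part of the GC code above: map len bytes at
   an alignment-aligned address by mapping alignment extra bytes, then
   unmapping the slack on both sides. */
static void *aligned_map_pages(size_t len, size_t alignment)
{
  char *p, *aligned;
  size_t pre;

  /* With alignment extra bytes, an aligned block of len bytes must fit: */
  p = mmap(NULL, len + alignment, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return NULL;

  /* Round the start up to the next alignment boundary: */
  aligned = (char *)(((uintptr_t)p + (alignment - 1)) & ~(uintptr_t)(alignment - 1));
  pre = (size_t)(aligned - p);

  /* Return the leading and trailing slack to the OS; both are page
     multiples because mmap results and alignment are page-aligned. */
  if (pre)
    munmap(p, pre);
  if (pre < alignment)
    munmap(aligned + len, alignment - pre);

  return aligned;
}

Unlike malloc_pages above, this sketch skips the page cache and the accounting macros; it only demonstrates why requesting alignment extra bytes guarantees that an aligned block can be carved out.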
Example #2
/* the VM subsystem as defined by the GC files */
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
{
  kern_return_t retval;
  size_t extra = 0;
  void *r;

  if(!task_self) task_self = mach_task_self();

  CHECK_USED_AGAINST_MAX(len);
  
  /* round up to the nearest page: */
  if(len & (page_size - 1))
    len += page_size - (len & (page_size - 1));

  r = find_cached_pages(len, alignment, dirty_ok);
  if (r)
    return r;

  extra = alignment;

  retval = vm_allocate(task_self, (vm_address_t*)&r, len + extra, TRUE);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't allocate memory: %s\n", mach_error_string(retval));
    abort();
  }

  if(extra) {
    /* we allocated more than requested so that we can choose the alignment */
    void *real_r;
    long pre_extra;

    real_r = (void*)(((unsigned long)r + (alignment-1)) & ~(unsigned long)(alignment-1));
    pre_extra = (char *)real_r - (char *)r;
    if(pre_extra) {
      retval = vm_deallocate(task_self, (vm_address_t)r, pre_extra);
      if(retval != KERN_SUCCESS) {
	GCPRINT(GCOUTF, "WARNING: couldn't deallocate pre-extra: %s\n",
	       mach_error_string(retval));
      }
    }
    if(pre_extra < (long)extra) {
      if (!pre_extra) {
        /* Instead of actually deallocating, put the tail in the cache;
           there's a good chance we can use it next time: */
        ACTUALLY_ALLOCATING_PAGES(extra);
        free_actual_pages((char *)real_r + len, extra, 1);
      } else {
        retval = vm_deallocate(task_self, (vm_address_t)real_r + len,
                               extra - pre_extra);
	if(retval != KERN_SUCCESS) {
	  GCPRINT(GCOUTF, "WARNING: couldn't deallocate post-extra: %s\n",
		  mach_error_string(retval));
	}
      }
    }
    r = real_r;
  }

  ACTUALLY_ALLOCATING_PAGES(len);
  LOGICALLY_ALLOCATING_PAGES(len);

  return r;
}
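Both allocators consult find_cached_pages before asking the kernel for fresh pages, and the Mach variant even routes unused extra pages back through free_actual_pages. The cache itself lives elsewhere in the GC sources; purely as an illustration, here is a minimal sketch of the idea, assuming a hypothetical free list threaded through the cached pages themselves and ignoring alignment and the dirty_ok flag:

#include <stddef.h>

/* Hypothetical, simplified page cache: a singly linked free list whose
   headers live inside the freed pages, matched by exact size only. */
typedef struct cached_block {
  size_t len;                 /* size of this cached block, in bytes */
  struct cached_block *next;  /* next block in the free list */
} cached_block;

static cached_block *page_cache;

static void cache_pages_sketch(void *p, size_t len)
{
  cached_block *b = (cached_block *)p;  /* header stored in the pages */
  b->len = len;
  b->next = page_cache;
  page_cache = b;
}

static void *find_cached_pages_sketch(size_t len)
{
  cached_block **prev = &page_cache;
  cached_block *b;

  for (b = page_cache; b; prev = &b->next, b = b->next) {
    if (b->len == len) {  /* exact fit only, for simplicity */
      *prev = b->next;    /* unlink the block and reuse it */
      return b;
    }
  }
  return NULL;            /* miss: the caller maps fresh pages */
}

The real cache also has to honor the requested alignment and, for the dirty_ok case, track whether pages still need zeroing before reuse; this sketch omits both.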