Example #1
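A Windows flavor built on VirtualAlloc. Before committing fresh pages, it tries to pop a run of exactly len bytes from a small size-keyed cache; the freelist link lives in each cached run's first word, and the run is re-zeroed before being handed out. The alignment argument goes unused here, presumably because VirtualAlloc's 64 KB allocation granularity already satisfies it.
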
static void *malloc_pages(size_t len, size_t alignment)
{
  CHECK_USED_AGAINST_MAX(len);
  LOGICALLY_ALLOCATING_PAGES(len);

#if CACHE_SLOTS
  {
    int i, j;

    /* Look for a cached run of exactly this size in either cache bank: */
    for (j = 0; j < 2; j++) {
      for (i = 0; i < CACHE_SLOTS; i++) {
        if (cache[j][i].len == len) {
          if (cache[j][i].page) {
            /* Pop the head of the freelist; the link to the next free
               run is stored in the run's first word: */
            void *result = cache[j][i].page;
            cache[j][i].page = *(void **)result;
            memset(result, 0, len);
            return result;
          }
          break;
        }
      }
    }
  }
#endif

  ACTUALLY_ALLOCATING_PAGES(len);

  return (void *)VirtualAlloc(NULL, len,
                              MEM_COMMIT | MEM_RESERVE,
                              PAGE_READWRITE);
}
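
The cache pop above implies a matching push on the deallocation path, which isn't shown here. Below is a minimal sketch of what that push could look like, assuming the cache layout the pop implies; cache_pages and its bank parameter are hypothetical names, not from the source:

#include <stddef.h>

#define CACHE_SLOTS 8

/* Mirrors the cache layout implied by malloc_pages above (an assumption): */
static struct { size_t len; void *page; } cache[2][CACHE_SLOTS];

/* Hypothetical push: thread a page run of size len onto the matching
   slot's freelist, storing the link in the run's first word, exactly
   where the pop in malloc_pages expects to find it. */
static void cache_pages(void *p, size_t len, int bank)
{
  int i;

  for (i = 0; i < CACHE_SLOTS; i++) {
    if (cache[bank][i].len == len || !cache[bank][i].len) {
      cache[bank][i].len = len;
      *(void **)p = cache[bank][i].page; /* link to the old head */
      cache[bank][i].page = p;           /* p becomes the new head */
      return;
    }
  }
  /* No free or matching slot: release the run for real instead,
     e.g. VirtualFree(p, 0, MEM_RELEASE). */
}
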
Example #2
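A Unix mmap flavor. The request is rounded up to a whole number of pages and served from a page cache when possible; otherwise the code maps alignment extra bytes so it can munmap the slack around a suitably aligned sub-range. Systems without MAP_ANON get zero-filled pages by mapping /dev/zero instead.
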
static void *malloc_pages(size_t len, size_t alignment)
{
  void *r;
  size_t extra = 0;

  if (!page_size)
    page_size = getpagesize();

#ifndef MAP_ANON
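  /* No anonymous-mapping flag on this system; map /dev/zero for
     zero-filled pages instead: */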
  if (!fd_created) {
    fd_created = 1;
    fd = open("/dev/zero", O_RDWR);
  }
#endif

  CHECK_USED_AGAINST_MAX(len);

  /* Round up to nearest page: */
  if (len & (page_size - 1))
    len += page_size - (len & (page_size - 1));

  /* Something from the cache, perhaps? */
  r = find_cached_pages(len, alignment);
  if (r)
    return r;

  extra = alignment;

#ifdef MAP_ANON
  r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
  r = mmap(NULL, len + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if (r == MAP_FAILED)
    return NULL;

  if (extra) {
    /* We allocated extra space, so we can pick an aligned start within
       the mapping and unmap the slack on either side: */
    void *real_r;
    long pre_extra;

    real_r = (void *)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));

    pre_extra = (char *)real_r - (char *)r;
    if (pre_extra)
      if (munmap(r, pre_extra))
        GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n", (long)r, pre_extra, errno);
    if ((size_t)pre_extra < extra)
      if (munmap((char *)real_r + len, extra - pre_extra))
        GCPRINT(GCOUTF, "Unmap warning: %lx, %ld, %d\n",
                (long)((char *)real_r + len), (long)(extra - pre_extra), errno);
    r = real_r;
  }

  ACTUALLY_ALLOCATING_PAGES(len);
  LOGICALLY_ALLOCATING_PAGES(len);

  return r;
}
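
The alignment fix-up works only because alignment is a power of two: adding alignment - 1 and masking with ~(alignment - 1) rounds an address up to the next alignment boundary. A standalone illustration of the trick (illustrative values, not from the source):

#include <assert.h>
#include <stdio.h>

int main(void)
{
  unsigned long addr = 0x12345678;
  unsigned long alignment = 0x10000; /* must be a power of two */
  unsigned long aligned = (addr + (alignment - 1)) & ~(alignment - 1);

  assert(aligned % alignment == 0);   /* lands on an alignment boundary */
  assert(aligned - addr < alignment); /* advances by less than one unit */
  printf("%#lx rounds up to %#lx\n", addr, aligned);
  return 0;
}
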
Example #3
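The simplest flavor: delegate to smemalign, an aligned allocator in the style of memalign (OSKit's C library, for one, provides this name), and zero-fill the result. The return value is not checked for NULL, so allocation is assumed to succeed.
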
inline static void *malloc_pages(size_t len, size_t alignment)
{
  void *p;

  CHECK_USED_AGAINST_MAX(len);

  /* smemalign returns len bytes aligned to `alignment`; the result is
     assumed to be non-NULL and is zero-filled for the GC: */
  p = smemalign(alignment, len);
  memset(p, 0, len);

  ACTUALLY_ALLOCATING_PAGES(len);
  LOGICALLY_ALLOCATING_PAGES(len);

  return p;
}
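
For comparison only: on a POSIX system without smemalign, roughly the same effect could be had with posix_memalign. A sketch under that assumption; malloc_pages_posix is a hypothetical name:

#include <stdlib.h>
#include <string.h>

/* Sketch: aligned, zero-filled allocation via posix_memalign.
   alignment must be a power of two and a multiple of sizeof(void *). */
static void *malloc_pages_posix(size_t len, size_t alignment)
{
  void *p = NULL;
  if (posix_memalign(&p, alignment, len) != 0)
    return NULL;
  memset(p, 0, len);
  return p;
}
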
Example #4
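A Mach (Mac OS X) flavor that mirrors the mmap version: round up to pages, try the cache, then vm_allocate with alignment extra bytes and vm_deallocate the slack. One twist: when the block happens to be aligned already, the unused tail is parked in the page cache via free_actual_pages instead of being returned to the kernel.
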
/* the VM subsystem as defined by the GC files */
static void *do_malloc_pages(size_t len, size_t alignment, int dirty_ok)
{
  kern_return_t retval;
  size_t extra = 0;
  void *r;

  if(!task_self) task_self = mach_task_self();

  CHECK_USED_AGAINST_MAX(len);
  
  /* round up to the nearest page: */
  if(len & (page_size - 1))
    len += page_size - (len & (page_size - 1));

  r = find_cached_pages(len, alignment, dirty_ok);
  if (r)
    return r;

  extra = alignment;

  retval = vm_allocate(task_self, (vm_address_t*)&r, len + extra, TRUE);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't allocate memory: %s\n", mach_error_string(retval));
    abort();
  }

  if(extra) {
    /* We allocated extra space so we can choose the alignment: */
    void *real_r;
    long pre_extra;

    real_r = (void*)(((unsigned long)r + (alignment-1)) & (~(alignment-1)));
    pre_extra = (char *)real_r - (char *)r;
    if(pre_extra) {
      retval = vm_deallocate(task_self, (vm_address_t)r, pre_extra);
      if(retval != KERN_SUCCESS) {
        GCPRINT(GCOUTF, "WARNING: couldn't deallocate pre-extra: %s\n",
                mach_error_string(retval));
      }
    }
    if((size_t)pre_extra < extra) {
      if (!pre_extra) {
        /* Instead of actually unmapping the tail, put it in the cache;
           there's a good chance we can use it next time: */
        ACTUALLY_ALLOCATING_PAGES(extra);
        free_actual_pages((char *)real_r + len, extra, 1);
      } else {
        retval = vm_deallocate(task_self, (vm_address_t)real_r + len,
                               extra - pre_extra);
        if(retval != KERN_SUCCESS) {
          GCPRINT(GCOUTF, "WARNING: couldn't deallocate post-extra: %s\n",
                  mach_error_string(retval));
        }
      }
    }
    r = real_r;
  }

  ACTUALLY_ALLOCATING_PAGES(len);
  LOGICALLY_ALLOCATING_PAGES(len);

  return r;
}
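
To make the trimming arithmetic concrete: the leading trim (pre_extra) and the trailing trim (extra - pre_extra) always add up to extra, so exactly len bytes survive. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
  /* Hypothetical numbers: a 0x10000-byte request with 0x8000 alignment,
     where vm_allocate happens to return 0x104000. */
  unsigned long r = 0x104000, len = 0x10000, alignment = 0x8000;
  unsigned long extra = alignment;
  unsigned long real_r = (r + (alignment - 1)) & ~(alignment - 1);
  unsigned long pre_extra = real_r - r;

  printf("real_r    = %#lx\n", real_r);                  /* 0x108000 */
  printf("pre trim  = %#lx bytes\n", pre_extra);         /* 0x4000 */
  printf("post trim = %#lx bytes\n", extra - pre_extra); /* 0x4000 */
  printf("kept      = [%#lx, %#lx)\n", real_r, real_r + len);
  return 0;
}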