/*--------------------------------------------------------------------------*/ void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) { static long g_pagesize; static long g_regionsize; /* Wait for spin lock */ slwait (&g_sl); /* First time initialization */ if (! g_pagesize) { g_pagesize = getpagesize (); } if (! g_regionsize) { g_regionsize = getregionsize (); } /* Allocate this */ ptr = VirtualAlloc (ptr, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE); /* Release spin lock */ slrelease (&g_sl); return ptr; }
/*--------------------------------------------------------------------------*/
/* munmap emulation for Windows.
 * Releases the entire reservation that starts at `ptr` via VirtualFree.
 * `size` is unused: with MEM_RELEASE the OS frees the whole original
 * reservation and requires dwSize == 0.
 * Returns 0 on success, MUNMAP_FAILURE otherwise.  Serialized by g_sl. */
long munmap (void *ptr, long size) {
    static long g_pagesize = 0;
    static long g_regionsize = 0;
    int status = MUNMAP_FAILURE;
    /* Serialize against the other emulated allocation calls */
    slwait (&g_sl);
    /* Lazily cache system page and allocation granularities */
    if (! g_pagesize)
        g_pagesize = getpagesize ();
    if (! g_regionsize)
        g_regionsize = getregionsize ();
    /* Hand the region back to the OS; dwSize must be 0 with MEM_RELEASE */
    if (VirtualFree (ptr, 0, MEM_RELEASE))
        status = 0;
    slrelease (&g_sl);
    return status;
}
/* munmap for windows */
/* Frees the whole VirtualAlloc reservation beginning at `ptr`.  The `size`
 * argument is accepted for munmap compatibility but not needed, because
 * MEM_RELEASE always releases the complete reservation.
 * Returns 0 on success and MUNMAP_FAILURE on error; serialized by g_sl. */
int munmap (caddr_t ptr, int size) {
    static long g_pagesize;
    static long g_regionsize;
    int result = MUNMAP_FAILURE;
    /* Serialize with the other allocation emulation routines */
    slwait (&g_sl);
    /* One-time lookup of page/region granularity */
    if (! g_pagesize)
        g_pagesize = getpagesize ();
    if (! g_regionsize)
        g_regionsize = getregionsize ();
    /* dwSize must be 0 when using MEM_RELEASE */
    if (VirtualFree (ptr, 0, MEM_RELEASE))
        result = 0;
    slrelease (&g_sl);
    return result;
}
/* mmap for windows */
/* Reserves and commits `size` bytes of read/write memory with VirtualAlloc
 * (MEM_TOP_DOWN).  `prot`, `type`, `handle` and `arg` exist only so the
 * signature matches POSIX mmap; they are ignored.
 * Returns the mapped address, or MMAP_FAILURE if the allocation fails.
 * Serialized by the g_sl spin lock. */
void *mmap (void *ptr, size_t size, int prot, int type, int handle, off_t arg) {
    static long g_pagesize;
    static long g_regionsize;
    void *result;
    /* Serialize with the other allocation emulation routines */
    slwait (&g_sl);
    /* One-time lookup of page/region granularity */
    if (! g_pagesize)
        g_pagesize = getpagesize ();
    if (! g_regionsize)
        g_regionsize = getregionsize ();
    /* Reserve and commit read/write pages in a single call */
    result = VirtualAlloc (ptr, size,
                           MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
                           PAGE_READWRITE);
    if (! result)
        result = MMAP_FAILURE;   /* translate NULL into the mmap failure code */
    slrelease (&g_sl);
    return result;
}
//static INTERNAL_INTPTR_T munmap ( void *ptr, INTERNAL_INTPTR_T size) { #ifndef USE_PTMALLOC3_ARENA static INTERNAL_INTPTR_T g_pagesize; int rc = MUNMAP_FAILURE; #ifdef TRACE printf ("munmap %p %d\n", ptr, size); #endif /* Wait for spin lock */ /* slwait (&g_sl); */ /* First time initialization */ if (! g_pagesize) g_pagesize = getpagesize (); /* Assert preconditions */ assert (((INTERNAL_SIZE_T) ptr) % g_pagesize == 0); assert (size % g_pagesize == 0); /* Free this */ // if (! VirtualFree (ptr, 0, // MEM_RELEASE)) if (! VirtualAlloc (ptr, 0, MEM_RESERVE, PAGE_NOACCESS)) goto munmap_exit; rc = 0; #ifdef TRACE printf ("Release %p %d\n", ptr, size); #endif munmap_exit: /* Release spin lock */ /* slrelease (&g_sl); */ return rc; #else // #ifndef USE_PTMALLOC3_ARENA slwait ( &g_sl ); virtual_free( &g_ptmalloc3_arena, ptr, size ); slrelease ( &g_sl ); return ( 0 ); #endif // #ifndef USE_PTMALLOC3_ARENA }
/* sbrk for windows */
/* Emulates Unix sbrk() on top of VirtualAlloc/VirtualFree.
 * size >= 0: grows the emulated break by `size` bytes.  When the current
 *            region is exhausted, searches the address space with
 *            VirtualQuery for a free, region-aligned, large-enough range,
 *            reserves AND commits it in one call, and appends it to the
 *            region list.  Returns the address of the previous break.
 * size <  0: shrinks the break, releasing whole regions that become
 *            completely unused, then clamps on underflow.
 * Returns SBRK_FAILURE on any error.  The entire call is serialized by
 * the g_sl spin lock.
 * NOTE(review): alignment checks cast pointers to (unsigned), which
 * truncates on 64-bit Windows -- confirm this file is 32-bit only. */
extern void *win_sbrk (long size) {
    static long g_pagesize, g_my_pagesize;      /* page size, scaled by SBRK_SCALE */
    static long g_regionsize, g_my_regionsize;  /* allocation granularity, scaled */
    static region_list_entry *g_last;           /* most recent region in the list */
    void *result = SBRK_FAILURE;
    /* Wait for spin lock */
    slwait (&g_sl);
    /* First time initialization */
    if (! g_pagesize) {
        g_pagesize = getpagesize ();
        g_my_pagesize = g_pagesize << SBRK_SCALE;
    }
    if (! g_regionsize) {
        g_regionsize = getregionsize ();
        g_my_regionsize = g_regionsize << SBRK_SCALE;
    }
    if (! g_last) {
        /* Seed the region list with an empty sentinel region */
        if (! region_list_append (&g_last, 0, 0))
            goto sbrk_exit;
    }
    /* Allocation requested? */
    if (size >= 0) {
        /* Allocation size is the requested size */
        long allocate_size = size;
        /* How far the request overshoots the current region's reserved top */
        long to_reserve = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_reserved;
        /* Did we exhaust the current region? */
        if (to_reserve > 0) {
            /* Now we are going to search and reserve. */
            int contiguous = -1;
            int found = FALSE;
            MEMORY_BASIC_INFORMATION memory_info;
            void *base_reserved;
            long reserve_size;
            do {
                /* Assume contiguous memory */
                contiguous = TRUE;
                /* Round size to reserve */
                reserve_size = CEIL (to_reserve, g_my_regionsize);
                /* Start with the current region's top */
                memory_info.BaseAddress = g_last->top_reserved;
                while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
                    /* Region is free, well aligned and big enough: we are done */
                    if (memory_info.State == MEM_FREE &&
                        (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
                        memory_info.RegionSize >= (unsigned) reserve_size) {
                        found = TRUE;
                        break;
                    }
                    /* From now on we can't get contiguous memory! */
                    contiguous = FALSE;
                    /* Non-contiguous: only the requested amount needs reserving */
                    reserve_size = CEIL (allocate_size, g_my_regionsize);
                    memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
                }
                /* Search failed? */
                if (! found)
                    goto sbrk_exit;
                /* Try to reserve (and commit) this range */
                base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
                                              MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
                if (! base_reserved) {
                    int rc = GetLastError ();
                    /* Any error other than "someone grabbed it first" is fatal */
                    if (rc != ERROR_INVALID_ADDRESS)
                        goto sbrk_exit;
                }
                /* A null pointer signals (hopefully) a race condition with another thread. */
                /* In this case, we try again. */
            } while (! base_reserved);
            /* Check returned pointer for consistency */
            if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
                goto sbrk_exit;
            /* Did we get contiguous memory? */
            if (contiguous) {
                /* Count the tail of the old region toward this allocation */
                long start_size = (char *) g_last->top_reserved - (char *) g_last->top_allocated;
                /* Adjust allocation size */
                allocate_size -= start_size;
                /* Adjust the regions allocation top */
                g_last->top_allocated = g_last->top_reserved;
            }
            /* Append the new region to the list */
            if (! region_list_append (&g_last, base_reserved, reserve_size))
                goto sbrk_exit;
        }
        /* Adjust the regions allocation top */
        g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
        /* sbrk returns the OLD break */
        result = (char *) g_last->top_allocated - size;
    /* Deallocation requested? */
    } else if (size < 0) {
        long deallocate_size = - size;
        /* As long as we have a whole region to release */
        while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
            /* Get the size to release */
            long release_size = g_last->reserve_size;
            /* Get the base address */
            void *base_reserved = (char *) g_last->top_reserved - release_size;
            /* Release this */
            int rc = VirtualFree (base_reserved, 0, MEM_RELEASE);
            /* Check returned code for consistency */
            if (! rc)
                goto sbrk_exit;
            /* Adjust deallocation size */
            deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
            /* Remove the old region from the list */
            if (! region_list_remove (&g_last))
                goto sbrk_exit;
        }
        /* Adjust regions allocate top */
        g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
        /* Check for underflow; clamp to the region base and report failure */
        if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
            g_last->top_allocated > g_last->top_reserved) {
            /* Adjust regions allocate top */
            g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
            goto sbrk_exit;
        }
        result = g_last->top_allocated;
    }
sbrk_exit:
    /* Release spin lock */
    slrelease (&g_sl);
    return result;
}
//static void *mmap ( void *ptr, INTERNAL_INTPTR_T size, INTERNAL_INTPTR_T prot, INTERNAL_INTPTR_T type, INTERNAL_INTPTR_T handle, INTERNAL_INTPTR_T arg) {
/* mmap emulation (ptmalloc3 build).
 * NOTE(review): the header line above was commented out in the original
 * (whitespace-collapsed) source; the non-static definition below is
 * reconstructed from it -- confirm against the upstream file.
 * Without USE_PTMALLOC3_ARENA: translates POSIX prot/type flags to NT
 * equivalents and calls VirtualAlloc.  Two special cases:
 *  - prot == PROT_NONE with a non-null ptr is treated as a request to free
 *    the range (committed pages are dropped by re-reserving them
 *    PAGE_NOACCESS; the VirtualFree form is kept commented out);
 *  - a PROT_NONE request of exactly HEAP_MAX_SIZE is over-allocated at
 *    twice the size so the result can be rounded up to a HEAP_MAX_SIZE
 *    boundary.
 * With USE_PTMALLOC3_ARENA: delegates to the arena's virtual_alloc.
 * Returns the mapped address, 0 for a successful PROT_NONE free, or
 * MORECORE_FAILURE on error.  Serialized by the g_sl spin lock. */
void *mmap (void *ptr, INTERNAL_INTPTR_T size, INTERNAL_INTPTR_T prot, INTERNAL_INTPTR_T type, INTERNAL_INTPTR_T handle, INTERNAL_INTPTR_T arg) {
#ifndef USE_PTMALLOC3_ARENA
    static INTERNAL_INTPTR_T g_pagesize;
    static INTERNAL_INTPTR_T g_regionsize;
    DWORD alloc=MEM_RESERVE|MEM_TOP_DOWN, ntprot=0;
    INTERNAL_INTPTR_T rounding=0;
    char *p;
#ifdef TRACE
    printf ("mmap %p %d %d %d\n", ptr, size, prot, type);
#endif
    /* Wait for spin lock */
    slwait (&g_sl);
    /* First time initialization */
    if (! g_pagesize)
        g_pagesize = getpagesize ();
    if (! g_regionsize)
        g_regionsize = getregionsize ();
    /* Assert preconditions: page-aligned address and size */
    assert (*(unsigned*) &ptr % g_pagesize == 0);
    assert (size % g_pagesize == 0);
    /* Map mmap flags to VirtualAlloc flags: MAP_NORESERVE => reserve only */
    if(!(type & MAP_NORESERVE))
        alloc|=MEM_COMMIT;
    /* Map POSIX protection bits to the closest NT page protection */
    if((prot & (PROT_READ|PROT_WRITE))==(PROT_READ|PROT_WRITE)) ntprot|=PAGE_READWRITE;
    else if(prot & PROT_READ) ntprot|=PAGE_READONLY;
    else if(prot & PROT_WRITE) ntprot|=PAGE_READWRITE;   /* NT has no write-only pages */
    else {
        ntprot|=PAGE_NOACCESS;
        if(size==HEAP_MAX_SIZE) {
            /* Over-allocate so the result can be aligned to HEAP_MAX_SIZE */
            rounding=size;
            size<<=1;
#ifdef TRACE
            printf("Rounding to multiple of %d\n", rounding);
#endif
        }
        if(ptr) {
            /* prot==PROT_NONE also appears to be a euphemism for free */
            MEMORY_BASIC_INFORMATION mbi;
            DWORD read=0;
            /* NOTE(review): p is set once and never advanced while `read`
             * walks forward by RegionSize, so every iteration queries the
             * same address -- p should probably track read; TODO confirm. */
            for(p=((char *)ptr)+read; read<(DWORD) size && VirtualQuery(p, &mbi, sizeof(mbi)); read+=mbi.RegionSize) {
                if(mbi.State & MEM_COMMIT) {
//                    if(!VirtualFree((LPVOID) p, mbi.RegionSize, MEM_DECOMMIT))
//                        goto mmap_exit;
                    /* Drop physical storage by re-reserving as PAGE_NOACCESS */
                    if(!VirtualAlloc((LPVOID) p, mbi.RegionSize, MEM_RESERVE, PAGE_NOACCESS))
                        goto mmap_exit;
#ifdef TRACE
                    printf ("Release %p %d\n", p, mbi.RegionSize);
#endif
                }
            }
            ptr=0; /* success */
            goto mmap_exit;
        }
    }
    /* Allocate this */
    ptr = VirtualAlloc (ptr, size, alloc, ntprot);
    if (! ptr) {
        ptr = (void *) MORECORE_FAILURE;
        goto mmap_exit;
    }
    if(rounding) {
        /* Give the double-sized probe back, then re-allocate at the
         * HEAP_MAX_SIZE-aligned address inside it */
//        VirtualFree(ptr, 0, MEM_RELEASE);
        VirtualAlloc(ptr, 0, MEM_RESERVE, PAGE_NOACCESS);
        ptr=(void *)(((INTERNAL_SIZE_T)ptr + (rounding-1)) & ~(rounding-1));
//        if(!(ptr=VirtualAlloc(ptr, rounding, alloc, ntprot)))
        if(0==(ptr=VirtualAlloc(ptr, rounding, alloc, ntprot))) {
            ptr = (void *) MORECORE_FAILURE;
            goto mmap_exit;
        }
        assert ((unsigned) ptr % rounding == 0);
        size=rounding;
    }
    else {
        /* Assert postconditions */
        assert ((unsigned) ptr % g_regionsize == 0);
    }
#ifdef TRACE
    printf ("%s %p %d %d %d\n", (type & MAP_NORESERVE) ? "Reserve" : "Commit", ptr, size, prot, type);
#endif
mmap_exit:
    /* Release spin lock */
    slrelease (&g_sl);
    return ptr;
#else // #ifndef USE_PTMALLOC3_ARENA
    /* Arena build: allocate from the ptmalloc3 arena allocator */
    void* result;
    slwait ( &g_sl );
    result = virtual_alloc ( &g_ptmalloc3_arena, (unsigned int)size );
    slrelease ( &g_sl );
    return ( result );
#endif // #ifndef USE_PTMALLOC3_ARENA
}
/* sbrk for windows */
/* Emulates Unix sbrk() with separate reserve and commit stages.
 * Each region in the list tracks three tops: reserved >= committed >=
 * allocated.  Growing commits more pages of the current region first and
 * only reserves a new region (found via a VirtualQuery scan) when the
 * reservation is exhausted; shrinking decommits page blocks and releases
 * whole regions that become unused.  Returns the previous break on
 * success and MORECORE_FAILURE on error.  Serialized by the g_sl lock.
 * NOTE(review): alignment checks cast pointers to (unsigned), which
 * truncates on 64-bit Windows -- confirm this path is 32-bit only. */
static void *sbrk (INTERNAL_INTPTR_T size) {
    static INTERNAL_INTPTR_T g_pagesize, g_my_pagesize;      /* page size, scaled by SBRK_SCALE */
    static INTERNAL_INTPTR_T g_regionsize, g_my_regionsize;  /* allocation granularity, scaled */
    static region_list_entry *g_last;                        /* most recent region */
    void *result = (void *) MORECORE_FAILURE;
#ifdef TRACE
    printf ("sbrk %d\n", size);
#endif
    /* Wait for spin lock */
    slwait (&g_sl);
    /* First time initialization */
    if (! g_pagesize) {
        g_pagesize = getpagesize ();
        g_my_pagesize = g_pagesize << SBRK_SCALE;
    }
    if (! g_regionsize) {
        g_regionsize = getregionsize ();
        g_my_regionsize = g_regionsize << SBRK_SCALE;
    }
    if (! g_last) {
        /* Seed the region list with an empty sentinel region */
        if (! region_list_append (&g_last, 0, 0))
            goto sbrk_exit;
    }
    /* Assert invariants: base <= allocated <= committed <= reserved */
    assert (g_last);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
            g_last->top_allocated <= g_last->top_committed);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
            g_last->top_committed <= g_last->top_reserved &&
            (unsigned) g_last->top_committed % g_pagesize == 0);
    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
    /* Allocation requested? */
    if (size >= 0) {
        /* Allocation size is the requested size */
        INTERNAL_INTPTR_T allocate_size = size;
        /* Compute the size to commit */
        INTERNAL_INTPTR_T to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
        /* Do we reach the commit limit? */
        if (to_commit > 0) {
            /* Round size to commit */
            INTERNAL_INTPTR_T commit_size = CEIL (to_commit, g_my_pagesize);
            /* Compute the size to reserve */
            INTERNAL_INTPTR_T to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
            /* Do we reach the reserve limit? */
            if (to_reserve > 0) {
                /* Compute the remaining size to commit in the current region */
                INTERNAL_INTPTR_T remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
                if (remaining_commit_size > 0) {
                    /* Assert preconditions */
                    assert ((unsigned) g_last->top_committed % g_pagesize == 0);
                    assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
                        /* Commit the tail of the current region first */
                        void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
                                                             MEM_COMMIT, PAGE_READWRITE);
                        /* Check returned pointer for consistency */
                        if (base_committed != g_last->top_committed)
                            goto sbrk_exit;
                        /* Assert postconditions */
                        assert ((unsigned) base_committed % g_pagesize == 0);
#ifdef TRACE
                        printf ("Commit %p %d\n", base_committed, remaining_commit_size);
#endif
                        /* Adjust the regions commit top */
                        g_last->top_committed = (char *) base_committed + remaining_commit_size;
                    }
                } {
                    /* Now we are going to search and reserve. */
                    int contiguous = -1;
                    int found = FALSE;
                    MEMORY_BASIC_INFORMATION memory_info;
                    void *base_reserved;
                    INTERNAL_INTPTR_T reserve_size;
                    do {
                        /* Assume contiguous memory */
                        contiguous = TRUE;
                        /* Round size to reserve */
                        reserve_size = CEIL (to_reserve, g_my_regionsize);
                        /* Start with the current region's top */
                        memory_info.BaseAddress = g_last->top_reserved;
                        /* Assert preconditions */
                        assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
                        while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
                            /* Assert postconditions */
                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
#ifdef TRACE
                            printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
                                    memory_info.State == MEM_FREE ? "FREE":
                                    (memory_info.State == MEM_RESERVE ? "RESERVED":
                                     (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
#endif
                            /* Region is free, well aligned and big enough: we are done */
                            if (memory_info.State == MEM_FREE &&
                                (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
                                memory_info.RegionSize >= (unsigned) reserve_size) {
                                found = TRUE;
                                break;
                            }
                            /* From now on we can't get contiguous memory! */
                            contiguous = FALSE;
                            /* Non-contiguous: only the requested amount needs reserving */
                            reserve_size = CEIL (allocate_size, g_my_regionsize);
                            memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
                            /* Assert preconditions */
                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
                            assert (0 < reserve_size && reserve_size % g_regionsize == 0);
                        }
                        /* Search failed? */
                        if (! found)
                            goto sbrk_exit;
                        /* Assert preconditions */
                        assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
                        /* Try to reserve this (commit happens separately below) */
                        base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
                                                      MEM_RESERVE, PAGE_NOACCESS);
                        if (! base_reserved) {
                            int rc = GetLastError ();
                            /* Any error other than "someone grabbed it first" is fatal */
                            if (rc != ERROR_INVALID_ADDRESS)
                                goto sbrk_exit;
                        }
                        /* A null pointer signals (hopefully) a race condition with another thread. */
                        /* In this case, we try again. */
                    } while (! base_reserved);
                    /* Check returned pointer for consistency */
                    if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
                        goto sbrk_exit;
                    /* Assert postconditions */
                    assert ((unsigned) base_reserved % g_regionsize == 0);
#ifdef TRACE
                    printf ("Reserve %p %d\n", base_reserved, reserve_size);
#endif
                    /* Did we get contiguous memory? */
                    if (contiguous) {
                        /* Count the committed tail of the old region toward this allocation */
                        INTERNAL_INTPTR_T start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
                        /* Adjust allocation size */
                        allocate_size -= start_size;
                        /* Adjust the regions allocation top */
                        g_last->top_allocated = g_last->top_committed;
                        /* Recompute the size to commit */
                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
                        /* Round size to commit */
                        commit_size = CEIL (to_commit, g_my_pagesize);
                    }
                    /* Append the new region to the list */
                    if (! region_list_append (&g_last, base_reserved, reserve_size))
                        goto sbrk_exit;
                    /* Didn't we get contiguous memory? */
                    if (! contiguous) {
                        /* Recompute the size to commit against the fresh region */
                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
                        /* Round size to commit */
                        commit_size = CEIL (to_commit, g_my_pagesize);
                    }
                }
            }
            /* Assert preconditions */
            assert ((unsigned) g_last->top_committed % g_pagesize == 0);
            assert (0 < commit_size && commit_size % g_pagesize == 0); {
                /* Commit this */
                void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
                                                     MEM_COMMIT, PAGE_READWRITE);
                /* Check returned pointer for consistency */
                if (base_committed != g_last->top_committed)
                    goto sbrk_exit;
                /* Assert postconditions */
                assert ((unsigned) base_committed % g_pagesize == 0);
#ifdef TRACE
                printf ("Commit %p %d\n", base_committed, commit_size);
#endif
                /* Adjust the regions commit top */
                g_last->top_committed = (char *) base_committed + commit_size;
            }
        }
        /* Adjust the regions allocation top */
        g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
        /* sbrk returns the OLD break */
        result = (char *) g_last->top_allocated - size;
    /* Deallocation requested? */
    } else if (size < 0) {
        INTERNAL_INTPTR_T deallocate_size = - size;
        /* As long as we have a region to release (comment fixed: original
         * read "As INTERNAL_INTPTR_T as", a search-and-replace artifact) */
        while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
            /* Get the size to release */
            INTERNAL_INTPTR_T release_size = g_last->reserve_size;
            /* Get the base address */
            void *base_reserved = (char *) g_last->top_reserved - release_size;
            /* Assert preconditions */
            assert ((unsigned) base_reserved % g_regionsize == 0);
            assert (0 < release_size && release_size % g_regionsize == 0); {
                /* Release this */
                int rc = VirtualFree (base_reserved, 0, MEM_RELEASE);
                /* Check returned code for consistency */
                if (! rc)
                    goto sbrk_exit;
#ifdef TRACE
                printf ("Release %p %d\n", base_reserved, release_size);
#endif
            }
            /* Adjust deallocation size */
            deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
            /* Remove the old region from the list */
            if (! region_list_remove (&g_last))
                goto sbrk_exit;
        } {
            /* Compute the size to decommit */
            INTERNAL_INTPTR_T to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
            if (to_decommit >= g_my_pagesize) {
                /* Compute the size to decommit */
                INTERNAL_INTPTR_T decommit_size = FLOOR (to_decommit, g_my_pagesize);
                /* Compute the base address */
                void *base_committed = (char *) g_last->top_committed - decommit_size;
                /* Assert preconditions */
                assert ((unsigned) base_committed % g_pagesize == 0);
                assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
                    /* Decommit this */
                    int rc = VirtualFree ((char *) base_committed, decommit_size, MEM_DECOMMIT);
                    /* Check returned code for consistency */
                    if (! rc)
                        goto sbrk_exit;
#ifdef TRACE
                    printf ("Decommit %p %d\n", base_committed, decommit_size);
#endif
                }
                /* Adjust deallocation size and regions commit and allocate top */
                deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
                g_last->top_committed = base_committed;
                g_last->top_allocated = base_committed;
            }
        }
        /* Adjust regions allocate top */
        g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
        /* Check for underflow; clamp to the region base and report failure */
        if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
            g_last->top_allocated > g_last->top_committed) {
            /* Adjust regions allocate top */
            g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
            goto sbrk_exit;
        }
        result = g_last->top_allocated;
    }
    /* Assert invariants */
    assert (g_last);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
            g_last->top_allocated <= g_last->top_committed);
    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
            g_last->top_committed <= g_last->top_reserved &&
            (unsigned) g_last->top_committed % g_pagesize == 0);
    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
sbrk_exit:
    /* Release spin lock */
    slrelease (&g_sl);
    return result;
}