// Sweep the list of GC-tracked raw malloc'd blocks (malloc_ptrs):
// surviving (marked) entries get their mark cleared and stay on the
// list; dead entries are unlinked, their payload freed, and the
// jl_mallocptr_t node recycled onto malloc_ptrs_freelist.
static void sweep_malloc_ptrs(void)
{
    jl_mallocptr_t *mp = malloc_ptrs;
    jl_mallocptr_t **pmp = &malloc_ptrs;
    while (mp != NULL) {
        // NOTE(review): the low bit of ->next appears to carry a tag and is
        // masked off before following the link — confirm against the writers
        // of this list.
        jl_mallocptr_t *nxt = (jl_mallocptr_t*)((uptrint_t)mp->next & ~1UL);
        if (((gcval_t*)mp)->marked) {
            // live: keep the node, reset its mark for the next collection
            pmp = &mp->next;
            ((gcval_t*)mp)->marked = 0;
        }
        else {
            // dead: unlink from malloc_ptrs and release the payload
            *pmp = nxt;
            if (mp->ptr) {
                freed_bytes += mp->sz;
#if defined(_OS_WINDOWS_) && !defined(_CPU_X86_64_)
                // 32-bit Windows: only allocations made through the aligned
                // allocator may be released with the aligned free
                if (mp->isaligned) {
                    free_a16(mp->ptr);
                }
                else {
                    free(mp->ptr);
                }
#else
                free_a16(mp->ptr);
#endif
            }
            // recycle the bookkeeping node onto the freelist
            mp->next = malloc_ptrs_freelist;
            malloc_ptrs_freelist = mp;
        }
        mp = nxt;
    }
}
void *jl_gc_managed_realloc(void *d, size_t sz, size_t oldsz, int isaligned) { if (allocd_bytes > collect_interval) jl_gc_collect(); sz = (sz+15) & -16; void *b; #ifdef _P64 b = realloc(d, sz); #elif defined(_OS_WINDOWS_) if (isaligned) b = _aligned_realloc(d, sz, 16); else b = realloc(d, sz); #elif defined(__APPLE__) b = realloc(d, sz); #else // TODO better aligned realloc here b = malloc_a16(sz); if (b != NULL) { memcpy(b, d, oldsz); if (isaligned) free_a16(d); else free(d); } #endif if (b == NULL) jl_throw(jl_memory_exception); allocd_bytes += sz; return b; }
// Sweep one size-class pool: walk every object slot in every page,
// chain unmarked slots onto a rebuilt freelist, clear marks on
// survivors, and free pages in which no live object remains.
// freed_bytes is adjusted by newly-freed slots only (slots freed this
// sweep minus slots that were already free before it).
static void sweep_pool(pool_t *p)
{
    //int empty;
    int freedall;
    gcval_t **prev_pfl;
    gcval_t *v;
    gcpage_t *pg = p->pages;
    gcpage_t **ppg = &p->pages;
    gcval_t **pfl = &p->freelist;   // tail of the freelist being rebuilt
    size_t osize = p->osize;
    size_t nfreed = 0;   // slots placed on the new freelist
    size_t nfree = 0;    // length of the pre-sweep freelist (counted below)
    gcval_t *old_fl = p->freelist;
    while (pg != NULL) {
        v = (gcval_t*)&pg->data[0];
        char *lim = (char*)v + GC_PAGE_SZ - osize;
        //empty = 1;
        freedall = 1;
        prev_pfl = pfl;   // remember tail so a fully-dead page can be rolled back
        while ((char*)v <= lim) {
            if (old_fl != NULL) {
                // keep track of difference between new and old freelist
                // in order to count newly-freed objects
                nfree++;
                old_fl = old_fl->next;
            }
            if (!v->marked) {
                // dead (or already free): link onto the new freelist
                *pfl = v;
                pfl = &v->next;
                nfreed++;
            }
            else {
                // survivor: clear mark for the next collection
                v->marked = 0;
                freedall = 0;
            }
            v = (gcval_t*)((char*)v + osize);
        }
        gcpage_t *nextpg = pg->next;
        // lazy version: (empty) if the whole page was already unused, free it
        // eager version: (freedall) free page as soon as possible
        // the eager one uses less memory.
        if (freedall) {
            // drop this page's slots from the freelist and release the page
            pfl = prev_pfl;
            *ppg = nextpg;
#ifdef MEMDEBUG
            memset(pg, 0xbb, sizeof(gcpage_t));
#endif
            free_a16(pg);
            //freed_bytes += GC_PAGE_SZ;
        }
        else {
            ppg = &pg->next;
        }
        pg = nextpg;
    }
    *pfl = NULL;
    freed_bytes += (nfreed-nfree)*osize;
}
// Release the data buffer owned by an array, if the array owns one.
// Only how==2 arrays hold a malloc'd buffer that the GC must free;
// everything else is a no-op here.
void jl_gc_free_array(jl_array_t *a)
{
    if (a->how != 2)
        return;
    // the allocation starts a->offset elements before a->data
    char *buf = (char*)a->data - a->offset*a->elsize;
    if (a->isaligned)
        free_a16(buf);
    else
        free(buf);
    freed_bytes += array_nbytes(a);
}
// Sweep the big-object list: survivors get their mark cleared and stay
// linked; unmarked objects are unlinked, accounted in freed_bytes, and
// released.
static void sweep_big(void)
{
    bigval_t **link = &big_objects;   // slot holding the pointer to `cur`
    bigval_t *cur = big_objects;
    while (cur != NULL) {
        bigval_t *next = cur->next;
        if (cur->marked) {
            // live: reset mark for the next collection and advance the link
            cur->marked = 0;
            link = &cur->next;
        }
        else {
            // dead: splice out of the list and free
            *link = next;
            freed_bytes += cur->sz;
#ifdef MEMDEBUG
            memset(cur, 0xbb, cur->sz+BVOFFS*sizeof(void*));
#endif
            free_a16(cur);
        }
        cur = next;
    }
}
// Sweep one size-class pool (preprocessor-selectable page-freeing policy):
// rebuild the pool's freelist from unmarked slots, clear marks on
// survivors, and release pages — eagerly (FREE_PAGES_EAGER: page holds no
// live object) or lazily (page was entirely free before this sweep).
// freed_bytes is adjusted by newly-freed slots only.
static void sweep_pool(pool_t *p)
{
#ifdef FREE_PAGES_EAGER
    int freedall;
#else
    int empty;
#endif
    gcval_t **prev_pfl;
    gcval_t *v;
    gcpage_t *pg = p->pages;
    gcpage_t **ppg = &p->pages;
    gcval_t **pfl = &p->freelist;   // tail of the freelist being rebuilt
    size_t osize = p->osize;
    size_t nfreed = 0;       // slots placed on the new freelist
    size_t old_nfree = 0;    // slots that were already free before the sweep
    gcval_t *ofl = p->freelist;
    // measure the pre-sweep freelist so newly-freed objects can be counted
    while (ofl != NULL) {
        old_nfree++;
        ofl = ofl->next;
    }
    while (pg != NULL) {
        v = (gcval_t*)&pg->data[0];
        char *lim = (char*)v + GC_PAGE_SZ - osize;
#ifdef FREE_PAGES_EAGER
        freedall = 1;
#else
        empty = 1;
#endif
        prev_pfl = pfl;   // remember tail so a dead page can be rolled back
        while ((char*)v <= lim) {
            if (!v->marked) {
#ifndef FREE_PAGES_EAGER
                // check that all but last object points to its next object,
                // which is a heuristic check for being on the freelist.
                if ((char*)v->next != (char*)v + osize && v->next != NULL &&
                    (char*)v+osize <= lim)
                    empty = 0;
#endif
                *pfl = v;
                pfl = &v->next;
                nfreed++;
            }
            else {
                // survivor: clear mark for the next collection
                v->marked = 0;
#ifdef FREE_PAGES_EAGER
                freedall = 0;
#else
                empty = 0;
#endif
            }
            v = (gcval_t*)((char*)v + osize);
        }
        gcpage_t *nextpg = pg->next;
        // lazy version: (empty) if the whole page was already unused, free it
        // eager version: (freedall) free page as soon as possible
        // the eager one uses less memory.
        if (
#ifdef FREE_PAGES_EAGER
            freedall
#else
            empty
#endif
            ) {
            // drop this page's slots from the freelist and release the page
            pfl = prev_pfl;
            *ppg = nextpg;
#ifdef MEMDEBUG
            memset(pg, 0xbb, sizeof(gcpage_t));
#endif
#ifdef USE_MMAP
            munmap(pg, sizeof(gcpage_t));
#else
            free_a16(pg);
#endif
            //freed_bytes += GC_PAGE_SZ;
        }
        else {
            ppg = &pg->next;
        }
        pg = nextpg;
    }
    *pfl = NULL;
    freed_bytes += (nfreed - old_nfree)*osize;
}