gboolean
sgen_need_major_collection (mword space_needed)
{
	size_t heap_size;

	if (sgen_concurrent_collection_in_progress ()) {
		heap_size = get_heap_size ();

		if (heap_size <= major_collection_trigger_size)
			return FALSE;

		/*
		 * The more the heap grows, the more we need to decrease the allowance above,
		 * in order to have similar trigger sizes as the synchronous collector.
		 * If the heap grows so much that we would need to have a negative allowance,
		 * we force the finishing of the collection, to avoid increased memory usage.
		 */
		if ((heap_size - major_start_heap_size) > major_start_heap_size * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO)
			return TRUE;
		return FALSE;
	}

	/* FIXME: This is a cop-out. We should have some way of figuring this out. */
	if (!major_collector.have_swept ())
		return FALSE;

	if (space_needed > sgen_memgov_available_free_space ())
		return TRUE;

	sgen_memgov_calculate_minor_collection_allowance ();

	heap_size = get_heap_size ();

	return heap_size > major_collection_trigger_size;
}
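/*
 * Illustrative sketch only, not part of the SGen sources: the concurrent-cycle
 * cutoff test above, shown in isolation. The 0.33 ratio is an assumed stand-in
 * for SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO and the helper name is hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
example_concurrent_cycle_must_finish (size_t major_start_heap_size, size_t heap_size)
{
	const double allowance_ratio = 0.33;	/* assumed example value */

	/* E.g. a concurrent cycle that started with a 300 MB heap is forced to
	 * finish once the heap grows past roughly 300 MB + 99 MB while it runs. */
	return (heap_size - major_start_heap_size) > major_start_heap_size * allowance_ratio;
}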
/*
** Frees the last block of the heap partially or completely, depending on
** the total size of the heap. If the heap size is a pagesize multiple,
** the whole block is freed. Otherwise, only part of the block is freed,
** so that the new heap size becomes a pagesize multiple.
*/
void	process_free(t_block **heap, size_t pagesize)
{
	t_block	*block;
	size_t	size;
	size_t	s;

	block = (*heap)->prev;
	size = block->size + sizeof(t_block);
	if (block == *heap)
		*heap = NULL;
	else
	{
		s = get_heap_size(*heap);
		if (s % pagesize)
		{
			block->size = (pagesize - (s + pagesize) % pagesize) - sizeof(t_block);
			size -= (block->size + sizeof(t_block));
			block = (void *)block + sizeof(t_block) + block->size;
		}
		else
		{
			block->prev->next = NULL;
			(*heap)->prev = block->prev;
		}
	}
	clear_block((char *)block, size);
	sbrk(-size);
}
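/*
** Assumed layout only, not the project's actual definition: process_free()
** above implies a doubly linked block header placed directly in front of each
** payload. This sketch just names the fields the function actually touches
** (prev, next, size); the real t_block may carry more.
*/
typedef struct s_block
{
	struct s_block	*prev;	/* previous block; (*heap)->prev is the last block */
	struct s_block	*next;	/* next block; NULL terminates the list */
	size_t		size;	/* payload size, header excluded */
}	t_block;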
void
sgen_memgov_major_collection_start (void)
{
	need_calculate_minor_collection_allowance = TRUE;
	major_start_heap_size = get_heap_size ();

	if (debug_print_allowance) {
		SGEN_LOG (0, "Starting collection with heap size %ld bytes", (long)major_start_heap_size);
	}
}
void
sgen_memgov_major_pre_sweep (void)
{
	if (sgen_concurrent_collection_in_progress ()) {
		major_pre_sweep_heap_size = get_heap_size ();
	} else {
		/* We decrease the allowance only in the concurrent case */
		major_pre_sweep_heap_size = major_start_heap_size;
	}
}
static void *constrained_mmap(void *addr, size_t size, int prot,
                              int flags, int fd, off_t offset)
{
	int heap_increase_allowed = !g_constrained_allocation ||
		(get_heap_limit() >= (get_heap_size() + size));

	if ( heap_increase_allowed )
	{
#ifdef SHOW_MEM_INFO
		if ( g_show_mem_info )
		{
			mstate av = get_malloc_state();
			int new_total = av->mmapped_mem + av->sbrked_mem + size;

			if ( av->max_total_mem < new_total )
				show_mem_info(new_total);
		}
#endif
		return mmap(addr, size, prot, flags, fd, offset);
	}

	return (void*) MORECORE_FAILURE;
}
/**
 * Prints memory info
 */
void print_meminfo()
{
	printk("Total mem: %d MB\nFree mem: %d MB\n",
	       get_mem_size() / 1024,
	       (get_max_blocks() - get_used_blocks()) * 4 / 1024);
	printk("Heap size: %d KB Free heap: %d KB\n",
	       get_heap_size() / 1024,
	       (get_heap_size() - get_used_heap()) / 1024);
	printk("cr0: %x cr2: %x cr3: %x\n",
	       get_cr0(), get_cr2(), get_pdbr());
}
static void *constrained_sbrk(intptr_t increment)
{
	/* increment == 0 is used to find the current break, so we must always let
	 * it through, and it should of course always be possible to decrease the
	 * heap size... */
	int heap_increase_allowed = !g_constrained_allocation ||
		increment <= 0 ||
		(get_heap_limit() >= (get_heap_size() + increment));

	if ( heap_increase_allowed )
	{
#ifdef SHOW_MEM_INFO
		if ( g_show_mem_info )
		{
			mstate av = get_malloc_state();
			int new_total = av->mmapped_mem + av->sbrked_mem + increment;

			if ( av->max_total_mem < new_total )
				show_mem_info(new_total);
		}
#endif
		return sbrk(increment);
	}

	return (void*) MORECORE_FAILURE;
}
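/*
 * Hedged integration sketch, not from the original project: dlmalloc-style
 * allocators (which is what the mstate / mmapped_mem / sbrked_mem fields above
 * suggest) let you replace the heap-growth routine through the MORECORE macro,
 * which defaults to sbrk. Assuming this allocator is configured the same way,
 * the constrained wrappers could be plugged in roughly like this; the MMAP hook
 * name used for the mmap path is an assumption and may differ in the real code.
 */
#define MORECORE		constrained_sbrk
#define MORECORE_FAILURE	(-1)
/* Assumed hook for anonymous mappings: fd -1 and offset 0. */
#define MMAP(addr, size, prot, flags) \
	constrained_mmap((addr), (size), (prot), (flags), -1, 0)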