static void
init_heap_space(size_t min_size, size_t max_size)
{
        size_t pagesize, alloc_size, reserve_size, freesize_pre, freesize_post;
        unsigned int min_num_segments, max_num_segments, bitmap_bits;
        void *p;

        pagesize = GetPageSize();
        if (SEGMENT_SIZE % pagesize != 0)
                sml_fatal(0, "SEGMENT_SIZE is not a multiple of the page size.");

        /* round the requested sizes up to whole segments */
        alloc_size = ALIGNSIZE(min_size, SEGMENT_SIZE);
        reserve_size = ALIGNSIZE(max_size, SEGMENT_SIZE);
        if (alloc_size < SEGMENT_SIZE)
                alloc_size = SEGMENT_SIZE;
        if (reserve_size < alloc_size)
                reserve_size = alloc_size;
        min_num_segments = alloc_size / SEGMENT_SIZE;
        max_num_segments = reserve_size / SEGMENT_SIZE;

        /* reserve one extra segment so that a segment-aligned region of
         * reserve_size bytes can be carved out of the reservation */
        p = ReservePage(HEAP_BEGIN_ADDR, SEGMENT_SIZE + reserve_size);
        if (p == ReservePageError)
                sml_fatal(0, "failed to allocate virtual memory.");

        /* release the unaligned head and tail of the reservation */
        freesize_post = (uintptr_t)p & (SEGMENT_SIZE - 1);
        if (freesize_post == 0) {
                ReleasePage((char*)p + reserve_size, SEGMENT_SIZE);
        } else {
                freesize_pre = SEGMENT_SIZE - freesize_post;
                ReleasePage(p, freesize_pre);
                p = (char*)p + freesize_pre;
                ReleasePage((char*)p + reserve_size, freesize_post);
        }

        heap_space.begin = p;
        heap_space.end = (char*)p + reserve_size;
        heap_space.min_num_segments = min_num_segments;
        heap_space.max_num_segments = max_num_segments;
        heap_space.num_committed = 0;
        heap_space.extend_step = min_num_segments > 0 ? min_num_segments : 1;

        /* one bit per segment, rounded up to whole bitmap words */
        bitmap_bits = ALIGNSIZE(max_num_segments, BITPTR_WORDBITS);
        heap_space.bitmap = xmalloc(bitmap_bits / CHAR_BIT);
        memset(heap_space.bitmap, 0, bitmap_bits / CHAR_BIT);

        extend_heap(min_num_segments);
}
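/* For reference: init_heap_space relies on an ALIGNSIZE helper that rounds
 * its first argument up to a multiple of its second.  The real macro is
 * defined in a shared runtime header; the definition below is a minimal,
 * hypothetical sketch, assuming the alignment is a power of two (which
 * holds for SEGMENT_SIZE, given the mask arithmetic above). */
#ifndef ALIGNSIZE
#define ALIGNSIZE(x, y)  (((x) + (y) - 1) & ~((size_t)(y) - 1))
#endif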
static NOINLINE void *
slow_alloc(size_t obj_size)
{
        void *obj;

        GCSTAT_TRIGGER(obj_size);
        do_gc();

        if (HEAP_REST(sml_heap_from_space) >= obj_size) {
                obj = sml_heap_from_space.free;
                sml_heap_from_space.free += obj_size;
#ifdef GCSTAT
                sml_heap_alloced(obj_size);
#endif /* GCSTAT */
        } else {
#ifdef GCSTAT
                stat_notice("---");
                stat_notice("event: error");
                stat_notice("heap exceeded: intended to allocate %lu bytes.",
                            (unsigned long)obj_size);
                if (gcstat.file)
                        fclose(gcstat.file);
#endif /* GCSTAT */
                sml_fatal(0, "heap exceeded: intended to allocate %lu bytes.",
                          (unsigned long)obj_size);
        }

        GIANT_UNLOCK();
#ifndef FAIR_COMPARISON
        sml_run_finalizer();
#endif /* FAIR_COMPARISON */
        return obj;
}
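/* For reference: HEAP_REST is defined with the heap structures elsewhere in
 * the runtime.  A minimal sketch, assuming the semispace keeps `free` and
 * `limit` pointers as the allocation code above suggests, would be: */
#ifndef HEAP_REST
#define HEAP_REST(space)  ((size_t)((space).limit - (space).free))
#endif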
void
sml_obj_enum_ptr(void *obj, void (*trace)(void **, void *), void *data)
{
        unsigned int i;
        unsigned int *bitmaps;

        /* DBG("%p: size=%lu, type=%08x", obj, (unsigned long)OBJ_SIZE(obj),
               (unsigned int)OBJ_TYPE(obj)); */

        switch (OBJ_TYPE(obj)) {
        case OBJTYPE_UNBOXED_ARRAY:
        case OBJTYPE_UNBOXED_VECTOR:
        case OBJTYPE_INTINF:
                /* no pointer fields */
                break;

        case OBJTYPE_BOXED_ARRAY:
        case OBJTYPE_BOXED_VECTOR:
                /* every field is a pointer */
                for (i = 0; i < OBJ_SIZE(obj) / sizeof(void*); i++)
                        trace((void**)obj + i, data);
                break;

        case OBJTYPE_RECORD:
                /* the bitmap indicates which fields are pointers */
                bitmaps = OBJ_BITMAP(obj);
                for (i = 0; i < OBJ_SIZE(obj) / sizeof(void*); i++) {
                        if (BITMAP_BIT(bitmaps, i) != TAG_UNBOXED)
                                trace((void**)obj + i, data);
                }
                break;

        default:
                sml_fatal(0, "BUG: invalid object type : %d", OBJ_TYPE(obj));
        }
}
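/* For reference: the OBJ_BITMAP/BITMAP_BIT accessors are defined with the
 * object layout elsewhere in the runtime.  A minimal, hypothetical sketch,
 * assuming one tag bit per pointer-sized field packed into unsigned ints
 * (with TAG_UNBOXED == 0), would be: */
#ifndef BITMAP_BIT
#define BITMAP_WORDBITS  (sizeof(unsigned int) * CHAR_BIT)
#define BITMAP_BIT(bm, i) \
        (((bm)[(i) / BITMAP_WORDBITS] >> ((i) % BITMAP_WORDBITS)) & 1U)
#endif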
/*
 * ptr must be either NULL or the address of an object allocated in
 * the obstack.
 * If ptr is NULL, the whole obstack is freed.
 * Otherwise, every object allocated in the obstack since ptr is freed.
 */
void
sml_obstack_free(sml_obstack_t **obstack, void *ptr)
{
        struct sml_obstack *chunk, *next;

        chunk = *obstack;
        while (chunk) {
                /* found the chunk holding ptr; roll its free pointer back */
                if (chunk->start <= (char*)ptr && (char*)ptr <= chunk->free) {
                        chunk->free = ptr;
                        chunk->base = ptr;
                        *obstack = chunk;
                        return;
                }
                /* ptr is not in this chunk; release the whole chunk */
                next = chunk->next;
                free(chunk);
                chunk = next;
        }
        if (ptr != NULL)
                sml_fatal(0, "BUG: obstack_free: invalid pointer: %p %p",
                          (void*)*obstack, ptr);
        *obstack = NULL;
}
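/* Usage sketch (hypothetical driver, not part of the runtime): obstack
 * deallocation is LIFO, so freeing back to an earlier object also releases
 * everything allocated after it.  This assumes the companion allocator
 * sml_obstack_alloc(&stack, size) that this module pairs with. */
#if 0
static void
obstack_free_example(void)
{
        sml_obstack_t *stack = NULL;
        void *a = sml_obstack_alloc(&stack, 16);
        void *b = sml_obstack_alloc(&stack, 32);
        (void)b;
        sml_obstack_free(&stack, a);    /* frees b, then rolls back to a */
        sml_obstack_free(&stack, NULL); /* frees the whole obstack */
}
#endif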
void *
sml_obj_dup(void *obj)
{
        void **slot, *newobj;
        size_t obj_size;

        switch (OBJ_TYPE(obj)) {
        case OBJTYPE_UNBOXED_ARRAY:
        case OBJTYPE_BOXED_ARRAY:
        case OBJTYPE_UNBOXED_VECTOR:
        case OBJTYPE_BOXED_VECTOR:
                obj_size = OBJ_SIZE(obj);
                /* protect obj from the collector across the allocation */
                slot = sml_push_tmp_rootset(1);
                *slot = obj;
                newobj = sml_obj_alloc(OBJ_TYPE(obj), obj_size);
                memcpy(newobj, *slot, obj_size);
                sml_pop_tmp_rootset(slot);
                return newobj;

        case OBJTYPE_RECORD:
                obj_size = OBJ_SIZE(obj);
                slot = sml_push_tmp_rootset(1);
                *slot = obj;
                newobj = sml_record_alloc(obj_size);
                /* copy the payload together with the trailing bitmap words */
                memcpy(newobj, *slot,
                       obj_size + SIZEOF_BITMAP * OBJ_BITMAPS_LEN(obj_size));
                sml_pop_tmp_rootset(slot);
                return newobj;

        default:
                sml_fatal(0, "BUG: invalid object type : %d", OBJ_TYPE(obj));
        }
}
int
prim_fegetround(void)
{
#if !defined(HAVE_FEGETROUND) && !HAVE_DECL_FEGETROUND
        /* ToDo: stub */
        sml_fatal(0, "fegetround is not implemented");
#else
        return fegetround();
#endif /* !HAVE_FEGETROUND && !HAVE_DECL_FEGETROUND */
}
void *
xmalloc(size_t size)
{
        void *page, *p;
        size_t allocsize;
        size_t pagesize = getpagesize();

        /* room for the size header plus a trailing guard page */
        allocsize = ALIGNSIZE(size + sizeof(size_t), pagesize) + pagesize;
        page = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_PRIVATE, -1, 0);
        if (page == MAP_FAILED)
                sml_sysfatal("mmap");

        /* make the last page inaccessible so overruns fault immediately */
        mprotect((char*)page + allocsize - pagesize, pagesize, PROT_NONE);

        /* place the block flush against the guard page */
        p = (char*)page + allocsize - pagesize - size;
        if (PAGEHEAD(p) != page)
                sml_fatal(0, "xmalloc");
        *(size_t*)page = size;
        /* sml_debug("xmalloc: %p (%p:%u)\n", p, page, (unsigned int)size); */
        return p;
}
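/* Usage sketch (hypothetical, illustration only): because xmalloc places
 * the block immediately before a PROT_NONE guard page, writing even one
 * byte past `size` faults at once instead of silently corrupting memory.
 * The size stored at the page head lets the matching release routine (not
 * shown here) recompute the mapping length. */
#if 0
static void
xmalloc_example(void)
{
        char *buf = xmalloc(100);
        buf[99] = 'x';          /* fine: last byte of the block */
        /* buf[100] = 'x'; */   /* would fault on the guard page */
}
#endif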
/* The single-precision stub must be named nextafterf: C has no overloading,
 * so a second definition of nextafter would clash with the double version
 * below, and the error message already refers to nextafterf. */
float
nextafterf(float x, float y)
{
        /* ToDo: stub */
        sml_fatal(0, "nextafterf is not implemented");
}
double
nextafter(double x, double y)
{
        /* ToDo: stub */
        sml_fatal(0, "nextafter is not implemented");
}
SML_PRIMITIVE int
sml_obj_equal(void *obj1, void *obj2)
{
        unsigned int i, tag;
        unsigned int *bitmap1, *bitmap2;
        void **p1, **p2;

        if (obj1 == obj2)
                return 1;
        if (obj1 == NULL || obj2 == NULL)
                return 0;
        if (OBJ_SIZE(obj1) != OBJ_SIZE(obj2))
                return 0;

        if (OBJ_TYPE(obj1) != OBJ_TYPE(obj2)) {
                /* a record may equal a vector whose fields all carry the
                 * same tag; make obj2 the record and check its bitmap */
                if (OBJ_TYPE(obj1) == OBJTYPE_RECORD) {
                        void *tmp = obj1;
                        obj1 = obj2, obj2 = tmp;
                } else if (OBJ_TYPE(obj2) != OBJTYPE_RECORD) {
                        return 0;
                }
                if (OBJ_TYPE(obj1) == OBJTYPE_UNBOXED_VECTOR)
                        tag = TAG_UNBOXED;
                else if (OBJ_TYPE(obj1) == OBJTYPE_BOXED_VECTOR)
                        tag = TAG_BOXED;
                else
                        return 0;
                assert(OBJ_SIZE(obj2) % sizeof(void*) == 0);
                bitmap2 = OBJ_BITMAP(obj2);
                for (i = 0; i < OBJ_SIZE(obj2) / sizeof(void*); i++) {
                        if (BITMAP_BIT(bitmap2, i) != tag)
                                return 0;
                }
        }

        switch (OBJ_TYPE(obj1)) {
        case OBJTYPE_UNBOXED_ARRAY:
        case OBJTYPE_BOXED_ARRAY:
                /* arrays are mutable: equal only if identical, and identity
                 * was already checked above */
                return 0;

        case OBJTYPE_UNBOXED_VECTOR:
                return memcmp(obj1, obj2, OBJ_SIZE(obj1)) == 0;

        case OBJTYPE_BOXED_VECTOR:
                p1 = obj1;
                p2 = obj2;
                assert(OBJ_SIZE(obj1) % sizeof(void*) == 0);
                for (i = 0; i < OBJ_SIZE(obj1) / sizeof(void*); i++) {
                        if (!sml_obj_equal(p1[i], p2[i]))
                                return 0;
                }
                return 1;

        case OBJTYPE_INTINF:
                return sml_intinf_cmp((sml_intinf_t*)obj1,
                                      (sml_intinf_t*)obj2) == 0;

        case OBJTYPE_RECORD:
                bitmap1 = OBJ_BITMAP(obj1);
                bitmap2 = OBJ_BITMAP(obj2);
                p1 = obj1;
                p2 = obj2;
                assert(OBJ_NUM_BITMAPS(obj1) == OBJ_NUM_BITMAPS(obj2));
                assert(OBJ_SIZE(obj1) % sizeof(void*) == 0);
                for (i = 0; i < OBJ_NUM_BITMAPS(obj1); i++) {
                        if (bitmap1[i] != bitmap2[i])
                                return 0;
                }
                /* unboxed fields compare bitwise, boxed fields recursively */
                for (i = 0; i < OBJ_SIZE(obj1) / sizeof(void*); i++) {
                        if (BITMAP_BIT(bitmap1, i) == TAG_UNBOXED) {
                                if (p1[i] != p2[i])
                                        return 0;
                        } else {
                                if (!sml_obj_equal(p1[i], p2[i]))
                                        return 0;
                        }
                }
                return 1;

        default:
                sml_fatal(0, "BUG: invalid object type : %d", OBJ_TYPE(obj1));
        }
}
void
sml_heap_init(size_t size, size_t max_size ATTR_UNUSED)
{
#ifdef PRINT_ALLOC_TIME
        arranged = size;
        double st;
        getRusage(st);
#endif /* PRINT_ALLOC_TIME */
        void *stack_bottom;

#ifdef GCSTAT
        {
                const char *env;
                env = getenv("SMLSHARP_GCSTAT_FILE");
                if (env) {
                        gcstat.file = fopen(env, "w");
                        if (gcstat.file == NULL) {
                                perror("sml_heap_init");
                                abort();
                        }
                        stat_notice = gcstat_print;
                }
                env = getenv("SMLSHARP_GCSTAT_VERBOSE");
                if (env)
                        gcstat.verbose = strtol(env, NULL, 10);
                else
                        gcstat.verbose = GCSTAT_VERBOSE_MAX;
                env = getenv("SMLSHARP_GCSTAT_PROBE");
                if (env) {
                        gcstat.probe_threshold = strtol(env, NULL, 10);
                        if (gcstat.probe_threshold == 0)
                                gcstat.probe_threshold = size;
                } else {
                        gcstat.probe_threshold = 2 * 1024 * 1024;
                }
        }
#endif /* GCSTAT */

#ifdef GCTIME
        sml_timer_now(gcstat.exec_begin);
#endif /* GCTIME */

        major_heap.base = xmalloc(size);
        major_heap.size = size;
        heap_space_clear();

        /* the bitmap information and marking stack are carved out of the
         * heap region; make sure they still fit below the limit */
        stack_bottom = make_bitmap_information(size);
        if ((char*)marking_stack_init(stack_bottom) >= (char*)major_heap.limit)
                sml_fatal(0, "heap too small");

        DBG(("heap space init %p %p %u", major_heap.base, major_heap.limit,
             (unsigned int)major_heap.size));

#ifdef PRINT_ALLOC_TIME
        double en;
        getRusage(en);
        init_time = (en - st);
        fp_at = stderr;
        if (fp_at == NULL)
                sml_fatal(0, "cannot open the allocation log file");
        print_info_init();
#endif /* PRINT_ALLOC_TIME */

#ifdef GCSTAT
        {
                unsigned int i;
                stat_notice("---");
                stat_notice("event: init");
                stat_notice("time: 0.0");
                stat_notice("heap_size: %lu", (unsigned long)size);
                stat_notice("config:");
                for (i = 0; i < THE_NUMBER_OF_FIXED_BLOCK; i++)
                        stat_notice(" %lu: {size: %lu, num_slots: %lu, "
                                    "bitmap_size: %lu}",
                                    (unsigned long)bitmap_info[i].block_size_bytes,
                                    (unsigned long)heap_layout[i].total_size,
                                    (unsigned long)heap_layout[i].block_counts,
                                    (unsigned long)heap_layout[i].bitmap_and_tree_size);
                stat_notice("stack_size: %lu",
                            (unsigned long)marking_stack.size);
                stat_notice("counters:");
                stat_notice(" heap: [fast, find, next, new]");
                stat_notice(" other: [malloc]");
                print_heap_occupancy();
        }
#endif /* GCSTAT */
}
void *
sml_heap_slow_alloc(size_t alloc_size)
{
        void *obj;
#ifdef PRINT_ALLOC_TIME
        int i;
        for (i = 0; i < THE_NUMBER_OF_FIXED_BLOCK; i++) {
                if (print_info[i].block_size >= alloc_size) {
                        print_info[i].count_gc++;
                        break;
                }
        }
#ifdef GC_TIME
        tmp_mark = count_call_mark - count_not_mark - count_outside;
#endif /* GC_TIME */
        double st;
        getRusage(st);
#endif /* PRINT_ALLOC_TIME */

#ifdef GCSTAT
        {
                struct bitmap_info_space *b_info = MAPPING_HEAP_ALLOC(alloc_size);
                gcstat.last.trigger = b_info->block_size_bytes;
        }
#endif /* GCSTAT */

        sml_heap_gc();

#ifdef PRINT_ALLOC_TIME
        double en;
        getRusage(en);
        all_time_gc += (en - st);
#ifdef GC_TIME
        fprintf(fp_at, "gc %f mark %u live %u alloc %u invoke_size %zu\n",
                (en - st),
                (count_call_mark - count_not_mark - count_outside) - tmp_mark,
                live_tmp, count_alloc - tmp_alloc, alloc_size);
        tmp_alloc = count_alloc;
#endif /* GC_TIME */
#endif /* PRINT_ALLOC_TIME */

#ifndef UPPER
        obj = heap_alloc(alloc_size);
#else /* UPPER */
        obj = heap_alloc_with_upper(alloc_size);
#endif /* UPPER */

        if (obj == NULL) {
                DBG(("alloc failed"));
#ifdef GCSTAT
                stat_notice("---");
                stat_notice("event: error");
                stat_notice("heap exceeded: intended to allocate %lu bytes.",
                            (unsigned long)alloc_size);
                if (gcstat.file)
                        fclose(gcstat.file);
#endif /* GCSTAT */
                sml_fatal(0, "heap exceeded: intended to allocate %"PRIuMAX" bytes",
                          (uintmax_t)alloc_size);
        }
        return obj;
}
SML_PRIMITIVE void *
sml_alloc(unsigned int objsize, void *frame_pointer)
{
        size_t alloc_size;
        unsigned int blocksize_log2;
        struct alloc_ptr *ptr;
        void *obj;

        /* ensure that alloc_size is at least BLOCKSIZE_MIN. */
        alloc_size = ALIGNSIZE(OBJ_HEADER_SIZE + objsize, BLOCKSIZE_MIN);

        /* objects larger than the largest block size bypass the segmented
         * heap and go to malloc */
        if (alloc_size > BLOCKSIZE_MAX) {
                GCSTAT_ALLOC_COUNT(malloc, 0, alloc_size);
                sml_save_frame_pointer(frame_pointer);
                return sml_obj_malloc(alloc_size);
        }

        blocksize_log2 = CEIL_LOG2(alloc_size);
        ASSERT(BLOCKSIZE_MIN_LOG2 <= blocksize_log2
               && blocksize_log2 <= BLOCKSIZE_MAX_LOG2);
        ptr = &ALLOC_PTR_SET()->alloc_ptr[blocksize_log2];

        /* fast path: the block under the allocation pointer is free */
        if (!BITPTR_TEST(ptr->freebit)) {
                GCSTAT_ALLOC_COUNT(fast, blocksize_log2, alloc_size);
                BITPTR_INC(ptr->freebit);
                obj = ptr->free;
                ptr->free += ptr->blocksize_bytes;
                goto alloced;
        }

        sml_save_frame_pointer(frame_pointer);

        /* search the current segment's bitmap for a free block */
        if (ptr->free != NULL) {
                obj = find_bitmap(ptr);
                if (obj) goto alloced;
        }
        /* search other segments of this block size */
        obj = find_segment(ptr);
        if (obj) goto alloced;

        /* still no room: collect, retry, then grow the heap and retry */
        GCSTAT_TRIGGER(blocksize_log2);
        do_gc(MAJOR);
        obj = find_segment(ptr);
        if (obj) goto alloced_major;

        extend_heap(heap_space.extend_step);
        obj = find_segment(ptr);
        if (obj) goto alloced_major;

        sml_fatal(0, "heap exceeded: intended to allocate %u bytes.",
                  ptr->blocksize_bytes);

 alloced_major:
        ASSERT(check_newobj(obj));
        /* NOTE: sml_run_finalizer may cause garbage collection. */
        obj = sml_run_finalizer(obj);
        goto finished;
 alloced:
        ASSERT(check_newobj(obj));
 finished:
        OBJ_HEADER(obj) = 0;
        return obj;
}
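/* For reference: CEIL_LOG2 maps a request size to its block-size class,
 * i.e. the smallest n with 2^n >= x.  The runtime's real macro likely uses
 * a compiler builtin; the portable version below is a hypothetical sketch,
 * shown for illustration only. */
#if 0
static unsigned int
ceil_log2_sketch(size_t x)
{
        unsigned int n = 0;
        while (((size_t)1 << n) < x)
                n++;
        return n;
}
#endif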