// Expand or shrink a chunk returned by OsMemory_allocate_chunk().
// The chunk is never moved.
//
// Returns 0 if it fails to expand (shrinking always succeeds);
// returns the old size if successful.
size_t OsMemory_adjust_chunk(address chunk_ptr, size_t new_committed_size) {
  ChunkInfo* ci = get_chunk_info(chunk_ptr);
  size_t old_size = ci->size;
  size_t new_size = page_align_up(new_committed_size);

  if (new_size <= ci->mmaped_size) {
    // The request fits inside the pages that are already mapped:
    // commit or decommit by toggling page protections only.
    int rv;
    if (new_size < old_size) {
      // Shrinking: protect the pages that fall off the end.
      rv = protect_area(chunk_ptr + new_size, old_size - new_size);
    } else {
      // Growing within the mapped region: unprotect up to the new size.
      rv = unprotect_area(chunk_ptr, new_size);
    }
    GUARANTEE(rv == 0, "mprotect must succeed");
    ci->size = new_size;
    return old_size;
  }

  // The request exceeds the mapped region: map the extra pages
  // immediately after the existing ones.
  new_size = page_align_up(new_size - ci->mmaped_size);
  address extra = anon_mmap(chunk_ptr + ci->mmaped_size, new_size);
  if (extra == NULL || extra == MAP_FAILED) {
    // mmap reports failure as MAP_FAILED; some anon_mmap wrappers
    // normalize that to NULL, so accept either convention here.
    return 0;
  }
  ci->mmaped_size += new_size;
  ci->size = ci->mmaped_size;
  unprotect_area(chunk_ptr, ci->size);
  return old_size;
}
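// A minimal sketch of the helpers the chunk code relies on, under the
// assumption that they are thin wrappers over mmap()/mprotect(); the
// port's real ChunkInfo, page_align_up(), protect_area(),
// unprotect_area(), and anon_mmap() may differ, and get_chunk_info()
// (lookup of the ChunkInfo for a chunk's base address) is omitted.
// The `address` typedef and SysPageSize constant are assumptions too.
#include <stddef.h>
#include <sys/mman.h>

typedef unsigned char* address;  // assumed byte-pointer type
#define SysPageSize 4096         // assumed page size (power of two)

// Per-chunk bookkeeping inferred from the fields used above.
typedef struct ChunkInfo {
  size_t size;         // currently committed (accessible) bytes
  size_t mmaped_size;  // bytes actually mapped for this chunk
} ChunkInfo;

// Round up to the next page boundary; requires a power-of-two page size.
static size_t page_align_up(size_t size) {
  return (size + SysPageSize - 1) & ~(size_t)(SysPageSize - 1);
}

// "Decommit" a range by making it inaccessible.
static int protect_area(address start, size_t size) {
  return mprotect(start, size, PROT_NONE);
}

// "Commit" a range by making it readable, writable, and executable.
static int unprotect_area(address start, size_t size) {
  return mprotect(start, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

// Anonymous mapping; a non-NULL hint pins the pages at that address so
// a chunk can be extended in place past its original reservation.
static address anon_mmap(address addr, size_t size) {
  return (address)mmap(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS |
                       (addr != NULL ? MAP_FIXED : 0),
                       -1, 0);
}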
address OsMemory_allocate_chunk(size_t initial_size,
                                size_t max_size, size_t alignment) {
  // Reserve the maximum size up front so the chunk never has to move;
  // make it page aligned.
  max_size = page_align_up(max_size);
  address chunk = anon_mmap(NULL, max_size);
  if (chunk == MAP_FAILED) {
    return NULL;
  }
  GUARANTEE((juint)chunk % alignment == 0, "must be aligned");
  GUARANTEE((juint)chunk % SysPageSize == 0, "must be page aligned");

  // Commit only the initial portion; protect the rest until it is
  // needed by OsMemory_adjust_chunk().
  size_t aligned_size = page_align_up(initial_size);
  alloc_chunk(chunk, aligned_size, max_size);
  if (max_size > aligned_size) {
    protect_area(chunk + aligned_size, max_size - aligned_size);
  }
  return chunk;
}
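// Usage sketch (hypothetical caller, not part of this file): reserve a
// 1 MB chunk with 64 KB committed, grow it, then shrink it in place.
// The sizes and the example_grow_and_shrink name are illustrative.
static void example_grow_and_shrink(void) {
  address heap = OsMemory_allocate_chunk(64 * 1024,   /* initial_size */
                                         1024 * 1024, /* max_size */
                                         4096);       /* alignment */
  if (heap == NULL) {
    return;  // reservation failed
  }
  // Grow to 256 KB committed; returns the old committed size (64 KB)
  // on success and 0 on failure, per the contract documented above.
  if (OsMemory_adjust_chunk(heap, 256 * 1024) == 0) {
    return;  // could not expand
  }
  // Shrink back to 128 KB committed; shrinking always succeeds.
  OsMemory_adjust_chunk(heap, 128 * 1024);
}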
protected_area_ptr new_protected_area(BytePtr start, BytePtr end,
                                      lisp_protection_kind reason,
                                      natural protsize, Boolean now) {
  protected_area_ptr p = malloc(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;  // nothing protected yet
  p->start = start;
  p->end = end;
  p->why = reason;

  // Push the new record onto the global list of protected areas.
  p->next = AllProtectedAreas;
  AllProtectedAreas = p;

  // Protection may be deferred; apply it immediately only on request.
  if (now) {
    protect_area(p);
  }
  return p;
}
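// A minimal sketch of the record new_protected_area() fills in and of a
// matching protect_area(); the field layout, the type aliases, and the
// mprotect-based body are assumptions inferred from the assignments
// above, not this runtime's actual definitions. Note that, unlike the
// two-argument protect_area(start, size) used by the chunk code
// earlier, this variant takes the record itself.
#include <stdlib.h>
#include <sys/mman.h>

typedef unsigned char* BytePtr;    // assumed
typedef unsigned long natural;     // assumed machine-word unsigned type
typedef int lisp_protection_kind;  // assumed enum defined elsewhere
typedef int Boolean;               // assumed

typedef struct protected_area {
  struct protected_area* next;  // linked through AllProtectedAreas
  BytePtr start;                // first byte of the watched region
  BytePtr end;                  // one past the last byte
  natural protsize;             // bytes to protect from `start`
  natural nprot;                // bytes currently protected (0 = none)
  lisp_protection_kind why;     // reason the region is protected
} protected_area, *protected_area_ptr;

static protected_area_ptr AllProtectedAreas = NULL;

// Apply the recorded protection if it is not already in force.
static void protect_area(protected_area_ptr p) {
  if (p->protsize != 0 && p->nprot == 0) {
    mprotect(p->start, p->protsize, PROT_NONE);
    p->nprot = p->protsize;
  }
}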