STATIC void gc_sweep(void) {
    #if MICROPY_PY_GC_COLLECT_RETVAL
    MP_STATE_MEM(gc_collected) = 0;
    #endif
    // free unmarked heads and their tails
    int free_tail = 0;
    for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
        switch (ATB_GET_KIND(block)) {
            case AT_HEAD:
                #if MICROPY_ENABLE_FINALISER
                if (FTB_GET(block)) {
                    mp_obj_base_t *obj = (mp_obj_base_t*)PTR_FROM_BLOCK(block);
                    if (obj->type != NULL) {
                        // if the object has a type then see if it has a __del__ method
                        mp_obj_t dest[2];
                        mp_load_method_maybe(MP_OBJ_FROM_PTR(obj), MP_QSTR___del__, dest);
                        if (dest[0] != MP_OBJ_NULL) {
                            // load_method returned a method, execute it in a protected environment
                            #if MICROPY_ENABLE_SCHEDULER
                            mp_sched_lock();
                            #endif
                            mp_call_function_1_protected(dest[0], dest[1]);
                            #if MICROPY_ENABLE_SCHEDULER
                            mp_sched_unlock();
                            #endif
                        }
                    }
                    // clear finaliser flag
                    FTB_CLEAR(block);
                }
                #endif
                free_tail = 1;
                DEBUG_printf("gc_sweep(%p)\n", PTR_FROM_BLOCK(block));
                #if MICROPY_PY_GC_COLLECT_RETVAL
                MP_STATE_MEM(gc_collected)++;
                #endif
                // fall through to free the head
            case AT_TAIL:
                if (free_tail) {
                    ATB_ANY_TO_FREE(block);
                    #if CLEAR_ON_SWEEP
                    memset((void*)PTR_FROM_BLOCK(block), 0, BYTES_PER_BLOCK);
                    #endif
                }
                break;
            case AT_MARK:
                ATB_MARK_TO_HEAD(block);
                free_tail = 0;
                break;
        }
    }
}
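
// The variant above runs each __del__ finaliser "in a protected environment"
// (mp_call_function_1_protected) so that an exception raised by a finaliser
// cannot abort the sweep part-way through. MicroPython implements that
// protection with its nlr (non-local return) machinery; the standalone
// sketch below uses plain setjmp/longjmp only to show the shape of the
// pattern, not the real implementation.
#include <setjmp.h>
#include <stdio.h>

static jmp_buf protect_buf;

// stands in for a user __del__ method that raises
static void finaliser_that_throws(void) {
    longjmp(protect_buf, 1);
}

// call fn, swallowing any "exception" so the sweep loop can continue
static void call_protected(void (*fn)(void)) {
    if (setjmp(protect_buf) == 0) {
        fn();
        printf("finaliser returned normally\n");
    } else {
        printf("finaliser raised; exception swallowed, sweep continues\n");
    }
}

int main(void) {
    call_protected(finaliser_that_throws);
    printf("next block is swept here\n");
    return 0;
}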

STATIC void gc_sweep(void) {
    mp_uint_t block;
    #if MICROPY_PY_GC_COLLECT_RETVAL
    gc_collected = 0;
    #endif
    // free unmarked heads and their tails
    int free_tail = 0;
    for (block = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
        switch (ATB_GET_KIND(block)) {
            case AT_HEAD:
                #if MICROPY_ENABLE_FINALISER
                if (FTB_GET(block)) {
                    mp_obj_t obj = (mp_obj_t)PTR_FROM_BLOCK(block);
                    if (((mp_obj_base_t*)obj)->type != MP_OBJ_NULL) {
                        // if the object has a type then see if it has a __del__ method
                        mp_obj_t dest[2];
                        mp_load_method_maybe(obj, MP_QSTR___del__, dest);
                        if (dest[0] != MP_OBJ_NULL) {
                            // load_method returned a method
                            mp_call_method_n_kw(0, 0, dest);
                        }
                    }
                    // clear finaliser flag
                    FTB_CLEAR(block);
                }
                #endif
                free_tail = 1;
                #if MICROPY_PY_GC_COLLECT_RETVAL
                gc_collected++;
                #endif
                // fall through to free the head
                // no break - disable eclipse static analyzer warning - intentional fall through
            case AT_TAIL:
                if (free_tail) {
                    DEBUG_printf("gc_sweep(%p)\n", PTR_FROM_BLOCK(block));
                    ATB_ANY_TO_FREE(block);
                }
                break;
            case AT_MARK:
                ATB_MARK_TO_HEAD(block);
                free_tail = 0;
                break;
        }
    }
}

STATIC void gc_sweep(void) {
    // free unmarked heads and their tails
    int free_tail = 0;
    for (machine_uint_t block = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
        switch (ATB_GET_KIND(block)) {
            case AT_HEAD:
                #if MICROPY_ENABLE_FINALISER
                if (FTB_GET(block)) {
                    mp_obj_t obj = (mp_obj_t)PTR_FROM_BLOCK(block);
                    if (((mp_obj_base_t*)obj)->type != MP_OBJ_NULL) {
                        // if the object has a type then see if it has a __del__ method
                        mp_obj_t dest[2];
                        mp_load_method_maybe(obj, MP_QSTR___del__, dest);
                        if (dest[0] != MP_OBJ_NULL) {
                            // load_method returned a method
                            mp_call_method_n_kw(0, 0, dest);
                        }
                    }
                    // clear finaliser flag
                    FTB_CLEAR(block);
                }
                #endif
                free_tail = 1;
                // fall through to free the head
            case AT_TAIL:
                if (free_tail) {
                    ATB_ANY_TO_FREE(block);
                }
                break;
            case AT_MARK:
                ATB_MARK_TO_HEAD(block);
                free_tail = 0;
                break;
        }
    }
}
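
// All three sweep variants above walk the same data structure: an allocation
// table that packs one 2-bit kind per heap block, four blocks per table byte
// (AT_FREE=0, AT_HEAD=1, AT_TAIL=2, AT_MARK=3, BLOCKS_PER_ATB=4 in gc.c).
// The standalone sketch below models that encoding and the sweep's state
// transitions on a one-byte toy table; the lower-case helpers are
// illustrative stand-ins for the ATB_* macros, and the AT_HEAD case frees
// the head directly instead of falling through to AT_TAIL as the real code does.
#include <stdint.h>
#include <stdio.h>

#define AT_FREE (0)
#define AT_HEAD (1)
#define AT_TAIL (2)
#define AT_MARK (3)
#define BLOCKS_PER_ATB (4)

// read the 2-bit kind of `block` out of the packed table (cf. ATB_GET_KIND)
static unsigned atb_get_kind(const uint8_t *atb, size_t block) {
    return (atb[block / BLOCKS_PER_ATB] >> (2 * (block % BLOCKS_PER_ATB))) & 3;
}

// demote a marked head back to a plain head (cf. ATB_MARK_TO_HEAD)
static void atb_mark_to_head(uint8_t *atb, size_t block) {
    atb[block / BLOCKS_PER_ATB] &= (uint8_t)~(0x2 << (2 * (block % BLOCKS_PER_ATB)));
}

// turn a block of any kind into a free block (cf. ATB_ANY_TO_FREE)
static void atb_any_to_free(uint8_t *atb, size_t block) {
    atb[block / BLOCKS_PER_ATB] &= (uint8_t)~(0x3 << (2 * (block % BLOCKS_PER_ATB)));
}

int main(void) {
    // one table byte = 4 blocks: a MARKed head with one TAIL (reachable),
    // then an unmarked HEAD (garbage), then a FREE block
    uint8_t atb[1] = { (AT_MARK << 0) | (AT_TAIL << 2) | (AT_HEAD << 4) | (AT_FREE << 6) };
    int free_tail = 0;
    for (size_t block = 0; block < 4; block++) {
        switch (atb_get_kind(atb, block)) {
            case AT_HEAD: // unmarked head: garbage, free it and its tails
                free_tail = 1;
                atb_any_to_free(atb, block);
                break;
            case AT_TAIL: // free only if it belongs to a freed head
                if (free_tail) {
                    atb_any_to_free(atb, block);
                }
                break;
            case AT_MARK: // reachable: unmark and keep its tails
                atb_mark_to_head(atb, block);
                free_tail = 0;
                break;
        }
    }
    for (size_t block = 0; block < 4; block++) {
        printf("block %zu -> kind %u\n", block, atb_get_kind(atb, block));
    }
    // prints kinds 1, 2, 0, 0: the marked chunk survives, the garbage head is freed
    return 0;
}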

// old, simple realloc that didn't expand memory in place
void *gc_realloc(void *ptr, machine_uint_t n_bytes) {
    machine_uint_t n_existing = gc_nbytes(ptr);
    if (n_bytes <= n_existing) {
        return ptr;
    } else {
        bool has_finaliser;
        if (ptr == NULL) {
            has_finaliser = false;
        } else {
            #if MICROPY_ENABLE_FINALISER
            has_finaliser = FTB_GET(BLOCK_FROM_PTR((machine_uint_t)ptr));
            #else
            has_finaliser = false;
            #endif
        }
        void *ptr2 = gc_alloc(n_bytes, has_finaliser);
        if (ptr2 == NULL) {
            return ptr2;
        }
        memcpy(ptr2, ptr, n_existing);
        gc_free(ptr);
        return ptr2;
    }
}

void *gc_realloc(void *ptr_in, machine_uint_t n_bytes) {
    if (gc_lock_depth > 0) {
        return NULL;
    }

    // check for pure allocation
    if (ptr_in == NULL) {
        return gc_alloc(n_bytes, false);
    }

    machine_uint_t ptr = (machine_uint_t)ptr_in;

    // sanity check the ptr
    if (!VERIFY_PTR(ptr)) {
        return NULL;
    }

    // get first block
    machine_uint_t block = BLOCK_FROM_PTR(ptr);

    // sanity check the ptr is pointing to the head of a block
    if (ATB_GET_KIND(block) != AT_HEAD) {
        return NULL;
    }

    // compute number of new blocks that are requested
    machine_uint_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;

    // get the number of consecutive tail blocks and
    // the number of free blocks after last tail block
    // stop if we reach (or are at) end of heap
    machine_uint_t n_free = 0;
    machine_uint_t n_blocks = 1; // counting HEAD block
    machine_uint_t max_block = gc_alloc_table_byte_len * BLOCKS_PER_ATB;
    while (block + n_blocks + n_free < max_block) {
        if (n_blocks + n_free >= new_blocks) {
            // stop as soon as we find enough blocks for n_bytes
            break;
        }
        byte block_type = ATB_GET_KIND(block + n_blocks + n_free);
        switch (block_type) {
            case AT_FREE: n_free++; continue;
            case AT_TAIL: n_blocks++; continue;
            case AT_MARK: assert(0);
        }
        break;
    }

    // return original ptr if it already has the requested number of blocks
    if (new_blocks == n_blocks) {
        return ptr_in;
    }

    // check if we can shrink the allocated area
    if (new_blocks < n_blocks) {
        // free unneeded tail blocks
        for (machine_uint_t bl = block + new_blocks; ATB_GET_KIND(bl) == AT_TAIL; bl++) {
            ATB_ANY_TO_FREE(bl);
        }
        return ptr_in;
    }

    // check if we can expand in place
    if (new_blocks <= n_blocks + n_free) {
        // mark a few more blocks as used tail
        for (machine_uint_t bl = block + n_blocks; bl < block + new_blocks; bl++) {
            assert(ATB_GET_KIND(bl) == AT_FREE);
            ATB_FREE_TO_TAIL(bl);
        }
        // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc)
        memset((byte*)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
        return ptr_in;
    }

    // can't resize in place; try to find a new contiguous chain
    void *ptr_out = gc_alloc(n_bytes,
        #if MICROPY_ENABLE_FINALISER
        FTB_GET(block)
        #else
        false
        #endif
        );

    // check that the alloc succeeded
    if (ptr_out == NULL) {
        return NULL;
    }

    DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out);
    memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK);
    gc_free(ptr_in);
    return ptr_out;
}

void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
    if (MP_STATE_MEM(gc_lock_depth) > 0) {
        return NULL;
    }

    // check for pure allocation
    if (ptr_in == NULL) {
        return gc_alloc(n_bytes, false);
    }

    // check for pure free
    if (n_bytes == 0) {
        gc_free(ptr_in);
        return NULL;
    }

    void *ptr = ptr_in;

    // sanity check the ptr
    if (!VERIFY_PTR(ptr)) {
        return NULL;
    }

    // get first block
    size_t block = BLOCK_FROM_PTR(ptr);

    // sanity check the ptr is pointing to the head of a block
    if (ATB_GET_KIND(block) != AT_HEAD) {
        return NULL;
    }

    // compute number of new blocks that are requested
    size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;

    // Get the total number of consecutive blocks that are already allocated to
    // this chunk of memory, and then count the number of free blocks following
    // it.  Stop if we reach the end of the heap, or if we find enough extra
    // free blocks to satisfy the realloc.  Note that we need to compute the
    // total size of the existing memory chunk so we can correctly and
    // efficiently shrink it (see below for shrinking code).
    size_t n_free = 0;
    size_t n_blocks = 1; // counting HEAD block
    size_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
    for (size_t bl = block + n_blocks; bl < max_block; bl++) {
        byte block_type = ATB_GET_KIND(bl);
        if (block_type == AT_TAIL) {
            n_blocks++;
            continue;
        }
        if (block_type == AT_FREE) {
            n_free++;
            if (n_blocks + n_free >= new_blocks) {
                // stop as soon as we find enough blocks for n_bytes
                break;
            }
            continue;
        }
        break;
    }

    // return original ptr if it already has the requested number of blocks
    if (new_blocks == n_blocks) {
        return ptr_in;
    }

    // check if we can shrink the allocated area
    if (new_blocks < n_blocks) {
        // free unneeded tail blocks
        for (size_t bl = block + new_blocks, count = n_blocks - new_blocks; count > 0; bl++, count--) {
            ATB_ANY_TO_FREE(bl);
        }

        // set the last_free pointer to end of this block if it's earlier in the heap
        if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
            MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
        }

        #if EXTENSIVE_HEAP_PROFILING
        gc_dump_alloc_table();
        #endif

        return ptr_in;
    }

    // check if we can expand in place
    if (new_blocks <= n_blocks + n_free) {
        // mark a few more blocks as used tail
        for (size_t bl = block + n_blocks; bl < block + new_blocks; bl++) {
            assert(ATB_GET_KIND(bl) == AT_FREE);
            ATB_FREE_TO_TAIL(bl);
        }

        // zero out the bytes of the newly allocated blocks (see comment above in gc_alloc)
        memset((byte*)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK);

        #if EXTENSIVE_HEAP_PROFILING
        gc_dump_alloc_table();
        #endif

        return ptr_in;
    }

    if (!allow_move) {
        // not allowed to move memory block so return failure
        return NULL;
    }

    // can't resize in place; try to find a new contiguous chain
    void *ptr_out = gc_alloc(n_bytes,
        #if MICROPY_ENABLE_FINALISER
        FTB_GET(block)
        #else
        false
        #endif
        );

    // check that the alloc succeeded
    if (ptr_out == NULL) {
        return NULL;
    }

    DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out);
    memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK);
    gc_free(ptr_in);
    return ptr_out;
}
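
// Both gc_realloc versions above make the same four-way decision: round the
// request up to whole blocks, measure the chunk (HEAD plus consecutive
// TAILs), count the FREE run behind it, then shrink, keep, expand in place,
// or move. The standalone sketch below models that decision on a plain
// byte-array of block kinds instead of the packed table; the names and the
// 16-byte block size are illustrative, not MicroPython's own.
#include <stddef.h>
#include <stdio.h>

enum { AT_FREE, AT_HEAD, AT_TAIL, AT_MARK };

#define BYTES_PER_BLOCK (16) // illustrative; ports pick their own block size

typedef enum { RESIZE_NOOP, RESIZE_SHRINK, RESIZE_EXPAND, RESIZE_MOVE } resize_t;

static resize_t plan_realloc(const unsigned char *kinds, size_t max_block,
    size_t head, size_t n_bytes) {
    // round the byte request up to whole blocks (ceiling division)
    size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;

    // measure the existing chunk: its HEAD plus consecutive TAIL blocks
    size_t n_blocks = 1;
    while (head + n_blocks < max_block && kinds[head + n_blocks] == AT_TAIL) {
        n_blocks++;
    }

    // count FREE blocks behind it, stopping as soon as we have enough
    size_t n_free = 0;
    while (n_blocks + n_free < new_blocks
        && head + n_blocks + n_free < max_block
        && kinds[head + n_blocks + n_free] == AT_FREE) {
        n_free++;
    }

    if (new_blocks == n_blocks) {
        return RESIZE_NOOP; // already the right size
    }
    if (new_blocks < n_blocks) {
        return RESIZE_SHRINK; // real code frees the surplus tail blocks
    }
    if (new_blocks <= n_blocks + n_free) {
        return RESIZE_EXPAND; // real code turns the free run into tails and zeroes it
    }
    return RESIZE_MOVE; // real code falls back to gc_alloc + memcpy + gc_free
}

int main(void) {
    // toy table: a 2-block chunk at index 1 with a 2-block free run after it
    unsigned char kinds[] = { AT_FREE, AT_HEAD, AT_TAIL, AT_FREE, AT_FREE, AT_HEAD };
    size_t max_block = sizeof(kinds);
    printf("%d\n", plan_realloc(kinds, max_block, 1, 1 * BYTES_PER_BLOCK)); // 1 = shrink
    printf("%d\n", plan_realloc(kinds, max_block, 1, 2 * BYTES_PER_BLOCK)); // 0 = no-op
    printf("%d\n", plan_realloc(kinds, max_block, 1, 4 * BYTES_PER_BLOCK)); // 2 = expand in place
    printf("%d\n", plan_realloc(kinds, max_block, 1, 5 * BYTES_PER_BLOCK)); // 3 = must move
    return 0;
}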