/* * Frees the first numPtrs objects in the ptrs list and returns the * amount of reclaimed storage. The list must contain addresses all in * the same mspace, and must be in increasing order. This implies that * there are no duplicates, and no entries are NULL. */ size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs) { HS_BOILERPLATE(); if (numPtrs == 0) { return 0; } assert(ptrs != NULL); assert(*ptrs != NULL); Heap* heap = ptr2heap(gHs, *ptrs); size_t numBytes = 0; if (heap != NULL) { mspace msp = heap->msp; // Calling mspace_free on shared heaps disrupts sharing too // much. For heap[0] -- the 'active heap' -- we call // mspace_free, but on the other heaps we only do some // accounting. if (heap == gHs->heaps) { // Count freed objects. for (size_t i = 0; i < numPtrs; i++) { assert(ptrs[i] != NULL); assert(ptr2heap(gHs, ptrs[i]) == heap); countFree(heap, ptrs[i], &numBytes); } // Bulk free ptrs. mspace_bulk_free(msp, ptrs, numPtrs); } else { // This is not an 'active heap'. Only do the accounting. for (size_t i = 0; i < numPtrs; i++) { assert(ptrs[i] != NULL); assert(ptr2heap(gHs, ptrs[i]) == heap); countFree(heap, ptrs[i], &numBytes); } } } return numBytes; }
/*
 * Bulk-frees nptrs pointers back into the shared space's mspace while
 * holding the space's lock, since shared spaces may be touched by
 * multiple users concurrently.
 */
void
mm_shared_space_bulk_free(struct mm_shared_space *space, void **ptrs,
                          size_t nptrs)
{
    mm_common_lock(&space->lock);
    mspace_bulk_free(space->space.opaque, ptrs, nptrs);
    mm_common_unlock(&space->lock);
}
/*
 * Bulk-frees nptrs pointers back into the private space's mspace.
 * No locking is required: a private space has a single owner.
 */
void
mm_private_space_bulk_free(struct mm_private_space *space, void **ptrs,
                           size_t nptrs)
{
    mspace_bulk_free(space->space.opaque, ptrs, nptrs);
}