/* * palloc_first -- returns the first object from the heap. */ uint64_t palloc_first(struct palloc_heap *heap) { struct memory_block search = MEMORY_BLOCK_NONE; heap_foreach_object(heap, pmalloc_search_cb, &search, MEMORY_BLOCK_NONE); if (MEMORY_BLOCK_IS_NONE(search)) return 0; void *uptr = search.m_ops->get_user_data(&search); return HEAP_PTR_TO_OFF(heap, uptr); }
/* * palloc_next -- returns the next object relative to 'off'. */ uint64_t palloc_next(struct palloc_heap *heap, uint64_t off) { struct memory_block m = memblock_from_offset(heap, off); struct memory_block search = m; heap_foreach_object(heap, pmalloc_search_cb, &search, m); if (MEMORY_BLOCK_IS_NONE(search) || MEMORY_BLOCK_EQUALS(search, m)) return 0; void *uptr = search.m_ops->get_user_data(&search); return HEAP_PTR_TO_OFF(heap, uptr); }
/*
 * memblock_from_offset_opt -- resolves a memory block data from an offset
 *	that originates from the heap
 *
 * When 'size' is zero the size_idx calculation (which requires reading the
 * allocation header) is skipped and size_idx is left at 0.
 */
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size) { struct memory_block m = MEMORY_BLOCK_NONE; m.heap = heap;
	/* rebase the absolute heap offset so that zone 0 starts at 0 */
	off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0); m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);
	/* strip the preceding zones and this zone's own metadata header */
	off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone); m.chunk_id = (uint32_t)(off / CHUNKSIZE); struct chunk_header *hdr = &ZID_TO_ZONE(heap->layout, m.zone_id) ->chunk_headers[m.chunk_id];
	/*
	 * If the offset landed inside a multi-chunk run, the RUN_DATA header's
	 * size_idx holds the distance back to the run's first chunk.
	 */
	if (hdr->type == CHUNK_TYPE_RUN_DATA) m.chunk_id -= hdr->size_idx; off -= CHUNKSIZE * m.chunk_id; m.header_type = memblock_header_type(&m);
	/* skip the allocation header that precedes the user data */
	off -= header_type_to_size[m.header_type];
	/*
	 * A huge block's user data starts immediately after its header at the
	 * chunk boundary, so a zero remainder means HUGE; any leftover offset
	 * must point inside a run.
	 */
	m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE;
#ifdef DEBUG
	enum memory_block_type t = memblock_detect_type(&m, heap->layout); ASSERTeq(t, m.type);
#endif
	m.m_ops = &mb_ops[m.type]; uint64_t unit_size = m.m_ops->block_size(&m); if (off != 0) { /* run */
	/* skip the run's metadata area, then locate the unit within the run */
	off -= RUN_METASIZE; m.block_off = (uint16_t)(off / unit_size); off -= m.block_off * unit_size; }
	/* size_idx is computed only on request -- it reads the alloc header */
	m.size_idx = !size ? 0 : CALC_SIZE_IDX(unit_size, memblock_header_ops[m.header_type].get_size(&m));
	/* all of the offset must have been accounted for by now */
	ASSERTeq(off, 0); return m; }
/*
 * memblock_from_offset_opt -- resolves a memory block data from an offset
 *	that originates from the heap
 *
 * NOTE(review): this is a second definition of memblock_from_offset_opt in
 * this file (another appears above) -- presumably two versions of the same
 * translation unit were concatenated; verify only one copy is compiled.
 * This variant additionally accounts for run alignment padding and uses the
 * heap_get_chunk_hdr()/heap_get_chunk_run() accessors.
 */
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size) { struct memory_block m = MEMORY_BLOCK_NONE; m.heap = heap;
	/* rebase the absolute heap offset so that zone 0 starts at 0 */
	off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0); m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);
	/* strip the preceding zones and this zone's own metadata header */
	off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone); m.chunk_id = (uint32_t)(off / CHUNKSIZE); struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);
	/*
	 * If the offset landed inside a multi-chunk run, the RUN_DATA header's
	 * size_idx holds the distance back to the run's first chunk.
	 */
	if (hdr->type == CHUNK_TYPE_RUN_DATA) m.chunk_id -= hdr->size_idx; off -= CHUNKSIZE * m.chunk_id; m.header_type = memblock_header_type(&m);
	/* skip the allocation header that precedes the user data */
	off -= header_type_to_size[m.header_type];
	/*
	 * A huge block's user data starts immediately after its header at the
	 * chunk boundary, so a zero remainder means HUGE; any leftover offset
	 * must point inside a run.
	 */
	m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE; ASSERTeq(memblock_detect_type(heap, &m), m.type); m.m_ops = &mb_ops[m.type]; uint64_t unit_size = m.m_ops->block_size(&m); if (off != 0) { /* run */
	/* skip any alignment padding and the run metadata before the units */
	struct chunk_run *run = heap_get_chunk_run(heap, &m); off -= run_get_alignment_padding(hdr, run, m.header_type); off -= RUN_METASIZE; m.block_off = (uint16_t)(off / unit_size); off -= m.block_off * unit_size; }
	/* size_idx is computed only on request -- it reads the alloc header */
	m.size_idx = !size ? 0 : CALC_SIZE_IDX(unit_size, memblock_header_ops[m.header_type].get_size(&m));
	/* all of the offset must have been accounted for by now */
	ASSERTeq(off, 0); return m; }
/*
 * alloc_prep_block -- (internal) prepares a memory block for allocation
 *
 * Once the block is fully reserved and it's guaranteed that no one else will
 * be able to write to this memory region it is safe to write the allocation
 * header and call the object construction function.
 *
 * Because the memory block at this stage is only reserved in transient state
 * there's no need to worry about fail-safety of this method because in case
 * of a crash the memory will be back in the free blocks collection.
 */
static int
alloc_prep_block(struct palloc_heap *heap, const struct memory_block *m,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags,
	uint64_t *offset_value)
{
	void *data = m->m_ops->get_user_data(m);
	size_t data_size = m->m_ops->get_user_size(m);

	/* let valgrind know this region is now a live allocation */
	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, data, data_size);
	VALGRIND_DO_MAKE_MEM_UNDEFINED(data, data_size);
	VALGRIND_ANNOTATE_NEW_MEMORY(data, data_size);

	if (constructor != NULL) {
		int ret = constructor(heap->base, data, data_size, arg);
		if (ret != 0) {
			/*
			 * If canceled, revert the block back to the free
			 * state in vg machinery.
			 */
			VALGRIND_DO_MEMPOOL_FREE(heap->layout, data);
			return ret;
		}
	}

	m->m_ops->write_header(m, extra_field, object_flags);

	/*
	 * To avoid determining the user data pointer twice this method is also
	 * responsible for calculating the offset of the object in the pool that
	 * will be used to set the offset destination pointer provided by the
	 * caller.
	 */
	*offset_value = HEAP_PTR_TO_OFF(heap, data);

	return 0;
}
/*
 * memblock_validate_offset -- checks the state of any arbtirary offset within
 * the heap.
 *
 * This function traverses an entire zone, so use with caution.
 *
 * Returns MEMBLOCK_ALLOCATED / MEMBLOCK_FREE when the offset resolves to a
 * valid block, or MEMBLOCK_STATE_UNKNOWN when it does not correspond to the
 * start of any block's user data.
 */
enum memblock_state memblock_validate_offset(struct palloc_heap *heap, uint64_t off) { struct memory_block m = MEMORY_BLOCK_NONE; m.heap = heap;
	/* rebase the absolute heap offset so that zone 0 starts at 0 */
	off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0); m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);
	/* strip the preceding zones and this zone's own metadata header */
	off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone); m.chunk_id = (uint32_t)(off / CHUNKSIZE); struct zone *z = ZID_TO_ZONE(heap->layout, m.zone_id); struct chunk_header *hdr = &z->chunk_headers[m.chunk_id];
	/* RUN_DATA headers point back to the run's first chunk */
	if (hdr->type == CHUNK_TYPE_RUN_DATA) m.chunk_id -= hdr->size_idx; off -= CHUNKSIZE * m.chunk_id;
	/*
	 * Walk the zone's chunk headers to confirm that chunk_id is the first
	 * chunk of some block; an offset into the interior of a multi-chunk
	 * block is rejected as invalid.
	 */
	for (uint32_t i = 0; i < z->header.size_idx; ) { hdr = &z->chunk_headers[i]; if (i + hdr->size_idx > m.chunk_id && i < m.chunk_id) { return MEMBLOCK_STATE_UNKNOWN; /* invalid chunk */ } else if (m.chunk_id == i) { break; } i += hdr->size_idx; } ASSERTne(hdr, NULL); m.header_type = memblock_header_type(&m); if (hdr->type != CHUNK_TYPE_RUN) {
	/*
	 * For a non-run chunk the user data starts right after the allocation
	 * header, so the remaining offset must equal the header size exactly;
	 * the block's state then follows directly from the chunk type.
	 */
	if (header_type_to_size[m.header_type] != off) return MEMBLOCK_STATE_UNKNOWN; else if (hdr->type == CHUNK_TYPE_USED) return MEMBLOCK_ALLOCATED; else if (hdr->type == CHUNK_TYPE_FREE) return MEMBLOCK_FREE; else return MEMBLOCK_STATE_UNKNOWN; }
	/* a run offset must at least clear the allocation header */
	if (header_type_to_size[m.header_type] > off) return MEMBLOCK_STATE_UNKNOWN; off -= header_type_to_size[m.header_type];
	/* zero remainder would mean a huge block -- see decode logic above */
	m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE;
#ifdef DEBUG
	enum memory_block_type t = memblock_detect_type(&m, heap->layout); ASSERTeq(t, m.type);
#endif
	m.m_ops = &mb_ops[m.type]; uint64_t unit_size = m.m_ops->block_size(&m); if (off != 0) { /* run */
	/* skip the run metadata, then locate the unit within the run */
	off -= RUN_METASIZE; m.block_off = (uint16_t)(off / unit_size); off -= m.block_off * unit_size; } m.size_idx = CALC_SIZE_IDX(unit_size, memblock_header_ops[m.header_type].get_size(&m));
	/* all of the offset must have been accounted for by now */
	ASSERTeq(off, 0); return m.m_ops->get_state(&m); }