/*
 * This is used to insert a block which is not previously on the
 * free list.  Only the a.size field of the arena header is assumed
 * to be valid.
 */
void __inject_free_block(struct free_arena_header *ah)
{
    struct free_arena_header *nah;
    size_t a_end = (size_t) ah + ah->a.size;
    size_t n_end;

    for (nah = __malloc_head.a.next; nah->a.type != ARENA_TYPE_HEAD;
         nah = nah->a.next) {
        n_end = (size_t) nah + nah->a.size;

        /* Is nah entirely beyond this block? */
        if ((size_t) nah >= a_end)
            break;

        /* Is this block entirely beyond nah? */
        if ((size_t) ah >= n_end)
            continue;

        /* Otherwise we have some sort of overlap - reject this block */
        return;
    }

    /* Now, nah should point to the successor block */
    ah->a.next = nah;
    ah->a.prev = nah->a.prev;
    nah->a.prev = ah;
    ah->a.prev->a.next = ah;

    __free_block(ah);
}
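
/*
 * The routines in this file assume arena headers threaded on two
 * doubly linked lists: an address-ordered list of every arena
 * (a.next/a.prev) and a list of free arenas (next_free/prev_free).
 * Below is a minimal sketch of that layout, reconstructed from the
 * field names used above -- the real declarations live in the
 * malloc header and differ in detail (the core variant further
 * down packs type, heap, and size into a single a.attrs word and
 * adds tag and magic fields):
 */
struct free_arena_header;

struct arena_header {
    int type;                               /* ARENA_TYPE_{USED,FREE,HEAD} */
    size_t size;                            /* total arena size, header included */
    struct free_arena_header *next, *prev;  /* address-ordered chain */
};

struct free_arena_header {
    struct arena_header a;
    struct free_arena_header *next_free, *prev_free;  /* free chain only */
};
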
void bios_free(void *ptr)
{
    struct free_arena_header *ah;

    ah = (struct free_arena_header *)((struct arena_header *)ptr - 1);

#ifdef DEBUG_MALLOC
    if (ah->a.magic != ARENA_MAGIC)
        dprintf("failed free() magic check: %p\n", ptr);
    if (ARENA_TYPE_GET(ah->a.attrs) != ARENA_TYPE_USED)
        dprintf("invalid arena type: %d\n", ARENA_TYPE_GET(ah->a.attrs));
#endif

    __free_block(ah);
}
void free(void *ptr)
{
    struct free_arena_header *ah;

    if (!ptr)
        return;

    ah = (struct free_arena_header *)((struct arena_header *)ptr - 1);

#ifdef DEBUG_MALLOC
    assert(ah->a.type == ARENA_TYPE_USED);
#endif

    __free_block(ah);

    /* Here we could insert code to return memory to the system. */
}
/*
 * Free all memory which is tagged with a specific tag.
 */
static void __free_tagged(malloc_tag_t tag)
{
    struct free_arena_header *fp, *head;
    int i;

    sem_down(&__malloc_semaphore, 0);

    for (i = 0; i < NHEAP; i++) {
        dprintf("__free_tagged(%u) heap %d\n", tag, i);
        head = &__core_malloc_head[i];
        for (fp = head->a.next; fp != head; fp = fp->a.next) {
            if (ARENA_TYPE_GET(fp->a.attrs) == ARENA_TYPE_USED &&
                fp->a.tag == tag)
                fp = __free_block(fp);
        }
    }

    sem_up(&__malloc_semaphore);
    dprintf("__free_tagged(%u) done\n", tag);
}
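
/*
 * Hypothetical usage sketch: an allocator-side counterpart that
 * stamps fp->a.tag at allocation time is assumed here (it is not
 * part of this section), so a subsystem can release everything it
 * allocated in one call:
 *
 *     void *buf = __malloc_tagged(MY_TAG, 4096);  // assumed helper
 *     ...
 *     __free_tagged(MY_TAG);   // frees every arena stamped MY_TAG
 *
 * Note the "fp = __free_block(fp)" above: freeing may coalesce the
 * arena with its neighbors, so iteration resumes from the merged
 * block that __free_block() returns rather than from a stale pointer.
 */
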
/*
 * This is used to insert a block which is not previously on the
 * free list.  Only the a.size field of the arena header is assumed
 * to be valid.
 */
void __inject_free_block(struct free_arena_header *ah)
{
    struct free_arena_header *head =
        &__core_malloc_head[ARENA_HEAP_GET(ah->a.attrs)];
    struct free_arena_header *nah;
    size_t a_end = (size_t) ah + ARENA_SIZE_GET(ah->a.attrs);
    size_t n_end;

    dprintf("inject: %#zx bytes @ %p, heap %u (%p)\n",
            ARENA_SIZE_GET(ah->a.attrs), ah,
            ARENA_HEAP_GET(ah->a.attrs), head);

    sem_down(&__malloc_semaphore, 0);

    for (nah = head->a.next; nah != head; nah = nah->a.next) {
        n_end = (size_t) nah + ARENA_SIZE_GET(nah->a.attrs);

        /* Is nah entirely beyond this block? */
        if ((size_t) nah >= a_end)
            break;

        /* Is this block entirely beyond nah? */
        if ((size_t) ah >= n_end)
            continue;

        /* Otherwise we have some sort of overlap - reject this block */
        printf("conflict: ah: %p, a_end: %p, nah: %p, n_end: %p\n",
               ah, (void *)a_end, nah, (void *)n_end);
        sem_up(&__malloc_semaphore);
        return;
    }

    /* Now, nah should point to the successor block */
    ah->a.next = nah;
    ah->a.prev = nah->a.prev;
    nah->a.prev = ah;
    ah->a.prev->a.next = ah;

    __free_block(ah);

    sem_up(&__malloc_semaphore);
}
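
/*
 * Both variants of __inject_free_block() above rely on the same
 * interval test: a candidate block [ah, a_end) is accepted only if
 * it is disjoint from every existing arena [nah, n_end).  Because
 * the main list is sorted by address, the scan can stop at the
 * first arena starting at or beyond a_end -- every later arena is
 * disjoint too, and that arena becomes the new block's successor.
 * A standalone sketch of the test (the names are illustrative):
 */
static int arenas_disjoint(size_t a, size_t a_end, size_t n, size_t n_end)
{
    return n >= a_end       /* existing arena entirely beyond the block */
        || a >= n_end;      /* block entirely beyond the existing arena */
}
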
void free(void *ptr)
{
    struct free_arena_header *ah;

    if (!ptr)
        return;

    ah = (struct free_arena_header *)((struct arena_header *)ptr - 1);

#ifdef DEBUG_MALLOC
    assert(ah->a.type == ARENA_TYPE_USED);
#endif

    /* Merge into adjacent free blocks */
    ah = __free_block(ah);

    /* See if it makes sense to return memory to the system */
#if _KLIBC_MALLOC_USES_SBRK
    if (ah->a.size >= _KLIBC_MALLOC_CHUNK_SIZE &&
        (char *)ah + ah->a.size == __current_brk) {
        remove_from_chains(ah);
        brk(ah);
    }
#else
    {
        size_t page_size = getpagesize();
        size_t page_mask = page_size - 1;
        size_t head_portion = -(size_t)ah & page_mask;
        size_t tail_portion = ((size_t)ah + ah->a.size) & page_mask;
        size_t adj_size;

        /* Careful here... an individual chunk of memory must have
           a minimum size if it exists at all, so if either the head
           or the tail is below the minimum, then extend that chunk
           by a page. */
        if (head_portion && head_portion < 2 * sizeof(struct arena_header))
            head_portion += page_size;

        if (tail_portion && tail_portion < 2 * sizeof(struct arena_header))
            tail_portion += page_size;

        adj_size = ah->a.size - head_portion - tail_portion;

        /* Worth it?  This is written the way it is to guard
           against overflows... */
        if (ah->a.size >= head_portion + tail_portion +
            _KLIBC_MALLOC_CHUNK_SIZE) {
            struct free_arena_header *tah, *tan, *tap;

            if (tail_portion) {
                /* Make a new header, and insert into chains
                   immediately after the current block */
                tah = (struct free_arena_header *)
                    ((char *)ah + head_portion + adj_size);
                tah->a.type = ARENA_TYPE_FREE;
                tah->a.size = tail_portion;
                tah->a.next = tan = ah->a.next;
                tan->a.prev = tah;
                tah->a.prev = ah;
                ah->a.next = tah;
                tah->prev_free = tap = ah->prev_free;
                tap->next_free = tah;
                tah->next_free = ah;
                ah->prev_free = tah;
            }

            if (head_portion)
                ah->a.size = head_portion;
            else
                remove_from_chains(ah);

            munmap((char *)ah + head_portion, adj_size);
        }
    }
#endif
}
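
/*
 * The page-trimming arithmetic above is compact: "-(size_t)ah &
 * page_mask" is the distance from ah up to the next page boundary,
 * and "((size_t)ah + size) & page_mask" is the overhang past the
 * last full page.  A standalone sketch with made-up numbers (the
 * address, size, and page size below are purely illustrative):
 */
#include <stdio.h>
#include <stddef.h>

static void demo_page_trim(void)
{
    size_t page_size = 4096, page_mask = page_size - 1;
    size_t block = 0x1234;                    /* hypothetical arena address */
    size_t size  = 0x9000;                    /* hypothetical arena size */

    size_t head = -block & page_mask;         /* 0xdcc: up to next page */
    size_t tail = (block + size) & page_mask; /* 0x234: past last full page */

    /* The whole pages in the middle are what free() can munmap(). */
    printf("head %#zx, tail %#zx, returnable %#zx\n",
           head, tail, size - head - tail);   /* returnable = 0x8000 */
}
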
void *malloc(size_t size)
{
    struct free_arena_header *fp;
    struct free_arena_header *pah;
    size_t fsize;

    if (size == 0)
        return NULL;

    /* Add the obligatory arena header, and round up */
    size = (size + 2 * sizeof(struct arena_header) - 1) & ARENA_SIZE_MASK;

    for (fp = __malloc_head.next_free; fp->a.type != ARENA_TYPE_HEAD;
         fp = fp->next_free) {
        if (fp->a.size >= size) {
            /* Found fit -- allocate out of this block */
            return __malloc_from_block(fp, size);
        }
    }

    /* Nothing found... need to request a block from the kernel */
    fsize = (size + MALLOC_CHUNK_MASK) & ~MALLOC_CHUNK_MASK;

#if _KLIBC_MALLOC_USES_SBRK
    fp = (struct free_arena_header *)sbrk(fsize);
#else
    fp = (struct free_arena_header *)
        mmap(NULL, fsize, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#endif

    if (fp == (struct free_arena_header *)MAP_FAILED) {
        return NULL;            /* Failed to get a block */
    }

    /* Insert the block into the management chains.  We need to set
       up the size and the main block list pointer, the rest of the
       work is logically identical to free(). */
    fp->a.type = ARENA_TYPE_FREE;
    fp->a.size = fsize;

    /* We need to insert this into the main block list in the proper
       place -- this list is required to be sorted.  Since we most
       likely get memory assignments in ascending order, search
       backwards for the proper place. */
    for (pah = __malloc_head.a.prev; pah->a.type != ARENA_TYPE_HEAD;
         pah = pah->a.prev) {
        if (pah < fp)
            break;
    }

    /* Now pah points to the node that should be the predecessor of
       the new node */
    fp->a.next = pah->a.next;
    fp->a.prev = pah;
    pah->a.next = fp;
    fp->a.next->a.prev = fp;

    /* Insert into the free chain and coalesce with adjacent blocks */
    fp = __free_block(fp);

    /* Now we can allocate from this block */
    return __malloc_from_block(fp, size);
}
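
/*
 * The rounding at the top of malloc() converts a request into a
 * whole arena size: one header plus the request, rounded up to the
 * arena granularity.  A sketch assuming a 16-byte arena_header and
 * ARENA_SIZE_MASK == ~(sizeof(struct arena_header) - 1), both of
 * which are assumptions here:
 */
#include <stdio.h>
#include <stddef.h>

#define DEMO_HDR   ((size_t)16)         /* assumed header size */
#define DEMO_MASK  (~(DEMO_HDR - 1))    /* assumed ARENA_SIZE_MASK */

static void demo_size_rounding(void)
{
    size_t req;
    for (req = 1; req <= 48; req += 15) {
        size_t arena = (req + 2 * DEMO_HDR - 1) & DEMO_MASK;
        /* e.g. request 1 -> arena 32: 16-byte header + 16-byte payload */
        printf("request %2zu -> arena %2zu (payload %2zu)\n",
               req, arena, arena - DEMO_HDR);
    }
}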