/* Tear down a heap, releasing every block of memory it manages and
 * resetting its fields so that it can no longer be used.
 */
MP_GLOBAL
void
__mp_deleteheap(heaphead *h)
{
    heapnode *curr, *next;

    /* Free up the memory pointed to by the heap nodes first, since the
     * heap nodes themselves are freed in the second loop.
     */
    for (curr = (heapnode *) __mp_minimum(h->dtree.root); curr != NULL;
         curr = next)
    {
        next = (heapnode *) __mp_successor(&curr->node);
        __mp_heapfree(h, curr);
    }
    /* Now release the internal blocks that hold the heap nodes themselves. */
    for (curr = (heapnode *) __mp_minimum(h->itree.root); curr != NULL;
         curr = next)
    {
        next = (heapnode *) __mp_successor(&curr->node);
        __mp_treeremove(&h->itree, &curr->node);
        __mp_memfree(&h->memory, curr->block, curr->size);
    }
    __mp_endmemory(&h->memory);
    /* Leave the heap in a pristine, unusable state. */
    h->table.free = NULL;
    h->table.size = 0;
    h->isize = 0;
    h->prot = MA_NOACCESS;
    h->protrecur = 0;
    h->tracing = 0;
}
static void deletegraph(void) { vertex *n, *v; edge *e, *m; /* Remove the nodes from the graph and free up the memory that they * inhabit. This is done by traversing the tree rather than the graph * in order to avoid deleting nodes that result in other nodes being * inaccessible. */ for (v = (vertex *) __mp_minimum(temptree.root); v != NULL; v = n) { n = (vertex *) __mp_successor(&v->node); __mp_treeremove(&temptree, &v->node); __mp_removenode(&graph, &v->gnode); free(v); } /* Remove the edges from the graph. The graph is now empty following * the removal of the nodes in the previous loop, so we just need to * free the memory that each edge uses. */ for (e = (edge *) edgelist.head; (m = (edge *) e->node.next) != NULL; e = m) { __mp_remove(&edgelist, &e->node); free(e); } }
/* Free up the memory associated with a single heap node and recycle the
 * node's slot for later reuse.
 */
MP_GLOBAL void __mp_heapfree(heaphead *h, heapnode *n)
{
    /* Account for the block leaving the heap before its memory is
     * returned to the underlying allocator.
     */
    h->dsize -= n->size;
    __mp_memfree(&h->memory, n->block, n->size);
    /* Unlink the node from the tree of allocated blocks and return its
     * slot to the slot table.
     */
    __mp_treeremove(&h->dtree, &n->node);
    __mp_freeslot(&h->table, n);
}
/* Attempt to coalesce a free node with any free nodes that border it on
 * either side, returning the (possibly enlarged) node.
 */
static
allocnode *
mergenode(allochead *h, allocnode *n)
{
    allocnode *left, *right;

    /* See if the left node is free and borders on this node. */
    left = (allocnode *) n->lnode.prev;
    if ((left->lnode.prev == NULL) || (left->info != NULL) ||
        ((char *) left->block + left->size < (char *) n->block))
        left = NULL;
    /* See if the right node is free and borders on this node. */
    right = (allocnode *) n->lnode.next;
    if ((right->lnode.next == NULL) || (right->info != NULL) ||
        ((char *) n->block + n->size < (char *) right->block))
        right = NULL;
    /* If either or both of the bordering nodes can be merged then take
     * this node out of the free tree, absorb the neighbours, and reinsert
     * it with its new size.
     */
    if ((left != NULL) || (right != NULL))
    {
        __mp_treeremove(&h->ftree, &n->tnode);
        if (left != NULL)
        {
            __mp_remove(&h->list, &left->lnode);
            __mp_treeremove(&h->ftree, &left->tnode);
            n->block = left->block;
            n->size += left->size;
            __mp_freeslot(&h->table, left);
        }
        if (right != NULL)
        {
            __mp_remove(&h->list, &right->lnode);
            __mp_treeremove(&h->ftree, &right->tnode);
            n->size += right->size;
            __mp_freeslot(&h->table, right);
        }
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    }
    return n;
}
/* Drain the allocation tree, unlinking and freeing every entry. */
static
void
freeallocs(void)
{
    allocation *curr, *next;

    /* Walk the tree in ascending order, grabbing each successor before
     * the current node is removed and freed.
     */
    for (curr = (allocation *) __mp_minimum(alloctree.root); curr != NULL;
         curr = next)
    {
        next = (allocation *) __mp_successor(&curr->node);
        __mp_treeremove(&alloctree, &curr->node);
        free(curr);
    }
}
/* Recycle the oldest memory allocation on the freed queue: take it off the
 * freed list and freed tree and return it to the free tree so that its
 * memory becomes available for reuse.
 */
MP_GLOBAL void __mp_recyclefreed(allochead *h)
{
    allocnode *n;
    void *p;
    size_t l, s;

    /* Recover the allocnode from its embedded fnode list link at the head
     * of the freed list.
     */
    n = (allocnode *) ((char *) h->flist.head - offsetof(allocnode, fnode));
    /* Remove the freed node from the freed list and the freed tree. */
    __mp_remove(&h->flist, &n->fnode);
    __mp_treeremove(&h->gtree, &n->tnode);
    h->gsize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Compute the page-aligned base (p) and page-rounded size (s) of
         * the region occupied by this allocation.
         */
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages. */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              l, MA_READWRITE);
        }
    }
    /* We are placing this node on the free tree and so it will become
     * available for reuse.  If all allocations are pages then we prevent
     * the contents from being read or written to, otherwise the contents
     * will be filled with the free byte.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Any watch points will have already been removed, and the
         * surrounding overflow buffers will already be protected with
         * the MA_NOACCESS flag.
         */
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
        /* p and s were computed in the FLG_PAGEALLOC branch above; the
         * flag cannot change in between, so they are always initialised
         * here.
         */
        n->block = p;
        n->size = s;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Remove any watch points that were made to monitor the overflow
         * buffers.
         */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                      h->oflow, MA_READWRITE);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                      h->oflow, MA_READWRITE);
    }
    /* Grow the node back to include its surrounding overflow buffers
     * before it goes onto the free tree.
     */
    n->block = (char *) n->block - h->oflow;
    n->size += h->oflow << 1;
    n->info = NULL;
    if (!(h->flags & FLG_PAGEALLOC))
        __mp_memset(n->block, h->fbyte, n->size);
    __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    h->fsize += n->size;
    /* Finally try to coalesce the node with any bordering free nodes. */
    mergenode(h, n);
}
/* Free an existing block of memory in the allocation heap.
 *
 * h: the allocation heap.
 * n: the allocated node being freed.
 * i: information to attach to the node if it is to be kept on the freed
 *    queue, or NULL to return the memory to the free tree immediately.
 *
 * Fix: the nested if/else under the FLG_PAGEALLOC test in the "keep node"
 * branch was brace-less, relying on the dangling-else rule to bind the two
 * else clauses correctly; braces have been added so the binding is explicit
 * and cannot silently change under future edits.  Behavior is unchanged.
 */
MP_GLOBAL
void
__mp_freealloc(allochead *h, allocnode *n, void *i)
{
    void *p = NULL;
    size_t l, s = 0;

    /* If we are keeping the details (and possibly the contents) of a
     * specified number of recently freed memory allocations then we may
     * have to recycle the oldest freed allocation if the length of the
     * queue would extend past the user-specified limit.
     */
    if ((i != NULL) && (h->flist.size != 0) && (h->flist.size == h->fmax))
        __mp_recyclefreed(h);
    /* Remove the allocated node from the allocation tree. */
    __mp_treeremove(&h->atree, &n->tnode);
    h->asize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Compute the page-aligned base (p) and page-rounded size (s) of
         * the region occupied by this allocation.
         */
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages. */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              l, MA_READWRITE);
        }
    }
    if (i != NULL)
    {
        /* We are keeping this node and so place it on the freed tree.
         * If all allocations are pages then we either prevent the original
         * contents from being both read or written to, or prevent the
         * allocation from being written to.  If not then we may optionally
         * preserve its contents, otherwise it will be filled with the free
         * byte.
         */
        n->info = i;
        if (h->flags & FLG_PAGEALLOC)
        {
            if (h->flags & FLG_PRESERVE)
            {
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_READONLY);
                if (h->flags & FLG_OFLOWWATCH)
                {
                    /* Replace any watch points within the allocated pages.
                     * We have to do this here because when we change the
                     * memory protection we may trigger a watch point on
                     * some systems.
                     */
                    if ((l = (char *) n->block - (char *) p) > 0)
                        __mp_memwatch(&h->heap.memory, p, l, MA_NOACCESS);
                    if ((l = s - n->size - l) > 0)
                        __mp_memwatch(&h->heap.memory,
                                      (char *) n->block + n->size, l,
                                      MA_NOACCESS);
                }
            }
            else
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_NOACCESS);
        }
        else if (!(h->flags & FLG_PRESERVE))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_addtail(&h->flist, &n->fnode);
        __mp_treeinsert(&h->gtree, &n->tnode, (unsigned long) n->block);
        h->gsize += n->size;
    }
    else
    {
        /* We are placing this node on the free tree and so it will become
         * available for reuse.  If all allocations are pages then we prevent
         * the contents from being read or written to, otherwise the contents
         * will be filled with the free byte.
         */
        if (h->flags & FLG_PAGEALLOC)
        {
            /* Any watch points will have already been removed, and the
             * surrounding overflow buffers will already be protected with
             * the MA_NOACCESS flag.
             */
            __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
            n->block = p;
            n->size = s;
        }
        else if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points that were made to monitor the overflow
             * buffers.
             */
            __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                          h->oflow, MA_READWRITE);
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          h->oflow, MA_READWRITE);
        }
        /* Grow the node back to include its surrounding overflow buffers
         * before it goes onto the free tree.
         */
        n->block = (char *) n->block - h->oflow;
        n->size += h->oflow << 1;
        n->info = NULL;
        if (!(h->flags & FLG_PAGEALLOC))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
        h->fsize += n->size;
        /* Finally try to coalesce the node with any bordering free nodes. */
        mergenode(h, n);
    }
}
/* Attempt to resize an existing memory allocation in place to l bytes,
 * returning 1 on success or 0 if the resize cannot be done without
 * relocating the allocation (relocation is handled by the caller).
 */
MP_GLOBAL int __mp_resizealloc(allochead *h, allocnode *n, size_t l)
{
    allocnode *p;
    size_t m, s;
    long d;

    /* If all allocations are pages and the allocations are to be aligned
     * to the end of a page then the easiest solution is to fail here since
     * the majority of cases would require relocation of the original memory
     * allocation.
     */
    if ((h->flags & FLG_PAGEALLOC) && (h->flags & FLG_ALLOCUPPER))
        return 0;
    if (l == 0)
        l = 1;
    /* d is the signed change in size; negative means the allocation is
     * shrinking.  NOTE(review): the size_t subtraction is converted to
     * long, which assumes both sizes fit in a long — presumably true for
     * all supported platforms, but worth confirming.
     */
    d = l - n->size;
    /* If we are allocating pages then the effective block size is the
     * original size rounded up to a multiple of the system page size.
     */
    if (h->flags & FLG_PAGEALLOC)
        m = __mp_roundup(n->size, h->heap.memory.page);
    else
        m = n->size;
    /* Obtain the bordering free node to the right of this node, if one
     * exists.  There is no need to look any further right as it is
     * guaranteed that it will not be another bordering free node.
     */
    p = (allocnode *) n->lnode.next;
    if ((p->lnode.next == NULL) || (p->info != NULL) ||
        ((char *) n->block + m + h->oflow < (char *) p->block))
        p = NULL;
    if ((h->flags & FLG_PAGEALLOC) && (l <= m) &&
        (l > m - h->heap.memory.page))
    {
        /* There is space in the existing allocated pages to perform the
         * resize without requiring the modification or creation of a
         * neighbouring free node so we remove the watch point area if it
         * exists.
         */
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          m - n->size, MA_READWRITE);
    }
    else if (d > 0)
    {
        /* If the request was to increase the size of the node and we have no
         * suitable node to merge with or the total size of both nodes is still
         * too small then we just fail.  The relocation to a larger memory
         * allocation is done by the calling function.
         */
        if ((p == NULL) || (m + p->size < l))
            return 0;
        __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            /* s is the number of bytes to take from the free node. */
            s = __mp_roundup(l, h->heap.memory.page) - m;
            /* Remove any memory protection and the watch point area if it
             * exists.
             */
            __mp_memprotect(&h->heap.memory, (char *) p->block - h->oflow,
                            s, MA_READWRITE);
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = d;
            /* Remove the right-most watch point area if it exists. */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m,
                              h->oflow, MA_READWRITE);
        }
        /* Shrink the free node from its left edge by s bytes. */
        p->block = (char *) p->block + s;
        p->size -= s;
        /* If the resulting size of the free block we merged with is zero then
         * we can just delete it, otherwise we must insert it back into the
         * free tree.
         */
        if (p->size == 0)
        {
            __mp_remove(&h->list, &p->lnode);
            __mp_freeslot(&h->table, p);
        }
        else
            __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize -= s;
    }
    else if (d < 0)
    {
        /* If the request was to decrease the size of the node then we
         * must either increase the size of the bordering node, or create
         * a new free node.
         */
        if (p == NULL)
        {
            if ((p = getnode(h)) == NULL)
                return 0;
            /* Create an empty free node immediately to the right of this
             * allocation; it will be grown below.
             */
            __mp_insert(&h->list, &n->lnode, &p->lnode);
            p->block = (char *) n->block + m + h->oflow;
            p->size = 0;
            p->info = NULL;
        }
        else
            __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            /* s is the number of whole pages being given back. */
            s = m - __mp_roundup(l, h->heap.memory.page);
            /* Remove the watch point area if it exists. */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = -d;
            /* Remove the right-most watch point area if it exists. */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m,
                              h->oflow, MA_READWRITE);
        }
        /* Grow the free node leftwards by s bytes and fill or protect the
         * reclaimed region.
         */
        p->block = (char *) p->block - s;
        p->size += s;
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, s, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, s);
        __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize += s;
    }
    /* Re-establish the overflow buffer or watch point area to the right of
     * the resized allocation.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        s = __mp_roundup(l, h->heap.memory.page) - l;
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + l, s,
                          MA_NOACCESS);
        else
            __mp_memset((char *) n->block + l, h->obyte, s);
    }
    else if (h->flags & FLG_OFLOWWATCH)
        __mp_memwatch(&h->heap.memory, (char *) n->block + l, h->oflow,
                      MA_NOACCESS);
    else
        __mp_memset((char *) n->block + l, h->obyte, h->oflow);
    n->size = l;
    h->asize += d;
    return 1;
}
/* Split a free node n so that it contains an allocation of l bytes aligned
 * to a bytes, attaching the information pointer i to the resulting
 * allocated node.  Any surplus space becomes new free nodes to the left
 * and/or right.  Returns the allocated node, or NULL on lack of node slots.
 */
static allocnode * splitnode(allochead *h, allocnode *n, size_t l, size_t a,
                             void *i)
{
    allocnode *p, *q;
    size_t m, s;

    /* We choose the worst case scenario here and allocate new nodes for
     * both the left and right nodes.  This is so that we can easily recover
     * from lack of system memory at this point rather than rebuild the
     * original free node if we discover that we are out of memory later.
     */
    if (((p = getnode(h)) == NULL) || ((q = getnode(h)) == NULL))
    {
        /* q failed (or was never attempted); give back p if we got it. */
        if (p != NULL)
            __mp_freeslot(&h->table, p);
        return NULL;
    }
    /* Remove the free node from the free tree. */
    __mp_treeremove(&h->ftree, &n->tnode);
    h->fsize -= n->size;
    /* Shrink the node inwards to leave room for the overflow buffers on
     * both sides.
     */
    n->block = (char *) n->block + h->oflow;
    n->size -= h->oflow << 1;
    /* Check to see if we have space left over to create a free node to the
     * left of the new node.  This is never done if all allocations are pages.
     */
    if (!(h->flags & FLG_PAGEALLOC) &&
        ((m = __mp_roundup((unsigned long) n->block, a) -
          (unsigned long) n->block) > 0))
    {
        __mp_prepend(&h->list, &n->lnode, &p->lnode);
        __mp_treeinsert(&h->ftree, &p->tnode, m);
        p->block = (char *) n->block - h->oflow;
        p->size = m;
        p->info = NULL;
        n->block = (char *) n->block + m;
        n->size -= m;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, p);
    /* If we are allocating pages then the effective block size is the
     * original size rounded up to a multiple of the system page size.
     */
    if (h->flags & FLG_PAGEALLOC)
        s = __mp_roundup(l, h->heap.memory.page);
    else
        s = l;
    /* Check to see if we have space left over to create a free node to the
     * right of the new node.  This free node will always have a size which is
     * a multiple of the system page size if all allocations are pages.
     */
    if ((m = n->size - s) > 0)
    {
        __mp_insert(&h->list, &n->lnode, &q->lnode);
        __mp_treeinsert(&h->ftree, &q->tnode, m);
        q->block = (char *) n->block + s + h->oflow;
        q->size = m;
        q->info = NULL;
        n->size = s;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, q);
    /* Initialise the details of the newly allocated node and insert it in
     * the allocation tree.
     */
    n->info = i;
    if (h->flags & FLG_PAGEALLOC)
    {
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_READWRITE);
        /* If we are aligning the end of allocations to the upper end of pages
         * then we may have to shift the start of the block up by a certain
         * number of bytes.  This will then also lead to us having to prefill
         * the unused space with the overflow byte or place a watch point area
         * there.
         */
        if ((h->flags & FLG_ALLOCUPPER) &&
            ((m = __mp_rounddown(n->size - l, a)) > 0))
        {
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, n->block, m, MA_NOACCESS);
            else
                __mp_memset(n->block, h->obyte, m);
            n->block = (char *) n->block + m;
            n->size -= m;
        }
        /* We may need to prefill any unused space at the end of the block with
         * the overflow byte, or place a watch point area there.
         */
        if ((m = n->size - l) > 0)
        {
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + l, m,
                              MA_NOACCESS);
            else
                __mp_memset((char *) n->block + l, h->obyte, m);
        }
        n->size = l;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Guard the overflow buffers on both sides with watch points. */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                      h->oflow, MA_NOACCESS);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                      h->oflow, MA_NOACCESS);
    }
    else
    {
        /* Fill the overflow buffers on both sides with the overflow byte. */
        __mp_memset((char *) n->block - h->oflow, h->obyte, h->oflow);
        __mp_memset((char *) n->block + n->size, h->obyte, h->oflow);
    }
    __mp_treeinsert(&h->atree, &n->tnode, (unsigned long) n->block);
    h->asize += n->size;
    return n;
}