Example 1
MP_GLOBAL
heapnode *
__mp_heapalloc(heaphead *h, size_t l, size_t a, int i)
{
    heapnode *n;
    void *p;
    size_t s;

    /* If we have no more heap node slots left then we must allocate
     * some more memory for them.  An extra MP_ALLOCFACTOR pages of memory
     * should suffice.
     */
    if ((n = (heapnode *) __mp_getslot(&h->table)) == NULL)
    {
        s = h->memory.page * MP_ALLOCFACTOR;
        if ((p = __mp_memalloc(&h->memory, &s, h->table.entalign, 0)) == NULL)
            return NULL;
        __mp_initslots(&h->table, p, s);
        n = (heapnode *) __mp_getslot(&h->table);
        __mp_treeinsert(&h->itree, &n->node, (unsigned long) p);
        n->block = p;
        n->size = s;
        h->isize += s;
        if (h->tracing)
            __mp_traceheap(p, s, 1);
#if MP_INUSE_SUPPORT
        _Inuse_heapalloc(p, s);
#endif /* MP_INUSE_SUPPORT */
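        /* The slot we just took records the internal block that holds the
         * new node table, so fetch a second slot to describe the caller's
         * requested block.
         */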
        n = (heapnode *) __mp_getslot(&h->table);
    }
    /* Allocate the requested block of memory and add it to the heap.
     */
    if ((p = __mp_memalloc(&h->memory, &l, a, !i)) == NULL)
    {
        __mp_freeslot(&h->table, n);
        return NULL;
    }
    __mp_treeinsert(&h->dtree, &n->node, (unsigned long) p);
    n->block = p;
    n->size = l;
    h->dsize += l;
    if (h->tracing)
        __mp_traceheap(p, l, i);
#if MP_INUSE_SUPPORT
    _Inuse_heapalloc(p, l);
#endif /* MP_INUSE_SUPPORT */
    return n;
}
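For orientation, __mp_heapalloc is driven by internal callers such as getnode in Example 2, which passes 1 as the final flag, and __mp_getalloc in Example 12, which passes 0; the flag is passed on (inverted) to the memory layer and to the heap tracing call. A minimal sketch of a page-sized request, assuming a heaphead that has already been initialised elsewhere (the function name grab_page is hypothetical):

static heapnode *
grab_page(heaphead *heap)
{
    heapnode *hn;

    /* Ask the heap layer for one page with the default alignment; a NULL
     * return means the underlying memory allocator is exhausted.
     */
    if ((hn = __mp_heapalloc(heap, heap->memory.page, heap->memory.align,
          1)) == NULL)
        return NULL;
    /* hn->block and hn->size now describe the newly obtained block. */
    return hn;
}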
Example 2
static
allocnode *
getnode(allochead *h)
{
    allocnode *n;
    heapnode *p;

    /* If we have no more allocation node slots left then we must allocate
     * some more memory for them.  An extra MP_ALLOCFACTOR pages of memory
     * should suffice.
     */
    if ((n = (allocnode *) __mp_getslot(&h->table)) == NULL)
    {
        if ((p = __mp_heapalloc(&h->heap, h->heap.memory.page * MP_ALLOCFACTOR,
              h->table.entalign, 1)) == NULL)
            return NULL;
        __mp_initslots(&h->table, p->block, p->size);
        n = (allocnode *) __mp_getslot(&h->table);
        n->lnode.next = n->lnode.prev = NULL;
        __mp_treeinsert(&h->itree, &n->tnode, (unsigned long) p->block);
        n->block = p->block;
        n->size = p->size;
        n->info = NULL;
        h->isize += p->size;
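        /* That first slot records the internal block just obtained from the
         * heap, so fetch a second slot to hand back to the caller.
         */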
        n = (allocnode *) __mp_getslot(&h->table);
    }
    return n;
}
Example 3
static
vertex *
addvertex(profilenode *n)
{
    vertex *v;

    if ((v = (vertex *) malloc(sizeof(vertex))) == NULL)
    {
        fprintf(stderr, "%s: Out of memory\n", progname);
        exit(EXIT_FAILURE);
    }
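    /* Key the vertex on the raw call-site address when the user asked for
     * addresses or when no symbol is associated with this node; otherwise
     * key it on the symbol so that lookups can be made per function.
     */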
    if (useaddresses || (n->symbol == 0))
        __mp_treeinsert(&temptree, &v->node, (unsigned long) n->addr);
    else
        __mp_treeinsert(&temptree, &v->node, n->symbol);
    __mp_addnode(&graph, &v->gnode);
    v->pnode = n;
    v->index = 0;
    return v;
}
Example 4
static
allocnode *
mergenode(allochead *h, allocnode *n)
{
    allocnode *l, *r;

    /* See if the left node is free and borders on this node.
     */
    l = (allocnode *) n->lnode.prev;
    if ((l->lnode.prev == NULL) || (l->info != NULL) ||
        ((char *) l->block + l->size < (char *) n->block))
        l = NULL;
    /* See if the right node is free and borders on this node.
     */
    r = (allocnode *) n->lnode.next;
    if ((r->lnode.next == NULL) || (r->info != NULL) ||
        ((char *) n->block + n->size < (char *) r->block))
        r = NULL;
    /* If either or both of the left and right nodes are suitable for
     * merging then perform the merge.
     */
    if ((l != NULL) || (r != NULL))
    {
        __mp_treeremove(&h->ftree, &n->tnode);
        if (l != NULL)
        {
            __mp_remove(&h->list, &l->lnode);
            __mp_treeremove(&h->ftree, &l->tnode);
            n->block = l->block;
            n->size += l->size;
            __mp_freeslot(&h->table, l);
        }
        if (r != NULL)
        {
            __mp_remove(&h->list, &r->lnode);
            __mp_treeremove(&h->ftree, &r->tnode);
            n->size += r->size;
            __mp_freeslot(&h->table, r);
        }
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    }
    return n;
}
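A small worked example of the merge arithmetic, using hypothetical addresses:

    /* Suppose the left neighbour l spans [0x1000, 0x1100) and n spans
     * [0x1100, 0x1200), both free.  After the merge n->block is 0x1000 and
     * n->size is 0x200, l's slot has been returned to the table and n has
     * been re-inserted into the free tree under its new size.
     */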
Example 5
static
allocation *
newalloc(unsigned long i, unsigned long e, void *a, size_t l)
{
    allocation *n;

    if ((n = (allocation *) __mp_search(alloctree.root, i)) != NULL)
    {
        if (n->time == 0)
            fprintf(stderr, "%s: Allocation index `%lu' has been allocated "
                    "twice without being freed\n", progname, i);
    }
    else
    {
        if ((n = (allocation *) malloc(sizeof(allocation))) == NULL)
        {
            fprintf(stderr, "%s: Out of memory\n", progname);
            exit(EXIT_FAILURE);
        }
        __mp_treeinsert(&alloctree, &n->node, i);
        n->event = e;
    }
    if (simfile != NULL)
    {
        if ((n->entry = __mp_getslot(&table)) == NULL)
        {
            fprintf(stderr, "%s: Too many allocations in use\n", progname);
            exit(EXIT_FAILURE);
        }
    }
    else
        n->entry = NULL;
    n->addr = a;
    n->size = l;
    n->time = 0;
    return n;
}
Example 6
static
void
directtable(void)
{
    profiledata *d;
    profilenode *n, *p;
    treenode *t;
    profiledata m;
    size_t i;
    unsigned long a, b, c;
    double e, f;

    cleardata(&m);
    printchar(' ', 31);
    fputs("DIRECT ALLOCATIONS\n\n", stdout);
    printchar(' ', 20);
    fprintf(stdout, "(0 < s <= %lu < m <= %lu < l <= %lu < x)\n\n",
            sbound, mbound, lbound);
    if (showcounts)
    {
        printchar(' ', 9);
        fputs("allocated", stdout);
        printchar(' ', 21);
        fputs("unfreed\n", stdout);
        printchar('-', 27);
        fputs("  ", stdout);
        printchar('-', 27);
        fputs("\n count       %   s  m  l  x   "
              "count       %   s  m  l  x     bytes  function\n\n", stdout);
    }
    else
    {
        printchar(' ', 10);
        fputs("allocated", stdout);
        printchar(' ', 23);
        fputs("unfreed\n", stdout);
        printchar('-', 29);
        fputs("  ", stdout);
        printchar('-', 29);
        fputs("\n   bytes       %   s  m  l  x     "
              "bytes       %   s  m  l  x   count  function\n\n", stdout);
    }
    for (n = (profilenode *) __mp_minimum(proftree.root); n != NULL; n = p)
    {
        p = (profilenode *) __mp_successor(&n->node);
        if (n->data != 0)
        {
            d = &n->tdata;
            sumdata(d, &data[n->data - 1]);
            while ((p != NULL) && ((p->addr == n->addr) || (!useaddresses &&
                     (p->symbol != 0) && (p->symbol == n->symbol))))
            {
                if (p->data != 0)
                    sumdata(d, &data[p->data - 1]);
                p = (profilenode *) __mp_successor(&p->node);
            }
            a = 0;
            for (i = 0; i < 4; i++)
                if (showcounts)
                    a += d->acount[i];
                else
                    a += d->atotal[i];
            __mp_treeinsert(&temptree, &n->tnode, a);
            sumdata(&m, d);
        }
    }
    for (t = __mp_maximum(temptree.root); t != NULL; t = __mp_predecessor(t))
    {
        n = (profilenode *) ((char *) t - offsetof(profilenode, tnode));
        d = &n->tdata;
        a = t->key;
        b = c = 0;
        for (i = 0; i < 4; i++)
        {
            if (showcounts)
            {
                b += d->dcount[i];
                c += d->atotal[i];
            }
            else
            {
                b += d->dtotal[i];
                c += d->acount[i];
            }
            d->dcount[i] = d->acount[i] - d->dcount[i];
            d->dtotal[i] = d->atotal[i] - d->dtotal[i];
        }
        b = a - b;
        if (showcounts)
        {
            e = ((double) a / (double) acount) * 100.0;
            if (acount != dcount)
                f = ((double) b / (double) (acount - dcount)) * 100.0;
            else
                f = 0.0;
            fprintf(stdout, "%6lu  %6.2f ", a, e);
            printdata(d->acount, acount);
            fprintf(stdout, "  %6lu  %6.2f ", b, f);
            printdata(d->dcount, acount - dcount);
            fprintf(stdout, "  %8lu  ", c);
        }
        else
        {
            e = ((double) a / (double) atotal) * 100.0;
            if (atotal != dtotal)
                f = ((double) b / (double) (atotal - dtotal)) * 100.0;
            else
                f = 0.0;
            fprintf(stdout, "%8lu  %6.2f ", a, e);
            printdata(d->atotal, atotal);
            fprintf(stdout, "  %8lu  %6.2f ", b, f);
            printdata(d->dtotal, atotal - dtotal);
            fprintf(stdout, "  %6lu  ", c);
        }
        printsymbol(stdout, n);
        fputc('\n', stdout);
        cleardata(d);
    }
    for (i = 0; i < 4; i++)
    {
        m.dcount[i] = m.acount[i] - m.dcount[i];
        m.dtotal[i] = m.atotal[i] - m.dtotal[i];
    }
    if (temptree.size != 0)
        fputc('\n', stdout);
    if (showcounts)
    {
        fprintf(stdout, "%6lu         ", acount);
        printdata(m.acount, acount);
        fprintf(stdout, "  %6lu         ", acount - dcount);
        printdata(m.dcount, acount - dcount);
        fprintf(stdout, "  %8lu  total\n", atotal);
    }
    else
    {
        fprintf(stdout, "%8lu         ", atotal);
        printdata(m.atotal, atotal);
        fprintf(stdout, "  %8lu         ", atotal - dtotal);
        printdata(m.dtotal, atotal - dtotal);
        fprintf(stdout, "  %6lu  total\n", acount);
    }
    __mp_newtree(&temptree);
}
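The cast at the top of the second loop, (profilenode *) ((char *) t - offsetof(profilenode, tnode)), recovers the profilenode that embeds the treenode returned by the tree walk. A minimal sketch of the idiom in isolation (the macro name is illustrative and not part of mpatrol):

#include <stddef.h>

/* Recover a pointer to the enclosing structure from a pointer to one of
 * its members; equivalent to the open-coded casts used above.
 */
#define CONTAINER_OF(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

/* Usage:  n = CONTAINER_OF(t, profilenode, tnode); */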
Example 7
static
void
readfile(void)
{
    char s[4];
    profiledata *d;
    profilenode *p;
    size_t i;
    unsigned long n;
    int b;

    /* When reading the profiling output file, we assume that if it begins and
     * ends with the magic sequence of characters then it is a valid profiling
     * output file from the mpatrol library.  There are probably an infinite
     * number of checks we could do to ensure that the rest of the data in the
     * file is valid, but that would be overcomplicated and probably slow this
     * program down.  However, if the file is only partially written then the
     * getentry() function will catch the error before we do something silly.
     */
    getentry(s, sizeof(char), 4, 0);
    if (memcmp(s, MP_PROFMAGIC, 4) != 0)
    {
        fprintf(stderr, "%s: Invalid file format\n", progname);
        exit(EXIT_FAILURE);
    }
    /* The following test allows us to read profiling output files that were
     * produced on a different processor architecture.  If the next word in the
     * file does not contain the value 1 then we have to byte-swap any further
     * data that we read from the file.  Note that this test only works if the
     * word size is the same on both machines.
     */
    getentry(&i, sizeof(size_t), 1, 0);
    b = (i != 1);
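    /* For example, with 4-byte words the value 1 is written as the bytes
     * 01 00 00 00 by a little-endian system; a big-endian reader of the
     * same word size sees 0x01000000 instead, so any value other than 1
     * means that every subsequent word must be byte-swapped.
     */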
    /* Get the version number of the mpatrol library which produced the
     * profiling output file.  The profiling file format changed to include the
     * version number at mpatrol 1.3.0 so we can't reliably read files produced
     * before then.  We also assume that we can't read files produced by later
     * versions of mpatrol.
     */
    getentry(&version, sizeof(unsigned long), 1, b);
    if (version < 10300)
    {
        fprintf(stderr, "%s: Profiling file version too old\n", progname);
        exit(EXIT_FAILURE);
    }
    else if (version / 100 > MP_VERNUM / 100)
    {
        fprintf(stderr, "%s: Profiling file version too new\n", progname);
        exit(EXIT_FAILURE);
    }
    getentry(&sbound, sizeof(size_t), 1, b);
    getentry(&mbound, sizeof(size_t), 1, b);
    getentry(&lbound, sizeof(size_t), 1, b);
    /* Read the allocation and deallocation bins.
     */
    getentry(&binsize, sizeof(size_t), 1, b);
    if (binsize > 0)
    {
        if (((acounts = (size_t *) malloc(binsize * sizeof(size_t))) == NULL) ||
            ((dcounts = (size_t *) malloc(binsize * sizeof(size_t))) == NULL))
        {
            fprintf(stderr, "%s: Out of memory\n", progname);
            exit(EXIT_FAILURE);
        }
        getentry(acounts, sizeof(size_t), binsize, b);
        getentry(&atotals, sizeof(size_t), 1, b);
        getentry(dcounts, sizeof(size_t), binsize, b);
        getentry(&dtotals, sizeof(size_t), 1, b);
        for (i = 0; i < binsize; i++)
        {
            acount += acounts[i];
            dcount += dcounts[i];
            if (i == binsize - 1)
            {
                atotal += atotals;
                dtotal += dtotals;
            }
            else
            {
                atotal += acounts[i] * (i + 1);
                dtotal += dcounts[i] * (i + 1);
            }
        }
    }
    /* Read the profiling data structures.
     */
    getentry(&datasize, sizeof(size_t), 1, b);
    if (datasize > 0)
    {
        if ((data = (profiledata *) malloc(datasize * sizeof(profiledata))) ==
            NULL)
        {
            fprintf(stderr, "%s: Out of memory\n", progname);
            exit(EXIT_FAILURE);
        }
        for (i = 0; i < datasize; i++)
        {
            getentry(&n, sizeof(unsigned long), 1, b);
            d = &data[n - 1];
            getentry(d->acount, sizeof(size_t), 4, b);
            getentry(d->atotal, sizeof(size_t), 4, b);
            getentry(d->dcount, sizeof(size_t), 4, b);
            getentry(d->dtotal, sizeof(size_t), 4, b);
        }
    }
    /* Read the statistics for every call site.
     */
    getentry(&nodesize, sizeof(size_t), 1, b);
    if (nodesize > 0)
    {
        if ((nodes = (profilenode *) malloc(nodesize * sizeof(profilenode))) ==
            NULL)
        {
            fprintf(stderr, "%s: Out of memory\n", progname);
            exit(EXIT_FAILURE);
        }
        for (i = 0; i < nodesize; i++)
        {
            getentry(&n, sizeof(unsigned long), 1, b);
            p = &nodes[n - 1];
            getentry(&p->parent, sizeof(unsigned long), 1, b);
            getentry(&p->addr, sizeof(void *), 1, b);
            getentry(&p->symbol, sizeof(unsigned long), 1, b);
            getentry(&p->name, sizeof(unsigned long), 1, b);
            getentry(&p->data, sizeof(unsigned long), 1, b);
            __mp_treeinsert(&proftree, &p->node, (unsigned long) p->addr);
            cleardata(&p->tdata);
            p->flags = 0;
        }
    }
    /* Read the table containing the symbol addresses.
     */
    getentry(&i, sizeof(size_t), 1, b);
    if (i > 0)
    {
        if ((addrs = (void **) malloc(i * sizeof(void *))) == NULL)
        {
            fprintf(stderr, "%s: Out of memory\n", progname);
            exit(EXIT_FAILURE);
        }
        getentry(addrs, sizeof(void *), i, b);
    }
    /* Read the string table containing the symbol names.
     */
    getentry(&i, sizeof(size_t), 1, b);
    if (i > 0)
    {
        if ((symbols = (char *) malloc(i * sizeof(char))) == NULL)
        {
            fprintf(stderr, "%s: Out of memory\n", progname);
            exit(EXIT_FAILURE);
        }
        getentry(symbols, sizeof(char), i, 0);
    }
    getentry(s, sizeof(char), 4, 0);
    if (memcmp(s, MP_PROFMAGIC, 4) != 0)
    {
        fprintf(stderr, "%s: Invalid file format\n", progname);
        exit(EXIT_FAILURE);
    }
}
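For reference, readfile consumes the file in exactly the order it was written: the opening magic sequence, the word used to detect byte order, the library version, the three size boundaries (sbound, mbound and lbound), the allocation and deallocation bins, the profiling data structures, the per-call-site statistics nodes, the symbol address table, the symbol name string table and finally the closing magic sequence.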
Example 8
static
void
leaktable(void)
{
    profiledata *d;
    profilenode *n, *p;
    treenode *t;
    size_t i;
    unsigned long a, b, j, k;
    double e, f, g;

    printchar(' ', 34);
    fputs("MEMORY LEAKS\n\n", stdout);
    printchar(' ', 28);
    fprintf(stdout, "(maximum stack depth: %lu)\n\n", maxstack);
    printchar(' ', 16);
    fputs("unfreed", stdout);
    printchar(' ', 22);
    fputs("allocated\n", stdout);
    printchar('-', 40);
    fputs("  ", stdout);
    printchar('-', 16);
    if (showcounts)
        fputs("\n     %   count       %     bytes       %   "
              "count     bytes  function\n\n", stdout);
    else
        fputs("\n     %     bytes       %   count       %     "
              "bytes   count  function\n\n", stdout);
    for (n = (profilenode *) __mp_minimum(proftree.root); n != NULL; n = p)
    {
        p = (profilenode *) __mp_successor(&n->node);
        if ((n->data != 0) && !n->flags)
        {
            d = &n->tdata;
            sumdata(d, &data[n->data - 1]);
            while ((p != NULL) && ((p->addr == n->addr) || (!useaddresses &&
                     (p->symbol != 0) && (p->symbol == n->symbol))))
            {
                if ((p->data != 0) && !p->flags && comparestack(n, p, 0))
                {
                    sumdata(d, &data[p->data - 1]);
                    p->flags = 1;
                }
                p = (profilenode *) __mp_successor(&p->node);
            }
            p = (profilenode *) __mp_successor(&n->node);
            a = 0;
            for (i = 0; i < 4; i++)
                if (showcounts)
                    a += d->acount[i] - d->dcount[i];
                else
                    a += d->atotal[i] - d->dtotal[i];
            if (a > 0)
                __mp_treeinsert(&temptree, &n->tnode, a);
        }
    }
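    /* Clear the grouping flags that were set above so that later passes
     * over the profiling tree can reuse them.
     */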
    for (n = (profilenode *) __mp_minimum(proftree.root); n != NULL;
         n = (profilenode *) __mp_successor(&n->node))
        n->flags = 0;
    for (t = __mp_maximum(temptree.root); t != NULL; t = __mp_predecessor(t))
    {
        n = (profilenode *) ((char *) t - offsetof(profilenode, tnode));
        d = &n->tdata;
        a = t->key;
        b = j = k = 0;
        for (i = 0; i < 4; i++)
            if (showcounts)
            {
                b += d->dtotal[i];
                j += d->acount[i];
                k += d->atotal[i];
            }
            else
            {
                b += d->dcount[i];
                j += d->atotal[i];
                k += d->acount[i];
            }
        b = k - b;
        e = ((double) a / (double) j) * 100.0;
        f = ((double) b / (double) k) * 100.0;
        if (showcounts)
        {
            g = ((double) a / (double) (acount - dcount)) * 100.0;
            fprintf(stdout, "%6.2f  %6lu  %6.2f  %8lu  %6.2f  %6lu  %8lu  ",
                    g, a, e, b, f, j, k);
        }
        else
        {
            g = ((double) a / (double) (atotal - dtotal)) * 100.0;
            fprintf(stdout, "%6.2f  %8lu  %6.2f  %6lu  %6.2f  %8lu  %6lu  ",
                    g, a, e, b, f, j, k);
        }
        printsymbol(stdout, n);
        fputc('\n', stdout);
        p = n;
        for (i = 1; (maxstack == 0) || (i < maxstack); i++)
        {
            if (p->parent == 0)
                break;
            p = &nodes[p->parent - 1];
            printchar(' ', 60);
            printsymbol(stdout, p);
            fputc('\n', stdout);
        }
        cleardata(d);
    }
    if (acount != 0)
        e = ((double) (acount - dcount) / (double) acount) * 100.0;
    else
        e = 0.0;
    if (atotal != 0)
        f = ((double) (atotal - dtotal) / (double) atotal) * 100.0;
    else
        f = 0.0;
    if (temptree.size != 0)
        fputc('\n', stdout);
    if (showcounts)
        fprintf(stdout, "        %6lu  %6.2f  %8lu  %6.2f  %6lu  %8lu  total\n",
                acount - dcount, e, atotal - dtotal, f, acount, atotal);
    else
        fprintf(stdout, "        %8lu  %6.2f  %6lu  %6.2f  %8lu  %6lu  total\n",
                atotal - dtotal, f, acount - dcount, e, atotal, acount);
    __mp_newtree(&temptree);
}
Example 9
MP_GLOBAL
void
__mp_recyclefreed(allochead *h)
{
    allocnode *n;
    void *p;
    size_t l, s;

    n = (allocnode *) ((char *) h->flist.head - offsetof(allocnode, fnode));
    /* Remove the freed node from the freed list and the freed tree.
     */
    __mp_remove(&h->flist, &n->fnode);
    __mp_treeremove(&h->gtree, &n->tnode);
    h->gsize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
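        /* p and s now describe the whole span of pages covering the
         * allocation, including any leading space between the start of the
         * first page and the start of the block.
         */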
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    /* We are placing this node on the free tree and so it will become
     * available for reuse.  If all allocations are pages then we prevent
     * the contents from being read or written to, otherwise the contents
     * will be filled with the free byte.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Any watch points will have already been removed, and the
         * surrounding overflow buffers will already be protected with
         * the MA_NOACCESS flag.
         */
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
        n->block = p;
        n->size = s;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Remove any watch points that were made to monitor the overflow
         * buffers.
         */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_READWRITE);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_READWRITE);
    }
    n->block = (char *) n->block - h->oflow;
    n->size += h->oflow << 1;
    n->info = NULL;
    if (!(h->flags & FLG_PAGEALLOC))
        __mp_memset(n->block, h->fbyte, n->size);
    __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    h->fsize += n->size;
    mergenode(h, n);
}
Example 10
MP_GLOBAL
void
__mp_freealloc(allochead *h, allocnode *n, void *i)
{
    void *p = NULL;
    size_t l, s = 0;

    /* If we are keeping the details (and possibly the contents) of a specified
     * number of recently freed memory allocations then we may have to recycle
     * the oldest freed allocation if the length of the queue would extend past
     * the user-specified limit.
     */
    if ((i != NULL) && (h->flist.size != 0) && (h->flist.size == h->fmax))
        __mp_recyclefreed(h);
    /* Remove the allocated node from the allocation tree.
     */
    __mp_treeremove(&h->atree, &n->tnode);
    h->asize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    if (i != NULL)
    {
        /* We are keeping this node and so place it on the freed tree.
         * If all allocations are pages then we either prevent the original
         * contents from being both read from and written to, or prevent the
         * allocation from being written to.  If not then we may optionally
         * preserve its contents, otherwise it will be filled with the free
         * byte.
         */
        n->info = i;
        if (h->flags & FLG_PAGEALLOC)
        {
            if (h->flags & FLG_PRESERVE)
            {
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_READONLY);
                if (h->flags & FLG_OFLOWWATCH)
                {
                    /* Replace any watch points within the allocated pages.
                     * We have to do this here because when we change the
                     * memory protection we may trigger a watch point on some
                     * systems.
                     */
                    if ((l = (char *) n->block - (char *) p) > 0)
                        __mp_memwatch(&h->heap.memory, p, l, MA_NOACCESS);
                    if ((l = s - n->size - l) > 0)
                        __mp_memwatch(&h->heap.memory, (char *) n->block +
                                      n->size, l, MA_NOACCESS);
                }
            }
            else
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_NOACCESS);
        }
        else if (!(h->flags & FLG_PRESERVE))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_addtail(&h->flist, &n->fnode);
        __mp_treeinsert(&h->gtree, &n->tnode, (unsigned long) n->block);
        h->gsize += n->size;
    }
    else
    {
        /* We are placing this node on the free tree and so it will become
         * available for reuse.  If all allocations are pages then we prevent
         * the contents from being read or written to, otherwise the contents
         * will be filled with the free byte.
         */
        if (h->flags & FLG_PAGEALLOC)
        {
            /* Any watch points will have already been removed, and the
             * surrounding overflow buffers will already be protected with
             * the MA_NOACCESS flag.
             */
            __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
            n->block = p;
            n->size = s;
        }
        else if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points that were made to monitor the overflow
             * buffers.
             */
            __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                          h->oflow, MA_READWRITE);
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          h->oflow, MA_READWRITE);
        }
        n->block = (char *) n->block - h->oflow;
        n->size += h->oflow << 1;
        n->info = NULL;
        if (!(h->flags & FLG_PAGEALLOC))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
        h->fsize += n->size;
        mergenode(h, n);
    }
}
Example 11
MP_GLOBAL
int
__mp_resizealloc(allochead *h, allocnode *n, size_t l)
{
    allocnode *p;
    size_t m, s;
    long d;

    /* If all allocations are pages and the allocations are to be aligned
     * to the end of a page then the easiest solution is to fail here since
     * the majority of cases would require relocation of the original memory
     * allocation.
     */
    if ((h->flags & FLG_PAGEALLOC) && (h->flags & FLG_ALLOCUPPER))
        return 0;
    if (l == 0)
        l = 1;
    d = l - n->size;
    /* If we are allocating pages then the effective block size is the
     * original size rounded up to a multiple of the system page size.
     */
    if (h->flags & FLG_PAGEALLOC)
        m = __mp_roundup(n->size, h->heap.memory.page);
    else
        m = n->size;
    /* Obtain the bordering free node to the right of this node, if one
     * exists.  There is no need to look any further right as it is
     * guaranteed that it will not be another bordering free node.
     */
    p = (allocnode *) n->lnode.next;
    if ((p->lnode.next == NULL) || (p->info != NULL) ||
        ((char *) n->block + m + h->oflow < (char *) p->block))
        p = NULL;
    if ((h->flags & FLG_PAGEALLOC) && (l <= m) && (l > m - h->heap.memory.page))
    {
        /* There is space in the existing allocated pages to perform the
         * resize without requiring the modification or creation of a
         * neighbouring free node so we remove the watch point area if it
         * exists.
         */
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          m - n->size, MA_READWRITE);
    }
    else if (d > 0)
    {
        /* If the request was to increase the size of the node and we have no
         * suitable node to merge with or the total size of both nodes is still
         * too small then we just fail.  The relocation to a larger memory
         * allocation is done by the calling function.
         */
        if ((p == NULL) || (m + p->size < l))
            return 0;
        __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            s = __mp_roundup(l, h->heap.memory.page) - m;
            /* Remove any memory protection and the watch point area if it
             * exists.
             */
            __mp_memprotect(&h->heap.memory, (char *) p->block - h->oflow, s,
                            MA_READWRITE);
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = d;
            /* Remove the right-most watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m, h->oflow,
                              MA_READWRITE);
        }
        p->block = (char *) p->block + s;
        p->size -= s;
        /* If the resulting size of the free block we merged with is zero then
         * we can just delete it, otherwise we must insert it back into the
         * free tree.
         */
        if (p->size == 0)
        {
            __mp_remove(&h->list, &p->lnode);
            __mp_freeslot(&h->table, p);
        }
        else
            __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize -= s;
    }
    else if (d < 0)
    {
        /* If the request was to decrease the size of the node then we
         * must either increase the size of the bordering node, or create
         * a new free node.
         */
        if (p == NULL)
        {
            if ((p = getnode(h)) == NULL)
                return 0;
            __mp_insert(&h->list, &n->lnode, &p->lnode);
            p->block = (char *) n->block + m + h->oflow;
            p->size = 0;
            p->info = NULL;
        }
        else
            __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            s = m - __mp_roundup(l, h->heap.memory.page);
            /* Remove the watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = -d;
            /* Remove the right-most watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m, h->oflow,
                              MA_READWRITE);
        }
        p->block = (char *) p->block - s;
        p->size += s;
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, s, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, s);
        __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize += s;
    }
    if (h->flags & FLG_PAGEALLOC)
    {
        s = __mp_roundup(l, h->heap.memory.page) - l;
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + l, s,
                          MA_NOACCESS);
        else
            __mp_memset((char *) n->block + l, h->obyte, s);
    }
    else if (h->flags & FLG_OFLOWWATCH)
        __mp_memwatch(&h->heap.memory, (char *) n->block + l, h->oflow,
                      MA_NOACCESS);
    else
        __mp_memset((char *) n->block + l, h->obyte, h->oflow);
    n->size = l;
    h->asize += d;
    return 1;
}
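A return value of 0 means that the resize could not be performed in place, and the caller is then expected to relocate the allocation itself, as the comments above note. A minimal sketch of that calling pattern, with hypothetical names:

    if (!__mp_resizealloc(&head, node, newsize))
    {
        /* The node could not be resized in place: allocate a new block of
         * newsize bytes, copy the old contents across and free the
         * original node (the relocation referred to in the comments above).
         */
    }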
Example 12
MP_GLOBAL
allocnode *
__mp_getalloc(allochead *h, size_t l, size_t a, void *i)
{
    allocnode *n, *r, *s;
    heapnode *p;
    treenode *t;
    size_t b, m;

    b = h->oflow << 1;
    if (l == 0)
        l = 1;
    if (a == 0)
        a = h->heap.memory.align;
    else if (a > h->heap.memory.page)
        a = h->heap.memory.page;
    else
        a = __mp_poweroftwo(a);
    /* If all allocations are not pages then we must add more bytes to the
     * allocation request to account for alignment.
     */
    if (h->flags & FLG_PAGEALLOC)
        m = 0;
    else
        m = a - 1;
    /* If we have no suitable space for this allocation then we must allocate
     * memory via the heap manager.
     */
    if ((t = __mp_searchhigher(h->ftree.root, l + b + m)) == NULL)
    {
        if ((n = getnode(h)) == NULL)
            return NULL;
        /* If all allocations are pages then we must specify that we want our
         * heap allocation to be page-aligned.
         */
        if (h->flags & FLG_PAGEALLOC)
            m = h->heap.memory.page;
        else
            m = a;
        if ((p = __mp_heapalloc(&h->heap,
              __mp_roundup(l + b, h->heap.memory.page), m, 0)) == NULL)
        {
            __mp_freeslot(&h->table, n);
            return NULL;
        }
        /* Initialise the free memory.  If all allocations are pages then we
         * prevent any free memory from being both read from and written to.
         */
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, p->size, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, p->size);
        /* Insert the new memory block into the correct position in the
         * memory block list.  This is vital for merging free nodes.
         */
        if ((t = __mp_searchlower(h->atree.root, (unsigned long) p->block)) ||
            (t = __mp_searchlower(h->gtree.root, (unsigned long) p->block)))
            r = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        else
            r = (allocnode *) &h->list;
        while (((s = (allocnode *) r->lnode.next)->lnode.next != NULL) &&
               (s->block < p->block))
            r = s;
        __mp_insert(&h->list, &r->lnode, &n->lnode);
        __mp_treeinsert(&h->ftree, &n->tnode, p->size);
        n->block = p->block;
        n->size = p->size;
        n->info = NULL;
        h->fsize += p->size;
        /* Merge the memory block with any bordering free nodes.  This
         * is also vital to maintain the property that the memory block
         * list does not ever contain two bordering free nodes.
         */
        n = mergenode(h, n);
    }
    else
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
    /* Split the free node as requested.
     */
    return splitnode(h, n, l, a, i);
}
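Taken together with __mp_freealloc in Example 10, the usual life cycle of an allocation node looks roughly like the sketch below, assuming an allochead that has already been initialised and a caller-supplied info pointer desc (the function name and desc are hypothetical):

static int
alloc_and_free_example(allochead *head, void *desc)
{
    allocnode *n;

    /* Request 256 bytes with the default alignment; a NULL return means
     * the heap manager could not supply any more memory.
     */
    if ((n = __mp_getalloc(head, 256, 0, desc)) == NULL)
        return 0;
    /* ... the caller would use n->block here ... */

    /* A NULL info pointer sends the node straight back to the free tree;
     * a non-NULL pointer would keep it on the freed list instead.
     */
    __mp_freealloc(head, n, NULL);
    return 1;
}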
Example 13
static
allocnode *
splitnode(allochead *h, allocnode *n, size_t l, size_t a, void *i)
{
    allocnode *p, *q;
    size_t m, s;

    /* We choose the worst case scenario here and allocate new nodes for
     * both the left and right nodes.  This is so that we can easily recover
     * from lack of system memory at this point rather than rebuild the
     * original free node if we discover that we are out of memory later.
     */
    if (((p = getnode(h)) == NULL) || ((q = getnode(h)) == NULL))
    {
        if (p != NULL)
            __mp_freeslot(&h->table, p);
        return NULL;
    }
    /* Remove the free node from the free tree.
     */
    __mp_treeremove(&h->ftree, &n->tnode);
    h->fsize -= n->size;
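    /* Strip the overflow buffer from each end of the free block so that
     * block and size now exclude the two buffers at its ends.
     */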
    n->block = (char *) n->block + h->oflow;
    n->size -= h->oflow << 1;
    /* Check to see if we have space left over to create a free node to the
     * left of the new node.  This is never done if all allocations are pages.
     */
    if (!(h->flags & FLG_PAGEALLOC) &&
        ((m = __mp_roundup((unsigned long) n->block, a) -
          (unsigned long) n->block) > 0))
    {
        __mp_prepend(&h->list, &n->lnode, &p->lnode);
        __mp_treeinsert(&h->ftree, &p->tnode, m);
        p->block = (char *) n->block - h->oflow;
        p->size = m;
        p->info = NULL;
        n->block = (char *) n->block + m;
        n->size -= m;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, p);
    /* If we are allocating pages then the effective block size is the
     * original size rounded up to a multiple of the system page size.
     */
    if (h->flags & FLG_PAGEALLOC)
        s = __mp_roundup(l, h->heap.memory.page);
    else
        s = l;
    /* Check to see if we have space left over to create a free node to the
     * right of the new node.  This free node will always have a size which is
     * a multiple of the system page size if all allocations are pages.
     */
    if ((m = n->size - s) > 0)
    {
        __mp_insert(&h->list, &n->lnode, &q->lnode);
        __mp_treeinsert(&h->ftree, &q->tnode, m);
        q->block = (char *) n->block + s + h->oflow;
        q->size = m;
        q->info = NULL;
        n->size = s;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, q);
    /* Initialise the details of the newly allocated node and insert it in
     * the allocation tree.
     */
    n->info = i;
    if (h->flags & FLG_PAGEALLOC)
    {
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_READWRITE);
        /* If we are aligning the end of allocations to the upper end of pages
         * then we may have to shift the start of the block up by a certain
         * number of bytes.  This will then also lead to us having to prefill
         * the unused space with the overflow byte or place a watch point area
         * there.
         */
        if ((h->flags & FLG_ALLOCUPPER) &&
            ((m = __mp_rounddown(n->size - l, a)) > 0))
        {
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, n->block, m, MA_NOACCESS);
            else
                __mp_memset(n->block, h->obyte, m);
            n->block = (char *) n->block + m;
            n->size -= m;
        }
        /* We may need to prefill any unused space at the end of the block with
         * the overflow byte, or place a watch point area there.
         */
        if ((m = n->size - l) > 0)
        {
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + l, m,
                              MA_NOACCESS);
            else
                __mp_memset((char *) n->block + l, h->obyte, m);
        }
        n->size = l;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_NOACCESS);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_NOACCESS);
    }
    else
    {
        __mp_memset((char *) n->block - h->oflow, h->obyte, h->oflow);
        __mp_memset((char *) n->block + n->size, h->obyte, h->oflow);
    }
    __mp_treeinsert(&h->atree, &n->tnode, (unsigned long) n->block);
    h->asize += n->size;
    return n;
}