Code example #1
File: alloc.c — Project: groleo/mpatrol
/* Locate the allocation node whose memory block overlaps the address range
 * [p, p + s).  Returns NULL if no node overlaps the range.
 */
MP_GLOBAL
allocnode *
__mp_findnode(allochead *h, void *p, size_t s)
{
    allocnode *n;
    treenode *t;
    void *b;
    size_t l;

    /* Search for the lowest node that is closest to the given address.
     */
    if ((t = __mp_searchlower(h->atree.root, (unsigned long) p)) ||
        (t = __mp_searchlower(h->gtree.root, (unsigned long) p)))
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
    else
        n = (allocnode *) h->list.head;
    /* Loop through the list of suitable nodes looking for a likely
     * candidate.
     */
    while (n->lnode.next != NULL)
    {
        if ((h->flags & FLG_PAGEALLOC) && (n->info != NULL))
        {
            /* With page-based allocations the block effectively occupies
             * whole pages, so widen it to the enclosing page boundaries.
             */
            b = (void *) __mp_rounddown((unsigned long) n->block,
                                        h->heap.memory.page);
            l = __mp_roundup(n->size + ((char *) n->block - (char *) b),
                             h->heap.memory.page);
        }
        else
        {
            b = n->block;
            l = n->size;
        }
        if (n->info != NULL)
        {
            /* Allocated nodes are surrounded by overflow buffers which also
             * belong to the node, so include them in the comparison range.
             */
            b = (char *) b - h->oflow;
            l += h->oflow << 1;
        }
        /* Braces added here: the original nested if/else was correct but
         * relied on dangling-else binding, which is error-prone to maintain.
         */
        if (p < b)
        {
            /* The search address lies below this block, so it can only match
             * if the queried range extends up into the block; later nodes
             * start at even higher addresses, so otherwise give up.
             */
            if ((char *) p + s > (char *) b)
                return n;
            else
                break;
        }
        else if ((char *) b + l > (char *) p)
            return n;
        n = (allocnode *) n->lnode.next;
    }
    return NULL;
}
Code example #2
File: memory.c — Project: groleo/mpatrol
/* Return the block of l bytes at address p, which was previously obtained
 * from the system allocator, back to the operating system.
 */
MP_GLOBAL
void
__mp_memfree(memoryinfo *i, void *p, size_t l)
{
#if !MP_ARRAY_SUPPORT
#if TARGET == TARGET_UNIX || TARGET == TARGET_WINDOWS || \
    TARGET == TARGET_NETWARE
    void *t;  /* page-aligned base address of the block */
#endif /* TARGET */
#endif /* MP_ARRAY_SUPPORT */

    /* This function is hardly ever called except when the process is
     * terminating as the heap manager will take care of reusing unused
     * memory.  There is also no point in doing anything when we are using
     * a simulated heap as it will automatically be returned to the system.
     */
#if !MP_ARRAY_SUPPORT
    if (l == 0)
        return;
#if TARGET == TARGET_UNIX || TARGET == TARGET_WINDOWS || \
    TARGET == TARGET_NETWARE
    /* The system calls below operate on whole pages, so compute the page
     * base that contains p.
     */
    t = (void *) __mp_rounddown((unsigned long) p, i->page);
#endif /* TARGET */
#if TARGET == TARGET_UNIX
    /* If we used sbrk() to allocate this memory then we can't shrink the
     * break point since someone else might have allocated memory in between
     * our allocations.  The next best thing is to unmap our freed allocations
     * so that they no longer need to be handled by the virtual memory system.
     * If we used mmap() to allocate this memory then we don't need to worry
     * about the above problem.
     */
    l = __mp_roundup(l + ((char *) p - (char *) t), i->page);
    /* Revoke all access to the pages before unmapping them so that any stray
     * accesses fault rather than silently succeed.
     */
    mprotect(t, l, PROT_NONE);
    munmap(t, l);
#elif TARGET == TARGET_AMIGA
    FreeMem(p, l);
#elif TARGET == TARGET_WINDOWS
    VirtualFree(t, 0, MEM_RELEASE);
#elif TARGET == TARGET_NETWARE
    NXPageFree(t);
#endif /* TARGET */
#endif /* MP_ARRAY_SUPPORT */
}
Code example #3
File: memory.c — Project: groleo/mpatrol
/* Change the access protection of the pages spanning [p, p + l) to the
 * given access mode.  Returns 1 on success (or on platforms without page
 * protection support) and 0 on failure.
 */
MP_GLOBAL
int
__mp_memprotect(memoryinfo *i, void *p, size_t l, memaccess a)
{
#if TARGET == TARGET_UNIX || TARGET == TARGET_WINDOWS
    void *base;
    int mode;
#endif /* TARGET */

#if TARGET == TARGET_UNIX || TARGET == TARGET_WINDOWS
    if (l == 0)
        return 1;
    /* Page protection operates on whole pages, so round the requested range
     * outwards to the enclosing page boundaries.
     */
    base = (void *) __mp_rounddown((unsigned long) p, i->page);
    l = __mp_roundup(l + ((char *) p - (char *) base), i->page);
#if TARGET == TARGET_UNIX
    /* Map the abstract access mode onto the corresponding mprotect() flags.
     */
    mode = (a == MA_NOACCESS) ? PROT_NONE :
           (a == MA_READONLY) ? PROT_READ :
           (PROT_READ | PROT_WRITE);
    if (mprotect(base, l, mode) == -1)
        return 0;
#elif TARGET == TARGET_WINDOWS
    /* Map the abstract access mode onto the corresponding page protection
     * constants for VirtualProtect().
     */
    mode = (a == MA_NOACCESS) ? PAGE_NOACCESS :
           (a == MA_READONLY) ? PAGE_READONLY :
           PAGE_READWRITE;
    /* VirtualProtect() insists on somewhere to store the previous
     * protection, so the mode variable is reused for that purpose.
     */
    if (!VirtualProtect(base, l, mode, (unsigned long *) &mode))
        return 0;
#endif /* TARGET */
#endif /* TARGET */
    return 1;
}
Code example #4
File: memory.c — Project: groleo/mpatrol
/* Determine the access permission of the memory page containing address p.
 * Returns MA_NOACCESS, MA_READONLY or MA_READWRITE.
 */
MP_GLOBAL
memaccess
__mp_memquery(memoryinfo *i, void *p)
{
#if TARGET == TARGET_UNIX
#if MP_SIGINFO_SUPPORT
    struct sigaction s;
#endif /* MP_SIGINFO_SUPPORT */
    char c;
#elif TARGET == TARGET_WINDOWS
    MEMORY_BASIC_INFORMATION m;
#endif /* TARGET */
    memaccess r;

    r = MA_READWRITE;
#if TARGET == TARGET_UNIX
#if MP_MINCORE_SUPPORT
    /* The mincore() system call allows us to determine if a page is in core,
     * and if it is not and ENOMEM is set then it means that the page is not
     * mapped.  Unfortunately, we can't tell if it's read-only.
     */
    if ((mincore((char *) __mp_rounddown((unsigned long) p, i->page), 1, &c) ==
         -1) && (errno == ENOMEM))
        return MA_NOACCESS;
#endif /* MP_MINCORE_SUPPORT */
    /* One generic way to determine the access permission of an address across
     * all UNIX systems is to attempt to read from and write to the address and
     * check the results using signals.
     */
#if MP_SIGINFO_SUPPORT
    /* Install our probe handler for SIGBUS and SIGSEGV, saving the previous
     * handlers so that they can be restored afterwards.
     */
    s.sa_flags = 0;
    s.sa_handler = memoryhandler;
    sigfillset(&s.sa_mask);
    sigaction(SIGBUS, &s, &membushandler);
    sigaction(SIGSEGV, &s, &memsegvhandler);
#else /* MP_SIGINFO_SUPPORT */
    membushandler = signal(SIGBUS, memoryhandler);
    memsegvhandler = signal(SIGSEGV, memoryhandler);
#endif /* MP_SIGINFO_SUPPORT */
    /* First probe with a read; if that faults the handler longjmp()s back
     * here and the page is inaccessible.  Otherwise probe with a write of
     * the same byte; if that faults the page is read-only.
     */
    if (setjmp(memorystate))
        r = MA_NOACCESS;
    else
    {
        c = *((char *) p);
        if (setjmp(memorystate))
            r = MA_READONLY;
        else
            *((char *) p) = c;
    }
#if MP_SIGINFO_SUPPORT
    sigaction(SIGBUS, &membushandler, NULL);
    sigaction(SIGSEGV, &memsegvhandler, NULL);
#else /* MP_SIGINFO_SUPPORT */
    signal(SIGBUS, membushandler);
    signal(SIGSEGV, memsegvhandler);
#endif /* MP_SIGINFO_SUPPORT */
#elif TARGET == TARGET_WINDOWS
    /* On Windows, the VirtualQuery() function allows us to determine the
     * access permission of the page the address belongs to.
     */
    if (VirtualQuery(p, &m, sizeof(m)) >= sizeof(m))
    {
        if (!(m.State & MEM_COMMIT) || (m.Protect & PAGE_NOACCESS) ||
            (m.Protect & PAGE_EXECUTE))
            r = MA_NOACCESS;
        else if ((m.Protect & PAGE_READONLY) || (m.Protect & PAGE_EXECUTE_READ))
            r = MA_READONLY;
    }
#endif /* TARGET */
    return r;
}
Code example #5
File: memory.c — Project: groleo/mpatrol
/* Allocate a new block of at least *l bytes from the operating system,
 * aligned to at least a bytes, writing the size actually allocated back
 * through l.  u is non-zero when the request is for user memory and zero
 * for internal memory, which on mmap-capable systems selects between the
 * two allocation mechanisms.  Returns NULL (with errno set to ENOMEM) on
 * failure.
 */
MP_GLOBAL
void *
__mp_memalloc(memoryinfo *i, size_t *l, size_t a, int u)
{
    void *p;
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX
    void *t;
    unsigned long n;
#endif /* MP_ARRAY_SUPPORT && TARGET */

    /* Always allocate at least one byte.
     */
    if (*l == 0)
        *l = 1;
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX || TARGET == TARGET_NETWARE
    /* Round up the size of the allocation to a multiple of the system page
     * size.
     */
    *l = __mp_roundup(*l, i->page);
#elif TARGET == TARGET_WINDOWS
    /* The VirtualAlloc() function on Windows only seems to allocate memory in
     * blocks of 65536 bytes, so we round up the size of the allocation to this
     * amount since otherwise the space would be wasted.
     */
    *l = __mp_roundup(*l, 0x10000);
#elif TARGET == TARGET_AMIGA
    /* We aren't guaranteed to allocate a block of memory that is page
     * aligned on the Amiga, so we have to assume the worst case scenario
     * and allocate more memory for the specified alignment.
     */
    if (a > i->page)
        a = i->page;
    if (a > MEM_BLOCKSIZE)
        *l += __mp_poweroftwo(a) - MEM_BLOCKSIZE;
#endif /* MP_ARRAY_SUPPORT && TARGET */
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX
    /* UNIX has a contiguous heap for a process, but we are not guaranteed to
     * have full control over it, so we must assume that each separate memory
     * allocation is independent.  If we are using sbrk() to allocate memory
     * then we also try to ensure that all of our memory allocations are blocks
     * of pages.
     */
#if MP_MMAP_SUPPORT
    /* Decide if we are using mmap() or sbrk() to allocate the memory.  Requests
     * for user memory will be allocated in the opposite way to internal memory.
     */
    if ((((i->flags & FLG_USEMMAP) != 0) == (u != 0)) && (i->mfile != -1))
        u = 1;
    else
        u = 0;
    if (u != 0)
    {
#if MP_MMAP_ANONYMOUS
        if ((p = mmap(NULL, *l, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) == (void *) -1)
#else /* MP_MMAP_ANONYMOUS */
        if ((p = mmap(NULL, *l, PROT_READ | PROT_WRITE, MAP_PRIVATE, i->mfile,
              0)) == (void *) -1)
#endif /* MP_MMAP_ANONYMOUS */
            p = NULL;
    }
    else
#endif /* MP_MMAP_SUPPORT */
    {
        /* Query the current break point with getmemory(0) before extending it
         * by *l bytes, so that the alignment of the allocation can then be
         * checked against the page size.
         */
        if (((t = getmemory(0)) == (void *) -1) ||
            ((p = getmemory(*l)) == (void *) -1))
            p = NULL;
        else
        {
            if (p < t)
                /* The heap has grown down, which is quite unusual except on
                 * some weird systems where the stack grows up.
                 */
                n = (unsigned long) p - __mp_rounddown((unsigned long) p,
                                                       i->page);
            else
            {
                t = p;
                n = __mp_roundup((unsigned long) p, i->page) -
                    (unsigned long) p;
            }
            if (n > 0)
            {
                /* We need to allocate a little more memory in order to make the
                 * allocation page-aligned.
                 */
                if ((p = getmemory(n)) == (void *) -1)
                {
                    /* We failed to allocate more memory, but we try to be nice
                     * and return our original allocation back to the system.
                     */
                    getmemory(-*l);
                    p = NULL;
                }
                else if (p >= t)
                    p = (char *) t + n;
            }
        }
    }
#elif TARGET == TARGET_AMIGA
    p = AllocMem(*l, MEMF_ANY | MEMF_CLEAR);
#elif TARGET == TARGET_WINDOWS
    /* The VirtualProtect() function won't allow us to protect a range of pages
     * that span the allocation boundaries made by VirtualAlloc().  As mpatrol
     * tries to merge all bordering free memory areas, we must prevent the
     * pages allocated by different calls to VirtualAlloc() from being merged.
     * The easiest way to do this is to reserve a page of virtual memory after
     * each call to VirtualAlloc() since this won't actually take up any
     * physical memory.  It's a bit of a hack, though!
     */
    p = VirtualAlloc(NULL, *l, MEM_COMMIT, PAGE_READWRITE);
    VirtualAlloc(NULL, 0x10000, MEM_RESERVE, PAGE_NOACCESS);
#elif TARGET == TARGET_NETWARE
    p = NXPageAlloc(*l / i->page, 0);
#endif /* MP_ARRAY_SUPPORT && TARGET */
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX || TARGET == TARGET_NETWARE
    /* UNIX's sbrk() and Netware's NXPageAlloc() do not zero the allocated
     * memory, so we do this here for predictable behaviour.  This is also the
     * case if we are using a simulated heap.
     */
#if MP_MMAP_SUPPORT
    if ((p != NULL) && (u == 0))
#else /* MP_MMAP_SUPPORT */
    if (p != NULL)
#endif /* MP_MMAP_SUPPORT */
        __mp_memset(p, 0, *l);
#endif /* MP_ARRAY_SUPPORT && TARGET */
    /* Signal failure to the caller in the conventional way.
     */
    if (p == NULL)
        errno = ENOMEM;
    return p;
}
Code example #6
/* Read a single event from the tracing output file, update the running
 * allocation statistics and optionally echo the event, write it to the
 * HATF and simulation output files, and draw it on the GUI display.
 * Without GUI support the function returns 1 while more events remain and
 * 0 once the trace has been fully consumed; with GUI support the return
 * convention is inverted so that it can be used as a work procedure
 * (0 = call again, 1 = finished).
 */

#if MP_GUI_SUPPORT
static
int
readevent(XtPointer p)
#else /* MP_GUI_SUPPORT */
static
int
readevent(void)
#endif /* MP_GUI_SUPPORT */
{
    char s[4];               /* buffer for the trace file magic number */
    allocation *f;           /* allocation record being created or looked up */
    char *g, *h;             /* source details filled in by getsource() */
    void *a;                 /* allocation address */
    size_t i, l, m;          /* loop index, allocation size, slot number */
    unsigned long n, t, u;   /* allocation index plus source details */

    if (refill(1))
        /* The first byte of each event identifies its type.
         */
        switch (*bufferpos)
        {
          /* 'A': a new memory allocation.
           */
          case 'A':
            bufferpos++;
            bufferlen--;
            currentevent++;
            n = getuleb128();
            a = (void *) getuleb128();
            l = getuleb128();
            getsource(&t, &g, &h, &u);
            f = newalloc(n, currentevent, a, l);
            /* Update the running totals and the peak counts if the current
             * totals exceed them.
             */
            stats.acount++;
            stats.atotal += l;
            if (stats.pcount < stats.acount - stats.fcount)
                stats.pcount = stats.acount - stats.fcount;
            if (stats.ptotal < stats.atotal - stats.ftotal)
                stats.ptotal = stats.atotal - stats.ftotal;
            if ((stats.lsize == 0) || (stats.lsize > l))
                stats.lsize = l;
            if (stats.usize < l)
                stats.usize = l;
            if (verbose)
            {
                fprintf(stdout, "%6lu  alloc   %6lu  " MP_POINTER "  %8lu"
                        "          %6lu  %8lu\n", currentevent, n, a, l,
                        stats.acount - stats.fcount,
                        stats.atotal - stats.ftotal);
                if (displaysource)
                    printsource(t, g, h, u);
            }
            if (hatffile != NULL)
                fprintf(hatffile, "1 %lu 0x%lx\n", l, a);
            if (f->entry != NULL)
            {
                /* Record the highest slot number seen so that the generated
                 * simulation program can size its pointer array.
                 */
                if ((m = slotentry(f)) > maxslots)
                    maxslots = m;
                fprintf(simfile, "    {%lu, %lu, 0},\n", m, l);
            }
#if MP_GUI_SUPPORT
            if (usegui)
            {
                if (addrbase == NULL)
                    addrbase = (void *) __mp_rounddown((unsigned long) a, 1024);
                drawmemory(a, l, algc);
                return 0;
            }
#endif /* MP_GUI_SUPPORT */
            return 1;
          /* 'R': an existing allocation was resized.
           */
          case 'R':
            bufferpos++;
            bufferlen--;
            currentevent++;
            n = getuleb128();
            a = (void *) getuleb128();
            l = getuleb128();
            getsource(&t, &g, &h, &u);
            if (f = (allocation *) __mp_search(alloctree.root, n))
            {
                if (f->time != 0)
                    fprintf(stderr, "%s: Allocation index `%lu' has already "
                            "been freed\n", progname, n);
                /* A resize counts as a free of the old size followed by an
                 * allocation of the new size.
                 */
                stats.acount++;
                stats.atotal += l;
                stats.fcount++;
                stats.ftotal += f->size;
                if (stats.pcount < stats.acount - stats.fcount)
                    stats.pcount = stats.acount - stats.fcount;
                if (stats.ptotal < stats.atotal - stats.ftotal)
                    stats.ptotal = stats.atotal - stats.ftotal;
                if ((stats.lsize == 0) || (stats.lsize > l))
                    stats.lsize = l;
                if (stats.usize < l)
                    stats.usize = l;
                if (verbose)
                {
                    fprintf(stdout, "%6lu  realloc %6lu  " MP_POINTER
                            "  %8lu          %6lu  %8lu\n", currentevent, n, a,
                            l, stats.acount - stats.fcount,
                            stats.atotal - stats.ftotal);
                    if (displaysource)
                        printsource(t, g, h, u);
                }
                if (hatffile != NULL)
                    fprintf(hatffile, "4 %lu 0x%lx 0x%lx\n", l, f->addr, a);
                if (f->entry != NULL)
                {
                    m = slotentry(f);
                    fprintf(simfile, "    {%lu, %lu, 1},\n", m, l);
                }
#if MP_GUI_SUPPORT
                if (usegui)
                {
                    drawmemory(f->addr, f->size, frgc);
                    drawmemory(a, l, algc);
                }
#endif /* MP_GUI_SUPPORT */
                f->addr = a;
                f->size = l;
            }
            else
                fprintf(stderr, "%s: Unknown allocation index `%lu'\n",
                        progname, n);
#if MP_GUI_SUPPORT
            if (usegui)
                return 0;
#endif /* MP_GUI_SUPPORT */
            return 1;
          /* 'F': an existing allocation was freed.
           */
          case 'F':
            bufferpos++;
            bufferlen--;
            currentevent++;
            n = getuleb128();
            getsource(&t, &g, &h, &u);
            if (f = (allocation *) __mp_search(alloctree.root, n))
            {
                if (f->time != 0)
                    fprintf(stderr, "%s: Allocation index `%lu' has already "
                            "been freed\n", progname, n);
                /* Record the lifetime of the allocation in events.
                 */
                f->time = currentevent - f->event;
                stats.fcount++;
                stats.ftotal += f->size;
                if (verbose)
                {
                    fprintf(stdout, "%6lu  free    %6lu  " MP_POINTER "  %8lu  "
                            "%6lu  %6lu  %8lu\n", currentevent, n, f->addr,
                            f->size, f->time, stats.acount - stats.fcount,
                            stats.atotal - stats.ftotal);
                    if (displaysource)
                        printsource(t, g, h, u);
                }
                if (hatffile != NULL)
                    fprintf(hatffile, "2 0x%lx\n", f->addr);
                if (f->entry != NULL)
                {
                    fprintf(simfile, "    {%lu, 0, 0},\n", slotentry(f));
                    __mp_freeslot(&table, f->entry);
                    f->entry = NULL;
                }
#if MP_GUI_SUPPORT
                if (usegui)
                    drawmemory(f->addr, f->size, frgc);
#endif /* MP_GUI_SUPPORT */
            }
            else
                fprintf(stderr, "%s: Unknown allocation index `%lu'\n",
                        progname, n);
#if MP_GUI_SUPPORT
            if (usegui)
                return 0;
#endif /* MP_GUI_SUPPORT */
            return 1;
          /* 'H': heap memory was reserved from the system.
           */
          case 'H':
            bufferpos++;
            bufferlen--;
            a = (void *) getuleb128();
            l = getuleb128();
            if (verbose)
                fprintf(stdout, "        reserve         " MP_POINTER
                        "  %8lu\n", a, l);
            stats.rcount++;
            stats.rtotal += l;
#if MP_GUI_SUPPORT
            if (usegui)
            {
                if (addrbase == NULL)
                    addrbase = (void *) __mp_rounddown((unsigned long) a, 1024);
                drawmemory(a, l, frgc);
                return 0;
            }
#endif /* MP_GUI_SUPPORT */
            return 1;
          /* 'I': memory was used internally by the library.
           */
          case 'I':
            bufferpos++;
            bufferlen--;
            a = (void *) getuleb128();
            l = getuleb128();
            if (verbose)
                fprintf(stdout, "        internal        " MP_POINTER
                        "  %8lu\n", a, l);
            stats.icount++;
            stats.itotal += l;
#if MP_GUI_SUPPORT
            if (usegui)
            {
                drawmemory(a, l, ingc);
                return 0;
            }
#endif /* MP_GUI_SUPPORT */
            return 1;
          default:
            break;
        }
    /* No more events (or an unrecognised event type): finish off any output
     * files, verify the trace file magic number and display the final
     * statistics before cleaning up.
     */
    if ((hatffile != NULL) && (hatffile != stdout) && (hatffile != stderr))
        fclose(hatffile);
    if (simfile != NULL)
    {
        /* Emit the trailer of the generated simulation program: a main()
         * that replays the recorded events with malloc()/realloc()/free().
         */
        fputs("    {0, 0, 0}\n};\n\n\n", simfile);
        fputs("int main(void)\n{\n", simfile);
        fprintf(simfile, "    void *p[%lu];\n", maxslots);
        fputs("    event *e;\n\n", simfile);
        fputs("    for (e = events; e->index != 0; e++)\n", simfile);
        fputs("        if (e->resize)\n", simfile);
        fputs("        {\n", simfile);
        fputs("            if ((p[e->index - 1] = realloc(p[e->index - 1], "
              "e->size)) == NULL)\n", simfile);
        fputs("            {\n", simfile);
        fputs("                fputs(\"out of memory\\n\", stderr);\n",
                                     simfile);
        fputs("                exit(EXIT_FAILURE);\n", simfile);
        fputs("            }\n", simfile);
        fputs("        }\n", simfile);
        fputs("        else if (e->size == 0)\n", simfile);
        fputs("            free(p[e->index - 1]);\n", simfile);
        fputs("        else if ((p[e->index - 1] = malloc(e->size)) == NULL)\n",
              simfile);
        fputs("        {\n", simfile);
        fputs("            fputs(\"out of memory\\n\", stderr);\n", simfile);
        fputs("            exit(EXIT_FAILURE);\n", simfile);
        fputs("        }\n", simfile);
        fputs("    return EXIT_SUCCESS;\n}\n", simfile);
        if ((simfile != stdout) && (simfile != stderr))
            fclose(simfile);
    }
    /* The trace file ends with a repeat of the magic number; anything else
     * indicates a truncated or corrupted file.
     */
    getentry(s, sizeof(char), 4, 0);
    if (memcmp(s, MP_TRACEMAGIC, 4) != 0)
    {
        fprintf(stderr, "%s: Invalid file format\n", progname);
        exit(EXIT_FAILURE);
    }
    if (verbose)
        fputc('\n', stdout);
    showstats();
    /* Release the cached function and file name strings.
     */
    for (i = 0; i < MP_NAMECACHE_SIZE; i++)
    {
        if (funcnames[i] != NULL)
            free(funcnames[i]);
        if (filenames[i] != NULL)
            free(filenames[i]);
    }
    freeallocs();
    fclose(tracefile);
#if MP_GUI_SUPPORT
    if (usegui)
        return 1;
#endif /* MP_GUI_SUPPORT */
    return 0;
}
Code example #7
File: alloc.c — Project: groleo/mpatrol
/* Recycle the oldest freed memory allocation: remove it from the freed
 * list and freed tree and place it back on the free tree, making its
 * memory available for reuse.
 */
MP_GLOBAL
void
__mp_recyclefreed(allochead *h)
{
    allocnode *n;
    void *p = NULL;  /* page-aligned base of the block (page mode only) */
    size_t l, s = 0; /* watch point length and page-rounded size */

    n = (allocnode *) ((char *) h->flist.head - offsetof(allocnode, fnode));
    /* Remove the freed node from the freed list and the freed tree.
     */
    __mp_remove(&h->flist, &n->fnode);
    __mp_treeremove(&h->gtree, &n->tnode);
    h->gsize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        /* p and s are initialised above so that they are well-defined even
         * when FLG_PAGEALLOC is clear, matching __mp_freealloc() and keeping
         * compilers from warning about possibly-uninitialised reads below.
         */
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    /* We are placing this node on the free tree and so it will become
     * available for reuse.  If all allocations are pages then we prevent
     * the contents from being read or written to, otherwise the contents
     * will be filled with the free byte.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Any watch points will have already been removed, and the
         * surrounding overflow buffers will already be protected with
         * the MA_NOACCESS flag.
         */
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
        n->block = p;
        n->size = s;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Remove any watch points that were made to monitor the overflow
         * buffers.
         */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_READWRITE);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_READWRITE);
    }
    /* Grow the node to cover its overflow buffers before returning it to
     * the free tree.
     */
    n->block = (char *) n->block - h->oflow;
    n->size += h->oflow << 1;
    n->info = NULL;
    if (!(h->flags & FLG_PAGEALLOC))
        __mp_memset(n->block, h->fbyte, n->size);
    __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    h->fsize += n->size;
    mergenode(h, n);
}
Code example #8
File: alloc.c — Project: groleo/mpatrol
/* Free the memory allocation described by node n.  If i is not NULL the
 * node is kept on the freed tree with details i (so that recently-freed
 * allocations can be inspected later), otherwise it is returned to the
 * free tree and becomes available for reuse.
 */
MP_GLOBAL
void
__mp_freealloc(allochead *h, allocnode *n, void *i)
{
    void *p = NULL;  /* page-aligned base of the block (page mode only) */
    size_t l, s = 0; /* watch point length and page-rounded size */

    /* If we are keeping the details (and possibly the contents) of a specified
     * number of recently freed memory allocations then we may have to recycle
     * the oldest freed allocation if the length of the queue would extend past
     * the user-specified limit.
     */
    if ((i != NULL) && (h->flist.size != 0) && (h->flist.size == h->fmax))
        __mp_recyclefreed(h);
    /* Remove the allocated node from the allocation tree.
     */
    __mp_treeremove(&h->atree, &n->tnode);
    h->asize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    if (i != NULL)
    {
        /* We are keeping this node and so place it on the freed tree.
         * If all allocations are pages then we either prevent the original
         * contents from being both read or written to, or prevent the
         * allocation from being written to.  If not then we may optionally
         * preserve its contents, otherwise it will be filled with the free
         * byte.
         */
        n->info = i;
        /* Braces added around this nested conditional: the original relied
         * on dangling-else binding, which is fragile to edit.
         */
        if (h->flags & FLG_PAGEALLOC)
        {
            if (h->flags & FLG_PRESERVE)
            {
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_READONLY);
                if (h->flags & FLG_OFLOWWATCH)
                {
                    /* Replace any watch points within the allocated pages.
                     * We have to do this here because when we change the
                     * memory protection we may trigger a watch point on some
                     * systems.
                     */
                    if ((l = (char *) n->block - (char *) p) > 0)
                        __mp_memwatch(&h->heap.memory, p, l, MA_NOACCESS);
                    if ((l = s - n->size - l) > 0)
                        __mp_memwatch(&h->heap.memory, (char *) n->block +
                                      n->size, l, MA_NOACCESS);
                }
            }
            else
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_NOACCESS);
        }
        else if (!(h->flags & FLG_PRESERVE))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_addtail(&h->flist, &n->fnode);
        __mp_treeinsert(&h->gtree, &n->tnode, (unsigned long) n->block);
        h->gsize += n->size;
    }
    else
    {
        /* We are placing this node on the free tree and so it will become
         * available for reuse.  If all allocations are pages then we prevent
         * the contents from being read or written to, otherwise the contents
         * will be filled with the free byte.
         */
        if (h->flags & FLG_PAGEALLOC)
        {
            /* Any watch points will have already been removed, and the
             * surrounding overflow buffers will already be protected with
             * the MA_NOACCESS flag.
             */
            __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
            n->block = p;
            n->size = s;
        }
        else if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points that were made to monitor the overflow
             * buffers.
             */
            __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                          h->oflow, MA_READWRITE);
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          h->oflow, MA_READWRITE);
        }
        /* Grow the node to cover its overflow buffers before returning it
         * to the free tree.
         */
        n->block = (char *) n->block - h->oflow;
        n->size += h->oflow << 1;
        n->info = NULL;
        if (!(h->flags & FLG_PAGEALLOC))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
        h->fsize += n->size;
        mergenode(h, n);
    }
}
Code example #9
File: alloc.c — Project: groleo/mpatrol
/* Split the free node n so that its first l bytes (with the block start
 * aligned to a bytes) become an allocated node carrying details i,
 * creating new free nodes for any left-over space on either side.
 * Returns the allocated node, or NULL if no memory was available for the
 * extra nodes.
 */
static
allocnode *
splitnode(allochead *h, allocnode *n, size_t l, size_t a, void *i)
{
    allocnode *p, *q;  /* potential free nodes to the left and right of n */
    size_t m, s;       /* left-over size and effective block size */

    /* We choose the worst case scenario here and allocate new nodes for
     * both the left and right nodes.  This is so that we can easily recover
     * from lack of system memory at this point rather than rebuild the
     * original free node if we discover that we are out of memory later.
     */
    if (((p = getnode(h)) == NULL) || ((q = getnode(h)) == NULL))
    {
        /* NOTE(review): when the first getnode() call fails, q is left
         * unassigned, but the short-circuit evaluation guarantees q is
         * never read in that case — only p is released here.
         */
        if (p != NULL)
            __mp_freeslot(&h->table, p);
        return NULL;
    }
    /* Remove the free node from the free tree.
     */
    __mp_treeremove(&h->ftree, &n->tnode);
    h->fsize -= n->size;
    /* Shrink the node to exclude its overflow buffers while it is being
     * carved up.
     */
    n->block = (char *) n->block + h->oflow;
    n->size -= h->oflow << 1;
    /* Check to see if we have space left over to create a free node to the
     * left of the new node.  This is never done if all allocations are pages.
     */
    if (!(h->flags & FLG_PAGEALLOC) &&
        ((m = __mp_roundup((unsigned long) n->block, a) -
          (unsigned long) n->block) > 0))
    {
        __mp_prepend(&h->list, &n->lnode, &p->lnode);
        __mp_treeinsert(&h->ftree, &p->tnode, m);
        p->block = (char *) n->block - h->oflow;
        p->size = m;
        p->info = NULL;
        n->block = (char *) n->block + m;
        n->size -= m;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, p);
    /* If we are allocating pages then the effective block size is the
     * original size rounded up to a multiple of the system page size.
     */
    if (h->flags & FLG_PAGEALLOC)
        s = __mp_roundup(l, h->heap.memory.page);
    else
        s = l;
    /* Check to see if we have space left over to create a free node to the
     * right of the new node.  This free node will always have a size which is
     * a multiple of the system page size if all allocations are pages.
     */
    if ((m = n->size - s) > 0)
    {
        __mp_insert(&h->list, &n->lnode, &q->lnode);
        __mp_treeinsert(&h->ftree, &q->tnode, m);
        q->block = (char *) n->block + s + h->oflow;
        q->size = m;
        q->info = NULL;
        n->size = s;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, q);
    /* Initialise the details of the newly allocated node and insert it in
     * the allocation tree.
     */
    n->info = i;
    if (h->flags & FLG_PAGEALLOC)
    {
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_READWRITE);
        /* If we are aligning the end of allocations to the upper end of pages
         * then we may have to shift the start of the block up by a certain
         * number of bytes.  This will then also lead to us having to prefill
         * the unused space with the overflow byte or place a watch point area
         * there.
         */
        if ((h->flags & FLG_ALLOCUPPER) &&
            ((m = __mp_rounddown(n->size - l, a)) > 0))
        {
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, n->block, m, MA_NOACCESS);
            else
                __mp_memset(n->block, h->obyte, m);
            n->block = (char *) n->block + m;
            n->size -= m;
        }
        /* We may need to prefill any unused space at the end of the block with
         * the overflow byte, or place a watch point area there.
         */
        if ((m = n->size - l) > 0)
        {
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + l, m,
                              MA_NOACCESS);
            else
                __mp_memset((char *) n->block + l, h->obyte, m);
        }
        n->size = l;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Guard the overflow buffers on either side of the new allocation
         * with watch points.
         */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_NOACCESS);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_NOACCESS);
    }
    else
    {
        /* Fill the overflow buffers on either side of the new allocation
         * with the overflow byte so that corruption can be detected.
         */
        __mp_memset((char *) n->block - h->oflow, h->obyte, h->oflow);
        __mp_memset((char *) n->block + n->size, h->obyte, h->oflow);
    }
    __mp_treeinsert(&h->atree, &n->tnode, (unsigned long) n->block);
    h->asize += n->size;
    return n;
}