/*
 * Initialise the fields of an allocation head so that the heap is empty
 * and there are no allocated, freed or free memory blocks, then record
 * the allocation options that will govern all subsequent allocations.
 */
MP_GLOBAL
void
__mp_newallocs(allochead *h, size_t m, size_t s, unsigned char o,
               unsigned char a, unsigned char f, unsigned long u)
{
    struct { char c; allocnode n; } align;
    long d;

    __mp_newheap(&h->heap);
    /* Determine the minimum alignment for an allocation node on this
     * system and force the alignment to be a power of two.  This
     * information is used when initialising the slot table.
     */
    d = (char *) &align.n - &align.c;
    __mp_newslots(&h->table, sizeof(allocnode), __mp_poweroftwo(d));
    /* Empty lists and trees for the allocated, freed and free blocks. */
    __mp_newlist(&h->list);
    __mp_newlist(&h->flist);
    __mp_newtree(&h->itree);
    __mp_newtree(&h->atree);
    __mp_newtree(&h->gtree);
    __mp_newtree(&h->ftree);
    h->isize = h->asize = h->gsize = h->fsize = 0;
    h->fmax = m;
    /* The overflow buffer size is forced to a power of two. */
    h->oflow = __mp_poweroftwo(s);
    h->obyte = o;
    h->abyte = a;
    h->fbyte = f;
    h->flags = u;
    if (h->flags & FLG_PAGEALLOC)
    {
        /* When every allocation occupies whole pages the overflow buffer
         * must be at least one page in size.
         */
        if (h->oflow == 0)
            h->oflow = 1;
        h->oflow = __mp_roundup(h->oflow, h->heap.memory.page);
    }
    h->prot = MA_NOACCESS;
    h->protrecur = 0;
}
/*
 * Determine the minimum alignment required for a generic memory
 * allocation on this system by probing the alignments of the most
 * restrictive integral, floating point and pointer types, and return
 * that value forced to a power of two.
 */
static
size_t
minalign(void)
{
    size_t r;
    long d;

    {
        /* Hopefully the largest integral type.  If the compiler supports
         * long long, it doesn't necessarily mean that it will have a more
         * restrictive alignment than a long integer, but we allow that
         * check anyway.
         */
#if MP_LONGLONG_SUPPORT
        struct { char c; long long t; } s;
#else /* MP_LONGLONG_SUPPORT */
        struct { char c; long t; } s;
#endif /* MP_LONGLONG_SUPPORT */
        d = (char *) &s.t - &s.c;
    }
    r = d;
    {
        /* Hopefully the largest floating point type.  The long double
         * type appeared with the ANSI standard and this code is written
         * in ANSI C so we shouldn't need to worry about not supporting it.
         */
        struct { char c; long double t; } s;
        d = (char *) &s.t - &s.c;
    }
    if (r < (unsigned long) d)
        r = d;
    {
        /* A generic pointer type.  The assumption in this case is that
         * a pointer to void is the most restrictive pointer type on this
         * system.
         */
        struct { char c; void *t; } s;
        d = (char *) &s.t - &s.c;
    }
    if (r < (unsigned long) d)
        r = d;
    return __mp_poweroftwo(r);
}
/*
 * Initialise the fields of a heap head so that the heap contains no
 * internal or external memory blocks and its underlying memory layer is
 * ready for use.
 */
MP_GLOBAL
void
__mp_newheap(heaphead *h)
{
    struct { char c; heapnode n; } align;
    long d;

    __mp_newmemory(&h->memory);
    /* Determine the minimum alignment for a heap node on this system
     * and force the alignment to be a power of two.  This information
     * is used when initialising the slot table.
     */
    d = (char *) &align.n - &align.c;
    __mp_newslots(&h->table, sizeof(heapnode), __mp_poweroftwo(d));
    /* Empty trees for the internal and external heap blocks. */
    __mp_newtree(&h->itree);
    __mp_newtree(&h->dtree);
    h->isize = h->dsize = 0;
    h->prot = MA_NOACCESS;
    h->protrecur = 0;
    h->tracing = 0;
}
/*
 * Obtain a block of zeroed memory directly from the operating system,
 * bypassing the C library allocator.  On entry *l is the requested size
 * (it is updated to the size actually obtained), a is the requested
 * alignment and u is non-zero for user memory (as opposed to internal
 * structures).  Returns NULL and sets errno to ENOMEM on failure.
 * NOTE(review): on the mmap path the parameter u is reused as a local
 * flag recording whether mmap() was chosen, not its original meaning.
 */
MP_GLOBAL
void *
__mp_memalloc(memoryinfo *i, size_t *l, size_t a, int u)
{
    void *p;
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX
    void *t;
    unsigned long n;
#endif /* MP_ARRAY_SUPPORT && TARGET */

    /* A zero-sized request is treated as a request for one byte. */
    if (*l == 0)
        *l = 1;
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX || TARGET == TARGET_NETWARE
    /* Round up the size of the allocation to a multiple of the system page
     * size.
     */
    *l = __mp_roundup(*l, i->page);
#elif TARGET == TARGET_WINDOWS
    /* The VirtualAlloc() function on Windows only seems to allocate memory in
     * blocks of 65536 bytes, so we round up the size of the allocation to this
     * amount since otherwise the space would be wasted.
     */
    *l = __mp_roundup(*l, 0x10000);
#elif TARGET == TARGET_AMIGA
    /* We aren't guaranteed to allocate a block of memory that is page
     * aligned on the Amiga, so we have to assume the worst case scenario
     * and allocate more memory for the specified alignment.
     */
    if (a > i->page)
        a = i->page;
    if (a > MEM_BLOCKSIZE)
        *l += __mp_poweroftwo(a) - MEM_BLOCKSIZE;
#endif /* MP_ARRAY_SUPPORT && TARGET */
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX
    /* UNIX has a contiguous heap for a process, but we are not guaranteed to
     * have full control over it, so we must assume that each separate memory
     * allocation is independent.  If we are using sbrk() to allocate memory
     * then we also try to ensure that all of our memory allocations are blocks
     * of pages.
     */
#if MP_MMAP_SUPPORT
    /* Decide if we are using mmap() or sbrk() to allocate the memory.  Requests
     * for user memory will be allocated in the opposite way to internal memory.
     */
    if ((((i->flags & FLG_USEMMAP) != 0) == (u != 0)) && (i->mfile != -1))
        u = 1;
    else
        u = 0;
    if (u != 0)
    {
#if MP_MMAP_ANONYMOUS
        /* Anonymous mapping: no backing file descriptor is needed. */
        if ((p = mmap(NULL, *l, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) == (void *) -1)
#else /* MP_MMAP_ANONYMOUS */
        /* Private mapping of the zero-fill device opened in i->mfile. */
        if ((p = mmap(NULL, *l, PROT_READ | PROT_WRITE, MAP_PRIVATE,
              i->mfile, 0)) == (void *) -1)
#endif /* MP_MMAP_ANONYMOUS */
            p = NULL;
    }
    else
#endif /* MP_MMAP_SUPPORT */
    {
        /* getmemory(0) probes the current break so that we can tell how
         * the new allocation is aligned with respect to a page boundary.
         */
        if (((t = getmemory(0)) == (void *) -1) ||
            ((p = getmemory(*l)) == (void *) -1))
            p = NULL;
        else
        {
            if (p < t)
                /* The heap has grown down, which is quite unusual except on
                 * some weird systems where the stack grows up.
                 */
                n = (unsigned long) p -
                    __mp_rounddown((unsigned long) p, i->page);
            else
            {
                t = p;
                n = __mp_roundup((unsigned long) p, i->page) -
                    (unsigned long) p;
            }
            if (n > 0)
            {
                /* We need to allocate a little more memory in order to make the
                 * allocation page-aligned.
                 */
                if ((p = getmemory(n)) == (void *) -1)
                {
                    /* We failed to allocate more memory, but we try to be nice
                     * and return our original allocation back to the system.
                     */
                    getmemory(-*l);
                    p = NULL;
                }
                else if (p >= t)
                    p = (char *) t + n;
            }
        }
    }
#elif TARGET == TARGET_AMIGA
    p = AllocMem(*l, MEMF_ANY | MEMF_CLEAR);
#elif TARGET == TARGET_WINDOWS
    /* The VirtualProtect() function won't allow us to protect a range of pages
     * that span the allocation boundaries made by VirtualAlloc().  As mpatrol
     * tries to merge all bordering free memory areas, we must prevent the
     * pages allocated by different calls to VirtualAlloc() from being merged.
     * The easiest way to do this is to reserve a page of virtual memory after
     * each call to VirtualAlloc() since this won't actually take up any
     * physical memory.  It's a bit of a hack, though!
     */
    p = VirtualAlloc(NULL, *l, MEM_COMMIT, PAGE_READWRITE);
    VirtualAlloc(NULL, 0x10000, MEM_RESERVE, PAGE_NOACCESS);
#elif TARGET == TARGET_NETWARE
    /* *l has already been rounded up to a multiple of the page size above. */
    p = NXPageAlloc(*l / i->page, 0);
#endif /* MP_ARRAY_SUPPORT && TARGET */
#if MP_ARRAY_SUPPORT || TARGET == TARGET_UNIX || TARGET == TARGET_NETWARE
    /* UNIX's sbrk() and Netware's NXPageAlloc() do not zero the allocated
     * memory, so we do this here for predictable behaviour.  This is also the
     * case if we are using a simulated heap.
     */
#if MP_MMAP_SUPPORT
    /* Memory obtained via mmap() above is already zero-filled. */
    if ((p != NULL) && (u == 0))
#else /* MP_MMAP_SUPPORT */
    if (p != NULL)
#endif /* MP_MMAP_SUPPORT */
        __mp_memset(p, 0, *l);
#endif /* MP_ARRAY_SUPPORT && TARGET */
    if (p == NULL)
        errno = ENOMEM;
    return p;
}
/*
 * Entry point for the mptrace tool.  Parses command line options, opens
 * the mpatrol tracing output file (or stdin), optionally opens files for
 * writing a C simulation program and/or a HATF trace, then reads all of
 * the trace events.  When GUI support is compiled in and requested, it
 * also sets up a Motif/X11 window in which the address space is drawn.
 */
int
main(int argc, char **argv)
{
    /* Used only to compute the minimum alignment of a generic pointer. */
    struct { char x; void *y; } z;
    char b[256];
    char *f, *s, *t;
#if MP_GUI_SUPPORT
    XGCValues g;
#endif /* MP_GUI_SUPPORT */
    long n;
    /* c: current option; e: usage error; h: help requested; v: version. */
    int c, e, h, v;

#if MP_GUI_SUPPORT
    /* Initialise the X toolkit first so that it can strip any X-specific
     * options from argc/argv before our own option parsing runs.
     */
    appwidget = XtVaAppInitialize(&appcontext, "MPTrace", options,
                                  XtNumber(options), &argc, argv, NULL, NULL);
    XtVaGetApplicationResources(appwidget, NULL, resources,
                                XtNumber(resources), NULL);
#endif /* MP_GUI_SUPPORT */
    s = t = NULL;
    e = h = v = 0;
    progname = __mp_basename(argv[0]);
    while ((c = __mp_getopt(argc, argv,
             __mp_shortopts(b, options_table), options_table)) != EOF)
        switch (c)
        {
          case OF_GUI:
#if MP_GUI_SUPPORT
            usegui = 1;
#endif /* MP_GUI_SUPPORT */
            break;
          case OF_HATFFILE:
            t = __mp_optarg;
            break;
          case OF_HELP:
            h = 1;
            break;
          case OF_SIMFILE:
            s = __mp_optarg;
            break;
          case OF_SOURCE:
            displaysource = 1;
            break;
          case OF_VERBOSE:
            verbose = 1;
            break;
          case OF_VERSION:
            v = 1;
            break;
          default:
            e = 1;
            break;
        }
    /* Skip past the parsed options so that argv[0] is the first operand. */
    argc -= __mp_optindex;
    argv += __mp_optindex;
    if (v == 1)
    {
        fprintf(stdout, "%s %s\n%s %s\n\n", progname, PROGVERSION,
                __mp_copyright, __mp_author);
        fputs("This is free software, and you are welcome to redistribute it "
              "under certain\n", stdout);
        fputs("conditions; see the GNU Lesser General Public License for "
              "details.\n\n", stdout);
        fputs("For the latest mpatrol release and documentation,\n", stdout);
        fprintf(stdout, "visit %s.\n\n", __mp_homepage);
    }
    /* At most one trace file operand is accepted. */
    if (argc > 1)
        e = 1;
    if ((e == 1) || (h == 1))
    {
        fprintf(stdout, "Usage: %s [options] [file]\n\n", progname);
        if (h == 0)
            fprintf(stdout, "Type `%s --help' for a complete list of "
                    "options.\n", progname);
        else
            __mp_showopts(options_table);
        if (e == 1)
            exit(EXIT_FAILURE);
        exit(EXIT_SUCCESS);
    }
    if (argc == 1)
        f = argv[0];
    else
        f = MP_TRACEFILE;
    __mp_newtree(&alloctree);
    /* A filename of "-" means read the trace from standard input. */
    if (strcmp(f, "-") == 0)
        tracefile = stdin;
    else if ((tracefile = fopen(f, "rb")) == NULL)
    {
        fprintf(stderr, "%s: Cannot open file `%s'\n", progname, f);
        exit(EXIT_FAILURE);
    }
    currentevent = 0;
    bufferpos = buffer;
    bufferlen = 0;
    /* Determine the minimum alignment of a generic pointer on this system
     * and use it to initialise the slot table for allocation nodes.
     */
    n = (char *) &z.y - &z.x;
    __mp_newslots(&table, sizeof(void *), __mp_poweroftwo(n));
    __mp_initslots(&table, tableslots, sizeof(tableslots));
    maxslots = 1;
    if (s != NULL)
    {
        /* Open the simulation output file and emit the fixed preamble of
         * the generated C program; the event array is filled in later as
         * the trace is read.
         */
        if (strcmp(s, "stdout") == 0)
            simfile = stdout;
        else if (strcmp(s, "stderr") == 0)
            simfile = stderr;
        else if ((simfile = fopen(s, "w")) == NULL)
        {
            fprintf(stderr, "%s: Cannot open file `%s'\n", progname, s);
            exit(EXIT_FAILURE);
        }
        fprintf(simfile, "/* produced by %s %s from %s */\n\n\n", progname,
                PROGVERSION, f);
        fputs("#include <stdio.h>\n", simfile);
        fputs("#include <stdlib.h>\n\n\n", simfile);
        fputs("typedef struct event\n{\n", simfile);
        fputs("    unsigned long index;\n", simfile);
        fputs("    unsigned long size;\n", simfile);
        fputs("    char resize;\n", simfile);
        fputs("}\nevent;\n\n\n", simfile);
        fputs("static event events[] =\n{\n", simfile);
    }
    if (t != NULL)
    {
        /* Open the HATF output file and write its standard header. */
        if (strcmp(t, "stdout") == 0)
            hatffile = stdout;
        else if (strcmp(t, "stderr") == 0)
            hatffile = stderr;
        else if ((hatffile = fopen(t, "w")) == NULL)
        {
            fprintf(stderr, "%s: Cannot open file `%s'\n", progname, t);
            exit(EXIT_FAILURE);
        }
        fprintf(hatffile, "## Tracename: %s\n", t);
        fputs("## Author: Unknown\n", hatffile);
        fputs("## Date: Unknown\n", hatffile);
        fputs("## DTDURL: hatf.dtd\n", hatffile);
        fprintf(hatffile, "## Description: Converted to HATF by %s %s.\n\n",
                progname, PROGVERSION);
    }
    readfile();
#if MP_GUI_SUPPORT
    if (usegui)
    {
        appdisplay = XtDisplay(appwidget);
        appscreen = XtScreen(appwidget);
        /* Map the whole address space (addrspace megabytes) onto the
         * width x height drawing area, one address unit per pixel.
         */
        addrscale = (((addrspace * 1048576) - 1) / (width * height)) + 1;
        /* Set up the main application window and scrollable drawing area.
         * Also set up a pixmap to backup the drawing area.
         */
        mainwidget = XtVaCreateManagedWidget("main",
                       xmScrolledWindowWidgetClass, appwidget,
                       XmNwidth, vwidth, XmNheight, vheight,
                       XmNscrollingPolicy, XmAUTOMATIC,
                       XmNscrollBarDisplayPolicy, XmAS_NEEDED, NULL);
        drawwidget = XtVaCreateManagedWidget("draw",
                       xmDrawingAreaWidgetClass, mainwidget,
                       XmNwidth, width, XmNheight, height, NULL);
        pixmap = XCreatePixmap(appdisplay, RootWindowOfScreen(appscreen),
                               width, height,
                               DefaultDepthOfScreen(appscreen));
        /* Set up the graphics contexts that are used for drawing in different
         * colours.
         */
        g.foreground = uncol;
        ungc = XCreateGC(appdisplay, RootWindowOfScreen(appscreen),
                         GCForeground, &g);
        g.foreground = incol;
        ingc = XCreateGC(appdisplay, RootWindowOfScreen(appscreen),
                         GCForeground, &g);
        g.foreground = frcol;
        frgc = XCreateGC(appdisplay, RootWindowOfScreen(appscreen),
                         GCForeground, &g);
        g.foreground = alcol;
        algc = XCreateGC(appdisplay, RootWindowOfScreen(appscreen),
                         GCForeground, &g);
        /* Add a callback procedure to handle the refreshing of the drawing
         * area and also a work procedure to read events from the tracing
         * output file.  Then initialise the drawing area and enter the main X
         * application loop.
         */
        XtAddCallback(drawwidget, XmNexposeCallback,
                      (XtCallbackProc) redrawmemory, NULL);
        XtAppAddWorkProc(appcontext, (XtWorkProc) readevent, NULL);
        XtRealizeWidget(appwidget);
        XFillRectangle(appdisplay, XtWindow(drawwidget), ungc, 0, 0,
                       width - 1, height - 1);
        XFillRectangle(appdisplay, pixmap, ungc, 0, 0, width - 1,
                       height - 1);
        XtAppMainLoop(appcontext);
    }
#endif /* MP_GUI_SUPPORT */
    return EXIT_SUCCESS;
}
/*
 * Obtain an allocation node describing a block of at least l bytes with
 * alignment a, recording i as the associated allocation information.
 * The free tree is searched first; if no free node is large enough a new
 * block is obtained from the heap manager, inserted into the memory
 * block list and merged with any bordering free nodes.  The chosen free
 * node is then split to produce the final allocation.  Returns NULL on
 * failure.
 */
MP_GLOBAL
allocnode *
__mp_getalloc(allochead *h, size_t l, size_t a, void *i)
{
    allocnode *n, *r, *s;
    heapnode *p;
    treenode *t;
    size_t b, m;

    /* b is the total overflow buffer space (one buffer on each side). */
    b = h->oflow << 1;
    if (l == 0)
        l = 1;
    /* Normalise the requested alignment: default alignment when zero,
     * capped at the page size, and always forced to a power of two.
     */
    if (a == 0)
        a = h->heap.memory.align;
    else if (a > h->heap.memory.page)
        a = h->heap.memory.page;
    else
        a = __mp_poweroftwo(a);
    /* If all allocations are not pages then we must add more bytes to the
     * allocation request to account for alignment.
     */
    if (h->flags & FLG_PAGEALLOC)
        m = 0;
    else
        m = a - 1;
    /* If we have no suitable space for this allocation then we must allocate
     * memory via the heap manager.
     */
    if ((t = __mp_searchhigher(h->ftree.root, l + b + m)) == NULL)
    {
        if ((n = getnode(h)) == NULL)
            return NULL;
        /* If all allocations are pages then we must specify that we want our
         * heap allocation to be page-aligned.
         */
        if (h->flags & FLG_PAGEALLOC)
            m = h->heap.memory.page;
        else
            m = a;
        if ((p = __mp_heapalloc(&h->heap,
              __mp_roundup(l + b, h->heap.memory.page), m, 0)) == NULL)
        {
            __mp_freeslot(&h->table, n);
            return NULL;
        }
        /* Initialise the free memory.  If all allocations are pages then we
         * prevent any free memory from being both read from and written to.
         */
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, p->size, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, p->size);
        /* Insert the new memory block into the correct position in the
         * memory block list.  This is vital for merging free nodes.
         */
        if ((t = __mp_searchlower(h->atree.root, (unsigned long) p->block)) ||
            (t = __mp_searchlower(h->gtree.root, (unsigned long) p->block)))
            r = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        else
            r = (allocnode *) &h->list;
        /* Walk forward from r until the next node's block address would
         * exceed the new block, keeping the list sorted by address.
         */
        while (((s = (allocnode *) r->lnode.next)->lnode.next != NULL) &&
               (s->block < p->block))
            r = s;
        __mp_insert(&h->list, &r->lnode, &n->lnode);
        __mp_treeinsert(&h->ftree, &n->tnode, p->size);
        n->block = p->block;
        n->size = p->size;
        n->info = NULL;
        h->fsize += p->size;
        /* Merge the memory block with any bordering free nodes.  This
         * is also vital to maintain the property that the memory block
         * list does not ever contain two bordering free nodes.
         */
        n = mergenode(h, n);
    }
    else
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
    /* Split the free node as requested. */
    return splitnode(h, n, l, a, i);
}