/*
 *  Remove a set of items. Return 0 if successful.
 */
int ejsRemoveRangeOfItems(EjsList *lp, int start, int end)
{
    void    **items;
    int     i, count, capacity;

    mprAssert(lp);
    mprAssert(lp->length > 0);
    mprAssert(start <= end);

    if (start < 0 || start >= lp->length) {
        return MPR_ERR_NOT_FOUND;
    }
    if (end < 0 || end >= lp->length) {
        return MPR_ERR_NOT_FOUND;
    }
    if (start > end) {
        return MPR_ERR_BAD_ARGS;
    }

    /*
     *  Copy down to compress
     */
    items = lp->items;
    count = end - start;
    for (i = start; i < (lp->length - count); i++) {
        items[i] = items[i + count];
    }
    lp->length -= count;
    capacity = CAPACITY(lp->items);
    for (i = lp->length; i < capacity; i++) {
        items[i] = 0;
    }
    return 0;
}
/*
 *  Insert an item to the list at a specified position. We insert before "index".
 */
int ejsInsertItemAtPos(MprCtx ctx, EjsList *lp, int index, cvoid *item)
{
    void    **items;
    int     i;

    mprAssert(lp);
    mprAssert(lp->length >= 0);

    if (lp->length >= CAPACITY(lp->items)) {
        if (growList(ctx, lp, 1) < 0) {
            return MPR_ERR_TOO_MANY;
        }
    }

    /*
     *  Copy up items to make room to insert
     */
    items = lp->items;
    for (i = lp->length; i > index; i--) {
        items[i] = items[i - 1];
    }
    lp->items[index] = (void*) item;
    lp->length++;
    return index;
}
int ejsGetListCapacity(EjsList *lp)
{
    mprAssert(lp);

    if (lp == 0) {
        return 0;
    }
    return CAPACITY(lp->items);
}
extern symbol * add_s_to_b(symbol * p, const char * s)
{
    int n = strlen(s);
    int k;
    if (p == 0) p = create_b(n);
    k = SIZE(p);
    {
        int x = k + n - CAPACITY(p);
        if (x > 0) p = increase_capacity(p, x);
    }
    {
        int i;
        for (i = 0; i < n; i++) p[i + k] = s[i];
    }
    SIZE(p) += n;
    return p;
}
int ejsCopyList(MprCtx ctx, EjsList *dest, EjsList *src)
{
    void    *item;
    int     next, capacity;

    ejsClearList(dest);

    capacity = CAPACITY(src->items);
    if (ejsSetListLimits(ctx, dest, capacity, src->maxSize) < 0) {
        return MPR_ERR_NO_MEMORY;
    }
    for (next = 0; (item = ejsGetNextItem(src, &next)) != 0; ) {
        if (ejsAddItem(ctx, dest, item) < 0) {
            return MPR_ERR_NO_MEMORY;
        }
    }
    return 0;
}
/*
 *  Add an item to the list and return the item index.
 */
int ejsAddItem(MprCtx ctx, EjsList *lp, cvoid *item)
{
    int     index, capacity;

    mprAssert(lp);
    mprAssert(lp->length >= 0);

    capacity = CAPACITY(lp->items);
    mprAssert(capacity >= 0);

    if (lp->items == 0 || lp->length >= capacity) {
        if (growList(ctx, lp, 1) < 0) {
            return MPR_ERR_TOO_MANY;
        }
    }
    index = lp->length++;
    lp->items[index] = (void*) item;
    return index;
}
/*
 *  Grow the list by the required increment
 */
static int growList(MprCtx ctx, EjsList *lp, int incr)
{
    int     len, memsize, capacity;

    /*
     *  Need to grow the list
     */
    capacity = CAPACITY(lp->items);
    mprAssert(capacity >= 0);

    if (capacity >= lp->maxSize) {
        if (lp->maxSize == 0) {
            lp->maxSize = INT_MAX;
        } else {
            mprAssert(capacity < lp->maxSize);
            return MPR_ERR_TOO_MANY;
        }
    }

    /*
     *  If growing by 1, then use the default increment which exponentially grows.
     *  Otherwise, assume the caller knows exactly how the list needs to grow.
     */
    if (incr <= 1) {
        len = MPR_LIST_INCR + capacity + capacity;
    } else {
        len = capacity + incr;
    }
    memsize = (int) (len * sizeof(void*));

    /*
     *  Grow the list of items. Use the existing context for lp->items if it already exists.
     *  Otherwise use the list as the memory context owner.
     */
    lp->items = (void**) mprRealloc(ctx, lp->items, memsize);
    if (lp->items == 0) {
        return MPR_ERR_NO_MEMORY;
    }

    /*
     *  Zero the new portion (required for no-compact lists)
     */
    memset(&lp->items[capacity], 0, sizeof(void*) * (len - capacity));
    return 0;
}
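/*
 *  Illustration only, not part of the EjsList API: a minimal, self-contained
 *  sketch of the growth policy growList() uses. Growing by 1 roughly doubles
 *  the capacity (plus a small fixed increment), giving amortized O(1) appends;
 *  larger increments grow exactly as requested. DemoList, demoGrow and
 *  DEMO_LIST_INCR are hypothetical names standing in for the MPR equivalents.
 */
#include <stdlib.h>
#include <string.h>

#define DEMO_LIST_INCR  8               /* stand-in for MPR_LIST_INCR */

typedef struct DemoList {
    void    **items;
    int     length;
    int     capacity;
} DemoList;

static int demoGrow(DemoList *lp, int incr)
{
    void    **items;
    int     len;

    /* Grow-by-1 doubles plus a fixed increment; otherwise grow as asked */
    len = (incr <= 1) ? DEMO_LIST_INCR + 2 * lp->capacity : lp->capacity + incr;
    items = realloc(lp->items, len * sizeof(void*));
    if (items == 0) {
        return -1;
    }
    /* Zero the new slots, as growList() does for no-compact lists */
    memset(&items[lp->capacity], 0, sizeof(void*) * (len - lp->capacity));
    lp->items = items;
    lp->capacity = len;
    return 0;
}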
/*
 *  Change the item in the list at index. Return the old item.
 */
void *ejsSetItem(MprCtx ctx, EjsList *lp, int index, cvoid *item)
{
    void    *old;
    int     capacity;

    mprAssert(lp);
    mprAssert(lp->length >= 0);

    if (index >= lp->length) {
        lp->length = index + 1;
    }
    capacity = CAPACITY(lp->items);
    if (lp->length > capacity) {
        if (growList(ctx, lp, lp->length - capacity) < 0) {
            return 0;
        }
    }
    old = lp->items[index];
    lp->items[index] = (void*) item;
    return old;
}
extern symbol * add_to_b(symbol * p, int n, symbol * q)
{
    int x = SIZE(p) + n - CAPACITY(p);
    if (x > 0) p = increase_capacity(p, x);
    memmove(p + SIZE(p), q, n * sizeof(symbol));
    SIZE(p) += n;
    return p;
}
extern symbol * increase_capacity(symbol * p, int n)
{
    symbol * q = create_b(CAPACITY(p) + n + EXTENDER);
    memmove(q, p, CAPACITY(p) * sizeof(symbol));
    SIZE(q) = SIZE(p);
    lose_b(p);
    return q;
}
extern symbol * create_b(int n)
{
    symbol * p = (symbol *) (HEAD + (char *) MALLOC(HEAD + (n + 1) * sizeof(symbol)));
    CAPACITY(p) = n;
    SIZE(p) = 0;
    return p;
}
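/*
 *  The symbol-buffer routines rely on HEAD, SIZE(), CAPACITY(), MALLOC and
 *  lose_b() being defined elsewhere. The sketch below shows one plausible set
 *  of definitions consistent with the pointer arithmetic in create_b(): each
 *  buffer carries a two-int header (capacity, then size) in the HEAD bytes
 *  just before the returned pointer. Treat these as assumptions illustrating
 *  the layout, not the project's actual header.
 */
#include <stdlib.h>

typedef unsigned char symbol;           /* element type assumed for this sketch */

#define MALLOC          malloc
#define FREE            free
#define HEAD            (2 * sizeof(int))
#define CAPACITY(p)     (((int *) (void *) (p))[-2])
#define SIZE(p)         (((int *) (void *) (p))[-1])

/* Release a buffer created by create_b(), freeing from the header start. */
extern void lose_b(symbol * p)
{
    if (p == 0) return;
    FREE((char *) p - HEAD);
}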
extern symbol * move_to_b(symbol * p, int n, const symbol * q)
{
    int x = n - CAPACITY(p);
    if (x > 0) p = increase_capacity(p, x);
    memmove(p, q, n * sizeof(symbol));
    SIZE(p) = n;
    return p;
}
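/*
 *  A short usage sketch of the buffer helpers above (create_b, add_s_to_b),
 *  assuming the header-macro definitions sketched earlier. It concatenates
 *  two C strings into a growable symbol buffer and prints the result.
 */
#include <stdio.h>

static void demo_buffers(void)
{
    symbol * b = create_b(4);           /* deliberately small; it will grow */
    int i;

    b = add_s_to_b(b, "hello ");        /* append a C string */
    b = add_s_to_b(b, "world");         /* grows via increase_capacity() */

    printf("size=%d capacity=%d: ", SIZE(b), CAPACITY(b));
    for (i = 0; i < SIZE(b); i++) putchar(b[i]);
    putchar('\n');

    lose_b(b);
}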
/* Report on the $Memories currently in use.
 * Can be called either in mallfuncs or signal context,
 * or from the library destructor. */
static void report(void)
{
    static unsigned nreports;
    static int previous;
    struct tm tm;
    struct timeval now;
    char buf[64];
    char const *prg;
    int saved_errno;
    FILE *saved_stderr;
    struct ero_st *mem;

    saved_errno = errno;
    saved_stderr = stderr;

    /* Construct the output file name.  Get it right even after a fork(). */
    if (!(prg = strrchr(program_invocation_short_name, '/')))
        prg = program_invocation_short_name;
    else
        prg++;
    snprintf(buf, sizeof(buf), "%s.%u.leaks", prg, getpid());

    /* bt1() will only log onto stderr. */
    if (!(stderr = fopen(buf, "a")))
        goto out;

    /* Overall statistics */
    if (!nreports) {
        localtime_r(&Profiling_since.tv_sec, &tm);
        fprintf(stderr, "started profiling on:\t"
            "%.2u:%.2u:%.2u.%.6lu %.2u/%.2u/%.2u\n",
            tm.tm_hour, tm.tm_min, tm.tm_sec, Profiling_since.tv_usec,
            tm.tm_mday, 1+tm.tm_mon, tm.tm_year % 100);
    }

    gettimeofday(&now, NULL);
    localtime_r(&now.tv_sec, &tm);
    fprintf(stderr, "report %u created on:\t"
        "%.2u:%.2u:%.2u.%.6lu %.2u/%.2u/%.2u\n",
        ++nreports, tm.tm_hour, tm.tm_min, tm.tm_sec, now.tv_usec,
        tm.tm_mday, 1+tm.tm_mon, tm.tm_year % 100);
    fprintf(stderr, "number of allocations:\t"
        "%u (currently %u)\n", NAllocations, NMemories);
    fprintf(stderr, "current allocation:\t"
        "%d (delta=%+d bytes)\n", Allocated, Allocated-previous);
    fprintf(stderr, "peak allocation:\t"
        "%d (%d bytes since the start of period)\n", Peak, Peak-previous);
    fputs("\n", stderr);

    NAllocations = 0;
    Peak = previous = Allocated;
    if (Summary_only)
        goto done;

    /* Dump all $Memories.
     * It makes little sense to sort without backtraces. */
    if (Backtrace_depth)
        Memories = sort(Memories, NMemories);
    for (mem = Memories; mem; mem = mem->next) {
        unsigned karmas;
        struct ero_st const *prev;
        struct backtrace_st const *bt;

        /* Chain up identical call sites. */
        karmas = 0;
        prev = NULL;
        for (;;) {
#ifdef _THREAD_SAFE
            fprintf(stderr, "ptr=%p (tid=%u), size=%zu, karma=%u\n",
                mem->ptr, mem->tid, mem->size, mem->karma++);
#else
            fprintf(stderr, "ptr=%p, size=%zu, karma=%u\n",
                mem->ptr, mem->size, mem->karma++);
#endif
            /* Count how many different karmas we have seen
             * the same backtrace with. */
            if (!prev || prev->karma != mem->karma)
                karmas++;

            /* Is the next backtrace the same as $mem's? */
            if (!mem->next || compare_backtraces(mem->backtrace, mem->next->backtrace))
                break;
            prev = mem;
            mem = mem->next;
        } /* for */

        /* Dump the backtrace. */
        if (karmas >= Karma_min_depth) {
            unsigned i, o;

            for (i = 1, o = 0, bt = mem->backtrace; bt && bt->addrs[o]; i++) {
                bt0(i, bt->addrs[o++], NULL);
                if (o >= CAPACITY(bt->addrs)) {
                    bt = bt->next;
                    o = 0;
                }
            }
        } /* if */
    } /* for */

done:
    fputs("-------------------------------------------------"
        "--------------------------\n", stderr);
    fclose(stderr);
out:
    stderr = saved_stderr;
    errno = saved_errno;
} /* report */
/* Add $ptr to the records.  Called in mallfuncs context. */
static void *garbage(void *ptr, size_t size, int intracall)
{
    struct ero_st *mem;
    unsigned i, top, bottom;

    if (!ptr)
        /* malloc() failed, don't record. */
        return NULL;

    /* Update the counters whether we can make a record or not. */
    NAllocations++;
    Allocated += size;
    if (Peak < Allocated)
        Peak = Allocated;

    /* We are permitted to clobber errno because our caller
     * is going to return with success. */
    if (!Ero_pool && !(Ero_pool = new_ero_pool()))
        return ptr;
    mem = Ero_pool;
    Ero_pool = Ero_pool->next;

    mem->next = Memories;
    Memories = mem;
    NMemories++;

    mem->ptr = ptr;
    mem->size = size;
    mem->karma = 0;
    IF_THREAD_SAFE(mem->tid = gettid());

    mem->backtrace = NULL;
    if (!Backtrace_depth)
        goto skip_backtrace;
    if (!Backtraces && !(Backtraces = new_backtraces()))
        goto skip_backtrace;
    mem->backtrace = Backtraces;

    /* We're called through fun() -> malloc() -> garbage(),
     * ignore the top two frames.  The bottom two frames
     * are below main(), ignore them too. */
    top = 2;
    if (intracall)
        /* An accountant function called another hook, ignore that too. */
        top++;

#ifndef CONFIG_FAST_UNWIND
    /* Try getting the backtrace until $addrs is large enough.
     * Start with a large buffer to get away with as few retries
     * as possible. */
    bottom = 2;
    for (i = Backtrace_depth > 0 ? top+Backtrace_depth : 100; ; i += 100) {
        unsigned depth;
        void *addrs[i];
        struct backtrace_st *next;

        if ((depth = backtrace(addrs, i)) >= i && Backtrace_depth < 0)
            /* $addrs was too small. */
            continue;

        if (depth > top) {
            /* Ignore $top frames. */
            depth -= top;
            if (top+depth < i && depth > bottom)
                /* If we got the full backtrace also ignore
                 * the $bottom frames. */
                depth -= bottom;
        } else
            /* Don't ignore anything. */
            top = 0;

        /* Add the backtrace to $mem.  If $depth is larger than the
         * CAPACITY of a backtrace_st, chain up more. */
        for (;;) {
            int more;
            unsigned n;

            n = CAPACITY(Backtraces->addrs);
            more = n < depth;
            if (n > depth)
                n = depth;
            memcpy(Backtraces->addrs, &addrs[top], sizeof(addrs[0]) * n);

            if (!more) {
                /* NULL-pad the unused $addrs, so compare_backtraces() won't
                 * tell apart identical backtraces because of garbage. */
                memset(&Backtraces->addrs[n], 0,
                    sizeof(Backtraces->addrs[0]) * (CAPACITY(Backtraces->addrs)-n));
                break;
            }

            if (!Backtraces->next && !(Backtraces->next = new_backtraces()))
                /* The bottom of the backtrace will be lost. */
                break;
            Backtraces = Backtraces->next;
            top += n;
            depth -= n;
        } /* for */

        next = Backtraces->next;
        Backtraces->next = NULL;
        Backtraces = next;
        break;
    } /* for */
#else /* CONFIG_FAST_UNWIND */
    /* We need to be able to store at least $top addresses.
     * If not, ignoring top/bottom frames may break. */
    ASSERT(CAPACITY(Backtraces->addrs) >= 3);

    /* arf leaves less junk at the bottom than backtrace(). */
    bottom = 1;
    do {
        unsigned depth;
        void const *sseg;
        void const *const *fp, *lr;
        struct backtrace_st *prev, *next;

        /* We don't know beforehand the depth of the backtrace,
         * so store the addresses in $Backtraces as we unwind
         * the stack iteratively. */
        prev = NULL;
        sseg = NULL;
        fp = __builtin_frame_address(0);
        for (i = depth = 0; ; i++, depth++) {
            if (!(fp = getlr(fp, &lr, &sseg))
                    || (Backtrace_depth > 0 && depth >= Backtrace_depth)) {
                /* End of backtrace, ignore the bottom frames if we can. */
                if (!fp && !top && depth > bottom) {
                    if (i <= bottom) {
                        /* The addresses in $Backtraces are all ignored,
                         * discard the whole page. */
                        Backtraces = prev;
                        i += CAPACITY(Backtraces->addrs) - bottom;
                    } else
                        i -= bottom;
                }

                /* Zero out the unused slots (which can be 0). */
                memset(&Backtraces->addrs[i], 0,
                    sizeof(Backtraces->addrs[0]) * (CAPACITY(Backtraces->addrs)-i));
                break;
            }

            if (depth == top)
                /* Time to ignore $top. */
                i = depth = top = 0;
            else if (i >= CAPACITY(Backtraces->addrs)) {
                /* $Backtraces is full, get a new page. */
                if (!Backtraces->next && !(Backtraces->next = new_backtraces()))
                    break;
                prev = Backtraces;
                Backtraces = Backtraces->next;
                i = 0;
            }
            Backtraces->addrs[i] = lr;
        } /* for */

        /* NULL-terminate $mem->backtraces and dequeue the tail
         * from $Backtraces. */
        next = Backtraces->next;
        Backtraces->next = NULL;
        Backtraces = next;
    } while (0);
#endif /* CONFIG_FAST_UNWIND */

skip_backtrace:
    return ptr;
} /* garbage */
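/*
 *  Not part of the original source: a sketch of the record types that
 *  report() and garbage() appear to assume. Here CAPACITY() is taken to be an
 *  array-element-count macro (unlike the buffer-header macro used by the
 *  symbol routines above), backtrace_st pages chain return addresses, and
 *  ero_st records one live allocation. Field names and sizes beyond those
 *  referenced in the code above are guesses.
 */
#include <stddef.h>
#include <sys/types.h>

#define CAPACITY(ary)   (sizeof(ary) / sizeof((ary)[0]))

/* One page of return addresses; chained via ->next when a backtrace
 * doesn't fit in a single page. */
struct backtrace_st {
    struct backtrace_st *next;
    void const *addrs[30];
};

/* One tracked allocation; $Memories is a singly linked list of these. */
struct ero_st {
    struct ero_st *next;
    void *ptr;
    size_t size;
    unsigned karma;
#ifdef _THREAD_SAFE
    pid_t tid;                          /* filled in by gettid() */
#endif
    struct backtrace_st *backtrace;
};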