void malloc_get_stats(struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock();

	memcpy(stats, &mstats, sizeof(*stats));
	stats->allocated = totalloc;
	malloc_unlock(exceptions);
}
void malloc_reset_stats(void)
{
	unsigned int exceptions = malloc_lock();

	mstats.max_allocated = 0;
	mstats.num_alloc_fail = 0;
	mstats.biggest_alloc_fail = 0;
	mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(exceptions);
}
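A minimal caller-side sketch for the two stats routines above. It assumes the struct malloc_stats fields shown in this section (allocated, max_allocated, num_alloc_fail) are plain integer counters and that a printf-style console helper is available; the report function name is illustrative.

/* Hypothetical caller: snapshot the allocator counters, report, then reset. */
static void report_heap_usage(void)
{
	struct malloc_stats stats;

	malloc_get_stats(&stats);
	printf("heap: %zu bytes allocated (peak %zu), %zu failed allocs\n",
	       (size_t)stats.allocated, (size_t)stats.max_allocated,
	       (size_t)stats.num_alloc_fail);

	/* Clear the counters so the next report covers a fresh window. */
	malloc_reset_stats();
}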
void bmk_memfree(void *cp)
{
	long size;
	union overhead *op;
	u_long alignpad;
	void *origp;

	if (cp == NULL)
		return;

	/* Reject pointers that don't carry our header magic. */
	op = ((union overhead *)cp) - 1;
	if (op->ov_magic != MAGIC) {
#ifdef MEMALLOC_TESTING
		ASSERT(0);
#endif
		return;					/* sanity */
	}

#ifdef RCHECK
	ASSERT(op->ov_rmagic == RMAGIC);
	ASSERT(*(u_short *)((char *)(op + 1) + op->ov_size) == RMAGIC);
#endif

	size = op->ov_index;
	alignpad = op->ov_alignpad;
	ASSERT(size < NBUCKETS);

	malloc_lock();

	/* Step back over the alignment padding to the original block start. */
	origp = (uint8_t *)cp - alignpad;

#ifdef MEMALLOC_TESTING
	/* Verify the padding bytes written at allocation time are intact. */
	{
		u_long i;

		for (i = 0; (uint8_t *)origp + i < (uint8_t *)op; i++) {
			ASSERT(*((uint8_t *)origp + i) == MAGIC);
		}
	}
#endif

	/* Push the block back onto its bucket's free list. */
	op = (void *)origp;
	op->ov_next = nextf[(unsigned int)size];	/* also clobbers ov_magic */
	nextf[(unsigned int)size] = op;
#ifdef MSTATS
	nmalloc[(size_t)size]--;
#endif
	malloc_unlock();
}
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	/*
	 * Check that struct mdbg_hdr doesn't get bad alignment.
	 * This is required by the C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			 size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(exceptions);
	return hdr;
}
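mdbg_malloc() takes the call site as explicit arguments; a common way to supply them is a wrapper macro, sketched below under the assumption that a debug build maps malloc() onto this function. The ENABLE_MDBG guard and the macro are illustrative and not taken from this section.

/*
 * Hypothetical wrapper: record the call site of every allocation.
 * Only mdbg_malloc() itself is defined above; this macro is an assumption.
 */
#ifdef ENABLE_MDBG
#define malloc(size)	mdbg_malloc(__FILE__, __LINE__, (size))
#endif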
void *
bmk_memalloc(size_t nbytes, size_t align)
{
	union overhead *op;
	void *rv;
	size_t allocbytes;
	int bucket;
	unsigned amt;
	u_long alignpad;

	malloc_lock();

	/* First call: find the bucket that corresponds to a whole page. */
	if (pagesz == 0) {
		pagesz = PAGE_SIZE;
		ASSERT(pagesz > 0);
#if 0
		/* Disabled legacy sbrk-based bootstrap. */
		op = (union overhead *)(void *)sbrk(0);
		n = n - sizeof(*op) - ((long)op & (n - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			if (sbrk((int)n) == (void *)-1) {
				malloc_unlock();
				return (NULL);
			}
		}
#endif
		bucket = 0;
		amt = 1 << MINSHIFT;
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}

	/* Alignment must be a power of two and at least pointer-sized. */
	if (align & (align - 1)) {
		malloc_unlock();
		return NULL;
	}
	if (align < sizeof(void *))
		align = sizeof(void *);

	/* need at least this many bytes plus header to satisfy alignment */
	allocbytes = nbytes + ((sizeof(*op) + (align - 1)) & ~(align - 1));

	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (allocbytes <= pagesz - RSLOP) {
#ifndef RCHECK
		amt = 1 << MINSHIFT;		/* size of first bucket */
		bucket = 0;
#else
		amt = 1 << (MINSHIFT + 1);	/* size of first bucket */
		bucket = 1;
#endif
	} else {
		amt = (unsigned)pagesz;
		bucket = pagebucket;
	}
	while (allocbytes > amt) {
		amt <<= 1;
		if (amt == 0) {
			malloc_unlock();
			return (NULL);
		}
		bucket++;
	}

	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL) {
			malloc_unlock();
			return (NULL);
		}
	}

	/* remove from linked list */
	nextf[bucket] = op->ov_next;

	/* align op before returned memory */
	rv = (void *)(((uintptr_t)(op + 1) + align - 1) & ~(align - 1));
	alignpad = (uintptr_t)rv - (uintptr_t)op;

#ifdef MEMALLOC_TESTING
	/* Fill the alignment padding so bmk_memfree() can verify it. */
	memset(op, MAGIC, alignpad);
#endif

	/* Write the header immediately before the returned pointer. */
	op = ((union overhead *)rv) - 1;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
	op->ov_alignpad = alignpad;
#ifdef MSTATS
	nmalloc[bucket]++;
#endif
	malloc_unlock();

#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(u_short *)((char *)(op + 1) + op->ov_size) = RMAGIC;
#endif
	return rv;
}
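A small sketch of how the allocate/free pair above might be used: align must be a power of two, bmk_memalloc() returns NULL on failure, and bmk_memfree() recovers the bucket and alignment padding from the header stored just before the returned pointer. The caller function and sizes are illustrative.

/* Illustrative caller: request a 64-byte-aligned buffer and release it. */
static int use_aligned_buffer(void)
{
	void *buf = bmk_memalloc(256, 64);	/* 256 bytes, 64-byte alignment */

	if (buf == NULL)
		return -1;			/* bucket empty and morecore() failed */

	/* ... use buf; its address is a multiple of 64 ... */

	bmk_memfree(buf);			/* header before buf selects the bucket */
	return 0;
}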