void * malloc(size_t size) { void *retval = NULL; //debugf("malloc(%lu): ", size); if (sizeof(struct slab_header) != PAGE_SIZE) { koops("slab_header is %lu bytes", sizeof(struct slab_header)); } if (int_nest_count > 0) { koops("malloc called in interrupt handler"); } if (atomic_fetch_add(&malloc_lock, 1) != 0) { koops("(malloc)malloc_lock != 0"); } if (size > MAX_SLAB_SIZE) { size_t pages = (sizeof(uint32_t) + size + PAGE_MASK) / PAGE_SIZE; struct malloc_region *result = alloc_pages(pages); result->region_size = (pages * PAGE_SIZE) - sizeof(struct malloc_region); debugf("Wanted %lu got %u\n", size, result->region_size); retval = result->data; } else { int slab_idx = map_size_to_idx(size); struct slab_header *slab = slabs[slab_idx]; validate_is_slab(slab); uint64_t allocation_mask = bitmap_mask(slab_idx); uint64_t free_bits = slab->allocation_bm[0] ^ allocation_mask; int freebit = __builtin_ffsl(free_bits); if (unlikely(freebit == 0)) { slab = add_new_slab(slab_idx); debugf(" got new slab @ %p ", slab); free_bits = slab->allocation_bm[0] ^ allocation_mask; freebit = __builtin_ffsl(free_bits); if(unlikely(freebit == 0)) { koops("new slab for idx:%d has filled up [%"PRIu64 "/%"PRIu64" /%"PRIX64 "]!", slab_idx, slab->malloc_cnt, slab->free_cnt, slab->allocation_bm[0]); } } freebit--; size_t offset = freebit * slab_info[slab_idx].slab_size; retval = &slab->data[offset]; uint64_t free_mask = (uint64_t)1 << freebit; slab->allocation_bm[0] |= free_mask; slab->malloc_cnt++; update_checksum(slab); debugf("malloc(%lu)=%p slab=%p offset=%lx [%"PRIu64 "/%"PRIu64"]\n", size, retval, slab, offset, slab->malloc_cnt, slab->free_cnt); } if (atomic_fetch_sub(&malloc_lock, 1) != 1) { koops("(malloc)malloc_lock != 1"); } return retval; }
// Debugging for now, wouldnt work normally as text could be there for other reasons static void validate_is_slab(struct slab_header *slab) { if (strcmp(slab->signature, "MALLOC")) { koops("slab @ %p is not a slab!", slab); } if (compute_checksum(slab) != slab->checksum) { koops("slab @ %p has invalid checksum!", slab); } }
// fwrite() stub: only stdout/stderr are supported; everything is routed to
// the kernel console via print_string_len().
size_t fwrite(const void *ptr, size_t size, size_t nmemb, void *stream)
{
    // NOTE(review): printing ptr with %s assumes a NUL-terminated buffer,
    // which fwrite() callers do not guarantee — debug aid only.
    debugf("fwrite(\"%s\", %lu, %lu, %p)", ptr, size, nmemb, stream);
    if (stream != stderr && stream != stdout) {
        koops("fwrite stream = %p", stream);
    }
    size_t len;
    // FIX: use the type-generic overflow builtin. __builtin_umull_overflow
    // operates on unsigned long, which is not size_t on LLP64 targets;
    // __builtin_mul_overflow infers the operand type from &len.
    if (__builtin_mul_overflow(size, nmemb, &len)) {
        koops("fwrite size too large (%lu,%lu)", size, nmemb);
    }
    print_string_len(ptr, len);
    return len;
}
// funlockfile() stub: streams are never actually locked here, so a valid
// stream makes this a no-op; anything else is fatal.
void funlockfile(void *stream)
{
    if (stream == stderr || stream == stdout) {
        return;
    }
    koops("funlockfile stream = %p", stream);
}
// flockfile() stub: no locking is performed; only validates the stream.
void flockfile(void *stream)
{
    debugf("flockfile(%p)\n", stream);
    if (stream == stderr || stream == stdout) {
        return;
    }
    koops("flockfile stream = %p", stream);
}
// fputc() stub: writes one character to the kernel console after
// validating that the stream is stdout or stderr.
int fputc(int ch, void *stream)
{
    debugf("putc('%c', %p)\n", ch, stream);
    int stream_ok = (stream == stdout) || (stream == stderr);
    if (!stream_ok) {
        koops("putc stream = %p", stream);
    }
    print_char(ch);
    return ch;
}
// fputs() stub: writes the string to the kernel console after validating
// the stream. Always reports success (returns 0, a non-negative value).
int fputs(const char *s, void *stream)
{
    debugf("fputs(\"%s\",%p)\n", s, stream);
    int stream_ok = (stream == stdout) || (stream == stderr);
    if (!stream_ok) {
        koops("fputs stream = %p", stream);
    }
    print_string(s);
    return 0;
}
// Doesnt currently work if the page being freed came from alloc_pages() size_t malloc_usable_size(void *ptr) { if (atomic_fetch_add(&malloc_lock, 1) != 0) { koops("(usable_size)malloc_lock != 0"); } if (int_nest_count > 0) { koops("malloc called in interrupt handler"); } debugf("%s(%p)=", __func__, ptr); uint64_t p = (uint64_t)ptr; struct slab_header *slab = (struct slab_header *)(p & ~PAGE_MASK); size_t retval = slab->slab_size; debugf("%lu\n", retval); if (atomic_fetch_sub(&malloc_lock, 1) != 1) { koops("(usable_size)malloc_lock != 1"); } return retval; }
// write() stub: only stdout (1) and stderr (2) descriptors are valid; both
// go to the kernel console. Always reports the full byte count as written.
ssize_t write(int fd, const void *buf, size_t nbyte)
{
    debugf("write(fd=%d, buf=%p nbyte=%lu)\n", fd, buf, nbyte);
    switch (fd) {
    case 1:
    case 2:
        print_string_len(buf, nbyte);
        break;
    default:
        koops("write() with fd = %d\n", fd);
    }
    return nbyte;
}
// Map an allocation size to a slab bucket index.
// Bucket capacities must stay in sync with slab_info[].slab_size.
static inline int map_size_to_idx(size_t size)
{
    // Could convert to map the highest bit set in the size
    static const size_t bucket_max[] = { 32, 64, 192, 448, 1008, 2016, 4032 };

    for (size_t i = 0; i < sizeof(bucket_max) / sizeof(bucket_max[0]); i++) {
        if (size <= bucket_max[i]) {
            return (int)i;
        }
    }
    koops("map_size_to_idx: bad size %lu\n", size);
    // FIX: the original fell off the end of a non-void function after
    // koops() — UB if the value is used, and a compiler warning always.
    // koops() is fatal and never returns; make that explicit.
    __builtin_unreachable();
}
// fprintf() stub: validates the stream, then forwards the format and
// arguments to the kernel's kvprintf(). Returns kvprintf()'s length.
int fprintf(void *stream, const char *format, ...)
{
    int stream_ok = (stream == stderr) || (stream == stdout);
    if (!stream_ok) {
        koops("fprintf stream = %p", stream);
    }

    va_list args;
    va_start(args, format);
    int len = kvprintf(format, args);
    va_end(args);
    return len;
}
// Release an allocation made by malloc(). Slab allocations clear their
// bitmap bit and are poisoned with 0xAA; large alloc_pages() regions are
// returned wholesale via free_pages(). Not callable from interrupt context.
void free(void *ptr)
{
    debugf("free(%p)=", ptr);
    // free(NULL) is a no-op, per the usual contract.
    if (unlikely(ptr == NULL)) { return; }
    if (int_nest_count > 0) { koops("malloc called in interrupt handler"); }
    // malloc_lock is an atomic in-use counter: any concurrent holder is fatal.
    if (atomic_fetch_add(&malloc_lock, 1) != 0) { koops("(free)malloc_lock != 0"); }
    // Both slab and region headers live at the base of the page containing
    // the allocation.
    uint64_t p = (uint64_t)ptr;
    struct slab_header *slab = (struct slab_header *)(p & ~PAGE_MASK);
    if (!region_is_slab(slab)) {
        // Large allocation: hand the whole page run back.
        // NOTE(review): reads slab_size through a slab_header cast on what
        // malloc() wrote as malloc_region.region_size — presumably the two
        // fields share the same offset; confirm against the struct layouts.
        // NOTE(review): malloc() sized the region with sizeof(uint32_t), so
        // this pages computation may disagree with the allocation — verify.
        size_t pages = (slab->slab_size + sizeof(struct malloc_region)) / PAGE_SIZE;
        free_pages(slab, pages);
    } else {
        validate_is_slab(slab);
        debugf("slab=%p ", slab);
        debugf("cs=%"PRIx64 "\n", slab->checksum);
        debugf("size=%u ", slab->slab_size);
        // Byte offset of the pointer within its slab page.
        size_t offset = (ptr - (void *)slab);
        debugf("offset=%"PRIu64, offset);
        // 64 appears to be the offset of slab_header.data[], i.e. the slab
        // bookkeeping header size — TODO confirm (offsetof would document it).
        if (unlikely(offset < 64)) { koops("free(%p) offset = %lu", ptr, offset); }
        // The pointer must land exactly on a slot boundary.
        if (unlikely((offset - 64) % slab->slab_size)) { koops("free(%p) is not on a valid boundary for slab size of %u (%lx)", ptr, slab->slab_size, offset - 64); }
        // Slot index, and its bit in the allocation bitmap.
        int bit_idx = (offset-64) / slab->slab_size;
        uint64_t bitmap_mask = (uint64_t)1 << bit_idx;
        debugf(" bit_idx = %d mask=%"PRIx64, bit_idx, bitmap_mask);
        if (likely(slab->allocation_bm[0] & bitmap_mask)) {
            // Mark the slot free and count the release.
            slab->allocation_bm[0] &= ~bitmap_mask;
            slab->free_cnt++;
            debugf(" alloc_bm = %"PRIx64 " freecnt=%"PRIu64 " ", slab->allocation_bm[0], slab->free_cnt);
        } else {
            // Double free or wild pointer: the bit was already clear.
            koops("%p is not allocated, alloc=%"PRIx64 " mask = %"PRIx64, ptr, slab->allocation_bm[0], bitmap_mask);
        }
        // Poison the freed slot to surface use-after-free bugs.
        memset(ptr, 0xAA, slab->slab_size);
        // The header changed (bitmap, free_cnt): re-seal the checksum that
        // validate_is_slab() checks.
        update_checksum(slab);
        debugf("cs=%"PRIx64 "\n", slab->checksum);
    }
    if (atomic_fetch_sub(&malloc_lock, 1) != 1) { koops("(free)malloc_lock != 1"); }
}
// Replaces libc abort(): any abort in kernel context is fatal by definition.
void abort()
{
    koops("abort() called");
}