static
void
checksubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	int nfree=0;

	assert(curspl>0);

	if (pr->freelist_offset == INVALID_OFFSET) {
		assert(pr->nfree==0);
		return;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);

	assert(pr->freelist_offset < PAGE_SIZE);
	assert(pr->freelist_offset % sizes[blktype] == 0);

	fla = prpage + pr->freelist_offset;
	fl = (struct freelist *)fla;

	for (; fl != NULL; fl = fl->next) {
		fla = (vaddr_t)fl;
		assert(fla >= prpage && fla < prpage + PAGE_SIZE);
		assert((fla-prpage) % sizes[blktype] == 0);
		assert(fla >= MIPS_KSEG0);
		assert(fla < MIPS_KSEG1);
		nfree++;
	}
	assert(nfree==pr->nfree);
}
static
void
checksubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	int nfree=0;

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	if (pr->freelist_offset == INVALID_OFFSET) {
		KASSERT(pr->nfree==0);
		return;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);

	KASSERT(pr->freelist_offset < PAGE_SIZE);
	KASSERT(pr->freelist_offset % sizes[blktype] == 0);

	fla = prpage + pr->freelist_offset;
	fl = (struct freelist *)fla;

	for (; fl != NULL; fl = fl->next) {
		fla = (vaddr_t)fl;
		KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
		KASSERT((fla-prpage) % sizes[blktype] == 0);
		KASSERT(fla >= MIPS_KSEG0);
		KASSERT(fla < MIPS_KSEG1);
		nfree++;
	}
	KASSERT(nfree==pr->nfree);
}
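/*
 * For reference: a sketch of the two bookkeeping structures that
 * checksubpage (and the rest of this section) operates on. The field
 * names all appear in the listings here; the field order and the
 * 16-bit widths of freelist_offset and nfree are assumptions.
 */
struct freelist {
	struct freelist *next;		/* stored in the free block itself */
};

struct pageref {
	struct pageref *next_samesize;	/* chain of pages with the same block size */
	struct pageref *next_all;	/* chain of all pages owned by the subpage allocator */
	vaddr_t pageaddr_and_blocktype;	/* page address packed with the sizes[] index */
	uint16_t freelist_offset;	/* offset of the first free block, or INVALID_OFFSET */
	uint16_t nfree;			/* number of blocks currently on the freelist */
};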
/*
 * Print the allocated/freed map of a single kernel heap page.
 */
static
void
subpage_stats(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	unsigned i, n, index;
	uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)];

	checksubpage(pr);
	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	/* clear freemap[] */
	for (i=0; i<ARRAYCOUNT(freemap); i++) {
		freemap[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);
	KASSERT(blktype >= 0 && blktype < NSIZES);

	/* compute how many bits we need in freemap and assert we fit */
	n = PAGE_SIZE / sizes[blktype];
	KASSERT(n <= 32 * ARRAYCOUNT(freemap));

	if (pr->freelist_offset != INVALID_OFFSET) {
		fla = prpage + pr->freelist_offset;
		fl = (struct freelist *)fla;

		for (; fl != NULL; fl = fl->next) {
			fla = (vaddr_t)fl;
			index = (fla-prpage) / sizes[blktype];
			KASSERT(index<n);
			freemap[index/32] |= (1<<(index%32));
		}
	}

	kprintf("at 0x%08lx: size %-4lu %u/%u free\n",
		(unsigned long)prpage, (unsigned long) sizes[blktype],
		(unsigned) pr->nfree, n);
	kprintf(" ");
	for (i=0; i<n; i++) {
		int val = (freemap[i/32] & (1<<(i%32)))!=0;
		kprintf("%c", val ? '.' : '*');
		if (i%64==63 && i<n-1) {
			kprintf("\n ");
		}
	}
	kprintf("\n");
}
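/*
 * freemap[] above is sized for the worst case: the smallest block size
 * produces the most blocks per page. A sketch of the size table this
 * assumes; the exact values are an assumption (powers of two from 16 to
 * 2048 bytes), not taken from the listings:
 */
#define NSIZES			8
#define SMALLEST_SUBPAGE_SIZE	16
#define LARGEST_SUBPAGE_SIZE	2048

static const size_t sizes[NSIZES] = { 16, 32, 64, 128, 256, 512, 1024, 2048 };

/*
 * With a 4096-byte page this gives PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)
 * = 8 words, i.e. 256 bits, one bit per 16-byte block, so freemap[] is
 * large enough for every entry in sizes[].
 */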
static
void
dumpsubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	unsigned i, n, index;
	u_int32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)];

	checksubpage(pr);
	assert(curspl>0);

	/* clear freemap[] */
	for (i=0; i<sizeof(freemap)/sizeof(freemap[0]); i++) {
		freemap[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);

	/* compute how many bits we need in freemap and assert we fit */
	n = PAGE_SIZE / sizes[blktype];
	assert(n <= 32*sizeof(freemap)/sizeof(freemap[0]));

	if (pr->freelist_offset != INVALID_OFFSET) {
		fla = prpage + pr->freelist_offset;
		fl = (struct freelist *)fla;

		for (; fl != NULL; fl = fl->next) {
			fla = (vaddr_t)fl;
			index = (fla-prpage) / sizes[blktype];
			assert(index<n);
			freemap[index/32] |= (1<<(index%32));
		}
	}

	kprintf("at 0x%08lx: size %-4lu %u/%u free\n",
		(unsigned long)prpage, (unsigned long) sizes[blktype],
		(unsigned) pr->nfree, n);
	kprintf(" ");
	for (i=0; i<n; i++) {
		int val = (freemap[i/32] & (1<<(i%32)))!=0;
		kprintf("%c", val ? '.' : '*');
		if (i%64==63 && i<n-1) {
			kprintf("\n ");
		}
	}
	kprintf("\n");
}
static
void
dump_subpage(struct pageref *pr, unsigned generation)
{
	unsigned blocksize = sizes[PR_BLOCKTYPE(pr)];
	unsigned numblocks = PAGE_SIZE / blocksize;
	unsigned numfreewords = DIVROUNDUP(numblocks, 32);
	uint32_t isfree[numfreewords], mask;
	vaddr_t prpage;
	struct freelist *fl;
	vaddr_t blockaddr;
	struct malloclabel *ml;
	unsigned i;

	/* clear the free-block bitmap */
	for (i=0; i<numfreewords; i++) {
		isfree[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);

	/* mark every block on the freelist as free */
	fl = (struct freelist *)(prpage + pr->freelist_offset);
	for (; fl != NULL; fl = fl->next) {
		i = ((vaddr_t)fl - prpage) / blocksize;
		mask = 1U << (i % 32);
		isfree[i / 32] |= mask;
	}

	/* print each allocated block belonging to the requested generation */
	for (i=0; i<numblocks; i++) {
		mask = 1U << (i % 32);
		if (isfree[i / 32] & mask) {
			continue;
		}
		blockaddr = prpage + i * blocksize;
		ml = (struct malloclabel *)blockaddr;
		if (ml->generation != generation) {
			continue;
		}
		kprintf("%5zu bytes at %p, allocated at %p\n",
			blocksize, (void *)blockaddr, (void *)ml->label);
	}
}
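/*
 * dump_subpage reads a malloclabel record from the front of each
 * allocated block and prints the blocks belonging to the requested
 * generation. A sketch of that record, inferred from the two fields
 * used above; the exact layout and field widths are assumptions:
 */
struct malloclabel {
	vaddr_t label;		/* address of the kmalloc call site */
	unsigned generation;	/* allocation epoch the block was allocated in */
};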
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page

	ptraddr = (vaddr_t)ptr;

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);

		/* check for corruption */
		KASSERT(blktype>=0 && blktype<NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef(ptr, sizes[blktype]);

	/*
	 * We probably ought to check for free twice by seeing if the block
	 * is already on the free list. But that's expensive, so we don't.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	}
	else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
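/*
 * subpage_kfree above and subpage_kmalloc below both call
 * checksubpages() with the spinlock held. Its definition is not shown
 * in this section; a plausible sketch, assuming it simply runs
 * checksubpage() over every pageref on the per-size lists and on the
 * all-pages list and checks that the two agree:
 */
static
void
checksubpages(void)
{
	struct pageref *pr;
	int i;
	unsigned sc=0, ac=0;

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	for (i=0; i<NSIZES; i++) {
		for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
			checksubpage(pr);
			sc++;
		}
	}

	for (pr = allbase; pr != NULL; pr = pr->next_all) {
		checksubpage(pr);
		ac++;
	}

	/* every page on a size list should also be on the all-pages list */
	KASSERT(sc==ac);
}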
static
void *
subpage_kmalloc(size_t sz)
{
	unsigned blktype;	// index into sizes[] that we're using
	struct pageref *pr;	// pageref for page we're allocating from
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *volatile fl;	// free list entry
	void *retptr;		// our result

	volatile int i;

	blktype = blocktype(sz);
	sz = sizes[blktype];

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = sizebases[blktype]; pr != NULL; pr = pr->next_samesize) {

		/* check for corruption */
		KASSERT(PR_BLOCKTYPE(pr) == blktype);
		checksubpage(pr);

		if (pr->nfree > 0) {

		doalloc: /* comes here after getting a whole fresh page */

			KASSERT(pr->freelist_offset < PAGE_SIZE);
			prpage = PR_PAGEADDR(pr);
			fla = prpage + pr->freelist_offset;
			fl = (struct freelist *)fla;

			retptr = fl;
			fl = fl->next;
			pr->nfree--;

			if (fl != NULL) {
				KASSERT(pr->nfree > 0);
				fla = (vaddr_t)fl;
				KASSERT(fla - prpage < PAGE_SIZE);
				pr->freelist_offset = fla - prpage;
			}
			else {
				KASSERT(pr->nfree == 0);
				pr->freelist_offset = INVALID_OFFSET;
			}

			checksubpages();

			spinlock_release(&kmalloc_spinlock);
			return retptr;
		}
	}

	/*
	 * No page of the right size available.
	 * Make a new one.
	 *
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */

	spinlock_release(&kmalloc_spinlock);
	prpage = alloc_kpages(1);
	if (prpage==0) {
		/* Out of memory. */
		kprintf("kmalloc: Subpage allocator couldn't get a page\n");
		return NULL;
	}
	spinlock_acquire(&kmalloc_spinlock);

	pr = allocpageref();
	if (pr==NULL) {
		/* Couldn't allocate accounting space for the new page. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
		kprintf("kmalloc: Subpage allocator couldn't get pageref\n");
		return NULL;
	}

	pr->pageaddr_and_blocktype = MKPAB(prpage, blktype);
	pr->nfree = PAGE_SIZE / sizes[blktype];

	/*
	 * Note: fl is volatile because the MIPS toolchain we were
	 * using in spring 2001 attempted to optimize this loop and
	 * blew it. Making fl volatile inhibits the optimization.
	 */

	fla = prpage;
	fl = (struct freelist *)fla;
	fl->next = NULL;
	for (i=1; i<pr->nfree; i++) {
		fl = (struct freelist *)(fla + i*sizes[blktype]);
		fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]);
		KASSERT(fl != fl->next);
	}
	fla = (vaddr_t) fl;
	pr->freelist_offset = fla - prpage;
	KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]);

	pr->next_samesize = sizebases[blktype];
	sizebases[blktype] = pr;

	pr->next_all = allbase;
	allbase = pr;

	/* This is kind of cheesy, but avoids duplicating the alloc code. */
	goto doalloc;
}
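/*
 * subpage_kmalloc packs the page address and the sizes[] index into the
 * single pageaddr_and_blocktype field with MKPAB(), and the other
 * routines unpack it with PR_PAGEADDR() and PR_BLOCKTYPE(). A sketch of
 * those macros, assuming page addresses are page-aligned so their low
 * bits are free to hold the index (PAGE_FRAME masks the page-number
 * bits):
 */
#define PR_PAGEADDR(pr)		((pr)->pageaddr_and_blocktype & PAGE_FRAME)
#define PR_BLOCKTYPE(pr)	((pr)->pageaddr_and_blocktype & ~PAGE_FRAME)
#define MKPAB(pa, blk)		(((pa) & PAGE_FRAME) | ((blk) & ~PAGE_FRAME))

/*
 * blocktype() is also not defined in this section; a sketch that
 * matches how it is used above (the smallest size class that fits the
 * request):
 */
static
int
blocktype(size_t sz)
{
	unsigned i;

	for (i=0; i<NSIZES; i++) {
		if (sz <= sizes[i]) {
			return i;
		}
	}
	panic("Subpage allocator cannot handle allocation of size %zu\n", sz);
	return 0;	/* unreachable; keeps the compiler happy */
}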
/*
 * Check that a particular heap page (the one managed by the argument
 * PR) is valid.
 *
 * This checks:
 *    - that the page is within MIPS_KSEG0 (for mips)
 *    - that the freelist starting point in PR is valid
 *    - that the number of free blocks is consistent with the freelist
 *    - that each freelist next pointer points within the page
 *    - that no freelist pointer points to the middle of a block
 *    - that free blocks are still deadbeefed (if CHECKBEEF)
 *    - that the freelist is not circular
 *    - that the guard bands are intact on all allocated blocks (if
 *      CHECKGUARDS)
 *
 * Note that if CHECKGUARDS is set, a circular freelist will cause an
 * assertion as a bit in isfree is set twice; if not, a circular
 * freelist will cause an infinite loop.
 */
static
void
checksubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	int nfree=0;
	size_t blocksize;
#ifdef CHECKGUARDS
	const unsigned maxblocks = PAGE_SIZE / SMALLEST_SUBPAGE_SIZE;
	const unsigned numfreewords = DIVROUNDUP(maxblocks, 32);
	uint32_t isfree[numfreewords], mask;
	unsigned numblocks, blocknum, i;
	size_t smallerblocksize;
#endif

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	if (pr->freelist_offset == INVALID_OFFSET) {
		KASSERT(pr->nfree==0);
		return;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);
	KASSERT(blktype >= 0 && blktype < NSIZES);
	blocksize = sizes[blktype];

#ifdef CHECKGUARDS
	smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
	for (i=0; i<numfreewords; i++) {
		isfree[i] = 0;
	}
#endif

#ifdef __mips__
	KASSERT(prpage >= MIPS_KSEG0);
	KASSERT(prpage < MIPS_KSEG1);
#endif

	KASSERT(pr->freelist_offset < PAGE_SIZE);
	KASSERT(pr->freelist_offset % blocksize == 0);

	fla = prpage + pr->freelist_offset;
	fl = (struct freelist *)fla;

	for (; fl != NULL; fl = fl->next) {
		fla = (vaddr_t)fl;
		KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
		KASSERT((fla-prpage) % blocksize == 0);
#ifdef CHECKBEEF
		checkdeadbeef(fl, blocksize);
#endif
#ifdef CHECKGUARDS
		blocknum = (fla-prpage) / blocksize;
		mask = 1U << (blocknum % 32);
		KASSERT((isfree[blocknum / 32] & mask) == 0);
		isfree[blocknum / 32] |= mask;
#endif
		KASSERT(fl->next != fl);
		nfree++;
	}
	KASSERT(nfree==pr->nfree);

#ifdef CHECKGUARDS
	numblocks = PAGE_SIZE / blocksize;
	for (i=0; i<numblocks; i++) {
		mask = 1U << (i % 32);
		if ((isfree[i / 32] & mask) == 0) {
			checkguardband(prpage + i * blocksize,
				       smallerblocksize, blocksize);
		}
	}
#endif
}
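/*
 * The CHECKBEEF path assumes that free blocks were filled with
 * 0xdeadbeef when they were freed (fill_deadbeef, called from
 * subpage_kfree below). Sketches of the two helpers; note that the
 * first word of a free block holds the freelist next pointer, so the
 * check must start at word 1. The exact definitions are assumptions.
 */
static
void
fill_deadbeef(void *vptr, size_t len)
{
	uint32_t *ptr = vptr;
	size_t i;

	for (i=0; i<len/sizeof(uint32_t); i++) {
		ptr[i] = 0xdeadbeef;
	}
}

#ifdef CHECKBEEF
static
void
checkdeadbeef(void *block, size_t blocksize)
{
	uint32_t *ptr = block;
	size_t i;

	/* skip word 0: it holds fl->next once the block is on the freelist */
	for (i=1; i<blocksize/sizeof(uint32_t); i++) {
		KASSERT(ptr[i] == 0xdeadbeef);
	}
}
#endif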
/*
 * Free a pointer previously returned from subpage_kmalloc. If the
 * pointer is not on any heap page we recognize, return -1.
 */
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page
#ifdef GUARDS
	size_t blocksize, smallerblocksize;
#endif

	ptraddr = (vaddr_t)ptr;
#ifdef GUARDS
	if (ptraddr % PAGE_SIZE == 0) {
		/*
		 * With guard bands, all client-facing subpage
		 * pointers are offset by GUARD_PTROFFSET (which is 4)
		 * from the underlying blocks and are therefore not
		 * page-aligned. So a page-aligned pointer is not one
		 * of ours. Catch this up front, as otherwise
		 * subtracting GUARD_PTROFFSET could give a pointer on
		 * a page we *do* own, and then we'll panic because
		 * it's not a valid one.
		 */
		return -1;
	}
	ptraddr -= GUARD_PTROFFSET;
#endif
#ifdef LABELS
	if (ptraddr % PAGE_SIZE == 0) {
		/* ditto */
		return -1;
	}
	ptraddr -= LABEL_PTROFFSET;
#endif

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);

		/* check for corruption */
		KASSERT(blktype >= 0 && blktype < NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

#ifdef GUARDS
	blocksize = sizes[blktype];
	smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
	checkguardband(ptraddr, smallerblocksize, blocksize);
#endif

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef((void *)ptraddr, sizes[blktype]);

	/*
	 * We probably ought to check for free twice by seeing if the block
	 * is already on the free list. But that's expensive, so we don't.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	}
	else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);

		/* this block should not already be on the free list! */
#ifdef SLOW
		{
			struct freelist *fl2;

			for (fl2 = fl->next; fl2 != NULL; fl2 = fl2->next) {
				KASSERT(fl2 != fl);
			}
		}
#else
		/* check just the head */
		KASSERT(fl != fl->next);
#endif
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
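/*
 * How the -1 return is used: kfree (not shown in this section) tries
 * the subpage allocator first and falls back to the page allocator. A
 * sketch, assuming large allocations are always whole, page-aligned
 * pages obtained from alloc_kpages:
 */
void
kfree(void *ptr)
{
	/* Freeing NULL is allowed and does nothing. */
	if (ptr == NULL) {
		return;
	}

	/* A nonzero return means ptr was not on any subpage-allocator page. */
	if (subpage_kfree(ptr)) {
		KASSERT((vaddr_t)ptr % PAGE_SIZE == 0);
		free_kpages((vaddr_t)ptr);
	}
}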