/*
 * Free memory
 * It may be called during interrupt disable. In this case, need to wait
 * until interrupt is enabled and until free.
 *
 * ptr must be a pointer previously returned by the matching allocator:
 * the area-management QUEUE header sits immediately before it.
 * Frees the block and coalesces it with adjacent free areas, then
 * registers the result on the free queue of the kernel IMACB.
 */
EXPORT void knl_Ifree( void *ptr )
{
	QUEUE	*aq;
	UINT	imask;

	DI(imask);  /* Exclusive control by interrupt disable */

	/* Step back from the user pointer to the area header. */
	aq = (QUEUE*)ptr - 1;
	clrAreaFlag(aq, AREA_USE);

	/* NOTE(review): no AREA_END/AREA_TOP guards here (unlike the
	 * spinlock variant of this routine) — presumably the boundary
	 * sentinel areas are permanently marked AREA_USE so these two
	 * checks alone cannot walk off the page; confirm against the
	 * page setup code. */
	if ( !chkAreaFlag(aq->next, AREA_USE) ) {
		/* Merge with free area in after location */
		knl_removeFreeQue(aq->next + 1);
		knl_removeAreaQue(aq->next);
	}
	if ( !chkAreaFlag(aq->prev, AREA_USE) ) {
		/* Merge with free area in front location */
		aq = aq->prev;
		knl_removeFreeQue(aq + 1);
		knl_removeAreaQue(aq->next);
	}

	/* Register the (possibly merged) area on the free queue. */
	knl_appendFreeArea(knl_imacb, aq);

	EI(imask);
}
/*
 * Free memory block
 * Returns the block to the memory pool, coalescing it with any
 * adjacent free areas before putting it back on the FreeQue.
 * Returns E_OK on success, E_PAR (parameter check builds only)
 * if the block is not currently marked in-use.
 */
LOCAL ER rel_blk( MPLCB *mplcb, void *blk )
{
	QUEUE	*area;

	/* The area header immediately precedes the user block. */
	area = (QUEUE*)blk - 1;
#if CHK_PAR
	if ( !chkAreaFlag(area, AREA_USE) ) {
		return E_PAR;
	}
#endif
	clrAreaFlag(area, AREA_USE);

	/* Coalesce with the following area when it is free. */
	if ( !chkAreaFlag(area->next, AREA_USE) ) {
		removeFreeQue(area->next + 1);
		removeAreaQue(area->next);
	}

	/* Coalesce with the preceding area when it is free. */
	if ( !chkAreaFlag(area->prev, AREA_USE) ) {
		area = area->prev;
		removeFreeQue(area + 1);
		removeAreaQue(area->next);
	}

	/* Register free area onto FreeQue */
	appendFreeArea(mplcb, area);

	return E_OK;
}
/*
 * Free memory
 * It may be called during interrupt disable. In this case, need to wait
 * until interrupt is enabled and until free.
 *
 * Frees ptr back to imacb, coalescing with adjacent free areas.
 * If the whole page becomes free AND interrupts were enabled on entry,
 * the page itself is returned to the system (RelSysMemBlk) with the
 * lock temporarily dropped; otherwise the area goes on the free queue.
 */
LOCAL void ifree( void *ptr, IMACB *imacb )
{
	QUEUE	*aq;
	UW	imask;

	DI(imask);  /* Exclusive control by interrupt disable */
	SpinLock(&MemLockObj);

	/* Step back from the user pointer to the area header. */
	aq = (QUEUE*)ptr - 1;
	clrAreaFlag(aq, AREA_USE);

	if ( !chkAreaFlag(aq->next, AREA_END|AREA_USE) ) {
		/* Merge with free area in after location */
		removeFreeQue(aq->next + 1);
		removeAreaQue(aq->next);
	}

	if ( !chkAreaFlag(aq, AREA_TOP) && !chkAreaFlag(aq->prev, AREA_USE) ) {
		/* Merge with free area in front location */
		aq = aq->prev;
		removeFreeQue(aq + 1);
		removeAreaQue(aq->next);
	}

	/* If the whole page is free, then free the page.
	 * However, do not free the page if it is called during
	 * interrupt disabled. */
	if ( !isDI(imask) && chkAreaFlag(aq, AREA_TOP) && chkAreaFlag(aq->next, AREA_END) ) {
		/* Free pages */
		removeAreaQue(aq->next);
		removeAreaQue(aq);
		/* Drop the lock and re-enable interrupts around the
		 * system call; aq is already unlinked from all queues,
		 * so no other path can reach it meanwhile. */
		SpinUnlock(&MemLockObj);
		EI(imask);
		RelSysMemBlk(aq);
		DI(imask);
		SpinLock(&MemLockObj);
	} else {
		/* Register free area to FreeQue */
		appendFreeArea(aq, imacb);
	}

	SpinUnlock(&MemLockObj);
	EI(imask);
}
/*
 * Free memory
 * Returns ptr to the allocator described by macb. A NULL ptr is a
 * no-op. In test mode the block is validated first via chkalloc().
 * Adjacent free areas are merged; when the whole page becomes empty
 * it is released through the macb->relblk callback, otherwise the
 * area is placed on the free queue.
 */
static void _mem_free( void *ptr, MACB *macb )
{
	LIST	*area;

	if ( ptr == 0 ) {
		return;
	}
	if ( macb->testmode > 0 ) {
		if ( !chkalloc(ptr, 0, macb) ) {
			return;
		}
	}

	/* The area header immediately precedes the user block. */
	area = (LIST *)ptr - 1;
	clrAreaFlag(area, AREA_USE);

	/* Coalesce with the following area when it is free. */
	if ( !chkAreaFlag(area->next, AREA_END|AREA_USE) ) {
		removeFreeQue(area->next + 1);
		removeAreaQue(area->next);
	}

	/* Coalesce with the preceding area when it is free. */
	if ( !chkAreaFlag(area, AREA_TOP) && !chkAreaFlag(area->previous, AREA_USE) ) {
		area = area->previous;
		removeFreeQue(area + 1);
		removeAreaQue(area->next);
	}

	if ( chkAreaFlag(area, AREA_TOP) && chkAreaFlag(area->next, AREA_END) ) {
		/* Whole page is now empty: release the page itself. */
		removeAreaQue(area->next);
		removeAreaQue(area);
		(*macb->relblk)(area);
	} else {
		/* Register free area in free queue */
		appendFreeArea(area, macb);
	}
}
/*
 * Checks for errors in memory allocation information. When mode < 0,
 * dumps the usage status. When ptr != NULL, checks to see that
 * memory allocation corresponds properly with ptr allocated blocks.
 * If so, returns True.
 *
 * Walks the whole area queue: every page must start with an AREA_TOP
 * area and finish with an AREA_END area, links must be mutually
 * consistent, and each area must be at least 3 QUEUE entries long.
 * Returns FALSE (with a syslog message) on the first inconsistency
 * or when ptr does not match any in-use block.
 */
LOCAL BOOL chkalloc( void *ptr, int mode, MACB *macb )
{
	QUEUE	*aq, *nq;
	size_t	usesz = 0, fresz = 0, sz;
	int	usebk = 0, frebk = 0, npage = 0;
	BOOL	newpg, ptr_ok;

	/* Checks each area in turn */
	newpg = TRUE;
	/* A NULL ptr means "no specific block to verify". */
	ptr_ok = ( ptr == NULL )? TRUE: FALSE;
	for ( aq = macb->areaque.next; aq != &macb->areaque; aq = aq->next ) {
		/* The first area of every page must carry AREA_TOP. */
		if ( newpg && !chkAreaFlag(aq, AREA_TOP) ) {
			goto err_found;
		}
		if ( chkAreaFlag(aq, AREA_END) ) {
			/* AREA_END immediately after a page start is bogus. */
			if ( newpg ) {
				goto err_found;
			}
			newpg = TRUE;
			fresz += sizeof(QUEUE);
			npage++;
			continue;
		}
		newpg = FALSE;

		/* Forward and backward links must agree (flag bits are
		 * stored in the low bits, hence the Mask). */
		nq = aq->next;
		if ( Mask(aq->next) != nq || nq <= aq || Mask(nq->prev) != aq ) {
			goto err_found;
		}
		sz = (size_t)((VB*)nq - (VB*)aq);
		if ( sz < sizeof(QUEUE)*3 ) {
			goto err_found;
		}

		if ( chkAreaFlag(aq, AREA_USE) ) {
			usesz += sz;
			++usebk;
			if ( ptr == (void*)(aq+1) ) {
				ptr_ok = TRUE;
			}
			if ( mode < -1 ) {
				/* NOTE(review): 0x%08x with a pointer and %d
				 * with size values assumes a 32-bit target;
				 * confirm if this code is built for 64-bit. */
				syslog(LOG_NOTICE, "malloc ptr: 0x%08x [%d B]", aq+1, AreaSize(aq));
			}
		} else {
			fresz += sz;
			++frebk;
		}
	}
	/* The walk must end exactly at a page boundary. */
	if ( !newpg ) {
		goto err_found;
	}

	if ( !ptr_ok ) {
		syslog(LOG_ERR, "MALLOC: illegal ptr: 0x%08x", ptr);
		return FALSE;
	}

	if ( mode < 0 ) {
		syslog(LOG_NOTICE, "MALLOC: %d pages, used: %d [%d blks] free: %d [%d blks]",
			npage, usesz, usebk, fresz, frebk);
	}
	return TRUE;

err_found:
	syslog(LOG_ERR, "MALLOC: block corrupted at 0x%08x", aq);
	return FALSE;
}
/*
 * Checks for errors in memory allocation information. When mode < 0,
 * dumps the usage status. When ptr != NULL, checks to see that
 * memory allocation corresponds properly with ptr allocated blocks.
 * If so, returns True.
 *
 * Walks the whole area queue: every page must begin with AREA_TOP and
 * end with AREA_END, links must be mutually consistent, and each area
 * must be at least 3 LIST entries long. Returns 0 (with a diagnostic
 * print) on the first inconsistency or when ptr does not match any
 * in-use block; returns 1 otherwise.
 */
static RAW_U16 chkalloc_test( void *ptr, int mode, MACB *macb )
{
	LIST	*aq, *nq;
	RAW_U32	usesz = 0, fresz = 0, sz;
	int	usebk = 0, frebk = 0, npage = 0;
	RAW_U8	newpg, ptr_ok;

	/* Checks each area in turn */
	newpg = 1;
	/* A NULL ptr means "no specific block to verify". */
	ptr_ok = ( ptr == 0 )? 1: 0;
	for ( aq = macb->areaque.next; aq != &macb->areaque; aq = aq->next ) {
		/* The first area of every page must carry AREA_TOP. */
		if ( newpg && !chkAreaFlag(aq, AREA_TOP) ) {
			goto err_found;
		}
		if ( chkAreaFlag(aq, AREA_END) ) {
			/* AREA_END immediately after a page start is bogus. */
			if ( newpg ) {
				goto err_found;
			}
			newpg = 1;
			fresz += sizeof(LIST);
			npage++;
			continue;
		}
		newpg = 0;

		/* Forward and backward links must agree (flag bits live
		 * in the low bits, hence the Mask). */
		nq = aq->next;
		if ( Mask(aq->next) != nq || nq <= aq || Mask(nq->previous) != aq ) {
			goto err_found;
		}
		sz = (RAW_U32)((RAW_S8 *)nq - (RAW_S8 *)aq);
		if ( sz < sizeof(LIST)*3 ) {
			goto err_found;
		}

		if ( chkAreaFlag(aq, AREA_USE) ) {
			usesz += sz;
			++usebk;
			if ( ptr == (void*)(aq+1) ) {
				ptr_ok = 1;
			}
			if ( mode < -1 ) {
				/* NOTE(review): 0x%08x with a pointer assumes
				 * a 32-bit target; confirm for 64-bit builds. */
				extension_printf("malloc ptr: 0x%08x [%d B]", aq+1, AreaSize(aq));
			}
		} else {
			fresz += sz;
			++frebk;
		}
	}
	/* The walk must end exactly at a page boundary. */
	if ( !newpg ) {
		goto err_found;
	}

	if ( !ptr_ok ) {
		extension_printf("MALLOC: illegal ptr: 0x%08x", ptr);
		return 0;
	}

	if ( mode < 0 ) {
		extension_printf("MALLOC: %d pages, used: %d [%d blks] free: %d [%d blks]",
			npage, usesz, usebk, fresz, frebk);
	}
	return 1;

err_found:
	extension_printf("MALLOC: block corrupted at 0x%08x", aq);
	return 0;
}
/*
 * Memory allocation size change
 * Behaves like realloc(): ptr == NULL acts as malloc, size == 0 acts
 * as free (returning NULL). On grow-in-place failure a new block is
 * allocated, the old contents copied, and the old block freed; if the
 * new allocation fails, the original block is restored at its original
 * size and NULL is returned (the old data remains valid).
 */
static void *_mem_realloc( void *ptr, RAW_U32 size, MACB *macb )
{
	LIST	*aq = 0;
	RAW_U32	oldsz, sz;

	if ( macb->testmode > 0 ) {
		if ( !chkalloc(ptr, 0, macb) ) {
			return 0;
		}
	}

	/* If smaller than minimum fragment size, allocate minimum fragment size */
	if ( size > 0 && size < MIN_FRAGMENT ) {
		size = MIN_FRAGMENT;
	}
	size = ROUND(size);

	if ( ptr != 0 ) {
		/* BUGFIX: compute the area header only when ptr is non-NULL.
		 * The original did "aq = (LIST *)ptr - 1;" before this check,
		 * and pointer arithmetic on a null pointer is undefined
		 * behavior in C. */
		aq = (LIST *)ptr - 1;

		/* Current allocation size */
		oldsz = (RAW_U32)AreaSize(aq);

		/* Merge if next space is free space */
		if ( !chkAreaFlag(aq->next, AREA_END|AREA_USE) ) {
			removeFreeQue(aq->next + 1);
			removeAreaQue(aq->next);
		}
		sz = (RAW_U32)AreaSize(aq);
	} else {
		sz = oldsz = 0;
	}

	if ( size <= sz ) {
		if ( size > 0 ) {
			/* Fragment current area and allocate */
			allocate(aq, size, macb);
		} else {
			/* Release area */
			_mem_free(ptr, macb);
			ptr = 0;
		}
	} else {
		/* Allocate new area */
		void *newptr = _mem_malloc(size, macb);
		if ( newptr == 0 ) {
			/* Reallocate original area at original size so the
			 * caller's data stays intact on failure. */
			if ( ptr != 0 ) {
				allocate(aq, oldsz, macb);
			}
			return 0;
		}

		if ( ptr != 0 ) {
			/* Copy contents */
			raw_memcpy(newptr, ptr, oldsz);

			/* Release old area */
			_mem_free(ptr, macb);
		}
		ptr = newptr;
	}

	return ptr;
}