/*
 * Allocate new page capable of allocating a contiguous area at least
 * as big as 'size' byte
 */
RAW_INLINE LIST *newPage( RAW_U32 size, MACB *macb )
{
	LIST	*head;
	LIST	*tail;
	RAW_U32	pages;

	/* A zero page size means this MACB cannot acquire new pages */
	if ( macb->pagesz == 0 ) {
		return 0;
	}

	/* Request enough whole pages for the payload plus the two
	   boundary LIST entries that frame the area */
	pages = toPageCount(size + sizeof(LIST )*2, macb);
	head = (LIST *)(*macb->getblk)(pages);
	if ( head == 0 ) {
		return 0;
	}

	/* The end marker occupies the very last LIST slot of the block */
	tail = (LIST *)((RAW_S8 *)head + pages * macb->pagesz) - 1;

	/* Register both boundaries in the area queue and tag them */
	insertAreaQue(&macb->areaque, tail);
	insertAreaQue(&macb->areaque, head);
	setAreaFlag(head, AREA_TOP);
	setAreaFlag(tail, AREA_END);

	return head;
}
/*
 * Get memory
 *
 * Allocates 'size' bytes from the free queue of 'imacb', growing the
 * managed area with freshly reserved system pages when no free area
 * fits.  Returns a pointer to the user memory, or NULL on failure.
 *
 * Concurrency: the whole operation runs with interrupts disabled and
 * MemLockObj held; the lock/interrupt window is deliberately opened
 * around GetSysMemBlk() because page reservation may be slow or may
 * itself require the lock — TODO confirm against GetSysMemBlk's contract.
 */
LOCAL void* imalloc( size_t size, IMACB *imacb )
{
	QUEUE	*q;
	VP	mem;
	UW	imask;

	/* If it is smaller than the minimum fragment size,
	   allocate the minimum size to it. */
	if ( size < MIN_FRAGMENT ) {
		size = MIN_FRAGMENT;
	}
	size = ROUND(size);

	DI(imask);		/* Exclusive control by interrupt disable */
	SpinLock(&MemLockObj);

	/* Search FreeQue */
	q = searchFreeArea(size, imacb);
	if ( q != &imacb->freeque ) {
		/* There is free area: Split from FreeQue once */
		removeFreeQue(q);
		/* Step back from the FreeQue link to the AreaQue entry
		   that immediately precedes it */
		q = q - 1;
	} else {
		/* Reserve new pages because there is no free space */
		QUEUE	*e;
		size_t	n;

		/* Reserve pages — done with the lock dropped and
		   interrupts re-enabled (see note above) */
		SpinUnlock(&MemLockObj);
		EI(imask);
		/* Room for the payload plus the two boundary QUEUE entries */
		n = PageCount(size + sizeof(QUEUE) * 2);
		q = GetSysMemBlk(n, imacb->mematr);
		if ( q == NULL ) {
			goto err_ret;	/* Insufficient memory */
		}
		DI(imask);
		SpinLock(&MemLockObj);

		/* Register on AreaQue: 'e' is the last QUEUE slot of the
		   reserved block.  NOTE(review): 'pagesz' is a free-standing
		   identifier here (not imacb->pagesz) — presumably a
		   file-scope page-size constant; verify it matches the unit
		   PageCount() uses. */
		e = (QUEUE*)((VB*)q + n * pagesz) - 1;
		insertAreaQue(&imacb->areaque, e);
		insertAreaQue(&imacb->areaque, q);
		setAreaFlag(q, AREA_TOP);
		setAreaFlag(e, AREA_END);
	}

	/* Allocate memory from the chosen (or freshly created) area */
	mem = mem_alloc(q, size, imacb);

	SpinUnlock(&MemLockObj);
	EI(imask);
	return mem;

err_ret:
	BMS_DEBUG_PRINT(("imalloc error\n"));
	return NULL;
}
/*
 * Get memory block
 *	'blksz' must be larger than minimum fragment size
 *	and adjusted by ROUNDSZ unit.
 */
LOCAL void* get_blk( MPLCB *mplcb, INT blksz )
{
	QUEUE	*free_ent;	/* FreeQue link of the chosen area */
	QUEUE	*area;		/* AreaQue entry of the chosen area */

	/* Look for a free area big enough for the request */
	free_ent = searchFreeArea(mplcb, blksz);
	if ( free_ent == &mplcb->freeque ) {
		return NULL;	/* no fit anywhere */
	}

	/* Detach it from the FreeQue; the AreaQue entry sits
	   immediately before the FreeQue link */
	removeFreeQue(free_ent);
	area = free_ent - 1;

	/* Split the surplus off only when the remainder could still hold
	   a minimum fragment plus its QUEUE header; otherwise hand the
	   whole area out to avoid unusable slivers */
	if ( AreaSize(area) - (UINT)blksz >= MIN_FRAGMENT + sizeof(QUEUE) ) {
		QUEUE *rest = (QUEUE*)((VB*)(area + 1) + blksz);
		insertAreaQue(area, rest);
		appendFreeArea(mplcb, rest);
	}
	setAreaFlag(area, AREA_USE);

	/* User memory begins just past the AreaQue entry */
	return (void*)(area + 1);
}
/*
 * Memory pool initial setting
 */
LOCAL void init_mempool( MPLCB *mplcb, void *mempool, INT mempsz )
{
	QUEUE	*top_ent;
	QUEUE	*end_ent;

	QueInit(&mplcb->areaque);
	QueInit(&mplcb->freeque);

	/* Frame the whole pool with a top and an end AreaQue entry;
	   the end entry occupies the last QUEUE slot of the pool */
	top_ent = (QUEUE*)mempool;
	end_ent = (QUEUE*)((VB*)mempool + mempsz) - 1;
	insertAreaQue(&mplcb->areaque, end_ent);
	insertAreaQue(&mplcb->areaque, top_ent);

	/* Mark the queue head and the end sentinel in-use so they are
	   never treated as (or merged into) free areas */
	setAreaFlag(&mplcb->areaque, AREA_USE);
	setAreaFlag(end_ent, AREA_USE);

	/* Everything between the sentinels starts out as one free area */
	appendFreeArea(mplcb, top_ent);
}
/*
 * Imalloc initial setting
 *
 * Carves the kernel low-level memory region into the IMACB control
 * block followed by one large free area framed by AreaQue sentinels.
 * Always returns E_OK.
 */
EXPORT ER knl_init_Imalloc( void )
{
	/* Low-level memory management information */
	IMPORT VP	knl_lowmem_top, knl_lowmem_limit;
	VP	memend;
	QUEUE	*top, *end;

	/* Acquire system configuration definition information,
	   clamped to the actual low-memory limit */
	memend = CFN_REALMEMEND;
	if ( (UW)memend > (UW)knl_lowmem_limit ) {
		memend = knl_lowmem_limit;
	}

	/* Align top with 8 byte unit alignment */
	knl_lowmem_top = (VP)(((UW)knl_lowmem_top + 7) & ~0x00000007UL);
	/* The IMACB itself consumes the first bytes of the region */
	knl_imacb = (IMACB*)knl_lowmem_top;
	knl_lowmem_top = (VP)((UW)knl_lowmem_top + sizeof(IMACB));

	top = (QUEUE*)knl_lowmem_top;
	/* Usable size excludes the two boundary QUEUE entries */
	knl_imacb->memsz = (W)((UW)memend - (UW)knl_lowmem_top - sizeof(QUEUE)*2);
	knl_lowmem_top = memend;  /* Update memory free space */

	initIMACB();

	/* Register on AreaQue.  Because memsz excludes both sentinels,
	   top + memsz lands one QUEUE short of the region's end, so the
	   '+ 1' places 'end' exactly on the last QUEUE slot. */
	end = (QUEUE*)((VB*)top + knl_imacb->memsz) + 1;
	knl_insertAreaQue(&knl_imacb->areaque, end);
	knl_insertAreaQue(&knl_imacb->areaque, top);
	/* Flag the queue head and end sentinel in-use so they never
	   merge with adjacent free areas */
	setAreaFlag(end, AREA_USE);
	setAreaFlag(&knl_imacb->areaque, AREA_USE);

	knl_appendFreeArea(knl_imacb, top);

	return E_OK;
}
/*
 * Get memory
 */
EXPORT void* knl_Imalloc( size_t size )
{
	QUEUE	*fq;		/* FreeQue link = start of user memory */
	QUEUE	*area;		/* AreaQue entry preceding the link */
	UINT	imask;

	/* Requests below the minimum fragment are padded up to it;
	   anything larger is rounded to the allocation unit */
	size = ( size < MIN_FRAGMENT )? MIN_FRAGMENT: ROUND(size);

	DI(imask);  /* Exclusive control by interrupt disable */

	/* Search FreeQue */
	fq = knl_searchFreeArea(knl_imacb, size);
	if ( fq == &(knl_imacb->freeque) ) {
		/* Insufficient memory */
		EI(imask);
		return (VP)NULL;
	}

	/* There is free area: Split from FreeQue once */
	knl_removeFreeQue(fq);
	area = fq - 1;

	/* Fragments smaller than the minimum fragment size are handed
	   out together with the allocation; split only when the
	   remainder can still hold a fragment plus its QUEUE header */
	if ( FreeSize(fq) - size >= MIN_FRAGMENT + sizeof(QUEUE) ) {
		QUEUE *rest = (QUEUE*)((VB*)(area + 1) + size);
		knl_insertAreaQue(area, rest);
		knl_appendFreeArea(knl_imacb, rest);
	}
	setAreaFlag(area, AREA_USE);

	EI(imask);
	return (VP)fq;
}
/*
 * Fragment and allocate
 */
static void *allocate( LIST *aq, RAW_U32 size, MACB *_macb )
{
	RAW_U32 surplus = (RAW_U32)AreaSize(aq) - size;

	/* Divide the area only when the tail can still hold a minimum
	   fragment plus its LIST header; smaller tails are allocated
	   together with the request */
	if ( surplus >= MIN_FRAGMENT + sizeof(LIST ) ) {
		LIST *rest = (LIST *)((RAW_S8 *)(aq + 1) + size);
		insertAreaQue(aq, rest);
		/* Register the surplus area in the free queue */
		appendFreeArea(rest, _macb);
	}

	setAreaFlag(aq, AREA_USE);

	/* User memory begins just past the LIST header */
	return (void*)(aq + 1);
}
/*
 * Subdivide and allocate
 */
Inline VP mem_alloc( QUEUE *aq, size_t blksz, IMACB *imacb )
{
	/* Split the remainder off only when it can still carry a minimum
	   fragment plus its QUEUE header; otherwise the whole area,
	   fragment included, goes to the caller */
	if ( (AreaSize(aq) - blksz) >= (MIN_FRAGMENT + sizeof(QUEUE)) ) {
		QUEUE *rest = (QUEUE*)((VB*)(aq + 1) + blksz);
		insertAreaQue(aq, rest);
		/* Remaining area goes back onto the FreeQue */
		appendFreeArea(rest, imacb);
	}

	setAreaFlag(aq, AREA_USE);

	/* User memory begins just past the QUEUE header */
	return (VP)(aq + 1);
}