/**
  Internal function to free a pool entry.
  Caller must have the memory lock held

  @param  Buffer                 The allocated pool entry to free

  @retval EFI_INVALID_PARAMETER  Buffer not valid
  @retval EFI_SUCCESS            Buffer successfully freed.

**/
EFI_STATUS
CoreFreePoolI (
  IN VOID  *Buffer
  )
{
  POOL       *Pool;
  POOL_HEAD  *Head;
  POOL_TAIL  *Tail;
  POOL_FREE  *Free;
  UINTN      Index;
  UINTN      NoPages;
  UINTN      Size;
  CHAR8      *NewPage;
  UINTN      FSize;
  UINTN      Offset;
  BOOLEAN    AllFree;

  ASSERT (Buffer != NULL);
  //
  // Get the head & tail of the pool entry
  //
  Head = CR (Buffer, POOL_HEAD, Data, POOL_HEAD_SIGNATURE);
  ASSERT (Head != NULL);

  if (Head->Signature != POOL_HEAD_SIGNATURE) {
    return EFI_INVALID_PARAMETER;
  }

  Tail = HEAD_TO_TAIL (Head);
  ASSERT (Tail != NULL);

  //
  // Debug
  //
  ASSERT (Tail->Signature == POOL_TAIL_SIGNATURE);
  ASSERT (Head->Size == Tail->Size);
  ASSERT_LOCKED (&gMemoryLock);

  if (Tail->Signature != POOL_TAIL_SIGNATURE) {
    return EFI_INVALID_PARAMETER;
  }

  if (Head->Size != Tail->Size) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Determine the pool type and account for it
  //
  Size = Head->Size;
  Pool = LookupPoolHead (Head->Type);
  if (Pool == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  Pool->Used -= Size;
  DEBUG ((DEBUG_POOL, "FreePool: %p (len %lx) %,ld\n", Head->Data, (UINT64)(Head->Size - POOL_OVERHEAD), (UINT64) Pool->Used));

  //
  // Determine the pool list
  //
  Index = SIZE_TO_LIST (Size);
  DEBUG_CLEAR_MEMORY (Head, Size);

  //
  // If it's not on the list, it must be pool pages
  //
  if (Index >= MAX_POOL_LIST) {
    //
    // Return the memory pages back to free memory
    //
    NoPages = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (DEFAULT_PAGE_ALLOCATION) - 1;
    NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (DEFAULT_PAGE_ALLOCATION) - 1);
    CoreFreePoolPages ((EFI_PHYSICAL_ADDRESS) (UINTN) Head, NoPages);

  } else {
    //
    // Put the pool entry onto the free pool list
    //
    Free = (POOL_FREE *) Head;
    ASSERT (Free != NULL);
    Free->Signature = POOL_FREE_SIGNATURE;
    Free->Index     = (UINT32)Index;
    InsertHeadList (&Pool->FreeList[Index], &Free->Link);

    //
    // See if all the pool entries in the same page as Free are freed pool
    // entries
    //
    NewPage = (CHAR8 *)((UINTN)Free & ~((DEFAULT_PAGE_ALLOCATION) - 1));
    Free    = (POOL_FREE *) &NewPage[0];
    ASSERT (Free != NULL);

    if (Free->Signature == POOL_FREE_SIGNATURE) {

      Index = Free->Index;

      AllFree = TRUE;
      Offset  = 0;

      while ((Offset < DEFAULT_PAGE_ALLOCATION) && (AllFree)) {
        FSize = LIST_TO_SIZE (Index);
        while (Offset + FSize <= DEFAULT_PAGE_ALLOCATION) {
          Free = (POOL_FREE *) &NewPage[Offset];
          ASSERT (Free != NULL);
          if (Free->Signature != POOL_FREE_SIGNATURE) {
            AllFree = FALSE;
          }
          Offset += FSize;
        }
        Index -= 1;
      }

      if (AllFree) {
        //
        // All of the pool entries in the same page as Free are free pool
        // entries.
        // Remove all of these pool entries from the free pool lists.
        //
        Free = (POOL_FREE *) &NewPage[0];
        ASSERT (Free != NULL);
        Index  = Free->Index;
        Offset = 0;

        while (Offset < DEFAULT_PAGE_ALLOCATION) {
          FSize = LIST_TO_SIZE (Index);
          while (Offset + FSize <= DEFAULT_PAGE_ALLOCATION) {
            Free = (POOL_FREE *) &NewPage[Offset];
            ASSERT (Free != NULL);
            RemoveEntryList (&Free->Link);
            Offset += FSize;
          }
          Index -= 1;
        }

        //
        // Free the page
        //
        CoreFreePoolPages ((EFI_PHYSICAL_ADDRESS) (UINTN)NewPage, EFI_SIZE_TO_PAGES (DEFAULT_PAGE_ALLOCATION));
      }
    }
  }

  //
  // If this is an OS specific memory type, then check to see if the last
  // portion of that memory type has been freed.  If it has, then free the
  // list entry for that memory type
  //
  if ((INT32)Pool->MemoryType < 0 && Pool->Used == 0) {
    RemoveEntryList (&Pool->Link);
    CoreFreePoolI (Pool);
  }

  return EFI_SUCCESS;
}
/**
  Internal function to allocate pool of a particular type.
  Caller must have the memory lock held

  @param PoolType               Type of pool to allocate
  @param Size                   The amount of pool to allocate
  @param NeedGuard              Flag to indicate Guard page is needed or not

  @return The allocate pool, or NULL

**/
VOID *
CoreAllocatePoolI (
  IN EFI_MEMORY_TYPE  PoolType,
  IN UINTN            Size,
  IN BOOLEAN          NeedGuard
  )
{
  POOL       *Pool;
  POOL_FREE  *Free;
  POOL_HEAD  *Head;
  POOL_TAIL  *Tail;
  CHAR8      *NewPage;
  VOID       *Buffer;
  UINTN      Index;
  UINTN      FSize;
  UINTN      Offset, MaxOffset;
  UINTN      NoPages;
  UINTN      Granularity;
  BOOLEAN    HasPoolTail;

  ASSERT_LOCKED (&mPoolMemoryLock);

  if (PoolType == EfiACPIReclaimMemory   ||
      PoolType == EfiACPIMemoryNVS       ||
      PoolType == EfiRuntimeServicesCode ||
      PoolType == EfiRuntimeServicesData) {

    Granularity = RUNTIME_PAGE_ALLOCATION_GRANULARITY;
  } else {
    Granularity = DEFAULT_PAGE_ALLOCATION_GRANULARITY;
  }

  //
  // Adjust the size by the pool header & tail overhead
  //
  HasPoolTail = !(NeedGuard &&
                  ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));

  //
  // Adjusting the Size to be of proper alignment so that
  // we don't get an unaligned access fault later when
  // pool_Tail is being initialized
  //
  Size = ALIGN_VARIABLE (Size);

  Size += POOL_OVERHEAD;
  Index = SIZE_TO_LIST (Size);
  Pool  = LookupPoolHead (PoolType);
  if (Pool == NULL) {
    return NULL;
  }
  Head = NULL;

  //
  // If allocation is over max size, just allocate pages for the request
  // (slow)
  //
  if (Index >= SIZE_TO_LIST (Granularity) || NeedGuard) {
    if (!HasPoolTail) {
      Size -= sizeof (POOL_TAIL);
    }
    NoPages = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
    NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (Granularity) - 1);
    Head = CoreAllocatePoolPagesI (PoolType, NoPages, Granularity, NeedGuard);
    if (NeedGuard) {
      Head = AdjustPoolHeadA ((EFI_PHYSICAL_ADDRESS)(UINTN)Head, NoPages, Size);
    }
    goto Done;
  }

  //
  // If there's no free pool in the proper list size, go get some more pages
  //
  if (IsListEmpty (&Pool->FreeList[Index])) {

    Offset    = LIST_TO_SIZE (Index);
    MaxOffset = Granularity;

    //
    // Check the bins holding larger blocks, and carve one up if needed
    //
    while (++Index < SIZE_TO_LIST (Granularity)) {
      if (!IsListEmpty (&Pool->FreeList[Index])) {
        Free = CR (Pool->FreeList[Index].ForwardLink, POOL_FREE, Link, POOL_FREE_SIGNATURE);
        RemoveEntryList (&Free->Link);
        NewPage   = (VOID *) Free;
        MaxOffset = LIST_TO_SIZE (Index);
        goto Carve;
      }
    }

    //
    // Get another page
    //
    NewPage = CoreAllocatePoolPagesI (PoolType, EFI_SIZE_TO_PAGES (Granularity), Granularity, NeedGuard);
    if (NewPage == NULL) {
      goto Done;
    }

    //
    // Serve the allocation request from the head of the allocated block
    //
Carve:
    Head = (POOL_HEAD *) NewPage;

    //
    // Carve up remaining space into free pool blocks
    //
    Index--;
    while (Offset < MaxOffset) {
      ASSERT (Index < MAX_POOL_LIST);
      FSize = LIST_TO_SIZE (Index);

      while (Offset + FSize <= MaxOffset) {
        Free            = (POOL_FREE *) &NewPage[Offset];
        Free->Signature = POOL_FREE_SIGNATURE;
        Free->Index     = (UINT32)Index;
        InsertHeadList (&Pool->FreeList[Index], &Free->Link);
        Offset += FSize;
      }

      Index -= 1;
    }

    ASSERT (Offset == MaxOffset);
    goto Done;
  }

  //
  // Remove entry from free pool list
  //
  Free = CR (Pool->FreeList[Index].ForwardLink, POOL_FREE, Link, POOL_FREE_SIGNATURE);
  RemoveEntryList (&Free->Link);

  Head = (POOL_HEAD *) Free;

Done:
  Buffer = NULL;

  if (Head != NULL) {
    //
    // Account the allocation
    //
    Pool->Used += Size;

    //
    // If we have a pool buffer, fill in the header & tail info
    //
    Head->Signature = POOL_HEAD_SIGNATURE;
    Head->Size      = Size;
    Head->Type      = (EFI_MEMORY_TYPE) PoolType;
    Buffer          = Head->Data;

    if (HasPoolTail) {
      Tail            = HEAD_TO_TAIL (Head);
      Tail->Signature = POOL_TAIL_SIGNATURE;
      Tail->Size      = Size;

      Size -= POOL_OVERHEAD;
    } else {
      Size -= SIZE_OF_POOL_HEAD;
    }

    DEBUG_CLEAR_MEMORY (Buffer, Size);

    DEBUG ((
      DEBUG_POOL,
      "AllocatePoolI: Type %x, Addr %p (len %lx) %,ld\n", PoolType,
      Buffer, (UINT64)Size, (UINT64) Pool->Used
      ));
  } else {
    DEBUG ((DEBUG_ERROR | DEBUG_POOL, "AllocatePool: failed to allocate %ld bytes\n", (UINT64) Size));
  }

  return Buffer;
}
/**
  Internal function to allocate pool of a particular type.
  Caller must have the memory lock held

  @param PoolType               Type of pool to allocate
  @param Size                   The amount of pool to allocate

  @return The allocate pool, or NULL

**/
VOID *
CoreAllocatePoolI (
  IN EFI_MEMORY_TYPE  PoolType,
  IN UINTN            Size
  )
{
  POOL       *Pool;
  POOL_FREE  *Free;
  POOL_HEAD  *Head;
  POOL_TAIL  *Tail;
  CHAR8      *NewPage;
  VOID       *Buffer;
  UINTN      Index;
  UINTN      FSize;
  UINTN      Offset;
  UINTN      NoPages;

  ASSERT_LOCKED (&gMemoryLock);

  //
  // Adjust the size by the pool header & tail overhead
  //

  //
  // Adjusting the Size to be of proper alignment so that
  // we don't get an unaligned access fault later when
  // pool_Tail is being initialized
  //
  Size = ALIGN_VARIABLE (Size);

  Size += POOL_OVERHEAD;
  Index = SIZE_TO_LIST (Size);
  Pool  = LookupPoolHead (PoolType);
  if (Pool == NULL) {
    return NULL;
  }
  Head = NULL;

  //
  // If allocation is over max size, just allocate pages for the request
  // (slow)
  //
  if (Index >= MAX_POOL_LIST) {
    NoPages = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (DEFAULT_PAGE_ALLOCATION) - 1;
    NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (DEFAULT_PAGE_ALLOCATION) - 1);
    Head = CoreAllocatePoolPages (PoolType, NoPages, DEFAULT_PAGE_ALLOCATION);
    goto Done;
  }

  //
  // If there's no free pool in the proper list size, go get some more pages
  //
  if (IsListEmpty (&Pool->FreeList[Index])) {
    //
    // Get another page
    //
    NewPage = CoreAllocatePoolPages (PoolType, EFI_SIZE_TO_PAGES (DEFAULT_PAGE_ALLOCATION), DEFAULT_PAGE_ALLOCATION);
    if (NewPage == NULL) {
      goto Done;
    }

    //
    // Carve up new page into free pool blocks
    //
    Offset = 0;
    while (Offset < DEFAULT_PAGE_ALLOCATION) {
      ASSERT (Index < MAX_POOL_LIST);
      FSize = LIST_TO_SIZE (Index);

      while (Offset + FSize <= DEFAULT_PAGE_ALLOCATION) {
        Free            = (POOL_FREE *) &NewPage[Offset];
        Free->Signature = POOL_FREE_SIGNATURE;
        Free->Index     = (UINT32)Index;
        InsertHeadList (&Pool->FreeList[Index], &Free->Link);
        Offset += FSize;
      }

      Index -= 1;
    }

    ASSERT (Offset == DEFAULT_PAGE_ALLOCATION);

    Index = SIZE_TO_LIST (Size);
  }

  //
  // Remove entry from free pool list
  //
  Free = CR (Pool->FreeList[Index].ForwardLink, POOL_FREE, Link, POOL_FREE_SIGNATURE);
  RemoveEntryList (&Free->Link);

  Head = (POOL_HEAD *) Free;

Done:
  Buffer = NULL;

  if (Head != NULL) {
    //
    // If we have a pool buffer, fill in the header & tail info
    //
    Head->Signature = POOL_HEAD_SIGNATURE;
    Head->Size      = Size;
    Head->Type      = (EFI_MEMORY_TYPE) PoolType;
    Tail            = HEAD_TO_TAIL (Head);
    Tail->Signature = POOL_TAIL_SIGNATURE;
    Tail->Size      = Size;
    Buffer          = Head->Data;
    DEBUG_CLEAR_MEMORY (Buffer, Size - POOL_OVERHEAD);

    DEBUG ((
      DEBUG_POOL,
      "AllocatePoolI: Type %x, Addr %p (len %lx) %,ld\n", PoolType,
      Buffer, (UINT64)(Size - POOL_OVERHEAD), (UINT64) Pool->Used
      ));

    //
    // Account the allocation
    //
    Pool->Used += Size;

  } else {
    DEBUG ((DEBUG_ERROR | DEBUG_POOL, "AllocatePool: failed to allocate %ld bytes\n", (UINT64) Size));
  }

  return Buffer;
}