/** @file
  UEFI Memory pool management functions.

  Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Uefi.h>

#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>
#include <Library/UefiLib.h>

#include "InternalPool.h"
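//
// Overview: a pool allocation is normally laid out as
// [POOL_HEAD][caller data][POOL_TAIL] and is carved out of per-memory-type
// free lists, binned by mPoolSizeTable. Requests too large for the bins,
// guarded requests and "page as pool" requests are served straight from the
// page allocator instead. The public entry points serialize on
// mPoolMemoryLock; the internal worker functions only assert that the lock
// is held.
//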
STATIC EFI_LOCK  mPoolMemoryLock = EFI_INITIALIZE_LOCK_VARIABLE (TPL_NOTIFY);

#define POOL_FREE_SIGNATURE  SIGNATURE_32 ('p', 'f', 'r', '0')
typedef struct {
  UINT32        Signature;
  UINT32        Index;
  LIST_ENTRY    Link;
} POOL_FREE;

#define POOL_HEAD_SIGNATURE      SIGNATURE_32 ('p', 'h', 'd', '0')
#define POOLPAGE_HEAD_SIGNATURE  SIGNATURE_32 ('p', 'h', 'd', '1')
typedef struct {
  UINT32             Signature;
  UINT32             Reserved;
  EFI_MEMORY_TYPE    Type;
  UINTN              Size;
  CHAR8              Data[1];
} POOL_HEAD;

#define SIZE_OF_POOL_HEAD  OFFSET_OF (POOL_HEAD, Data)

#define POOL_TAIL_SIGNATURE  SIGNATURE_32 ('p', 't', 'a', 'l')
typedef struct {
  UINT32    Signature;
  UINT32    Reserved;
  UINTN     Size;
} POOL_TAIL;

#define POOL_OVERHEAD  (SIZE_OF_POOL_HEAD + sizeof (POOL_TAIL))

#define HEAD_TO_TAIL(a)  \
  ((POOL_TAIL *) (((CHAR8 *) (a)) + (a)->Size - sizeof (POOL_TAIL)));

//
// Each element is the sum of the 2 previous ones: this allows us to migrate
// blocks between bins by splitting them up, while not wasting too much memory
// as we would in a strict power-of-2 sequence
//
STATIC CONST UINT16  mPoolSizeTable[] = {
  128, 256, 384, 640, 1024, 1664, 2688, 4352, 7040, 11392, 18432, 29824
};

#define SIZE_TO_LIST(a)  (GetPoolIndexFromSize (a))
#define LIST_TO_SIZE(a)  (mPoolSizeTable[a])

#define MAX_POOL_LIST  (ARRAY_SIZE (mPoolSizeTable))

#define MAX_POOL_SIZE  (MAX_ADDRESS - POOL_OVERHEAD)

//
// Globals
//

#define POOL_SIGNATURE  SIGNATURE_32 ('p', 'l', 's', 't')
typedef struct {
  INTN               Signature;
  UINTN              Used;
  EFI_MEMORY_TYPE    MemoryType;
  LIST_ENTRY         FreeList[MAX_POOL_LIST];
  LIST_ENTRY         Link;
} POOL;

//
// Pool header for each memory type.
//
POOL  mPoolHead[MAX_MEMORY_TYPE];
STATIC_ASSERT (
  MAX_MEMORY_TYPE > EfiMaxMemoryType,
  "PcdMaxMemoryType must be greater than EfiMaxMemoryType"
  );

STATIC BOOLEAN  mIsCore;

//
// List of pool headers to search for the appropriate memory type.
//
LIST_ENTRY  mPoolHeadList = INITIALIZE_LIST_HEAD_VARIABLE (mPoolHeadList);

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested.
                        (plus pool head/tail overhead)

  @return Address of pool head.

**/
STATIC
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  )
{
  if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  Size = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
}

/**
  Get the page base address according to pool head address.

  @param[in]  Memory    Head address of pool to free.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested.
                        (plus pool head/tail overhead)

  @return The base address of the pages backing the pool allocation.

**/
STATIC
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  )
{
  if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard. We need to exactly undo the
  // addition done in AdjustPoolHeadA, because we may not have allocated the
  // pool head on the first allocated page: we are aligned to the tail and,
  // on some architectures, the runtime page allocation granularity is larger
  // than one page. So we allocate more pages than we need and put the pool
  // head somewhere past the first page.
  //
  return (VOID *)(UINTN)(Memory + Size - EFI_PAGES_TO_SIZE (NoPages));
}
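//
// Illustrative example (numbers assume 4 KiB pages and the tail-guard
// placement, i.e. BIT7 of PcdHeapGuardPropertyMask clear): with NoPages == 1
// and an already 8-byte-aligned Size of 240, AdjustPoolHeadA returns
// Memory + 4096 - 240, so the allocation ends flush against the guard page
// that follows it. AdjustPoolHeadF applies the inverse computation to that
// pool head address to recover the page base that is handed back to the
// page allocator on free.
//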
/**
  Get pool size table index from the specified size.

  @param  Size          The specified size to get index from pool table.

  @return The index of pool size table.

**/
STATIC
UINTN
GetPoolIndexFromSize (
  UINTN  Size
  )
{
  UINTN  Index;

  for (Index = 0; Index < MAX_POOL_LIST; Index++) {
    if (mPoolSizeTable[Index] >= Size) {
      return Index;
    }
  }

  return MAX_POOL_LIST;
}

/**
  Called to initialize the pool.

  @param  IsCore        TRUE if the caller is the core memory allocator,
                        FALSE otherwise.

**/
VOID
CoreInitializePool (
  IN BOOLEAN  IsCore
  )
{
  UINTN  Type;
  UINTN  Index;

  mIsCore = IsCore;

  for (Type = 0; Type < EfiMaxMemoryType; Type++) {
    mPoolHead[Type].Signature  = 0;
    mPoolHead[Type].Used       = 0;
    mPoolHead[Type].MemoryType = (EFI_MEMORY_TYPE)Type;
    for (Index = 0; Index < MAX_POOL_LIST; Index++) {
      InitializeListHead (&mPoolHead[Type].FreeList[Index]);
    }
  }
}

/**
  Look up pool head for specified memory type.

  @param  MemoryType    Memory type for which the pool head is looked up.

  @return Pointer to the corresponding pool head.

**/
STATIC
POOL *
LookupPoolHead (
  IN EFI_MEMORY_TYPE  MemoryType
  )
{
  LIST_ENTRY  *Link;
  POOL        *Pool;
  UINTN       Index;

  if (!mIsCore) {
    if ((UINT32)MemoryType < MAX_MEMORY_TYPE) {
      return &mPoolHead[MemoryType];
    }

    return NULL;
  }

  if ((UINT32)MemoryType < EfiMaxMemoryType) {
    return &mPoolHead[MemoryType];
  }

  //
  // MemoryType values in the range 0x80000000..0xFFFFFFFF are reserved for use by UEFI
  // OS loaders that are provided by operating system vendors.
  // MemoryType values in the range 0x70000000..0x7FFFFFFF are reserved for OEM use.
  //
  if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
    for (Link = mPoolHeadList.ForwardLink; Link != &mPoolHeadList; Link = Link->ForwardLink) {
      Pool = CR (Link, POOL, Link, POOL_SIGNATURE);
      if (Pool->MemoryType == MemoryType) {
        return Pool;
      }
    }

    Pool = CoreAllocatePoolI (EfiBootServicesData, sizeof (POOL), FALSE);
    if (Pool == NULL) {
      return NULL;
    }

    Pool->Signature  = POOL_SIGNATURE;
    Pool->Used       = 0;
    Pool->MemoryType = MemoryType;
    for (Index = 0; Index < MAX_POOL_LIST; Index++) {
      InitializeListHead (&Pool->FreeList[Index]);
    }

    InsertHeadList (&mPoolHeadList, &Pool->Link);

    return Pool;
  }

  return NULL;
}
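//
// Example of the dynamic path above: the first core allocation with an
// OEM-defined type such as 0x70000001 finds no matching POOL record on
// mPoolHeadList, so a new record is allocated from EfiBootServicesData,
// initialized with empty free lists and linked onto mPoolHeadList. Later
// allocations of that type reuse it, and CoreFreePoolI removes it again
// once its Used count drops back to zero.
//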
/**
  Allocate pool of a particular type.

  @param  PoolType              Type of pool to allocate
  @param  Size                  The amount of pool to allocate
  @param  Buffer                The address to return a pointer to the allocated
                                pool

  @retval EFI_INVALID_PARAMETER Buffer is NULL.
                                PoolType is in the range EfiMaxMemoryType..0x6FFFFFFF.
                                PoolType is EfiConventionalMemory, EfiPersistentMemory
                                or EfiUnacceptedMemoryType.
  @retval EFI_OUT_OF_RESOURCES  Size exceeds max pool size or allocation failed.
  @retval EFI_SUCCESS           Pool successfully allocated.

**/
EFI_STATUS
EFIAPI
CoreInternalAllocatePool (
  IN EFI_MEMORY_TYPE  PoolType,
  IN UINTN            Size,
  OUT VOID            **Buffer
  )
{
  EFI_STATUS  Status;
  BOOLEAN     NeedGuard;

  //
  // If it's not a valid type, fail it
  //
  if (mIsCore &&
      (((PoolType >= EfiMaxMemoryType) && (PoolType < MEMORY_TYPE_OEM_RESERVED_MIN)) ||
       (PoolType == EfiConventionalMemory) || (PoolType == EfiPersistentMemory) ||
       (PoolType == EfiUnacceptedMemoryType)))
  {
    return EFI_INVALID_PARAMETER;
  }

  if (Buffer == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  *Buffer = NULL;

  //
  // If size is too large, fail it
  // Based on the EFI spec, return status of EFI_OUT_OF_RESOURCES
  //
  if (Size > MAX_POOL_SIZE) {
    return EFI_OUT_OF_RESOURCES;
  }

  NeedGuard = IsPoolTypeToGuard (PoolType) && !mOnGuarding;

  //
  // Acquire the memory lock and make the allocation
  //
  Status = EfiAcquireLockOrFail (&mPoolMemoryLock);
  if (EFI_ERROR (Status)) {
    return EFI_OUT_OF_RESOURCES;
  }

  *Buffer = CoreAllocatePoolI (PoolType, Size, NeedGuard);
  EfiReleaseLock (&mPoolMemoryLock);
  return (*Buffer != NULL) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
}

/**
  Allocate pool of a particular type.

  @param  PoolType              Type of pool to allocate
  @param  Size                  The amount of pool to allocate
  @param  Buffer                The address to return a pointer to the allocated
                                pool

  @retval EFI_INVALID_PARAMETER Buffer is NULL.
                                PoolType is in the range EfiMaxMemoryType..0x6FFFFFFF.
                                PoolType is EfiConventionalMemory, EfiPersistentMemory
                                or EfiUnacceptedMemoryType.
  @retval EFI_OUT_OF_RESOURCES  Size exceeds max pool size or allocation failed.
  @retval EFI_SUCCESS           Pool successfully allocated.

**/
EFI_STATUS
EFIAPI
CoreAllocatePool (
  IN EFI_MEMORY_TYPE  PoolType,
  IN UINTN            Size,
  OUT VOID            **Buffer
  )
{
  EFI_STATUS  Status;

  Status = CoreInternalAllocatePool (PoolType, Size, Buffer);
  if (!EFI_ERROR (Status)) {
    CoreUpdateProfile (
      (EFI_PHYSICAL_ADDRESS)(UINTN)RETURN_ADDRESS (0),
      MemoryProfileActionAllocatePool,
      PoolType,
      Size,
      *Buffer,
      NULL
      );
    InstallMemoryAttributesTableOnMemoryAllocation (PoolType);
  }

  return Status;
}
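//
// Illustrative call pattern (sketch only; in the DXE core these functions
// back the gBS->AllocatePool()/FreePool() boot services):
//
//   VOID        *Buffer;
//   EFI_STATUS  Status;
//
//   Status = CoreAllocatePool (EfiBootServicesData, 200, &Buffer);
//   if (!EFI_ERROR (Status)) {
//     //
//     // ... use the 200-byte buffer ...
//     //
//     CoreFreePool (Buffer);
//   }
//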
/**
  Internal function to allocate pool of a particular type.
  Caller must have the memory lock held.

  @param  PoolType              Type of pool to allocate
  @param  Size                  The amount of pool to allocate
  @param  NeedGuard             Flag to indicate Guard page is needed or not

  @return The allocated pool, or NULL

**/
VOID *
CoreAllocatePoolI (
  IN EFI_MEMORY_TYPE  PoolType,
  IN UINTN            Size,
  IN BOOLEAN          NeedGuard
  )
{
  POOL       *Pool;
  POOL_FREE  *Free;
  POOL_HEAD  *Head;
  POOL_TAIL  *Tail;
  CHAR8      *NewPage;
  VOID       *Buffer;
  UINTN      Index;
  UINTN      FSize;
  UINTN      Offset, MaxOffset;
  UINTN      NoPages;
  UINTN      Granularity;
  BOOLEAN    HasPoolTail;
  BOOLEAN    PageAsPool;

  ASSERT_LOCKED (&mPoolMemoryLock);

  if (mIsCore &&
      ((PoolType == EfiReservedMemoryType) ||
       (PoolType == EfiACPIMemoryNVS) ||
       (PoolType == EfiRuntimeServicesCode) ||
       (PoolType == EfiRuntimeServicesData)))
  {
    Granularity = RUNTIME_PAGE_ALLOCATION_GRANULARITY;
  } else {
    Granularity = DEFAULT_PAGE_ALLOCATION_GRANULARITY;
  }

  //
  // The heap guard system does not support non-EFI_PAGE_SIZE alignments.
  // Architectures that require larger RUNTIME_PAGE_ALLOCATION_GRANULARITY
  // will have the runtime memory regions unguarded. OSes do not map guard
  // pages anyway, so this is a minimal loss. Not guarding prevents
  // alignment mismatches.
  //
  if (Granularity != EFI_PAGE_SIZE) {
    NeedGuard = FALSE;
  }

  //
  // Adjust the size by the pool header & tail overhead
  //
  HasPoolTail = !(NeedGuard &&
                  ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
  PageAsPool = (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) && !mOnGuarding);

  //
  // Adjusting the Size to be of proper alignment so that
  // we don't get an unaligned access fault later when
  // the POOL_TAIL is being initialized
  //
  Size = ALIGN_VARIABLE (Size);

  Size += POOL_OVERHEAD;
  Index = SIZE_TO_LIST (Size);
  Pool  = LookupPoolHead (PoolType);
  if (Pool == NULL) {
    return NULL;
  }

  Head = NULL;

  //
  // If allocation is over max size, just allocate pages for the request
  // (slow)
  //
  if ((Index >= SIZE_TO_LIST (Granularity)) || NeedGuard || PageAsPool) {
    if (!HasPoolTail) {
      Size -= sizeof (POOL_TAIL);
    }

    NoPages  = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
    NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (Granularity) - 1);
    Head     = CoreAllocatePoolPagesI (PoolType, NoPages, Granularity, NeedGuard);
    if (NeedGuard) {
      Head = AdjustPoolHeadA ((EFI_PHYSICAL_ADDRESS)(UINTN)Head, NoPages, Size);
    }

    goto Done;
  }

  //
  // If there's no free pool in the proper list size, go get some more pages
  //
  if (IsListEmpty (&Pool->FreeList[Index])) {
    Offset    = LIST_TO_SIZE (Index);
    MaxOffset = Granularity;

    //
    // Check the bins holding larger blocks, and carve one up if needed
    //
    while (++Index < SIZE_TO_LIST (Granularity)) {
      if (!IsListEmpty (&Pool->FreeList[Index])) {
        Free = CR (Pool->FreeList[Index].ForwardLink, POOL_FREE, Link, POOL_FREE_SIGNATURE);
        RemoveEntryList (&Free->Link);
        NewPage   = (VOID *)Free;
        MaxOffset = LIST_TO_SIZE (Index);
        goto Carve;
      }
    }

    //
    // Get another page
    //
    NewPage = CoreAllocatePoolPagesI (
                PoolType,
                EFI_SIZE_TO_PAGES (Granularity),
                Granularity,
                NeedGuard
                );
    if (NewPage == NULL) {
      goto Done;
    }

    //
    // Serve the allocation request from the head of the allocated block
    //
Carve:
    Head = (POOL_HEAD *)NewPage;

    //
    // Carve up remaining space into free pool blocks
    //
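    //
    // Worked example (illustrative, assuming 4 KiB granularity and a 64-bit
    // build where POOL_OVERHEAD is 40 bytes): a 200-byte request becomes
    // Size == 240 and maps to the 256-byte bin, so Offset starts at 256.
    // Starting from a fresh page (MaxOffset == 4096), the loop below splits
    // the remaining 3840 bytes into the free lists as one 2688-byte, one
    // 1024-byte and one 128-byte block, walking the size table downwards so
    // that ASSERT (Offset == MaxOffset) holds when it finishes.
    //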
    Index--;
    while (Offset < MaxOffset) {
      ASSERT (Index < MAX_POOL_LIST);
      FSize = LIST_TO_SIZE (Index);

      while (Offset + FSize <= MaxOffset) {
        Free            = (POOL_FREE *)&NewPage[Offset];
        Free->Signature = POOL_FREE_SIGNATURE;
        Free->Index     = (UINT32)Index;
        InsertHeadList (&Pool->FreeList[Index], &Free->Link);
        Offset += FSize;
      }

      Index -= 1;
    }

    ASSERT (Offset == MaxOffset);
    goto Done;
  }

  //
  // Remove entry from free pool list
  //
  Free = CR (Pool->FreeList[Index].ForwardLink, POOL_FREE, Link, POOL_FREE_SIGNATURE);
  RemoveEntryList (&Free->Link);
  Head = (POOL_HEAD *)Free;

Done:
  Buffer = NULL;

  if (Head != NULL) {
    //
    // Account the allocation
    //
    Pool->Used += Size;

    //
    // If we have a pool buffer, fill in the header & tail info
    //
    Head->Signature = (PageAsPool) ? POOLPAGE_HEAD_SIGNATURE : POOL_HEAD_SIGNATURE;
    Head->Size      = Size;
    Head->Type      = (EFI_MEMORY_TYPE)PoolType;
    Buffer          = Head->Data;

    if (HasPoolTail) {
      Tail            = HEAD_TO_TAIL (Head);
      Tail->Signature = POOL_TAIL_SIGNATURE;
      Tail->Size      = Size;

      Size -= POOL_OVERHEAD;
    } else {
      Size -= SIZE_OF_POOL_HEAD;
    }

    DEBUG_CLEAR_MEMORY (Buffer, Size);

    DEBUG ((
      DEBUG_POOL,
      "AllocatePoolI: Type %x, Addr %p (len %lx) %,ld\n",
      PoolType,
      Buffer,
      (UINT64)Size,
      (UINT64)Pool->Used
      ));
  } else {
    DEBUG ((DEBUG_ERROR | DEBUG_POOL, "AllocatePool: failed to allocate %ld bytes\n", (UINT64)Size));
  }

  return Buffer;
}

/**
  Frees pool.

  @param  Buffer                The allocated pool entry to free
  @param  PoolType              Pointer to pool type

  @retval EFI_INVALID_PARAMETER Buffer is not a valid value.
  @retval EFI_SUCCESS           Pool successfully freed.

**/
EFI_STATUS
EFIAPI
CoreInternalFreePool (
  IN VOID              *Buffer,
  OUT EFI_MEMORY_TYPE  *PoolType OPTIONAL
  )
{
  EFI_STATUS  Status;

  if (Buffer == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  EfiAcquireLock (&mPoolMemoryLock);
  Status = CoreFreePoolI (Buffer, PoolType);
  EfiReleaseLock (&mPoolMemoryLock);
  return Status;
}

/**
  Frees pool.

  @param  Buffer                The allocated pool entry to free

  @retval EFI_INVALID_PARAMETER Buffer is not a valid value.
  @retval EFI_SUCCESS           Pool successfully freed.

**/
EFI_STATUS
EFIAPI
CoreFreePool (
  IN VOID  *Buffer
  )
{
  EFI_STATUS       Status;
  EFI_MEMORY_TYPE  PoolType;

  Status = CoreInternalFreePool (Buffer, &PoolType);
  if (!EFI_ERROR (Status)) {
    CoreUpdateProfile (
      (EFI_PHYSICAL_ADDRESS)(UINTN)RETURN_ADDRESS (0),
      MemoryProfileActionFreePool,
      PoolType,
      0,
      Buffer,
      NULL
      );
    InstallMemoryAttributesTableOnMemoryAllocation (PoolType);
  }

  return Status;
}

/**
  Internal function to free a pool entry.
  Caller must have the memory lock held

  @param  Buffer                The allocated pool entry to free
  @param  PoolType              Pointer to pool type

  @retval EFI_INVALID_PARAMETER Buffer not valid
  @retval EFI_SUCCESS           Buffer successfully freed.

**/
EFI_STATUS
CoreFreePoolI (
  IN VOID              *Buffer,
  OUT EFI_MEMORY_TYPE  *PoolType OPTIONAL
  )
{
  POOL       *Pool;
  POOL_HEAD  *Head;
  POOL_TAIL  *Tail;
  POOL_FREE  *Free;
  UINTN      Index;
  UINTN      NoPages;
  UINTN      Size;
  CHAR8      *NewPage;
  UINTN      Offset;
  BOOLEAN    AllFree;
  UINTN      Granularity;
  BOOLEAN    IsGuarded;
  BOOLEAN    HasPoolTail;
  BOOLEAN    PageAsPool;

  ASSERT (Buffer != NULL);
  //
  // Get the head & tail of the pool entry
  //
  Head = BASE_CR (Buffer, POOL_HEAD, Data);
  ASSERT (Head != NULL);

  if ((Head->Signature != POOL_HEAD_SIGNATURE) &&
      (Head->Signature != POOLPAGE_HEAD_SIGNATURE))
  {
    ASSERT (
      Head->Signature == POOL_HEAD_SIGNATURE ||
      Head->Signature == POOLPAGE_HEAD_SIGNATURE
      );
    return EFI_INVALID_PARAMETER;
  }

  IsGuarded = IsPoolTypeToGuard (Head->Type) &&
              IsMemoryGuarded ((EFI_PHYSICAL_ADDRESS)(UINTN)Head);
  HasPoolTail = !(IsGuarded &&
                  ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
  PageAsPool = (Head->Signature == POOLPAGE_HEAD_SIGNATURE);

  if (HasPoolTail) {
    Tail = HEAD_TO_TAIL (Head);
    ASSERT (Tail != NULL);

    //
    // Debug
    //
    ASSERT (Tail->Signature == POOL_TAIL_SIGNATURE);
    ASSERT (Head->Size == Tail->Size);

    if (Tail->Signature != POOL_TAIL_SIGNATURE) {
      return EFI_INVALID_PARAMETER;
    }

    if (Head->Size != Tail->Size) {
      return EFI_INVALID_PARAMETER;
    }
  }

  ASSERT_LOCKED (&mPoolMemoryLock);

  //
  // Determine the pool type and account for it
  //
  Size = Head->Size;
  Pool = LookupPoolHead (Head->Type);
  if (Pool == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  Pool->Used -= Size;
  DEBUG ((DEBUG_POOL, "FreePool: %p (len %lx) %,ld\n", Head->Data, (UINT64)(Head->Size - POOL_OVERHEAD), (UINT64)Pool->Used));

  if (mIsCore &&
      ((Head->Type == EfiReservedMemoryType) ||
       (Head->Type == EfiACPIMemoryNVS) ||
       (Head->Type == EfiRuntimeServicesCode) ||
       (Head->Type == EfiRuntimeServicesData)))
  {
    Granularity = RUNTIME_PAGE_ALLOCATION_GRANULARITY;
  } else {
    Granularity = DEFAULT_PAGE_ALLOCATION_GRANULARITY;
  }

  if (PoolType != NULL) {
    *PoolType = Head->Type;
  }

  //
  // Determine the pool list
  //
  Index = SIZE_TO_LIST (Size);
  DEBUG_CLEAR_MEMORY (Head, Size);

  //
  // If it's not on the list, it must be pool pages
  //
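  //
  // Guarded allocations, "page as pool" allocations and anything that did
  // not fit a bin were served straight from the page allocator in
  // CoreAllocatePoolI, so they are returned the same way: NoPages is
  // recomputed with the same rounding used at allocation time, and for
  // guarded entries AdjustPoolHeadF recovers the page base address that
  // AdjustPoolHeadA had offset towards the tail guard.
  //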
  if ((Index >= SIZE_TO_LIST (Granularity)) || IsGuarded || PageAsPool) {
    //
    // Return the memory pages back to free memory
    //
    NoPages  = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
    NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (Granularity) - 1);
    if (IsGuarded) {
      Head = AdjustPoolHeadF ((EFI_PHYSICAL_ADDRESS)(UINTN)Head, NoPages, Size);
      CoreFreePoolPagesWithGuard (
        Pool->MemoryType,
        (EFI_PHYSICAL_ADDRESS)(UINTN)Head,
        NoPages
        );
    } else {
      CoreFreePoolPagesI (
        Pool->MemoryType,
        (EFI_PHYSICAL_ADDRESS)(UINTN)Head,
        NoPages
        );
    }
  } else {
    //
    // Put the pool entry onto the free pool list
    //
    Free = (POOL_FREE *)Head;
    ASSERT (Free != NULL);
    Free->Signature = POOL_FREE_SIGNATURE;
    Free->Index     = (UINT32)Index;
    InsertHeadList (&Pool->FreeList[Index], &Free->Link);

    //
    // See if all the pool entries in the same page as Free are freed pool
    // entries
    //
    NewPage = (CHAR8 *)((UINTN)Free & ~(Granularity - 1));
    Free    = (POOL_FREE *)&NewPage[0];
    ASSERT (Free != NULL);

    if (Free->Signature == POOL_FREE_SIGNATURE) {
      AllFree = TRUE;
      Offset  = 0;

      while ((Offset < Granularity) && (AllFree)) {
        Free = (POOL_FREE *)&NewPage[Offset];
        ASSERT (Free != NULL);
        if (Free->Signature != POOL_FREE_SIGNATURE) {
          AllFree = FALSE;
        }

        Offset += LIST_TO_SIZE (Free->Index);
      }

      if (AllFree) {
        //
        // All of the pool entries in the same page as Free are free pool
        // entries.
        // Remove all of these pool entries from the free pool lists.
        //
        Free = (POOL_FREE *)&NewPage[0];
        ASSERT (Free != NULL);
        Offset = 0;

        while (Offset < Granularity) {
          Free = (POOL_FREE *)&NewPage[Offset];
          ASSERT (Free != NULL);
          RemoveEntryList (&Free->Link);
          Offset += LIST_TO_SIZE (Free->Index);
        }

        //
        // Free the page
        //
        CoreFreePoolPagesI (
          Pool->MemoryType,
          (EFI_PHYSICAL_ADDRESS)(UINTN)NewPage,
          EFI_SIZE_TO_PAGES (Granularity)
          );
      }
    }
  }

  //
  // If this is an OS/OEM specific memory type, then check to see if the last
  // portion of that memory type has been freed. If it has, then free the
  // list entry for that memory type
  //
  if (((UINT32)Pool->MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) && (Pool->Used == 0)) {
    RemoveEntryList (&Pool->Link);
    CoreFreePoolI (Pool, NULL);
  }

  return EFI_SUCCESS;
}

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]  GuardType   Specify the sub-type(s) of Heap Guard.

  @return TRUE/FALSE.

**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8  GuardType
  )
{
  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
}
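//
// Note: EfiMaxMemoryType and AllocateAnyPages act as wildcards here. With
// MemoryType == EfiMaxMemoryType the per-type TestBit computed in
// IsMemoryTypeToGuard is all ones, so for sub-types other than the page and
// pool guards (for example GUARD_HEAP_TYPE_FREED, as used in
// CoreAllocatePoolI) the result depends only on mIsCore and the requested
// bits of PcdHeapGuardPropertyMask.
//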
/**
  Check to see if pool of the given memory type should be guarded or not.

  @param[in]  MemoryType    Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.

**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  )
{
  return IsMemoryTypeToGuard (
           MemoryType,
           AllocateAnyPages,
           GUARD_HEAP_TYPE_POOL
           );
}

/**
  Check to see if memory of the given type and allocation type should be
  guarded or not.

  @param[in]  MemoryType    Memory type to check.
  @param[in]  AllocateType  Allocation type to check.
  @param[in]  PageOrPool    Indicate a page allocation or pool allocation.

  @return TRUE  The given type of memory should be guarded.
  @return FALSE The given type of memory should not be guarded.

**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType,
  IN UINT8              PageOrPool
  )
{
  UINT64  TestBit;
  UINT64  ConfigBit;

  if (!mIsCore) {
    return FALSE;
  }

  if (AllocateType == AllocateAddress) {
    return FALSE;
  }

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
    return FALSE;
  }

  if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
    ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
  } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
    ConfigBit = PcdGet64 (PcdHeapGuardPageType);
  } else {
    ConfigBit = (UINT64)-1;
  }

  if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
    TestBit = BIT63;
  } else if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
    TestBit = BIT62;
  } else if (MemoryType < EfiMaxMemoryType) {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}
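//
// Example of the bit mapping above (illustrative): EfiBootServicesData is
// memory type 4, so guarding its pool allocations requires BIT4 to be set in
// PcdHeapGuardPoolType in addition to the pool-guard bit
// (GUARD_HEAP_TYPE_POOL) in PcdHeapGuardPropertyMask; all OS-reserved types
// share BIT63 and all OEM-reserved types share BIT62.
//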