diff --git a/MdeModulePkg/Core/PiSmmCore/HeapGuard.c b/MdeModulePkg/Core/PiSmmCore/HeapGuard.c
new file mode 100644
index 0000000000..6fda9ba430
--- /dev/null
+++ b/MdeModulePkg/Core/PiSmmCore/HeapGuard.c
@@ -0,0 +1,1467 @@
+/** @file
+ UEFI Heap Guard functions.
+
+Copyright (c) 2017, Intel Corporation. All rights reserved.
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "HeapGuard.h"
+
+//
+// Global to avoid infinite reentrance of memory allocation when updating
+// page table attributes, which may need allocating pages for new PDE/PTE.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
+
+//
+// Pointer to table tracking the guarded memory with a bitmap, in which '1'
+// indicates the memory is guarded. '0' might be free memory or a Guard
+// page itself, depending on the status of the memory adjacent to it.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
+
+//
+// Current depth level of map table pointed by mGuardedMemoryMap.
+// mMapLevel must be initialized to at least 1. It is updated automatically
+// according to the addresses of the memory being tracked.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
+
+//
+// Shift and mask for each level of map table
+//
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
+ = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
+ = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
+
+//
+// SMM memory attribute protocol
+//
+EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;
+
+/**
+ Set corresponding bits in bitmap table to 1 according to the address.
+
+ @param[in] Address Start address to set for.
+ @param[in] BitNumber Number of bits to set.
+ @param[in] BitMap Pointer to bitmap which covers the Address.
+
+ @return VOID
+**/
+STATIC
+VOID
+SetBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN BitNumber,
+ IN UINT64 *BitMap
+ )
+{
+ UINTN Lsbs;
+ UINTN Qwords;
+ UINTN Msbs;
+ UINTN StartBit;
+ UINTN EndBit;
+
+ StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
+ EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+
+ if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
+ Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
+ GUARDED_HEAP_MAP_ENTRY_BITS;
+ Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+ Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
+ } else {
+ Msbs = BitNumber;
+ Lsbs = 0;
+ Qwords = 0;
+ }
+
+ if (Msbs > 0) {
+ *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
+ BitMap += 1;
+ }
+
+ if (Qwords > 0) {
+ SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
+ (UINT64)-1);
+ BitMap += Qwords;
+ }
+
+ if (Lsbs > 0) {
+ *BitMap |= (LShiftU64 (1, Lsbs) - 1);
+ }
+}
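+
+//
+// Worked example of the bit splitting above: with StartBit = 60 and
+// BitNumber = 70, the range crosses a UINT64 boundary, so Msbs =
+// (64 - 60) % 64 = 4 bits are set in the first entry, Qwords =
+// (70 - 4) / 64 = 1 whole entry is filled, and Lsbs = ((60 + 70 - 1) % 64)
+// + 1 = 2 bits are set in the next entry; 4 + 64 + 2 = 70 bits in total.
+//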
+
+/**
+ Set corresponding bits in bitmap table to 0 according to the address.
+
+ @param[in] Address Start address to set for.
+ @param[in] BitNumber Number of bits to set.
+ @param[in] BitMap Pointer to bitmap which covers the Address.
+
+ @return VOID.
+**/
+STATIC
+VOID
+ClearBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN BitNumber,
+ IN UINT64 *BitMap
+ )
+{
+ UINTN Lsbs;
+ UINTN Qwords;
+ UINTN Msbs;
+ UINTN StartBit;
+ UINTN EndBit;
+
+ StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
+ EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+
+ if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
+ Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
+ GUARDED_HEAP_MAP_ENTRY_BITS;
+ Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+ Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
+ } else {
+ Msbs = BitNumber;
+ Lsbs = 0;
+ Qwords = 0;
+ }
+
+ if (Msbs > 0) {
+ *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
+ BitMap += 1;
+ }
+
+ if (Qwords > 0) {
+ SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
+ BitMap += Qwords;
+ }
+
+ if (Lsbs > 0) {
+ *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
+ }
+}
+
+/**
+ Get corresponding bits in bitmap table according to the address.
+
+ The value of bit 0 corresponds to the status of memory at given Address.
+ No more than 64 bits can be retrieved in one call.
+
+ @param[in] Address Start address to retrieve bits for.
+ @param[in] BitNumber Number of bits to get.
+ @param[in] BitMap Pointer to bitmap which covers the Address.
+
+ @return An integer containing the bits information.
+**/
+STATIC
+UINT64
+GetBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN BitNumber,
+ IN UINT64 *BitMap
+ )
+{
+ UINTN StartBit;
+ UINTN EndBit;
+ UINTN Lsbs;
+ UINTN Msbs;
+ UINT64 Result;
+
+ ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
+
+ StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
+ EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+
+ if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
+ Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
+ Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+ } else {
+ Msbs = BitNumber;
+ Lsbs = 0;
+ }
+
+ Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
+ if (Lsbs > 0) {
+ BitMap += 1;
+ Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
+ }
+
+ return Result;
+}
+
+/**
+ Helper function to allocate pages without Guard for internal uses.
+
+ @param[in] Pages Page number.
+
+ @return Address of memory allocated.
+**/
+VOID *
+PageAlloc (
+ IN UINTN Pages
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS Memory;
+
+ Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,
+ Pages, &Memory, FALSE);
+ if (EFI_ERROR (Status)) {
+ Memory = 0;
+ }
+
+ return (VOID *)(UINTN)Memory;
+}
+
+/**
+  Locate the bitmap, from the guarded memory bitmap tables, which covers the
+  given Address.
+
+ @param[in] Address Start address to search the bitmap for.
+  @param[in]    AllocMapUnit   Flag to indicate whether a map table unit
+                               should be allocated if none exists yet.
+ @param[out] BitMap Pointer to bitmap which covers the Address.
+
+ @return The bit number from given Address to the end of current map table.
+**/
+UINTN
+FindGuardedMemoryMap (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN AllocMapUnit,
+ OUT UINT64 **BitMap
+ )
+{
+ UINTN Level;
+ UINT64 *GuardMap;
+ UINT64 MapMemory;
+ UINTN Index;
+ UINTN Size;
+ UINTN BitsToUnitEnd;
+
+ //
+ // Adjust current map table depth according to the address to access
+ //
+ while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
+ &&
+ RShiftU64 (
+ Address,
+ mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
+ ) != 0) {
+
+ if (mGuardedMemoryMap != 0) {
+ Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
+ * GUARDED_HEAP_MAP_ENTRY_BYTES;
+ MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
+ ASSERT (MapMemory != 0);
+
+ SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
+
+ *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
+ mGuardedMemoryMap = MapMemory;
+ }
+
+ mMapLevel++;
+
+ }
+
+ GuardMap = &mGuardedMemoryMap;
+ for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+ Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
+ ++Level) {
+
+ if (*GuardMap == 0) {
+ if (!AllocMapUnit) {
+ GuardMap = NULL;
+ break;
+ }
+
+ Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
+ MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
+ ASSERT (MapMemory != 0);
+
+ SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
+ *GuardMap = MapMemory;
+ }
+
+ Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
+ Index &= mLevelMask[Level];
+ GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
+
+ }
+
+ BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
+ *BitMap = GuardMap;
+
+ return BitsToUnitEnd;
+}
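+
+//
+// Note that with the initial mMapLevel of 1 only the L4 bitmap exists and
+// addresses below 256MB are tracked directly. When an address at or above
+// 256MB is first seen, the loop above adds one more level; if a bitmap
+// already exists, the new upper-level table is allocated with entry 0
+// pointing to the old map, so previously tracked ranges keep their place.
+//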
+
+/**
+ Set corresponding bits in bitmap table to 1 according to given memory range.
+
+ @param[in] Address Memory address to guard from.
+ @param[in] NumberOfPages Number of pages to guard.
+
+ @return VOID
+**/
+VOID
+EFIAPI
+SetGuardedMemoryBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN NumberOfPages
+ )
+{
+ UINT64 *BitMap;
+ UINTN Bits;
+ UINTN BitsToUnitEnd;
+
+ while (NumberOfPages > 0) {
+ BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
+ ASSERT (BitMap != NULL);
+
+ if (NumberOfPages > BitsToUnitEnd) {
+ // Cross map unit
+ Bits = BitsToUnitEnd;
+ } else {
+ Bits = NumberOfPages;
+ }
+
+ SetBits (Address, Bits, BitMap);
+
+ NumberOfPages -= Bits;
+ Address += EFI_PAGES_TO_SIZE (Bits);
+ }
+}
+
+/**
+ Clear corresponding bits in bitmap table according to given memory range.
+
+ @param[in] Address Memory address to unset from.
+ @param[in] NumberOfPages Number of pages to unset guard.
+
+ @return VOID
+**/
+VOID
+EFIAPI
+ClearGuardedMemoryBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN NumberOfPages
+ )
+{
+ UINT64 *BitMap;
+ UINTN Bits;
+ UINTN BitsToUnitEnd;
+
+ while (NumberOfPages > 0) {
+ BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
+ ASSERT (BitMap != NULL);
+
+ if (NumberOfPages > BitsToUnitEnd) {
+ // Cross map unit
+ Bits = BitsToUnitEnd;
+ } else {
+ Bits = NumberOfPages;
+ }
+
+ ClearBits (Address, Bits, BitMap);
+
+ NumberOfPages -= Bits;
+ Address += EFI_PAGES_TO_SIZE (Bits);
+ }
+}
+
+/**
+ Retrieve corresponding bits in bitmap table according to given memory range.
+
+ @param[in] Address Memory address to retrieve from.
+ @param[in] NumberOfPages Number of pages to retrieve.
+
+  @return An integer containing the guarded memory bitmap of the range.
+**/
+UINTN
+GetGuardedMemoryBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN NumberOfPages
+ )
+{
+ UINT64 *BitMap;
+ UINTN Bits;
+ UINTN Result;
+ UINTN Shift;
+ UINTN BitsToUnitEnd;
+
+ ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
+
+ Result = 0;
+ Shift = 0;
+ while (NumberOfPages > 0) {
+ BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
+
+ if (NumberOfPages > BitsToUnitEnd) {
+ // Cross map unit
+ Bits = BitsToUnitEnd;
+ } else {
+ Bits = NumberOfPages;
+ }
+
+ if (BitMap != NULL) {
+ Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
+ }
+
+ Shift += Bits;
+ NumberOfPages -= Bits;
+ Address += EFI_PAGES_TO_SIZE (Bits);
+ }
+
+ return Result;
+}
+
+/**
+ Get bit value in bitmap table for the given address.
+
+ @param[in] Address The address to retrieve for.
+
+ @return 1 or 0.
+**/
+UINTN
+EFIAPI
+GetGuardMapBit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINT64 *GuardMap;
+
+ FindGuardedMemoryMap (Address, FALSE, &GuardMap);
+ if (GuardMap != NULL) {
+ if (RShiftU64 (*GuardMap,
+ GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ Set the bit in bitmap table for the given address.
+
+ @param[in] Address The address to set for.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+SetGuardMapBit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINT64 *GuardMap;
+ UINT64 BitMask;
+
+ FindGuardedMemoryMap (Address, TRUE, &GuardMap);
+ if (GuardMap != NULL) {
+ BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
+ *GuardMap |= BitMask;
+ }
+}
+
+/**
+ Clear the bit in bitmap table for the given address.
+
+ @param[in] Address The address to clear for.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+ClearGuardMapBit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINT64 *GuardMap;
+ UINT64 BitMask;
+
+ FindGuardedMemoryMap (Address, TRUE, &GuardMap);
+ if (GuardMap != NULL) {
+ BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
+ *GuardMap &= ~BitMask;
+ }
+}
+
+/**
+ Check to see if the page at the given address is a Guard page or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is a Guard page.
+ @return FALSE The page at Address is not a Guard page.
+**/
+BOOLEAN
+EFIAPI
+IsGuardPage (
+ IN EFI_PHYSICAL_ADDRESS Address
+)
+{
+ UINTN BitMap;
+
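+  //
+  // Bit 0 of the result is the page before Address, bit 1 the page at
+  // Address and bit 2 the page after it. A Guard page is never marked as
+  // guarded itself (bit 1 clear) but has at least one guarded neighbor,
+  // so the only possible patterns are 001b, 100b and 101b.
+  //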
+ BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
+  return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
+}
+
+/**
+ Check to see if the page at the given address is a head Guard page or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is a head Guard page.
+ @return FALSE The page at Address is not a head Guard page.
+**/
+BOOLEAN
+EFIAPI
+IsHeadGuard (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+  return (GetGuardedMemoryBits (Address, 2) == BIT1);
+}
+
+/**
+ Check to see if the page at the given address is a tail Guard page or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is a tail Guard page.
+ @return FALSE The page at Address is not a tail Guard page.
+**/
+BOOLEAN
+EFIAPI
+IsTailGuard (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+  return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
+}
+
+/**
+ Check to see if the page at the given address is guarded or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is guarded.
+ @return FALSE The page at Address is not guarded.
+**/
+BOOLEAN
+EFIAPI
+IsMemoryGuarded (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ return (GetGuardMapBit (Address) == 1);
+}
+
+/**
+ Set the page at the given address to be a Guard page.
+
+  This is done by changing the page table attribute to be NOT PRESENT.
+
+ @param[in] BaseAddress Page address to Guard at.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+SetGuardPage (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress
+ )
+{
+ if (mSmmMemoryAttribute != NULL) {
+ mOnGuarding = TRUE;
+ mSmmMemoryAttribute->SetMemoryAttributes (
+ mSmmMemoryAttribute,
+ BaseAddress,
+ EFI_PAGE_SIZE,
+ EFI_MEMORY_RP
+ );
+ mOnGuarding = FALSE;
+ }
+}
+
+/**
+  Unset the Guard page at the given address back to normal memory.
+
+  This is done by changing the page table attribute to be PRESENT.
+
+  @param[in]  BaseAddress     Page address to unset Guard at.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+UnsetGuardPage (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress
+ )
+{
+ if (mSmmMemoryAttribute != NULL) {
+ mOnGuarding = TRUE;
+ mSmmMemoryAttribute->ClearMemoryAttributes (
+ mSmmMemoryAttribute,
+ BaseAddress,
+ EFI_PAGE_SIZE,
+ EFI_MEMORY_RP
+ );
+ mOnGuarding = FALSE;
+ }
+}
+
+/**
+ Check to see if the memory at the given address should be guarded or not.
+
+ @param[in] MemoryType Memory type to check.
+ @param[in] AllocateType Allocation type to check.
+ @param[in] PageOrPool Indicate a page allocation or pool allocation.
+
+
+ @return TRUE The given type of memory should be guarded.
+ @return FALSE The given type of memory should not be guarded.
+**/
+BOOLEAN
+IsMemoryTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN EFI_ALLOCATE_TYPE AllocateType,
+ IN UINT8 PageOrPool
+ )
+{
+ UINT64 TestBit;
+ UINT64 ConfigBit;
+
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0
+ || mOnGuarding
+ || AllocateType == AllocateAddress) {
+ return FALSE;
+ }
+
+ ConfigBit = 0;
+ if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {
+ ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);
+ }
+
+ if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {
+ ConfigBit |= PcdGet64 (PcdHeapGuardPageType);
+ }
+
+ if (MemoryType == EfiRuntimeServicesData ||
+ MemoryType == EfiRuntimeServicesCode) {
+ TestBit = LShiftU64 (1, MemoryType);
+ } else if (MemoryType == EfiMaxMemoryType) {
+ TestBit = (UINT64)-1;
+ } else {
+ TestBit = 0;
+ }
+
+ return ((ConfigBit & TestBit) != 0);
+}
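+
+//
+// For example, page allocations of type EfiRuntimeServicesData are guarded
+// only when PcdHeapGuardPageType has BIT6 (1 << EfiRuntimeServicesData) set
+// and PcdHeapGuardPropertyMask has the GUARD_HEAP_TYPE_PAGE bit set; pool
+// allocations are controlled in the same way by PcdHeapGuardPoolType and
+// the GUARD_HEAP_TYPE_POOL bit.
+//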
+
+/**
+ Check to see if the pool at the given address should be guarded or not.
+
+ @param[in] MemoryType Pool type to check.
+
+
+ @return TRUE The given type of pool should be guarded.
+ @return FALSE The given type of pool should not be guarded.
+**/
+BOOLEAN
+IsPoolTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType
+ )
+{
+ return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
+ GUARD_HEAP_TYPE_POOL);
+}
+
+/**
+ Check to see if the page at the given address should be guarded or not.
+
+ @param[in] MemoryType Page type to check.
+ @param[in] AllocateType Allocation type to check.
+
+ @return TRUE The given type of page should be guarded.
+ @return FALSE The given type of page should not be guarded.
+**/
+BOOLEAN
+IsPageTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN EFI_ALLOCATE_TYPE AllocateType
+ )
+{
+ return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
+}
+
+/**
+ Check to see if the heap guard is enabled for page and/or pool allocation.
+
+ @return TRUE/FALSE.
+**/
+BOOLEAN
+IsHeapGuardEnabled (
+ VOID
+ )
+{
+ return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
+ GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
+}
+
+/**
+ Set head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to set guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID.
+**/
+VOID
+SetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ )
+{
+ EFI_PHYSICAL_ADDRESS GuardPage;
+
+ //
+ // Set tail Guard
+ //
+ GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
+ if (!IsGuardPage (GuardPage)) {
+ SetGuardPage (GuardPage);
+ }
+
+  //
+  // Set head Guard
+  //
+ GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
+ if (!IsGuardPage (GuardPage)) {
+ SetGuardPage (GuardPage);
+ }
+
+ //
+ // Mark the memory range as Guarded
+ //
+ SetGuardedMemoryBits (Memory, NumberOfPages);
+}
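+
+//
+// Layout produced for a guarded range of N pages:
+//
+//   [head Guard]  [page 0] ... [page N-1]  [tail Guard]
+//   Memory-4KB    Memory                   Memory+N*4KB
+//
+// Both Guard pages are marked EFI_MEMORY_RP, while only the N pages in
+// between are recorded in the guarded memory bitmap.
+//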
+
+/**
+ Unset head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to unset guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID.
+**/
+VOID
+UnsetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ )
+{
+ EFI_PHYSICAL_ADDRESS GuardPage;
+
+ if (NumberOfPages == 0) {
+ return;
+ }
+
+ //
+ // Head Guard must be one page before, if any.
+ //
+ GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
+ if (IsHeadGuard (GuardPage)) {
+ if (!IsMemoryGuarded (GuardPage - EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the head Guard is not a tail Guard of adjacent memory block,
+ // unset it.
+ //
+ UnsetGuardPage (GuardPage);
+ }
+ } else if (IsMemoryGuarded (GuardPage)) {
+ //
+ // Pages before memory to free are still in Guard. It's a partial free
+ // case. Turn first page of memory block to free into a new Guard.
+ //
+ SetGuardPage (Memory);
+ }
+
+ //
+ // Tail Guard must be the page after this memory block to free, if any.
+ //
+ GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
+ if (IsTailGuard (GuardPage)) {
+ if (!IsMemoryGuarded (GuardPage + EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the tail Guard is not a head Guard of adjacent memory block,
+ // free it; otherwise, keep it.
+ //
+ UnsetGuardPage (GuardPage);
+ }
+ } else if (IsMemoryGuarded (GuardPage)) {
+ //
+ // Pages after memory to free are still in Guard. It's a partial free
+ // case. We need to keep one page to be a head Guard.
+ //
+ SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
+ }
+
+ //
+ // No matter what, we just clear the mark of the Guarded memory.
+ //
+ ClearGuardedMemoryBits(Memory, NumberOfPages);
+}
+
+/**
+ Adjust address of free memory according to existing and/or required Guard.
+
+  This function checks whether there are existing Guard pages of adjacent
+  memory blocks, and tries to reuse them as the Guard pages of the memory
+  to be allocated.
+
+ @param[in] Start Start address of free memory block.
+ @param[in] Size Size of free memory block.
+ @param[in] SizeRequested Size of memory to allocate.
+
+ @return The end address of memory block found.
+  @return 0 if there is not enough space for the requested memory and its Guard.
+**/
+UINT64
+AdjustMemoryS (
+ IN UINT64 Start,
+ IN UINT64 Size,
+ IN UINT64 SizeRequested
+ )
+{
+ UINT64 Target;
+
+ Target = Start + Size - SizeRequested;
+
+ //
+ // At least one more page needed for Guard page.
+ //
+ if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
+ return 0;
+ }
+
+ if (!IsGuardPage (Start + Size)) {
+ // No Guard at tail to share. One more page is needed.
+ Target -= EFI_PAGES_TO_SIZE (1);
+ }
+
+ // Out of range?
+ if (Target < Start) {
+ return 0;
+ }
+
+ // At the edge?
+ if (Target == Start) {
+ if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
+      // Not enough space for a new head Guard if no Guard at head to share.
+ return 0;
+ }
+ }
+
+ // OK, we have enough pages for memory and its Guards. Return the End of the
+ // free space.
+ return Target + SizeRequested - 1;
+}
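+
+//
+// For example, given a 5-page free block with no Guard page at either end
+// and a 3-page request: Target starts at Start + 2 pages and is moved down
+// one page because a new tail Guard is needed, so the function returns the
+// end address of a block starting at Start + 1 page; the first and last
+// pages of the free block are left to become the head and tail Guards.
+//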
+
+/**
+ Adjust the start address and number of pages to free according to Guard.
+
+  The purpose of this function is to keep the shared Guard page with the
+  adjacent memory block if it is still guarded, or to free it if it is no
+  longer shared. Another purpose is to reserve pages as Guard pages in a
+  partial page free situation.
+
+ @param[in,out] Memory Base address of memory to free.
+ @param[in,out] NumberOfPages Size of memory to free.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryF (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ )
+{
+ EFI_PHYSICAL_ADDRESS Start;
+ EFI_PHYSICAL_ADDRESS MemoryToTest;
+ UINTN PagesToFree;
+
+ if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
+ return;
+ }
+
+ Start = *Memory;
+ PagesToFree = *NumberOfPages;
+
+ //
+ // Head Guard must be one page before, if any.
+ //
+ MemoryToTest = Start - EFI_PAGES_TO_SIZE (1);
+ if (IsHeadGuard (MemoryToTest)) {
+ if (!IsMemoryGuarded (MemoryToTest - EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the head Guard is not a tail Guard of adjacent memory block,
+ // free it; otherwise, keep it.
+ //
+ Start -= EFI_PAGES_TO_SIZE (1);
+ PagesToFree += 1;
+ }
+ } else if (IsMemoryGuarded (MemoryToTest)) {
+ //
+ // Pages before memory to free are still in Guard. It's a partial free
+ // case. We need to keep one page to be a tail Guard.
+ //
+ Start += EFI_PAGES_TO_SIZE (1);
+ PagesToFree -= 1;
+ }
+
+ //
+ // Tail Guard must be the page after this memory block to free, if any.
+ //
+ MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
+ if (IsTailGuard (MemoryToTest)) {
+ if (!IsMemoryGuarded (MemoryToTest + EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the tail Guard is not a head Guard of adjacent memory block,
+ // free it; otherwise, keep it.
+ //
+ PagesToFree += 1;
+ }
+ } else if (IsMemoryGuarded (MemoryToTest)) {
+ //
+ // Pages after memory to free are still in Guard. It's a partial free
+ // case. We need to keep one page to be a head Guard.
+ //
+ PagesToFree -= 1;
+ }
+
+ *Memory = Start;
+ *NumberOfPages = PagesToFree;
+}
+
+/**
+ Adjust the base and number of pages to really allocate according to Guard.
+
+ @param[in,out] Memory Base address of free memory.
+ @param[in,out] NumberOfPages Size of memory to allocate.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryA (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ )
+{
+ //
+ // FindFreePages() has already taken the Guard into account. It's safe to
+ // adjust the start address and/or number of pages here, to make sure that
+ // the Guards are also "allocated".
+ //
+ if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
+ // No tail Guard, add one.
+ *NumberOfPages += 1;
+ }
+
+ if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
+ // No head Guard, add one.
+ *Memory -= EFI_PAGE_SIZE;
+ *NumberOfPages += 1;
+ }
+}
+
+/**
+  Adjust the pool head position to make sure the Guard page is adjacent to
+  the pool tail or pool head.
+
+ @param[in] Memory Base address of memory allocated.
+ @param[in] NoPages Number of pages actually allocated.
+ @param[in] Size Size of memory requested.
+ (plus pool head/tail overhead)
+
+ @return Address of pool head
+**/
+VOID *
+AdjustPoolHeadA (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NoPages,
+ IN UINTN Size
+ )
+{
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
+ //
+ // Pool head is put near the head Guard
+ //
+ return (VOID *)(UINTN)Memory;
+ }
+
+ //
+ // Pool head is put near the tail Guard
+ //
+ return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
+}
+
+/**
+ Get the page base address according to pool head address.
+
+ @param[in] Memory Head address of pool to free.
+
+ @return Address of pool head.
+**/
+VOID *
+AdjustPoolHeadF (
+ IN EFI_PHYSICAL_ADDRESS Memory
+ )
+{
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
+ //
+ // Pool head is put near the head Guard
+ //
+ return (VOID *)(UINTN)Memory;
+ }
+
+ //
+ // Pool head is put near the tail Guard
+ //
+ return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
+}
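+
+//
+// With BIT7 of PcdHeapGuardPropertyMask clear, the pool data is placed so
+// that its tail touches the tail Guard: AdjustPoolHeadA() returns
+// Memory + EFI_PAGES_TO_SIZE (NoPages) - Size, and AdjustPoolHeadF() masks
+// off the page offset to recover the page base address for freeing.
+//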
+
+/**
+ Helper function of memory allocation with Guard pages.
+
+  @param  FreePageList           The free page list head.
+ @param NumberOfPages Number of pages to be allocated.
+ @param MaxAddress Request to allocate memory below this address.
+ @param MemoryType Type of memory requested.
+
+ @return Memory address of allocated pages.
+**/
+UINTN
+InternalAllocMaxAddressWithGuard (
+ IN OUT LIST_ENTRY *FreePageList,
+ IN UINTN NumberOfPages,
+ IN UINTN MaxAddress,
+  IN     EFI_MEMORY_TYPE      MemoryType
+  )
+{
+ LIST_ENTRY *Node;
+ FREE_PAGE_LIST *Pages;
+ UINTN PagesToAlloc;
+ UINTN HeadGuard;
+ UINTN TailGuard;
+ UINTN Address;
+
+ for (Node = FreePageList->BackLink; Node != FreePageList;
+ Node = Node->BackLink) {
+ Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
+ if (Pages->NumberOfPages >= NumberOfPages &&
+ (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {
+
+ //
+ // We may need 1 or 2 more pages for Guard. Check it out.
+ //
+ PagesToAlloc = NumberOfPages;
+ TailGuard = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
+ if (!IsGuardPage (TailGuard)) {
+ //
+ // Add one if no Guard at the end of current free memory block.
+ //
+ PagesToAlloc += 1;
+ TailGuard = 0;
+ }
+
+ HeadGuard = (UINTN)Pages +
+ EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
+ EFI_PAGE_SIZE;
+ if (!IsGuardPage (HeadGuard)) {
+ //
+ // Add one if no Guard at the page before the address to allocate
+ //
+ PagesToAlloc += 1;
+ HeadGuard = 0;
+ }
+
+ if (Pages->NumberOfPages < PagesToAlloc) {
+ // Not enough space to allocate memory with Guards? Try next block.
+ continue;
+ }
+
+ Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
+ ConvertSmmMemoryMapEntry(MemoryType, Address, PagesToAlloc, FALSE);
+ CoreFreeMemoryMapStack();
+ if (HeadGuard == 0) {
+ // Don't pass the Guard page to user.
+ Address += EFI_PAGE_SIZE;
+ }
+ SetGuardForMemory (Address, NumberOfPages);
+ return Address;
+ }
+ }
+
+ return (UINTN)(-1);
+}
+
+/**
+ Helper function of memory free with Guard pages.
+
+ @param[in] Memory Base address of memory being freed.
+ @param[in] NumberOfPages The number of pages to free.
+  @param[in]  AddRegion              If this memory is a newly added region.
+
+ @retval EFI_NOT_FOUND Could not find the entry that covers the range.
+ @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.
+ @return EFI_SUCCESS Pages successfully freed.
+**/
+EFI_STATUS
+SmmInternalFreePagesExWithGuard (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages,
+ IN BOOLEAN AddRegion
+ )
+{
+ EFI_PHYSICAL_ADDRESS MemoryToFree;
+ UINTN PagesToFree;
+
+ MemoryToFree = Memory;
+ PagesToFree = NumberOfPages;
+
+ AdjustMemoryF (&MemoryToFree, &PagesToFree);
+ UnsetGuardForMemory (Memory, NumberOfPages);
+
+ return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);
+}
+
+/**
+  Set all Guard pages which could not be set while outside of SMM mode.
+**/
+VOID
+SetAllGuardPages (
+ VOID
+ )
+{
+ UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 TableEntry;
+ UINT64 Address;
+ UINT64 GuardPage;
+ INTN Level;
+ UINTN Index;
+ BOOLEAN OnGuarding;
+
+ if (mGuardedMemoryMap == 0) {
+ return;
+ }
+
+ CopyMem (Entries, mLevelMask, sizeof (Entries));
+ CopyMem (Shifts, mLevelShift, sizeof (Shifts));
+
+ SetMem (Tables, sizeof(Tables), 0);
+ SetMem (Addresses, sizeof(Addresses), 0);
+ SetMem (Indices, sizeof(Indices), 0);
+
+ Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+ Tables[Level] = mGuardedMemoryMap;
+ Address = 0;
+ OnGuarding = FALSE;
+
+ DEBUG_CODE (
+ DumpGuardedMemoryBitmap ();
+ );
+
+ while (TRUE) {
+ if (Indices[Level] > Entries[Level]) {
+ Tables[Level] = 0;
+ Level -= 1;
+ } else {
+
+ TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
+ Address = Addresses[Level];
+
+ if (TableEntry == 0) {
+
+ OnGuarding = FALSE;
+
+ } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+
+ Level += 1;
+ Tables[Level] = TableEntry;
+ Addresses[Level] = Address;
+ Indices[Level] = 0;
+
+ continue;
+
+ } else {
+
+ Index = 0;
+ while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
+ if ((TableEntry & 1) == 1) {
+ if (OnGuarding) {
+ GuardPage = 0;
+ } else {
+ GuardPage = Address - EFI_PAGE_SIZE;
+ }
+ OnGuarding = TRUE;
+ } else {
+ if (OnGuarding) {
+ GuardPage = Address;
+ } else {
+ GuardPage = 0;
+ }
+ OnGuarding = FALSE;
+ }
+
+ if (GuardPage != 0) {
+ SetGuardPage (GuardPage);
+ }
+
+ if (TableEntry == 0) {
+ break;
+ }
+
+ TableEntry = RShiftU64 (TableEntry, 1);
+ Address += EFI_PAGE_SIZE;
+ Index += 1;
+ }
+ }
+ }
+
+ if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
+ break;
+ }
+
+ Indices[Level] += 1;
+ Address = (Level == 0) ? 0 : Addresses[Level - 1];
+ Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
+
+ }
+}
+
+/**
+ Hook function used to set all Guard pages after entering SMM mode.
+**/
+VOID
+SmmEntryPointMemoryManagementHook (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+
+ if (mSmmMemoryAttribute == NULL) {
+ Status = SmmLocateProtocol (
+ &gEdkiiSmmMemoryAttributeProtocolGuid,
+ NULL,
+ (VOID **)&mSmmMemoryAttribute
+ );
+ if (!EFI_ERROR(Status)) {
+ SetAllGuardPages ();
+ }
+ }
+}
+
+/**
+  Helper function to convert a UINT64 value into its binary string form.
+
+ @param[in] Value Value of a UINT64 integer.
+  @param[out] BinString   String buffer to contain the conversion result.
+                          It must hold at least 65 characters (64 digits
+                          plus the terminating NUL).
+
+ @return VOID.
+**/
+VOID
+Uint64ToBinString (
+ IN UINT64 Value,
+ OUT CHAR8 *BinString
+ )
+{
+ UINTN Index;
+
+ if (BinString == NULL) {
+ return;
+ }
+
+ for (Index = 64; Index > 0; --Index) {
+ BinString[Index - 1] = '0' + (Value & 1);
+ Value = RShiftU64 (Value, 1);
+ }
+ BinString[64] = '\0';
+}
+
+/**
+ Dump the guarded memory bit map.
+**/
+VOID
+EFIAPI
+DumpGuardedMemoryBitmap (
+ VOID
+ )
+{
+ UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 TableEntry;
+ UINT64 Address;
+ INTN Level;
+ UINTN RepeatZero;
+ CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
+ CHAR8 *Ruler1;
+ CHAR8 *Ruler2;
+
+ if (mGuardedMemoryMap == 0) {
+ return;
+ }
+
+ Ruler1 = " 3 2 1 0";
+ Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
+
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
+ " Guarded Memory Bitmap "
+ "==============================\r\n"));
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
+
+ CopyMem (Entries, mLevelMask, sizeof (Entries));
+ CopyMem (Shifts, mLevelShift, sizeof (Shifts));
+
+ SetMem (Indices, sizeof(Indices), 0);
+ SetMem (Tables, sizeof(Tables), 0);
+ SetMem (Addresses, sizeof(Addresses), 0);
+
+ Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+ Tables[Level] = mGuardedMemoryMap;
+ Address = 0;
+ RepeatZero = 0;
+
+ while (TRUE) {
+ if (Indices[Level] > Entries[Level]) {
+
+ Tables[Level] = 0;
+ Level -= 1;
+ RepeatZero = 0;
+
+ DEBUG ((
+ HEAP_GUARD_DEBUG_LEVEL,
+ "========================================="
+ "=========================================\r\n"
+ ));
+
+ } else {
+
+ TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
+ Address = Addresses[Level];
+
+ if (TableEntry == 0) {
+
+ if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+ if (RepeatZero == 0) {
+ Uint64ToBinString(TableEntry, String);
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
+ } else if (RepeatZero == 1) {
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
+ }
+ RepeatZero += 1;
+ }
+
+ } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+
+ Level += 1;
+ Tables[Level] = TableEntry;
+ Addresses[Level] = Address;
+ Indices[Level] = 0;
+ RepeatZero = 0;
+
+ continue;
+
+ } else {
+
+ RepeatZero = 0;
+ Uint64ToBinString(TableEntry, String);
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
+
+ }
+ }
+
+ if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
+ break;
+ }
+
+ Indices[Level] += 1;
+ Address = (Level == 0) ? 0 : Addresses[Level - 1];
+ Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
+
+ }
+}
+
+/**
+ Debug function used to verify if the Guard page is well set or not.
+
+ @param[in] BaseAddress Address of memory to check.
+ @param[in] NumberOfPages Size of memory in pages.
+
+ @return TRUE The head Guard and tail Guard are both well set.
+ @return FALSE The head Guard and/or tail Guard are not well set.
+**/
+BOOLEAN
+VerifyMemoryGuard (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumberOfPages
+ )
+{
+ EFI_STATUS Status;
+ UINT64 Attribute;
+ EFI_PHYSICAL_ADDRESS Address;
+
+ if (mSmmMemoryAttribute == NULL) {
+ return TRUE;
+ }
+
+ Attribute = 0;
+ Address = BaseAddress - EFI_PAGE_SIZE;
+ Status = mSmmMemoryAttribute->GetMemoryAttributes (
+ mSmmMemoryAttribute,
+ Address,
+ EFI_PAGE_SIZE,
+ &Attribute
+ );
+ if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
+ DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",
+ Address, Attribute));
+ DumpGuardedMemoryBitmap ();
+ return FALSE;
+ }
+
+ Attribute = 0;
+ Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);
+ Status = mSmmMemoryAttribute->GetMemoryAttributes (
+ mSmmMemoryAttribute,
+ Address,
+ EFI_PAGE_SIZE,
+ &Attribute
+ );
+ if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
+ DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",
+ Address, Attribute));
+ DumpGuardedMemoryBitmap ();
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
diff --git a/MdeModulePkg/Core/PiSmmCore/HeapGuard.h b/MdeModulePkg/Core/PiSmmCore/HeapGuard.h
new file mode 100644
index 0000000000..8698ff9f87
--- /dev/null
+++ b/MdeModulePkg/Core/PiSmmCore/HeapGuard.h
@@ -0,0 +1,398 @@
+/** @file
+ Data structure and functions to allocate and free memory space.
+
+Copyright (c) 2017, Intel Corporation. All rights reserved.
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _HEAPGUARD_H_
+#define _HEAPGUARD_H_
+
+#include "PiSmmCore.h"
+
+//
+// Following macros are used to define and access the guarded memory bitmap
+// table.
+//
+// To simplify the access and reduce the memory used for this table, the
+// table is constructed in the similar way as page table structure but in
+// reverse direction, i.e. from bottom growing up to top.
+//
+// - 1-bit tracks 1 page (4KB)
+// - 1-UINT64 map entry tracks 256KB memory
+// - 1K-UINT64 map table tracks 256MB memory
+//  - Five levels of tables can track any memory address of a 64-bit
+//    system, as shown below.
+//
+// 512 * 512 * 512 * 512 * 1K * 64b * 4K
+// 111111111 111111111 111111111 111111111 1111111111 111111 111111111111
+// 63 54 45 36 27 17 11 0
+// 9b 9b 9b 9b 10b 6b 12b
+// L0 -> L1 -> L2 -> L3 -> L4 -> bits -> page
+// 1FF 1FF 1FF 1FF 3FF 3F FFF
+//
+// An L4 table has 1K * sizeof(UINT64) = 8KB (2 pages), which can track 256MB
+// of memory. Each L0-L3 table is allocated only when its memory address
+// range needs to be tracked, and only one page is allocated each time. This
+// saves the memory used to establish the map table.
+//
+// For a normal system configuration with 4GB of memory, two levels of tables
+// can track all of it, because two levels (L3+L4) of map tables already
+// cover 37 bits of memory address. And since a normal UEFI BIOS consumes
+// less than 128MB of memory during boot, we just need
+//
+//    1 page (L3) + 2 pages (L4)
+//
+// of memory (3 pages) to track all memory allocations. In this case, there
+// is no need to set up the L0-L2 tables.
+//
+
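+//
+// For example, the 4KB page at address 0x0ABC5000 has page index 0xABC5:
+// its low 6 bits (0x05) select the bit inside a UINT64 map entry, the next
+// 10 bits (0x2AF) select the entry inside the L4 table, and all higher bits
+// are zero, so no L0-L3 table is needed while mMapLevel is still 1.
+//
+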
+//
+// Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
+// bits in address. (512 = 1 << 9)
+//
+#define BYTE_LENGTH_SHIFT 3 // (8 = 1 << 3)
+
+#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT \
+ (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)
+
+#define GUARDED_HEAP_MAP_TABLE_DEPTH 5
+
+// Use UINT64_index + bit_index_of_UINT64 to locate the bit in the map
+#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT 6 // (64 = 1 << 6)
+
+#define GUARDED_HEAP_MAP_ENTRY_BITS \
+ (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)
+
+#define GUARDED_HEAP_MAP_ENTRY_BYTES \
+ (GUARDED_HEAP_MAP_ENTRY_BITS / 8)
+
+// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
+#define GUARDED_HEAP_MAP_ENTRY_SHIFT \
+ (GUARDED_HEAP_MAP_ENTRY_BITS \
+ - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
+ - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
+ - EFI_PAGE_SHIFT)
+
+// L4 table address mask: (1 << 10 - 1) = 0x3FF
+#define GUARDED_HEAP_MAP_ENTRY_MASK \
+ ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)
+
+// Size of each L4 table: (1 << 10) * 8 = 8KB = 2-page
+#define GUARDED_HEAP_MAP_SIZE \
+ ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)
+
+// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
+#define GUARDED_HEAP_MAP_UNIT_SIZE \
+ (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)
+
+// L4 table entry number: 8KB / 8 = 1024
+#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT \
+ (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)
+
+// L4 table entry indexing
+#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address) \
+ (RShiftU64 (Address, EFI_PAGE_SHIFT \
+ + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) \
+ & GUARDED_HEAP_MAP_ENTRY_MASK)
+
+// L4 table entry bit indexing
+#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address) \
+ (RShiftU64 (Address, EFI_PAGE_SHIFT) \
+ & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))
+
+//
+// Total bits (pages) tracked by one L4 table (65536-bit)
+//
+#define GUARDED_HEAP_MAP_BITS \
+ (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
+ + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))
+
+//
+// Bit indexing inside the whole L4 table (0 - 65535)
+//
+#define GUARDED_HEAP_MAP_BIT_INDEX(Address) \
+ (RShiftU64 (Address, EFI_PAGE_SHIFT) \
+ & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
+ + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))
+
+//
+// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
+//
+#define GUARDED_HEAP_MAP_TABLE_SHIFT \
+ (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
+ + EFI_PAGE_SHIFT)
+
+//
+// Macro used to initialize the local array variable for map table traversing
+// {55, 46, 37, 28, 18}
+//
+#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS \
+ { \
+ GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3, \
+ GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2, \
+ GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT, \
+ GUARDED_HEAP_MAP_TABLE_SHIFT, \
+ EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
+ }
+
+//
+// Masks used to extract address range of each level of table
+// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
+//
+#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS \
+ { \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1 \
+ }
+
+//
+// Memory type to guard (matching the related PCD definition)
+//
+#define GUARD_HEAP_TYPE_POOL BIT2
+#define GUARD_HEAP_TYPE_PAGE BIT3
+
+//
+// Debug message level
+//
+#define HEAP_GUARD_DEBUG_LEVEL (DEBUG_POOL|DEBUG_PAGE)
+
+typedef struct {
+ UINT32 TailMark;
+ UINT32 HeadMark;
+ EFI_PHYSICAL_ADDRESS Address;
+ LIST_ENTRY Link;
+} HEAP_GUARD_NODE;
+
+/**
+ Set head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to set guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID.
+**/
+VOID
+SetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ );
+
+/**
+ Unset head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to unset guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID.
+**/
+VOID
+UnsetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ );
+
+/**
+ Adjust the base and number of pages to really allocate according to Guard.
+
+ @param[in,out] Memory Base address of free memory.
+ @param[in,out] NumberOfPages Size of memory to allocate.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryA (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ );
+
+/**
+ Adjust the start address and number of pages to free according to Guard.
+
+  The purpose of this function is to keep the shared Guard page with the
+  adjacent memory block if it is still guarded, or to free it if it is no
+  longer shared. Another purpose is to reserve pages as Guard pages in a
+  partial page free situation.
+
+ @param[in,out] Memory Base address of memory to free.
+ @param[in,out] NumberOfPages Size of memory to free.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryF (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ );
+
+/**
+ Check to see if the pool at the given address should be guarded or not.
+
+ @param[in] MemoryType Pool type to check.
+
+
+ @return TRUE The given type of pool should be guarded.
+ @return FALSE The given type of pool should not be guarded.
+**/
+BOOLEAN
+IsPoolTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType
+ );
+
+/**
+ Check to see if the page at the given address should be guarded or not.
+
+ @param[in] MemoryType Page type to check.
+ @param[in] AllocateType Allocation type to check.
+
+ @return TRUE The given type of page should be guarded.
+ @return FALSE The given type of page should not be guarded.
+**/
+BOOLEAN
+IsPageTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN EFI_ALLOCATE_TYPE AllocateType
+ );
+
+/**
+ Check to see if the page at the given address is guarded or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is guarded.
+ @return FALSE The page at Address is not guarded.
+**/
+BOOLEAN
+EFIAPI
+IsMemoryGuarded (
+ IN EFI_PHYSICAL_ADDRESS Address
+ );
+
+/**
+ Check to see if the page at the given address is a Guard page or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is a Guard page.
+ @return FALSE The page at Address is not a Guard page.
+**/
+BOOLEAN
+EFIAPI
+IsGuardPage (
+ IN EFI_PHYSICAL_ADDRESS Address
+ );
+
+/**
+ Dump the guarded memory bit map.
+**/
+VOID
+EFIAPI
+DumpGuardedMemoryBitmap (
+ VOID
+ );
+
+/**
+  Adjust the pool head position to make sure the Guard page is adjacent to
+  the pool tail or pool head.
+
+ @param[in] Memory Base address of memory allocated.
+ @param[in] NoPages Number of pages actually allocated.
+ @param[in] Size Size of memory requested.
+ (plus pool head/tail overhead)
+
+ @return Address of pool head.
+**/
+VOID *
+AdjustPoolHeadA (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NoPages,
+ IN UINTN Size
+ );
+
+/**
+ Get the page base address according to pool head address.
+
+ @param[in] Memory Head address of pool to free.
+
+ @return Address of pool head.
+**/
+VOID *
+AdjustPoolHeadF (
+ IN EFI_PHYSICAL_ADDRESS Memory
+ );
+
+/**
+ Helper function of memory allocation with Guard pages.
+
+  @param FreePageList            The free page list head.
+ @param NumberOfPages Number of pages to be allocated.
+ @param MaxAddress Request to allocate memory below this address.
+ @param MemoryType Type of memory requested.
+
+ @return Memory address of allocated pages.
+**/
+UINTN
+InternalAllocMaxAddressWithGuard (
+ IN OUT LIST_ENTRY *FreePageList,
+ IN UINTN NumberOfPages,
+ IN UINTN MaxAddress,
+ IN EFI_MEMORY_TYPE MemoryType
+ );
+
+/**
+ Helper function of memory free with Guard pages.
+
+ @param[in] Memory Base address of memory being freed.
+ @param[in] NumberOfPages The number of pages to free.
+  @param[in]  AddRegion              If this memory is a newly added region.
+
+ @retval EFI_NOT_FOUND Could not find the entry that covers the range.
+ @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or
+ NumberOfPages is zero.
+ @return EFI_SUCCESS Pages successfully freed.
+**/
+EFI_STATUS
+SmmInternalFreePagesExWithGuard (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages,
+ IN BOOLEAN AddRegion
+ );
+
+/**
+ Check to see if the heap guard is enabled for page and/or pool allocation.
+
+ @return TRUE/FALSE.
+**/
+BOOLEAN
+IsHeapGuardEnabled (
+ VOID
+ );
+
+/**
+ Debug function used to verify if the Guard page is well set or not.
+
+ @param[in] BaseAddress Address of memory to check.
+ @param[in] NumberOfPages Size of memory in pages.
+
+ @return TRUE The head Guard and tail Guard are both well set.
+ @return FALSE The head Guard and/or tail Guard are not well set.
+**/
+BOOLEAN
+VerifyMemoryGuard (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumberOfPages
+ );
+
+extern BOOLEAN mOnGuarding;
+
+#endif
diff --git a/MdeModulePkg/Core/PiSmmCore/Page.c b/MdeModulePkg/Core/PiSmmCore/Page.c
index 4154c2e6a1..5f5b4bbe1c 100644
--- a/MdeModulePkg/Core/PiSmmCore/Page.c
+++ b/MdeModulePkg/Core/PiSmmCore/Page.c
@@ -64,6 +64,8 @@ LIST_ENTRY mFreeMemoryMapEntryList = INITIALIZE_LIST_HEAD_VARIABLE (mFreeMemor
@param[out] Memory A pointer to receive the base allocated memory
address.
@param[in] AddRegion If this memory is new added region.
+ @param[in] NeedGuard Flag to indicate Guard page is needed
+ or not
@retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in spec.
@retval EFI_NOT_FOUND Could not allocate pages match the requirement.
@@ -77,7 +79,8 @@ SmmInternalAllocatePagesEx (
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN NumberOfPages,
OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN BOOLEAN AddRegion
+ IN BOOLEAN AddRegion,
+ IN BOOLEAN NeedGuard
);
/**
@@ -112,7 +115,8 @@ AllocateMemoryMapEntry (
EfiRuntimeServicesData,
EFI_SIZE_TO_PAGES (RUNTIME_PAGE_ALLOCATION_GRANULARITY),
&Mem,
- TRUE
+ TRUE,
+ FALSE
);
ASSERT_EFI_ERROR (Status);
if(!EFI_ERROR (Status)) {
@@ -688,6 +692,8 @@ InternalAllocAddress (
@param[out] Memory A pointer to receive the base allocated memory
address.
@param[in] AddRegion If this memory is new added region.
+ @param[in] NeedGuard Flag to indicate Guard page is needed
+ or not
@retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in spec.
@retval EFI_NOT_FOUND Could not allocate pages match the requirement.
@@ -701,7 +707,8 @@ SmmInternalAllocatePagesEx (
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN NumberOfPages,
OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN BOOLEAN AddRegion
+ IN BOOLEAN AddRegion,
+ IN BOOLEAN NeedGuard
)
{
UINTN RequestedAddress;
@@ -723,6 +730,21 @@ SmmInternalAllocatePagesEx (
case AllocateAnyPages:
RequestedAddress = (UINTN)(-1);
case AllocateMaxAddress:
+ if (NeedGuard) {
+ *Memory = InternalAllocMaxAddressWithGuard (
+ &mSmmMemoryMap,
+ NumberOfPages,
+ RequestedAddress,
+ MemoryType
+ );
+ if (*Memory == (UINTN)-1) {
+ return EFI_OUT_OF_RESOURCES;
+ } else {
+ ASSERT (VerifyMemoryGuard (*Memory, NumberOfPages) == TRUE);
+ return EFI_SUCCESS;
+ }
+ }
+
*Memory = InternalAllocMaxAddress (
&mSmmMemoryMap,
NumberOfPages,
@@ -766,6 +788,8 @@ SmmInternalAllocatePagesEx (
@param[in] NumberOfPages The number of pages to allocate.
@param[out] Memory A pointer to receive the base allocated memory
address.
+ @param[in] NeedGuard Flag to indicate Guard page is needed
+ or not
@retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in spec.
@retval EFI_NOT_FOUND Could not allocate pages match the requirement.
@@ -779,10 +803,12 @@ SmmInternalAllocatePages (
IN EFI_ALLOCATE_TYPE Type,
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN NumberOfPages,
- OUT EFI_PHYSICAL_ADDRESS *Memory
+ OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN BOOLEAN NeedGuard
)
{
- return SmmInternalAllocatePagesEx (Type, MemoryType, NumberOfPages, Memory, FALSE);
+ return SmmInternalAllocatePagesEx (Type, MemoryType, NumberOfPages, Memory,
+ FALSE, NeedGuard);
}
/**
@@ -811,8 +837,11 @@ SmmAllocatePages (
)
{
EFI_STATUS Status;
+ BOOLEAN NeedGuard;
- Status = SmmInternalAllocatePages (Type, MemoryType, NumberOfPages, Memory);
+ NeedGuard = IsPageTypeToGuard (MemoryType, Type);
+ Status = SmmInternalAllocatePages (Type, MemoryType, NumberOfPages, Memory,
+ NeedGuard);
if (!EFI_ERROR (Status)) {
SmmCoreUpdateProfile (
(EFI_PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS (0),
@@ -931,6 +960,7 @@ SmmInternalFreePagesEx (
@param[in] Memory Base address of memory being freed.
@param[in] NumberOfPages The number of pages to free.
+ @param[in] IsGuarded Is the memory to free guarded or not.
@retval EFI_NOT_FOUND Could not find the entry that covers the range.
@retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.
@@ -941,9 +971,13 @@ EFI_STATUS
EFIAPI
SmmInternalFreePages (
IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NumberOfPages
+ IN UINTN NumberOfPages,
+ IN BOOLEAN IsGuarded
)
{
+ if (IsGuarded) {
+ return SmmInternalFreePagesExWithGuard (Memory, NumberOfPages, FALSE);
+ }
return SmmInternalFreePagesEx (Memory, NumberOfPages, FALSE);
}
@@ -966,8 +1000,10 @@ SmmFreePages (
)
{
EFI_STATUS Status;
+ BOOLEAN IsGuarded;
- Status = SmmInternalFreePages (Memory, NumberOfPages);
+ IsGuarded = IsHeapGuardEnabled () && IsMemoryGuarded (Memory);
+ Status = SmmInternalFreePages (Memory, NumberOfPages, IsGuarded);
if (!EFI_ERROR (Status)) {
SmmCoreUpdateProfile (
(EFI_PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS (0),
diff --git a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c
index b833763f9a..a0b295b31a 100644
--- a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c
+++ b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c
@@ -506,6 +506,11 @@ SmmEntryPoint (
//
PlatformHookBeforeSmmDispatch ();
+ //
+ // Call memory management hook function
+ //
+ SmmEntryPointMemoryManagementHook ();
+
//
// If a legacy boot has occured, then make sure gSmmCorePrivate is not accessed
//
@@ -699,7 +704,7 @@ SmmMain (
//
gSmmCorePrivate->Smst = &gSmmCoreSmst;
gSmmCorePrivate->SmmEntryPoint = SmmEntryPoint;
-
+
//
// No need to initialize memory service.
// It is done in constructor of PiSmmCoreMemoryAllocationLib(),
diff --git a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h
index 6cc824b047..fbbecfae52 100644
--- a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h
+++ b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h
@@ -33,6 +33,7 @@
#include
#include
#include
+#include <Protocol/SmmMemoryAttribute.h>
#include
#include
@@ -60,6 +61,7 @@
#include
#include "PiSmmCorePrivateData.h"
+#include "HeapGuard.h"
//
// Used to build a table of SMI Handlers that the SMM Core registers
@@ -318,6 +320,7 @@ SmmAllocatePages (
@param NumberOfPages The number of pages to allocate
@param Memory A pointer to receive the base allocated memory
address
+ @param NeedGuard Flag to indicate Guard page is needed or not
@retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in spec.
@retval EFI_NOT_FOUND Could not allocate pages match the requirement.
@@ -331,7 +334,8 @@ SmmInternalAllocatePages (
IN EFI_ALLOCATE_TYPE Type,
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN NumberOfPages,
- OUT EFI_PHYSICAL_ADDRESS *Memory
+ OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN BOOLEAN NeedGuard
);
/**
@@ -357,6 +361,8 @@ SmmFreePages (
@param Memory Base address of memory being freed
@param NumberOfPages The number of pages to free
+ @param IsGuarded Flag to indicate if the memory is guarded
+ or not
@retval EFI_NOT_FOUND Could not find the entry that covers the range
@retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.
@@ -367,7 +373,8 @@ EFI_STATUS
EFIAPI
SmmInternalFreePages (
IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NumberOfPages
+ IN UINTN NumberOfPages,
+ IN BOOLEAN IsGuarded
);
/**
@@ -1255,4 +1262,74 @@ typedef enum {
extern LIST_ENTRY mSmmPoolLists[SmmPoolTypeMax][MAX_POOL_INDEX];
+/**
+ Internal Function. Allocate n pages from given free page node.
+
+ @param Pages The free page node.
+ @param NumberOfPages Number of pages to be allocated.
+ @param MaxAddress Request to allocate memory below this address.
+
+ @return Memory address of allocated pages.
+
+**/
+UINTN
+InternalAllocPagesOnOneNode (
+ IN OUT FREE_PAGE_LIST *Pages,
+ IN UINTN NumberOfPages,
+ IN UINTN MaxAddress
+ );
+
+/**
+ Update SMM memory map entry.
+
+ @param[in] Type The type of allocation to perform.
+ @param[in] Memory The base of memory address.
+ @param[in] NumberOfPages The number of pages to allocate.
+  @param[in]  AddRegion              If this memory is a newly added region.
+**/
+VOID
+ConvertSmmMemoryMapEntry (
+ IN EFI_MEMORY_TYPE Type,
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages,
+ IN BOOLEAN AddRegion
+ );
+
+/**
+ Internal function. Moves any memory descriptors that are on the
+ temporary descriptor stack to heap.
+
+**/
+VOID
+CoreFreeMemoryMapStack (
+ VOID
+ );
+
+/**
+ Frees previous allocated pages.
+
+ @param[in] Memory Base address of memory being freed.
+ @param[in] NumberOfPages The number of pages to free.
+  @param[in]  AddRegion              If this memory is a newly added region.
+
+ @retval EFI_NOT_FOUND Could not find the entry that covers the range.
+ @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.
+ @return EFI_SUCCESS Pages successfully freed.
+
+**/
+EFI_STATUS
+SmmInternalFreePagesEx (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages,
+ IN BOOLEAN AddRegion
+ );
+
+/**
+ Hook function used to set all Guard pages after entering SMM mode.
+**/
+VOID
+SmmEntryPointMemoryManagementHook (
+ VOID
+ );
+
#endif
diff --git a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf
index a01ef7ed57..ead821b78f 100644
--- a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf
+++ b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf
@@ -40,6 +40,7 @@
SmramProfileRecord.c
MemoryAttributesTable.c
SmiHandlerProfile.c
+ HeapGuard.c
[Packages]
MdePkg/MdePkg.dec
@@ -89,6 +90,8 @@
gEfiSmmGpiDispatch2ProtocolGuid ## SOMETIMES_CONSUMES
gEfiSmmIoTrapDispatch2ProtocolGuid ## SOMETIMES_CONSUMES
gEfiSmmUsbDispatch2ProtocolGuid ## SOMETIMES_CONSUMES
+ gEfiSmmCpuProtocolGuid ## SOMETIMES_CONSUMES
+ gEdkiiSmmMemoryAttributeProtocolGuid ## CONSUMES
[Pcd]
gEfiMdeModulePkgTokenSpaceGuid.PcdLoadFixAddressSmmCodePageNumber ## SOMETIMES_CONSUMES
@@ -97,6 +100,9 @@
gEfiMdeModulePkgTokenSpaceGuid.PcdMemoryProfilePropertyMask ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdMemoryProfileDriverPath ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdSmiHandlerProfilePropertyMask ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPageType ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPoolType ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPropertyMask ## CONSUMES
[Guids]
gAprioriGuid ## SOMETIMES_CONSUMES ## File
diff --git a/MdeModulePkg/Core/PiSmmCore/Pool.c b/MdeModulePkg/Core/PiSmmCore/Pool.c
index 36317563c4..e77caa8853 100644
--- a/MdeModulePkg/Core/PiSmmCore/Pool.c
+++ b/MdeModulePkg/Core/PiSmmCore/Pool.c
@@ -144,7 +144,9 @@ InternalAllocPoolByIndex (
Status = EFI_SUCCESS;
Hdr = NULL;
if (PoolIndex == MAX_POOL_INDEX) {
- Status = SmmInternalAllocatePages (AllocateAnyPages, PoolType, EFI_SIZE_TO_PAGES (MAX_POOL_SIZE << 1), &Address);
+ Status = SmmInternalAllocatePages (AllocateAnyPages, PoolType,
+ EFI_SIZE_TO_PAGES (MAX_POOL_SIZE << 1),
+ &Address, FALSE);
if (EFI_ERROR (Status)) {
return EFI_OUT_OF_RESOURCES;
}
@@ -243,6 +245,9 @@ SmmInternalAllocatePool (
EFI_STATUS Status;
EFI_PHYSICAL_ADDRESS Address;
UINTN PoolIndex;
+ BOOLEAN HasPoolTail;
+ BOOLEAN NeedGuard;
+ UINTN NoPages;
Address = 0;
@@ -251,25 +256,47 @@ SmmInternalAllocatePool (
return EFI_INVALID_PARAMETER;
}
+ NeedGuard = IsPoolTypeToGuard (PoolType);
+ HasPoolTail = !(NeedGuard &&
+ ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
+
//
// Adjust the size by the pool header & tail overhead
//
Size += POOL_OVERHEAD;
- if (Size > MAX_POOL_SIZE) {
- Size = EFI_SIZE_TO_PAGES (Size);
- Status = SmmInternalAllocatePages (AllocateAnyPages, PoolType, Size, &Address);
+ if (Size > MAX_POOL_SIZE || NeedGuard) {
+ if (!HasPoolTail) {
+ Size -= sizeof (POOL_TAIL);
+ }
+
+ NoPages = EFI_SIZE_TO_PAGES (Size);
+ Status = SmmInternalAllocatePages (AllocateAnyPages, PoolType, NoPages,
+ &Address, NeedGuard);
if (EFI_ERROR (Status)) {
return Status;
}
+ if (NeedGuard) {
+ ASSERT (VerifyMemoryGuard (Address, NoPages) == TRUE);
+ Address = (EFI_PHYSICAL_ADDRESS)(UINTN)AdjustPoolHeadA (
+ Address,
+ NoPages,
+ Size
+ );
+ }
+
PoolHdr = (POOL_HEADER*)(UINTN)Address;
PoolHdr->Signature = POOL_HEAD_SIGNATURE;
- PoolHdr->Size = EFI_PAGES_TO_SIZE (Size);
+ PoolHdr->Size = Size;
PoolHdr->Available = FALSE;
PoolHdr->Type = PoolType;
- PoolTail = HEAD_TO_TAIL(PoolHdr);
- PoolTail->Signature = POOL_TAIL_SIGNATURE;
- PoolTail->Size = PoolHdr->Size;
+
+ if (HasPoolTail) {
+ PoolTail = HEAD_TO_TAIL (PoolHdr);
+ PoolTail->Signature = POOL_TAIL_SIGNATURE;
+ PoolTail->Size = PoolHdr->Size;
+ }
+
*Buffer = PoolHdr + 1;
return Status;
}
@@ -341,28 +368,47 @@ SmmInternalFreePool (
{
FREE_POOL_HEADER *FreePoolHdr;
POOL_TAIL *PoolTail;
+ BOOLEAN HasPoolTail;
+ BOOLEAN MemoryGuarded;
if (Buffer == NULL) {
return EFI_INVALID_PARAMETER;
}
+ MemoryGuarded = IsHeapGuardEnabled () &&
+ IsMemoryGuarded ((EFI_PHYSICAL_ADDRESS)(UINTN)Buffer);
+ HasPoolTail = !(MemoryGuarded &&
+ ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
+
FreePoolHdr = (FREE_POOL_HEADER*)((POOL_HEADER*)Buffer - 1);
ASSERT (FreePoolHdr->Header.Signature == POOL_HEAD_SIGNATURE);
ASSERT (!FreePoolHdr->Header.Available);
- PoolTail = HEAD_TO_TAIL(&FreePoolHdr->Header);
- ASSERT (PoolTail->Signature == POOL_TAIL_SIGNATURE);
- ASSERT (FreePoolHdr->Header.Size == PoolTail->Size);
-
if (FreePoolHdr->Header.Signature != POOL_HEAD_SIGNATURE) {
return EFI_INVALID_PARAMETER;
}
- if (PoolTail->Signature != POOL_TAIL_SIGNATURE) {
- return EFI_INVALID_PARAMETER;
+ if (HasPoolTail) {
+ PoolTail = HEAD_TO_TAIL (&FreePoolHdr->Header);
+ ASSERT (PoolTail->Signature == POOL_TAIL_SIGNATURE);
+ ASSERT (FreePoolHdr->Header.Size == PoolTail->Size);
+ if (PoolTail->Signature != POOL_TAIL_SIGNATURE) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (FreePoolHdr->Header.Size != PoolTail->Size) {
+ return EFI_INVALID_PARAMETER;
+ }
+ } else {
+ PoolTail = NULL;
}
- if (FreePoolHdr->Header.Size != PoolTail->Size) {
- return EFI_INVALID_PARAMETER;
+ if (MemoryGuarded) {
+ Buffer = AdjustPoolHeadF ((EFI_PHYSICAL_ADDRESS)(UINTN)FreePoolHdr);
+ return SmmInternalFreePages (
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Buffer,
+ EFI_SIZE_TO_PAGES (FreePoolHdr->Header.Size),
+ TRUE
+ );
}
if (FreePoolHdr->Header.Size > MAX_POOL_SIZE) {
@@ -370,7 +416,8 @@ SmmInternalFreePool (
ASSERT ((FreePoolHdr->Header.Size & EFI_PAGE_MASK) == 0);
return SmmInternalFreePages (
(EFI_PHYSICAL_ADDRESS)(UINTN)FreePoolHdr,
- EFI_SIZE_TO_PAGES (FreePoolHdr->Header.Size)
+ EFI_SIZE_TO_PAGES (FreePoolHdr->Header.Size),
+ FALSE
);
}
return InternalFreePoolByIndex (FreePoolHdr, PoolTail);