mirror of https://github.com/acidanthera/audk.git
MdeModulePkg/DxeIpl: Enable paging for Stack Guard
The stack guard feature uses the paging mechanism to detect stack overflow during boot. This patch checks the setting of the PCD PcdCpuStackGuard. If it is TRUE, DxeIpl sets up the page table and marks the page at the stack base as NOT PRESENT. If the stack is used up and a memory access crosses into its last page, a #PF exception is triggered.

Cc: Star Zeng <star.zeng@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Suggested-by: Ayellet Wolman <ayellet.wolman@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Jian J Wang <jian.j.wang@intel.com>
Reviewed-by: Jeff Fan <vanjeff_919@hotmail.com>
Reviewed-by: Jiewen.yao@intel.com
parent: a8ab14d355
commit: 50255363cb
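The guard page set up by this patch is the lowest 4 KB page of the stack; once DxeIpl maps it not-present, any access that reaches it raises #PF. The fault itself is handled outside this patch (by the platform's CPU exception handler). The standalone sketch below, with made-up names and inputs rather than code from this commit, only illustrates the check such a handler could perform:

/* Hypothetical illustration (not part of this commit): decide whether a
   page fault at FaultAddress (read from CR2) hit the stack guard page. */
#include <stdint.h>
#include <stdbool.h>

#define SIZE_4KB  0x1000ULL

bool
IsStackGuardFault (
  uint64_t FaultAddress,   /* faulting linear address, i.e. CR2          */
  uint64_t StackBase,      /* lowest address of the boot stack           */
  uint64_t StackSize       /* total stack size, including the guard page */
  )
{
  /* The guard page is the 4 KB page at StackBase, mapped not-present by
     this patch; the usable stack grows down toward it. */
  return (StackSize > SIZE_4KB) &&
         (FaultAddress >= StackBase) &&
         (FaultAddress <  StackBase + SIZE_4KB);
}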
@@ -49,7 +49,7 @@
 [Sources.X64]
   X64/VirtualMemory.h
   X64/VirtualMemory.c
-  X64/DxeLoadFunc.c
+  X64/DxeLoadFunc.c

 [Sources.IPF]
   Ipf/DxeLoadFunc.c
@@ -117,6 +117,7 @@
   gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMask   ## CONSUMES
   gEfiMdeModulePkgTokenSpaceGuid.PcdNullPointerDetectionPropertyMask   ## CONSUMES
   gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPropertyMask              ## CONSUMES
+  gEfiMdeModulePkgTokenSpaceGuid.PcdCpuStackGuard                      ## CONSUMES

 [Pcd.IA32,Pcd.X64,Pcd.ARM,Pcd.AARCH64]
   gEfiMdeModulePkgTokenSpaceGuid.PcdSetNxForStack                      ## SOMETIMES_CONSUMES
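A platform only gets this behavior if it turns the PCD on in its DSC. A minimal, hypothetical fragment (the section placement and the TRUE value are assumptions about a particular platform, not part of this commit):

[PcdsFixedAtBuild]
  # Ask DxeIpl to set up the stack guard page (MdeModulePkg.dec defaults this to FALSE)
  gEfiMdeModulePkgTokenSpaceGuid.PcdCpuStackGuard|TRUE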
@@ -132,7 +133,7 @@
 #
 # [Hob]
 #   MEMORY_ALLOCATION   ## SOMETIMES_PRODUCES # MEMORY_ALLOCATION_MODULE for DxeCore
-#   MEMORY_ALLOCATION   ## SOMETIMES_PRODUCES # New Stack HoB
+#   MEMORY_ALLOCATION   ## SOMETIMES_PRODUCES # New Stack HoB
 #   MEMORY_ALLOCATION   ## SOMETIMES_PRODUCES # Old Stack HOB
 #
 # [Hob.IPF]
@@ -235,6 +235,10 @@ ToBuildPageTable (
     return TRUE;
   }

+  if (PcdGetBool (PcdCpuStackGuard)) {
+    return TRUE;
+  }
+
   if (PcdGetBool (PcdSetNxForStack) && IsExecuteDisableBitAvailable ()) {
     return TRUE;
   }
@@ -95,6 +95,7 @@ HandOffToDxeCore (
     // for the DxeIpl and the DxeCore are both X64.
     //
     ASSERT (PcdGetBool (PcdSetNxForStack) == FALSE);
+    ASSERT (PcdGetBool (PcdCpuStackGuard) == FALSE);
   }

   //
@@ -117,6 +117,39 @@ EnableExecuteDisableBit (
   AsmWriteMsr64 (0xC0000080, MsrRegisters);
 }

+/**
+  The function will check if page table entry should be splitted to smaller
+  granularity.
+
+  @retval TRUE      Page table should be split.
+  @retval FALSE     Page table should not be split.
+**/
+BOOLEAN
+ToSplitPageTable (
+  IN EFI_PHYSICAL_ADDRESS               Address,
+  IN UINTN                              Size,
+  IN EFI_PHYSICAL_ADDRESS               StackBase,
+  IN UINTN                              StackSize
+  )
+{
+  if (IsNullDetectionEnabled () && Address == 0) {
+    return TRUE;
+  }
+
+  if (PcdGetBool (PcdCpuStackGuard)) {
+    if (StackBase >= Address && StackBase < (Address + Size)) {
+      return TRUE;
+    }
+  }
+
+  if (PcdGetBool (PcdSetNxForStack)) {
+    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
+      return TRUE;
+    }
+  }
+
+  return FALSE;
+}
 /**
   Split 2M page to 4K.
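The checks above are plain interval arithmetic. The standalone sketch below replays them with concrete, made-up numbers to show when a 2 MB page has to be split; the PcdGetBool () calls are replaced by constants since this is not built inside EDK II:

/* Standalone illustration of the ToSplitPageTable overlap checks. */
#include <stdint.h>
#include <stdio.h>

#define SIZE_2MB  0x200000ULL

int
main (void)
{
  uint64_t StackBase = 0x7F345000ULL;  /* hypothetical stack base         */
  uint64_t StackSize = 0x20000ULL;     /* hypothetical 128 KB stack       */
  uint64_t Address   = 0x7F200000ULL;  /* start of the 2 MB page examined */
  uint64_t Size      = SIZE_2MB;

  /* Stack guard: split if the guard page (at StackBase) lies inside this page. */
  int SplitForGuard = (StackBase >= Address) && (StackBase < Address + Size);

  /* NX stack: split if the page overlaps the stack range at all. */
  int SplitForNx = (Address < StackBase + StackSize) && (Address + Size > StackBase);

  printf ("guard split: %d, NX split: %d\n", SplitForGuard, SplitForNx);  /* 1, 1 */
  return 0;
}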
@@ -160,7 +193,8 @@ Split2MPageTo4K (
     PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
     PageTableEntry->Bits.ReadWrite = 1;

-    if (IsNullDetectionEnabled () && PhysicalAddress4K == 0) {
+    if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||
+        (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {
       PageTableEntry->Bits.Present = 0;
     } else {
       PageTableEntry->Bits.Present = 1;
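Only the single 4 KB page at StackBase becomes not-present, so the usable stack effectively shrinks by one page. With the same hypothetical numbers as in the sketch above:

/* Hypothetical layout: a 128 KB stack based at 0x7F345000 (4 KB aligned).        */
/* Guard page:   [0x7F345000, 0x7F346000)  mapped not-present by Split2MPageTo4K  */
/* Usable stack: [0x7F346000, 0x7F365000)  128 KB - 4 KB = 124 KB                 */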
@@ -214,10 +248,7 @@ Split1GPageTo2M (

   PhysicalAddress2M = PhysicalAddress;
   for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
-    if ((IsNullDetectionEnabled () && PhysicalAddress2M == 0)
-        || (PcdGetBool (PcdSetNxForStack)
-            && (PhysicalAddress2M < StackBase + StackSize)
-            && ((PhysicalAddress2M + SIZE_2MB) > StackBase))) {
+    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {
       //
       // Need to split this 2M page that covers NULL or stack range.
       //
@@ -359,10 +390,7 @@ CreateIdentityMappingPageTables (
       PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

       for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
-        if ((IsNullDetectionEnabled () && PageAddress == 0)
-            || (PcdGetBool (PcdSetNxForStack)
-                && (PageAddress < StackBase + StackSize)
-                && ((PageAddress + SIZE_1GB) > StackBase))) {
+        if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {
           Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
         } else {
           //
@@ -391,10 +419,7 @@ CreateIdentityMappingPageTables (
         PageDirectoryPointerEntry->Bits.Present = 1;

         for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
-          if ((IsNullDetectionEnabled () && PageAddress == 0)
-              || (PcdGetBool (PcdSetNxForStack)
-                  && (PageAddress < StackBase + StackSize)
-                  && ((PageAddress + SIZE_2MB) > StackBase))) {
+          if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {
             //
             // Need to split this 2M page that covers NULL or stack range.
             //