OvmfPkg/PlatformPei: rewrite page table calculation

Consider 5-level paging.  Simplify calculation to make it easier
to understand.  Add some comments, improve ASSERTs.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Message-Id: <20240214104504.2931339-4-kraxel@redhat.com>
Author:    Gerd Hoffmann <kraxel@redhat.com>
Date:      2024-02-14 11:45:03 +01:00
Committer: mergify[bot]
Parent:    8757e648d1
Commit:    3ad1d7eb7b


@@ -184,9 +184,12 @@ GetPeiMemoryCap (
   BOOLEAN  Page1GSupport;
   UINT32   RegEax;
   UINT32   RegEdx;
-  UINT32   Pml4Entries;
-  UINT32   PdpEntries;
-  UINTN    TotalPages;
+  UINT64   MaxAddr;
+  UINT32   Level5Pages;
+  UINT32   Level4Pages;
+  UINT32   Level3Pages;
+  UINT32   Level2Pages;
+  UINT32   TotalPages;
   UINT64   ApStacks;
   UINT64   MemoryCap;
 
@@ -203,8 +206,7 @@ GetPeiMemoryCap (
   //
   // Dependent on physical address width, PEI memory allocations can be
   // dominated by the page tables built for 64-bit DXE. So we key the cap off
-  // of those. The code below is based on CreateIdentityMappingPageTables() in
-  // "MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c".
+  // of those.
   //
   Page1GSupport = FALSE;
   if (PcdGetBool (PcdUse1GPageTable)) {
@@ -217,24 +219,36 @@ GetPeiMemoryCap (
     }
   }
 
-  if (PlatformInfoHob->PhysMemAddressWidth <= 39) {
-    Pml4Entries = 1;
-    PdpEntries  = 1 << (PlatformInfoHob->PhysMemAddressWidth - 30);
-    ASSERT (PdpEntries <= 0x200);
-  } else {
-    if (PlatformInfoHob->PhysMemAddressWidth > 48) {
-      Pml4Entries = 0x200;
-    } else {
-      Pml4Entries = 1 << (PlatformInfoHob->PhysMemAddressWidth - 39);
-    }
-
-    ASSERT (Pml4Entries <= 0x200);
-    PdpEntries = 512;
-  }
-
-  TotalPages = Page1GSupport ? Pml4Entries + 1 :
-               (PdpEntries + 1) * Pml4Entries + 1;
-  ASSERT (TotalPages <= 0x40201);
+  //
+  // - A 4KB page accommodates the least significant 12 bits of the
+  //   virtual address.
+  // - A page table entry at any level consumes 8 bytes, so a 4KB page
+  //   table page (at any level) contains 512 entries, and
+  //   accommodates 9 bits of the virtual address.
+  // - we minimally cover the phys address space with 2MB pages, so
+  //   level 1 never exists.
+  // - If 1G paging is available, then level 2 doesn't exist either.
+  // - Start with level 2, where a page table page accommodates
+  //   9 + 9 + 12 = 30 bits of the virtual address (and covers 1GB of
+  //   physical address space).
+  //
+  MaxAddr     = LShiftU64 (1, PlatformInfoHob->PhysMemAddressWidth);
+  Level2Pages = (UINT32)RShiftU64 (MaxAddr, 30);
+  Level3Pages = MAX (Level2Pages >> 9, 1u);
+  Level4Pages = MAX (Level3Pages >> 9, 1u);
+  Level5Pages = 1;
+
+  if (Page1GSupport) {
+    Level2Pages = 0;
+    TotalPages  = Level5Pages + Level4Pages + Level3Pages;
+    ASSERT (TotalPages <= 0x40201);
+  } else {
+    TotalPages = Level5Pages + Level4Pages + Level3Pages + Level2Pages;
+
+    // PlatformAddressWidthFromCpuid() caps at 40 phys bits without 1G pages.
+    ASSERT (PlatformInfoHob->PhysMemAddressWidth <= 40);
+    ASSERT (TotalPages <= 0x404);
+  }
 
   //
   // With 32k stacks and 4096 vcpus this lands at 128 MB (far away
@@ -247,7 +261,7 @@ GetPeiMemoryCap (
   //   PhysMemAddressWidth values close to 36 and a small number of
   //   CPUs, the cap will actually be dominated by this increment.
   //
-  MemoryCap = EFI_PAGES_TO_SIZE (TotalPages) + ApStacks + SIZE_64MB;
+  MemoryCap = EFI_PAGES_TO_SIZE ((UINTN)TotalPages) + ApStacks + SIZE_64MB;
   ASSERT (MemoryCap <= MAX_UINT32);
   return (UINT32)MemoryCap;
 }