UefiCpuPkg/PiSmmCpuDxeSmm: Add support for PCD PcdPteMemoryEncryptionAddressOrMask

This PCD holds the address mask for page table entries when memory
encryption is enabled on AMD processors supporting the Secure Encrypted
Virtualization (SEV) feature.

The mask is applied when page table entries are created or modified.
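
The PCD value is cached in a module-global, mAddressEncMask. As an
illustration (variable names other than mAddressEncMask are generic
placeholders, not quotes from the diff below), the two patterns are:

  //
  // Creating or updating an entry: OR the encryption mask into the PTE.
  //
  PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;

  //
  // Walking the tables: strip the mask (together with the attribute bits)
  // before using the entry as a pointer to the next table level.
  //
  NextTable = (UINT64 *)(UINTN)(PageTable[Index] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);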

Cc: Jeff Fan <jeff.fan@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Leo Duran <leo.duran@amd.com>
Reviewed-by: Jeff Fan <jeff.fan@intel.com>
Leo Duran, 2017-02-27 01:43:07 +08:00, committed by Star Zeng
parent ab1a5a58c9
commit 241f914975
9 changed files with 91 additions and 125 deletions


@@ -2,6 +2,8 @@
 Page table manipulation functions for IA-32 processors
 
 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution. The full text of the license may be found at
@@ -204,7 +206,7 @@ SetPageTableAttributes (
     PageTableSplitted = (PageTableSplitted || IsSplitted);
     for (Index3 = 0; Index3 < 4; Index3++) {
-      L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+      L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
       if (L2PageTable == NULL) {
         continue;
       }
@@ -217,7 +219,7 @@ SetPageTableAttributes (
         // 2M
         continue;
       }
-      L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+      L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
       if (L1PageTable == NULL) {
         continue;
       }


@@ -2,6 +2,8 @@
 SMM MP service implementation
 
 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution. The full text of the license may be found at
@@ -781,7 +783,8 @@ Gen4GPageTable (
   // Set Page Directory Pointers
   //
   for (Index = 0; Index < 4; Index++) {
-    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
+    Pte[Index] = (UINT64)((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
+                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
   }
   Pte += EFI_PAGE_SIZE / sizeof (*Pte);
@@ -789,7 +792,7 @@ Gen4GPageTable (
   // Fill in Page Directory Entries
   //
   for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
-    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
   }
   if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
@@ -797,8 +800,8 @@ Gen4GPageTable (
     GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
     Pdpte = (UINT64*)PageTable;
     for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
-      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
-      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
+      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
+      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
       //
       // Fill in Page Table Entries
       //
@@ -809,13 +812,13 @@ Gen4GPageTable (
           //
           // Mark the guard page as non-present
           //
-          Pte[Index] = PageAddress;
+          Pte[Index] = PageAddress | mAddressEncMask;
           GuardPage += mSmmStackSize;
           if (GuardPage > mSmmStackArrayEnd) {
             GuardPage = 0;
           }
         } else {
-          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
+          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
         }
         PageAddress+= EFI_PAGE_SIZE;
       }
@@ -826,74 +829,6 @@ Gen4GPageTable (
   return (UINT32)(UINTN)PageTable;
 }
-/**
-  Set memory cache ability.
-
-  @param  PageTable     PageTable Address
-  @param  Address       Memory Address to change cache ability
-  @param  Cacheability  Cache ability to set
-**/
-VOID
-SetCacheability (
-  IN  UINT64  *PageTable,
-  IN  UINTN   Address,
-  IN  UINT8   Cacheability
-  )
-{
-  UINTN   PTIndex;
-  VOID    *NewPageTableAddress;
-  UINT64  *NewPageTable;
-  UINTN   Index;
-
-  ASSERT ((Address & EFI_PAGE_MASK) == 0);
-
-  if (sizeof (UINTN) == sizeof (UINT64)) {
-    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
-    ASSERT (PageTable[PTIndex] & IA32_PG_P);
-    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
-  }
-
-  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
-  ASSERT (PageTable[PTIndex] & IA32_PG_P);
-  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
-
-  //
-  // A perfect implementation should check the original cacheability with the
-  // one being set, and break a 2M page entry into pieces only when they
-  // disagreed.
-  //
-  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
-  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
-    //
-    // Allocate a page from SMRAM
-    //
-    NewPageTableAddress = AllocatePageTableMemory (1);
-    ASSERT (NewPageTableAddress != NULL);
-
-    NewPageTable = (UINT64 *)NewPageTableAddress;
-
-    for (Index = 0; Index < 0x200; Index++) {
-      NewPageTable[Index] = PageTable[PTIndex];
-      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
-        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
-        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
-      }
-      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
-    }
-
-    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
-  }
-
-  ASSERT (PageTable[PTIndex] & IA32_PG_P);
-  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
-
-  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
-  ASSERT (PageTable[PTIndex] & IA32_PG_P);
-  PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
-  PageTable[PTIndex] |= (UINT64)Cacheability;
-}
-
 /**
   Schedule a procedure to run on the specified CPU.

UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c (Normal file → Executable file)

@@ -2,6 +2,8 @@
 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
 
 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution. The full text of the license may be found at
@@ -96,6 +98,11 @@ BOOLEAN mSmmReadyToLock = FALSE;
 //
 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
 
+//
+// Global copy of the PcdPteMemoryEncryptionAddressOrMask
+//
+UINT64 mAddressEncMask = 0;
+
 //
 // Spin lock used to serialize setting of SMM Code Access Check feature
 //
@@ -604,6 +611,13 @@ PiCpuSmmEntry (
   mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
   DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
 
+  //
+  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
+  // Make sure AddressEncMask is contained to smallest supported address field.
+  //
+  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
+  DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
+
   //
   // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
   //
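
[Editorial note, not part of the commit: PcdPteMemoryEncryptionAddressOrMask
is expected to be produced by the platform before PiSmmCpuDxeSmm runs. Below
is a minimal sketch of how an SEV-aware platform PEIM might derive and set
the mask; it assumes only the architectural AMD interfaces (CPUID Fn8000_001F
and the SEV_STATUS MSR) and is not taken from this commit or from any
particular platform's code.]

  #include <Library/BaseLib.h>
  #include <Library/PcdLib.h>

  #define CPUID_MEMORY_ENCRYPTION_INFO  0x8000001F
  #define MSR_SEV_STATUS                0xC0010131

  VOID
  SetMemoryEncryptionMaskPcd (
    VOID
    )
  {
    UINT32  RegEax;
    UINT32  RegEbx;
    UINT64  EncryptionMask;

    //
    // CPUID Fn8000_001F: EAX[1] reports SEV support, EBX[5:0] gives the
    // position of the C-bit (encryption bit) in a page table entry.
    //
    AsmCpuid (CPUID_MEMORY_ENCRYPTION_INFO, &RegEax, &RegEbx, NULL, NULL);
    if ((RegEax & BIT1) == 0) {
      return;
    }

    //
    // Only publish the mask when SEV is actually enabled for this guest.
    //
    if ((AsmReadMsr64 (MSR_SEV_STATUS) & BIT0) == 0) {
      return;
    }

    EncryptionMask = LShiftU64 (1, RegEbx & 0x3F);
    PcdSet64S (PcdPteMemoryEncryptionAddressOrMask, EncryptionMask);
  }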


@@ -2,6 +2,8 @@
 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
 
 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution. The full text of the license may be found at
@@ -184,7 +186,6 @@ extern EFI_SMM_CPU_PROTOCOL mSmmCpu;
 ///
 extern UINT8 mSmmSaveStateRegisterLma;
 
-//
 //
 // SMM CPU Protocol function prototypes.
 //
@@ -415,6 +416,11 @@ extern SPIN_LOCK *mPFLock;
 extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;
 extern SPIN_LOCK *mMemoryMappedLock;
 
+//
+// Copy of the PcdPteMemoryEncryptionAddressOrMask
+//
+extern UINT64 mAddressEncMask;
+
 /**
   Create 4G PageTable in SMRAM.


@@ -5,6 +5,7 @@
 # provides CPU specific services in SMM.
 #
 # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
 #
 # This program and the accompanying materials
 # are licensed and made available under the terms and conditions of the BSD License
@@ -157,6 +158,7 @@
   gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode                          ## CONSUMES
   gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStaticPageTable                   ## CONSUMES
   gEfiMdeModulePkgTokenSpaceGuid.PcdAcpiS3Enable                       ## CONSUMES
+  gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMask   ## CONSUMES
 
 [Depex]
   gEfiMpServiceProtocolGuid


@@ -119,7 +119,7 @@ GetPageTableEntry (
       return NULL;
     }
-    L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+    L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
   } else {
     L3PageTable = (UINT64 *)GetPageTableBase ();
   }
@@ -133,7 +133,7 @@ GetPageTableEntry (
     return &L3PageTable[Index3];
   }
-  L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+  L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
   if (L2PageTable[Index2] == 0) {
     *PageAttribute = PageNone;
     return NULL;
@@ -145,7 +145,7 @@ GetPageTableEntry (
   }
   // 4k
-  L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+  L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
   if ((L1PageTable[Index1] == 0) && (Address != 0)) {
     *PageAttribute = PageNone;
     return NULL;
@@ -304,9 +304,9 @@ SplitPage (
     }
     BaseAddress = *PageEntry & PAGING_2M_ADDRESS_MASK_64;
     for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
-      NewPageEntry[Index] = BaseAddress + SIZE_4KB * Index + ((*PageEntry) & PAGE_PROGATE_BITS);
+      NewPageEntry[Index] = (BaseAddress + SIZE_4KB * Index) | mAddressEncMask | ((*PageEntry) & PAGE_PROGATE_BITS);
     }
-    (*PageEntry) = (UINT64)(UINTN)NewPageEntry + PAGE_ATTRIBUTE_BITS;
+    (*PageEntry) = (UINT64)(UINTN)NewPageEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
     return RETURN_SUCCESS;
   } else {
     return RETURN_UNSUPPORTED;
@@ -325,9 +325,9 @@ SplitPage (
     }
     BaseAddress = *PageEntry & PAGING_1G_ADDRESS_MASK_64;
     for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
-      NewPageEntry[Index] = BaseAddress + SIZE_2MB * Index + IA32_PG_PS + ((*PageEntry) & PAGE_PROGATE_BITS);
+      NewPageEntry[Index] = (BaseAddress + SIZE_2MB * Index) | mAddressEncMask | IA32_PG_PS | ((*PageEntry) & PAGE_PROGATE_BITS);
     }
-    (*PageEntry) = (UINT64)(UINTN)NewPageEntry + PAGE_ATTRIBUTE_BITS;
+    (*PageEntry) = (UINT64)(UINTN)NewPageEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
     return RETURN_SUCCESS;
   } else {
     return RETURN_UNSUPPORTED;


@@ -2,6 +2,8 @@
 Enable SMM profile.
 
 Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution. The full text of the license may be found at
@@ -513,7 +515,7 @@ InitPaging (
         //
         continue;
       }
-      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);
+      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
     } else {
       Pde = (UINT64*)(UINTN)mSmmProfileCr3;
     }
@@ -530,7 +532,7 @@ InitPaging (
        //
        continue;
      }
-      Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
+      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
@@ -557,9 +559,9 @@ InitPaging (
          // Split it
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {
-            Pt[Level4] = Address + ((Level4 << 12) | PAGE_ATTRIBUTE_BITS);
+            Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
          } // end for PT
-          *Pte = (UINTN)Pt | PAGE_ATTRIBUTE_BITS;
+          *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        } // end if IsAddressSplit
      } // end for PTE
    } // end for PDE
@@ -577,7 +579,7 @@ InitPaging (
        //
        continue;
      }
-      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);
+      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
@@ -597,7 +599,7 @@ InitPaging (
        }
        continue;
      }
-      Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
+      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
@@ -624,7 +626,7 @@ InitPaging (
        }
      } else {
        // 4KB page
-        Pt = (UINT64 *)(UINTN)(*Pte & PHYSICAL_ADDRESS_MASK);
+        Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pt == 0) {
          continue;
        }


@@ -2,6 +2,8 @@
 Page Fault (#PF) handler for X64 processors
 
 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution. The full text of the license may be found at
@@ -16,6 +18,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
 #define PAGE_TABLE_PAGES 8
 #define ACC_MAX_BIT      BIT3
+
 LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
 BOOLEAN    m1GPageTableSupport = FALSE;
 UINT8      mPhysicalAddressBits;
@@ -168,13 +171,13 @@ SetStaticPageTable (
     //
     // Each PML4 entry points to a page of Page Directory Pointer entries.
     //
-    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);
+    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
     if (PageDirectoryPointerEntry == NULL) {
       PageDirectoryPointerEntry = AllocatePageTableMemory (1);
       ASSERT(PageDirectoryPointerEntry != NULL);
       ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
-      *PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask) | PAGE_ATTRIBUTE_BITS;
+      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
     }
     if (m1GPageTableSupport) {
@@ -189,7 +192,7 @@ SetStaticPageTable (
         //
         // Fill in the Page Directory entries
         //
-        *PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
       }
     } else {
       PageAddress = BASE_4GB;
@@ -204,7 +207,7 @@ SetStaticPageTable (
         // Each Directory Pointer entries points to a page of Page Directory entires.
         // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
         //
-        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);
+        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
         if (PageDirectoryEntry == NULL) {
           PageDirectoryEntry = AllocatePageTableMemory (1);
           ASSERT(PageDirectoryEntry != NULL);
@@ -213,14 +216,14 @@ SetStaticPageTable (
           //
           // Fill in a Page Directory Pointer Entries
           //
-          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;
+          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
         }
         for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
           //
           // Fill in the Page Directory entries
           //
-          *PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
         }
       }
     }
@@ -276,7 +279,7 @@ SmmInitPageTable (
   //
   PTEntry = (UINT64*)AllocatePageTableMemory (1);
   ASSERT (PTEntry != NULL);
-  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
+  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
   ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
   //
@@ -457,7 +460,7 @@ ReclaimPages (
       //
       continue;
     }
-    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
+    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
     PML4EIgnore = FALSE;
     for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
       if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
@@ -478,7 +481,7 @@ ReclaimPages (
        // we will not check PML4 entry more
        //
        PML4EIgnore = TRUE;
-        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
+        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
@@ -560,7 +563,7 @@ ReclaimPages (
   //
   // Secondly, insert the page pointed by this entry into page pool and clear this entry
   //
-  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
+  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
   *ReleasePageAddress = 0;
   //
@@ -572,14 +575,14 @@ ReclaimPages (
     //
     // If 4 KByte Page Table is released, check the PDPT entry
     //
-    Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
+    Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
     SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
     if (SubEntriesNum == 0) {
       //
       // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
       // clear the Page directory entry
       //
-      InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
+      InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
       Pdpt[MinPdpt] = 0;
       //
       // Go on checking the PML4 table
@@ -603,7 +606,7 @@ ReclaimPages (
       // Release the empty PML4 table if there was no more 1G KByte Page Table entry
       // clear the Page directory entry
       //
-      InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
+      InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
       Pml4[MinPml4] = 0;
       MinPdpt = (UINTN)-1;
       continue;
@@ -747,7 +750,7 @@ SmiDefaultPFHandler (
       //
       // If the entry is not present, allocate one page from page pool for it
       //
-      PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
+      PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
     } else {
       //
       // Save the upper entry address
@@ -760,7 +763,7 @@ SmiDefaultPFHandler (
     //
     PageTable[PTIndex] |= (UINT64)IA32_PG_A;
     SetAccNum (PageTable + PTIndex, 7);
-    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
   }
   PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
@@ -776,7 +779,7 @@ SmiDefaultPFHandler (
     //
     // Fill the new entry
     //
-    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
+    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                          PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
     if (UpperEntry != NULL) {
       SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
@@ -927,7 +930,7 @@ SetPageTableAttributes (
     PageTableSplitted = (PageTableSplitted || IsSplitted);
     for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
-      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
       if (L3PageTable == NULL) {
         continue;
       }
@@ -940,7 +943,7 @@ SetPageTableAttributes (
        // 1G
        continue;
      }
-      L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+      L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L2PageTable == NULL) {
        continue;
      }
@@ -953,7 +956,7 @@ SetPageTableAttributes (
        // 2M
        continue;
      }
-      L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+      L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
      if (L1PageTable == NULL) {
        continue;
      }


@@ -2,6 +2,8 @@
 X64 processor specific functions to enable SMM profile.
 
 Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution. The full text of the license may be found at
@@ -52,7 +54,7 @@ InitSmmS3Cr3 (
   //
   PTEntry = (UINT64*)AllocatePageTableMemory (1);
   ASSERT (PTEntry != NULL);
-  *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
+  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
   ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
   //
@@ -111,14 +113,14 @@ AcquirePage (
   //
   // Cut the previous uplink if it exists and wasn't overwritten
   //
-  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & PHYSICAL_ADDRESS_MASK) == Address)) {
+  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
     *mPFPageUplink[mPFPageIndex] = 0;
   }
   //
   // Link & Record the current uplink
   //
-  *Uplink = Address | PAGE_ATTRIBUTE_BITS;
+  *Uplink = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
   mPFPageUplink[mPFPageIndex] = Uplink;
   mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
@@ -168,33 +170,33 @@ RestorePageTableAbove4G (
   PTIndex = BitFieldRead64 (PFAddress, 39, 47);
   if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
     // PML4E
-    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
     PTIndex = BitFieldRead64 (PFAddress, 30, 38);
     if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
       // PDPTE
-      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
       PTIndex = BitFieldRead64 (PFAddress, 21, 29);
       // PD
       if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
         //
         // 2MB page
         //
-        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
-        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
+        Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+        if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
           Existed = TRUE;
         }
       } else {
         //
         // 4KB page
         //
-        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
        if (PageTable != 0) {
          //
          // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
          //
          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
-          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
-          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+          Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+          if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
            Existed = TRUE;
          }
        }
@@ -227,13 +229,13 @@ RestorePageTableAbove4G (
     PFAddress = AsmReadCr2 ();
     // PML4E
     PTIndex = BitFieldRead64 (PFAddress, 39, 47);
-    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
     // PDPTE
     PTIndex = BitFieldRead64 (PFAddress, 30, 38);
-    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
     // PD
     PTIndex = BitFieldRead64 (PFAddress, 21, 29);
-    Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;
+    Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
     //
     // Check if 2MB-page entry need be changed to 4KB-page entry.
     //
@@ -241,9 +243,9 @@ RestorePageTableAbove4G (
       AcquirePage (&PageTable[PTIndex]);
       // PTE
-      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
       for (Index = 0; Index < 512; Index++) {
-        PageTable[Index] = Address | PAGE_ATTRIBUTE_BITS;
+        PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
         if (!IsAddressValid (Address, &Nx)) {
           PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
         }