OvmfPkg/BaseMemEncryptSevLib: Enable protection for newly added page table

Commit 2ac1730bf2 (MdeModulePkg/DxeIpl: Mark page table as read-only)
sets the memory pages used for the page table as read-only after paging
is set up, and sets CR0.WP to protect the read-only pages from being
modified by the CPU. The commit causes a #PF when
MemEncryptSevClearPageEncMask() or MemEncryptSevSetPageEncMask() tries
to change the page-table attributes.

This patch takes a similar approach to commit 147fd35c3e
(UefiCpuPkg/CpuDxe: Enable protection for newly added page table):
when page table protection is enabled, we disable it temporarily before
changing the page table attributes.
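
In outline, each page-table update is wrapped in a save/disable/restore
of the CR0.WP bit; a minimal sketch using the helper functions this
patch adds:

  BOOLEAN IsWpEnabled;

  //
  // Remember whether CR0.WP was set, and clear it so that the read-only
  // page-table pages become writable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  // ... modify page-table entries here ...

  //
  // Restore CR0.WP, if it was set, so the page tables are protected again.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }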

This patch uses the same approach as commit 2ac1730bf2
(MdeModulePkg/DxeIpl: Mark page table as read-only) of allocating page
table memory from a reserved memory pool, which helps reduce potential
page-table "split" operations (see InitializePageTablePool() below).

The patch duplicates code from commit 147fd35c3e. The duplication will
be removed once a common page table manipulation library is implemented.
See bugzilla https://bugzilla.tianocore.org/show_bug.cgi?id=847.

Cc: Jian J Wang <jian.j.wang@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Laszlo Ersek <lersek@redhat.com>
Author:    Brijesh Singh <brijesh.singh@amd.com>
Date:      2018-01-11 13:51:01 -06:00
Committer: Laszlo Ersek
parent 84513df419
commit b721aa749b
2 changed files with 399 additions and 7 deletions

OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.c

@@ -25,6 +25,7 @@ Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
STATIC BOOLEAN mAddressEncMaskChecked = FALSE;
STATIC UINT64 mAddressEncMask;
STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;
typedef enum {
SetCBit,
@@ -62,6 +63,123 @@ GetMemEncryptionAddressMask (
return mAddressEncMask;
}
/**
Initialize a buffer pool for page table use only.
To reduce potential page table split operations, the pages reserved for the
page table should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
initialized with a number of pages greater than or equal to the given
PoolPages.
Once the pages in the pool are used up, this method should be called again to
reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. In practice this rarely
happens.
@param[in] PoolPages The minimum number of pages for the pool to be created.
@retval TRUE The pool was initialized successfully.
@retval FALSE Out of memory; the pool could not be created.
**/
STATIC
BOOLEAN
InitializePageTablePool (
IN UINTN PoolPages
)
{
VOID *Buffer;
//
// Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
// header.
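// For example, a request for 5 pages becomes 6 after adding the header
// page, and is then rounded up to PAGE_TABLE_POOL_UNIT_PAGES (512 pages,
// i.e. one 2MB unit).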
//
PoolPages += 1; // Add one page for header.
PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
PAGE_TABLE_POOL_UNIT_PAGES;
Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
if (Buffer == NULL) {
DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
return FALSE;
}
//
// Link all pools into a list for easier tracking later.
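// The pools form a circular singly-linked list; mPageTablePool always
// points to the most recently created pool, from which allocations are
// served.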
//
if (mPageTablePool == NULL) {
mPageTablePool = Buffer;
mPageTablePool->NextPool = mPageTablePool;
} else {
((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
mPageTablePool->NextPool = Buffer;
mPageTablePool = Buffer;
}
//
// Reserve one page for pool header.
//
mPageTablePool->FreePages = PoolPages - 1;
mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
return TRUE;
}
/**
This API provides a way to allocate memory for page tables. It can be called
more than once as additional page table memory is needed.
Allocates the number of 4KB pages and returns a pointer to the allocated
buffer. The buffer returned is aligned on a 4KB boundary.
If Pages is 0, then NULL is returned.
If there is not enough memory remaining to satisfy the request, then NULL is
returned.
@param Pages The number of 4 KB pages to allocate.
@return A pointer to the allocated buffer or NULL if allocation fails.
**/
STATIC
VOID *
EFIAPI
AllocatePageTableMemory (
IN UINTN Pages
)
{
VOID *Buffer;
if (Pages == 0) {
return NULL;
}
//
// Renew the pool if necessary.
//
if (mPageTablePool == NULL ||
Pages > mPageTablePool->FreePages) {
if (!InitializePageTablePool (Pages)) {
return NULL;
}
}
Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
mPageTablePool->FreePages -= Pages;
DEBUG ((
DEBUG_VERBOSE,
"%a:%a: Buffer=0x%Lx Pages=%ld\n",
gEfiCallerBaseName,
__FUNCTION__,
Buffer,
Pages
));
return Buffer;
}
/**
Split 2M page to 4K.
@@ -85,7 +203,7 @@ Split2MPageTo4K (
PAGE_TABLE_4K_ENTRY *PageTableEntry, *PageTableEntry1;
UINT64 AddressEncMask;
-PageTableEntry = AllocatePages(1);
+PageTableEntry = AllocatePageTableMemory(1);
PageTableEntry1 = PageTableEntry;
@@ -116,6 +234,179 @@ Split2MPageTo4K (
*PageEntry2M = (UINT64) (UINTN) PageTableEntry1 | IA32_PG_P | IA32_PG_RW | AddressEncMask;
}
/**
Set one page of page table pool memory to be read-only.
@param[in] PageTableBase Base address of page table (CR3).
@param[in] Address Start address of a page to be set as read-only.
@param[in] Level4Paging Level 4 paging flag.
**/
STATIC
VOID
SetPageTablePoolReadOnly (
IN UINTN PageTableBase,
IN EFI_PHYSICAL_ADDRESS Address,
IN BOOLEAN Level4Paging
)
{
UINTN Index;
UINTN EntryIndex;
UINT64 AddressEncMask;
EFI_PHYSICAL_ADDRESS PhysicalAddress;
UINT64 *PageTable;
UINT64 *NewPageTable;
UINT64 PageAttr;
UINT64 LevelSize[5];
UINT64 LevelMask[5];
UINTN LevelShift[5];
UINTN Level;
UINT64 PoolUnitSize;
ASSERT (PageTableBase != 0);
//
// Since the page table is always from page table pool, which is always
// located at the boundary of PAGE_TABLE_POOL_ALIGNMENT, we just need to
// set the whole pool unit to be read-only.
//
Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;
LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;
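//
// Note: a level-4 (PML4) entry can never map a large page (the PS bit is
// reserved there), so LevelMask[4] is never actually used to extract a
// physical address.
//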
LevelSize[1] = SIZE_4KB;
LevelSize[2] = SIZE_2MB;
LevelSize[3] = SIZE_1GB;
LevelSize[4] = SIZE_512GB;
AddressEncMask = GetMemEncryptionAddressMask() &
PAGING_1G_ADDRESS_MASK_64;
PageTable = (UINT64 *)(UINTN)PageTableBase;
PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;
for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
Index &= PAGING_PAE_INDEX_MASK;
PageAttr = PageTable[Index];
if ((PageAttr & IA32_PG_PS) == 0) {
//
// Go to next level of table.
//
PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
PAGING_4K_ADDRESS_MASK_64);
continue;
}
if (PoolUnitSize >= LevelSize[Level]) {
//
// Clear R/W bit if current page granularity is not larger than pool unit
// size.
//
if ((PageAttr & IA32_PG_RW) != 0) {
while (PoolUnitSize > 0) {
//
// PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
// one page (2MB). Then we don't need to update attributes for pages
// crossing page directory. ASSERT below is for that purpose.
//
ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));
PageTable[Index] &= ~(UINT64)IA32_PG_RW;
PoolUnitSize -= LevelSize[Level];
++Index;
}
}
break;
} else {
//
// A smaller page granularity is needed, so the large page must be split.
//
ASSERT (Level > 1);
NewPageTable = AllocatePageTableMemory (1);
ASSERT (NewPageTable != NULL);
PhysicalAddress = PageAttr & LevelMask[Level];
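//
// Fill the new table with entries that reproduce the original large-page
// mapping at the next smaller page granularity.
//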
for (EntryIndex = 0;
EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
++EntryIndex) {
NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
IA32_PG_P | IA32_PG_RW;
if (Level > 2) {
NewPageTable[EntryIndex] |= IA32_PG_PS;
}
PhysicalAddress += LevelSize[Level - 1];
}
PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
IA32_PG_P | IA32_PG_RW;
PageTable = NewPageTable;
}
}
}
/**
Prevent the memory pages used for page table from being overwritten.
@param[in] PageTableBase Base address of page table (CR3).
@param[in] Level4Paging Level 4 paging flag.
**/
STATIC
VOID
EnablePageTableProtection (
IN UINTN PageTableBase,
IN BOOLEAN Level4Paging
)
{
PAGE_TABLE_POOL *HeadPool;
PAGE_TABLE_POOL *Pool;
UINT64 PoolSize;
EFI_PHYSICAL_ADDRESS Address;
if (mPageTablePool == NULL) {
return;
}
//
// SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
// remember the original one in advance.
//
HeadPool = mPageTablePool;
Pool = HeadPool;
do {
Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
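//
// Offset covers the header page plus all pages handed out so far;
// FreePages covers the remainder, so their sum is the full pool size.
//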
//
// The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
// which is one of the page sizes supported by the processor (2MB by
// default). Let's apply the protection to them one by one.
//
while (PoolSize > 0) {
SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
Address += PAGE_TABLE_POOL_UNIT_SIZE;
PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
}
Pool = Pool->NextPool;
} while (Pool != HeadPool);
}
/**
Split 1G page to 2M.
@@ -139,7 +430,7 @@ Split1GPageTo2M (
PAGE_TABLE_ENTRY *PageDirectoryEntry;
UINT64 AddressEncMask;
-PageDirectoryEntry = AllocatePages(1);
+PageDirectoryEntry = AllocatePageTableMemory(1);
AddressEncMask = GetMemEncryptionAddressMask ();
ASSERT (PageDirectoryEntry != NULL);
@@ -194,6 +485,47 @@ SetOrClearCBit(
}
/**
Check the WP status in CR0 register. This bit is used to lock or unlock write
access to pages marked as read-only.
@retval TRUE Write protection is enabled.
@retval FALSE Write protection is disabled.
**/
STATIC
BOOLEAN
IsReadOnlyPageWriteProtected (
VOID
)
{
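//
// CR0.WP is bit 16; when it is set, supervisor-mode writes to read-only
// pages cause a page fault.
//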
return ((AsmReadCr0 () & BIT16) != 0);
}
/**
Disable Write Protect on pages marked as read-only.
**/
STATIC
VOID
DisableReadOnlyPageWriteProtect (
VOID
)
{
AsmWriteCr0 (AsmReadCr0() & ~BIT16);
}
/**
Enable Write Protect on pages marked as read-only.
**/
VOID
EnableReadOnlyPageWriteProtect (
VOID
)
{
AsmWriteCr0 (AsmReadCr0() | BIT16);
}
/**
This function either sets or clears memory encryption bit for the memory region
specified by PhysicalAddress and length from the current page table context.
@@ -238,6 +570,8 @@ SetMemoryEncDec (
PAGE_TABLE_4K_ENTRY *PageTableEntry;
UINT64 PgTableMask;
UINT64 AddressEncMask;
BOOLEAN IsWpEnabled;
RETURN_STATUS Status;
DEBUG ((
DEBUG_VERBOSE,
@@ -274,6 +608,16 @@ SetMemoryEncDec (
WriteBackInvalidateDataCacheRange((VOID*) (UINTN)PhysicalAddress, Length);
}
//
// Make sure that the page table is changeable.
//
IsWpEnabled = IsReadOnlyPageWriteProtected ();
if (IsWpEnabled) {
DisableReadOnlyPageWriteProtect ();
}
Status = EFI_SUCCESS;
while (Length)
{
//
@@ -293,7 +637,8 @@
__FUNCTION__,
PhysicalAddress
));
-return RETURN_NO_MAPPING;
+Status = RETURN_NO_MAPPING;
+goto Done;
}
PageDirectory1GEntry = (VOID*) ((PageMapLevel4Entry->Bits.PageTableBaseAddress<<12) & ~PgTableMask);
@@ -306,7 +651,8 @@
__FUNCTION__,
PhysicalAddress
));
-return RETURN_NO_MAPPING;
+Status = RETURN_NO_MAPPING;
+goto Done;
}
//
@@ -357,7 +703,8 @@
__FUNCTION__,
PhysicalAddress
));
-return RETURN_NO_MAPPING;
+Status = RETURN_NO_MAPPING;
+goto Done;
}
//
// If the MustBe1 bit is not a 1, it's not a 2MB entry
@@ -397,7 +744,8 @@
__FUNCTION__,
PhysicalAddress
));
-return RETURN_NO_MAPPING;
+Status = RETURN_NO_MAPPING;
+goto Done;
}
SetOrClearCBit (&PageTableEntry->Uint64, Mode);
PhysicalAddress += EFI_PAGE_SIZE;
@@ -406,12 +754,28 @@
}
}
//
// Protect the page table by marking the memory used for the page table as
// read-only.
//
if (IsWpEnabled) {
EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
}
//
// Flush TLB
//
CpuFlushTlb();
-return RETURN_SUCCESS;
+Done:
//
// Restore page table write protection, if any.
//
if (IsWpEnabled) {
EnableReadOnlyPageWriteProtect ();
}
return Status;
}
/**

OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h

@@ -128,6 +128,20 @@ typedef union {
#define IA32_PG_P BIT0
#define IA32_PG_RW BIT1
#define IA32_PG_PS BIT7
#define PAGING_PAE_INDEX_MASK 0x1FF
#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
#define PAGING_L1_ADDRESS_SHIFT 12
#define PAGING_L2_ADDRESS_SHIFT 21
#define PAGING_L3_ADDRESS_SHIFT 30
#define PAGING_L4_ADDRESS_SHIFT 39
#define PAGING_PML4E_NUMBER 4
#define PAGETABLE_ENTRY_MASK ((1UL << 9) - 1)
#define PML4_OFFSET(x) ( (x >> 39) & PAGETABLE_ENTRY_MASK)
@@ -136,6 +150,20 @@ typedef union {
#define PTE_OFFSET(x) ( (x >> 12) & PAGETABLE_ENTRY_MASK)
#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
#define PAGE_TABLE_POOL_ALIGNMENT BASE_2MB
#define PAGE_TABLE_POOL_UNIT_SIZE SIZE_2MB
#define PAGE_TABLE_POOL_UNIT_PAGES EFI_SIZE_TO_PAGES (PAGE_TABLE_POOL_UNIT_SIZE)
#define PAGE_TABLE_POOL_ALIGN_MASK \
(~(EFI_PHYSICAL_ADDRESS)(PAGE_TABLE_POOL_ALIGNMENT - 1))
typedef struct {
VOID *NextPool;
UINTN Offset;
UINTN FreePages;
} PAGE_TABLE_POOL;
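//
// Layout of one pool (PAGE_TABLE_POOL_UNIT_SIZE, 2MB-aligned):
//   page 0:        pool header (PAGE_TABLE_POOL, linked into a circular list)
//   pages 1..511:  page table pages, handed out by AllocatePageTableMemory()
//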
/**
This function clears memory encryption bit for the memory region specified by PhysicalAddress
and length from the current page table context.