ArmPkg/ArmMmuLib: Disable and re-enable MMU only when needed

When updating a page table descriptor in a way that requires a
break-before-make sequence, we temporarily disable the MMU to ensure that
we don't unmap the memory region that the code itself is executing from.

However, this is a condition we can check in a straightforward manner, and
if the regions are disjoint, we don't have to bother with the MMU controls
at all, and can simply perform an ordinary break-before-make.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Leif Lindholm <quic_llindhol@quicinc.com>
Ard Biesheuvel authored 2022-09-24 18:26:19 +02:00; committed by mergify[bot]
commit 0487cac09f (parent ab644cfac5)
3 changed files with 123 additions and 29 deletions
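To make the disjointness test concrete before diving into the diff: two addresses fall in the same naturally aligned, block-sized window exactly when their XOR has no bits set above the block's offset bits. A minimal sketch of that predicate, using standard C types and a hypothetical SameBlock helper rather than anything from the patch itself:

#include <stdbool.h>
#include <stdint.h>

// BlockMask has the low, intra-block offset bits set (e.g. 0x1FFFFF for a
// 2 MiB block). XOR-ing the addresses and masking off those offset bits
// leaves zero only if all higher (block number) bits agree.
static bool
SameBlock (uint64_t RegionStart, uint64_t Addr, uint64_t BlockMask)
{
  return ((RegionStart ^ Addr) & ~BlockMask) == 0;
}

For a 2 MiB block, SameBlock (0x40200000, 0x403FFFFF, 0x1FFFFF) is true while SameBlock (0x40200000, 0x40400000, 0x1FFFFF) is false; only in the first case does the region being remapped overlap the code (or the entry), and only then must the MMU be disabled.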

ArmPkg/Include/Library/ArmMmuLib.h

@@ -54,7 +54,8 @@ EFIAPI
 ArmReplaceLiveTranslationEntry (
   IN  UINT64   *Entry,
   IN  UINT64   Value,
-  IN  UINT64   RegionStart
+  IN  UINT64   RegionStart,
+  IN  BOOLEAN  DisableMmu
   );

 EFI_STATUS

ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c

@@ -18,6 +18,17 @@
 #include <Library/ArmMmuLib.h>
 #include <Library/BaseLib.h>
 #include <Library/DebugLib.h>
+#include <Library/HobLib.h>
+
+STATIC
+VOID (
+  EFIAPI  *mReplaceLiveEntryFunc
+  )(
+  IN  UINT64   *Entry,
+  IN  UINT64   Value,
+  IN  UINT64   RegionStart,
+  IN  BOOLEAN  DisableMmu
+  ) = ArmReplaceLiveTranslationEntry;

 STATIC
 UINT64
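Read inside-out, the declaration above makes mReplaceLiveEntryFunc a STATIC pointer to an EFIAPI function taking (UINT64 *, UINT64, UINT64, BOOLEAN) and returning VOID, statically initialized to the assembly routine. A plain-C sketch of the same pattern, with hypothetical names, for readers unused to function-pointer declarators:

#include <stdbool.h>
#include <stdint.h>

// A pointer-to-function type matching the routine's signature.
typedef void (*REPLACE_ENTRY_FN)(uint64_t *Entry, uint64_t Value,
                                 uint64_t RegionStart, bool DisableMmu);

// Stand-in for the real ArmReplaceLiveTranslationEntry.
static void
DefaultReplaceEntry (uint64_t *Entry, uint64_t Value,
                     uint64_t RegionStart, bool DisableMmu)
{
  (void)Entry; (void)Value; (void)RegionStart; (void)DisableMmu;
}

// Statically initialized, so the pointer is always safe to call and can be
// redirected later without touching any call sites.
static REPLACE_ENTRY_FN  mReplaceEntryFn = DefaultReplaceEntry;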
@@ -83,14 +94,40 @@ ReplaceTableEntry (
   IN  UINT64   *Entry,
   IN  UINT64   Value,
   IN  UINT64   RegionStart,
+  IN  UINT64   BlockMask,
   IN  BOOLEAN  IsLiveBlockMapping
   )
 {
-  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
+  BOOLEAN  DisableMmu;
+
+  //
+  // Replacing a live block entry with a table entry (or vice versa) requires a
+  // break-before-make sequence as per the architecture. This means the mapping
+  // must be made invalid and cleaned from the TLBs first, and this is a bit of
+  // a hassle if the mapping in question covers the code that is actually doing
+  // the mapping and the unmapping, and so we only bother with this if actually
+  // necessary.
+  //
+
+  if (!IsLiveBlockMapping || !ArmMmuEnabled ()) {
+    // If the mapping is not a live block mapping, or the MMU is not on yet, we
+    // can simply overwrite the entry.
     *Entry = Value;
     ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
   } else {
-    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
+    // If the mapping in question does not cover the code that updates the
+    // entry in memory, or the entry that we are intending to update, we can
+    // use an ordinary break before make. Otherwise, we will need to
+    // temporarily disable the MMU.
+    DisableMmu = FALSE;
+    if ((((RegionStart ^ (UINTN)ArmReplaceLiveTranslationEntry) & ~BlockMask) == 0) ||
+        (((RegionStart ^ (UINTN)Entry) & ~BlockMask) == 0))
+    {
+      DisableMmu = TRUE;
+      DEBUG ((DEBUG_WARN, "%a: splitting block entry with MMU disabled\n", __FUNCTION__));
+    }
+
+    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart, DisableMmu);
   }
 }
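ReplaceTableEntry now also receives the BlockMask of the level it operates at, so the overlap test above runs at the granularity of the entry being replaced. With a 4 KiB translation granule, the mask follows directly from the table level; the level-to-size relation below is architectural, the helper name is made up for illustration:

#include <stdint.h>

// With 4 KiB pages, a descriptor at level L maps 2^(39 - 9*L) bytes:
// 512 GiB at level 0, 1 GiB at level 1, 2 MiB at level 2, 4 KiB at level 3.
static uint64_t
BlockMaskForLevel (unsigned Level)
{
  unsigned BlockShift = 39U - 9U * Level;
  return ((uint64_t)1 << BlockShift) - 1;  // intra-block offset bits
}

Replacing a level 3 page entry therefore forces the MMU off only when the code or the entry sits in the same 4 KiB page as the region, while a level 2 block entry triggers it anywhere within the same 2 MiB window.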
@@ -160,7 +197,8 @@ UpdateRegionMappingRecursive (
   IN  UINT64   AttributeSetMask,
   IN  UINT64   AttributeClearMask,
   IN  UINT64   *PageTable,
-  IN  UINTN    Level
+  IN  UINTN    Level,
+  IN  BOOLEAN  TableIsLive
   )
 {
   UINTN       BlockShift;
@@ -170,6 +208,7 @@ UpdateRegionMappingRecursive (
   UINT64      EntryValue;
   VOID        *TranslationTable;
   EFI_STATUS  Status;
+  BOOLEAN     NextTableIsLive;

   ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
@@ -198,7 +237,14 @@ UpdateRegionMappingRecursive (
     // the next level. No block mappings are allowed at all at level 0,
     // so in that case, we have to recurse unconditionally.
     //
+    // One special case to take into account is any region that covers the page
+    // table itself: if we'd cover such a region with block mappings, we are
+    // more likely to end up in the situation later where we need to disable
+    // the MMU in order to update page table entries safely, so prefer page
+    // mappings in that particular case.
+    //
     if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||
+        ((Level < 3) && (((UINT64)PageTable & ~BlockMask) == RegionStart)) ||
         IsTableEntry (*Entry, Level))
     {
       ASSERT (Level < 3);
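The clause added to the condition reads as: the block-sized, block-aligned region starting at RegionStart would cover the very page table being written. A one-function restatement of that test, name hypothetical:

#include <stdbool.h>
#include <stdint.h>

// True if a block entry for [RegionStart, RegionStart + BlockMask] would
// map over the page table itself. The code above then recurses and uses
// page mappings instead, so later updates to the tables are less likely
// to require the MMU to be disabled.
static bool
BlockWouldCoverPageTable (uint64_t RegionStart, uint64_t PageTableAddr,
                          uint64_t BlockMask)
{
  return (PageTableAddr & ~BlockMask) == RegionStart;
}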
@@ -234,7 +280,8 @@ UpdateRegionMappingRecursive (
           *Entry & TT_ATTRIBUTES_MASK,
           0,
           TranslationTable,
-          Level + 1
+          Level + 1,
+          FALSE
           );
         if (EFI_ERROR (Status)) {
           //
@@ -246,8 +293,11 @@ UpdateRegionMappingRecursive (
           return Status;
         }
       }
+
+      NextTableIsLive = FALSE;
     } else {
       TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
+      NextTableIsLive = TableIsLive;
     }

     //
@@ -259,7 +309,8 @@ UpdateRegionMappingRecursive (
       AttributeSetMask,
       AttributeClearMask,
       TranslationTable,
-      Level + 1
+      Level + 1,
+      NextTableIsLive
       );
     if (EFI_ERROR (Status)) {
       if (!IsTableEntry (*Entry, Level)) {
@@ -282,7 +333,8 @@ UpdateRegionMappingRecursive (
         Entry,
         EntryValue,
         RegionStart,
-        IsBlockEntry (*Entry, Level)
+        BlockMask,
+        TableIsLive && IsBlockEntry (*Entry, Level)
         );
     }
   } else {
@@ -291,7 +343,7 @@ UpdateRegionMappingRecursive (
       EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                  : TT_TYPE_BLOCK_ENTRY;

-      ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
+      ReplaceTableEntry (Entry, EntryValue, RegionStart, BlockMask, FALSE);
     }
   }
@@ -304,7 +356,8 @@ UpdateRegionMapping (
   IN  UINT64   RegionStart,
   IN  UINT64   RegionLength,
   IN  UINT64   AttributeSetMask,
-  IN  UINT64   AttributeClearMask
+  IN  UINT64   AttributeClearMask,
+  IN  BOOLEAN  TableIsLive
   )
 {
   UINTN  T0SZ;
@@ -321,7 +374,8 @@ UpdateRegionMapping (
            AttributeSetMask,
            AttributeClearMask,
            ArmGetTTBR0BaseAddress (),
-           GetRootTableLevel (T0SZ)
+           GetRootTableLevel (T0SZ),
+           TableIsLive
            );
 }
@@ -336,7 +390,8 @@ FillTranslationTable (
            MemoryRegion->VirtualBase,
            MemoryRegion->Length,
            ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
-           0
+           0,
+           FALSE
            );
 }
@@ -410,7 +465,8 @@ ArmSetMemoryAttributes (
            BaseAddress,
            Length,
            PageAttributes,
-           PageAttributeMask
+           PageAttributeMask,
+           TRUE
            );
 }
@@ -423,7 +479,13 @@ SetMemoryRegionAttribute (
   IN  UINT64  BlockEntryMask
   )
 {
-  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
+  return UpdateRegionMapping (
+           BaseAddress,
+           Length,
+           Attributes,
+           BlockEntryMask,
+           TRUE
+           );
 }

 EFI_STATUS
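Taken together, the call sites above pin down the meaning of the new TableIsLive flag: FillTranslationTable passes FALSE because it populates tables that TTBR0 does not yet reference, while ArmSetMemoryAttributes and SetMemoryRegionAttribute pass TRUE because they edit mappings the table walker may be using. A hedged sketch of a caller following the same rule; the wrapper below is hypothetical, only UpdateRegionMapping comes from the patch:

EFI_STATUS
RemapRegionExample (
  IN  UINT64   Base,
  IN  UINT64   Length,
  IN  UINT64   SetMask,
  IN  UINT64   ClearMask,
  IN  BOOLEAN  TablesInstalled  // TRUE once TTBR0 points at these tables
  )
{
  // Pass FALSE only while constructing a hierarchy that is not live yet;
  // anything that might be in active use must be treated as live.
  return UpdateRegionMapping (Base, Length, SetMask, ClearMask, TablesInstalled);
}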

ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S

@@ -12,6 +12,14 @@

   .macro __replace_entry, el

+  // check whether we should disable the MMU
+  cbz   x3, .L1_\@
+
+  // clean and invalidate first so that we don't clobber
+  // adjacent entries that are dirty in the caches
+  dc    civac, x0
+  dsb   nsh
+
   // disable the MMU
   mrs   x8, sctlr_el\el
   bic   x9, x8, #CTRL_M_BIT
@@ -38,8 +46,33 @@
   // re-enable the MMU
   msr   sctlr_el\el, x8
   isb
+  b     .L2_\@
+
+.L1_\@:
+  // write invalid entry
+  str   xzr, [x0]
+  dsb   nshst
+
+  // flush translations for the target address from the TLBs
+  lsr   x2, x2, #12
+  .if   \el == 1
+  tlbi  vaae1, x2
+  .else
+  tlbi  vae\el, x2
+  .endif
+  dsb   nsh
+
+  // write updated entry
+  str   x1, [x0]
+  dsb   nshst
+
+.L2_\@:
   .endm

+  // Align this routine to a log2 upper bound of its size, so that it is
+  // guaranteed not to cross a page or block boundary.
+  .balign 0x200
+
 //VOID
 //ArmReplaceLiveTranslationEntry (
 //  IN  UINT64  *Entry,
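For readers less fluent in AArch64 assembly, here is a C-level restatement of the two paths the macro now implements. The helpers are hypothetical stand-ins for the dc/dsb/tlbi instructions, and the MMU-off path is reduced to a stub because it must execute with translation disabled:

#include <stdbool.h>
#include <stdint.h>

static void DataSyncBarrierStore (void) { /* dsb nshst */ }
static void DataSyncBarrier      (void) { /* dsb nsh   */ }
static void TlbInvalidatePage    (uint64_t Page) { /* tlbi vaae1/vae\el */ (void)Page; }
static void ReplaceWithMmuOff    (volatile uint64_t *Entry, uint64_t Value)
{ /* dc civac; MMU off; update; TLB flush; MMU on */ (void)Entry; (void)Value; }

static void
ReplaceEntrySketch (volatile uint64_t *Entry, uint64_t Value,
                    uint64_t RegionStart, bool DisableMmu)
{
  if (DisableMmu) {
    // Slow path, taken when the region covers this code or the entry.
    ReplaceWithMmuOff (Entry, Value);
  } else {
    // Fast path: an ordinary break-before-make.
    *Entry = 0;                              // str xzr, [x0]  -- break
    DataSyncBarrierStore ();                 // dsb nshst
    TlbInvalidatePage (RegionStart >> 12);   // flush the old translation
    DataSyncBarrier ();                      // dsb nsh
    *Entry = Value;                          // str x1, [x0]   -- make
    DataSyncBarrierStore ();                 // dsb nshst
  }
}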
@@ -53,12 +86,7 @@ ASM_FUNC(ArmReplaceLiveTranslationEntry)
   msr   daifset, #0xf
   isb

-  // clean and invalidate first so that we don't clobber
-  // adjacent entries that are dirty in the caches
-  dc    civac, x0
-  dsb   nsh
-
-  EL1_OR_EL2_OR_EL3(x3)
+  EL1_OR_EL2_OR_EL3(x5)
 1:__replace_entry 1
   b     4f
 2:__replace_entry 2
@@ -72,3 +100,6 @@ ASM_GLOBAL ASM_PFX(ArmReplaceLiveTranslationEntrySize)
 ASM_PFX(ArmReplaceLiveTranslationEntrySize):
   .long  . - ArmReplaceLiveTranslationEntry
+
+  // Double check that we did not overrun the assumed maximum size
+  .org  ArmReplaceLiveTranslationEntry + 0x200
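The 0x200 figure works because the routine is aligned to the same power of two that bounds its size: code that starts on a 512-byte boundary and occupies at most 512 bytes stays inside one naturally aligned 512-byte window, and such a window can never straddle a larger power-of-two boundary like a 4 KiB page or a 2 MiB block. A small sanity check expressing that invariant, helper name hypothetical:

#include <assert.h>
#include <stdint.h>

// Invariant guaranteed by ".balign 0x200" plus the ".org" overrun check.
static void
CheckNoBoundaryCrossing (uint64_t Start, uint64_t Size)
{
  assert ((Start & 0x1FF) == 0);  // .balign 0x200: 512-byte aligned start
  assert (Size != 0 && Size <= 0x200);  // .org fails if the code were bigger

  // Hence first and last byte share the same 4 KiB page (and 2 MiB block).
  uint64_t PageMask = 0xFFF;
  assert ((Start & ~PageMask) == ((Start + Size - 1) & ~PageMask));
}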