mirror of https://github.com/acidanthera/audk.git
ArmPkg/ArmMmuLib ARM: cache-invalidate initial page table entries
In the ARM version of ArmMmuLib, we are currently relying on set/way invalidation to ensure that the caches are in a consistent state with respect to main memory once we turn the MMU on. Even if set/way operations were the appropriate method to achieve this, doing an invalidate-all first and then populating the page table entries creates a window where page table entries could be loaded speculatively into the caches before we modify them, and shadow the new values that we write there. So let's get rid of the blanket clean/invalidate operations, and instead, invalidate each page table right after allocating it, and each section entry after it is updated (to address all the little corner cases that the ARMv7 spec permits), and invalidate sets of level 2 entries in blocks, using the generic invalidation routine from CacheMaintenanceLib. On ARMv7, cache maintenance may be required also when the MMU is enabled, in case the page table walker is not cache coherent. However, the code being updated here is guaranteed to run only when the MMU is still off, and so we can disregard the case when the MMU and caches are on. Since the MMU and D-cache are already off when we reach this point, we can drop the MMU and D-cache disables as well. Maintenance of the I-cache is unnecessary, since we are not modifying any code, and the installed mapping is guaranteed to be 1:1. This means we can also leave it enabled while the page table population code is running. Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Reviewed-by: Leif Lindholm <leif@nuviainc.com>
This commit is contained in:
parent
825c3e2c1b
commit
02d7797d1a
|
@ -147,6 +147,13 @@ PopulateLevel2PageTable (
|
||||||
|
|
||||||
BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(*SectionEntry);
|
BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(*SectionEntry);
|
||||||
|
|
||||||
|
//
|
||||||
|
// Make sure we are not inadvertently hitting in the caches
|
||||||
|
// when populating the page tables
|
||||||
|
//
|
||||||
|
InvalidateDataCacheRange ((VOID *)TranslationTable,
|
||||||
|
TRANSLATION_TABLE_PAGE_SIZE);
|
||||||
|
|
||||||
// Populate the new Level2 Page Table for the section
|
// Populate the new Level2 Page Table for the section
|
||||||
PageEntry = (UINT32*)TranslationTable;
|
PageEntry = (UINT32*)TranslationTable;
|
||||||
for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
|
for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
|
||||||
|
@ -166,6 +173,12 @@ PopulateLevel2PageTable (
|
||||||
TranslationTable = (UINTN)AllocateAlignedPages (
|
TranslationTable = (UINTN)AllocateAlignedPages (
|
||||||
EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
|
EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
|
||||||
TRANSLATION_TABLE_PAGE_ALIGNMENT);
|
TRANSLATION_TABLE_PAGE_ALIGNMENT);
|
||||||
|
//
|
||||||
|
// Make sure we are not inadvertently hitting in the caches
|
||||||
|
// when populating the page tables
|
||||||
|
//
|
||||||
|
InvalidateDataCacheRange ((VOID *)TranslationTable,
|
||||||
|
TRANSLATION_TABLE_PAGE_SIZE);
|
||||||
ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);
|
ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);
|
||||||
|
|
||||||
*SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
|
*SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
|
||||||
|
@ -184,6 +197,13 @@ PopulateLevel2PageTable (
|
||||||
PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE;
|
PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// Invalidate again to ensure that any line fetches that may have occurred
|
||||||
|
// [speculatively] since the previous invalidate are evicted again.
|
||||||
|
//
|
||||||
|
ArmDataMemoryBarrier ();
|
||||||
|
InvalidateDataCacheRange ((UINT32 *)TranslationTable + FirstPageOffset,
|
||||||
|
RemainLength / TT_DESCRIPTOR_PAGE_SIZE * sizeof (*PageEntry));
|
||||||
}
|
}
|
||||||
|
|
||||||
STATIC
|
STATIC
|
||||||
|
@ -258,7 +278,16 @@ FillTranslationTable (
|
||||||
RemainLength >= TT_DESCRIPTOR_SECTION_SIZE) {
|
RemainLength >= TT_DESCRIPTOR_SECTION_SIZE) {
|
||||||
// Case: Physical address aligned on the Section Size (1MB) && the length
|
// Case: Physical address aligned on the Section Size (1MB) && the length
|
||||||
// is greater than the Section Size
|
// is greater than the Section Size
|
||||||
*SectionEntry++ = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(PhysicalBase) | Attributes;
|
*SectionEntry = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(PhysicalBase) | Attributes;
|
||||||
|
|
||||||
|
//
|
||||||
|
// Issue a DMB to ensure that the page table entry update made it to
|
||||||
|
// memory before we issue the invalidate, otherwise, a subsequent
|
||||||
|
// speculative fetch could observe the old value.
|
||||||
|
//
|
||||||
|
ArmDataMemoryBarrier ();
|
||||||
|
ArmInvalidateDataCacheEntryByMVA ((UINTN)SectionEntry++);
|
||||||
|
|
||||||
PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE;
|
PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE;
|
||||||
RemainLength -= TT_DESCRIPTOR_SECTION_SIZE;
|
RemainLength -= TT_DESCRIPTOR_SECTION_SIZE;
|
||||||
} else {
|
} else {
|
||||||
|
@ -268,9 +297,17 @@ FillTranslationTable (
|
||||||
// Case: Physical address aligned on the Section Size (1MB) && the length
|
// Case: Physical address aligned on the Section Size (1MB) && the length
|
||||||
// does not fill a section
|
// does not fill a section
|
||||||
// Case: Physical address NOT aligned on the Section Size (1MB)
|
// Case: Physical address NOT aligned on the Section Size (1MB)
|
||||||
PopulateLevel2PageTable (SectionEntry++, PhysicalBase, PageMapLength,
|
PopulateLevel2PageTable (SectionEntry, PhysicalBase, PageMapLength,
|
||||||
MemoryRegion->Attributes);
|
MemoryRegion->Attributes);
|
||||||
|
|
||||||
|
//
|
||||||
|
// Issue a DMB to ensure that the page table entry update made it to
|
||||||
|
// memory before we issue the invalidate, otherwise, a subsequent
|
||||||
|
// speculative fetch could observe the old value.
|
||||||
|
//
|
||||||
|
ArmDataMemoryBarrier ();
|
||||||
|
ArmInvalidateDataCacheEntryByMVA ((UINTN)SectionEntry++);
|
||||||
|
|
||||||
// If it is the last entry
|
// If it is the last entry
|
||||||
if (RemainLength < TT_DESCRIPTOR_SECTION_SIZE) {
|
if (RemainLength < TT_DESCRIPTOR_SECTION_SIZE) {
|
||||||
break;
|
break;
|
||||||
|
@ -309,6 +346,11 @@ ArmConfigureMmu (
|
||||||
*TranslationTableSize = TRANSLATION_TABLE_SECTION_SIZE;
|
*TranslationTableSize = TRANSLATION_TABLE_SECTION_SIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// Make sure we are not inadvertently hitting in the caches
|
||||||
|
// when populating the page tables
|
||||||
|
//
|
||||||
|
InvalidateDataCacheRange (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);
|
||||||
ZeroMem (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);
|
ZeroMem (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);
|
||||||
|
|
||||||
// By default, mark the translation table as belonging to a uncached region
|
// By default, mark the translation table as belonging to a uncached region
|
||||||
|
@ -350,18 +392,6 @@ ArmConfigureMmu (
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ArmCleanInvalidateDataCache ();
|
|
||||||
ArmInvalidateInstructionCache ();
|
|
||||||
|
|
||||||
ArmDisableDataCache ();
|
|
||||||
ArmDisableInstructionCache();
|
|
||||||
// TLBs are also invalidated when calling ArmDisableMmu()
|
|
||||||
ArmDisableMmu ();
|
|
||||||
|
|
||||||
// Make sure nothing sneaked into the cache
|
|
||||||
ArmCleanInvalidateDataCache ();
|
|
||||||
ArmInvalidateInstructionCache ();
|
|
||||||
|
|
||||||
ArmSetTTBR0 ((VOID *)(UINTN)(((UINTN)TranslationTable & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) | (TTBRAttributes & 0x7F)));
|
ArmSetTTBR0 ((VOID *)(UINTN)(((UINTN)TranslationTable & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) | (TTBRAttributes & 0x7F)));
|
||||||
|
|
||||||
//
|
//
|
||||||
|
|
Loading…
Reference in New Issue