diff --git a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
index 5f0a87c024..f9c5c92c22 100644
--- a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
@@ -20,15 +20,11 @@
 
 #define AP_SAFE_STACK_SIZE  128
 
-CPU_MP_DATA             *mCpuMpData                  = NULL;
-EFI_EVENT               mCheckAllApsEvent            = NULL;
-EFI_EVENT               mMpInitExitBootServicesEvent = NULL;
-EFI_EVENT               mLegacyBootEvent             = NULL;
-volatile BOOLEAN        mStopCheckAllApsStatus       = TRUE;
-RELOCATE_AP_LOOP_ENTRY  mReservedApLoop;
-UINTN                   mReservedTopOfApStack;
-volatile UINT32         mNumberToFinish = 0;
-UINTN                   mApPageTable;
+CPU_MP_DATA       *mCpuMpData                  = NULL;
+EFI_EVENT         mCheckAllApsEvent            = NULL;
+EFI_EVENT         mMpInitExitBootServicesEvent = NULL;
+EFI_EVENT         mLegacyBootEvent             = NULL;
+volatile BOOLEAN  mStopCheckAllApsStatus       = TRUE;
 
 //
 // Begin wakeup buffer allocation below 0x88000
@@ -368,60 +364,6 @@ GetProtectedModeCS (
   return Index * 8;
 }
 
-/**
-  Do sync on APs.
-
-  @param[in, out] Buffer  Pointer to private data buffer.
-**/
-VOID
-EFIAPI
-RelocateApLoop (
-  IN OUT VOID  *Buffer
-  )
-{
-  CPU_MP_DATA  *CpuMpData;
-  BOOLEAN      MwaitSupport;
-  UINTN        ProcessorNumber;
-  UINTN        StackStart;
-
-  MpInitLibWhoAmI (&ProcessorNumber);
-  CpuMpData    = GetCpuMpData ();
-  MwaitSupport = IsMwaitSupport ();
-  if (CpuMpData->UseSevEsAPMethod) {
-    //
-    // 64-bit AMD processors with SEV-ES
-    //
-    StackStart = CpuMpData->SevEsAPResetStackStart;
-    mReservedApLoop.AmdSevEntry (
-                      MwaitSupport,
-                      CpuMpData->ApTargetCState,
-                      CpuMpData->PmCodeSegment,
-                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
-                      (UINTN)&mNumberToFinish,
-                      CpuMpData->Pm16CodeSegment,
-                      CpuMpData->SevEsAPBuffer,
-                      CpuMpData->WakeupBuffer
-                      );
-  } else {
-    //
-    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
-    //
-    StackStart = mReservedTopOfApStack;
-    mReservedApLoop.GenericEntry (
-                      MwaitSupport,
-                      CpuMpData->ApTargetCState,
-                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
-                      (UINTN)&mNumberToFinish,
-                      mApPageTable
-                      );
-  }
-
-  //
-  // It should never reach here
-  //
-  ASSERT (FALSE);
-}
-
 /**
   Allocate buffer for ApLoopCode.
 
@@ -477,89 +419,6 @@ RemoveNxprotection (
   }
 }
 
-/**
-  Prepare ApLoopCode.
-
-  @param[in] CpuMpData  Pointer to CpuMpData.
-**/
-VOID
-PrepareApLoopCode (
-  IN CPU_MP_DATA  *CpuMpData
-  )
-{
-  EFI_PHYSICAL_ADDRESS     Address;
-  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap;
-  UINT8                    *ApLoopFunc;
-  UINTN                    ApLoopFuncSize;
-  UINTN                    StackPages;
-  UINTN                    FuncPages;
-  IA32_CR0                 Cr0;
-
-  AddressMap = &CpuMpData->AddressMap;
-  if (CpuMpData->UseSevEsAPMethod) {
-    //
-    // 64-bit AMD processors with SEV-ES
-    //
-    Address        = BASE_4GB - 1;
-    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressAmdSev;
-    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeAmdSev;
-  } else {
-    //
-    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
-    //
-    Address        = MAX_ADDRESS;
-    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressGeneric;
-    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
-  }
-
-  //
-  // Avoid APs access invalid buffer data which allocated by BootServices,
-  // so we will allocate reserved data for AP loop code. We also need to
-  // allocate this buffer below 4GB due to APs may be transferred to 32bit
-  // protected mode on long mode DXE.
-  // Allocating it in advance since memory services are not available in
-  // Exit Boot Services callback function.
-  //
-  // +------------+ (TopOfApStack)
-  // |  Stack * N |
-  // +------------+ (stack base, 4k aligned)
-  // |  Padding   |
-  // +------------+
-  // |  Ap Loop   |
-  // +------------+ ((low address, 4k-aligned)
-  //
-
-  StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
-  FuncPages  = EFI_SIZE_TO_PAGES (ApLoopFuncSize);
-
-  AllocateApLoopCodeBuffer (StackPages + FuncPages, &Address);
-  ASSERT (Address != 0);
-
-  Cr0.UintN = AsmReadCr0 ();
-  if (Cr0.Bits.PG != 0) {
-    //
-    // Make sure that the buffer memory is executable if NX protection is enabled
-    // for EfiReservedMemoryType.
-    //
-    RemoveNxprotection (Address, EFI_PAGES_TO_SIZE (FuncPages));
-  }
-
-  mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages+FuncPages);
-  ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
-  mReservedApLoop.Data = (VOID *)(UINTN)Address;
-  ASSERT (mReservedApLoop.Data != NULL);
-  CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);
-  if (!CpuMpData->UseSevEsAPMethod) {
-    //
-    // processors without SEV-ES and paging is enabled
-    //
-    mApPageTable = CreatePageTable (
-                     (UINTN)Address,
-                     EFI_PAGES_TO_SIZE (StackPages+FuncPages)
-                     );
-  }
-}
-
 /**
   Callback function for ExitBootServices.
 
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.c b/UefiCpuPkg/Library/MpInitLib/MpLib.c
index 4bf3dc5fca..f97298887f 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.c
@@ -17,6 +17,11 @@
 EFI_GUID  mCpuInitMpLibHobGuid   = CPU_INIT_MP_LIB_HOB_GUID;
 EFI_GUID  mMpHandOffGuid         = MP_HANDOFF_GUID;
 EFI_GUID  mMpHandOffConfigGuid   = MP_HANDOFF_CONFIG_GUID;
 
+RELOCATE_AP_LOOP_ENTRY  mReservedApLoop;
+UINTN                   mReservedTopOfApStack;
+volatile UINT32         mNumberToFinish = 0;
+UINTN                   mApPageTable;
+
 /**
   Save the volatile registers required to be restored following INIT IPI.
 
@@ -3240,3 +3245,140 @@ ConfidentialComputingGuestHas (
 
   return (CurrentAttr == Attr);
 }
+
+/**
+  Relocate the executing AP into the reserved safe loop; does not return.
+
+  @param[in, out] Buffer  Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+RelocateApLoop (
+  IN OUT VOID  *Buffer
+  )
+{
+  CPU_MP_DATA  *CpuMpData;
+  BOOLEAN      MwaitSupport;
+  UINTN        ProcessorNumber;
+  UINTN        StackStart;
+
+  MpInitLibWhoAmI (&ProcessorNumber);
+  CpuMpData    = GetCpuMpData ();
+  MwaitSupport = IsMwaitSupport ();
+  if (CpuMpData->UseSevEsAPMethod) {
+    //
+    // 64-bit AMD processors with SEV-ES
+    //
+    StackStart = CpuMpData->SevEsAPResetStackStart;
+    mReservedApLoop.AmdSevEntry (
+                      MwaitSupport,
+                      CpuMpData->ApTargetCState,
+                      CpuMpData->PmCodeSegment,
+                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+                      (UINTN)&mNumberToFinish,
+                      CpuMpData->Pm16CodeSegment,
+                      CpuMpData->SevEsAPBuffer,
+                      CpuMpData->WakeupBuffer
+                      );
+  } else {
+    //
+    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+    //
+    StackStart = mReservedTopOfApStack;
+    mReservedApLoop.GenericEntry (
+                      MwaitSupport,
+                      CpuMpData->ApTargetCState,
+                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+                      (UINTN)&mNumberToFinish,
+                      mApPageTable
+                      );
+  }
+
+  //
+  // Execution should never reach here
+  //
+  ASSERT (FALSE);
+}
+
+/**
+  Prepare the AP loop code buffer.
+
+  @param[in] CpuMpData  Pointer to CpuMpData.
+**/
+VOID
+PrepareApLoopCode (
+  IN CPU_MP_DATA  *CpuMpData
+  )
+{
+  EFI_PHYSICAL_ADDRESS     Address;
+  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap;
+  UINT8                    *ApLoopFunc;
+  UINTN                    ApLoopFuncSize;
+  UINTN                    StackPages;
+  UINTN                    FuncPages;
+  IA32_CR0                 Cr0;
+
+  AddressMap = &CpuMpData->AddressMap;
+  if (CpuMpData->UseSevEsAPMethod) {
+    //
+    // 64-bit AMD processors with SEV-ES
+    //
+    Address        = BASE_4GB - 1;
+    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressAmdSev;
+    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeAmdSev;
+  } else {
+    //
+    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+    //
+    Address        = MAX_ADDRESS;
+    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressGeneric;
+    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
+  }
+
+  //
+  // Keep APs off buffers allocated by Boot Services, which become invalid
+  // after ExitBootServices, by placing the AP loop code in reserved memory.
+  // The buffer must also sit below 4GB because APs may be switched to
+  // 32-bit protected mode in long-mode DXE.
+  // Allocate it in advance because memory services are no longer available
+  // in the ExitBootServices callback.
+  //
+  // +------------+ (TopOfApStack)
+  // |  Stack * N |
+  // +------------+ (stack base, 4K-aligned)
+  // |  Padding   |
+  // +------------+
+  // |  Ap Loop   |
+  // +------------+ (low address, 4K-aligned)
+  //
+
+  StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
+  FuncPages  = EFI_SIZE_TO_PAGES (ApLoopFuncSize);
+
+  AllocateApLoopCodeBuffer (StackPages + FuncPages, &Address);
+  ASSERT (Address != 0);
+
+  Cr0.UintN = AsmReadCr0 ();
+  if (Cr0.Bits.PG != 0) {
+    //
+    // Make sure that the buffer memory is executable if NX protection is enabled
+    // for EfiReservedMemoryType.
+    //
+    RemoveNxprotection (Address, EFI_PAGES_TO_SIZE (FuncPages));
+  }
+
+  mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages + FuncPages);
+  ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
+  mReservedApLoop.Data = (VOID *)(UINTN)Address;
+  ASSERT (mReservedApLoop.Data != NULL);
+  CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);
+  if (!CpuMpData->UseSevEsAPMethod) {
+    //
+    // Processors without SEV-ES, with paging enabled
+    //
+    mApPageTable = CreatePageTable (
+                     (UINTN)Address,
+                     EFI_PAGES_TO_SIZE (StackPages + FuncPages)
+                     );
+  }
+}
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.h b/UefiCpuPkg/Library/MpInitLib/MpLib.h
index 179f8e585b..a4a33bf538 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.h
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.h
@@ -1,7 +1,7 @@
 /** @file
   Common header file for MP Initialize Library.
 
-  Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.
+  Copyright (c) 2016 - 2024, Intel Corporation. All rights reserved.
   Copyright (c) 2020 - 2024, AMD Inc. All rights reserved.
   SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -357,7 +357,8 @@ typedef
   IN UINTN                 StackStart
   );
 
-extern EFI_GUID  mCpuInitMpLibHobGuid;
+extern EFI_GUID         mCpuInitMpLibHobGuid;
+extern volatile UINT32  mNumberToFinish;
 
 /**
   Assembly code to place AP into safe loop mode.
@@ -933,4 +934,52 @@ AmdSevUpdateCpuMpData (
   IN CPU_MP_DATA  *CpuMpData
   );
 
+/**
+  Prepare the AP loop code buffer.
+
+  @param[in] CpuMpData  Pointer to CpuMpData.
+**/
+VOID
+PrepareApLoopCode (
+  IN CPU_MP_DATA  *CpuMpData
+  );
+
+/**
+  Relocate the executing AP into the reserved safe loop; does not return.
+
+  @param[in, out] Buffer  Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+RelocateApLoop (
+  IN OUT VOID  *Buffer
+  );
+
+/**
+  Allocate buffer for ApLoopCode.
+
+  @param[in]      Pages    Number of pages to allocate.
+  @param[in, out] Address  On input, the highest address allowed; on output, the allocated buffer address.
+**/
+VOID
+AllocateApLoopCodeBuffer (
+  IN UINTN                     Pages,
+  IN OUT EFI_PHYSICAL_ADDRESS  *Address
+  );
+
+/**
+  Remove Nx protection for the range specified by BaseAddress and Length.
+
+  The PEI implementation uses CpuPageTableLib to change the attribute.
+  The DXE implementation uses gDS to change the attribute.
+
+  @param[in] BaseAddress  Base address of the range.
+  @param[in] Length       Length of the range.
+**/
+VOID
+RemoveNxprotection (
+  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN                 Length
+  );
+
 #endif
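
Reviewer note: a minimal sketch, not part of this patch, of how the relocated routines are expected to be driven on the DXE side. PrepareApLoopCode runs once while memory services are still available; the ExitBootServices callback then wakes every AP with the now-shared RelocateApLoop and spins on mNumberToFinish until all APs have parked in the reserved loop. The callback name below and the WakeUpAP signature follow the existing code in DxeMpLib.c/MpLib.c and are assumed unchanged by this move; treat the body as illustrative only.

/**
  Sketch of the ExitBootServices callback that consumes the shared routines.
  Assumes GetCpuMpData (), GetProtectedModeCS (), and WakeUpAP () keep their
  current MpInitLib signatures.
**/
VOID
EFIAPI
MpInitChangeApLoopCallback (
  IN EFI_EVENT  Event,
  IN VOID       *Context
  )
{
  CPU_MP_DATA  *CpuMpData;

  CpuMpData                = GetCpuMpData ();
  CpuMpData->PmCodeSegment = GetProtectedModeCS ();
  CpuMpData->ApLoopMode    = PcdGet8 (PcdCpuApLoopMode);
  //
  // All processors except the BSP must check in.
  //
  mNumberToFinish = CpuMpData->CpuCount - 1;

  //
  // Broadcast the shared RelocateApLoop to every AP; each AP decrements
  // mNumberToFinish once it has entered the reserved safe loop.
  //
  WakeUpAP (CpuMpData, TRUE, 0, RelocateApLoop, NULL, TRUE);
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

Because PrepareApLoopCode performed the reserved-memory allocation and NX/page-table setup earlier, this callback makes no memory-service calls, which is the constraint the moved comment block describes.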