UefiCpuPkg: Move some code in DxeMpLib to common place

Move some code in DxeMpLib.c to the common MpLib.c. The moved
code relocates APs to a new safe buffer before booting into the
OS. In subsequent commits, this code will also be used by
PeiMpLib. This commit doesn't change any functionality.

Signed-off-by: Dun Tan <dun.tan@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Jiaxin Wu <jiaxin.wu@intel.com>
commit fcd09b1edb (parent 68310cd56a): 3 changed files with 198 additions and 148 deletions
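
For readers skimming the diff: PrepareApLoopCode() carves a single reserved allocation into AP loop code at the bottom and per-AP stacks at the top, and RelocateApLoop() hands each AP a 128-byte slice below the shared stack top. The host-side C sketch below reproduces only that arithmetic; the CpuCount, ApLoopFuncSize, and base-address values are invented examples, not values from the commit.

#include <stdio.h>

#define EFI_PAGE_SIZE       0x1000u
#define AP_SAFE_STACK_SIZE  128u
#define SIZE_TO_PAGES(s)    (((s) + EFI_PAGE_SIZE - 1) / EFI_PAGE_SIZE)

int
main (void)
{
  unsigned            CpuCount       = 8;              /* invented example value */
  unsigned            ApLoopFuncSize = 0x260;          /* invented example value */
  unsigned long long  Address        = 0x7F000000ULL;  /* invented 4K-aligned base */
  unsigned long long  TopOfApStack;
  unsigned            StackPages;
  unsigned            FuncPages;
  unsigned            Index;

  /* Mirrors: StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE); */
  StackPages = SIZE_TO_PAGES (CpuCount * AP_SAFE_STACK_SIZE);
  FuncPages  = SIZE_TO_PAGES (ApLoopFuncSize);

  /* Mirrors: mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages + FuncPages); */
  TopOfApStack = Address + (unsigned long long)(StackPages + FuncPages) * EFI_PAGE_SIZE;

  printf ("AP loop code at 0x%llx, stack top at 0x%llx\n", Address, TopOfApStack);

  /* Each AP takes the 128-byte slice below the previous one, as in RelocateApLoop(). */
  for (Index = 0; Index < CpuCount; Index++) {
    printf ("AP%u stack top: 0x%llx\n", Index, TopOfApStack - Index * AP_SAFE_STACK_SIZE);
  }

  return 0;
}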

UefiCpuPkg/Library/MpInitLib/DxeMpLib.c

@@ -20,15 +20,11 @@
 #define AP_SAFE_STACK_SIZE  128
 
-CPU_MP_DATA             *mCpuMpData                  = NULL;
-EFI_EVENT               mCheckAllApsEvent            = NULL;
-EFI_EVENT               mMpInitExitBootServicesEvent = NULL;
-EFI_EVENT               mLegacyBootEvent             = NULL;
-volatile BOOLEAN        mStopCheckAllApsStatus       = TRUE;
-RELOCATE_AP_LOOP_ENTRY  mReservedApLoop;
-UINTN                   mReservedTopOfApStack;
-volatile UINT32         mNumberToFinish = 0;
-UINTN                   mApPageTable;
+CPU_MP_DATA       *mCpuMpData                  = NULL;
+EFI_EVENT         mCheckAllApsEvent            = NULL;
+EFI_EVENT         mMpInitExitBootServicesEvent = NULL;
+EFI_EVENT         mLegacyBootEvent             = NULL;
+volatile BOOLEAN  mStopCheckAllApsStatus       = TRUE;
 
 //
 // Begin wakeup buffer allocation below 0x88000
@@ -368,60 +364,6 @@ GetProtectedModeCS (
   return Index * 8;
 }
 
-/**
-  Do sync on APs.
-
-  @param[in, out] Buffer  Pointer to private data buffer.
-**/
-VOID
-EFIAPI
-RelocateApLoop (
-  IN OUT VOID  *Buffer
-  )
-{
-  CPU_MP_DATA  *CpuMpData;
-  BOOLEAN      MwaitSupport;
-  UINTN        ProcessorNumber;
-  UINTN        StackStart;
-
-  MpInitLibWhoAmI (&ProcessorNumber);
-  CpuMpData    = GetCpuMpData ();
-  MwaitSupport = IsMwaitSupport ();
-  if (CpuMpData->UseSevEsAPMethod) {
-    //
-    // 64-bit AMD processors with SEV-ES
-    //
-    StackStart = CpuMpData->SevEsAPResetStackStart;
-    mReservedApLoop.AmdSevEntry (
-      MwaitSupport,
-      CpuMpData->ApTargetCState,
-      CpuMpData->PmCodeSegment,
-      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
-      (UINTN)&mNumberToFinish,
-      CpuMpData->Pm16CodeSegment,
-      CpuMpData->SevEsAPBuffer,
-      CpuMpData->WakeupBuffer
-      );
-  } else {
-    //
-    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
-    //
-    StackStart = mReservedTopOfApStack;
-    mReservedApLoop.GenericEntry (
-      MwaitSupport,
-      CpuMpData->ApTargetCState,
-      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
-      (UINTN)&mNumberToFinish,
-      mApPageTable
-      );
-  }
-
-  //
-  // It should never reach here
-  //
-  ASSERT (FALSE);
-}
 
 /**
   Allocate buffer for ApLoopCode.
@@ -477,89 +419,6 @@ RemoveNxprotection (
   }
 }
 
-/**
-  Prepare ApLoopCode.
-
-  @param[in] CpuMpData  Pointer to CpuMpData.
-**/
-VOID
-PrepareApLoopCode (
-  IN CPU_MP_DATA  *CpuMpData
-  )
-{
-  EFI_PHYSICAL_ADDRESS     Address;
-  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap;
-  UINT8                    *ApLoopFunc;
-  UINTN                    ApLoopFuncSize;
-  UINTN                    StackPages;
-  UINTN                    FuncPages;
-  IA32_CR0                 Cr0;
-
-  AddressMap = &CpuMpData->AddressMap;
-  if (CpuMpData->UseSevEsAPMethod) {
-    //
-    // 64-bit AMD processors with SEV-ES
-    //
-    Address        = BASE_4GB - 1;
-    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressAmdSev;
-    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeAmdSev;
-  } else {
-    //
-    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
-    //
-    Address        = MAX_ADDRESS;
-    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressGeneric;
-    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
-  }
-
-  //
-  // Avoid APs access invalid buffer data which allocated by BootServices,
-  // so we will allocate reserved data for AP loop code. We also need to
-  // allocate this buffer below 4GB due to APs may be transferred to 32bit
-  // protected mode on long mode DXE.
-  // Allocating it in advance since memory services are not available in
-  // Exit Boot Services callback function.
-  //
-  //  +------------+ (TopOfApStack)
-  //  |  Stack * N |
-  //  +------------+ (stack base, 4k aligned)
-  //  |  Padding   |
-  //  +------------+
-  //  |  Ap Loop   |
-  //  +------------+ (low address, 4k-aligned)
-  //
-  StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
-  FuncPages  = EFI_SIZE_TO_PAGES (ApLoopFuncSize);
-
-  AllocateApLoopCodeBuffer (StackPages + FuncPages, &Address);
-  ASSERT (Address != 0);
-
-  Cr0.UintN = AsmReadCr0 ();
-  if (Cr0.Bits.PG != 0) {
-    //
-    // Make sure that the buffer memory is executable if NX protection is enabled
-    // for EfiReservedMemoryType.
-    //
-    RemoveNxprotection (Address, EFI_PAGES_TO_SIZE (FuncPages));
-  }
-
-  mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages + FuncPages);
-  ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
-  mReservedApLoop.Data = (VOID *)(UINTN)Address;
-  ASSERT (mReservedApLoop.Data != NULL);
-  CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);
-  if (!CpuMpData->UseSevEsAPMethod) {
-    //
-    // processors without SEV-ES and paging is enabled
-    //
-    mApPageTable = CreatePageTable (
-                     (UINTN)Address,
-                     EFI_PAGES_TO_SIZE (StackPages + FuncPages)
-                     );
-  }
-}
 
 /**
   Callback function for ExitBootServices.

UefiCpuPkg/Library/MpInitLib/MpLib.c

@@ -17,6 +17,11 @@ EFI_GUID mCpuInitMpLibHobGuid = CPU_INIT_MP_LIB_HOB_GUID;
 EFI_GUID  mMpHandOffGuid       = MP_HANDOFF_GUID;
 EFI_GUID  mMpHandOffConfigGuid = MP_HANDOFF_CONFIG_GUID;
+RELOCATE_AP_LOOP_ENTRY  mReservedApLoop;
+UINTN                   mReservedTopOfApStack;
+volatile UINT32         mNumberToFinish = 0;
+UINTN                   mApPageTable;
+
 
 /**
   Save the volatile registers required to be restored following INIT IPI.
@@ -3240,3 +3245,140 @@ ConfidentialComputingGuestHas (
   return (CurrentAttr == Attr);
 }
+
+/**
+  Do sync on APs.
+
+  @param[in, out] Buffer  Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+RelocateApLoop (
+  IN OUT VOID  *Buffer
+  )
+{
+  CPU_MP_DATA  *CpuMpData;
+  BOOLEAN      MwaitSupport;
+  UINTN        ProcessorNumber;
+  UINTN        StackStart;
+
+  MpInitLibWhoAmI (&ProcessorNumber);
+  CpuMpData    = GetCpuMpData ();
+  MwaitSupport = IsMwaitSupport ();
+  if (CpuMpData->UseSevEsAPMethod) {
+    //
+    // 64-bit AMD processors with SEV-ES
+    //
+    StackStart = CpuMpData->SevEsAPResetStackStart;
+    mReservedApLoop.AmdSevEntry (
+      MwaitSupport,
+      CpuMpData->ApTargetCState,
+      CpuMpData->PmCodeSegment,
+      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+      (UINTN)&mNumberToFinish,
+      CpuMpData->Pm16CodeSegment,
+      CpuMpData->SevEsAPBuffer,
+      CpuMpData->WakeupBuffer
+      );
+  } else {
+    //
+    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+    //
+    StackStart = mReservedTopOfApStack;
+    mReservedApLoop.GenericEntry (
+      MwaitSupport,
+      CpuMpData->ApTargetCState,
+      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+      (UINTN)&mNumberToFinish,
+      mApPageTable
+      );
+  }
+
+  //
+  // It should never reach here
+  //
+  ASSERT (FALSE);
+}
+
+/**
+  Prepare ApLoopCode.
+
+  @param[in] CpuMpData  Pointer to CpuMpData.
+**/
+VOID
+PrepareApLoopCode (
+  IN CPU_MP_DATA  *CpuMpData
+  )
+{
+  EFI_PHYSICAL_ADDRESS     Address;
+  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap;
+  UINT8                    *ApLoopFunc;
+  UINTN                    ApLoopFuncSize;
+  UINTN                    StackPages;
+  UINTN                    FuncPages;
+  IA32_CR0                 Cr0;
+
+  AddressMap = &CpuMpData->AddressMap;
+  if (CpuMpData->UseSevEsAPMethod) {
+    //
+    // 64-bit AMD processors with SEV-ES
+    //
+    Address        = BASE_4GB - 1;
+    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressAmdSev;
+    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeAmdSev;
+  } else {
+    //
+    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+    //
+    Address        = MAX_ADDRESS;
+    ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressGeneric;
+    ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
+  }
+
+  //
+  // Avoid APs access invalid buffer data which allocated by BootServices,
+  // so we will allocate reserved data for AP loop code. We also need to
+  // allocate this buffer below 4GB due to APs may be transferred to 32bit
+  // protected mode on long mode DXE.
+  // Allocating it in advance since memory services are not available in
+  // Exit Boot Services callback function.
+  //
+  //  +------------+ (TopOfApStack)
+  //  |  Stack * N |
+  //  +------------+ (stack base, 4k aligned)
+  //  |  Padding   |
+  //  +------------+
+  //  |  Ap Loop   |
+  //  +------------+ (low address, 4k-aligned)
+  //
+  StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
+  FuncPages  = EFI_SIZE_TO_PAGES (ApLoopFuncSize);
+
+  AllocateApLoopCodeBuffer (StackPages + FuncPages, &Address);
+  ASSERT (Address != 0);
+
+  Cr0.UintN = AsmReadCr0 ();
+  if (Cr0.Bits.PG != 0) {
+    //
+    // Make sure that the buffer memory is executable if NX protection is enabled
+    // for EfiReservedMemoryType.
+    //
+    RemoveNxprotection (Address, EFI_PAGES_TO_SIZE (FuncPages));
+  }
+
+  mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages + FuncPages);
+  ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
+  mReservedApLoop.Data = (VOID *)(UINTN)Address;
+  ASSERT (mReservedApLoop.Data != NULL);
+  CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);
+  if (!CpuMpData->UseSevEsAPMethod) {
+    //
+    // processors without SEV-ES and paging is enabled
+    //
+    mApPageTable = CreatePageTable (
+                     (UINTN)Address,
+                     EFI_PAGES_TO_SIZE (StackPages + FuncPages)
+                     );
+  }
+}
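
For context, the driver of the code moved above stays in DxeMpLib.c: at ExitBootServices the BSP wakes every AP with RelocateApLoop() as the procedure, then spins until each AP has decremented mNumberToFinish from inside the relocated loop. Below is a lightly simplified paraphrase of that existing callback (SEV-ES buffer handling and the PcdCpuApLoopMode update are omitted), not part of this diff:

VOID
EFIAPI
MpInitChangeApLoopCallback (
  IN EFI_EVENT  Event,
  IN VOID       *Context
  )
{
  CPU_MP_DATA  *CpuMpData;

  CpuMpData                = GetCpuMpData ();
  CpuMpData->PmCodeSegment = GetProtectedModeCS ();
  mNumberToFinish          = CpuMpData->CpuCount - 1;  // every AP; the BSP stays here

  //
  // Broadcast-wake the APs with RelocateApLoop as the AP procedure. The
  // relocated assembly loop decrements mNumberToFinish once an AP is
  // parked in the reserved buffer, off boot-services memory.
  //
  WakeUpAP (CpuMpData, TRUE, 0, RelocateApLoop, NULL, TRUE);
  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}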

UefiCpuPkg/Library/MpInitLib/MpLib.h

@@ -1,7 +1,7 @@
 /** @file
   Common header file for MP Initialize Library.
 
-Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2016 - 2024, Intel Corporation. All rights reserved.<BR>
 Copyright (c) 2020 - 2024, AMD Inc. All rights reserved.<BR>
 
 SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -357,7 +357,8 @@ typedef
   IN UINTN  StackStart
   );
 
-extern EFI_GUID  mCpuInitMpLibHobGuid;
+extern EFI_GUID         mCpuInitMpLibHobGuid;
+extern volatile UINT32  mNumberToFinish;
 
 /**
   Assembly code to place AP into safe loop mode.
@@ -933,4 +934,52 @@ AmdSevUpdateCpuMpData (
   IN CPU_MP_DATA  *CpuMpData
   );
 
+/**
+  Prepare ApLoopCode.
+
+  @param[in] CpuMpData  Pointer to CpuMpData.
+**/
+VOID
+PrepareApLoopCode (
+  IN CPU_MP_DATA  *CpuMpData
+  );
+
+/**
+  Do sync on APs.
+
+  @param[in, out] Buffer  Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+RelocateApLoop (
+  IN OUT VOID  *Buffer
+  );
+
+/**
+  Allocate buffer for ApLoopCode.
+
+  @param[in]      Pages    Number of pages to allocate.
+  @param[in, out] Address  Pointer to the allocated buffer.
+**/
+VOID
+AllocateApLoopCodeBuffer (
+  IN UINTN                     Pages,
+  IN OUT EFI_PHYSICAL_ADDRESS  *Address
+  );
+
+/**
+  Remove Nx protection for the range specified by BaseAddress and Length.
+
+  The PEI implementation uses CpuPageTableLib to change the attribute.
+  The DXE implementation uses gDS to change the attribute.
+
+  @param[in] BaseAddress  BaseAddress of the range.
+  @param[in] Length       Length of the range.
+**/
+VOID
+RemoveNxprotection (
+  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
+  IN UINTN                 Length
+  );
+
 #endif
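
The last two declarations are the per-phase seams this refactor introduces: PeiMpLib and DxeMpLib each supply their own AllocateApLoopCodeBuffer() and RemoveNxprotection(). Below is a rough sketch of the DXE side (paraphrased from DxeMpLib.c, not part of this diff; the PEI versions, which use PEI memory allocation and CpuPageTableLib, arrive in the follow-up commits):

VOID
AllocateApLoopCodeBuffer (
  IN UINTN                     Pages,
  IN OUT EFI_PHYSICAL_ADDRESS  *Address
  )
{
  EFI_STATUS  Status;

  //
  // Reserved-type pages below the caller-supplied maximum address, so the
  // buffer survives ExitBootServices and stays reachable for 32-bit APs.
  //
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  Pages,
                  Address
                  );
  ASSERT_EFI_ERROR (Status);
}

VOID
RemoveNxprotection (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Length
  )
{
  EFI_STATUS                       Status;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  MemDesc;

  //
  // Clear the XP attribute through GCD so the copied AP loop code stays
  // executable even when NX protection covers EfiReservedMemoryType.
  //
  Status = gDS->GetMemorySpaceDescriptor (BaseAddress, &MemDesc);
  if (!EFI_ERROR (Status)) {
    gDS->SetMemorySpaceAttributes (
           BaseAddress,
           Length,
           MemDesc.Attributes & ~((UINT64)EFI_MEMORY_XP)
           );
  }
}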