UefiCpuPkg: Put APs in 64-bit mode before handoff to OS.

Add 'AsmRelocateApLoopStartGeneric' for X64 processors, except 64-bit
AMD processors with SEV-ES enabled.

Remove the unused arguments from AsmRelocateApLoopStartGeneric and update
the stack offsets accordingly.

Create a page table that identity-maps the allocated reserved memory.

Keep the below-4GB limitation on the memory allocation only for the case where
APs must still be transitioned to 32-bit mode before handing off to the OS.
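For orientation, the routine-selection logic this change adds to InitMpGlobalData() (see the DxeMpLib.c hunk below) boils down to roughly the following sketch; the variable names mirror the diff, but this is an illustration rather than the literal source:

    if (CpuMpData->UseSevEsAPMethod) {
      //
      // 64-bit AMD processors with SEV-ES: APs are still dropped to 32-bit
      // mode, so the reserved buffer must stay below 4GB.
      //
      Address        = BASE_4GB - 1;
      ApLoopFunc     = AddressMap->RelocateApLoopFuncAddress;
      ApLoopFuncSize = AddressMap->RelocateApLoopFuncSize;
    } else {
      //
      // All other processors: APs park in their current paging mode (64-bit
      // long mode on X64), so the buffer may live anywhere and the new
      // AsmRelocateApLoopStartGeneric code is used.
      //
      Address        = MAX_ADDRESS;
      ApLoopFunc     = AddressMap->RelocateApLoopFuncAddressGeneric;
      ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
    }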

Cc: Guo Dong <guo.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Sean Rhodes <sean@starlabs.systems>
Cc: James Lu <james.lu@intel.com>
Cc: Gua Guo <gua.guo@intel.com>
Signed-off-by: Yuanhao Xie <yuanhao.xie@intel.com>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Tested-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
Xie, Yuanhao 2023-03-01 14:09:52 +08:00 committed by mergify[bot]
parent 6bc74286e7
commit facf52aeb8
8 changed files with 272 additions and 48 deletions

View File

@@ -1,7 +1,7 @@
## @file
# MP Initialize Library instance for DXE driver.
#
# Copyright (c) 2016 - 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
@@ -24,10 +24,12 @@
[Sources.IA32]
Ia32/AmdSev.c
Ia32/MpFuncs.nasm
Ia32/CreatePageTable.c
[Sources.X64]
X64/AmdSev.c
X64/MpFuncs.nasm
X64/CreatePageTable.c
[Sources.common]
AmdSev.c
@@ -56,6 +58,8 @@
PcdLib
CcExitLib
MicrocodeLib
[LibraryClasses.X64]
CpuPageTableLib
[Protocols]
gEfiTimerArchProtocolGuid ## SOMETIMES_CONSUMES

View File

@@ -28,6 +28,7 @@ volatile BOOLEAN mStopCheckAllApsStatus = TRUE;
RELOCATE_AP_LOOP_ENTRY mReservedApLoop;
UINTN mReservedTopOfApStack;
volatile UINT32 mNumberToFinish = 0;
UINTN mApPageTable;
//
// Begin wakeup buffer allocation below 0x88000
@@ -409,12 +410,9 @@ RelocateApLoop (
mReservedApLoop.GenericEntry (
MwaitSupport,
CpuMpData->ApTargetCState,
CpuMpData->PmCodeSegment,
StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
(UINTN)&mNumberToFinish,
CpuMpData->Pm16CodeSegment,
CpuMpData->SevEsAPBuffer,
CpuMpData->WakeupBuffer
mApPageTable
);
}
@@ -484,6 +482,9 @@ InitMpGlobalData (
EFI_GCD_MEMORY_SPACE_DESCRIPTOR MemDesc;
UINTN StackBase;
CPU_INFO_IN_HOB *CpuInfoInHob;
MP_ASSEMBLY_ADDRESS_MAP *AddressMap;
UINT8 *ApLoopFunc;
UINTN ApLoopFuncSize;
UINTN StackPages;
UINTN FuncPages;
@@ -540,6 +541,23 @@
}
}
AddressMap = &CpuMpData->AddressMap;
if (CpuMpData->UseSevEsAPMethod) {
//
// 64-bit AMD processors with SEV-ES
//
Address = BASE_4GB - 1;
ApLoopFunc = AddressMap->RelocateApLoopFuncAddress;
ApLoopFuncSize = AddressMap->RelocateApLoopFuncSize;
} else {
//
// Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
//
Address = MAX_ADDRESS;
ApLoopFunc = AddressMap->RelocateApLoopFuncAddressGeneric;
ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
}
//
// Avoid APs access invalid buffer data which allocated by BootServices,
// so we will allocate reserved data for AP loop code. We also need to
@@ -558,20 +576,16 @@
//
StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
FuncPages = EFI_SIZE_TO_PAGES (CpuMpData->AddressMap.RelocateApLoopFuncSize);
FuncPages = EFI_SIZE_TO_PAGES (ApLoopFuncSize);
Address = BASE_4GB - 1;
Status = gBS->AllocatePages (
AllocateMaxAddress,
EfiReservedMemoryType,
StackPages + FuncPages,
&Address
);
Status = gBS->AllocatePages (
AllocateMaxAddress,
EfiReservedMemoryType,
StackPages + FuncPages,
&Address
);
ASSERT_EFI_ERROR (Status);
mReservedApLoop.Data = (VOID *)(UINTN)Address;
ASSERT (mReservedApLoop.Data != NULL);
//
// Make sure that the buffer memory is executable if NX protection is enabled
// for EfiReservedMemoryType.
@@ -590,11 +604,18 @@
mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages+FuncPages);
ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
CopyMem (
mReservedApLoop.Data,
CpuMpData->AddressMap.RelocateApLoopFuncAddress,
CpuMpData->AddressMap.RelocateApLoopFuncSize
);
mReservedApLoop.Data = (VOID *)(UINTN)Address;
ASSERT (mReservedApLoop.Data != NULL);
CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);
if (!CpuMpData->UseSevEsAPMethod) {
//
// processors without SEV-ES
//
mApPageTable = CreatePageTable (
(UINTN)Address,
EFI_PAGES_TO_SIZE (StackPages+FuncPages)
);
}
Status = gBS->CreateEvent (
EVT_TIMER | EVT_NOTIFY_SIGNAL,
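Putting the pieces of this hunk together, the non-SEV-ES flow now reads, in condensed form (a sketch of the code above, not a verbatim excerpt):

    StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
    FuncPages  = EFI_SIZE_TO_PAGES (ApLoopFuncSize);

    Status = gBS->AllocatePages (AllocateMaxAddress, EfiReservedMemoryType,
                                 StackPages + FuncPages, &Address);
    ASSERT_EFI_ERROR (Status);

    mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages + FuncPages);
    mReservedApLoop.Data  = (VOID *)(UINTN)Address;
    CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);

    if (!CpuMpData->UseSevEsAPMethod) {
      //
      // Identity-map only the reserved stacks plus the relocated loop code;
      // each AP loads this page table before the boot-services-owned page
      // tables are reclaimed by the OS.
      //
      mApPageTable = CreatePageTable ((UINTN)Address,
                       EFI_PAGES_TO_SIZE (StackPages + FuncPages));
    }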

View File

@@ -0,0 +1,23 @@
/** @file
Function to create a page table.
Only create the page table for x64; leave CreatePageTable empty for Ia32.
Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include <Base.h>
/**
Only create the page table for x64; for Ia32 this CreatePageTable stub is empty.
@param[in] Address The start of the linear address range.
@param[in] Length  The length of the linear address range.
@return The page table to be created (always 0 on Ia32).
**/
UINTN
CreatePageTable (
IN UINTN Address,
IN UINTN Length
)
{
return 0;
}

View File

@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
; Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
; Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
@@ -219,24 +219,24 @@ SwitchToRealProcEnd:
RendezvousFunnelProcEnd:
;-------------------------------------------------------------------------------------
; AsmRelocateApLoop (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish, Pm16CodeSegment, SevEsAPJumpTable, WakeupBuffer);
; AsmRelocateApLoopGeneric (MwaitSupport, ApTargetCState, TopOfApStack, CountTofinish, Cr3);
;
; The Cr3 parameter of ASM_RELOCATE_AP_LOOP_GENERIC is not used on IA32;
; the 32-bit AP loop keeps the page table that is already active.
;-------------------------------------------------------------------------------------
AsmRelocateApLoopStart:
AsmRelocateApLoopGenericStart:
mov eax, esp
mov esp, [eax + 16] ; TopOfApStack
mov esp, [eax + 12] ; TopOfApStack
push dword [eax] ; push return address for stack trace
push ebp
mov ebp, esp
mov ebx, [eax + 8] ; ApTargetCState
mov ecx, [eax + 4] ; MwaitSupport
mov eax, [eax + 20] ; CountTofinish
mov eax, [eax + 16] ; CountTofinish
lock dec dword [eax] ; (*CountTofinish)--
cmp cl, 1 ; Check mwait-monitor support
jnz HltLoop
MwaitLoop:
jnz HltLoopGeneric
MwaitLoopGeneric:
cli
mov eax, esp
xor ecx, ecx
@@ -245,12 +245,12 @@ MwaitLoop:
mov eax, ebx ; Mwait Cx, Target C-State per eax[7:4]
shl eax, 4
mwait
jmp MwaitLoop
HltLoop:
jmp MwaitLoopGeneric
HltLoopGeneric:
cli
hlt
jmp HltLoop
AsmRelocateApLoopEnd:
jmp HltLoopGeneric
AsmRelocateApLoopGenericEnd:
;-------------------------------------------------------------------------------------
; AsmGetAddressMap (&AddressMap);
@@ -264,8 +264,8 @@ ASM_PFX(AsmGetAddressMap):
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelAddress], RendezvousFunnelProcStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.ModeEntryOffset], Flat32Start - RendezvousFunnelProcStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelSize], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddress], AsmRelocateApLoopStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSize], AsmRelocateApLoopEnd - AsmRelocateApLoopStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddressGeneric], AsmRelocateApLoopGenericStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSizeGeneric], AsmRelocateApLoopGenericEnd - AsmRelocateApLoopGenericStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.ModeTransitionOffset], Flat32Start - RendezvousFunnelProcStart
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealNoNxOffset], SwitchToRealProcStart - Flat32Start
mov dword [ebx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeOffset], 0

View File

@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
; Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
; Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
@@ -21,15 +21,17 @@ CPU_SWITCH_STATE_LOADED equ 2
; Equivalent NASM structure of MP_ASSEMBLY_ADDRESS_MAP
;
struc MP_ASSEMBLY_ADDRESS_MAP
.RendezvousFunnelAddress CTYPE_UINTN 1
.ModeEntryOffset CTYPE_UINTN 1
.RendezvousFunnelSize CTYPE_UINTN 1
.RelocateApLoopFuncAddress CTYPE_UINTN 1
.RelocateApLoopFuncSize CTYPE_UINTN 1
.ModeTransitionOffset CTYPE_UINTN 1
.SwitchToRealNoNxOffset CTYPE_UINTN 1
.SwitchToRealPM16ModeOffset CTYPE_UINTN 1
.SwitchToRealPM16ModeSize CTYPE_UINTN 1
.RendezvousFunnelAddress CTYPE_UINTN 1
.ModeEntryOffset CTYPE_UINTN 1
.RendezvousFunnelSize CTYPE_UINTN 1
.RelocateApLoopFuncAddressGeneric CTYPE_UINTN 1
.RelocateApLoopFuncSizeGeneric CTYPE_UINTN 1
.RelocateApLoopFuncAddress CTYPE_UINTN 1
.RelocateApLoopFuncSize CTYPE_UINTN 1
.ModeTransitionOffset CTYPE_UINTN 1
.SwitchToRealNoNxOffset CTYPE_UINTN 1
.SwitchToRealPM16ModeOffset CTYPE_UINTN 1
.SwitchToRealPM16ModeSize CTYPE_UINTN 1
endstruc
;

View File

@@ -177,6 +177,8 @@ typedef struct {
UINT8 *RendezvousFunnelAddress;
UINTN ModeEntryOffset;
UINTN RendezvousFunnelSize;
UINT8 *RelocateApLoopFuncAddressGeneric;
UINTN RelocateApLoopFuncSizeGeneric;
UINT8 *RelocateApLoopFuncAddress;
UINTN RelocateApLoopFuncSize;
UINTN ModeTransitionOffset;
@@ -361,6 +363,29 @@ extern EFI_GUID mCpuInitMpLibHobGuid;
@param[in] ApTargetCState Target C-State value.
@param[in] PmCodeSegment Protected mode code segment value.
**/
typedef
VOID
(EFIAPI *ASM_RELOCATE_AP_LOOP_GENERIC)(
IN BOOLEAN MwaitSupport,
IN UINTN ApTargetCState,
IN UINTN TopOfApStack,
IN UINTN NumberToFinish,
IN UINTN Cr3
);
/**
Assembly code to place the AP into a safe loop mode for AMD processors
with SEV enabled.
Place the AP in the targeted C-State if MONITOR is supported; otherwise
place the AP in the HLT state.
Place the AP in protected mode if it is currently in long mode, so that an AP
woken by a hardware event cannot touch a page table that may no longer be
available while booting to the OS.
@param[in] MwaitSupport TRUE indicates MONITOR is supported.
FALSE indicates MONITOR is not supported.
@param[in] ApTargetCState Target C-State value.
@param[in] PmCodeSegment Protected mode code segment value.
**/
typedef
VOID
(EFIAPI *ASM_RELOCATE_AP_LOOP)(
@@ -403,9 +428,9 @@ AsmExchangeRole (
);
typedef union {
VOID *Data;
ASM_RELOCATE_AP_LOOP AmdSevEntry; // 64-bit AMD Sev processors
ASM_RELOCATE_AP_LOOP GenericEntry; // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or AMD non-Sev processors
VOID *Data;
ASM_RELOCATE_AP_LOOP AmdSevEntry; // 64-bit AMD Sev processors
ASM_RELOCATE_AP_LOOP_GENERIC GenericEntry; // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or AMD non-Sev processors
} RELOCATE_AP_LOOP_ENTRY;
/**
@@ -471,6 +496,18 @@ GetSevEsAPMemory (
VOID
);
/**
Create a 1:1 (identity) mapping page table in reserved memory for the specified address range.
@param[in] Address The start of the linear address range.
@param[in] Length  The length of the linear address range.
@return The address of the page table that was created.
**/
UINTN
CreatePageTable (
IN UINTN Address,
IN UINTN Length
);
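A brief usage sketch for this declaration (the names mirror DxeMpLib.c above; BufferBase and BufferSize are hypothetical placeholders): the returned UINTN is the physical root of the new page table, which the relocated AP loop installs as CR3; the Ia32 instance simply returns 0 and the 32-bit loop never reads it.

    UINTN  ApPageTable;

    //
    // Identity-map the reserved region (AP stacks + relocated loop code).
    // The result is handed to AsmRelocateApLoopGeneric as its Cr3 argument.
    //
    ApPageTable = CreatePageTable (BufferBase, BufferSize);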
/**
This function will be called by BSP to wakeup AP.

View File

@@ -0,0 +1,82 @@
/** @file
Function to create a page table.
Only create the page table for x64; leave CreatePageTable empty for Ia32.
Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include <Library/CpuPageTableLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Base.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/BaseLib.h>
/**
Create a 1:1 (identity) mapping page table in reserved memory for the specified address range.
@param[in] Address The start of the linear address range.
@param[in] Length  The length of the linear address range.
@return The address of the page table that was created.
**/
UINTN
CreatePageTable (
IN UINTN Address,
IN UINTN Length
)
{
EFI_STATUS Status;
VOID *PageTableBuffer;
UINTN PageTableBufferSize;
UINTN PageTable;
PAGING_MODE PagingMode;
IA32_CR4 Cr4;
IA32_MAP_ATTRIBUTE MapAttribute;
IA32_MAP_ATTRIBUTE MapMask;
MapAttribute.Uint64 = Address;
MapAttribute.Bits.Present = 1;
MapAttribute.Bits.ReadWrite = 1;
MapMask.Bits.PageTableBaseAddress = 1;
MapMask.Bits.Present = 1;
MapMask.Bits.ReadWrite = 1;
PageTable = 0;
PageTableBufferSize = 0;
Cr4.UintN = AsmReadCr4 ();
if (Cr4.Bits.LA57 == 1) {
PagingMode = Paging5Level;
} else {
PagingMode = Paging4Level;
}
Status = PageTableMap (
&PageTable,
PagingMode,
NULL,
&PageTableBufferSize,
Address,
Length,
&MapAttribute,
&MapMask
);
ASSERT (Status == EFI_BUFFER_TOO_SMALL);
DEBUG ((DEBUG_INFO, "AP Page Table Buffer Size = %x\n", PageTableBufferSize));
PageTableBuffer = AllocateReservedPages (EFI_SIZE_TO_PAGES (PageTableBufferSize));
ASSERT (PageTableBuffer != NULL);
Status = PageTableMap (
&PageTable,
PagingMode,
PageTableBuffer,
&PageTableBufferSize,
Address,
Length,
&MapAttribute,
&MapMask
);
ASSERT_EFI_ERROR (Status);
return PageTable;
}
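Two notes on the code above, offered as a reading aid rather than a statement about CpuPageTableLib internals: the first PageTableMap() call is the usual size query (NULL buffer, zero size, expected to return EFI_BUFFER_TOO_SMALL with the required buffer size), and assigning Address to MapAttribute.Uint64 seeds the PageTableBaseAddress field with the same physical address being mapped, which is what makes the result a 1:1 (identity) mapping. The consuming side then amounts to roughly:

    //
    // What the relocated AP loop does with the returned root; the real
    // switch happens in AsmRelocateApLoopGeneric assembly, not via BaseLib.
    //
    AsmWriteCr3 (mApPageTable);   // from here on, only the reserved region is mapped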

View File

@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
; Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
; Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
@@ -447,6 +447,58 @@ DoHlt:
BITS 64
AsmRelocateApLoopEnd:
;-------------------------------------------------------------------------------------
; AsmRelocateApLoopGeneric (MwaitSupport, ApTargetCState, TopOfApStack, CountTofinish, Cr3);
; This function is called during the finalization of MP initialization, before booting
; to the OS, and aims to park the APs in either MWAIT or HLT.
;-------------------------------------------------------------------------------------
;  +----------------+
;  | Cr3            |  rsp + 40
;  +----------------+
;  | CountTofinish  |  r9
;  +----------------+
;  | TopOfApStack   |  r8
;  +----------------+
;  | ApTargetCState |  rdx
;  +----------------+
;  | MwaitSupport   |  rcx
;  +----------------+
;  | the return     |
;  +----------------+  low address
AsmRelocateApLoopGenericStart:
mov rax, r9 ; CountTofinish
lock dec dword [rax] ; (*CountTofinish)--
mov rax, [rsp + 40] ; Cr3
; Do not push on old stack, since old stack is not mapped
; in the page table pointed by cr3
mov cr3, rax
mov rsp, r8 ; TopOfApStack
MwaitCheckGeneric:
cmp cl, 1 ; Check mwait-monitor support
jnz HltLoopGeneric
mov rbx, rdx ; Save C-State to ebx
MwaitLoopGeneric:
cli
mov rax, rsp ; Set Monitor Address
xor ecx, ecx ; ecx = 0
xor edx, edx ; edx = 0
monitor
mov rax, rbx ; Mwait Cx, Target C-State per eax[7:4]
shl eax, 4
mwait
jmp MwaitLoopGeneric
HltLoopGeneric:
cli
hlt
jmp HltLoopGeneric
AsmRelocateApLoopGenericEnd:
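For readability, a C-level sketch of what AsmRelocateApLoopGeneric above does. It cannot replace the assembly (the switch of RSP to TopOfApStack has no C equivalent), the function name is made up, and the BaseLib/SynchronizationLib helpers are used only to mirror the instructions:

    #include <Base.h>
    #include <Library/BaseLib.h>
    #include <Library/SynchronizationLib.h>

    VOID
    EFIAPI
    RelocateApLoopGenericSketch (                        // hypothetical name
      IN BOOLEAN  MwaitSupport,
      IN UINTN    ApTargetCState,
      IN UINTN    TopOfApStack,
      IN UINTN    NumberToFinish,
      IN UINTN    Cr3
      )
    {
      InterlockedDecrement ((volatile UINT32 *)NumberToFinish);  // lock dec [CountTofinish]
      AsmWriteCr3 (Cr3);                                 // switch to the reserved page table
      //
      // The assembly now sets RSP = TopOfApStack; everything below runs on
      // the reserved stack and touches no other memory.
      //
      if (MwaitSupport) {
        for ( ; ; ) {
          DisableInterrupts ();
          AsmMonitor (TopOfApStack, 0, 0);               // monitor the top of the AP stack
          AsmMwait (ApTargetCState << 4, 0);             // target C-state in EAX[7:4]
        }
      } else {
        for ( ; ; ) {
          DisableInterrupts ();
          CpuSleep ();                                   // hlt
        }
      }
    }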
;-------------------------------------------------------------------------------------
; AsmGetAddressMap (&AddressMap);
;-------------------------------------------------------------------------------------
@@ -456,6 +508,9 @@ ASM_PFX(AsmGetAddressMap):
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelAddress], rax
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeEntryOffset], LongModeStart - RendezvousFunnelProcStart
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelSize], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
lea rax, [AsmRelocateApLoopGenericStart]
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddressGeneric], rax
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSizeGeneric], AsmRelocateApLoopGenericEnd - AsmRelocateApLoopGenericStart
lea rax, [AsmRelocateApLoopStart]
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddress], rax
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSize], AsmRelocateApLoopEnd - AsmRelocateApLoopStart