/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/
#include "PiSmmCpuDxeSmm.h"
|
2023-07-26 11:36:02 +02:00
|
|
|
#include <PiPei.h>
|
2015-10-19 21:12:53 +02:00
|
|
|
|
2016-06-29 03:00:13 +02:00
|
|
|
//
// Flags used when program the register.
//
// Core level sync and package level sync intentionally use SEPARATE
// semaphore containers: sharing one container lets a semaphore released
// for a package-level sync satisfy a pending core-level wait on another
// thread, breaking the required feature initialization order
// (see https://bugzilla.tianocore.org/show_bug.cgi?id=1311).
//
typedef struct {
  volatile UINTN     MemoryMappedLock;          // Spinlock used to program mmio
  volatile UINT32    *CoreSemaphoreCount;       // Semaphore container used to program
                                                // core level semaphore.
  volatile UINT32    *PackageSemaphoreCount;    // Semaphore container used to program
                                                // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;
|
2016-06-29 03:00:13 +02:00
|
|
|
|
2015-10-19 21:12:53 +02:00
|
|
|
//
// 2 pages (8 KB) directly below the legacy VGA range at 0xA0000.
// NOTE(review): not referenced in this portion of the file; presumably
// consumed elsewhere in the driver -- confirm before removing.
//
#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

//
// SMRAM-resident copy of the platform's ACPI_CPU_DATA; populated by
// GetAcpiCpuData () and consumed on the S3 resume path.
//
ACPI_CPU_DATA  mAcpiCpuData;

//
// Set TRUE by SmmRestoreCpu () on the S3 path; consumed and cleared by
// RestoreSmmConfigurationInS3 ().
//
BOOLEAN  mRestoreSmmConfigurationInS3 = FALSE;

//
// S3 boot flag
//
BOOLEAN  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE  *mSmmS3ResumeState = NULL;

//
// Cached value of PcdAcpiS3Enable (refreshed by GetAcpiS3EnableFlag ()).
// When FALSE, all S3-related setup in this file becomes a no-op.
//
BOOLEAN  mAcpiS3Enable = TRUE;
|
|
|
|
|
2016-07-19 10:44:16 +02:00
|
|
|
/**
  Restore SMM Configuration in S3 boot path.

  No-op unless ACPI S3 is enabled and a restore was requested by
  SmmRestoreCpu () via mRestoreSmmConfigurationInS3. Refreshes the gSmst
  fields from gSmmCpuPrivate, re-applies the SMM Code Access Check
  configuration, and completes the SMM ready-to-lock sequence; the
  request flag is cleared afterwards so the work runs only once.
**/
VOID
RestoreSmmConfigurationInS3 (
  VOID
  )
{
  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Restore SMM Configuration in S3 boot path.
  //
  if (mRestoreSmmConfigurationInS3) {
    //
    // Need make sure gSmst is correct because below function may use them.
    //
    gSmst->SmmStartupThisAp      = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
    gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
    gSmst->NumberOfCpus          = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    gSmst->CpuSaveStateSize      = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
    gSmst->CpuSaveState          = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    // Clear the request so the restore only happens once per S3 resume.
    mRestoreSmmConfigurationInS3 = FALSE;
  }
}
|
|
|
|
|
|
|
|
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  This function does not return: it resumes the PEI phase via
  SwitchStack () (32-bit SMM, or PcdDxeIplSwitchToLongMode FALSE) or
  AsmDisablePaging64 () (64-bit SMM), and dead-loops if neither resume
  path applies or if no S3 resume context was recorded.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  //
  // Setup 64bit IDT in 64bit SMM env when called from 32bit PEI.
  // Note: 64bit PEI and 32bit DXE is not a supported combination.
  //
  if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) && (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == TRUE)) {
    //
    // Save the IA32 IDT Descriptor (restored later, just before dropping
    // back to 32-bit mode via AsmDisablePaging64).
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);

    //
    // Setup X64 IDT table (first 32 exception vectors only).
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN)IdtEntryTable;
    X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    if (mSmmDebugAgentSupport) {
      InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
    }
  }

  //
  // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
  //
  ExecuteFirstSmiInit ();

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode or PcdDxeIplSwitchToLongMode is FALSE, then use SwitchStack() to resume PEI Phase.
  // Note: 64bit PEI and 32bit DXE is not a supported combination.
  //
  if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) || (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == FALSE)) {
    DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    // SwitchStack () does not return.
    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);
    // AsmDisablePaging64 () does not return.
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
|
|
|
|
|
|
|
|
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  Locates the gEfiAcpiVariableGuid HOB (dead-loops if absent, since S3
  resume cannot work without it), places a zeroed SMM_S3_RESUME_STATE
  at the start of the described SMRAM region, and fills in the SMM entry
  point, stack, and control-register context used by the PEI S3 resume
  code to re-enter SMM via SmmRestoreCpu ().

  @param[in] Cr3                The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __func__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    // The resume-state structure lives at the start of the SMRAM region
    // described by the HOB.
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      // Allocation failed: record a zero-size stack rather than a bogus size.
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = (UINT32)AsmReadCr0 ();
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = (UINT32)AsmReadCr4 ();

    // Signature records the bitness of this SMM image so the resume path
    // in SmmRestoreCpu () knows how to return to PEI.
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }

    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }
}
|
|
|
|
|
|
|
|
/**
|
2021-01-11 02:54:19 +01:00
|
|
|
Copy register table from non-SMRAM into SMRAM.
|
2016-07-19 10:44:16 +02:00
|
|
|
|
|
|
|
@param[in] DestinationRegisterTableList Points to destination register table.
|
|
|
|
@param[in] SourceRegisterTableList Points to source register table.
|
|
|
|
@param[in] NumberOfCpus Number of CPUs.
|
|
|
|
|
|
|
|
**/
|
|
|
|
VOID
|
|
|
|
CopyRegisterTable (
|
|
|
|
IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
|
|
|
|
IN CPU_REGISTER_TABLE *SourceRegisterTableList,
|
|
|
|
IN UINT32 NumberOfCpus
|
|
|
|
)
|
|
|
|
{
|
|
|
|
UINTN Index;
|
|
|
|
CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
|
|
|
|
|
|
|
|
CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
|
|
|
|
for (Index = 0; Index < NumberOfCpus; Index++) {
|
2021-01-11 02:54:19 +01:00
|
|
|
if (DestinationRegisterTableList[Index].TableLength != 0) {
|
|
|
|
DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
|
2017-03-07 13:01:51 +01:00
|
|
|
RegisterTableEntry = AllocateCopyPool (
|
|
|
|
DestinationRegisterTableList[Index].AllocatedSize,
|
|
|
|
(VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
|
|
|
|
);
|
|
|
|
ASSERT (RegisterTableEntry != NULL);
|
|
|
|
DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
|
2016-07-19 10:44:16 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-11 02:54:19 +01:00
|
|
|
/**
|
|
|
|
Check whether the register table is empty or not.
|
|
|
|
|
|
|
|
@param[in] RegisterTable Point to the register table.
|
|
|
|
@param[in] NumberOfCpus Number of CPUs.
|
|
|
|
|
|
|
|
@retval TRUE The register table is empty.
|
|
|
|
@retval FALSE The register table is not empty.
|
|
|
|
**/
|
|
|
|
BOOLEAN
|
|
|
|
IsRegisterTableEmpty (
|
|
|
|
IN CPU_REGISTER_TABLE *RegisterTable,
|
|
|
|
IN UINT32 NumberOfCpus
|
|
|
|
)
|
|
|
|
{
|
|
|
|
UINTN Index;
|
|
|
|
|
|
|
|
if (RegisterTable != NULL) {
|
|
|
|
for (Index = 0; Index < NumberOfCpus; Index++) {
|
|
|
|
if (RegisterTable[Index].TableLength != 0) {
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
|
2021-09-16 11:27:11 +02:00
|
|
|
/**
  Copy the data used to initialize processor register into SMRAM.

  Deep-copies the register table lists, the per-package/per-core thread
  count arrays, and the AP location array referenced by the source
  structure into SMRAM-resident allocations, so they remain accessible
  during S3 resume. Empty register tables and zero/NULL source pointers
  are skipped.

  @param[in,out] CpuFeatureInitDataDst   Pointer to the destination CPU_FEATURE_INIT_DATA structure.
  @param[in]     CpuFeatureInitDataSrc   Pointer to the source CPU_FEATURE_INIT_DATA structure.

**/
VOID
CopyCpuFeatureInitDatatoSmram (
  IN OUT CPU_FEATURE_INIT_DATA  *CpuFeatureInitDataDst,
  IN     CPU_FEATURE_INIT_DATA  *CpuFeatureInitDataSrc
  )
{
  CPU_STATUS_INFORMATION  *CpuStatus;

  //
  // Deep-copy the pre-SMM-init register table list if it has any entries.
  //
  if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
    CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
    ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);

    CopyRegisterTable (
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
      mAcpiCpuData.NumberOfCpus
      );
  }

  //
  // Deep-copy the main register table list if it has any entries.
  //
  if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
    CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
    ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);

    CopyRegisterTable (
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
      (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
      mAcpiCpuData.NumberOfCpus
      );
  }

  //
  // Shallow-copy the CPU status block, then replace its embedded pointers
  // with SMRAM copies below.
  //
  CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
  CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));

  if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
    CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                         sizeof (UINT32) * CpuStatus->PackageCount,
                                         (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
                                         );
    ASSERT (CpuStatus->ThreadCountPerPackage != 0);
  }

  if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
    // NOTE(review): the allocation is sized in UINT8 units while the source
    // pointer is cast to UINT32 * -- the cast is only for AllocateCopyPool's
    // VOID * parameter, but confirm the element width against
    // CPU_STATUS_INFORMATION's declaration.
    CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                      sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
                                      (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
                                      );
    ASSERT (CpuStatus->ThreadCountPerCore != 0);
  }

  if (CpuFeatureInitDataSrc->ApLocation != 0) {
    CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                          mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                          (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
                                          );
    ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
  }
}
|
|
|
|
|
2016-07-19 10:44:16 +02:00
|
|
|
/**
  Get ACPI CPU data.

  Copies the ACPI_CPU_DATA published via PcdCpuS3DataAddress (and the
  buffers it references: MTRR table, GDTR/IDTR profiles, AP GDT/IDT and
  machine-check handler, and optionally the CPU feature init data) into
  SMRAM-resident allocations so they survive until S3 resume. No-op if
  ACPI S3 is disabled or the PCD was never set.
**/
VOID
GetAcpiCpuData (
  VOID
  )
{
  ACPI_CPU_DATA           *AcpiCpuData;
  IA32_DESCRIPTOR         *Gdtr;
  IA32_DESCRIPTOR         *Idtr;
  VOID                    *GdtForAp;
  VOID                    *IdtForAp;
  VOID                    *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION  *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
  }

  //
  // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
  //
  mAcpiCpuData.NumberOfCpus = 0;

  //
  // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
  //
  AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
  if (AcpiCpuData == 0) {
    return;
  }

  //
  // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
  //
  CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));

  // Replace the borrowed MTRR table pointer with an SMRAM copy.
  mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
  ASSERT (mAcpiCpuData.MtrrTable != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));

  // Replace the borrowed GDTR profile pointer with an SMRAM copy.
  mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.GdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));

  // Replace the borrowed IDTR profile pointer with an SMRAM copy.
  mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
  ASSERT (mAcpiCpuData.IdtrProfile != 0);

  CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
  // All three are packed into a single allocation: [GDT][IDT][MC handler].
  //
  Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
  Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;

  GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
  ASSERT (GdtForAp != NULL);
  IdtForAp                 = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));
  MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));

  CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
  CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
  CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);

  // Repoint the descriptors/base at the SMRAM copies.
  Gdtr->Base                             = (UINTN)GdtForAp;
  Idtr->Base                             = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));

  if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
    //
    // If the CPU features will not be initialized by CpuFeaturesPei module during
    // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
    // which will be consumed in SmmRestoreCpu during next S3 resume.
    //
    CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);

    CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;

    // Separate containers for core-level and package-level semaphores;
    // one slot per logical processor in each.
    mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
                                     sizeof (UINT32) * CpuStatus->PackageCount *
                                     CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                     );
    ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);

    mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
                                        sizeof (UINT32) * CpuStatus->PackageCount *
                                        CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
                                        );
    ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);

    InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
  }
}
|
2016-07-20 04:24:58 +02:00
|
|
|
|
|
|
|
/**
  Get ACPI S3 enable flag.

  Caches PcdAcpiS3Enable into mAcpiS3Enable so the S3-related functions
  in this file can test a simple global instead of re-reading the PCD.
**/
VOID
GetAcpiS3EnableFlag (
  VOID
  )
{
  mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
}
|