mirror of https://github.com/acidanthera/audk.git
UefiCpuPkg/PiSmmCpuDxeSmm: Add logic to support semaphore type.
V4 changes:
1. Serial console log for different threads when programming the register table.
2. Check AcpiCpuData before using it, to avoid a potential ASSERT.

V3 changes:
1. Use global variables instead of internal functions to return the strings for register type and dependence type.
2. Add comments for some complicated logic.

V1 changes:
Because this driver needs to set MSRs saved during the normal boot phase, sync the semaphore logic from the RegisterCpuFeaturesLib code used in the normal boot phase. For details, see the corresponding change for RegisterCpuFeaturesLib:
"UefiCpuPkg/RegisterCpuFeaturesLib: Add logic to support semaphore type."

Cc: Ruiyu Ni <ruiyu.ni@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Eric Dong <eric.dong@intel.com>
Reviewed-by: Ruiyu Ni <ruiyu.ni@intel.com>
Acked-by: Laszlo Ersek <lersek@redhat.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
commit 9332439058
parent b3c71b472d
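The semaphore support added below is a two-step barrier: each thread first posts ("releases") one count to the semaphore of every thread in its dependence scope, then waits on its own semaphore once per thread in that scope, so no thread proceeds until all of them have checked in. The following sketch is a hypothetical stand-alone model of that pattern using C11 atomics and POSIX threads; the names (Release, Wait, Rendezvous, THREADS) are illustrative and are not part of this commit, which expresses the same idea with InterlockedIncrement and InterlockedCompareExchange32 in S3ReleaseSemaphore and S3WaitForSemaphore.

/* Hypothetical stand-alone model of the release-then-wait barrier; not EDK2 code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define THREADS 4                       /* stands in for the per-scope thread count */

static atomic_uint Semaphore[THREADS];  /* stands in for CpuFlags->SemaphoreCount   */

static void Release (atomic_uint *Sem)  /* V(): atomic increment                    */
{
  atomic_fetch_add (Sem, 1);
}

static void Wait (atomic_uint *Sem)     /* P(): decrement once the count is nonzero */
{
  unsigned Value;
  do {
    Value = atomic_load (Sem);
  } while (Value == 0 ||
           !atomic_compare_exchange_weak (Sem, &Value, Value - 1));
}

static void *Rendezvous (void *Arg)
{
  int Self = (int)(long)Arg;
  int i;

  /* Step 1: signal every thread in the scope that this thread is ready. */
  for (i = 0; i < THREADS; i++) {
    Release (&Semaphore[i]);
  }
  /* Step 2: wait until every thread in the scope has signalled this one. */
  for (i = 0; i < THREADS; i++) {
    Wait (&Semaphore[Self]);
  }
  printf ("thread %d passed the barrier\n", Self);
  return NULL;
}

int main (void)
{
  pthread_t Tid[THREADS];
  long      i;

  for (i = 0; i < THREADS; i++) {
    pthread_create (&Tid[i], NULL, Rendezvous, (void *)i);
  }
  for (i = 0; i < THREADS; i++) {
    pthread_join (Tid[i], NULL);
  }
  return 0;
}

In the diff, the scope of both loops is selected by the register table entry's Value field: CoreDepType loops over the MaxThreadCount slots of the current core, PackageDepType over all thread slots of the current package.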
@@ -38,9 +38,13 @@ typedef struct {
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
// Flags used when program the register.
//
SPIN_LOCK                *mMemoryMappedLock = NULL;
typedef struct {
  volatile UINTN    ConsoleLogLock;       // Spinlock used to control console.
  volatile UINTN    MemoryMappedLock;     // Spinlock used to program mmio
  volatile UINT32   *SemaphoreCount;      // Semaphore used to program semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
@@ -62,13 +66,11 @@ AsmGetAddressMap (
#define LEGACY_REGION_SIZE          (2 * 0x1000)
#define LEGACY_REGION_BASE          (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS   mCpuFlags;
ACPI_CPU_DATA                mAcpiCpuData;
volatile UINT32              mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

//
// S3 boot flag
@@ -91,88 +93,7 @@ UINT8 mApHltLoopCodeTemplate[] = {
  0xEB, 0xFC               // jmp $-2
};

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount ++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
CHAR16 *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };

/**
  Sync up the MTRR values for all processors.
@@ -204,42 +125,103 @@ Returns:
}

/**
  Programs registers for the calling processor.
  Increment semaphore by 1.

  This function programs registers for the calling processor.

  @param  RegisterTables       Pointer to register table of the running processor.
  @param  RegisterTableCount   Register table count.
  @param  Sem                  IN: 32-bit unsigned integer

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTables,
  IN UINTN                     RegisterTableCount
S3ReleaseSemaphore (
  IN OUT  volatile UINT32      *Sem
  )
{
  InterlockedIncrement (Sem);
}

/**
  Decrement the semaphore by 1 if it is not zero.

  Performs an atomic decrement operation for semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param  Sem                  IN: 32-bit unsigned integer

**/
VOID
S3WaitForSemaphore (
  IN OUT  volatile UINT32      *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             Sem,
             Value,
             Value - 1
             ) != Value);
}

/**
  Initialize the CPU registers from a register table.

  @param[in]  RegisterTable   The register table for this AP.
  @param[in]  ApLocation      AP location info for this ap.
  @param[in]  CpuStatus       CPU status info for this CPU.
  @param[in]  CpuFlags        Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE           *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION    *ApLocation,
  IN CPU_STATUS_INFORMATION       *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS   *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;
  UINT32                    InitApicId;
  CPU_REGISTER_TABLE        *RegisterTable;

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < RegisterTableCount; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      break;
    }
  }
  ASSERT (RegisterTable != NULL);
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    PackageThreadsCount;
  UINT32                    CurrentThread;
  UINTN                     ProcessorIndex;
  UINTN                     ThreadIndex;
  UINTN                     ValidThreadCount;
  UINT32                    *ValidCoreCountPerPackage;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {

    RegisterTableEntry = &RegisterTableEntryHead[Index];

    DEBUG_CODE_BEGIN ();
      if (ApLocation != NULL) {
        AcquireSpinLock (&CpuFlags->ConsoleLogLock);
        ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +
                ApLocation->Core * CpuStatus->MaxThreadCount +
                ApLocation->Thread;
        DEBUG ((
          DEBUG_INFO,
          "Processor = %lu, Entry Index %lu, Type = %s!\n",
          (UINT64)ThreadIndex,
          (UINT64)Index,
          mRegisterTypeStr[MIN ((REGISTER_TYPE)RegisterTableEntry->RegisterType, InvalidReg)]
          ));
        ReleaseSpinLock (&CpuFlags->ConsoleLogLock);
      }
    DEBUG_CODE_END ();

    //
    // Check the type of specified register
    //
@@ -310,12 +292,6 @@ SetProcessorRegister (
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
@@ -325,21 +301,20 @@ SetProcessorRegister (
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      AcquireSpinLock (&CpuFlags->MemoryMappedLock);
      MmioBitFieldWrite32 (
        (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
      break;
    //
    // Enable or disable cache
@@ -355,9 +330,150 @@ SetProcessorRegister (
      }
      break;

    case Semaphore:
      // Semaphore works logic like below:
      //
      // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
      // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
      //
      // All threads (T0...Tn) waits in P() line and continues running
      // together.
      //
      //
      // T0             T1            ...           Tn
      //
      // V(0...n)       V(0...n)      ...           V(0...n)
      // n * P(0)       n * P(1)      ...           n * P(n)
      //
      ASSERT (
        (ApLocation != NULL) &&
        (CpuStatus->ValidCoreCountPerPackage != 0) &&
        (CpuFlags->SemaphoreCount) != NULL
        );
      SemaphorePtr = CpuFlags->SemaphoreCount;
      switch (RegisterTableEntry->Value) {
      case CoreDepType:
        //
        // Get Offset info for the first thread in the core which current thread belongs to.
        //
        FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
        CurrentThread = FirstThread + ApLocation->Thread;
        //
        // First Notify all threads in current Core that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current core have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      case PackageDepType:
        ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
        //
        // Get Offset info for the first thread in the package which current thread belongs to.
        //
        FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
        //
        // Get the possible threads count for current package.
        //
        PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
        CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
        //
        // Get the valid thread count for current package.
        //
        ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];

        //
        // Different packages may have different valid cores in them. If driver maintail clearly
        // cores number in different packages, the logic will be much complicated.
        // Here driver just simply records the max core number in all packages and use it as expect
        // core number for all packages.
        // In below two steps logic, first current thread will Release semaphore for each thread
        // in current package. Maybe some threads are not valid in this package, but driver don't
        // care. Second, driver will let current thread wait semaphore for all valid threads in
        // current package. Because only the valid threads will do release semaphore for this
        // thread, driver here only need to wait the valid thread count.
        //

        //
        // First Notify all threads in current package that this thread has ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
          S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
        }
        //
        // Second, check whether all valid threads in current package have ready.
        //
        for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
          S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
        }
        break;

      default:
        break;
      }
      break;

    default:
      break;
    }
  }
}

/**

  Set Processor register for one AP.

  @param  PreSmmRegisterTable   Use pre Smm register table or register table.

**/
VOID
SetRegister (
  IN BOOLEAN                 PreSmmRegisterTable
  )
{
  CPU_REGISTER_TABLE        *RegisterTable;
  CPU_REGISTER_TABLE        *RegisterTables;
  UINT32                    InitApicId;
  UINTN                     ProcIndex;
  UINTN                     Index;

  if (PreSmmRegisterTable) {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
  } else {
    RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
  }

  InitApicId = GetInitialApicId ();
  RegisterTable = NULL;
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTables[Index].InitialApicId == InitApicId) {
      RegisterTable = &RegisterTables[Index];
      ProcIndex = Index;
      break;
    }
  }
  ASSERT (RegisterTable != NULL);

  if (mAcpiCpuData.ApLocation != 0) {
    ProgramProcessorRegister (
      RegisterTable,
      (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  } else {
    ProgramProcessorRegister (
      RegisterTable,
      NULL,
      &mAcpiCpuData.CpuStatus,
      &mCpuFlags
      );
  }
}
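To see how the PackageDepType counting above balances, take a hypothetical package with MaxCoreCount = 4, MaxThreadCount = 2 and ValidCoreCountPerPackage[Package] = 3 (one core unpopulated). Each valid thread releases PackageThreadsCount = 4 * 2 = 8 semaphores, one per possible thread slot in the package, then waits ValidThreadCount = 2 * 3 = 6 times on its own slot. Only the six valid threads ever release, so every slot accumulates exactly six counts: the six waits complete precisely when all valid threads have signalled, and the two counts each thread posts to the unpopulated core's slots are simply never consumed.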
@@ -374,7 +490,7 @@ InitializeAp (

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);
  SetRegister (TRUE);

  //
  // Count down the number with lock mechanism.
@@ -391,7 +507,7 @@ InitializeAp (
  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);
  SetRegister (FALSE);

  //
  // Place AP into the safe code, count down the number with lock mechanism in the safe code.
@@ -466,7 +582,7 @@ InitializeCpuBeforeRebase (
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);
  SetRegister (TRUE);

  ProgramVirtualWireMode ();
@@ -502,15 +618,24 @@ InitializeCpuAfterRebase (
  VOID
  )
{
  SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;

  //
  // Signal that SMM base relocation is complete and to continue initialization.
  // Signal that SMM base relocation is complete and to continue initialization for all APs.
  //
  mInitApsAfterSmmBaseReloc = TRUE;

  //
  // Must begin set register after all APs have continue their initialization.
  // This is a requirement to support semaphore mechanism in register table.
  // Because if semaphore's dependence type is package type, semaphore will wait
  // for all Aps in one package finishing their tasks before set next register
  // for all APs. If the Aps not begin its task during BSP doing its task, the
  // BSP thread will hang because it is waiting for other Aps in the same
  // package finishing their task.
  //
  SetRegister (FALSE);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
@@ -574,8 +699,6 @@ SmmRestoreCpu (

  mSmmS3Flag = TRUE;

  InitializeSpinLock (mMemoryMappedLock);

  //
  // See if there is enough context to resume PEI Phase
  //
@@ -790,7 +913,6 @@ CopyRegisterTable (
  )
{
  UINTN                      Index;
  UINTN                      Index1;
  CPU_REGISTER_TABLE_ENTRY   *RegisterTableEntry;

  CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
@@ -802,17 +924,6 @@ CopyRegisterTable (
        );
      ASSERT (RegisterTableEntry != NULL);
      DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
      //
      // Go though all MSRs in register table to initialize MSR spin lock
      //
      for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
        if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
          //
          // Initialize MSR spin lock only for those MSRs need bit field writing
          //
          InitMsrSpinLockByIndex (RegisterTableEntry->Index);
        }
      }
    }
  }
}
@@ -832,6 +943,7 @@ GetAcpiCpuData (
  VOID                       *GdtForAp;
  VOID                       *IdtForAp;
  VOID                       *MachineCheckHandlerForAp;
  CPU_STATUS_INFORMATION     *CpuStatus;

  if (!mAcpiS3Enable) {
    return;
@@ -906,6 +1018,31 @@ GetAcpiCpuData (
  Gdtr->Base = (UINTN)GdtForAp;
  Idtr->Base = (UINTN)IdtForAp;
  mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;

  CpuStatus = &mAcpiCpuData.CpuStatus;
  CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
  if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
    CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                            sizeof (UINT32) * CpuStatus->PackageCount,
                                            (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
                                            );
    ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
  }
  if (AcpiCpuData->ApLocation != 0) {
    mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
                                mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
                                (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
                                );
    ASSERT (mAcpiCpuData.ApLocation != 0);
  }
  if (CpuStatus->PackageCount != 0) {
    mCpuFlags.SemaphoreCount = AllocateZeroPool (
                                 sizeof (UINT32) * CpuStatus->PackageCount *
                                 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount);
    ASSERT (mCpuFlags.SemaphoreCount != NULL);
  }
  InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
  InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.ConsoleLogLock);
}

/**
@@ -1303,8 +1303,6 @@ InitializeSmmCpuSemaphores (
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
@@ -1321,7 +1319,6 @@ InitializeSmmCpuSemaphores (

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}
@@ -53,6 +53,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include <Library/ReportStatusCodeLib.h>
#include <Library/SmmCpuFeaturesLib.h>
#include <Library/PeCoffGetEntryPointLib.h>
#include <Library/RegisterCpuFeaturesLib.h>

#include <AcpiCpuData.h>
#include <CpuHotPlugData.h>
@@ -364,7 +365,6 @@ typedef struct {
  volatile BOOLEAN              *AllCpusInSync;
  SPIN_LOCK                     *PFLock;
  SPIN_LOCK                     *CodeAccessCheckLock;
  SPIN_LOCK                     *MemoryMappedLock;
} SMM_CPU_SEMAPHORE_GLOBAL;

///
@@ -409,7 +409,6 @@ extern SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
extern UINTN                      mSemaphoreSize;
extern SPIN_LOCK                  *mPFLock;
extern SPIN_LOCK                  *mConfigSmmCodeAccessCheckLock;
extern SPIN_LOCK                  *mMemoryMappedLock;
extern EFI_SMRAM_DESCRIPTOR       *mSmmCpuSmramRanges;
extern UINTN                      mSmmCpuSmramRangeCount;
extern UINT8                      mPhysicalAddressBits;