UefiCpuPkg/PiSmmCpuDxeSmm: Remove duplicate aligned buffer on S3 path

InitializeMpSyncData() invokes InitializeSmmCpuSemaphores() to allocate an
aligned buffer for all locks and semaphores. However, InitializeMpSyncData()
is invoked again on the S3 resume path to reset mSmmMpSyncData, which causes
an additional aligned buffer to be allocated on each S3 resume.
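
A minimal standalone C sketch of this double allocation (simplified,
hypothetical names; C11 aligned_alloc() stands in for the real SMM
allocation, so this is only an illustration of the pattern, not the edk2
code):

#include <stdio.h>
#include <stdlib.h>

static void   *mSemaphoreBlock = NULL;
static size_t  mBufferCount    = 0;

static void
InitSemaphores (
  void
  )
{
  // Every call allocates a fresh page-aligned block.
  mSemaphoreBlock = aligned_alloc (4096, 4096);
  mBufferCount++;
}

static void
InitMpSyncData (
  void
  )
{
  // Runs on normal boot and again on every S3 resume.
  InitSemaphores ();
}

int
main (
  void
  )
{
  InitMpSyncData ();                                         // normal boot
  InitMpSyncData ();                                         // S3 resume
  printf ("aligned buffers allocated: %zu\n", mBufferCount); // prints 2, not 1
  return 0;
}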

This update moves the InitializeSmmCpuSemaphores() call into
InitializeMpServiceData(), which is only invoked on a normal boot.
InitializeMpSyncData() is updated to reset the locks/semaphores in
mSmmMpSyncData.
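
A matching sketch of the reworked flow (same simplified, hypothetical
names; again an illustration only, not the edk2 code): the buffer is
allocated once on the normal-boot path, and the routine shared with S3
resume only resets state inside the existing block.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t  *mCounter       = NULL;
static bool      *mAllCpusInSync = NULL;

static void
InitSemaphores (
  void
  )
{
  // Allocate the aligned block once and carve out the semaphore slots.
  uint8_t  *Block = aligned_alloc (4096, 4096);

  mCounter       = (uint32_t *)Block;
  mAllCpusInSync = (bool *)(Block + 64);
}

static void
InitMpSyncData (
  void
  )
{
  // Safe on both normal boot and S3 resume: reset values only.
  *mCounter       = 0;
  *mAllCpusInSync = false;
}

static void
InitMpServiceData (
  void
  )
{
  // Normal boot only: allocate once, then perform the common reset.
  InitSemaphores ();
  InitMpSyncData ();
}

int
main (
  void
  )
{
  InitMpServiceData ();   // normal boot: one allocation
  InitMpSyncData ();      // S3 resume: reset only, no new buffer
  return 0;
}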

Cc: Michael Kinney <michael.d.kinney@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jeff Fan <jeff.fan@intel.com>
Reviewed-by: Feng Tian <feng.tian@intel.com>
Reviewed-by: Michael Kinney <michael.d.kinney@intel.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
Author:    Jeff Fan <jeff.fan@intel.com>
Date:      2016-06-27 15:41:50 +08:00
Committer: Michael Kinney <michael.d.kinney@intel.com>
Commit:    8b9311b795
Parent:    77d172b76d

@@ -1205,7 +1205,6 @@ InitializeSmmCpuSemaphores (
   VOID
   )
 {
-  UINTN                      CpuIndex;
   UINTN                      ProcessorCount;
   UINTN                      TotalSize;
   UINTN                      GlobalSemaphoresSize;
@@ -1240,7 +1239,6 @@ InitializeSmmCpuSemaphores (
   SemaphoreAddr += SemaphoreSize;
   mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                          = (SPIN_LOCK *)SemaphoreAddr;
-
   SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
   mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
   SemaphoreAddr += ProcessorCount * SemaphoreSize;
@@ -1254,21 +1252,9 @@ InitializeSmmCpuSemaphores (
         ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
   ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

-  mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
-  mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
-  mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
   mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
   mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

-  for (CpuIndex = 0; CpuIndex < ProcessorCount; CpuIndex ++) {
-    mSmmMpSyncData->CpuData[CpuIndex].Busy    =
-      (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + SemaphoreSize * CpuIndex);
-    mSmmMpSyncData->CpuData[CpuIndex].Run     =
-      (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + SemaphoreSize * CpuIndex);
-    mSmmMpSyncData->CpuData[CpuIndex].Present =
-      (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + SemaphoreSize * CpuIndex);
-  }
-
   mSemaphoreSize = SemaphoreSize;
 }
@@ -1282,8 +1268,10 @@ InitializeMpSyncData (
   VOID
   )
 {
+  UINTN                      CpuIndex;
+
   if (mSmmMpSyncData != NULL) {
-    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
+    mSmmMpSyncData->SwitchBsp = FALSE;
     mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
     mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
     if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
@@ -1294,7 +1282,23 @@ InitializeMpSyncData (
     }
     mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);

-    InitializeSmmCpuSemaphores ();
+    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
+    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
+    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
+    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
+            mSmmMpSyncData->AllCpusInSync != NULL);
+    *mSmmMpSyncData->Counter       = 0;
+    *mSmmMpSyncData->InsideSmm     = FALSE;
+    *mSmmMpSyncData->AllCpusInSync = FALSE;
+
+    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
+      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
+        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
+      mSmmMpSyncData->CpuData[CpuIndex].Run     =
+        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
+      mSmmMpSyncData->CpuData[CpuIndex].Present =
+        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
+    }
   }
 }
@@ -1318,6 +1322,11 @@ InitializeMpServiceData (
   UINT8                     *GdtTssTables;
   UINTN                     GdtTableStepSize;

+  //
+  // Allocate memory for all locks and semaphores
+  //
+  InitializeSmmCpuSemaphores ();
+
   //
   // Initialize mSmmMpSyncData
   //