diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 9b477b6695..4fbb0bba87 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -29,118 +29,6 @@ MM_COMPLETION  mSmmStartupThisApToken;
 //
 UINT32  *mPackageFirstThreadIndex = NULL;
 
-/**
-  Performs an atomic compare exchange operation to get semaphore.
-  The compare exchange operation must be performed using
-  MP safe mechanisms.
-
-  @param      Sem        IN:  32-bit unsigned integer
-                         OUT: original integer - 1
-  @return     Original integer - 1
-
-**/
-UINT32
-WaitForSemaphore (
-  IN OUT  volatile UINT32  *Sem
-  )
-{
-  UINT32  Value;
-
-  for ( ; ;) {
-    Value = *Sem;
-    if ((Value != 0) &&
-        (InterlockedCompareExchange32 (
-           (UINT32 *)Sem,
-           Value,
-           Value - 1
-           ) == Value))
-    {
-      break;
-    }
-
-    CpuPause ();
-  }
-
-  return Value - 1;
-}
-
-/**
-  Performs an atomic compare exchange operation to release semaphore.
-  The compare exchange operation must be performed using
-  MP safe mechanisms.
-
-  @param      Sem        IN:  32-bit unsigned integer
-                         OUT: original integer + 1
-  @return     Original integer + 1
-
-**/
-UINT32
-ReleaseSemaphore (
-  IN OUT  volatile UINT32  *Sem
-  )
-{
-  UINT32  Value;
-
-  do {
-    Value = *Sem;
-  } while (Value + 1 != 0 &&
-           InterlockedCompareExchange32 (
-             (UINT32 *)Sem,
-             Value,
-             Value + 1
-             ) != Value);
-
-  return Value + 1;
-}
-
-/**
-  Performs an atomic compare exchange operation to lock semaphore.
-  The compare exchange operation must be performed using
-  MP safe mechanisms.
-
-  @param      Sem        IN:  32-bit unsigned integer
-                         OUT: -1
-  @return     Original integer
-
-**/
-UINT32
-LockdownSemaphore (
-  IN OUT  volatile UINT32  *Sem
-  )
-{
-  UINT32  Value;
-
-  do {
-    Value = *Sem;
-  } while (InterlockedCompareExchange32 (
-             (UINT32 *)Sem,
-             Value,
-             (UINT32)-1
-             ) != Value);
-
-  return Value;
-}
-
-/**
-  Used for BSP to wait all APs.
-  Wait all APs to performs an atomic compare exchange operation to release semaphore.
-
-  @param   NumberOfAPs      AP number
-
-**/
-VOID
-WaitForAllAPs (
-  IN      UINTN  NumberOfAPs
-  )
-{
-  UINTN  BspIndex;
-
-  BspIndex = mSmmMpSyncData->BspIndex;
-  while (NumberOfAPs-- > 0) {
-    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
-  }
-}
-
 /**
   Used for BSP to release all APs.
   Performs an atomic compare exchange operation to release semaphore
@@ -156,53 +44,11 @@ ReleaseAllAPs (
 
   for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
     if (IsPresentAp (Index)) {
-      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
+      SmmCpuSyncReleaseOneAp (mSmmMpSyncData->SyncContext, Index, gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu);
     }
   }
 }
 
-/**
-  Used for BSP to release one AP.
-
-  @param      ApSem     IN:  32-bit unsigned integer
-                        OUT: original integer + 1
-**/
-VOID
-ReleaseOneAp (
-  IN OUT  volatile UINT32  *ApSem
-  )
-{
-  ReleaseSemaphore (ApSem);
-}
-
-/**
-  Used for AP to wait BSP.
-
-  @param      ApSem      IN:  32-bit unsigned integer
-                         OUT: original integer - 1
-**/
-VOID
-WaitForBsp (
-  IN OUT  volatile UINT32  *ApSem
-  )
-{
-  WaitForSemaphore (ApSem);
-}
-
-/**
-  Used for AP to release BSP.
-
-  @param      BspSem     IN:  32-bit unsigned integer
-                         OUT: original integer + 1
-**/
-VOID
-ReleaseBsp (
-  IN OUT  volatile UINT32  *BspSem
-  )
-{
-  ReleaseSemaphore (BspSem);
-}
-
 /**
   Check whether the index of CPU perform the package level register
   programming during System Management Mode initialization.
@@ -294,14 +140,14 @@ AllCpusInSmmExceptBlockedDisabled (
   DisabledCount = 0;
 
   //
-  // Check to make sure mSmmMpSyncData->Counter is valid and not locked.
+  // Check to make sure the CPU arrival count is valid and not locked.
   //
-  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+  ASSERT (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) <= mNumberOfCpus);
 
   //
   // Check whether all CPUs in SMM.
   //
-  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
+  if (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) == mNumberOfCpus) {
     return TRUE;
   }
 
@@ -311,14 +157,14 @@ AllCpusInSmmExceptBlockedDisabled (
   GetSmmDelayedBlockedDisabledCount (NULL, &BlockedCount, &DisabledCount);
 
   //
-  // *mSmmMpSyncData->Counter might be updated by all APs concurrently. The value
+  // The CPU arrival count might be updated by all APs concurrently. The value
   // can be dynamic changed. If some Aps enter the SMI after the BlockedCount &
-  // DisabledCount check, then the *mSmmMpSyncData->Counter will be increased, thus
-  // leading the *mSmmMpSyncData->Counter + BlockedCount + DisabledCount > mNumberOfCpus.
+  // DisabledCount check, then the CPU arrival count will be increased, thus
+  // leading the retrieved CPU arrival count + BlockedCount + DisabledCount > mNumberOfCpus.
   // since the BlockedCount & DisabledCount are local variable, it's ok here only for
   // the checking of all CPUs In Smm.
   //
-  if (*mSmmMpSyncData->Counter + BlockedCount + DisabledCount >= mNumberOfCpus) {
+  if (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) + BlockedCount + DisabledCount >= mNumberOfCpus) {
     return TRUE;
   }
 
@@ -398,7 +244,7 @@ SmmWaitForApArrival (
   DelayedCount = 0;
   BlockedCount = 0;
 
-  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+  ASSERT (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) <= mNumberOfCpus);
 
   LmceEn     = FALSE;
   LmceSignal = FALSE;
@@ -449,7 +295,7 @@ SmmWaitForApArrival (
   //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
   // In both cases, adding SMI-disabling checking code increases overhead.
   //
-  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
+  if (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) < mNumberOfCpus) {
     //
     // Send SMI IPIs to bring outside processors in
     //
@@ -612,6 +458,7 @@ BSPHandler (
   IN      SMM_CPU_SYNC_MODE  SyncMode
   )
 {
+  UINTN          CpuCount;
   UINTN          Index;
   MTRR_SETTINGS  Mtrrs;
   UINTN          ApCount;
@@ -619,7 +466,8 @@ BSPHandler (
   UINTN          PresentCount;
 
   ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
-  ApCount = 0;
+  CpuCount = 0;
+  ApCount  = 0;
 
   PERF_FUNCTION_BEGIN ();
 
@@ -661,15 +509,18 @@ BSPHandler (
     SmmWaitForApArrival ();
 
     //
-    // Lock the counter down and retrieve the number of APs
+    // Lock door for late coming CPU checkin and retrieve the Arrived number of APs
     //
     *mSmmMpSyncData->AllCpusInSync = TRUE;
-    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
+
+    SmmCpuSyncLockDoor (mSmmMpSyncData->SyncContext, CpuIndex, &CpuCount);
+
+    ApCount = CpuCount - 1;
 
     //
     // Wait for all APs to get ready for programming MTRRs
     //
-    WaitForAllAPs (ApCount);
+    SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);
 
     if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
       //
@@ -678,7 +529,7 @@ BSPHandler (
       ReleaseAllAPs ();
 
       //
-      // WaitForAllAPs() may wait for ever if an AP happens to enter SMM at
+      // SmmCpuSyncWaitForAPs() may wait for ever if an AP happens to enter SMM at
       // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
       // to a large enough value to avoid this situation.
       // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
@@ -690,7 +541,7 @@ BSPHandler (
       //
       // Wait for all APs to complete their MTRR saving
       //
-      WaitForAllAPs (ApCount);
+      SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);
 
       //
       // Let all processors program SMM MTRRs together
@@ -698,7 +549,7 @@ BSPHandler (
       ReleaseAllAPs ();
 
       //
-      // WaitForAllAPs() may wait for ever if an AP happens to enter SMM at
+      // SmmCpuSyncWaitForAPs() may wait for ever if an AP happens to enter SMM at
       // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
       // to a large enough value to avoid this situation.
       //
@@ -707,7 +558,7 @@ BSPHandler (
       //
       // Wait for all APs to complete their MTRR programming
      //
-      WaitForAllAPs (ApCount);
+      SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);
     }
   }
 
@@ -743,10 +594,14 @@ BSPHandler (
   //
   if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
     //
-    // Lock the counter down and retrieve the number of APs
+    // Lock door for late coming CPU checkin and retrieve the Arrived number of APs
     //
     *mSmmMpSyncData->AllCpusInSync = TRUE;
-    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
+
+    SmmCpuSyncLockDoor (mSmmMpSyncData->SyncContext, CpuIndex, &CpuCount);
+
+    ApCount = CpuCount - 1;
+
     //
     // Make sure all APs have their Present flag set
     //
@@ -773,7 +628,7 @@ BSPHandler (
   //
   // Wait for all APs to complete their pending tasks
   //
-  WaitForAllAPs (ApCount);
+  SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);
 
   if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
     //
@@ -790,7 +645,7 @@ BSPHandler (
     //
     // Wait for all APs to complete MTRR programming
     //
-    WaitForAllAPs (ApCount);
+    SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);
   }
 
   //
@@ -818,7 +673,7 @@ BSPHandler (
   // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
   // WaitForAllAps does not depend on the Present flag.
   //
-  WaitForAllAPs (ApCount);
+  SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);
 
   //
   // At this point, all APs should have exited from APHandler().
@@ -844,7 +699,7 @@ BSPHandler (
   //
   // Allow APs to check in from this point on
   //
-  *mSmmMpSyncData->Counter                  = 0;
+  SmmCpuSyncContextReset (mSmmMpSyncData->SyncContext);
   *mSmmMpSyncData->AllCpusInSync            = FALSE;
   mSmmMpSyncData->AllApArrivedWithException = FALSE;
 
@@ -914,17 +769,17 @@ APHandler (
         //
         // Give up since BSP is unable to enter SMM
         // and signal the completion of this AP
-        // Reduce the mSmmMpSyncData->Counter!
+        // Reduce the CPU arrival count!
         //
-        WaitForSemaphore (mSmmMpSyncData->Counter);
+        SmmCpuSyncCheckOutCpu (mSmmMpSyncData->SyncContext, CpuIndex);
         return;
       }
     } else {
       //
       // Don't know BSP index. Give up without sending IPI to BSP.
-      // Reduce the mSmmMpSyncData->Counter!
+      // Reduce the CPU arrival count!
       //
-      WaitForSemaphore (mSmmMpSyncData->Counter);
+      SmmCpuSyncCheckOutCpu (mSmmMpSyncData->SyncContext, CpuIndex);
       return;
     }
   }
@@ -944,14 +799,14 @@ APHandler (
     //
     // Notify BSP of arrival at this point
     //
-    ReleaseBsp (mSmmMpSyncData->CpuData[BspIndex].Run);
+    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
   }
 
   if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
     //
     // Wait for the signal from BSP to backup MTRRs
     //
-    WaitForBsp (mSmmMpSyncData->CpuData[CpuIndex].Run);
+    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
     //
     // Backup OS MTRRs
@@ -961,12 +816,12 @@ APHandler (
     //
     // Signal BSP the completion of this AP
     //
-    ReleaseBsp (mSmmMpSyncData->CpuData[BspIndex].Run);
+    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
     //
     // Wait for BSP's signal to program MTRRs
     //
-    WaitForBsp (mSmmMpSyncData->CpuData[CpuIndex].Run);
+    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
     //
     // Replace OS MTRRs with SMI MTRRs
@@ -976,14 +831,14 @@ APHandler (
     //
     // Signal BSP the completion of this AP
     //
-    ReleaseBsp (mSmmMpSyncData->CpuData[BspIndex].Run);
+    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
   }
 
   while (TRUE) {
     //
     // Wait for something to happen
     //
-    WaitForBsp (mSmmMpSyncData->CpuData[CpuIndex].Run);
+    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
     //
     // Check if BSP wants to exit SMM
@@ -1023,12 +878,12 @@ APHandler (
     //
     // Notify BSP the readiness of this AP to program MTRRs
     //
-    ReleaseBsp (mSmmMpSyncData->CpuData[BspIndex].Run);
+    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
     //
     // Wait for the signal from BSP to program MTRRs
     //
-    WaitForBsp (mSmmMpSyncData->CpuData[CpuIndex].Run);
+    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
     //
     // Restore OS MTRRs
@@ -1040,12 +895,12 @@ APHandler (
   //
   // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
   //
-  ReleaseBsp (mSmmMpSyncData->CpuData[BspIndex].Run);
+  SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
   //
   // Wait for the signal from BSP to Reset states/semaphore for this processor
   //
-  WaitForBsp (mSmmMpSyncData->CpuData[CpuIndex].Run);
+  SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 
   //
   // Reset states/semaphore for this processor
@@ -1055,7 +910,7 @@ APHandler (
   //
   // Notify BSP the readiness of this AP to exit SMM
   //
-  ReleaseBsp (mSmmMpSyncData->CpuData[BspIndex].Run);
+  SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
 }
 
 /**
@@ -1323,7 +1178,7 @@ InternalSmmStartupThisAp (
     *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
   }
 
-  ReleaseOneAp (mSmmMpSyncData->CpuData[CpuIndex].Run);
+  SmmCpuSyncReleaseOneAp (mSmmMpSyncData->SyncContext, CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu);
 
   if (Token == NULL) {
     AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
@@ -1727,10 +1582,11 @@ SmiRendezvous (
   } else {
     //
     // Signal presence of this processor
-    // mSmmMpSyncData->Counter is increased here!
-    // "ReleaseSemaphore (mSmmMpSyncData->Counter) == 0" means BSP has already ended the synchronization.
+    // CPU check in here!
+    // "SmmCpuSyncCheckInCpu (mSmmMpSyncData->SyncContext, CpuIndex)" return error means failed
+    // to check in CPU. BSP has already ended the synchronization.
     //
-    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
+    if (RETURN_ERROR (SmmCpuSyncCheckInCpu (mSmmMpSyncData->SyncContext, CpuIndex))) {
       //
       // BSP has already ended the synchronization, so QUIT!!!
       // Existing AP is too late now to enter SMI since BSP has already ended the synchronization!!!
@@ -1826,8 +1682,6 @@ SmiRendezvous (
       }
     }
 
-    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
-
     //
     // Wait for BSP's signal to exit SMI
     //
@@ -1947,8 +1801,6 @@ InitializeSmmCpuSemaphores (
   ZeroMem (SemaphoreBlock, TotalSize);
 
   SemaphoreAddr                                   = (UINTN)SemaphoreBlock;
-  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
-  SemaphoreAddr                                  += SemaphoreSize;
   mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
   SemaphoreAddr                                  += SemaphoreSize;
   mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
@@ -1962,8 +1814,6 @@ InitializeSmmCpuSemaphores (
   SemaphoreAddr                          = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
   mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
   SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
-  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
-  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
   mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
 
   mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
@@ -1982,6 +1832,8 @@ InitializeMpSyncData (
   VOID
   )
 {
+  RETURN_STATUS  Status;
+
   UINTN  CpuIndex;
 
   if (mSmmMpSyncData != NULL) {
@@ -2011,14 +1863,21 @@ InitializeMpSyncData (
 
     mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
 
-    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
+    Status = SmmCpuSyncContextInit (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus, &mSmmMpSyncData->SyncContext);
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_ERROR, "InitializeMpSyncData: SmmCpuSyncContextInit return error %r!\n", Status));
+      CpuDeadLoop ();
+      return;
+    }
+
+    ASSERT (mSmmMpSyncData->SyncContext != NULL);
+
     mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
     mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
     ASSERT (
-      mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
+      mSmmMpSyncData->InsideSmm != NULL &&
       mSmmMpSyncData->AllCpusInSync != NULL
       );
-    *mSmmMpSyncData->Counter       = 0;
     *mSmmMpSyncData->InsideSmm     = FALSE;
     *mSmmMpSyncData->AllCpusInSync = FALSE;
 
@@ -2027,12 +1886,9 @@ InitializeMpSyncData (
     for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
       mSmmMpSyncData->CpuData[CpuIndex].Busy =
         (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
-      mSmmMpSyncData->CpuData[CpuIndex].Run =
-        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
       mSmmMpSyncData->CpuData[CpuIndex].Present =
         (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
       *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
-      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
       *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
     }
   }
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index f18345881b..a2fa4f6734 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -54,6 +54,7 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
 #include
 #include
 #include
+#include <Library/SmmCpuSyncLib.h>
 #include
 #include
 
@@ -405,7 +406,6 @@ typedef struct {
   SPIN_LOCK                   *Busy;
   volatile EFI_AP_PROCEDURE2  Procedure;
   volatile VOID               *Parameter;
-  volatile UINT32             *Run;
   volatile BOOLEAN            *Present;
   PROCEDURE_TOKEN             *Token;
   EFI_STATUS                  *Status;
@@ -423,7 +423,6 @@ typedef struct {
   // so that UC cache-ability can be set together.
   //
   SMM_CPU_DATA_BLOCK            *CpuData;
-  volatile UINT32               *Counter;
   volatile UINT32               BspIndex;
   volatile BOOLEAN              *InsideSmm;
   volatile BOOLEAN              *AllCpusInSync;
@@ -433,6 +432,7 @@ typedef struct {
   volatile BOOLEAN              AllApArrivedWithException;
   EFI_AP_PROCEDURE              StartupProcedure;
   VOID                          *StartupProcArgs;
+  SMM_CPU_SYNC_CONTEXT          *SyncContext;
 } SMM_DISPATCHER_MP_SYNC_DATA;
 
 #define SMM_PSD_OFFSET  0xfb00
@@ -441,7 +441,6 @@ typedef struct {
 /// All global semaphores' pointer
 ///
 typedef struct {
-  volatile UINT32   *Counter;
   volatile BOOLEAN  *InsideSmm;
   volatile BOOLEAN  *AllCpusInSync;
   SPIN_LOCK         *PFLock;
@@ -453,7 +452,6 @@ typedef struct {
 ///
 typedef struct {
   SPIN_LOCK         *Busy;
-  volatile UINT32   *Run;
   volatile BOOLEAN  *Present;
   SPIN_LOCK         *Token;
 } SMM_CPU_SEMAPHORE_CPU;
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
index 372596f24c..793220aba3 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -103,6 +103,7 @@
   PerformanceLib
   CpuPageTableLib
   MmSaveStateLib
+  SmmCpuSyncLib
 
 [Protocols]
   gEfiSmmAccess2ProtocolGuid               ## CONSUMES
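
Reviewer note, for orientation only: the sketch below shows how the BSP and AP sides of the SMI rendezvous pair up the SmmCpuSyncLib calls this patch switches to. It uses only the APIs and argument order visible in the diff (SmmCpuSyncCheckInCpu, SmmCpuSyncLockDoor, SmmCpuSyncWaitForAPs, SmmCpuSyncReleaseBsp, SmmCpuSyncWaitForBsp, SmmCpuSyncContextReset). The function names BspRendezvousSketch()/ApRendezvousSketch() and the module variable mCtx are hypothetical, and the real BSPHandler()/APHandler()/SmiRendezvous() flow adds MTRR handling, timeouts, Present-flag bookkeeping, and check-out on failure paths that are omitted here.

/*
  Illustrative sketch, not part of the patch. Assumes mCtx was created
  once via SmmCpuSyncContextInit(), as InitializeMpSyncData() does for
  mSmmMpSyncData->SyncContext.
*/
#include <PiSmm.h>
#include <Library/SmmCpuSyncLib.h>

STATIC SMM_CPU_SYNC_CONTEXT  *mCtx;

VOID
BspRendezvousSketch (
  IN UINTN  BspIndex
  )
{
  UINTN  CpuCount;
  UINTN  ApCount;

  //
  // Stop accepting late CPUs and learn how many checked in.
  //
  SmmCpuSyncLockDoor (mCtx, BspIndex, &CpuCount);
  ApCount = CpuCount - 1;

  //
  // Block until every arrived AP has signaled the BSP once.
  //
  SmmCpuSyncWaitForAPs (mCtx, ApCount, BspIndex);

  //
  // ... BSP work while APs wait ...
  //

  //
  // Re-open the door so CPUs can check in for the next SMI.
  //
  SmmCpuSyncContextReset (mCtx);
}

VOID
ApRendezvousSketch (
  IN UINTN  CpuIndex,
  IN UINTN  BspIndex
  )
{
  //
  // Check in; an error means the BSP has already locked the door.
  //
  if (RETURN_ERROR (SmmCpuSyncCheckInCpu (mCtx, CpuIndex))) {
    return;
  }

  //
  // Tell the BSP this AP has arrived, then wait for its signal.
  //
  SmmCpuSyncReleaseBsp (mCtx, CpuIndex, BspIndex);
  SmmCpuSyncWaitForBsp (mCtx, CpuIndex, BspIndex);
}

The library keeps the arrival counter and the per-CPU Run semaphores behind the opaque SMM_CPU_SYNC_CONTEXT, which is why the Counter and Run fields disappear from SMM_DISPATCHER_MP_SYNC_DATA, SMM_CPU_SEMAPHORE_GLOBAL, and SMM_CPU_SEMAPHORE_CPU in this patch.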