UefiCpuPkg/PiSmmCpuDxeSmm: Use MSR semaphores in aligned buffer

Update the MSR semaphores to use the ones carved from the allocated,
aligned semaphore buffer. If the buffer does not hold enough MSR
semaphores, allocate one more page.

Cc: Michael Kinney <michael.d.kinney@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jeff Fan <jeff.fan@intel.com>
Reviewed-by: Feng Tian <feng.tian@intel.com>
Reviewed-by: Michael Kinney <michael.d.kinney@intel.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
Jeff Fan 2016-03-22 10:42:12 +08:00 committed by Michael Kinney
parent 695e62d141
commit dc99315b87
2 changed files with 30 additions and 6 deletions
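
In plain C, the allocation pattern this patch adopts looks roughly like the sketch below (hypothetical, simplified code; MSR_LOCK, GrowLockBuffer, GetOrAddMsrLock, SEMAPHORE_SIZE and the libc calls are illustrative stand-ins, not the edk2 types or APIs): each lock descriptor stores a pointer into a buffer of fixed-size, aligned slots, and one more 4 KiB page is allocated whenever every slot is in use.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE       4096u
#define SEMAPHORE_SIZE  64u               /* stand-in for mSemaphoreSize (one cache line) */

typedef struct {
  volatile uint32_t  *SpinLock;           /* points into the aligned buffer */
  uint32_t            MsrIndex;
} MSR_LOCK;

static MSR_LOCK  *Locks     = NULL;
static size_t     LockCount = 0;          /* slots carved out so far */
static size_t     UsedCount = 0;          /* slots bound to an MSR */

/* Allocate one more page and carve it into SEMAPHORE_SIZE-sized lock slots.
   Pages are never freed, mirroring the patch's grow-only behavior. */
static int GrowLockBuffer (void)
{
  size_t     Added = PAGE_SIZE / SEMAPHORE_SIZE;
  uint8_t   *Page  = aligned_alloc (PAGE_SIZE, PAGE_SIZE);
  MSR_LOCK  *NewLocks;
  size_t     Index;

  if (Page == NULL) {
    return -1;
  }
  memset (Page, 0, PAGE_SIZE);

  NewLocks = realloc (Locks, (LockCount + Added) * sizeof (MSR_LOCK));
  if (NewLocks == NULL) {
    free (Page);
    return -1;
  }
  Locks = NewLocks;

  for (Index = 0; Index < Added; Index++) {
    Locks[LockCount + Index].SpinLock = (volatile uint32_t *)(Page + Index * SEMAPHORE_SIZE);
    Locks[LockCount + Index].MsrIndex = UINT32_MAX;   /* marker for "unused" */
  }
  LockCount += Added;
  return 0;
}

/* Return the lock slot for MsrIndex, registering it (and growing the buffer) on demand. */
static volatile uint32_t *GetOrAddMsrLock (uint32_t MsrIndex)
{
  size_t  Index;

  for (Index = 0; Index < UsedCount; Index++) {
    if (Locks[Index].MsrIndex == MsrIndex) {
      return Locks[Index].SpinLock;
    }
  }
  if (UsedCount == LockCount && GrowLockBuffer () != 0) {
    return NULL;
  }
  Locks[UsedCount].MsrIndex = MsrIndex;
  return Locks[UsedCount++].SpinLock;
}

int main (void)
{
  /* The first lookup triggers the initial page; repeated lookups reuse the same slot. */
  printf ("lock for MSR 0x1A0 at %p\n", (void *)GetOrAddMsrLock (0x1A0));
  printf ("lock for MSR 0x1A0 at %p (same slot)\n", (void *)GetOrAddMsrLock (0x1A0));
  return 0;
}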

UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c

@@ -57,7 +57,7 @@ VOID *mGdtForAp = NULL;
 VOID *mIdtForAp = NULL;
 VOID *mMachineCheckHandlerForAp = NULL;
 MP_MSR_LOCK *mMsrSpinLocks = NULL;
-UINTN mMsrSpinLockCount = MSR_SPIN_LOCK_INIT_NUM;
+UINTN mMsrSpinLockCount;
 UINTN mMsrCount = 0;
 
 /**
@@ -76,7 +76,7 @@ GetMsrSpinLockByIndex (
   UINTN Index;
   for (Index = 0; Index < mMsrCount; Index++) {
     if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
-      return &mMsrSpinLocks[Index].SpinLock;
+      return mMsrSpinLocks[Index].SpinLock;
     }
   }
   return NULL;
@@ -93,30 +93,52 @@ InitMsrSpinLockByIndex (
   IN UINT32 MsrIndex
   )
 {
+  UINTN MsrSpinLockCount;
   UINTN NewMsrSpinLockCount;
+  UINTN Index;
+  UINTN AddedSize;
 
   if (mMsrSpinLocks == NULL) {
-    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * mMsrSpinLockCount);
+    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
+    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
     ASSERT (mMsrSpinLocks != NULL);
+    for (Index = 0; Index < MsrSpinLockCount; Index++) {
+      mMsrSpinLocks[Index].SpinLock =
+        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
+      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
+    }
+    mMsrSpinLockCount = MsrSpinLockCount;
+    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
   }
   if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
     //
     // Initialize spin lock for MSR programming
     //
     mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
-    InitializeSpinLock (&mMsrSpinLocks[mMsrCount].SpinLock);
+    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
     mMsrCount ++;
     if (mMsrCount == mMsrSpinLockCount) {
       //
       // If MSR spin lock buffer is full, enlarge it
       //
-      NewMsrSpinLockCount = mMsrSpinLockCount + MSR_SPIN_LOCK_INIT_NUM;
+      AddedSize = SIZE_4KB;
+      mSmmCpuSemaphores.SemaphoreMsr.Msr =
+        AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
+      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
+      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
       mMsrSpinLocks = ReallocatePool (
                         sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                         sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                         mMsrSpinLocks
                         );
+      ASSERT (mMsrSpinLocks != NULL);
       mMsrSpinLockCount = NewMsrSpinLockCount;
+      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
+        mMsrSpinLocks[Index].SpinLock =
+          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
+          (Index - mMsrCount) * mSemaphoreSize);
+        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
+      }
     }
   }
 }
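
For context on how the pointer-based lock is consumed (not part of this patch): code elsewhere in the driver fetches the per-MSR lock and holds it around a read-modify-write of the MSR. The helper below is a hypothetical sketch built only from the standard MdePkg SynchronizationLib/BaseLib/DebugLib APIs; SafeMsrOr is an illustrative name, not a function in MpService.c.

#include <Base.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/SynchronizationLib.h>

//
// Provided by MpService.c (see the hunks above); returns NULL when no lock
// has been registered for MsrIndex yet.
//
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32  MsrIndex
  );

//
// Hypothetical helper: OR bits into an MSR while holding its spin lock, so
// that two APs updating different bit fields of the same MSR cannot race.
//
VOID
SafeMsrOr (
  IN UINT32  MsrIndex,
  IN UINT64  OrMask
  )
{
  SPIN_LOCK  *MsrSpinLock;

  MsrSpinLock = GetMsrSpinLockByIndex (MsrIndex);
  ASSERT (MsrSpinLock != NULL);

  AcquireSpinLock (MsrSpinLock);
  AsmWriteMsr64 (MsrIndex, AsmReadMsr64 (MsrIndex) | OrMask);
  ReleaseSpinLock (MsrSpinLock);
}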

UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h

@@ -326,7 +326,7 @@ typedef struct {
 #define MSR_SPIN_LOCK_INIT_NUM 15
 
 typedef struct {
-  SPIN_LOCK SpinLock;
+  SPIN_LOCK *SpinLock;
   UINT32 MsrIndex;
 } MP_MSR_LOCK;
@@ -409,6 +409,8 @@ extern UINTN mSmmStackArrayEnd;
 extern UINTN mSmmStackSize;
 extern EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService;
 extern IA32_DESCRIPTOR gcSmiInitGdtr;
+extern SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
+extern UINTN mSemaphoreSize;
 extern SPIN_LOCK *mPFLock;
 extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;
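
The two new externs make the aligned semaphore area and the per-semaphore stride visible to MpService.c. As a rough sketch (a hypothetical helper using only the fields shown in the diff above, not an actual function in the driver), MSR semaphore slot Index is located at:

#include "PiSmmCpuDxeSmm.h"

//
// Hypothetical helper: address of MSR semaphore slot Index. Each slot is
// mSemaphoreSize bytes wide, so consecutive locks live in separate aligned
// chunks of the buffer rather than sharing a cache line.
//
SPIN_LOCK *
MsrSemaphoreSlot (
  IN UINTN  Index
  )
{
  return (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
                       Index * mSemaphoreSize);
}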