mirror of https://github.com/acidanthera/audk.git
Eliminate EFI_IMAGE_MACHINE_TYPE_SUPPORTED.
Move GDT initialization from InitializeMpServiceData() into a CPU-arch-specific function. We create SmmFuncsArch.c to hold the CPU-specific code, so that EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64) can be removed. For the IA32 version, we always allocate a new page for the GDT entries, for easier maintenance. For the X64 version, we fix up TssBase in the GDT entry to make sure the TSS data is correct, and remove the TSS fixup for the GDT from the ASM files.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: "Yao, Jiewen" <jiewen.yao@intel.com>
Reviewed-by: "Fan, Jeff" <jeff.fan@intel.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@18937 6f19259b-4bc3-4df7-8a09-765794883524
parent 02f02b16e2
commit fe5f194943
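The key contract of the new InitGdt() function is the per-processor stride documented in its comment: the GDT/TSS copy for processor X lives at GdtBase + (GdtStepSize * X). The following is a minimal illustrative sketch, not part of the commit, using a hypothetical helper name (GetProcessorGdtBase) to show how a caller such as InitializeMpServiceData() derives each CPU's GDT base before storing it in that CPU's PROCESSOR_SMM_DESCRIPTOR:

//
// Illustrative sketch only (hypothetical helper, not part of the commit):
// derive processor X's GDT base from InitGdt()'s return value and step size.
//
VOID *
GetProcessorGdtBase (
  IN VOID   *GdtBase,      // InitGdt() return value: the GDT/TSS copy for processor 0
  IN UINTN  GdtStepSize,   // per-processor stride returned through InitGdt()'s OUT parameter
  IN UINTN  CpuIndex       // logical processor index
  )
{
  //
  // Matches the diff below, where InitializeMpServiceData() sets
  // Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index).
  //
  return (VOID *)((UINT8 *)GdtBase + GdtStepSize * CpuIndex);
}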
@@ -0,0 +1,96 @@
/** @file
SMM CPU misc functions for Ia32 arch specific.

Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

/**
  Initialize Gdt for all processors.

  @param[in]   Cr3          CR3 value.
  @param[out]  GdtStepSize  The step size for GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN  UINTN  Cr3,
  OUT UINTN  *GdtStepSize
  )
{
  UINTN                    Index;
  IA32_SEGMENT_DESCRIPTOR  *GdtDescriptor;
  UINTN                    TssBase;
  UINTN                    GdtTssTableSize;
  UINT8                    *GdtTssTables;
  UINTN                    GdtTableStepSize;

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.
    // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
    // on each SMI entry.
    //

    //
    // Enlarge GDT to contain 2 TSS descriptors
    //
    gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));

    GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
    GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    ASSERT (GdtTssTables != NULL);
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
      //
      // Fixup TSS descriptors
      //
      TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
      GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);

      TssBase += TSS_SIZE;
      GdtDescriptor++;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
      //
      // Fixup TSS segments
      //
      // ESP as known good stack
      //
      *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
      *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
    }
  } else {
    //
    // Just use the original table; AllocatePages and copy it here to make sure the GDTs are covered by page memory.
    //
    GdtTssTableSize = gcSmiGdtr.Limit + 1;
    GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    ASSERT (GdtTssTables != NULL);
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}
@@ -1163,10 +1163,7 @@ InitializeMpServiceData (
  UINTN                     Index;
  MTRR_SETTINGS             *Mtrr;
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINTN                     GdtTssTableSize;
  UINT8                     *GdtTssTables;
  IA32_SEGMENT_DESCRIPTOR   *GdtDescriptor;
  UINTN                     TssBase;
  UINTN                     GdtTableStepSize;

  //
@@ -1182,71 +1179,7 @@ InitializeMpServiceData (
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables     = NULL;
  GdtTssTableSize  = 0;
  GdtTableStepSize = 0;
  //
  // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
  // on each SMI entry.
  //
  if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) {
    GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
    GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    ASSERT (GdtTssTables != NULL);
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
      if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
        //
        // Setup top of known good stack as IST1 for each processor.
        //
        *(UINTN *)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
      }
    }
  } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {

    //
    // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.
    // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
    // on each SMI entry.
    //

    //
    // Enlarge GDT to contain 2 TSS descriptors
    //
    gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));

    GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
    GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    ASSERT (GdtTssTables != NULL);
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
      //
      // Fixup TSS descriptors
      //
      TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
      GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);

      TssBase += TSS_SIZE;
      GdtDescriptor++;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
      //
      // Fixup TSS segments
      //
      // ESP as known good stack
      //
      *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
      *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
    }
  }
  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
@@ -1254,18 +1187,8 @@ InitializeMpServiceData (
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
    CopyMem (Psd, &gcPsd, sizeof (gcPsd));
    if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED (EFI_IMAGE_MACHINE_X64)) {
      //
      // For X64 SMM, set GDT to the copy allocated above.
      //
      Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
    } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      //
      // For IA32 SMM, if SMM Stack Guard feature is enabled, set GDT to the copy allocated above.
      //
      Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
      Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
    }

    //
    // Install SMI handler
@@ -427,6 +427,21 @@ InitializeIDTSmmStackGuard (
  VOID
  );

/**
  Initialize Gdt for all processors.

  @param[in]   Cr3          CR3 value.
  @param[out]  GdtStepSize  The step size for GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN  UINTN  Cr3,
  OUT UINTN  *GdtStepSize
  );

/**

  Register the SMM Foundation entry point.
@@ -48,6 +48,7 @@
[Sources.Ia32]
  Ia32/Semaphore.c
  Ia32/PageTbl.c
  Ia32/SmmFuncsArch.c
  Ia32/SmmProfileArch.c
  Ia32/SmmProfileArch.h
  Ia32/SmmInit.asm | MSFT
@@ -68,6 +69,7 @@
[Sources.X64]
  X64/Semaphore.c
  X64/PageTbl.c
  X64/SmmFuncsArch.c
  X64/SmmProfileArch.c
  X64/SmmProfileArch.h
  X64/SmmInit.asm | MSFT
@@ -128,14 +128,6 @@ ASM_PFX(gSmiCr3): .space 4
    sgdt    (%rsp)
    movl    2(%rsp), %eax               # eax = GDT base
    addl    $8, %esp
    movl    %eax, %edx
    addl    $GDT_SIZE, %edx
    movb    %dl, (TSS_SEGMENT + 2)(%rax)
    movb    %dh, (TSS_SEGMENT + 3)(%rax)
    .byte   0xc1, 0xea, 0x10            # shr edx, 16
    movb    %dl, (TSS_SEGMENT + 4)(%rax)
    movb    %dh, (TSS_SEGMENT + 7)(%rax)
    movl    %eax, %edx
    movb    $0x89, %dl
    movb    %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag
    movl    $TSS_SEGMENT, %eax
@@ -124,14 +124,6 @@ gSmiCr3 DD ?
    sgdt    fword ptr [rsp]
    mov     eax, [rsp + 2]              ; eax = GDT base
    add     esp, 8
    mov     edx, eax
    add     edx, GDT_SIZE
    mov     [rax + TSS_SEGMENT + 2], dl
    mov     [rax + TSS_SEGMENT + 3], dh
    DB      0c1h, 0eah, 10h             ; shr edx, 16
    mov     [rax + TSS_SEGMENT + 4], dl
    mov     [rax + TSS_SEGMENT + 7], dh
    mov     edx, eax
    mov     dl, 89h
    mov     [rax + TSS_SEGMENT + 5], dl ; clear busy flag
    mov     eax, TSS_SEGMENT
@@ -0,0 +1,70 @@
/** @file
SMM CPU misc functions for x64 arch specific.

Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

/**
  Initialize Gdt for all processors.

  @param[in]   Cr3          CR3 value.
  @param[out]  GdtStepSize  The step size for GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN  UINTN  Cr3,
  OUT UINTN  *GdtStepSize
  )
{
  UINTN                    Index;
  IA32_SEGMENT_DESCRIPTOR  *GdtDescriptor;
  UINTN                    TssBase;
  UINTN                    GdtTssTableSize;
  UINT8                    *GdtTssTables;
  UINTN                    GdtTableStepSize;

  //
  // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
  // on each SMI entry.
  //
  GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
  GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
  ASSERT (GdtTssTables != NULL);
  GdtTableStepSize = GdtTssTableSize;

  for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
    CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);

    //
    // Fixup TSS descriptors
    //
    TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
    GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
    GdtDescriptor->Bits.BaseLow = (UINT16)(UINTN)TssBase;
    GdtDescriptor->Bits.BaseMid = (UINT8)((UINTN)TssBase >> 16);
    GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);

    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      //
      // Setup top of known good stack as IST1 for each processor.
      //
      *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}