UefiCpuPkg/PiSmmCpuDxeSmm: Add paging protection.

PiSmmCpuDxeSmm consumes the SmmAttributesTable and sets up the page table so that:
1) Code regions are marked read-only and data regions non-executable,
if the PE image is 4KB aligned.
2) Important data structures, such as the GDT and IDT, are set to read-only.
3) The SmmSaveState area is set to non-executable,
and the SmmEntrypoint is set to read-only.
4) If static page tables are supported, the page table itself is read-only.

We use the page table to protect the other components, and the page table itself.

If dynamic paging is used, we can still provide *partial* protection,
and hope that the page table is not modified by other components.

The XD enabling code is moved to SmiEntry so that NX takes effect.
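
To illustrate items 1)-3), here is a minimal, hypothetical sketch (not code from
this patch) of marking a code region read-only and a data region non-executable
with the SmmSetMemoryAttributesEx() helper that this patch declares in
PiSmmCpuDxeSmm.h. The wrapper function and its parameters are invented for
illustration only.

#include "PiSmmCpuDxeSmm.h"

EFI_STATUS
ProtectImageRegions (
  IN EFI_PHYSICAL_ADDRESS  CodeBase,
  IN UINT64                CodeSize,
  IN EFI_PHYSICAL_ADDRESS  DataBase,
  IN UINT64                DataSize
  )
{
  EFI_STATUS  Status;

  //
  // Code region: clear the writable bit in the page table entries (read-only).
  //
  Status = SmmSetMemoryAttributesEx (CodeBase, CodeSize, EFI_MEMORY_RO, NULL);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // Data region: set the NX bit in the page table entries (execute-disable).
  //
  return SmmSetMemoryAttributesEx (DataBase, DataSize, EFI_MEMORY_XP, NULL);
}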

Cc: Jeff Fan <jeff.fan@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jiewen Yao <jiewen.yao@intel.com>
Tested-by: Laszlo Ersek <lersek@redhat.com>
Reviewed-by: Jeff Fan <jeff.fan@intel.com>
Reviewed-by: Michael D Kinney <michael.d.kinney@intel.com>
Jiewen Yao 2016-10-23 23:19:52 +08:00
parent 28b020b5de
commit 717fb60443
25 changed files with 2042 additions and 775 deletions


@ -58,7 +58,7 @@ SmmInitPageTable (
if (FeaturePcdGet (PcdCpuSmmStackGuard)) { if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
InitializeIDTSmmStackGuard (); InitializeIDTSmmStackGuard ();
} }
return Gen4GPageTable (0, TRUE); return Gen4GPageTable (TRUE);
} }
/** /**
@ -99,7 +99,7 @@ SmiPFHandler (
if ((FeaturePcdGet (PcdCpuSmmStackGuard)) && if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
(PFAddress >= mCpuHotPlugData.SmrrBase) && (PFAddress >= mCpuHotPlugData.SmrrBase) &&
(PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) { (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n")); DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
CpuDeadLoop (); CpuDeadLoop ();
} }
@ -109,7 +109,7 @@ SmiPFHandler (
if ((PFAddress < mCpuHotPlugData.SmrrBase) || if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
(PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) { (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) { if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress)); DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
DEBUG_CODE ( DEBUG_CODE (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp); DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
); );
@ -128,3 +128,68 @@ SmiPFHandler (
ReleaseSpinLock (mPFLock); ReleaseSpinLock (mPFLock);
} }
/**
This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
VOID
)
{
UINTN Index2;
UINTN Index3;
UINT64 *L1PageTable;
UINT64 *L2PageTable;
UINT64 *L3PageTable;
BOOLEAN IsSplitted;
BOOLEAN PageTableSplitted;
DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
//
// Disable write protection, because we need mark page table to be write protected.
// We need *write* page table memory, to mark itself to be *read only*.
//
AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
do {
DEBUG ((DEBUG_INFO, "Start...\n"));
PageTableSplitted = FALSE;
L3PageTable = (UINT64 *)GetPageTableBase ();
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
for (Index3 = 0; Index3 < 4; Index3++) {
L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
if (L2PageTable == NULL) {
continue;
}
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
// 2M
continue;
}
L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
if (L1PageTable == NULL) {
continue;
}
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
}
}
} while (PageTableSplitted);
//
// Enable write protection, after page table updated.
//
AsmWriteCr0 (AsmReadCr0() | CR0_WP);
return ;
}


@ -1,6 +1,6 @@
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# #
# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials # This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License # are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at # which accompanies this distribution. The full text of the license may be found at
@ -24,9 +24,13 @@ ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL ASM_PFX(gSmiCr3) ASM_GLOBAL ASM_PFX(gSmiCr3)
ASM_GLOBAL ASM_PFX(gSmiStack) ASM_GLOBAL ASM_PFX(gSmiStack)
ASM_GLOBAL ASM_PFX(gSmbase) ASM_GLOBAL ASM_PFX(gSmbase)
ASM_GLOBAL ASM_PFX(mXdSupported)
ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard)) ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr) ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)
.equ MSR_EFER, 0xc0000080
.equ MSR_EFER_XD, 0x800
.equ DSC_OFFSET, 0xfb00 .equ DSC_OFFSET, 0xfb00
.equ DSC_GDTPTR, 0x30 .equ DSC_GDTPTR, 0x30
.equ DSC_GDTSIZ, 0x38 .equ DSC_GDTSIZ, 0x38
@ -122,8 +126,41 @@ L11:
orl $BIT10, %eax orl $BIT10, %eax
L12: # as cr4.PGE is not set here, refresh cr3 L12: # as cr4.PGE is not set here, refresh cr3
movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB. movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.
cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
jz L5
# Load TSS
movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
movl $TSS_SEGMENT, %eax
ltrw %ax
L5:
# enable NXE if supported
.byte 0xb0 # mov al, imm8
ASM_PFX(mXdSupported): .byte 1
cmpb $0, %al
jz SkipNxe
#
# Check XD disable bit
#
movl $MSR_IA32_MISC_ENABLE, %ecx
rdmsr
pushl %edx # save MSR_IA32_MISC_ENABLE[63-32]
testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
jz L13
andw $0x0FFFB, %dx # clear XD Disable bit if it is set
wrmsr
L13:
movl $MSR_EFER, %ecx
rdmsr
orw $MSR_EFER_XD,%ax # enable NXE
wrmsr
SkipNxe:
subl $4, %esp
NxeDone:
movl %cr0, %ebx movl %cr0, %ebx
orl $0x080010000, %ebx # enable paging + WP orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
movl %ebx, %cr0 movl %ebx, %cr0
leal DSC_OFFSET(%edi),%ebx leal DSC_OFFSET(%edi),%ebx
movw DSC_DS(%ebx),%ax movw DSC_DS(%ebx),%ax
@ -135,35 +172,39 @@ L12: # as cr4.PGE is not set here, refresh
movw DSC_SS(%ebx),%ax movw DSC_SS(%ebx),%ax
movl %eax, %ss movl %eax, %ss
cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
jz L5
# Load TSS
movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
movl $TSS_SEGMENT, %eax
ltrw %ax
L5:
# jmp _SmiHandler # instruction is not needed # jmp _SmiHandler # instruction is not needed
_SmiHandler: _SmiHandler:
movl (%esp), %ebx movl 4(%esp), %ebx
pushl %ebx pushl %ebx
movl $ASM_PFX(CpuSmmDebugEntry), %eax movl $ASM_PFX(CpuSmmDebugEntry), %eax
call *%eax call *%eax
popl %ecx addl $4, %esp
pushl %ebx pushl %ebx
movl $ASM_PFX(SmiRendezvous), %eax movl $ASM_PFX(SmiRendezvous), %eax
call *%eax call *%eax
popl %ecx addl $4, %esp
pushl %ebx pushl %ebx
movl $ASM_PFX(CpuSmmDebugExit), %eax movl $ASM_PFX(CpuSmmDebugExit), %eax
call *%eax call *%eax
popl %ecx addl $4, %esp
movl $ASM_PFX(mXdSupported), %eax
movb (%eax), %al
cmpb $0, %al
jz L16
popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]
testl $BIT2, %edx
jz L16
movl $MSR_IA32_MISC_ENABLE, %ecx
rdmsr
orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
wrmsr
L16:
rsm rsm
ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
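
For reference, here is a C-level sketch (not part of this patch; the constant
and function names below are invented) of what the new entry/exit assembly
above does: clear MSR_IA32_MISC_ENABLE[34] (XD Disable) if it is set, enable
EFER.NXE before paging is turned on, and restore the XD Disable bit just
before RSM. The assembly keeps the saved MISC_ENABLE high dword on the SMI
stack per CPU; a single variable is used here only to keep the sketch short.
The equivalent C code previously living in SmiRendezvous() is removed later
in this patch.

#include "PiSmmCpuDxeSmm.h"

#define SKETCH_MSR_IA32_MISC_ENABLE  0x1A0
#define SKETCH_MSR_EFER              0xC0000080
#define SKETCH_MSR_EFER_XD           BIT11      // EFER.NXE

extern BOOLEAN  mXdSupported;

STATIC BOOLEAN  mSavedXdDisable;

VOID
EnableXdOnSmiEntry (
  VOID
  )
{
  UINT64  MiscEnable;

  if (!mXdSupported) {
    return;
  }
  MiscEnable      = AsmReadMsr64 (SKETCH_MSR_IA32_MISC_ENABLE);
  mSavedXdDisable = (BOOLEAN)((MiscEnable & BIT34) != 0);   // MISC_ENABLE[34] = XD Disable
  if (mSavedXdDisable) {
    AsmWriteMsr64 (SKETCH_MSR_IA32_MISC_ENABLE, MiscEnable & ~BIT34);
  }
  AsmWriteMsr64 (SKETCH_MSR_EFER, AsmReadMsr64 (SKETCH_MSR_EFER) | SKETCH_MSR_EFER_XD);
}

VOID
RestoreXdOnSmiExit (
  VOID
  )
{
  if (mXdSupported && mSavedXdDisable) {
    AsmWriteMsr64 (SKETCH_MSR_IA32_MISC_ENABLE,
                   AsmReadMsr64 (SKETCH_MSR_IA32_MISC_ENABLE) | BIT34);
  }
}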


@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ; ;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> ; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials ; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License ; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at ; which accompanies this distribution. The full text of the license may be found at
@ -22,6 +22,10 @@
.model flat,C .model flat,C
.xmm .xmm
MSR_IA32_MISC_ENABLE EQU 1A0h
MSR_EFER EQU 0c0000080h
MSR_EFER_XD EQU 0800h
DSC_OFFSET EQU 0fb00h DSC_OFFSET EQU 0fb00h
DSC_GDTPTR EQU 30h DSC_GDTPTR EQU 30h
DSC_GDTSIZ EQU 38h DSC_GDTSIZ EQU 38h
@ -43,6 +47,7 @@ EXTERNDEF gcSmiHandlerSize:WORD
EXTERNDEF gSmiCr3:DWORD EXTERNDEF gSmiCr3:DWORD
EXTERNDEF gSmiStack:DWORD EXTERNDEF gSmiStack:DWORD
EXTERNDEF gSmbase:DWORD EXTERNDEF gSmbase:DWORD
EXTERNDEF mXdSupported:BYTE
EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
EXTERNDEF gSmiHandlerIdtr:FWORD EXTERNDEF gSmiHandlerIdtr:FWORD
@ -128,8 +133,42 @@ gSmiCr3 DD ?
or eax, BIT10 or eax, BIT10
@@: ; as cr4.PGE is not set here, refresh cr3 @@: ; as cr4.PGE is not set here, refresh cr3
mov cr4, eax ; in PreModifyMtrrs() to flush TLB. mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
jz @F
; Load TSS
mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
mov eax, TSS_SEGMENT
ltr ax
@@:
; enable NXE if supported
DB 0b0h ; mov al, imm8
mXdSupported DB 1
cmp al, 0
jz @SkipXd
;
; Check XD disable bit
;
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
push edx ; save MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
jz @f
and dx, 0FFFBh ; clear XD Disable bit if it is set
wrmsr
@@:
mov ecx, MSR_EFER
rdmsr
or ax, MSR_EFER_XD ; enable NXE
wrmsr
jmp @XdDone
@SkipXd:
sub esp, 4
@XdDone:
mov ebx, cr0 mov ebx, cr0
or ebx, 080010000h ; enable paging + WP or ebx, 080010023h ; enable paging + WP + NE + MP + PE
mov cr0, ebx mov cr0, ebx
lea ebx, [edi + DSC_OFFSET] lea ebx, [edi + DSC_OFFSET]
mov ax, [ebx + DSC_DS] mov ax, [ebx + DSC_DS]
@ -141,34 +180,38 @@ gSmiCr3 DD ?
mov ax, [ebx + DSC_SS] mov ax, [ebx + DSC_SS]
mov ss, eax mov ss, eax
cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
jz @F
; Load TSS
mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
mov eax, TSS_SEGMENT
ltr ax
@@:
; jmp _SmiHandler ; instruction is not needed ; jmp _SmiHandler ; instruction is not needed
_SmiHandler PROC _SmiHandler PROC
mov ebx, [esp] ; CPU Index mov ebx, [esp + 4] ; CPU Index
push ebx push ebx
mov eax, CpuSmmDebugEntry mov eax, CpuSmmDebugEntry
call eax call eax
pop ecx add esp, 4
push ebx push ebx
mov eax, SmiRendezvous mov eax, SmiRendezvous
call eax call eax
pop ecx add esp, 4
push ebx push ebx
mov eax, CpuSmmDebugExit mov eax, CpuSmmDebugExit
call eax call eax
pop ecx add esp, 4
mov eax, mXdSupported
mov al, [eax]
cmp al, 0
jz @f
pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2
jz @f
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
wrmsr
@@:
rsm rsm
_SmiHandler ENDP _SmiHandler ENDP


@ -18,6 +18,10 @@
; ;
;------------------------------------------------------------------------------- ;-------------------------------------------------------------------------------
%define MSR_IA32_MISC_ENABLE 0x1A0
%define MSR_EFER 0xc0000080
%define MSR_EFER_XD 0x800
%define DSC_OFFSET 0xfb00 %define DSC_OFFSET 0xfb00
%define DSC_GDTPTR 0x30 %define DSC_GDTPTR 0x30
%define DSC_GDTSIZ 0x38 %define DSC_GDTSIZ 0x38
@ -40,6 +44,7 @@ global ASM_PFX(gcSmiHandlerSize)
global ASM_PFX(gSmiCr3) global ASM_PFX(gSmiCr3)
global ASM_PFX(gSmiStack) global ASM_PFX(gSmiStack)
global ASM_PFX(gSmbase) global ASM_PFX(gSmbase)
global ASM_PFX(mXdSupported)
extern ASM_PFX(gSmiHandlerIdtr) extern ASM_PFX(gSmiHandlerIdtr)
SECTION .text SECTION .text
@ -56,7 +61,7 @@ _SmiEntryPoint:
mov ebp, eax ; ebp = GDT base mov ebp, eax ; ebp = GDT base
o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx] o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
mov ax, PROTECT_MODE_CS mov ax, PROTECT_MODE_CS
mov [cs:bx-0x2],ax mov [cs:bx-0x2],ax
DB 0x66, 0xbf ; mov edi, SMBASE DB 0x66, 0xbf ; mov edi, SMBASE
ASM_PFX(gSmbase): DD 0 ASM_PFX(gSmbase): DD 0
lea eax, [edi + (@32bit - _SmiEntryPoint) + 0x8000] lea eax, [edi + (@32bit - _SmiEntryPoint) + 0x8000]
@ -66,7 +71,7 @@ ASM_PFX(gSmbase): DD 0
or ebx, 0x23 or ebx, 0x23
mov cr0, ebx mov cr0, ebx
jmp dword 0x0:0x0 jmp dword 0x0:0x0
_GdtDesc: _GdtDesc:
DW 0 DW 0
DD 0 DD 0
@ -115,8 +120,42 @@ ASM_PFX(gSmiCr3): DD 0
or eax, BIT10 or eax, BIT10
.4: ; as cr4.PGE is not set here, refresh cr3 .4: ; as cr4.PGE is not set here, refresh cr3
mov cr4, eax ; in PreModifyMtrrs() to flush TLB. mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
jz .6
; Load TSS
mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
mov eax, TSS_SEGMENT
ltr ax
.6:
; enable NXE if supported
DB 0b0h ; mov al, imm8
ASM_PFX(mXdSupported): DB 1
cmp al, 0
jz @SkipXd
;
; Check XD disable bit
;
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
push edx ; save MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
jz .5
and dx, 0xFFFB ; clear XD Disable bit if it is set
wrmsr
.5:
mov ecx, MSR_EFER
rdmsr
or ax, MSR_EFER_XD ; enable NXE
wrmsr
jmp @XdDone
@SkipXd:
sub esp, 4
@XdDone:
mov ebx, cr0 mov ebx, cr0
or ebx, 0x080010000 ; enable paging + WP or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
mov cr0, ebx mov cr0, ebx
lea ebx, [edi + DSC_OFFSET] lea ebx, [edi + DSC_OFFSET]
mov ax, [ebx + DSC_DS] mov ax, [ebx + DSC_DS]
@ -128,35 +167,39 @@ ASM_PFX(gSmiCr3): DD 0
mov ax, [ebx + DSC_SS] mov ax, [ebx + DSC_SS]
mov ss, eax mov ss, eax
cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
jz .5
; Load TSS
mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
mov eax, TSS_SEGMENT
ltr ax
.5:
; jmp _SmiHandler ; instruction is not needed ; jmp _SmiHandler ; instruction is not needed
global ASM_PFX(SmiHandler) global ASM_PFX(SmiHandler)
ASM_PFX(SmiHandler): ASM_PFX(SmiHandler):
mov ebx, [esp] ; CPU Index mov ebx, [esp + 4] ; CPU Index
push ebx push ebx
mov eax, ASM_PFX(CpuSmmDebugEntry) mov eax, ASM_PFX(CpuSmmDebugEntry)
call eax call eax
pop ecx add esp, 4
push ebx push ebx
mov eax, ASM_PFX(SmiRendezvous) mov eax, ASM_PFX(SmiRendezvous)
call eax call eax
pop ecx add esp, 4
push ebx push ebx
mov eax, ASM_PFX(CpuSmmDebugExit) mov eax, ASM_PFX(CpuSmmDebugExit)
call eax call eax
pop ecx add esp, 4
mov eax, ASM_PFX(mXdSupported)
mov al, [eax]
cmp al, 0
jz .7
pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2
jz .7
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
wrmsr
.7:
rsm rsm
ASM_PFX(gcSmiHandlerSize): DW $ - _SmiEntryPoint ASM_PFX(gcSmiHandlerSize): DW $ - _SmiEntryPoint


@ -1,6 +1,6 @@
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# #
# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials # This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License # are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at # which accompanies this distribution. The full text of the license may be found at
@ -24,6 +24,7 @@ ASM_GLOBAL ASM_PFX(PageFaultStubFunction)
ASM_GLOBAL ASM_PFX(gSmiMtrrs) ASM_GLOBAL ASM_PFX(gSmiMtrrs)
ASM_GLOBAL ASM_PFX(gcSmiIdtr) ASM_GLOBAL ASM_PFX(gcSmiIdtr)
ASM_GLOBAL ASM_PFX(gcSmiGdtr) ASM_GLOBAL ASM_PFX(gcSmiGdtr)
ASM_GLOBAL ASM_PFX(gTaskGateDescriptor)
ASM_GLOBAL ASM_PFX(gcPsd) ASM_GLOBAL ASM_PFX(gcPsd)
ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable)) ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
@ -236,207 +237,10 @@ ASM_PFX(gcPsd):
ASM_PFX(gcSmiGdtr): .word GDT_SIZE - 1 ASM_PFX(gcSmiGdtr): .word GDT_SIZE - 1
.long NullSeg .long NullSeg
ASM_PFX(gcSmiIdtr): .word IDT_SIZE - 1 ASM_PFX(gcSmiIdtr): .word 0
.long _SmiIDT .long 0
_SmiIDT: ASM_PFX(gTaskGateDescriptor):
# The following segment repeats 32 times:
# No. 1
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 2
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 3
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 4
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 5
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 6
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 7
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 8
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 9
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 10
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 11
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 12
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 13
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 14
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 15
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 16
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 17
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 18
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 19
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 20
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 21
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 22
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 23
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 24
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 25
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 26
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 27
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 28
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 29
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 30
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 31
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
# No. 32
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.equ IDT_SIZE, . - _SmiIDT
TaskGateDescriptor:
.word 0 # Reserved .word 0 # Reserved
.word EXCEPTION_TSS_SEL # TSS Segment selector .word EXCEPTION_TSS_SEL # TSS Segment selector
.byte 0 # Reserved .byte 0 # Reserved
@ -891,21 +695,3 @@ ASM_PFX(PageFaultStubFunction):
# #
clts clts
iret iret
ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
ASM_PFX(InitializeIDTSmmStackGuard):
pushl %ebx
#
# If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
# is a Task Gate Descriptor so that when a Page Fault Exception occurs,
# the processors can use a known good stack in case stack ran out.
#
leal _SmiIDT + 14 * 8, %ebx
leal TaskGateDescriptor, %edx
movl (%edx), %eax
movl %eax, (%ebx)
movl 4(%edx), %eax
movl %eax, 4(%ebx)
popl %ebx
ret


@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ; ;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> ; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials ; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License ; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at ; which accompanies this distribution. The full text of the license may be found at
@ -26,6 +26,7 @@ EXTERNDEF PageFaultStubFunction:PROC
EXTERNDEF gSmiMtrrs:QWORD EXTERNDEF gSmiMtrrs:QWORD
EXTERNDEF gcSmiIdtr:FWORD EXTERNDEF gcSmiIdtr:FWORD
EXTERNDEF gcSmiGdtr:FWORD EXTERNDEF gcSmiGdtr:FWORD
EXTERNDEF gTaskGateDescriptor:QWORD
EXTERNDEF gcPsd:BYTE EXTERNDEF gcPsd:BYTE
EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE
@ -252,20 +253,10 @@ gcSmiGdtr LABEL FWORD
DD offset NullSeg DD offset NullSeg
gcSmiIdtr LABEL FWORD gcSmiIdtr LABEL FWORD
DW IDT_SIZE - 1 DW 0
DD offset _SmiIDT DD 0
_SmiIDT LABEL QWORD gTaskGateDescriptor LABEL QWORD
REPEAT 32
DW 0 ; Offset 0:15
DW CODE_SEL ; Segment selector
DB 0 ; Unused
DB 8eh ; Interrupt Gate, Present
DW 0 ; Offset 16:31
ENDM
IDT_SIZE = $ - offset _SmiIDT
TaskGateDescriptor LABEL DWORD
DW 0 ; Reserved DW 0 ; Reserved
DW EXCEPTION_TSS_SEL ; TSS Segment selector DW EXCEPTION_TSS_SEL ; TSS Segment selector
DB 0 ; Reserved DB 0 ; Reserved
@ -720,19 +711,4 @@ PageFaultStubFunction PROC
iretd iretd
PageFaultStubFunction ENDP PageFaultStubFunction ENDP
InitializeIDTSmmStackGuard PROC USES ebx
;
; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
; is a Task Gate Descriptor so that when a Page Fault Exception occurs,
; the processors can use a known good stack in case stack is ran out.
;
lea ebx, _SmiIDT + 14 * 8
lea edx, TaskGateDescriptor
mov eax, [edx]
mov [ebx], eax
mov eax, [edx + 4]
mov [ebx + 4], eax
ret
InitializeIDTSmmStackGuard ENDP
END END


@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ; ;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> ; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials ; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License ; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at ; which accompanies this distribution. The full text of the license may be found at
@ -24,6 +24,7 @@ extern ASM_PFX(SmiPFHandler)
global ASM_PFX(gcSmiIdtr) global ASM_PFX(gcSmiIdtr)
global ASM_PFX(gcSmiGdtr) global ASM_PFX(gcSmiGdtr)
global ASM_PFX(gTaskGateDescriptor)
global ASM_PFX(gcPsd) global ASM_PFX(gcPsd)
SECTION .data SECTION .data
@ -250,21 +251,10 @@ ASM_PFX(gcSmiGdtr):
DD NullSeg DD NullSeg
ASM_PFX(gcSmiIdtr): ASM_PFX(gcSmiIdtr):
DW IDT_SIZE - 1 DW 0
DD _SmiIDT DD 0
_SmiIDT: ASM_PFX(gTaskGateDescriptor):
%rep 32
DW 0 ; Offset 0:15
DW CODE_SEL ; Segment selector
DB 0 ; Unused
DB 0x8e ; Interrupt Gate, Present
DW 0 ; Offset 16:31
%endrep
IDT_SIZE equ $ - _SmiIDT
TaskGateDescriptor:
DW 0 ; Reserved DW 0 ; Reserved
DW EXCEPTION_TSS_SEL ; TSS Segment selector DW EXCEPTION_TSS_SEL ; TSS Segment selector
DB 0 ; Reserved DB 0 ; Reserved
@ -717,19 +707,3 @@ ASM_PFX(PageFaultStubFunction):
clts clts
iretd iretd
global ASM_PFX(InitializeIDTSmmStackGuard)
ASM_PFX(InitializeIDTSmmStackGuard):
push ebx
;
; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
; is a Task Gate Descriptor so that when a Page Fault Exception occurrs,
; the processors can use a known good stack in case stack is ran out.
;
lea ebx, [_SmiIDT + 14 * 8]
lea edx, [TaskGateDescriptor]
mov eax, [edx]
mov [ebx], eax
mov eax, [edx + 4]
mov [ebx + 4], eax
pop ebx
ret


@ -14,6 +14,33 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "PiSmmCpuDxeSmm.h" #include "PiSmmCpuDxeSmm.h"
extern UINT64 gTaskGateDescriptor;
EFI_PHYSICAL_ADDRESS mGdtBuffer;
UINTN mGdtBufferSize;
/**
Initialize IDT for SMM Stack Guard.
**/
VOID
EFIAPI
InitializeIDTSmmStackGuard (
VOID
)
{
IA32_IDT_GATE_DESCRIPTOR *IdtGate;
//
// If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
// is a Task Gate Descriptor so that when a Page Fault Exception occurs,
// the processors can use a known good stack in case stack is ran out.
//
IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
IdtGate += EXCEPT_IA32_PAGE_FAULT;
IdtGate->Uint64 = gTaskGateDescriptor;
}
/** /**
Initialize Gdt for all processors. Initialize Gdt for all processors.
@ -49,8 +76,10 @@ InitGdt (
gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR)); gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus)); mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL); ASSERT (GdtTssTables != NULL);
mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize; GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) { for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
@ -82,8 +111,10 @@ InitGdt (
// Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory. // Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory.
// //
GdtTssTableSize = gcSmiGdtr.Limit + 1; GdtTssTableSize = gcSmiGdtr.Limit + 1;
GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus)); mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL); ASSERT (GdtTssTables != NULL);
mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize; GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) { for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {


@ -1,7 +1,7 @@
/** @file /** @file
IA-32 processor specific functions to enable SMM profile. IA-32 processor specific functions to enable SMM profile.
Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR> Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at which accompanies this distribution. The full text of the license may be found at
@ -24,7 +24,7 @@ InitSmmS3Cr3 (
VOID VOID
) )
{ {
mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (0, TRUE); mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (TRUE);
return ; return ;
} }


@ -734,14 +734,12 @@ APHandler (
/** /**
Create 4G PageTable in SMRAM. Create 4G PageTable in SMRAM.
@param ExtraPages Additional page numbers besides for 4G memory @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
@param Is32BitPageTable Whether the page table is 32-bit PAE
@return PageTable Address @return PageTable Address
**/ **/
UINT32 UINT32
Gen4GPageTable ( Gen4GPageTable (
IN UINTN ExtraPages,
IN BOOLEAN Is32BitPageTable IN BOOLEAN Is32BitPageTable
) )
{ {
@ -775,10 +773,10 @@ Gen4GPageTable (
// //
// Allocate the page table // Allocate the page table
// //
PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded); PageTable = AllocatePageTableMemory (5 + PagesNeeded);
ASSERT (PageTable != NULL); ASSERT (PageTable != NULL);
PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages)); PageTable = (VOID *)((UINTN)PageTable);
Pte = (UINT64*)PageTable; Pte = (UINT64*)PageTable;
// //
@ -903,6 +901,94 @@ SetCacheability (
PageTable[PTIndex] |= (UINT64)Cacheability; PageTable[PTIndex] |= (UINT64)Cacheability;
} }
/**
Schedule a procedure to run on the specified CPU.
@param[in] Procedure The address of the procedure to run
@param[in] CpuIndex Target CPU Index
@param[in, OUT] ProcArguments The parameter to pass to the procedure
@param[in] BlockingMode Startup AP in blocking mode or not
@retval EFI_INVALID_PARAMETER CpuNumber not valid
@retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
@retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
@retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
@retval EFI_SUCCESS The procedure has been successfully scheduled
**/
EFI_STATUS
InternalSmmStartupThisAp (
IN EFI_AP_PROCEDURE Procedure,
IN UINTN CpuIndex,
IN OUT VOID *ProcArguments OPTIONAL,
IN BOOLEAN BlockingMode
)
{
if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
return EFI_INVALID_PARAMETER;
}
if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
return EFI_INVALID_PARAMETER;
}
if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
}
return EFI_INVALID_PARAMETER;
}
if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
}
return EFI_INVALID_PARAMETER;
}
if (BlockingMode) {
AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
} else {
if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
return EFI_INVALID_PARAMETER;
}
}
mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
if (BlockingMode) {
AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
}
return EFI_SUCCESS;
}
/**
Schedule a procedure to run on the specified CPU in blocking mode.
@param[in] Procedure The address of the procedure to run
@param[in] CpuIndex Target CPU Index
@param[in, out] ProcArguments The parameter to pass to the procedure
@retval EFI_INVALID_PARAMETER CpuNumber not valid
@retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
@retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
@retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
@retval EFI_SUCCESS The procedure has been successfully scheduled
**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
IN EFI_AP_PROCEDURE Procedure,
IN UINTN CpuIndex,
IN OUT VOID *ProcArguments OPTIONAL
)
{
return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
}
/** /**
Schedule a procedure to run on the specified CPU. Schedule a procedure to run on the specified CPU.
@ -926,23 +1012,7 @@ SmmStartupThisAp (
IN OUT VOID *ProcArguments OPTIONAL IN OUT VOID *ProcArguments OPTIONAL
) )
{ {
if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus || return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
!(*(mSmmMpSyncData->CpuData[CpuIndex].Present)) ||
gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
return EFI_INVALID_PARAMETER;
}
mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
}
return EFI_SUCCESS;
} }
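
A hypothetical caller of the new blocking wrapper; only the
SmmBlockingStartupThisAp() signature comes from this patch, the procedure and
its purpose are invented for illustration.

#include "PiSmmCpuDxeSmm.h"

//
// Hypothetical AP procedure: capture the AP's time-stamp counter into the
// caller-provided buffer.
//
VOID
EFIAPI
CaptureApTsc (
  IN OUT VOID  *Buffer
  )
{
  *(UINT64 *)Buffer = AsmReadTsc ();
}

EFI_STATUS
CollectApTsc (
  IN  UINTN   CpuIndex,
  OUT UINT64  *Tsc
  )
{
  //
  // Unlike SmmStartupThisAp(), whose blocking behavior follows
  // PcdCpuSmmBlockStartupThisAp, this call always waits until the AP has
  // finished running CaptureApTsc().
  //
  return SmmBlockingStartupThisAp (CaptureApTsc, CpuIndex, Tsc);
}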
/** /**
@ -964,6 +1034,7 @@ CpuSmmDebugEntry (
SMRAM_SAVE_STATE_MAP *CpuSaveState; SMRAM_SAVE_STATE_MAP *CpuSaveState;
if (FeaturePcdGet (PcdCpuSmmDebug)) { if (FeaturePcdGet (PcdCpuSmmDebug)) {
ASSERT(CpuIndex < mMaxNumberOfCpus);
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex]; CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) { if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
AsmWriteDr6 (CpuSaveState->x86._DR6); AsmWriteDr6 (CpuSaveState->x86._DR6);
@ -993,6 +1064,7 @@ CpuSmmDebugExit (
SMRAM_SAVE_STATE_MAP *CpuSaveState; SMRAM_SAVE_STATE_MAP *CpuSaveState;
if (FeaturePcdGet (PcdCpuSmmDebug)) { if (FeaturePcdGet (PcdCpuSmmDebug)) {
ASSERT(CpuIndex < mMaxNumberOfCpus);
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex]; CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) { if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 (); CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
@ -1022,8 +1094,8 @@ SmiRendezvous (
BOOLEAN BspInProgress; BOOLEAN BspInProgress;
UINTN Index; UINTN Index;
UINTN Cr2; UINTN Cr2;
BOOLEAN XdDisableFlag;
MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr; ASSERT(CpuIndex < mMaxNumberOfCpus);
// //
// Save Cr2 because Page Fault exception in SMM may override its value // Save Cr2 because Page Fault exception in SMM may override its value
@ -1082,20 +1154,6 @@ SmiRendezvous (
InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy); InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
} }
//
// Try to enable XD
//
XdDisableFlag = FALSE;
if (mXdSupported) {
MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
if (MiscEnableMsr.Bits.XD == 1) {
XdDisableFlag = TRUE;
MiscEnableMsr.Bits.XD = 0;
AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
}
ActivateXd ();
}
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) { if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
ActivateSmmProfile (CpuIndex); ActivateSmmProfile (CpuIndex);
} }
@ -1176,15 +1234,6 @@ SmiRendezvous (
// //
while (*mSmmMpSyncData->AllCpusInSync) { while (*mSmmMpSyncData->AllCpusInSync) {
CpuPause (); CpuPause ();
}
//
// Restore XD
//
if (XdDisableFlag) {
MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
MiscEnableMsr.Bits.XD = 1;
AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
} }
} }


@ -113,6 +113,19 @@ InitializeSmmIdt (
EFI_STATUS Status; EFI_STATUS Status;
BOOLEAN InterruptState; BOOLEAN InterruptState;
IA32_DESCRIPTOR DxeIdtr; IA32_DESCRIPTOR DxeIdtr;
//
// There are 32 (not 255) entries in it since only processor
// generated exceptions will be handled.
//
gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
//
// Allocate page aligned IDT, because it might be set as read only.
//
gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
ASSERT (gcSmiIdtr.Base != 0);
ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
// //
// Disable Interrupt and save DXE IDT table // Disable Interrupt and save DXE IDT table
// //
@ -731,9 +744,9 @@ PiCpuSmmEntry (
// //
BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)); BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
if ((FamilyId == 4) || (FamilyId == 5)) { if ((FamilyId == 4) || (FamilyId == 5)) {
Buffer = AllocateAlignedPages (BufferPages, SIZE_32KB); Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
} else { } else {
Buffer = AllocateAlignedPages (BufferPages, SIZE_4KB); Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
} }
ASSERT (Buffer != NULL); ASSERT (Buffer != NULL);
DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages))); DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
@ -842,6 +855,8 @@ PiCpuSmmEntry (
// //
SmmCpuFeaturesSmmRelocationComplete (); SmmCpuFeaturesSmmRelocationComplete ();
DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
// //
// SMM Time initialization // SMM Time initialization
// //
@ -1137,6 +1152,17 @@ ConfigSmmCodeAccessCheck (
} }
} }
/**
Set code region to be read only and data region to be execute disable.
**/
VOID
SetRegionAttributes (
VOID
)
{
SetMemMapAttributes ();
}
/** /**
This API provides a way to allocate memory for page table. This API provides a way to allocate memory for page table.
@ -1166,6 +1192,109 @@ AllocatePageTableMemory (
return AllocatePages (Pages); return AllocatePages (Pages);
} }
/**
Allocate pages for code.
@param[in] Pages Number of pages to be allocated.
@return Allocated memory.
**/
VOID *
AllocateCodePages (
IN UINTN Pages
)
{
EFI_STATUS Status;
EFI_PHYSICAL_ADDRESS Memory;
if (Pages == 0) {
return NULL;
}
Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
if (EFI_ERROR (Status)) {
return NULL;
}
return (VOID *) (UINTN) Memory;
}
/**
Allocate aligned pages for code.
@param[in] Pages Number of pages to be allocated.
@param[in] Alignment The requested alignment of the allocation.
Must be a power of two.
If Alignment is zero, then byte alignment is used.
@return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
IN UINTN Pages,
IN UINTN Alignment
)
{
EFI_STATUS Status;
EFI_PHYSICAL_ADDRESS Memory;
UINTN AlignedMemory;
UINTN AlignmentMask;
UINTN UnalignedPages;
UINTN RealPages;
//
// Alignment must be a power of two or zero.
//
ASSERT ((Alignment & (Alignment - 1)) == 0);
if (Pages == 0) {
return NULL;
}
if (Alignment > EFI_PAGE_SIZE) {
//
// Calculate the total number of pages since alignment is larger than page size.
//
AlignmentMask = Alignment - 1;
RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
//
// Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
//
ASSERT (RealPages > Pages);
Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
if (EFI_ERROR (Status)) {
return NULL;
}
AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
if (UnalignedPages > 0) {
//
// Free first unaligned page(s).
//
Status = gSmst->SmmFreePages (Memory, UnalignedPages);
ASSERT_EFI_ERROR (Status);
}
Memory = (EFI_PHYSICAL_ADDRESS) (AlignedMemory + EFI_PAGES_TO_SIZE (Pages));
UnalignedPages = RealPages - Pages - UnalignedPages;
if (UnalignedPages > 0) {
//
// Free last unaligned page(s).
//
Status = gSmst->SmmFreePages (Memory, UnalignedPages);
ASSERT_EFI_ERROR (Status);
}
} else {
//
// Do not over-allocate pages in this case.
//
Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
if (EFI_ERROR (Status)) {
return NULL;
}
AlignedMemory = (UINTN) Memory;
}
return (VOID *) AlignedMemory;
}
/** /**
Perform the remaining tasks. Perform the remaining tasks.
@ -1186,6 +1315,17 @@ PerformRemainingTasks (
// Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable. // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
// //
InitPaging (); InitPaging ();
//
// Mark critical region to be read-only in page table
//
SetRegionAttributes ();
//
// Set page table itself to be read-only
//
SetPageTableAttributes ();
// //
// Configure SMM Code Access Check feature if available. // Configure SMM Code Access Check feature if available.
// //

View File

@ -25,6 +25,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include <Protocol/SmmCpuService.h> #include <Protocol/SmmCpuService.h>
#include <Guid/AcpiS3Context.h> #include <Guid/AcpiS3Context.h>
#include <Guid/PiSmmMemoryAttributesTable.h>
#include <Library/BaseLib.h> #include <Library/BaseLib.h>
#include <Library/IoLib.h> #include <Library/IoLib.h>
@ -83,13 +84,38 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#define IA32_PG_PMNT BIT62 #define IA32_PG_PMNT BIT62
#define IA32_PG_NX BIT63 #define IA32_PG_NX BIT63
#define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P) #define PAGE_ATTRIBUTE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_U | IA32_PG_RW | IA32_PG_P)
// //
// Bits 1, 2, 5, 6 are reserved in the IA32 PAE PDPTE // Bits 1, 2, 5, 6 are reserved in the IA32 PAE PDPTE
// X64 PAE PDPTE does not have such restriction // X64 PAE PDPTE does not have such restriction
// //
#define IA32_PAE_PDPTE_ATTRIBUTE_BITS (IA32_PG_P) #define IA32_PAE_PDPTE_ATTRIBUTE_BITS (IA32_PG_P)
#define PAGE_PROGATE_BITS (IA32_PG_NX | PAGE_ATTRIBUTE_BITS)
#define PAGING_4K_MASK 0xFFF
#define PAGING_2M_MASK 0x1FFFFF
#define PAGING_1G_MASK 0x3FFFFFFF
#define PAGING_PAE_INDEX_MASK 0x1FF
#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
typedef enum {
PageNone,
Page4K,
Page2M,
Page1G,
} PAGE_ATTRIBUTE;
typedef struct {
PAGE_ATTRIBUTE Attribute;
UINT64 Length;
UINT64 AddressMask;
} PAGE_ATTRIBUTE_TABLE;
// //
// Size of Task-State Segment defined in IA32 Manual // Size of Task-State Segment defined in IA32 Manual
// //
@ -98,6 +124,8 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#define TSS_IA32_CR3_OFFSET 28 #define TSS_IA32_CR3_OFFSET 28
#define TSS_IA32_ESP_OFFSET 56 #define TSS_IA32_ESP_OFFSET 56
#define CR0_WP BIT16
// //
// Code select value // Code select value
// //
@ -395,6 +423,8 @@ typedef struct {
} SMM_CPU_SEMAPHORES; } SMM_CPU_SEMAPHORES;
extern IA32_DESCRIPTOR gcSmiGdtr; extern IA32_DESCRIPTOR gcSmiGdtr;
extern EFI_PHYSICAL_ADDRESS mGdtBuffer;
extern UINTN mGdtBufferSize;
extern IA32_DESCRIPTOR gcSmiIdtr; extern IA32_DESCRIPTOR gcSmiIdtr;
extern VOID *gcSmiIdtrPtr; extern VOID *gcSmiIdtrPtr;
extern CONST PROCESSOR_SMM_DESCRIPTOR gcPsd; extern CONST PROCESSOR_SMM_DESCRIPTOR gcPsd;
@ -414,14 +444,12 @@ extern SPIN_LOCK *mMemoryMappedLock;
/** /**
Create 4G PageTable in SMRAM. Create 4G PageTable in SMRAM.
@param ExtraPages Additional page numbers besides for 4G memory @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
@param Is32BitPageTable Whether the page table is 32-bit PAE
@return PageTable Address @return PageTable Address
**/ **/
UINT32 UINT32
Gen4GPageTable ( Gen4GPageTable (
IN UINTN ExtraPages,
IN BOOLEAN Is32BitPageTable IN BOOLEAN Is32BitPageTable
); );
@ -482,7 +510,7 @@ InitializeIDTSmmStackGuard (
/** /**
Initialize Gdt for all processors. Initialize Gdt for all processors.
@param[in] Cr3 CR3 value. @param[in] Cr3 CR3 value.
@param[out] GdtStepSize The step size for GDT table. @param[out] GdtStepSize The step size for GDT table.
@ -760,6 +788,96 @@ DumpModuleInfoByIp (
IN UINTN CallerIpAddress IN UINTN CallerIpAddress
); );
/**
This function sets memory attribute according to MemoryAttributesTable.
**/
VOID
SetMemMapAttributes (
VOID
);
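
A simplified sketch of the kind of walk SetMemMapAttributes() performs. For
illustration the table is shown as a plain array of EFI_MEMORY_DESCRIPTOR
entries passed in by the caller; the real function consumes the PI SMM memory
attributes table published by the SMM core and handles additional cases.

VOID
ApplyMemoryMapAttributes (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMap,
  IN UINTN                  MemoryMapSize,
  IN UINTN                  DescriptorSize
  )
{
  EFI_MEMORY_DESCRIPTOR  *Entry;
  EFI_MEMORY_DESCRIPTOR  *End;
  UINT64                 Length;

  Entry = MemoryMap;
  End   = (EFI_MEMORY_DESCRIPTOR *)((UINT8 *)MemoryMap + MemoryMapSize);
  while (Entry < End) {
    Length = EFI_PAGES_TO_SIZE ((UINTN)Entry->NumberOfPages);
    if ((Entry->Attribute & EFI_MEMORY_RO) != 0) {
      SmmSetMemoryAttributesEx (Entry->PhysicalStart, Length, EFI_MEMORY_RO, NULL);
    }
    if ((Entry->Attribute & EFI_MEMORY_XP) != 0) {
      SmmSetMemoryAttributesEx (Entry->PhysicalStart, Length, EFI_MEMORY_XP, NULL);
    }
    Entry = (EFI_MEMORY_DESCRIPTOR *)((UINT8 *)Entry + DescriptorSize);
  }
}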
/**
This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
VOID
);
/**
Return page table base.
@return page table base.
**/
UINTN
GetPageTableBase (
VOID
);
/**
This function sets the attributes for the memory region specified by BaseAddress and
Length from their current attributes to the attributes specified by Attributes.
@param[in] BaseAddress The physical address that is the start address of a memory region.
@param[in] Length The size in bytes of the memory region.
@param[in] Attributes The bit mask of attributes to set for the memory region.
@param[out] IsSplitted TRUE means page table splitted. FALSE means page table not splitted.
@retval EFI_SUCCESS The attributes were set for the memory region.
@retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
BaseAddress and Length cannot be modified.
@retval EFI_INVALID_PARAMETER Length is zero.
Attributes specified an illegal combination of attributes that
cannot be set together.
@retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
the memory resource range.
@retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
resource range specified by BaseAddress and Length.
The bit mask of attributes is not support for the memory resource
range specified by BaseAddress and Length.
**/
EFI_STATUS
EFIAPI
SmmSetMemoryAttributesEx (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
OUT BOOLEAN *IsSplitted OPTIONAL
);
/**
This function clears the attributes for the memory region specified by BaseAddress and
Length from their current attributes to the attributes specified by Attributes.
@param[in] BaseAddress The physical address that is the start address of a memory region.
@param[in] Length The size in bytes of the memory region.
@param[in] Attributes The bit mask of attributes to clear for the memory region.
@param[out] IsSplitted TRUE means page table splitted. FALSE means page table not splitted.
@retval EFI_SUCCESS The attributes were cleared for the memory region.
@retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
BaseAddress and Length cannot be modified.
@retval EFI_INVALID_PARAMETER Length is zero.
Attributes specified an illegal combination of attributes that
cannot be set together.
@retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
the memory resource range.
@retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
resource range specified by BaseAddress and Length.
The bit mask of attributes is not support for the memory resource
range specified by BaseAddress and Length.
**/
EFI_STATUS
EFIAPI
SmmClearMemoryAttributesEx (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
OUT BOOLEAN *IsSplitted OPTIONAL
);
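
A hypothetical usage pattern for the set/clear pair declared above; the
wrapper and its parameters are invented and only demonstrate the prototypes.

EFI_STATUS
UpdateLockedRegion (
  IN EFI_PHYSICAL_ADDRESS  Base,
  IN UINT64                Length
  )
{
  EFI_STATUS  Status;

  //
  // Temporarily lift the read-only protection on the region ...
  //
  Status = SmmClearMemoryAttributesEx (Base, Length, EFI_MEMORY_RO, NULL);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // ... update the region here ...
  //

  //
  // ... then restore the protection.
  //
  return SmmSetMemoryAttributesEx (Base, Length, EFI_MEMORY_RO, NULL);
}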
/** /**
This API provides a way to allocate memory for page table. This API provides a way to allocate memory for page table.
@ -780,6 +898,34 @@ AllocatePageTableMemory (
IN UINTN Pages IN UINTN Pages
); );
/**
Allocate pages for code.
@param[in] Pages Number of pages to be allocated.
@return Allocated memory.
**/
VOID *
AllocateCodePages (
IN UINTN Pages
);
/**
Allocate aligned pages for code.
@param[in] Pages Number of pages to be allocated.
@param[in] Alignment The requested alignment of the allocation.
Must be a power of two.
If Alignment is zero, then byte alignment is used.
@return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
IN UINTN Pages,
IN UINTN Alignment
);
// //
// S3 related global variable and function prototype. // S3 related global variable and function prototype.


@ -4,7 +4,7 @@
# This SMM driver performs SMM initialization, deploy SMM Entry Vector, # This SMM driver performs SMM initialization, deploy SMM Entry Vector,
# provides CPU specific services in SMM. # provides CPU specific services in SMM.
# #
# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# #
# This program and the accompanying materials # This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License # are licensed and made available under the terms and conditions of the BSD License
@ -44,6 +44,7 @@
SmmProfile.h SmmProfile.h
SmmProfileInternal.h SmmProfileInternal.h
SmramSaveState.c SmramSaveState.c
SmmCpuMemoryManagement.c
[Sources.Ia32] [Sources.Ia32]
Ia32/Semaphore.c Ia32/Semaphore.c
@ -133,6 +134,7 @@
gEfiGlobalVariableGuid ## SOMETIMES_PRODUCES ## Variable:L"SmmProfileData" gEfiGlobalVariableGuid ## SOMETIMES_PRODUCES ## Variable:L"SmmProfileData"
gEfiAcpi20TableGuid ## SOMETIMES_CONSUMES ## SystemTable gEfiAcpi20TableGuid ## SOMETIMES_CONSUMES ## SystemTable
gEfiAcpi10TableGuid ## SOMETIMES_CONSUMES ## SystemTable gEfiAcpi10TableGuid ## SOMETIMES_CONSUMES ## SystemTable
gEdkiiPiSmmMemoryAttributesTableGuid ## CONSUMES ## SystemTable
[FeaturePcd] [FeaturePcd]
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug ## CONSUMES gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug ## CONSUMES
@ -153,6 +155,7 @@
gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress ## SOMETIMES_PRODUCES gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress ## SOMETIMES_PRODUCES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable ## CONSUMES gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable ## CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode ## CONSUMES gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode ## CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStaticPageTable ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdAcpiS3Enable ## CONSUMES gEfiMdeModulePkgTokenSpaceGuid.PcdAcpiS3Enable ## CONSUMES
[Depex] [Depex]


@ -0,0 +1,871 @@
/** @file
Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "PiSmmCpuDxeSmm.h"
#define NEXT_MEMORY_DESCRIPTOR(MemoryDescriptor, Size) \
((EFI_MEMORY_DESCRIPTOR *)((UINT8 *)(MemoryDescriptor) + (Size)))
PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
{Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64},
{Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64},
{Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64},
};
/**
Return page table base.
@return page table base.
**/
UINTN
GetPageTableBase (
VOID
)
{
return (AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
}
/**
Return length according to page attributes.
@param[in] PageAttributes The page attribute of the page entry.
@return The length of page entry.
**/
UINTN
PageAttributeToLength (
IN PAGE_ATTRIBUTE PageAttribute
)
{
UINTN Index;
for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
if (PageAttribute == mPageAttributeTable[Index].Attribute) {
return (UINTN)mPageAttributeTable[Index].Length;
}
}
return 0;
}
/**
Return address mask according to page attributes.
@param[in] PageAttributes The page attribute of the page entry.
@return The address mask of page entry.
**/
UINTN
PageAttributeToMask (
IN PAGE_ATTRIBUTE PageAttribute
)
{
UINTN Index;
for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
if (PageAttribute == mPageAttributeTable[Index].Attribute) {
return (UINTN)mPageAttributeTable[Index].AddressMask;
}
}
return 0;
}
/**
Return page table entry to match the address.
@param[in] Address The address to be checked.
@param[out] PageAttributes The page attribute of the page entry.
@return The page entry.
**/
VOID *
GetPageTableEntry (
IN PHYSICAL_ADDRESS Address,
OUT PAGE_ATTRIBUTE *PageAttribute
)
{
UINTN Index1;
UINTN Index2;
UINTN Index3;
UINTN Index4;
UINT64 *L1PageTable;
UINT64 *L2PageTable;
UINT64 *L3PageTable;
UINT64 *L4PageTable;
Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_PAE_INDEX_MASK;
Index3 = ((UINTN)Address >> 30) & PAGING_PAE_INDEX_MASK;
Index2 = ((UINTN)Address >> 21) & PAGING_PAE_INDEX_MASK;
Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK;
if (sizeof(UINTN) == sizeof(UINT64)) {
L4PageTable = (UINT64 *)GetPageTableBase ();
if (L4PageTable[Index4] == 0) {
*PageAttribute = PageNone;
return NULL;
}
L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
} else {
L3PageTable = (UINT64 *)GetPageTableBase ();
}
if (L3PageTable[Index3] == 0) {
*PageAttribute = PageNone;
return NULL;
}
if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
// 1G
*PageAttribute = Page1G;
return &L3PageTable[Index3];
}
L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
if (L2PageTable[Index2] == 0) {
*PageAttribute = PageNone;
return NULL;
}
if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
// 2M
*PageAttribute = Page2M;
return &L2PageTable[Index2];
}
// 4k
L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
if ((L1PageTable[Index1] == 0) && (Address != 0)) {
*PageAttribute = PageNone;
return NULL;
}
*PageAttribute = Page4K;
return &L1PageTable[Index1];
}
/**
Return memory attributes of page entry.
@param[in] PageEntry The page entry.
@return Memory attributes of page entry.
**/
UINT64
GetAttributesFromPageEntry (
IN UINT64 *PageEntry
)
{
UINT64 Attributes;
Attributes = 0;
if ((*PageEntry & IA32_PG_P) == 0) {
Attributes |= EFI_MEMORY_RP;
}
if ((*PageEntry & IA32_PG_RW) == 0) {
Attributes |= EFI_MEMORY_RO;
}
if ((*PageEntry & IA32_PG_NX) != 0) {
Attributes |= EFI_MEMORY_XP;
}
return Attributes;
}
/**
Modify memory attributes of page entry.
@param[in] PageEntry The page entry.
@param[in] Attributes The bit mask of attributes to modify for the memory region.
@param[in] IsSet TRUE means to set attributes. FALSE means to clear attributes.
@param[out] IsModified TRUE means page table modified. FALSE means page table not modified.
**/
VOID
ConvertPageEntryAttribute (
IN UINT64 *PageEntry,
IN UINT64 Attributes,
IN BOOLEAN IsSet,
OUT BOOLEAN *IsModified
)
{
UINT64 CurrentPageEntry;
UINT64 NewPageEntry;
CurrentPageEntry = *PageEntry;
NewPageEntry = CurrentPageEntry;
if ((Attributes & EFI_MEMORY_RP) != 0) {
if (IsSet) {
NewPageEntry &= ~(UINT64)IA32_PG_P;
} else {
NewPageEntry |= IA32_PG_P;
}
}
if ((Attributes & EFI_MEMORY_RO) != 0) {
if (IsSet) {
NewPageEntry &= ~(UINT64)IA32_PG_RW;
} else {
NewPageEntry |= IA32_PG_RW;
}
}
if ((Attributes & EFI_MEMORY_XP) != 0) {
if (IsSet) {
NewPageEntry |= IA32_PG_NX;
} else {
NewPageEntry &= ~IA32_PG_NX;
}
}
*PageEntry = NewPageEntry;
if (CurrentPageEntry != NewPageEntry) {
*IsModified = TRUE;
DEBUG ((DEBUG_VERBOSE, "ConvertPageEntryAttribute 0x%lx", CurrentPageEntry));
DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", NewPageEntry));
} else {
*IsModified = FALSE;
}
}
/**
This function returns whether the page entry needs to be split to apply attributes to the specified range.
@param[in] BaseAddress The base address to be checked.
@param[in] Length The length to be checked.
@param[in] PageEntry The page entry to be checked.
@param[in] PageAttribute The page attribute of the page entry.
@return The page attribute to split to, or PageNone if no split is needed.
**/
PAGE_ATTRIBUTE
NeedSplitPage (
IN PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 *PageEntry,
IN PAGE_ATTRIBUTE PageAttribute
)
{
UINT64 PageEntryLength;
PageEntryLength = PageAttributeToLength (PageAttribute);
if (((BaseAddress & (PageEntryLength - 1)) == 0) && (Length >= PageEntryLength)) {
return PageNone;
}
if (((BaseAddress & PAGING_2M_MASK) != 0) || (Length < SIZE_2MB)) {
return Page4K;
}
return Page2M;
}
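//
// Example (illustrative): converting a 4 KB sub-range of a region currently mapped by a
// single 2 MB entry returns Page4K, so the caller must split that entry before converting.
//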
/**
This function splits one page entry into smaller page entries.
@param[in] PageEntry The page entry to be split.
@param[in] PageAttribute The page attribute of the page entry.
@param[in] SplitAttribute How to split the page entry.
@retval RETURN_SUCCESS The page entry was split.
@retval RETURN_UNSUPPORTED The page entry does not support being split.
@retval RETURN_OUT_OF_RESOURCES Not enough resources to split the page entry.
**/
RETURN_STATUS
SplitPage (
IN UINT64 *PageEntry,
IN PAGE_ATTRIBUTE PageAttribute,
IN PAGE_ATTRIBUTE SplitAttribute
)
{
UINT64 BaseAddress;
UINT64 *NewPageEntry;
UINTN Index;
ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
if (PageAttribute == Page2M) {
//
// Split 2M to 4K
//
ASSERT (SplitAttribute == Page4K);
if (SplitAttribute == Page4K) {
NewPageEntry = AllocatePageTableMemory (1);
DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
if (NewPageEntry == NULL) {
return RETURN_OUT_OF_RESOURCES;
}
BaseAddress = *PageEntry & PAGING_2M_ADDRESS_MASK_64;
for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
NewPageEntry[Index] = BaseAddress + SIZE_4KB * Index + ((*PageEntry) & PAGE_PROGATE_BITS);
}
(*PageEntry) = (UINT64)(UINTN)NewPageEntry + ((*PageEntry) & PAGE_PROGATE_BITS);
return RETURN_SUCCESS;
} else {
return RETURN_UNSUPPORTED;
}
} else if (PageAttribute == Page1G) {
//
// Split 1G to 2M
// No need to support splitting 1G->4K directly; use 1G->2M, then 2M->4K, to keep the page table compact.
//
ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
NewPageEntry = AllocatePageTableMemory (1);
DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
if (NewPageEntry == NULL) {
return RETURN_OUT_OF_RESOURCES;
}
BaseAddress = *PageEntry & PAGING_1G_ADDRESS_MASK_64;
for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
NewPageEntry[Index] = BaseAddress + SIZE_2MB * Index + IA32_PG_PS + ((*PageEntry) & PAGE_PROGATE_BITS);
}
(*PageEntry) = (UINT64)(UINTN)NewPageEntry + ((*PageEntry) & PAGE_PROGATE_BITS);
return RETURN_SUCCESS;
} else {
return RETURN_UNSUPPORTED;
}
} else {
return RETURN_UNSUPPORTED;
}
}
/**
This function modifies the page attributes for the memory region specified by BaseAddress and
Length from their current attributes to the attributes specified by Attributes.
Caller should make sure BaseAddress and Length are page aligned.
@param[in] BaseAddress The physical address that is the start address of a memory region.
@param[in] Length The size in bytes of the memory region.
@param[in] Attributes The bit mask of attributes to modify for the memory region.
@param[in] IsSet TRUE means to set attributes. FALSE means to clear attributes.
@param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
@param[out] IsModified TRUE means the page table was modified. FALSE means the page table was not modified.
@retval RETURN_SUCCESS The attributes were modified for the memory region.
@retval RETURN_ACCESS_DENIED The attributes for the memory resource range specified by
BaseAddress and Length cannot be modified.
@retval RETURN_INVALID_PARAMETER Length is zero.
Attributes specified an illegal combination of attributes that
cannot be set together.
@retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
the memory resource range.
@retval RETURN_UNSUPPORTED The processor does not support one or more bytes of the memory
resource range specified by BaseAddress and Length.
The bit mask of attributes is not supported for the memory resource
range specified by BaseAddress and Length.
**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
IN PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
IN BOOLEAN IsSet,
OUT BOOLEAN *IsSplitted, OPTIONAL
OUT BOOLEAN *IsModified OPTIONAL
)
{
UINT64 *PageEntry;
PAGE_ATTRIBUTE PageAttribute;
UINTN PageEntryLength;
PAGE_ATTRIBUTE SplitAttribute;
RETURN_STATUS Status;
BOOLEAN IsEntryModified;
ASSERT (Attributes != 0);
ASSERT ((Attributes & ~(EFI_MEMORY_RP | EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0);
ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
ASSERT ((Length & (SIZE_4KB - 1)) == 0);
if (Length == 0) {
return RETURN_INVALID_PARAMETER;
}
// DEBUG ((DEBUG_ERROR, "ConvertMemoryPageAttributes(%x) - %016lx, %016lx, %02lx\n", IsSet, BaseAddress, Length, Attributes));
if (IsSplitted != NULL) {
*IsSplitted = FALSE;
}
if (IsModified != NULL) {
*IsModified = FALSE;
}
//
// The logic below checks 2M/4K pages to make sure we do not waste memory.
//
while (Length != 0) {
PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
if (PageEntry == NULL) {
return RETURN_UNSUPPORTED;
}
PageEntryLength = PageAttributeToLength (PageAttribute);
SplitAttribute = NeedSplitPage (BaseAddress, Length, PageEntry, PageAttribute);
if (SplitAttribute == PageNone) {
ConvertPageEntryAttribute (PageEntry, Attributes, IsSet, &IsEntryModified);
if (IsEntryModified) {
if (IsModified != NULL) {
*IsModified = TRUE;
}
}
//
// Convert success, move to next
//
BaseAddress += PageEntryLength;
Length -= PageEntryLength;
} else {
Status = SplitPage (PageEntry, PageAttribute, SplitAttribute);
if (RETURN_ERROR (Status)) {
return RETURN_UNSUPPORTED;
}
if (IsSplitted != NULL) {
*IsSplitted = TRUE;
}
if (IsModified != NULL) {
*IsModified = TRUE;
}
//
// Just split the current page.
// The conversion will succeed in the next round.
//
}
}
return RETURN_SUCCESS;
}
/**
FlushTlb on current processor.
@param[in,out] Buffer Pointer to private data buffer.
**/
VOID
EFIAPI
FlushTlbOnCurrentProcessor (
IN OUT VOID *Buffer
)
{
CpuFlushTlb ();
}
/**
FlushTlb for all processors.
**/
VOID
FlushTlbForAll (
VOID
)
{
UINTN Index;
FlushTlbOnCurrentProcessor (NULL);
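//
// Flush the TLB on the calling processor first, then broadcast the flush to every other
// processor in blocking mode so the attribute change takes effect on all CPUs.
//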
for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
if (Index != gSmst->CurrentlyExecutingCpu) {
// Force the AP to start up in blocking mode.
SmmBlockingStartupThisAp (FlushTlbOnCurrentProcessor, Index, NULL);
// Do not check return status, because AP might not be present in some corner cases.
}
}
}
/**
This function sets the attributes for the memory region specified by BaseAddress and
Length from their current attributes to the attributes specified by Attributes.
@param[in] BaseAddress The physical address that is the start address of a memory region.
@param[in] Length The size in bytes of the memory region.
@param[in] Attributes The bit mask of attributes to set for the memory region.
@param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
@retval EFI_SUCCESS The attributes were set for the memory region.
@retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
BaseAddress and Length cannot be modified.
@retval EFI_INVALID_PARAMETER Length is zero.
Attributes specified an illegal combination of attributes that
cannot be set together.
@retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
the memory resource range.
@retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
resource range specified by BaseAddress and Length.
The bit mask of attributes is not supported for the memory resource
range specified by BaseAddress and Length.
**/
EFI_STATUS
EFIAPI
SmmSetMemoryAttributesEx (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
OUT BOOLEAN *IsSplitted OPTIONAL
)
{
EFI_STATUS Status;
BOOLEAN IsModified;
Status = ConvertMemoryPageAttributes (BaseAddress, Length, Attributes, TRUE, IsSplitted, &IsModified);
if (!EFI_ERROR(Status)) {
if (IsModified) {
//
// Flush TLB as last step
//
FlushTlbForAll();
}
}
return Status;
}
/**
This function clears the attributes for the memory region specified by BaseAddress and
Length from their current attributes to the attributes specified by Attributes.
@param[in] BaseAddress The physical address that is the start address of a memory region.
@param[in] Length The size in bytes of the memory region.
@param[in] Attributes The bit mask of attributes to clear for the memory region.
@param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
@retval EFI_SUCCESS The attributes were cleared for the memory region.
@retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
BaseAddress and Length cannot be modified.
@retval EFI_INVALID_PARAMETER Length is zero.
Attributes specified an illegal combination of attributes that
cannot be set together.
@retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
the memory resource range.
@retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
resource range specified by BaseAddress and Length.
The bit mask of attributes is not supported for the memory resource
range specified by BaseAddress and Length.
**/
EFI_STATUS
EFIAPI
SmmClearMemoryAttributesEx (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
OUT BOOLEAN *IsSplitted OPTIONAL
)
{
EFI_STATUS Status;
BOOLEAN IsModified;
Status = ConvertMemoryPageAttributes (BaseAddress, Length, Attributes, FALSE, IsSplitted, &IsModified);
if (!EFI_ERROR(Status)) {
if (IsModified) {
//
// Flush TLB as last step
//
FlushTlbForAll();
}
}
return Status;
}
/**
This function sets the attributes for the memory region specified by BaseAddress and
Length from their current attributes to the attributes specified by Attributes.
@param[in] BaseAddress The physical address that is the start address of a memory region.
@param[in] Length The size in bytes of the memory region.
@param[in] Attributes The bit mask of attributes to set for the memory region.
@retval EFI_SUCCESS The attributes were set for the memory region.
@retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
BaseAddress and Length cannot be modified.
@retval EFI_INVALID_PARAMETER Length is zero.
Attributes specified an illegal combination of attributes that
cannot be set together.
@retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
the memory resource range.
@retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
resource range specified by BaseAddress and Length.
The bit mask of attributes is not supported for the memory resource
range specified by BaseAddress and Length.
**/
EFI_STATUS
EFIAPI
SmmSetMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
)
{
return SmmSetMemoryAttributesEx (BaseAddress, Length, Attributes, NULL);
}
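//
// Example (illustrative, not part of this module): mark a hypothetical 4 KB buffer as
// read-only and non-executable in the SMM page table:
//
//   Status = SmmSetMemoryAttributes (
//              (EFI_PHYSICAL_ADDRESS)(UINTN)Buffer,
//              SIZE_4KB,
//              EFI_MEMORY_RO | EFI_MEMORY_XP
//              );
//   ASSERT_EFI_ERROR (Status);
//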
/**
This function clears the attributes for the memory region specified by BaseAddress and
Length from their current attributes to the attributes specified by Attributes.
@param[in] BaseAddress The physical address that is the start address of a memory region.
@param[in] Length The size in bytes of the memory region.
@param[in] Attributes The bit mask of attributes to clear for the memory region.
@retval EFI_SUCCESS The attributes were cleared for the memory region.
@retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
BaseAddress and Length cannot be modified.
@retval EFI_INVALID_PARAMETER Length is zero.
Attributes specified an illegal combination of attributes that
cannot be set together.
@retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
the memory resource range.
@retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
resource range specified by BaseAddress and Length.
The bit mask of attributes is not supported for the memory resource
range specified by BaseAddress and Length.
**/
EFI_STATUS
EFIAPI
SmmClearMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
)
{
return SmmClearMemoryAttributesEx (BaseAddress, Length, Attributes, NULL);
}
/**
Retrieves a pointer to the system configuration table from the SMM System Table
based on a specified GUID.
@param[in] TableGuid The pointer to table's GUID type.
@param[out] Table The pointer to the table associated with TableGuid in the SMM System Table.
@retval EFI_SUCCESS A configuration table matching TableGuid was found.
@retval EFI_NOT_FOUND A configuration table matching TableGuid could not be found.
**/
EFI_STATUS
EFIAPI
SmmGetSystemConfigurationTable (
IN EFI_GUID *TableGuid,
OUT VOID **Table
)
{
UINTN Index;
ASSERT (TableGuid != NULL);
ASSERT (Table != NULL);
*Table = NULL;
for (Index = 0; Index < gSmst->NumberOfTableEntries; Index++) {
if (CompareGuid (TableGuid, &(gSmst->SmmConfigurationTable[Index].VendorGuid))) {
*Table = gSmst->SmmConfigurationTable[Index].VendorTable;
return EFI_SUCCESS;
}
}
return EFI_NOT_FOUND;
}
/**
This function sets each CPU's SMI entry (code) region to be read-only and executable, and its SMM save state (data) region to be writable and non-executable (XP).
**/
VOID
PatchSmmSaveStateMap (
VOID
)
{
UINTN Index;
UINTN TileCodeSize;
UINTN TileDataSize;
UINTN TileSize;
TileCodeSize = GetSmiHandlerSize ();
TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
TileDataSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR);
TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
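//
// Round the combined code + data size up to the next power of two to obtain the per-CPU tile size.
//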
TileSize = TileDataSize + TileCodeSize - 1;
TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
DEBUG ((DEBUG_INFO, "PatchSmmSaveStateMap:\n"));
for (Index = 0; Index < mMaxNumberOfCpus - 1; Index++) {
//
// Code
//
SmmSetMemoryAttributes (
mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET,
TileCodeSize,
EFI_MEMORY_RO
);
SmmClearMemoryAttributes (
mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET,
TileCodeSize,
EFI_MEMORY_XP
);
//
// Data
//
SmmClearMemoryAttributes (
mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET + TileCodeSize,
TileSize - TileCodeSize,
EFI_MEMORY_RO
);
SmmSetMemoryAttributes (
mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET + TileCodeSize,
TileSize - TileCodeSize,
EFI_MEMORY_XP
);
}
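//
// The last CPU's tile is patched separately below: its data region extends only to the
// 32 KB boundary (SIZE_32KB - TileCodeSize) rather than TileSize - TileCodeSize.
//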
//
// Code
//
SmmSetMemoryAttributes (
mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET,
TileCodeSize,
EFI_MEMORY_RO
);
SmmClearMemoryAttributes (
mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET,
TileCodeSize,
EFI_MEMORY_XP
);
//
// Data
//
SmmClearMemoryAttributes (
mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET + TileCodeSize,
SIZE_32KB - TileCodeSize,
EFI_MEMORY_RO
);
SmmSetMemoryAttributes (
mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET + TileCodeSize,
SIZE_32KB - TileCodeSize,
EFI_MEMORY_XP
);
}
/**
This function sets GDT/IDT buffer to be RO and XP.
**/
VOID
PatchGdtIdtMap (
VOID
)
{
EFI_PHYSICAL_ADDRESS BaseAddress;
UINTN Size;
//
// GDT
//
DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - GDT:\n"));
BaseAddress = mGdtBuffer;
Size = ALIGN_VALUE(mGdtBufferSize, SIZE_4KB);
SmmSetMemoryAttributes (
BaseAddress,
Size,
EFI_MEMORY_RO
);
SmmSetMemoryAttributes (
BaseAddress,
Size,
EFI_MEMORY_XP
);
//
// IDT
//
DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - IDT:\n"));
BaseAddress = gcSmiIdtr.Base;
Size = ALIGN_VALUE(gcSmiIdtr.Limit + 1, SIZE_4KB);
SmmSetMemoryAttributes (
BaseAddress,
Size,
EFI_MEMORY_RO
);
SmmSetMemoryAttributes (
BaseAddress,
Size,
EFI_MEMORY_XP
);
}
/**
This function sets memory attribute according to MemoryAttributesTable.
**/
VOID
SetMemMapAttributes (
VOID
)
{
EFI_MEMORY_DESCRIPTOR *MemoryMap;
EFI_MEMORY_DESCRIPTOR *MemoryMapStart;
UINTN MemoryMapEntryCount;
UINTN DescriptorSize;
UINTN Index;
EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE *MemoryAttributesTable;
SmmGetSystemConfigurationTable (&gEdkiiPiSmmMemoryAttributesTableGuid, (VOID **)&MemoryAttributesTable);
if (MemoryAttributesTable == NULL) {
DEBUG ((DEBUG_INFO, "MemoryAttributesTable - NULL\n"));
return ;
}
DEBUG ((DEBUG_INFO, "MemoryAttributesTable:\n"));
DEBUG ((DEBUG_INFO, " Version - 0x%08x\n", MemoryAttributesTable->Version));
DEBUG ((DEBUG_INFO, " NumberOfEntries - 0x%08x\n", MemoryAttributesTable->NumberOfEntries));
DEBUG ((DEBUG_INFO, " DescriptorSize - 0x%08x\n", MemoryAttributesTable->DescriptorSize));
MemoryMapEntryCount = MemoryAttributesTable->NumberOfEntries;
DescriptorSize = MemoryAttributesTable->DescriptorSize;
MemoryMapStart = (EFI_MEMORY_DESCRIPTOR *)(MemoryAttributesTable + 1);
MemoryMap = MemoryMapStart;
for (Index = 0; Index < MemoryMapEntryCount; Index++) {
DEBUG ((DEBUG_INFO, "Entry (0x%x)\n", MemoryMap));
DEBUG ((DEBUG_INFO, " Type - 0x%x\n", MemoryMap->Type));
DEBUG ((DEBUG_INFO, " PhysicalStart - 0x%016lx\n", MemoryMap->PhysicalStart));
DEBUG ((DEBUG_INFO, " VirtualStart - 0x%016lx\n", MemoryMap->VirtualStart));
DEBUG ((DEBUG_INFO, " NumberOfPages - 0x%016lx\n", MemoryMap->NumberOfPages));
DEBUG ((DEBUG_INFO, " Attribute - 0x%016lx\n", MemoryMap->Attribute));
MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
}
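//
// Second pass: apply attributes. Runtime services code becomes read-only; all other
// entry types (including runtime services data) become non-executable.
//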
MemoryMap = MemoryMapStart;
for (Index = 0; Index < MemoryMapEntryCount; Index++) {
DEBUG ((DEBUG_VERBOSE, "SetAttribute: Memory Entry - 0x%lx, 0x%x\n", MemoryMap->PhysicalStart, MemoryMap->NumberOfPages));
switch (MemoryMap->Type) {
case EfiRuntimeServicesCode:
SmmSetMemoryAttributes (
MemoryMap->PhysicalStart,
EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
EFI_MEMORY_RO
);
break;
case EfiRuntimeServicesData:
SmmSetMemoryAttributes (
MemoryMap->PhysicalStart,
EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
EFI_MEMORY_XP
);
break;
default:
SmmSetMemoryAttributes (
MemoryMap->PhysicalStart,
EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
EFI_MEMORY_XP
);
break;
}
MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
}
PatchSmmSaveStateMap ();
PatchGdtIdtMap ();
return ;
}

View File

@ -29,11 +29,6 @@ UINTN mSmmProfileSize;
// //
UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE; UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;
//
// The flag indicates if execute-disable is supported by processor.
//
BOOLEAN mXdSupported = TRUE;
// //
// The flag indicates if execute-disable is enabled on processor. // The flag indicates if execute-disable is enabled on processor.
// //
@ -529,6 +524,12 @@ InitPaging (
// //
continue; continue;
} }
if ((*Pde & IA32_PG_PS) != 0) {
//
// This is a 1G entry; skip it.
//
continue;
}
Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK); Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
if (Pte == 0) { if (Pte == 0) {
continue; continue;
@ -587,6 +588,15 @@ InitPaging (
// //
continue; continue;
} }
if ((*Pde & IA32_PG_PS) != 0) {
//
// This is a 1G entry; set the NX bit and skip it.
//
if (mXdSupported) {
*Pde = *Pde | IA32_PG_NX;
}
continue;
}
Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK); Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
if (Pte == 0) { if (Pte == 0) {
continue; continue;
@ -975,25 +985,6 @@ CheckFeatureSupported (
} }
} }
/**
Enable XD feature.
**/
VOID
ActivateXd (
VOID
)
{
UINT64 MsrRegisters;
MsrRegisters = AsmReadMsr64 (MSR_EFER);
if ((MsrRegisters & MSR_EFER_XD) != 0) {
return ;
}
MsrRegisters |= MSR_EFER_XD;
AsmWriteMsr64 (MSR_EFER, MsrRegisters);
}
/** /**
Enable single step. Enable single step.

View File

@ -96,15 +96,6 @@ CheckFeatureSupported (
VOID VOID
); );
/**
Enable XD feature.
**/
VOID
ActivateXd (
VOID
);
/** /**
Update page table according to protected memory ranges and the 4KB-page mapped memory ranges. Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
@ -114,7 +105,13 @@ InitPaging (
VOID VOID
); );
//
// The flag indicates if execute-disable is supported by processor.
//
extern BOOLEAN mXdSupported; extern BOOLEAN mXdSupported;
//
// The flag indicates if execute-disable is enabled on processor.
//
extern BOOLEAN mXdEnabled; extern BOOLEAN mXdEnabled;
#endif // _SMM_PROFILE_H_ #endif // _SMM_PROFILE_H_

View File

@ -18,6 +18,8 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#define ACC_MAX_BIT BIT3 #define ACC_MAX_BIT BIT3
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool); LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN m1GPageTableSupport = FALSE; BOOLEAN m1GPageTableSupport = FALSE;
UINT8 mPhysicalAddressBits;
BOOLEAN mCpuSmmStaticPageTable;
/** /**
Check if 1-GByte pages is supported by processor or not. Check if 1-GByte pages is supported by processor or not.
@ -85,6 +87,146 @@ GetSubEntriesNum (
return BitFieldRead64 (*Entry, 52, 60); return BitFieldRead64 (*Entry, 52, 60);
} }
/**
Calculate the maximum supported physical address bits.
@return The number of physical address bits supported.
**/
UINT8
CalculateMaximumSupportAddress (
VOID
)
{
UINT32 RegEax;
UINT8 PhysicalAddressBits;
VOID *Hob;
//
// Get physical address bits supported.
//
Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
if (Hob != NULL) {
PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
} else {
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
if (RegEax >= 0x80000008) {
AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
PhysicalAddressBits = (UINT8) RegEax;
} else {
PhysicalAddressBits = 36;
}
}
//
// IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
//
ASSERT (PhysicalAddressBits <= 52);
if (PhysicalAddressBits > 48) {
PhysicalAddressBits = 48;
}
return PhysicalAddressBits;
}
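//
// Example (illustrative): a CPU HOB reporting SizeOfMemorySpace = 36 yields 36 address bits,
// while a CPUID leaf 0x80000008 report of 52 bits is clamped to 48 for page table construction.
//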
/**
Set static page table.
@param[in] PageTable Address of page table.
**/
VOID
SetStaticPageTable (
IN UINTN PageTable
)
{
UINT64 PageAddress;
UINTN NumberOfPml4EntriesNeeded;
UINTN NumberOfPdpEntriesNeeded;
UINTN IndexOfPml4Entries;
UINTN IndexOfPdpEntries;
UINTN IndexOfPageDirectoryEntries;
UINT64 *PageMapLevel4Entry;
UINT64 *PageMap;
UINT64 *PageDirectoryPointerEntry;
UINT64 *PageDirectory1GEntry;
UINT64 *PageDirectoryEntry;
if (mPhysicalAddressBits <= 39 ) {
NumberOfPml4EntriesNeeded = 1;
NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
} else {
NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
NumberOfPdpEntriesNeeded = 512;
}
//
// By architecture only one PageMapLevel4 exists, located at the base of the supplied page table.
//
PageMap = (VOID *) PageTable;
PageMapLevel4Entry = PageMap;
PageAddress = 0;
for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
//
// Each PML4 entry points to a page of Page Directory Pointer entries.
//
PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);
if (PageDirectoryPointerEntry == NULL) {
PageDirectoryPointerEntry = AllocatePageTableMemory (1);
ASSERT(PageDirectoryPointerEntry != NULL);
ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
*PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask) | PAGE_ATTRIBUTE_BITS;
}
if (m1GPageTableSupport) {
PageDirectory1GEntry = PageDirectoryPointerEntry;
for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
//
// Skip the < 4G entries
//
continue;
}
//
// Fill in the Page Directory entries
//
*PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
}
} else {
PageAddress = BASE_4GB;
for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
//
// Skip the < 4G entries
//
continue;
}
//
// Each Page Directory Pointer entry points to a page of Page Directory entries.
// Allocate space for them and fill them in the IndexOfPageDirectoryEntries loop below.
//
PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);
if (PageDirectoryEntry == NULL) {
PageDirectoryEntry = AllocatePageTableMemory (1);
ASSERT(PageDirectoryEntry != NULL);
ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
//
// Fill in the Page Directory Pointer entry
//
*PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;
}
for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
//
// Fill in the Page Directory entries
//
*PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
}
}
}
}
}
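//
// Note: SetStaticPageTable only fills entries at or above 4 GB; the range below 4 GB is
// covered by the Gen4GPageTable () output linked into the same PML4 by SmmInitPageTable ().
//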
/** /**
Create PageTable for SMM use. Create PageTable for SMM use.
@ -108,11 +250,17 @@ SmmInitPageTable (
// //
InitializeSpinLock (mPFLock); InitializeSpinLock (mPFLock);
mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
m1GPageTableSupport = Is1GPageSupport (); m1GPageTableSupport = Is1GPageSupport ();
DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));
mPhysicalAddressBits = CalculateMaximumSupportAddress ();
DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
// //
// Generate PAE page table for the first 4GB memory space // Generate PAE page table for the first 4GB memory space
// //
Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1, FALSE); Pages = Gen4GPageTable (FALSE);
// //
// Set IA32_PG_PMNT bit to mask this entry // Set IA32_PG_PMNT bit to mask this entry
@ -125,21 +273,28 @@ SmmInitPageTable (
// //
// Fill Page-Table-Level4 (PML4) entry // Fill Page-Table-Level4 (PML4) entry
// //
PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1)); PTEntry = (UINT64*)AllocatePageTableMemory (1);
*PTEntry = Pages + PAGE_ATTRIBUTE_BITS; ASSERT (PTEntry != NULL);
*PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry)); ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
// //
// Set sub-entries number // Set sub-entries number
// //
SetSubEntriesNum (PTEntry, 3); SetSubEntriesNum (PTEntry, 3);
// if (mCpuSmmStaticPageTable) {
// Add remaining pages to page pool SetStaticPageTable ((UINTN)PTEntry);
// } else {
FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry)); //
while ((UINTN)FreePage < Pages) { // Add pages to page pool
InsertTailList (&mPagePool, FreePage); //
FreePage += EFI_PAGE_SIZE / sizeof (*FreePage); FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
ASSERT (FreePage != NULL);
for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
InsertTailList (&mPagePool, FreePage);
FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
}
} }
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) { if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
@ -561,7 +716,7 @@ SmiDefaultPFHandler (
break; break;
case SmmPageSize1G: case SmmPageSize1G:
if (!m1GPageTableSupport) { if (!m1GPageTableSupport) {
DEBUG ((EFI_D_ERROR, "1-GByte pages is not supported!")); DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
ASSERT (FALSE); ASSERT (FALSE);
} }
// //
@ -612,8 +767,8 @@ SmiDefaultPFHandler (
// Check if the entry has already existed, this issue may occur when the different // Check if the entry has already existed, this issue may occur when the different
// size page entries created under the same entry // size page entries created under the same entry
// //
DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex])); DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n")); DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
ASSERT (FALSE); ASSERT (FALSE);
} }
// //
@ -654,13 +809,18 @@ SmiPFHandler (
PFAddress = AsmReadCr2 (); PFAddress = AsmReadCr2 ();
if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
CpuDeadLoop ();
}
// //
// If a page fault occurs in SMRAM range, it should be in a SMM stack guard page. // If a page fault occurs in SMRAM range, it should be in a SMM stack guard page.
// //
if ((FeaturePcdGet (PcdCpuSmmStackGuard)) && if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
(PFAddress >= mCpuHotPlugData.SmrrBase) && (PFAddress >= mCpuHotPlugData.SmrrBase) &&
(PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) { (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n")); DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
CpuDeadLoop (); CpuDeadLoop ();
} }
@ -670,7 +830,7 @@ SmiPFHandler (
if ((PFAddress < mCpuHotPlugData.SmrrBase) || if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
(PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) { (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) { if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress)); DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
DEBUG_CODE ( DEBUG_CODE (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp); DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
); );
@ -689,3 +849,87 @@ SmiPFHandler (
ReleaseSpinLock (mPFLock); ReleaseSpinLock (mPFLock);
} }
/**
This function sets memory attribute for page table.
**/
VOID
SetPageTableAttributes (
VOID
)
{
UINTN Index2;
UINTN Index3;
UINTN Index4;
UINT64 *L1PageTable;
UINT64 *L2PageTable;
UINT64 *L3PageTable;
UINT64 *L4PageTable;
BOOLEAN IsSplitted;
BOOLEAN PageTableSplitted;
if (!mCpuSmmStaticPageTable) {
return ;
}
DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
//
// Disable write protection, because we need mark page table to be write protected.
// We need *write* page table memory, to mark itself to be *read only*.
//
AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
do {
DEBUG ((DEBUG_INFO, "Start...\n"));
PageTableSplitted = FALSE;
L4PageTable = (UINT64 *)GetPageTableBase ();
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
if (L3PageTable == NULL) {
continue;
}
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
// 1G
continue;
}
L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
if (L2PageTable == NULL) {
continue;
}
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
// 2M
continue;
}
L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
if (L1PageTable == NULL) {
continue;
}
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
}
}
}
} while (PageTableSplitted);
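//
// The loop above repeats because marking page table pages read-only may itself split
// large pages, which allocates new page table pages that must be marked read-only too.
//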
//
// Enable write protection, after page table updated.
//
AsmWriteCr0 (AsmReadCr0() | CR0_WP);
return ;
}

View File

@ -1,6 +1,6 @@
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# #
# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials # This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License # are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at # which accompanies this distribution. The full text of the license may be found at
@ -24,8 +24,13 @@ ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL ASM_PFX(gSmiCr3) ASM_GLOBAL ASM_PFX(gSmiCr3)
ASM_GLOBAL ASM_PFX(gSmiStack) ASM_GLOBAL ASM_PFX(gSmiStack)
ASM_GLOBAL ASM_PFX(gSmbase) ASM_GLOBAL ASM_PFX(gSmbase)
ASM_GLOBAL ASM_PFX(mXdSupported)
ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr) ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)
.equ MSR_IA32_MISC_ENABLE, 0x1A0
.equ MSR_EFER, 0xc0000080
.equ MSR_EFER_XD, 0x800
# #
# Constants relating to PROCESSOR_SMM_DESCRIPTOR # Constants relating to PROCESSOR_SMM_DESCRIPTOR
# #
@ -132,6 +137,32 @@ ASM_PFX(gSmiCr3): .space 4
movl $TSS_SEGMENT, %eax movl $TSS_SEGMENT, %eax
ltr %ax ltr %ax
# enable NXE if supported
.byte 0xb0 # mov al, imm8
ASM_PFX(mXdSupported): .byte 1
cmpb $0, %al
jz SkipNxe
#
# Check XD disable bit
#
movl $MSR_IA32_MISC_ENABLE, %ecx
rdmsr
subl $4, %esp
pushq %rdx # save MSR_IA32_MISC_ENABLE[63-32]
testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
jz L13
andw $0x0FFFB, %dx # clear XD Disable bit if it is set
wrmsr
L13:
movl $MSR_EFER, %ecx
rdmsr
orw $MSR_EFER_XD,%ax # enable NXE
wrmsr
jmp NxeDone
SkipNxe:
subl $8, %esp
NxeDone:
# #
# Switch to LongMode # Switch to LongMode
# #
@ -139,12 +170,13 @@ ASM_PFX(gSmiCr3): .space 4
call Base # push return address for retf later call Base # push return address for retf later
Base: Base:
addl $(LongMode - Base), (%rsp) # offset for far retf, seg is the 1st arg addl $(LongMode - Base), (%rsp) # offset for far retf, seg is the 1st arg
movl $0xc0000080, %ecx
movl $MSR_EFER, %ecx
rdmsr rdmsr
orb $1,%ah orb $1,%ah # enable LME
wrmsr wrmsr
movq %cr0, %rbx movq %cr0, %rbx
orl $0x080010000, %ebx # enable paging + WP orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
movq %rbx, %cr0 movq %rbx, %cr0
retf retf
LongMode: # long mode (64-bit code) starts here LongMode: # long mode (64-bit code) starts here
@ -162,10 +194,10 @@ LongMode: # long mode (64-bit code) starts here
# jmp _SmiHandler ; instruction is not needed # jmp _SmiHandler ; instruction is not needed
_SmiHandler: _SmiHandler:
movq (%rsp), %rbx movq 8(%rsp), %rbx
# Save FP registers # Save FP registers
subq $0x208, %rsp subq $0x200, %rsp
.byte 0x48 # FXSAVE64 .byte 0x48 # FXSAVE64
fxsave (%rsp) fxsave (%rsp)
@ -191,6 +223,21 @@ _SmiHandler:
.byte 0x48 # FXRSTOR64 .byte 0x48 # FXRSTOR64
fxrstor (%rsp) fxrstor (%rsp)
addq $0x200, %rsp
movabsq $ASM_PFX(mXdSupported), %rax
movb (%rax), %al
cmpb $0, %al
jz L16
popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
testl $BIT2, %edx
jz L16
movl $MSR_IA32_MISC_ENABLE, %ecx
rdmsr
orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
wrmsr
L16:
rsm rsm
ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint

View File

@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ; ;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> ; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials ; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License ; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at ; which accompanies this distribution. The full text of the license may be found at
@ -29,8 +29,12 @@ EXTERNDEF gcSmiHandlerSize:WORD
EXTERNDEF gSmiCr3:DWORD EXTERNDEF gSmiCr3:DWORD
EXTERNDEF gSmiStack:DWORD EXTERNDEF gSmiStack:DWORD
EXTERNDEF gSmbase:DWORD EXTERNDEF gSmbase:DWORD
EXTERNDEF mXdSupported:BYTE
EXTERNDEF gSmiHandlerIdtr:FWORD EXTERNDEF gSmiHandlerIdtr:FWORD
MSR_IA32_MISC_ENABLE EQU 1A0h
MSR_EFER EQU 0c0000080h
MSR_EFER_XD EQU 0800h
; ;
; Constants relating to PROCESSOR_SMM_DESCRIPTOR ; Constants relating to PROCESSOR_SMM_DESCRIPTOR
@ -130,17 +134,44 @@ gSmiCr3 DD ?
mov eax, TSS_SEGMENT mov eax, TSS_SEGMENT
ltr ax ltr ax
; enable NXE if supported
DB 0b0h ; mov al, imm8
mXdSupported DB 1
cmp al, 0
jz @SkipXd
;
; Check XD disable bit
;
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
sub esp, 4
push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
jz @f
and dx, 0FFFBh ; clear XD Disable bit if it is set
wrmsr
@@:
mov ecx, MSR_EFER
rdmsr
or ax, MSR_EFER_XD ; enable NXE
wrmsr
jmp @XdDone
@SkipXd:
sub esp, 8
@XdDone:
; Switch into @LongMode ; Switch into @LongMode
push LONG_MODE_CS ; push cs hardcore here push LONG_MODE_CS ; push cs hardcore here
call Base ; push return address for retf later call Base ; push return address for retf later
Base: Base:
add dword ptr [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg add dword ptr [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
mov ecx, 0c0000080h
mov ecx, MSR_EFER
rdmsr rdmsr
or ah, 1 or ah, 1 ; enable LME
wrmsr wrmsr
mov rbx, cr0 mov rbx, cr0
or ebx, 080010000h ; enable paging + WP or ebx, 080010023h ; enable paging + WP + NE + MP + PE
mov cr0, rbx mov cr0, rbx
retf retf
@LongMode: ; long mode (64-bit code) starts here @LongMode: ; long mode (64-bit code) starts here
@ -163,7 +194,7 @@ _SmiHandler:
; ;
; Save FP registers ; Save FP registers
; ;
sub rsp, 208h sub rsp, 200h
DB 48h ; FXSAVE64 DB 48h ; FXSAVE64
fxsave [rsp] fxsave [rsp]
@ -172,15 +203,15 @@ _SmiHandler:
mov rcx, rbx mov rcx, rbx
mov rax, CpuSmmDebugEntry mov rax, CpuSmmDebugEntry
call rax call rax
mov rcx, rbx mov rcx, rbx
mov rax, SmiRendezvous ; rax <- absolute addr of SmiRedezvous mov rax, SmiRendezvous ; rax <- absolute addr of SmiRedezvous
call rax call rax
mov rcx, rbx mov rcx, rbx
mov rax, CpuSmmDebugExit mov rax, CpuSmmDebugExit
call rax call rax
add rsp, 20h add rsp, 20h
; ;
@ -189,6 +220,21 @@ _SmiHandler:
DB 48h ; FXRSTOR64 DB 48h ; FXRSTOR64
fxrstor [rsp] fxrstor [rsp]
add rsp, 200h
mov rax, ASM_PFX(mXdSupported)
mov al, [rax]
cmp al, 0
jz @f
pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2
jz @f
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
wrmsr
@@:
rsm rsm
gcSmiHandlerSize DW $ - _SmiEntryPoint gcSmiHandlerSize DW $ - _SmiEntryPoint

View File

@ -22,6 +22,10 @@
; Variables referrenced by C code ; Variables referrenced by C code
; ;
%define MSR_IA32_MISC_ENABLE 0x1A0
%define MSR_EFER 0xc0000080
%define MSR_EFER_XD 0x800
; ;
; Constants relating to PROCESSOR_SMM_DESCRIPTOR ; Constants relating to PROCESSOR_SMM_DESCRIPTOR
; ;
@ -50,6 +54,7 @@ extern ASM_PFX(CpuSmmDebugEntry)
extern ASM_PFX(CpuSmmDebugExit) extern ASM_PFX(CpuSmmDebugExit)
global ASM_PFX(gSmbase) global ASM_PFX(gSmbase)
global ASM_PFX(mXdSupported)
global ASM_PFX(gSmiStack) global ASM_PFX(gSmiStack)
global ASM_PFX(gSmiCr3) global ASM_PFX(gSmiCr3)
global ASM_PFX(gcSmiHandlerTemplate) global ASM_PFX(gcSmiHandlerTemplate)
@ -69,7 +74,7 @@ _SmiEntryPoint:
mov [cs:bx + 2], eax mov [cs:bx + 2], eax
o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx] o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
mov ax, PROTECT_MODE_CS mov ax, PROTECT_MODE_CS
mov [cs:bx-0x2],ax mov [cs:bx-0x2],ax
DB 0x66, 0xbf ; mov edi, SMBASE DB 0x66, 0xbf ; mov edi, SMBASE
ASM_PFX(gSmbase): DD 0 ASM_PFX(gSmbase): DD 0
lea eax, [edi + (@ProtectedMode - _SmiEntryPoint) + 0x8000] lea eax, [edi + (@ProtectedMode - _SmiEntryPoint) + 0x8000]
@ -79,7 +84,7 @@ ASM_PFX(gSmbase): DD 0
or ebx, 0x23 or ebx, 0x23
mov cr0, ebx mov cr0, ebx
jmp dword 0x0:0x0 jmp dword 0x0:0x0
_GdtDesc: _GdtDesc:
DW 0 DW 0
DD 0 DD 0
@ -112,17 +117,44 @@ ASM_PFX(gSmiCr3): DD 0
mov eax, TSS_SEGMENT mov eax, TSS_SEGMENT
ltr ax ltr ax
; enable NXE if supported
DB 0xb0 ; mov al, imm8
ASM_PFX(mXdSupported): DB 1
cmp al, 0
jz @SkipXd
;
; Check XD disable bit
;
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
sub esp, 4
push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
jz .0
and dx, 0xFFFB ; clear XD Disable bit if it is set
wrmsr
.0:
mov ecx, MSR_EFER
rdmsr
or ax, MSR_EFER_XD ; enable NXE
wrmsr
jmp @XdDone
@SkipXd:
sub esp, 8
@XdDone:
; Switch into @LongMode ; Switch into @LongMode
push LONG_MODE_CS ; push cs hardcore here push LONG_MODE_CS ; push cs hardcore here
call Base ; push reture address for retf later call Base ; push return address for retf later
Base: Base:
add dword [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg add dword [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
mov ecx, 0xc0000080
mov ecx, MSR_EFER
rdmsr rdmsr
or ah, 1 or ah, 1 ; enable LME
wrmsr wrmsr
mov rbx, cr0 mov rbx, cr0
or ebx, 080010000h ; enable paging + WP or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
mov cr0, rbx mov cr0, rbx
retf retf
@LongMode: ; long mode (64-bit code) starts here @LongMode: ; long mode (64-bit code) starts here
@ -140,12 +172,12 @@ Base:
; jmp _SmiHandler ; instruction is not needed ; jmp _SmiHandler ; instruction is not needed
_SmiHandler: _SmiHandler:
mov rbx, [rsp] ; rbx <- CpuIndex mov rbx, [rsp + 0x8] ; rbx <- CpuIndex
; ;
; Save FP registers ; Save FP registers
; ;
sub rsp, 0x208 sub rsp, 0x200
DB 0x48 ; FXSAVE64 DB 0x48 ; FXSAVE64
fxsave [rsp] fxsave [rsp]
@ -154,15 +186,15 @@ _SmiHandler:
mov rcx, rbx mov rcx, rbx
mov rax, CpuSmmDebugEntry mov rax, CpuSmmDebugEntry
call rax call rax
mov rcx, rbx mov rcx, rbx
mov rax, SmiRendezvous ; rax <- absolute addr of SmiRedezvous mov rax, SmiRendezvous ; rax <- absolute addr of SmiRedezvous
call rax call rax
mov rcx, rbx mov rcx, rbx
mov rax, CpuSmmDebugExit mov rax, CpuSmmDebugExit
call rax call rax
add rsp, 0x20 add rsp, 0x20
; ;
@ -171,6 +203,21 @@ _SmiHandler:
DB 0x48 ; FXRSTOR64 DB 0x48 ; FXRSTOR64
fxrstor [rsp] fxrstor [rsp]
add rsp, 0x200
mov rax, ASM_PFX(mXdSupported)
mov al, [rax]
cmp al, 0
jz .1
pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2
jz .1
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
wrmsr
.1:
rsm rsm
gcSmiHandlerSize DW $ - _SmiEntryPoint gcSmiHandlerSize DW $ - _SmiEntryPoint

View File

@ -1,6 +1,6 @@
#------------------------------------------------------------------------------ #------------------------------------------------------------------------------
# #
# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials # This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License # are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at # which accompanies this distribution. The full text of the license may be found at
@ -128,244 +128,8 @@ ASM_PFX(gcSmiGdtr):
.quad NullSeg .quad NullSeg
ASM_PFX(gcSmiIdtr): ASM_PFX(gcSmiIdtr):
.word IDT_SIZE - 1 .word 0
.quad _SmiIDT .quad 0
#
# Here is the IDT. There are 32 (not 255) entries in it since only processor
# generated exceptions will be handled.
#
_SmiIDT:
# The following segment repeats 32 times:
# No. 1
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 2
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 3
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 4
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 5
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 6
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 7
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 8
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 9
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 10
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 11
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 12
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 13
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 14
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 15
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 16
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 17
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 18
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 19
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 20
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 21
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 22
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 23
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 24
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 25
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 26
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 27
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 28
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 29
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 30
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 31
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
# No. 32
.word 0 # Offset 0:15
.word CODE_SEL
.byte 0 # Unused
.byte 0x8e # Interrupt Gate, Present
.word 0 # Offset 16:31
.quad 0 # Offset 32:63
_SmiIDTEnd:
.equ IDT_SIZE, (_SmiIDTEnd - _SmiIDT)
.text .text
@ -600,11 +364,3 @@ L5:
addq $16, %rsp # skip INT# & ErrCode addq $16, %rsp # skip INT# & ErrCode
iretq iretq
ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
ASM_PFX(InitializeIDTSmmStackGuard):
# If SMM Stack Guard feature is enabled, set the IST field of
# the interrupt gate for Page Fault Exception to be 1
#
movabsq $_SmiIDT + 14 * 16, %rax
movb $1, 4(%rax)
ret

View File

@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ; ;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR> ; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials ; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License ; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at ; which accompanies this distribution. The full text of the license may be found at
@ -144,27 +144,8 @@ gcSmiGdtr LABEL FWORD
DQ offset NullSeg DQ offset NullSeg
gcSmiIdtr LABEL FWORD gcSmiIdtr LABEL FWORD
DW IDT_SIZE - 1 DW 0
DQ offset _SmiIDT DQ 0
.data
;
; Here is the IDT. There are 32 (not 255) entries in it since only processor
; generated exceptions will be handled.
;
_SmiIDT:
REPEAT 32
DW 0 ; Offset 0:15
DW CODE_SEL ; Segment selector
DB 0 ; Unused
DB 8eh ; Interrupt Gate, Present
DW 0 ; Offset 16:31
DQ 0 ; Offset 32:63
ENDM
_SmiIDTEnd:
IDT_SIZE = (offset _SmiIDTEnd - offset _SmiIDT)
.code .code
@ -400,14 +381,4 @@ PageFaultIdtHandlerSmmProfile PROC
iretq iretq
PageFaultIdtHandlerSmmProfile ENDP PageFaultIdtHandlerSmmProfile ENDP
InitializeIDTSmmStackGuard PROC
;
; If SMM Stack Guard feature is enabled, set the IST field of
; the interrupt gate for Page Fault Exception to be 1
;
lea rax, _SmiIDT + 14 * 16
mov byte ptr [rax + 4], 1
ret
InitializeIDTSmmStackGuard ENDP
END END

View File

@ -145,25 +145,8 @@ ASM_PFX(gcSmiGdtr):
DQ NullSeg DQ NullSeg
ASM_PFX(gcSmiIdtr): ASM_PFX(gcSmiIdtr):
DW IDT_SIZE - 1 DW 0
DQ _SmiIDT DQ 0
;
; Here is the IDT. There are 32 (not 255) entries in it since only processor
; generated exceptions will be handled.
;
_SmiIDT:
%rep 32
DW 0 ; 0:15
DW CODE_SEL ; Segment selector
DB 0 ; Unused
DB 0x8e ; Interrupt Gate, Present
DW 0 ; 16:31
DQ 0 ; 32:63
%endrep
_SmiIDTEnd:
IDT_SIZE equ _SmiIDTEnd - _SmiIDT
DEFAULT REL DEFAULT REL
SECTION .text SECTION .text
@ -400,13 +383,3 @@ ASM_PFX(PageFaultIdtHandlerSmmProfile):
add rsp, 16 ; skip INT# & ErrCode add rsp, 16 ; skip INT# & ErrCode
iretq iretq
global ASM_PFX(InitializeIDTSmmStackGuard)
ASM_PFX(InitializeIDTSmmStackGuard):
;
; If SMM Stack Guard feature is enabled, set the IST field of
; the interrupt gate for Page Fault Exception to be 1
;
lea rax, [_SmiIDT + 14 * 16]
mov byte [rax + 4], 1
ret

View File

@ -14,6 +14,30 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "PiSmmCpuDxeSmm.h" #include "PiSmmCpuDxeSmm.h"
EFI_PHYSICAL_ADDRESS mGdtBuffer;
UINTN mGdtBufferSize;
/**
Initialize IDT for SMM Stack Guard.
**/
VOID
EFIAPI
InitializeIDTSmmStackGuard (
VOID
)
{
IA32_IDT_GATE_DESCRIPTOR *IdtGate;
//
// If SMM Stack Guard feature is enabled, set the IST field of
// the interrupt gate for Page Fault Exception to be 1
//
IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
IdtGate += EXCEPT_IA32_PAGE_FAULT;
IdtGate->Bits.Reserved_0 = 1;
}
/** /**
Initialize Gdt for all processors. Initialize Gdt for all processors.
@ -41,8 +65,10 @@ InitGdt (
// on each SMI entry. // on each SMI entry.
// //
GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus)); mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL); ASSERT (GdtTssTables != NULL);
mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize; GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) { for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {

View File

@ -1,7 +1,7 @@
/** @file /** @file
X64 processor specific functions to enable SMM profile. X64 processor specific functions to enable SMM profile.
Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR> Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at which accompanies this distribution. The full text of the license may be found at
@ -45,12 +45,13 @@ InitSmmS3Cr3 (
// //
// Generate PAE page table for the first 4GB memory space // Generate PAE page table for the first 4GB memory space
// //
Pages = Gen4GPageTable (1, FALSE); Pages = Gen4GPageTable (FALSE);
// //
// Fill Page-Table-Level4 (PML4) entry // Fill Page-Table-Level4 (PML4) entry
// //
PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (1)); PTEntry = (UINT64*)AllocatePageTableMemory (1);
ASSERT (PTEntry != NULL);
*PTEntry = Pages | PAGE_ATTRIBUTE_BITS; *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry)); ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));