;------------------------------------------------------------------------------ ;
; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
;   SmiEntry.nasm
;
; Abstract:
;
;   Code template of the SMI handler for a particular processor
;
;-------------------------------------------------------------------------------
%include "StuffRsbNasm.inc"

%define MSR_IA32_MISC_ENABLE  0x1A0
%define MSR_EFER              0xc0000080
%define MSR_EFER_XD           0x800

;
; Constants relating to PROCESSOR_SMM_DESCRIPTOR
;
%define DSC_OFFSET   0xfb00
%define DSC_GDTPTR   0x30
%define DSC_GDTSIZ   0x38
%define DSC_CS       14
%define DSC_DS       16
%define DSC_SS       18
%define DSC_OTHERSEG 20

%define PROTECT_MODE_CS 0x8
%define PROTECT_MODE_DS 0x20
%define TSS_SEGMENT     0x40
extern  ASM_PFX(SmiRendezvous)
extern  ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
extern  ASM_PFX(CpuSmmDebugEntry)
extern  ASM_PFX(CpuSmmDebugExit)

global  ASM_PFX(gcSmiHandlerTemplate)
global  ASM_PFX(gcSmiHandlerSize)
global  ASM_PFX(gPatchSmiCr3)
global  ASM_PFX(gPatchSmiStack)
global  ASM_PFX(gPatchSmbase)
extern  ASM_PFX(mXdSupported)
global  ASM_PFX(gPatchXdSupported)
extern  ASM_PFX(gSmiHandlerIdtr)

    SECTION .text
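
;
; The code template below is copied to SMBASE + 0x8000 for each processor;
; an SMI starts executing it in 16-bit real mode.  The gPatch* labels mark
; immediate operands that PiSmmCpuDxeSmm patches per processor (SMBASE,
; SMM stack pointer, CR3, XD support) before the template is used.
;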
BITS 16
ASM_PFX(gcSmiHandlerTemplate):
_SmiEntryPoint:
    mov     bx, _GdtDesc - _SmiEntryPoint + 0x8000
    mov     ax, [cs:DSC_OFFSET + DSC_GDTSIZ]
    dec     ax
    mov     [cs:bx], ax
    mov     eax, [cs:DSC_OFFSET + DSC_GDTPTR]
    mov     [cs:bx + 2], eax
    mov     ebp, eax                      ; ebp = GDT base
o32 lgdt    [cs:bx]                       ; lgdt fword ptr cs:[bx]
    mov     ax, PROTECT_MODE_CS
    mov     [cs:bx-0x2], ax
    mov     edi, strict dword 0           ; source operand will be patched
ASM_PFX(gPatchSmbase):
    lea     eax, [edi + (@32bit - _SmiEntryPoint) + 0x8000]
    mov     [cs:bx-0x6], eax
    mov     ebx, cr0
    and     ebx, 0x9ffafff3
    or      ebx, 0x23
    mov     cr0, ebx
    jmp     dword 0x0:0x0
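;
; _GdtDesc doubles as the 6-byte GDT pseudo-descriptor loaded by LGDT above
; and as the reference point for patching the far JMP: because it directly
; follows the far-JMP encoding, the stores to [cs:bx-0x6] and [cs:bx-0x2]
; fill in the jump's 32-bit offset (flat address of @32bit) and its code
; selector (PROTECT_MODE_CS).
;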
_GdtDesc:
    DW 0
    DD 0

BITS 32
@32bit:
    mov     ax, PROTECT_MODE_DS
o16 mov     ds, ax
o16 mov     es, ax
o16 mov     fs, ax
o16 mov     gs, ax
o16 mov     ss, ax
    mov     esp, strict dword 0           ; source operand will be patched
ASM_PFX(gPatchSmiStack):
    mov     eax, ASM_PFX(gSmiHandlerIdtr)
    lidt    [eax]
    jmp     ProtFlatMode

ProtFlatMode:
    mov     eax, strict dword 0           ; source operand will be patched
ASM_PFX(gPatchSmiCr3):
    mov     cr3, eax
;
; Need to test for CR4 specific bit support
;
    mov     eax, 1
    cpuid                                 ; use CPUID to determine if specific CR4 bits are supported
    xor     eax, eax                      ; Clear EAX
    test    edx, BIT2                     ; Check for DE capabilities
    jz      .0
    or      eax, BIT3
.0:
    test    edx, BIT6                     ; Check for PAE capabilities
    jz      .1
    or      eax, BIT5
.1:
    test    edx, BIT7                     ; Check for MCE capabilities
    jz      .2
    or      eax, BIT6
.2:
    test    edx, BIT24                    ; Check for FXSR capabilities
    jz      .3
    or      eax, BIT9
.3:
    test    edx, BIT25                    ; Check for SSE capabilities
    jz      .4
    or      eax, BIT10
.4:                                       ; as cr4.PGE is not set here, refresh cr3
    mov     cr4, eax                      ; in PreModifyMtrrs() to flush TLB.
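
;
; Load the TSS only when PcdCpuSmmStackGuard is set: the stack-guard feature
; relies on a task switch to obtain a usable stack when the SMM stack
; overflows into its guard page.  The busy flag in the TSS descriptor is
; cleared first so that LTR does not fault on subsequent SMIs.
;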
    cmp     byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
    jz      .6
; Load TSS
    mov     byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
    mov     eax, TSS_SEGMENT
    ltr     ax
.6:

; enable NXE if supported
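;
; The immediate operand at gPatchXdSupported is patched to 0 by
; CheckFeatureSupported() when any processor lacks XD support.  When XD is
; available, MSR_IA32_MISC_ENABLE[34] (XD Disable) is cleared if set and
; EFER.NXE is enabled; the original high dword of MSR_IA32_MISC_ENABLE is
; pushed so the exit path can restore the XD Disable bit.  @SkipXd reserves
; the same stack slot, so [esp + 4] still addresses the CPU index either way.
;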
    mov     al, strict byte 1             ; source operand may be patched
ASM_PFX(gPatchXdSupported):
    cmp     al, 0
    jz      @SkipXd
;
; Check XD disable bit
;
    mov     ecx, MSR_IA32_MISC_ENABLE
    rdmsr
    push    edx                           ; save MSR_IA32_MISC_ENABLE[63-32]
    test    edx, BIT2                     ; MSR_IA32_MISC_ENABLE[34]
    jz      .5
    and     dx, 0xFFFB                    ; clear XD Disable bit if it is set
    wrmsr
.5:
    mov     ecx, MSR_EFER
    rdmsr
    or      ax, MSR_EFER_XD               ; enable NXE
    wrmsr
    jmp     @XdDone
@SkipXd:
    sub     esp, 4
@XdDone:

    mov     ebx, cr0
    or      ebx, 0x80010023               ; enable paging + WP + NE + MP + PE
    mov     cr0, ebx
    lea     ebx, [edi + DSC_OFFSET]
    mov     ax, [ebx + DSC_DS]
    mov     ds, eax
    mov     ax, [ebx + DSC_OTHERSEG]
    mov     es, eax
    mov     fs, eax
    mov     gs, eax
    mov     ax, [ebx + DSC_SS]
    mov     ss, eax

;   jmp     _SmiHandler                   ; instruction is not needed
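;
; SmiHandler is entered with flat 32-bit data segments and the per-CPU SMM
; stack; [esp + 4] holds this processor's CPU index.  CpuSmmDebugEntry(),
; SmiRendezvous() and CpuSmmDebugExit() are each called with that index as
; their only stack argument, and the caller removes it again (add esp, 4).
;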

global ASM_PFX(SmiHandler)
ASM_PFX(SmiHandler):
    mov     ebx, [esp + 4]                ; CPU Index
    push    ebx
    mov     eax, ASM_PFX(CpuSmmDebugEntry)
    call    eax
    add     esp, 4

    push    ebx
    mov     eax, ASM_PFX(SmiRendezvous)
    call    eax
    add     esp, 4

    push    ebx
    mov     eax, ASM_PFX(CpuSmmDebugExit)
    call    eax
    add     esp, 4
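
;
; If XD is supported and the XD Disable bit (MSR_IA32_MISC_ENABLE[34]) was
; set when it was saved above, restore it before resuming, so SMM leaves the
; MSR exactly as the OS/firmware had configured it.
;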
    mov     eax, ASM_PFX(mXdSupported)
    mov     al, [eax]
    cmp     al, 0
    jz      .7
    pop     edx                           ; get saved MSR_IA32_MISC_ENABLE[63-32]
    test    edx, BIT2
    jz      .7
    mov     ecx, MSR_IA32_MISC_ENABLE
    rdmsr
    or      dx, BIT2                      ; set XD Disable bit if it was set before entering into SMM
    wrmsr

.7:
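    ; StuffRsb32 (from StuffRsbNasm.inc) overwrites the return stack buffer
    ; before RSM so that RSB entries created inside SMM cannot be used to
    ; steer speculative execution after the SMI returns.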
    StuffRsb32
    rsm

ASM_PFX(gcSmiHandlerSize): DW $ - _SmiEntryPoint
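
;
; gcSmiHandlerSize above is the number of bytes from _SmiEntryPoint to that
; label, i.e. the length of the template copied into SMRAM for every CPU.
; No address fixup is needed in the IA32 build, so the routine below simply
; returns.
;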
global ASM_PFX(PiSmmCpuSmiEntryFixupAddress)
ASM_PFX(PiSmmCpuSmiEntryFixupAddress):
    ret