diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.S b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.S
new file mode 100644
index 0000000000..4c0f8c8933
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.S
@@ -0,0 +1,278 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# SmiEntry.S
+#
+# Abstract:
+#
+# Code template of the SMI handler for a particular processor
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerTemplate)
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerSize)
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerOffset)
+ASM_GLOBAL ASM_PFX(gStmSmiCr3)
+ASM_GLOBAL ASM_PFX(gStmSmiStack)
+ASM_GLOBAL ASM_PFX(gStmSmbase)
+ASM_GLOBAL ASM_PFX(gStmXdSupported)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ASM_GLOBAL ASM_PFX(gStmSmiHandlerIdtr)
+
+.equ MSR_IA32_MISC_ENABLE, 0x1A0
+.equ MSR_EFER, 0xc0000080
+.equ MSR_EFER_XD, 0x800
+
+#
+# Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+#
+.equ DSC_OFFSET, 0xfb00
+.equ DSC_GDTPTR, 0x48
+.equ DSC_GDTSIZ, 0x50
+.equ DSC_CS, 0x14
+.equ DSC_DS, 0x16
+.equ DSC_SS, 0x18
+.equ DSC_OTHERSEG, 0x1A
+
+.equ PROTECT_MODE_CS, 0x08
+.equ PROTECT_MODE_DS, 0x20
+.equ TSS_SEGMENT, 0x40
+
+ .text
+ASM_PFX(gcStmSmiHandlerTemplate):
+
+_StmSmiEntryPoint:
+ .byte 0xbb # mov bx, imm16
+ .word _StmGdtDesc - _StmSmiEntryPoint + 0x8000
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTSIZ
+ decl %eax
+ movl %eax, %cs:(%edi) # mov cs:[bx], ax
+ .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTPTR
+ movw %ax, %cs:2(%edi)
+ movw %ax, %bp # ebp = GDT base
+ .byte 0x66
+ lgdt %cs:(%edi)
+# Patch ProtectedMode Segment
+ .byte 0xb8 # mov ax, imm16
+ .word PROTECT_MODE_CS # set AX for segment directly
+ movl %eax, %cs:-2(%edi) # mov cs:[bx - 2], ax
+# Patch ProtectedMode entry
+ .byte 0x66, 0xbf # mov edi, SMBASE
+ASM_PFX(gStmSmbase): .space 4
+ .byte 0x67
+ lea ((Start32bit - _StmSmiEntryPoint) + 0x8000)(%edi), %ax
+ movw %ax, %cs:-6(%edi)
+ movl %cr0, %ebx
+ .byte 0x66
+ andl $0x9ffafff3, %ebx
+ .byte 0x66
+ orl $0x23, %ebx
+ movl %ebx, %cr0
+ .byte 0x66,0xea
+ .space 4
+ .space 2
+_StmGdtDesc: .space 4
+ .space 2
+
+Start32bit:
+ movw $PROTECT_MODE_DS, %ax
+ movl %eax,%ds
+ movl %eax,%es
+ movl %eax,%fs
+ movl %eax,%gs
+ movl %eax,%ss
+ .byte 0xbc # mov esp, imm32
+ASM_PFX(gStmSmiStack): .space 4
+ movl $ASM_PFX(gStmSmiHandlerIdtr), %eax
+ lidt (%eax)
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ .byte 0xb8 # mov eax, imm32
+ASM_PFX(gStmSmiCr3): .space 4
+ movl %eax, %cr3
+#
+# Need to test for CR4 specific bit support
+#
+ movl $1, %eax
+ cpuid # use CPUID to determine if specific CR4 bits are supported
+ xorl %eax, %eax # Clear EAX
+ testl $BIT2, %edx # Check for DE capabilities
+ jz L8
+ orl $BIT3, %eax
+L8:
+ testl $BIT6, %edx # Check for PAE capabilities
+ jz L9
+ orl $BIT5, %eax
+L9:
+ testl $BIT7, %edx # Check for MCE capabilities
+ jz L10
+ orl $BIT6, %eax
+L10:
+ testl $BIT24, %edx # Check for FXSR capabilities
+ jz L11
+ orl $BIT9, %eax
+L11:
+ testl $BIT25, %edx # Check for SSE capabilities
+ jz L12
+ orl $BIT10, %eax
+L12: # as cr4.PGE is not set here, refresh cr3
+ movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ jz L5
+# Load TSS
+ movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
+ movl $TSS_SEGMENT, %eax
+ ltrw %ax
+L5:
+
+# enable NXE if supported
+ .byte 0xb0 # mov al, imm8
+ASM_PFX(gStmXdSupported): .byte 1
+ cmpb $0, %al
+ jz SkipXd
+#
+# Check XD disable bit
+#
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ pushl %edx # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L13
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L13:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+ jmp XdDone
+SkipXd:
+ subl $4, %esp
+XdDone:
+
+ movl %cr0, %ebx
+ orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
+ movl %ebx, %cr0
+ leal DSC_OFFSET(%edi),%ebx
+ movw DSC_DS(%ebx),%ax
+ movl %eax, %ds
+ movw DSC_OTHERSEG(%ebx),%ax
+ movl %eax, %es
+ movl %eax, %fs
+ movl %eax, %gs
+ movw DSC_SS(%ebx),%ax
+ movl %eax, %ss
+
+CommonHandler:
+ movl 4(%esp), %ebx
+
+ pushl %ebx
+ movl $ASM_PFX(CpuSmmDebugEntry), %eax
+ call *%eax
+ addl $4, %esp
+
+ pushl %ebx
+ movl $ASM_PFX(SmiRendezvous), %eax
+ call *%eax
+ addl $4, %esp
+
+ pushl %ebx
+ movl $ASM_PFX(CpuSmmDebugExit), %eax
+ call *%eax
+ addl $4, %esp
+
+ movl $ASM_PFX(gStmXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz L16
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L16
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+L16:
+ rsm
+
+_StmSmiHandler:
+#
+# Check XD disable bit
+#
+ xorl %esi, %esi
+ movl $ASM_PFX(gStmXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz StmXdDone
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ movl %edx, %esi # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L14
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L14:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+StmXdDone:
+ push %esi
+
+    # The steps below are needed because the STM does not run the code above;
+    # we have to set up the IDT/CR0/CR4 here.
+ movl $ASM_PFX(gStmSmiHandlerIdtr), %eax
+ lidt (%eax)
+
+ movl %cr0, %eax
+ orl $0x80010023, %eax # enable paging + WP + NE + MP + PE
+ movl %eax, %cr0
+#
+# Need to test for CR4 specific bit support
+#
+ movl $1, %eax
+ cpuid # use CPUID to determine if specific CR4 bits are supported
+ movl %cr4, %eax # init EAX
+ testl $BIT2, %edx # Check for DE capabilities
+ jz L28
+ orl $BIT3, %eax
+L28:
+ testl $BIT6, %edx # Check for PAE capabilities
+ jz L29
+ orl $BIT5, %eax
+L29:
+ testl $BIT7, %edx # Check for MCE capabilities
+ jz L30
+ orl $BIT6, %eax
+L30:
+ testl $BIT24, %edx # Check for FXSR capabilities
+ jz L31
+ orl $BIT9, %eax
+L31:
+ testl $BIT25, %edx # Check for SSE capabilities
+ jz L32
+ orl $BIT10, %eax
+L32: # as cr4.PGE is not set here, refresh cr3
+ movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.
+ # STM init finish
+ jmp CommonHandler
+
+
+ASM_PFX(gcStmSmiHandlerSize) : .word . - _StmSmiEntryPoint
+ASM_PFX(gcStmSmiHandlerOffset): .word _StmSmiHandler - _StmSmiEntryPoint
+
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.asm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.asm
new file mode 100644
index 0000000000..94888d5aec
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.asm
@@ -0,0 +1,285 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiEntry.asm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+ .686p
+ .model flat,C
+ .xmm
+
+MSR_IA32_MISC_ENABLE EQU 1A0h
+MSR_EFER EQU 0c0000080h
+MSR_EFER_XD EQU 0800h
+
+;
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+;
+DSC_OFFSET EQU 0fb00h
+DSC_GDTPTR EQU 48h
+DSC_GDTSIZ EQU 50h
+DSC_CS EQU 14h
+DSC_DS EQU 16h
+DSC_SS EQU 18h
+DSC_OTHERSEG EQU 1Ah
+
+PROTECT_MODE_CS EQU 08h
+PROTECT_MODE_DS EQU 20h
+TSS_SEGMENT EQU 40h
+
+SmiRendezvous PROTO C
+CpuSmmDebugEntry PROTO C
+CpuSmmDebugExit PROTO C
+
+EXTERNDEF gcStmSmiHandlerTemplate:BYTE
+EXTERNDEF gcStmSmiHandlerSize:WORD
+EXTERNDEF gcStmSmiHandlerOffset:WORD
+EXTERNDEF gStmSmiCr3:DWORD
+EXTERNDEF gStmSmiStack:DWORD
+EXTERNDEF gStmSmbase:DWORD
+EXTERNDEF gStmXdSupported:BYTE
+EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
+EXTERNDEF gStmSmiHandlerIdtr:FWORD
+
+ .code
+
+gcStmSmiHandlerTemplate LABEL BYTE
+
+_StmSmiEntryPoint:
+ DB 0bbh ; mov bx, imm16
+ DW offset _StmGdtDesc - _StmSmiEntryPoint + 8000h
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTSIZ
+ dec eax
+ mov cs:[edi], eax ; mov cs:[bx], ax
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTPTR
+ mov cs:[edi + 2], ax ; mov cs:[bx + 2], eax
+ mov bp, ax ; ebp = GDT base
+ DB 66h
+ lgdt fword ptr cs:[edi] ; lgdt fword ptr cs:[bx]
+; Patch ProtectedMode Segment
+ DB 0b8h ; mov ax, imm16
+ DW PROTECT_MODE_CS ; set AX for segment directly
+ mov cs:[edi - 2], eax ; mov cs:[bx - 2], ax
+; Patch ProtectedMode entry
+ DB 66h, 0bfh ; mov edi, SMBASE
+gStmSmbase DD ?
+ DB 67h
+ lea ax, [edi + (@32bit - _StmSmiEntryPoint) + 8000h]
+ mov cs:[edi - 6], ax ; mov cs:[bx - 6], eax
+ mov ebx, cr0
+ DB 66h
+ and ebx, 9ffafff3h
+ DB 66h
+ or ebx, 23h
+ mov cr0, ebx
+ DB 66h, 0eah
+ DD ?
+ DW ?
+_StmGdtDesc FWORD ?
+
+@32bit:
+ mov ax, PROTECT_MODE_DS
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
+ DB 0bch ; mov esp, imm32
+gStmSmiStack DD ?
+ mov eax, offset gStmSmiHandlerIdtr
+ lidt fword ptr [eax]
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ DB 0b8h ; mov eax, imm32
+gStmSmiCr3 DD ?
+ mov cr3, eax
+;
+; Need to test for CR4 specific bit support
+;
+ mov eax, 1
+ cpuid ; use CPUID to determine if specific CR4 bits are supported
+ xor eax, eax ; Clear EAX
+ test edx, BIT2 ; Check for DE capabilities
+ jz @f
+ or eax, BIT3
+@@:
+ test edx, BIT6 ; Check for PAE capabilities
+ jz @f
+ or eax, BIT5
+@@:
+ test edx, BIT7 ; Check for MCE capabilities
+ jz @f
+ or eax, BIT6
+@@:
+ test edx, BIT24 ; Check for FXSR capabilities
+ jz @f
+ or eax, BIT9
+@@:
+ test edx, BIT25 ; Check for SSE capabilities
+ jz @f
+ or eax, BIT10
+@@: ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
+ jz @F
+; Load TSS
+ mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+@@:
+
+; enable NXE if supported
+ DB 0b0h ; mov al, imm8
+gStmXdSupported DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
+ mov ebx, cr0
+ or ebx, 080010023h ; enable paging + WP + NE + MP + PE
+ mov cr0, ebx
+ lea ebx, [edi + DSC_OFFSET]
+ mov ax, [ebx + DSC_DS]
+ mov ds, eax
+ mov ax, [ebx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [ebx + DSC_SS]
+ mov ss, eax
+
+CommonHandler:
+ mov ebx, [esp + 4] ; CPU Index
+ push ebx
+ mov eax, CpuSmmDebugEntry
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, SmiRendezvous
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, CpuSmmDebugExit
+ call eax
+ add esp, 4
+
+ mov eax, gStmXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @f
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+@@:
+ rsm
+
+_StmSmiHandler:
+;
+; Check XD disable bit
+;
+ xor esi, esi
+ mov eax, gStmXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @StmXdDone
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone:
+ push esi
+
+    ; The steps below are needed because the STM does not run the code above;
+    ; we have to set up the IDT/CR0/CR4 here.
+ mov eax, offset gStmSmiHandlerIdtr
+ lidt fword ptr [eax]
+
+
+ mov eax, cr0
+ or eax, 80010023h ; enable paging + WP + NE + MP + PE
+ mov cr0, eax
+;
+; Need to test for CR4 specific bit support
+;
+ mov eax, 1
+ cpuid ; use CPUID to determine if specific CR4 bits are supported
+ mov eax, cr4 ; init EAX
+ test edx, BIT2 ; Check for DE capabilities
+ jz @f
+ or eax, BIT3
+@@:
+ test edx, BIT6 ; Check for PAE capabilities
+ jz @f
+ or eax, BIT5
+@@:
+ test edx, BIT7 ; Check for MCE capabilities
+ jz @f
+ or eax, BIT6
+@@:
+ test edx, BIT24 ; Check for FXSR capabilities
+ jz @f
+ or eax, BIT9
+@@:
+ test edx, BIT25 ; Check for SSE capabilities
+ jz @f
+ or eax, BIT10
+@@: ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+ ; STM init finish
+ jmp CommonHandler
+
+gcStmSmiHandlerSize DW $ - _StmSmiEntryPoint
+gcStmSmiHandlerOffset DW _StmSmiHandler - _StmSmiEntryPoint
+
+ END
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.nasm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.nasm
new file mode 100644
index 0000000000..b1c84a494f
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiEntry.nasm
@@ -0,0 +1,271 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiEntry.nasm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+;
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+;
+%define DSC_OFFSET 0xfb00
+%define DSC_GDTPTR 0x48
+%define DSC_GDTSIZ 0x50
+%define DSC_CS 0x14
+%define DSC_DS 0x16
+%define DSC_SS 0x18
+%define DSC_OTHERSEG 0x1a
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+%define TSS_SEGMENT 0x40
+
+extern ASM_PFX(SmiRendezvous)
+extern ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+extern ASM_PFX(CpuSmmDebugEntry)
+extern ASM_PFX(CpuSmmDebugExit)
+
+global ASM_PFX(gcStmSmiHandlerTemplate)
+global ASM_PFX(gcStmSmiHandlerSize)
+global ASM_PFX(gcStmSmiHandlerOffset)
+global ASM_PFX(gStmSmiCr3)
+global ASM_PFX(gStmSmiStack)
+global ASM_PFX(gStmSmbase)
+global ASM_PFX(gStmXdSupported)
+extern ASM_PFX(gStmSmiHandlerIdtr)
+
+ SECTION .text
+
+BITS 16
+ASM_PFX(gcStmSmiHandlerTemplate):
+_StmSmiEntryPoint:
+ mov bx, _StmGdtDesc - _StmSmiEntryPoint + 0x8000
+ mov ax,[cs:DSC_OFFSET + DSC_GDTSIZ]
+ dec ax
+ mov [cs:bx], ax
+ mov eax, [cs:DSC_OFFSET + DSC_GDTPTR]
+ mov [cs:bx + 2], eax
+ mov ebp, eax ; ebp = GDT base
+o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
+ mov ax, PROTECT_MODE_CS
+ mov [cs:bx-0x2],ax
+ DB 0x66, 0xbf ; mov edi, SMBASE
+ASM_PFX(gStmSmbase): DD 0
+ lea eax, [edi + (@32bit - _StmSmiEntryPoint) + 0x8000]
+ mov [cs:bx-0x6],eax
+ mov ebx, cr0
+ and ebx, 0x9ffafff3
+ or ebx, 0x23
+ mov cr0, ebx
+ jmp dword 0x0:0x0
+_StmGdtDesc:
+ DW 0
+ DD 0
+
+BITS 32
+@32bit:
+ mov ax, PROTECT_MODE_DS
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax
+ DB 0xbc ; mov esp, imm32
+ASM_PFX(gStmSmiStack): DD 0
+ mov eax, ASM_PFX(gStmSmiHandlerIdtr)
+ lidt [eax]
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ DB 0xb8 ; mov eax, imm32
+ASM_PFX(gStmSmiCr3): DD 0
+ mov cr3, eax
+;
+; Need to test for CR4 specific bit support
+;
+ mov eax, 1
+ cpuid ; use CPUID to determine if specific CR4 bits are supported
+ xor eax, eax ; Clear EAX
+ test edx, BIT2 ; Check for DE capabilities
+ jz .0
+ or eax, BIT3
+.0:
+ test edx, BIT6 ; Check for PAE capabilities
+ jz .1
+ or eax, BIT5
+.1:
+ test edx, BIT7 ; Check for MCE capabilities
+ jz .2
+ or eax, BIT6
+.2:
+ test edx, BIT24 ; Check for FXSR capabilities
+ jz .3
+ or eax, BIT9
+.3:
+ test edx, BIT25 ; Check for SSE capabilities
+ jz .4
+ or eax, BIT10
+.4: ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
+ jz .6
+; Load TSS
+ mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+.6:
+
+; enable NXE if supported
+    DB      0xb0                        ; mov al, imm8
+ASM_PFX(gStmXdSupported): DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .5
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.5:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
+ mov ebx, cr0
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, ebx
+ lea ebx, [edi + DSC_OFFSET]
+ mov ax, [ebx + DSC_DS]
+ mov ds, eax
+ mov ax, [ebx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [ebx + DSC_SS]
+ mov ss, eax
+
+CommonHandler:
+ mov ebx, [esp + 4] ; CPU Index
+ push ebx
+ mov eax, ASM_PFX(CpuSmmDebugEntry)
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, ASM_PFX(SmiRendezvous)
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, ASM_PFX(CpuSmmDebugExit)
+ call eax
+ add esp, 4
+
+ mov eax, ASM_PFX(gStmXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz .7
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .7
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.7:
+ rsm
+
+
+_StmSmiHandler:
+;
+; Check XD disable bit
+;
+ xor esi, esi
+ mov eax, ASM_PFX(gStmXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz @StmXdDone
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .5
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.5:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone:
+ push esi
+
+    ; The steps below are needed because the STM does not run the code above;
+    ; we have to set up the IDT/CR0/CR4 here.
+ mov eax, ASM_PFX(gStmSmiHandlerIdtr)
+ lidt [eax]
+
+ mov eax, cr0
+ or eax, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, eax
+;
+; Need to test for CR4 specific bit support
+;
+ mov eax, 1
+ cpuid ; use CPUID to determine if specific CR4 bits are supported
+ mov eax, cr4 ; init EAX
+ test edx, BIT2 ; Check for DE capabilities
+ jz .0
+ or eax, BIT3
+.0:
+ test edx, BIT6 ; Check for PAE capabilities
+ jz .1
+ or eax, BIT5
+.1:
+ test edx, BIT7 ; Check for MCE capabilities
+ jz .2
+ or eax, BIT6
+.2:
+ test edx, BIT24 ; Check for FXSR capabilities
+ jz .3
+ or eax, BIT9
+.3:
+ test edx, BIT25 ; Check for SSE capabilities
+ jz .4
+ or eax, BIT10
+.4: ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+ ; STM init finish
+ jmp CommonHandler
+
+ASM_PFX(gcStmSmiHandlerSize) : DW $ - _StmSmiEntryPoint
+ASM_PFX(gcStmSmiHandlerOffset) : DW _StmSmiHandler - _StmSmiEntryPoint
+
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.S b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.S
new file mode 100644
index 0000000000..7d0057af80
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.S
@@ -0,0 +1,174 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# SmiException.S
+#
+# Abstract:
+#
+# Exception handlers used in SM mode
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(gcStmPsd)
+
+ASM_GLOBAL ASM_PFX(SmmStmExceptionHandler)
+ASM_GLOBAL ASM_PFX(SmmStmSetup)
+ASM_GLOBAL ASM_PFX(SmmStmTeardown)
+
+.equ MSR_IA32_MISC_ENABLE, 0x1A0
+.equ MSR_EFER, 0xc0000080
+.equ MSR_EFER_XD, 0x800
+
+.equ CODE_SEL, 0x08
+.equ DATA_SEL, 0x20
+.equ TSS_SEL, 0x40
+
+ .data
+
+ASM_PFX(gcStmPsd):
+ .ascii "TXTPSSIG"
+ .word PSD_SIZE
+ .word 1 # Version
+ .long 0 # LocalApicId
+ .byte 0x5 # Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr
+ .byte 0 # BIOS to STM
+ .byte 0 # STM to BIOS
+ .byte 0
+ .word CODE_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word TSS_SEL
+ .word 0
+ .quad 0 # SmmCr3
+ .long ASM_PFX(_OnStmSetup)
+ .long 0
+ .long ASM_PFX(_OnStmTeardown)
+ .long 0
+ .quad 0 # SmmSmiHandlerRip - SMM guest entrypoint
+ .quad 0 # SmmSmiHandlerRsp
+ .quad 0
+ .long 0
+ .long 0x80010100 # RequiredStmSmmRevId
+ .long ASM_PFX(_OnException)
+ .long 0
+ .quad 0 # ExceptionStack
+ .word DATA_SEL
+ .word 0x1F # ExceptionFilter
+ .long 0
+ .quad 0
+ .quad 0 # BiosHwResourceRequirementsPtr
+ .quad 0 # AcpiRsdp
+ .byte 0 # PhysicalAddressBits
+.equ PSD_SIZE, . - ASM_PFX(gcStmPsd)
+
+ .text
+
+#------------------------------------------------------------------------------
+# SMM Exception handlers
+#------------------------------------------------------------------------------
+ASM_GLOBAL ASM_PFX(_OnException)
+ASM_PFX(_OnException):
+ movl %esp, %ecx
+ pushl %ecx
+ call ASM_PFX(SmmStmExceptionHandler)
+ addl $4, %esp
+
+ movl %eax, %ebx
+ movl $4, %eax
+ .byte 0xf, 0x1, 0xc1 # VMCALL
+ jmp .
+
+ASM_GLOBAL ASM_PFX(_OnStmSetup)
+ASM_PFX(_OnStmSetup):
+#
+# Check XD disable bit
+#
+ xorl %esi, %esi
+ movl $ASM_PFX(gStmXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz StmXdDone1
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ movl %edx, %esi # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L13
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L13:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+StmXdDone1:
+ push %esi
+
+ call ASM_PFX(SmmStmSetup)
+
+ movl $ASM_PFX(gStmXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz L14
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L14
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+L14:
+
+ rsm
+
+ASM_GLOBAL ASM_PFX(_OnStmTeardown)
+ASM_PFX(_OnStmTeardown):
+#
+# Check XD disable bit
+#
+ xorl %esi, %esi
+ movl $ASM_PFX(gStmXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz StmXdDone2
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ movl %edx, %esi # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L15
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L15:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+StmXdDone2:
+ push %esi
+
+ call ASM_PFX(SmmStmTeardown)
+
+ movl $ASM_PFX(gStmXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz L16
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L16
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+L16:
+
+ rsm
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.asm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.asm
new file mode 100644
index 0000000000..7c04ad91f2
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.asm
@@ -0,0 +1,170 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiException.asm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+ .686p
+ .model flat,C
+
+EXTERNDEF gcStmPsd:BYTE
+
+EXTERNDEF SmmStmExceptionHandler:PROC
+EXTERNDEF SmmStmSetup:PROC
+EXTERNDEF SmmStmTeardown:PROC
+EXTERNDEF gStmXdSupported:BYTE
+
+MSR_IA32_MISC_ENABLE EQU 1A0h
+MSR_EFER EQU 0c0000080h
+MSR_EFER_XD EQU 0800h
+
+CODE_SEL = 08h
+DATA_SEL = 20h
+TSS_SEL = 40h
+
+ .data
+
+gcStmPsd LABEL BYTE
+ DB 'TXTPSSIG'
+ DW PSD_SIZE
+ DW 1 ; Version
+ DD 0 ; LocalApicId
+ DB 05h ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr
+ DB 0 ; BIOS to STM
+ DB 0 ; STM to BIOS
+ DB 0
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW TSS_SEL
+ DW 0
+ DQ 0 ; SmmCr3
+ DQ _OnStmSetup
+ DQ _OnStmTeardown
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint
+ DQ 0 ; SmmSmiHandlerRsp
+ DQ 0
+ DD 0
+ DD 80010100h ; RequiredStmSmmRevId
+ DQ _OnException
+ DQ 0 ; ExceptionStack
+ DW DATA_SEL
+ DW 01Fh ; ExceptionFilter
+ DD 0
+ DQ 0
+ DQ 0 ; BiosHwResourceRequirementsPtr
+ DQ 0 ; AcpiRsdp
+ DB 0 ; PhysicalAddressBits
+PSD_SIZE = $ - offset gcStmPsd
+
+ .code
+;------------------------------------------------------------------------------
+; SMM Exception handlers
+;------------------------------------------------------------------------------
+_OnException PROC
+ mov ecx, esp
+ push ecx
+ call SmmStmExceptionHandler
+ add esp, 4
+
+ mov ebx, eax
+ mov eax, 4
+ DB 0fh, 01h, 0c1h ; VMCALL
+ jmp $
+_OnException ENDP
+
+_OnStmSetup PROC
+;
+; Check XD disable bit
+;
+ xor esi, esi
+ mov eax, gStmXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @StmXdDone1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone1:
+ push esi
+
+ call SmmStmSetup
+
+ mov eax, gStmXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @f
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+@@:
+
+ rsm
+_OnStmSetup ENDP
+
+_OnStmTeardown PROC
+;
+; Check XD disable bit
+;
+ xor esi, esi
+ mov eax, gStmXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @StmXdDone2
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone2:
+ push esi
+
+ call SmmStmTeardown
+
+ mov eax, gStmXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @f
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+@@:
+
+ rsm
+_OnStmTeardown ENDP
+
+ END
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.nasm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.nasm
new file mode 100644
index 0000000000..0ce8501ba9
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmiException.nasm
@@ -0,0 +1,176 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiException.nasm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+global ASM_PFX(gcStmPsd)
+
+extern ASM_PFX(SmmStmExceptionHandler)
+extern ASM_PFX(SmmStmSetup)
+extern ASM_PFX(SmmStmTeardown)
+extern ASM_PFX(gStmXdSupported)
+extern ASM_PFX(gStmSmiHandlerIdtr)
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+CODE_SEL equ 0x08
+DATA_SEL equ 0x20
+TSS_SEL equ 0x40
+
+ SECTION .data
+
+ASM_PFX(gcStmPsd):
+ DB 'TXTPSSIG'
+ DW PSD_SIZE
+ DW 1 ; Version
+ DD 0 ; LocalApicId
+ DB 0x05 ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr
+ DB 0 ; BIOS to STM
+ DB 0 ; STM to BIOS
+ DB 0
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW TSS_SEL
+ DW 0
+ DQ 0 ; SmmCr3
+ DD ASM_PFX(OnStmSetup)
+ DD 0
+ DD ASM_PFX(OnStmTeardown)
+ DD 0
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint
+ DQ 0 ; SmmSmiHandlerRsp
+ DQ 0
+ DD 0
+ DD 0x80010100 ; RequiredStmSmmRevId
+ DD ASM_PFX(OnException)
+ DD 0
+ DQ 0 ; ExceptionStack
+ DW DATA_SEL
+ DW 0x01F ; ExceptionFilter
+ DD 0
+ DD 0
+ DD 0
+ DQ 0 ; BiosHwResourceRequirementsPtr
+ DQ 0 ; AcpiRsdp
+ DB 0 ; PhysicalAddressBits
+PSD_SIZE equ $ - ASM_PFX(gcStmPsd)
+
+ SECTION .text
+;------------------------------------------------------------------------------
+; SMM Exception handlers
+;------------------------------------------------------------------------------
+global ASM_PFX(OnException)
+ASM_PFX(OnException):
+ mov ecx, esp
+ push ecx
+ call ASM_PFX(SmmStmExceptionHandler)
+ add esp, 4
+
+ mov ebx, eax
+ mov eax, 4
+ DB 0x0f, 0x01, 0x0c1 ; VMCALL
+ jmp $
+
+global ASM_PFX(OnStmSetup)
+ASM_PFX(OnStmSetup):
+;
+; Check XD disable bit
+;
+ xor esi, esi
+ mov eax, ASM_PFX(gStmXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz @StmXdDone1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .51
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.51:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone1:
+ push esi
+
+ call ASM_PFX(SmmStmSetup)
+
+ mov eax, ASM_PFX(gStmXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz .71
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .71
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.71:
+ rsm
+
+global ASM_PFX(OnStmTeardown)
+ASM_PFX(OnStmTeardown):
+;
+; Check XD disable bit
+;
+ xor esi, esi
+ mov eax, ASM_PFX(gStmXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz @StmXdDone2
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .52
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.52:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone2:
+ push esi
+
+ call ASM_PFX(SmmStmTeardown)
+
+ mov eax, ASM_PFX(gStmXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz .72
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .72
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.72:
+ rsm
+
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmmStmSupport.c b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmmStmSupport.c
new file mode 100644
index 0000000000..0154c7c0f8
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmmStmSupport.c
@@ -0,0 +1,83 @@
+/** @file
+ SMM STM support functions
+
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <PiSmm.h>
+#include <Library/DebugLib.h>
+
+#include "SmmStm.h"
+
+///
+/// Page Table Entry
+///
+#define IA32_PG_P BIT0
+#define IA32_PG_RW BIT1
+#define IA32_PG_PS BIT7
+
+/**
+
+ Create 4G page table for STM.
+ 4M Non-PAE page table in IA32 version.
+
+ @param PageTableBase The page table base in MSEG
+
+**/
+VOID
+StmGen4GPageTable (
+ IN UINTN PageTableBase
+ )
+{
+ UINTN Index;
+ UINT32 *Pte;
+ UINT32 Address;
+
+ Pte = (UINT32*)(UINTN)PageTableBase;
+
+ Address = 0;
+ for (Index = 0; Index < SIZE_4KB / sizeof (*Pte); Index++) {
+ *Pte = Address | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
+ Pte++;
+ Address += SIZE_4MB;
+ }
+}
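+
+/**
+ Editor's illustrative sketch, not part of the original patch: a coverage
+ check for the non-PAE table built by StmGen4GPageTable() above.  One 4 KB
+ page holds SIZE_4KB / sizeof (UINT32) == 1024 entries, and each entry maps
+ a 4 MB large page, so a single page of entries covers exactly 4 GB.  The
+ function name is hypothetical and the routine is never called by the patch.
+**/
+VOID
+ExampleStm4GPageTableCoverage (
+ VOID
+ )
+{
+ ASSERT ((UINT64)(SIZE_4KB / sizeof (UINT32)) * SIZE_4MB == SIZE_4GB);
+}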
+
+/**
+ This is the SMM exception handler.
+ It is consumed by the STM when an exception happens.
+
+ @param Context STM protection exception stack frame
+
+ @return the EBX value for STM reference.
+ EBX = 0: resume SMM guest using register state found on exception stack.
+ EBX = 1 to 0x0F: EBX contains a BIOS error code which the STM must record in the
+ TXT.ERRORCODE register and subsequently reset the system via
+ TXT.CMD.SYS_RESET. The value of the TXT.ERRORCODE register is calculated as
+ follows: TXT.ERRORCODE = (EBX & 0x0F) | STM_CRASH_BIOS_PANIC
+ EBX = 0x10 to 0xFFFFFFFF - reserved, do not use.
+
+**/
+UINT32
+EFIAPI
+SmmStmExceptionHandler (
+ IN OUT STM_PROTECTION_EXCEPTION_STACK_FRAME Context
+ )
+{
+ // TBD - SmmStmExceptionHandler, record information
+ DEBUG ((DEBUG_ERROR, "SmmStmExceptionHandler ...\n"));
+ //
+ // Skip this instruction and continue;
+ //
+ Context.Ia32StackFrame->Rip += Context.Ia32StackFrame->VmcsExitInstructionLength;
+
+ return 0;
+}
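+
+/**
+ Editor's illustrative sketch, not part of the original patch: how an STM
+ would derive the TXT.ERRORCODE value from a nonzero EBX returned by
+ SmmStmExceptionHandler(), per the contract documented above.  The function
+ name is hypothetical; StmCrashBiosPanic stands in for the STM_CRASH_BIOS_PANIC
+ constant provided by the STM API headers, whose value is not shown in this
+ patch.
+
+ @param Ebx BIOS error code (1 to 0x0F) returned in EBX.
+ @param StmCrashBiosPanic The STM_CRASH_BIOS_PANIC constant.
+
+ @return The value the STM would record in TXT.ERRORCODE before issuing
+ TXT.CMD.SYS_RESET.
+**/
+UINT32
+ExampleComputeTxtErrorCode (
+ IN UINT32 Ebx,
+ IN UINT32 StmCrashBiosPanic
+ )
+{
+ ASSERT ((Ebx >= 1) && (Ebx <= 0x0F));
+ return (Ebx & 0x0F) | StmCrashBiosPanic;
+}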
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf b/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf
new file mode 100644
index 0000000000..db8dcdcff4
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf
@@ -0,0 +1,88 @@
+## @file
+# The CPU specific programming for PiSmmCpuDxeSmm module when STM support
+# is included.
+#
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = SmmCpuFeaturesLibStm
+ MODULE_UNI_FILE = SmmCpuFeaturesLib.uni
+ FILE_GUID = 374DE830-81C5-4CC8-B2AB-28F0AB73710B
+ MODULE_TYPE = DXE_SMM_DRIVER
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = SmmCpuFeaturesLib
+ CONSTRUCTOR = SmmCpuFeaturesLibStmConstructor
+
+[Sources]
+ SmmCpuFeaturesLib.c
+ SmmStm.c
+ SmmStm.h
+
+[Sources.Ia32]
+ Ia32/SmmStmSupport.c
+
+ Ia32/SmiEntry.asm
+ Ia32/SmiException.asm
+
+ Ia32/SmiEntry.nasm
+ Ia32/SmiException.nasm
+
+ Ia32/SmiEntry.S
+ Ia32/SmiException.S
+
+[Sources.X64]
+ X64/SmmStmSupport.c
+
+ X64/SmiEntry.asm
+ X64/SmiException.asm
+
+ X64/SmiEntry.nasm
+ X64/SmiException.nasm
+
+ X64/SmiEntry.S
+ X64/SmiException.S
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+
+[LibraryClasses]
+ BaseLib
+ BaseMemoryLib
+ PcdLib
+ HobLib
+ MemoryAllocationLib
+ DebugLib
+ UefiBootServicesTableLib
+ SmmServicesTableLib
+ TpmMeasurementLib
+
+[Protocols]
+ gEfiMpServiceProtocolGuid ## CONSUMES
+ gEfiSmmEndOfDxeProtocolGuid ## CONSUMES
+ gEfiSmMonitorInitProtocolGuid ## PRODUCES
+
+[Guids]
+ gMsegSmramGuid ## SOMETIMES_CONSUMES ## HOB
+ gEfiAcpi20TableGuid ## SOMETIMES_CONSUMES ## SystemTable
+ gEfiAcpi10TableGuid ## SOMETIMES_CONSUMES ## SystemTable
+
+[Pcd]
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber ## SOMETIMES_CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuMsegSize ## SOMETIMES_CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStmExceptionStackSize ## SOMETIMES_CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard ## CONSUMES
+
+[Depex]
+ gEfiMpServiceProtocolGuid
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmStm.c b/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmStm.c
new file mode 100644
index 0000000000..59c49e3881
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmStm.c
@@ -0,0 +1,1299 @@
+/** @file
+ SMM STM support functions
+
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <PiSmm.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/HobLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/PcdLib.h>
+#include <Library/SmmServicesTableLib.h>
+#include <Library/TpmMeasurementLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Register/Cpuid.h>
+#include <Register/ArchitecturalMsr.h>
+#include <Register/SmramSaveStateMap.h>
+
+#include <Protocol/MpService.h>
+
+#include "SmmStm.h"
+
+#define TXT_EVTYPE_BASE 0x400
+#define TXT_EVTYPE_STM_HASH (TXT_EVTYPE_BASE + 14)
+
+#define RDWR_ACCS 3
+#define FULL_ACCS 7
+
+/**
+ The constructor function
+
+ @param[in] ImageHandle The firmware allocated handle for the EFI image.
+ @param[in] SystemTable A pointer to the EFI System Table.
+
+ @retval EFI_SUCCESS The constructor always returns EFI_SUCCESS.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuFeaturesLibConstructor (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ );
+
+EFI_HANDLE mStmSmmCpuHandle = NULL;
+
+BOOLEAN mLockLoadMonitor = FALSE;
+
+//
+// Template of STM_RSC_END structure for copying.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED STM_RSC_END mRscEndNode = {
+ {END_OF_RESOURCES, sizeof (STM_RSC_END)},
+};
+
+GLOBAL_REMOVE_IF_UNREFERENCED UINT8 *mStmResourcesPtr = NULL;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mStmResourceTotalSize = 0x0;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mStmResourceSizeUsed = 0x0;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mStmResourceSizeAvailable = 0x0;
+
+GLOBAL_REMOVE_IF_UNREFERENCED UINT32 mStmState = 0;
+
+//
+// System Configuration Table pointing to STM Configuration Table
+//
+GLOBAL_REMOVE_IF_UNREFERENCED
+EFI_SM_MONITOR_INIT_PROTOCOL mSmMonitorInitProtocol = {
+ LoadMonitor,
+ AddPiResource,
+ DeletePiResource,
+ GetPiResource,
+ GetMonitorState,
+};
+
+
+
+
+#define CPUID1_EDX_XD_SUPPORT 0x100000
+
+//
+// External global variables associated with SMI Handler Template
+//
+extern CONST TXT_PROCESSOR_SMM_DESCRIPTOR gcStmPsd;
+extern UINT32 gStmSmbase;
+extern volatile UINT32 gStmSmiStack;
+extern UINT32 gStmSmiCr3;
+extern volatile UINT8 gcStmSmiHandlerTemplate[];
+extern CONST UINT16 gcStmSmiHandlerSize;
+extern UINT16 gcStmSmiHandlerOffset;
+extern BOOLEAN gStmXdSupported;
+
+//
+// Variables used by SMI Handler
+//
+IA32_DESCRIPTOR gStmSmiHandlerIdtr;
+
+//
+// MP Services Protocol
+//
+EFI_MP_SERVICES_PROTOCOL *mSmmCpuFeaturesLibMpService = NULL;
+
+//
+// MSEG Base and Length in SMRAM
+//
+UINTN mMsegBase = 0;
+UINTN mMsegSize = 0;
+
+BOOLEAN mStmConfigurationTableInitialized = FALSE;
+
+
+/**
+ The constructor function
+
+ @param[in] ImageHandle The firmware allocated handle for the EFI image.
+ @param[in] SystemTable A pointer to the EFI System Table.
+
+ @retval EFI_SUCCESS The constructor always returns EFI_SUCCESS.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuFeaturesLibStmConstructor (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ EFI_STATUS Status;
+ CPUID_VERSION_INFO_ECX RegEcx;
+ EFI_HOB_GUID_TYPE *GuidHob;
+ EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
+
+ //
+ // Call the common constructor function
+ //
+ Status = SmmCpuFeaturesLibConstructor (ImageHandle, SystemTable);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Lookup the MP Services Protocol
+ //
+ Status = gBS->LocateProtocol (
+ &gEfiMpServiceProtocolGuid,
+ NULL,
+ (VOID **)&mSmmCpuFeaturesLibMpService
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // If CPU supports VMX, then determine SMRAM range for MSEG.
+ //
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, &RegEcx.Uint32, NULL);
+ if (RegEcx.Bits.VMX == 1) {
+ GuidHob = GetFirstGuidHob (&gMsegSmramGuid);
+ if (GuidHob != NULL) {
+ //
+ // Retrieve MSEG location from MSEG SRAM HOB
+ //
+ SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);
+ if (SmramDescriptor->PhysicalSize > 0) {
+ mMsegBase = (UINTN)SmramDescriptor->CpuStart;
+ mMsegSize = (UINTN)SmramDescriptor->PhysicalSize;
+ }
+ } else if (PcdGet32 (PcdCpuMsegSize) > 0) {
+ //
+ // Allocate MSEG from SMRAM memory
+ //
+ mMsegBase = (UINTN)AllocatePages (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuMsegSize)));
+ if (mMsegBase > 0) {
+ mMsegSize = ALIGN_VALUE (PcdGet32 (PcdCpuMsegSize), EFI_PAGE_SIZE);
+ } else {
+ DEBUG ((DEBUG_ERROR, "Not enough SMRAM resource to allocate MSEG size %08x\n", PcdGet32 (PcdCpuMsegSize)));
+ }
+ }
+ if (mMsegBase > 0) {
+ DEBUG ((DEBUG_INFO, "MsegBase: 0x%08x, MsegSize: 0x%08x\n", mMsegBase, mMsegSize));
+ }
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Internal worker function that is called to complete CPU initialization at the
+ end of SmmCpuFeaturesInitializeProcessor()
+
+**/
+VOID
+FinishSmmCpuFeaturesInitializeProcessor (
+ VOID
+ )
+{
+ MSR_IA32_SMM_MONITOR_CTL_REGISTER SmmMonitorCtl;
+
+ //
+ // Set MSEG Base Address in SMM Monitor Control MSR.
+ //
+ if (mMsegBase > 0) {
+ SmmMonitorCtl.Uint64 = 0;
+ SmmMonitorCtl.Bits.MsegBase = (UINT32)mMsegBase >> 12;
+ SmmMonitorCtl.Bits.Valid = 1;
+ AsmWriteMsr64 (MSR_IA32_SMM_MONITOR_CTL, SmmMonitorCtl.Uint64);
+ }
+}
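+
+/**
+ Editor's illustrative sketch, not part of the original patch: the MsegBase
+ field of IA32_SMM_MONITOR_CTL holds bits 31:12 of the MSEG physical address,
+ so programming the page frame number as done above reproduces the original
+ page-aligned base.  The MSEG base value used here and the function name are
+ hypothetical; the routine is never called by the patch.
+**/
+VOID
+ExampleSmmMonitorCtlEncoding (
+ VOID
+ )
+{
+ MSR_IA32_SMM_MONITOR_CTL_REGISTER Example;
+
+ Example.Uint64 = 0;
+ Example.Bits.MsegBase = 0x7F000000 >> 12; // hypothetical 4 KB aligned MSEG base
+ Example.Bits.Valid = 1;
+ ASSERT (Example.Uint64 == 0x7F000001);
+}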
+
+/**
+ Return the size, in bytes, of a custom SMI Handler. If 0 is
+ returned, then a custom SMI handler is not provided by this library,
+ and the default SMI handler must be used.
+
+ @retval 0 Use the default SMI handler.
+ @retval > 0 Use the SMI handler installed by SmmCpuFeaturesInstallSmiHandler()
+ The caller is required to allocate enough SMRAM for each CPU to
+ support the size of the custom SMI handler.
+**/
+UINTN
+EFIAPI
+SmmCpuFeaturesGetSmiHandlerSize (
+ VOID
+ )
+{
+ return gcStmSmiHandlerSize;
+}
+
+/**
+ Install a custom SMI handler for the CPU specified by CpuIndex. This function
+ is only called if SmmCpuFeaturesGetSmiHandlerSize() returns a size greater
+ than zero and is called by the CPU that was elected as monarch during System
+ Management Mode initialization.
+
+ @param[in] CpuIndex The index of the CPU to install the custom SMI handler.
+ The value must be between 0 and the NumberOfCpus field
+ in the System Management System Table (SMST).
+ @param[in] SmBase The SMBASE address for the CPU specified by CpuIndex.
+ @param[in] SmiStack The stack to use when an SMI is processed by the
+ CPU specified by CpuIndex.
+ @param[in] StackSize The size, in bytes, of the stack used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] GdtBase The base address of the GDT to use when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] GdtSize The size, in bytes, of the GDT used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] IdtBase The base address of the IDT to use when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] IdtSize The size, in bytes, of the IDT used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] Cr3 The base address of the page tables to use when an SMI
+ is processed by the CPU specified by CpuIndex.
+**/
+VOID
+EFIAPI
+SmmCpuFeaturesInstallSmiHandler (
+ IN UINTN CpuIndex,
+ IN UINT32 SmBase,
+ IN VOID *SmiStack,
+ IN UINTN StackSize,
+ IN UINTN GdtBase,
+ IN UINTN GdtSize,
+ IN UINTN IdtBase,
+ IN UINTN IdtSize,
+ IN UINT32 Cr3
+ )
+{
+ EFI_STATUS Status;
+ TXT_PROCESSOR_SMM_DESCRIPTOR *Psd;
+ VOID *Hob;
+ UINT32 RegEax;
+ UINT32 RegEdx;
+ EFI_PROCESSOR_INFORMATION ProcessorInfo;
+
+ CopyMem ((VOID *)(UINTN)(SmBase + TXT_SMM_PSD_OFFSET), &gcStmPsd, sizeof (gcStmPsd));
+ Psd = (TXT_PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(SmBase + TXT_SMM_PSD_OFFSET);
+ Psd->SmmGdtPtr = GdtBase;
+ Psd->SmmGdtSize = (UINT32)GdtSize;
+
+ //
+ // Initialize values in template before copy
+ //
+ gStmSmiStack = (UINT32)((UINTN)SmiStack + StackSize - sizeof (UINTN));
+ gStmSmiCr3 = Cr3;
+ gStmSmbase = SmBase;
+ gStmSmiHandlerIdtr.Base = IdtBase;
+ gStmSmiHandlerIdtr.Limit = (UINT16)(IdtSize - 1);
+
+ if (gStmXdSupported) {
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+ if (RegEax <= CPUID_EXTENDED_FUNCTION) {
+ //
+ // Extended CPUID functions are not supported on this processor.
+ //
+ gStmXdSupported = FALSE;
+ }
+
+ AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
+ //
+ // Execute Disable Bit feature is not supported on this processor.
+ //
+ gStmXdSupported = FALSE;
+ }
+ }
+
+ //
+ // Set the value at the top of the CPU stack to the CPU Index
+ //
+ *(UINTN*)(UINTN)gStmSmiStack = CpuIndex;
+
+ //
+ // Copy template to CPU specific SMI handler location
+ //
+ CopyMem (
+ (VOID*)(UINTN)(SmBase + SMM_HANDLER_OFFSET),
+ (VOID*)gcStmSmiHandlerTemplate,
+ gcStmSmiHandlerSize
+ );
+
+ Psd->SmmSmiHandlerRip = SmBase + SMM_HANDLER_OFFSET + gcStmSmiHandlerOffset;
+ Psd->SmmSmiHandlerRsp = (UINTN)SmiStack + StackSize - sizeof(UINTN);
+ Psd->SmmCr3 = Cr3;
+
+ DEBUG((DEBUG_ERROR, "CpuSmmStmExceptionStackSize - %x\n", PcdGet32(PcdCpuSmmStmExceptionStackSize)));
+ DEBUG((DEBUG_ERROR, "Pages - %x\n", EFI_SIZE_TO_PAGES(PcdGet32(PcdCpuSmmStmExceptionStackSize))));
+ Psd->StmProtectionExceptionHandler.SpeRsp = (UINT64)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStmExceptionStackSize)));
+ Psd->StmProtectionExceptionHandler.SpeRsp += EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStmExceptionStackSize)));
+
+ Psd->BiosHwResourceRequirementsPtr = (UINT64)(UINTN)GetStmResource ();
+
+ //
+ // Get the APIC ID for the CPU specified by CpuIndex
+ //
+ Status = mSmmCpuFeaturesLibMpService->GetProcessorInfo (
+ mSmmCpuFeaturesLibMpService,
+ CpuIndex,
+ &ProcessorInfo
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ Psd->LocalApicId = (UINT32)ProcessorInfo.ProcessorId;
+ Psd->AcpiRsdp = 0;
+
+ Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
+ if (Hob != NULL) {
+ Psd->PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
+ } else {
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000008) {
+ AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
+ Psd->PhysicalAddressBits = (UINT8) RegEax;
+ } else {
+ Psd->PhysicalAddressBits = 36;
+ }
+ }
+
+ if (!mStmConfigurationTableInitialized) {
+ StmSmmConfigurationTableInit ();
+ mStmConfigurationTableInitialized = TRUE;
+ }
+}
+
+/**
+ SMM End Of Dxe event notification handler.
+
+ STM support needs to patch AcpiRsdp in TXT_PROCESSOR_SMM_DESCRIPTOR.
+
+ @param[in] Protocol Points to the protocol's unique identifier.
+ @param[in] Interface Points to the interface instance.
+ @param[in] Handle The handle on which the interface was installed.
+
+ @retval EFI_SUCCESS Notification handler runs successfully.
+**/
+EFI_STATUS
+EFIAPI
+SmmEndOfDxeEventNotify (
+ IN CONST EFI_GUID *Protocol,
+ IN VOID *Interface,
+ IN EFI_HANDLE Handle
+ )
+{
+ VOID *Rsdp;
+ UINTN Index;
+ TXT_PROCESSOR_SMM_DESCRIPTOR *Psd;
+
+ DEBUG ((DEBUG_INFO, "SmmEndOfDxeEventNotify\n"));
+
+ //
+ // Find the ACPI RSD_PTR table in the system configuration table
+ //
+ Rsdp = NULL;
+ for (Index = 0; Index < gST->NumberOfTableEntries; Index++) {
+ if (CompareGuid (&(gST->ConfigurationTable[Index].VendorGuid), &gEfiAcpi20TableGuid)) {
+ //
+ // A match was found.
+ //
+ Rsdp = gST->ConfigurationTable[Index].VendorTable;
+ break;
+ }
+ }
+ if (Rsdp == NULL) {
+ for (Index = 0; Index < gST->NumberOfTableEntries; Index++) {
+ if (CompareGuid (&(gST->ConfigurationTable[Index].VendorGuid), &gEfiAcpi10TableGuid)) {
+ //
+ // A match was found.
+ //
+ Rsdp = gST->ConfigurationTable[Index].VendorTable;
+ break;
+ }
+ }
+ }
+
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+ Psd = (TXT_PROCESSOR_SMM_DESCRIPTOR *)((UINTN)gSmst->CpuSaveState[Index] - SMRAM_SAVE_STATE_MAP_OFFSET + TXT_SMM_PSD_OFFSET);
+ DEBUG ((DEBUG_INFO, "Index=%d Psd=%p Rsdp=%p\n", Index, Psd, Rsdp));
+ Psd->AcpiRsdp = (UINT64)(UINTN)Rsdp;
+ }
+
+ mLockLoadMonitor = TRUE;
+
+ return EFI_SUCCESS;
+}
+
+/**
+ This function initializes the STM configuration table.
+**/
+VOID
+StmSmmConfigurationTableInit (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ VOID *Registration;
+
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mStmSmmCpuHandle,
+ &gEfiSmMonitorInitProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmMonitorInitProtocol
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Register SMM End of DXE Event
+ //
+ Status = gSmst->SmmRegisterProtocolNotify (
+ &gEfiSmmEndOfDxeProtocolGuid,
+ SmmEndOfDxeEventNotify,
+ &Registration
+ );
+ ASSERT_EFI_ERROR (Status);
+}
+
+/**
+
+ Get STM state.
+
+ @return STM state
+
+**/
+EFI_SM_MONITOR_STATE
+EFIAPI
+GetMonitorState (
+ VOID
+ )
+{
+ return mStmState;
+}
+
+/**
+
+ Handle single Resource to see if it can be merged into Record.
+
+ @param Resource A pointer to resource node to be added
+ @param Record A pointer to record node to be merged
+
+ @retval TRUE resource handled
+ @retval FALSE resource is not handled
+
+**/
+BOOLEAN
+HandleSingleResource (
+ IN STM_RSC *Resource,
+ IN STM_RSC *Record
+ )
+{
+ UINT64 ResourceLo;
+ UINT64 ResourceHi;
+ UINT64 RecordLo;
+ UINT64 RecordHi;
+
+ ResourceLo = 0;
+ ResourceHi = 0;
+ RecordLo = 0;
+ RecordHi = 0;
+
+ //
+ // Calling code is responsible for making sure that
+ // Resource->Header.RscType == Record->Header.RscType,
+ // thus we use just one of them as the switch variable.
+ //
+ switch (Resource->Header.RscType) {
+ case MEM_RANGE:
+ case MMIO_RANGE:
+ ResourceLo = Resource->Mem.Base;
+ ResourceHi = Resource->Mem.Base + Resource->Mem.Length;
+ RecordLo = Record->Mem.Base;
+ RecordHi = Record->Mem.Base + Record->Mem.Length;
+ if (Resource->Mem.RWXAttributes != Record->Mem.RWXAttributes) {
+ if ((ResourceLo == RecordLo) && (ResourceHi == RecordHi)) {
+ Record->Mem.RWXAttributes = Resource->Mem.RWXAttributes | Record->Mem.RWXAttributes;
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+ }
+ break;
+ case IO_RANGE:
+ case TRAPPED_IO_RANGE:
+ ResourceLo = (UINT64) Resource->Io.Base;
+ ResourceHi = (UINT64) Resource->Io.Base + (UINT64) Resource->Io.Length;
+ RecordLo = (UINT64) Record->Io.Base;
+ RecordHi = (UINT64) Record->Io.Base + (UINT64) Record->Io.Length;
+ break;
+ case PCI_CFG_RANGE:
+ if ((Resource->PciCfg.OriginatingBusNumber != Record->PciCfg.OriginatingBusNumber) ||
+ (Resource->PciCfg.LastNodeIndex != Record->PciCfg.LastNodeIndex)) {
+ return FALSE;
+ }
+ if (CompareMem (Resource->PciCfg.PciDevicePath, Record->PciCfg.PciDevicePath, sizeof(STM_PCI_DEVICE_PATH_NODE) * (Resource->PciCfg.LastNodeIndex + 1)) != 0) {
+ return FALSE;
+ }
+ ResourceLo = (UINT64) Resource->PciCfg.Base;
+ ResourceHi = (UINT64) Resource->PciCfg.Base + (UINT64) Resource->PciCfg.Length;
+ RecordLo = (UINT64) Record->PciCfg.Base;
+ RecordHi = (UINT64) Record->PciCfg.Base + (UINT64) Record->PciCfg.Length;
+ if (Resource->PciCfg.RWAttributes != Record->PciCfg.RWAttributes) {
+ if ((ResourceLo == RecordLo) && (ResourceHi == RecordHi)) {
+ Record->PciCfg.RWAttributes = Resource->PciCfg.RWAttributes | Record->PciCfg.RWAttributes;
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+ }
+ break;
+ case MACHINE_SPECIFIC_REG:
+ //
+ // Special case - merge MSR masks in place.
+ //
+ if (Resource->Msr.MsrIndex != Record->Msr.MsrIndex) {
+ return FALSE;
+ }
+ Record->Msr.ReadMask |= Resource->Msr.ReadMask;
+ Record->Msr.WriteMask |= Resource->Msr.WriteMask;
+ return TRUE;
+ default:
+ return FALSE;
+ }
+ //
+ // If resources are disjoint
+ //
+ if ((ResourceHi < RecordLo) || (ResourceLo > RecordHi)) {
+ return FALSE;
+ }
+
+ //
+ // If resource is consumed by record.
+ //
+ if ((ResourceLo >= RecordLo) && (ResourceHi <= RecordHi)) {
+ return TRUE;
+ }
+ //
+ // Resources are overlapping.
+ // Resource and record are merged.
+ //
+ ResourceLo = (ResourceLo < RecordLo) ? ResourceLo : RecordLo;
+ ResourceHi = (ResourceHi > RecordHi) ? ResourceHi : RecordHi;
+
+ switch (Resource->Header.RscType) {
+ case MEM_RANGE:
+ case MMIO_RANGE:
+ Record->Mem.Base = ResourceLo;
+ Record->Mem.Length = ResourceHi - ResourceLo;
+ break;
+ case IO_RANGE:
+ case TRAPPED_IO_RANGE:
+ Record->Io.Base = (UINT16) ResourceLo;
+ Record->Io.Length = (UINT16) (ResourceHi - ResourceLo);
+ break;
+ case PCI_CFG_RANGE:
+ Record->PciCfg.Base = (UINT16) ResourceLo;
+ Record->PciCfg.Length = (UINT16) (ResourceHi - ResourceLo);
+ break;
+ default:
+ return FALSE;
+ }
+
+ return TRUE;
+}
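+
+/**
+ Editor's illustrative sketch, not part of the original patch: the endpoint
+ arithmetic HandleSingleResource() uses above when two overlapping ranges are
+ merged.  The merged range runs from the lower of the two bases to the higher
+ of the two ends.  The range values and the function name are hypothetical,
+ and the routine is never called by the patch.
+**/
+VOID
+ExampleMergeOverlappingRanges (
+ VOID
+ )
+{
+ UINT64 ResourceLo;
+ UINT64 ResourceHi;
+ UINT64 RecordLo;
+ UINT64 RecordHi;
+
+ ResourceLo = 0x1000;  // hypothetical resource range [0x1000, 0x3000)
+ ResourceHi = 0x3000;
+ RecordLo   = 0x2000;  // hypothetical record range   [0x2000, 0x4000)
+ RecordHi   = 0x4000;
+
+ ResourceLo = (ResourceLo < RecordLo) ? ResourceLo : RecordLo;
+ ResourceHi = (ResourceHi > RecordHi) ? ResourceHi : RecordHi;
+
+ ASSERT ((ResourceLo == 0x1000) && (ResourceHi == 0x4000));
+}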
+
+/**
+
+ Add resource node.
+
+ @param Resource A pointer to resource node to be added
+
+**/
+VOID
+AddSingleResource (
+ IN STM_RSC *Resource
+ )
+{
+ STM_RSC *Record;
+
+ Record = (STM_RSC *)mStmResourcesPtr;
+
+ while (TRUE) {
+ if (Record->Header.RscType == END_OF_RESOURCES) {
+ break;
+ }
+ //
+ // Go to next record if resource and record types don't match.
+ //
+ if (Resource->Header.RscType != Record->Header.RscType) {
+ Record = (STM_RSC *)((UINTN)Record + Record->Header.Length);
+ continue;
+ }
+ //
+ // Record is handled inside of procedure - don't adjust.
+ //
+ if (HandleSingleResource (Resource, Record)) {
+ return ;
+ }
+ Record = (STM_RSC *)((UINTN)Record + Record->Header.Length);
+ }
+
+ //
+ // Add resource to the end of area.
+ //
+ CopyMem (
+ mStmResourcesPtr + mStmResourceSizeUsed - sizeof(mRscEndNode),
+ Resource,
+ Resource->Header.Length
+ );
+ CopyMem (
+ mStmResourcesPtr + mStmResourceSizeUsed - sizeof(mRscEndNode) + Resource->Header.Length,
+ &mRscEndNode,
+ sizeof(mRscEndNode)
+ );
+ mStmResourceSizeUsed += Resource->Header.Length;
+ mStmResourceSizeAvailable = mStmResourceTotalSize - mStmResourceSizeUsed;
+
+ return ;
+}
+
+/**
+
+ Add resource list.
+
+ @param ResourceList A pointer to resource list to be added
+ @param NumEntries Optional number of entries.
+ If 0, list must be terminated by END_OF_RESOURCES.
+
+**/
+VOID
+AddResource (
+ IN STM_RSC *ResourceList,
+ IN UINT32 NumEntries OPTIONAL
+ )
+{
+ UINT32 Count;
+ UINTN Index;
+ STM_RSC *Resource;
+
+ if (NumEntries == 0) {
+ Count = 0xFFFFFFFF;
+ } else {
+ Count = NumEntries;
+ }
+
+ Resource = ResourceList;
+
+ for (Index = 0; Index < Count; Index++) {
+ if (Resource->Header.RscType == END_OF_RESOURCES) {
+ return ;
+ }
+ AddSingleResource (Resource);
+ Resource = (STM_RSC *)((UINTN)Resource + Resource->Header.Length);
+ }
+ return ;
+}
+
+/**
+
+ Validate resource list.
+
+ @param ResourceList A pointer to resource list to be validated
+ @param NumEntries Optional number of entries.
+ If 0, list must be terminated by END_OF_RESOURCES.
+
+ @retval TRUE resource valid
+ @retval FALSE resource invalid
+
+**/
+BOOLEAN
+ValidateResource (
+ IN STM_RSC *ResourceList,
+ IN UINT32 NumEntries OPTIONAL
+ )
+{
+ UINT32 Count;
+ UINTN Index;
+ STM_RSC *Resource;
+ UINTN SubIndex;
+
+ //
+ // If NumEntries == 0 make it very big. Scan will be terminated by
+ // END_OF_RESOURCES.
+ //
+ if (NumEntries == 0) {
+ Count = 0xFFFFFFFF;
+ } else {
+ Count = NumEntries;
+ }
+
+ //
+ // Start from beginning of resource list.
+ //
+ Resource = ResourceList;
+
+ for (Index = 0; Index < Count; Index++) {
+ DEBUG ((DEBUG_ERROR, "ValidateResource (%d) - RscType(%x)\n", Index, Resource->Header.RscType));
+ //
+ // Validate resource.
+ //
+ switch (Resource->Header.RscType) {
+ case END_OF_RESOURCES:
+ if (Resource->Header.Length != sizeof (STM_RSC_END)) {
+ return FALSE;
+ }
+ //
+ // If we are passed actual number of resources to add,
+ // END_OF_RESOURCES structure between them is considered an
+ // error. If NumEntries == 0 END_OF_RESOURCES is a termination.
+ //
+ if (NumEntries != 0) {
+ return FALSE;
+ } else {
+ //
+ // If NumEntries == 0 and list reached end - return success.
+ //
+ return TRUE;
+ }
+ break;
+
+ case MEM_RANGE:
+ case MMIO_RANGE:
+ if (Resource->Header.Length != sizeof (STM_RSC_MEM_DESC)) {
+ return FALSE;
+ }
+
+ if (Resource->Mem.RWXAttributes > FULL_ACCS) {
+ return FALSE;
+ }
+ break;
+
+ case IO_RANGE:
+ case TRAPPED_IO_RANGE:
+ if (Resource->Header.Length != sizeof (STM_RSC_IO_DESC)) {
+ return FALSE;
+ }
+
+ if ((Resource->Io.Base + Resource->Io.Length) > 0xFFFF) {
+ return FALSE;
+ }
+ break;
+
+ case PCI_CFG_RANGE:
+ DEBUG ((DEBUG_ERROR, "ValidateResource - PCI (0x%02x, 0x%08x, 0x%02x, 0x%02x)\n", Resource->PciCfg.OriginatingBusNumber, Resource->PciCfg.LastNodeIndex, Resource->PciCfg.PciDevicePath[0].PciDevice, Resource->PciCfg.PciDevicePath[0].PciFunction));
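+ //
+ // LastNodeIndex is zero-based and sizeof (STM_RSC_PCI_CFG_DESC) already
+ // covers the first path node, so only the additional nodes contribute to
+ // the expected length.
+ //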
+ if (Resource->Header.Length != sizeof (STM_RSC_PCI_CFG_DESC) + (sizeof(STM_PCI_DEVICE_PATH_NODE) * Resource->PciCfg.LastNodeIndex)) {
+ return FALSE;
+ }
+ for (SubIndex = 0; SubIndex <= Resource->PciCfg.LastNodeIndex; SubIndex++) {
+ if ((Resource->PciCfg.PciDevicePath[SubIndex].PciDevice > 0x1F) || (Resource->PciCfg.PciDevicePath[SubIndex].PciFunction > 7)) {
+ return FALSE;
+ }
+ }
+ if ((Resource->PciCfg.Base + Resource->PciCfg.Length) > 0x1000) {
+ return FALSE;
+ }
+ break;
+
+ case MACHINE_SPECIFIC_REG:
+ if (Resource->Header.Length != sizeof (STM_RSC_MSR_DESC)) {
+ return FALSE;
+ }
+ break;
+
+ default :
+ DEBUG ((DEBUG_ERROR, "ValidateResource - Unknown RscType(%x)\n", Resource->Header.RscType));
+ return FALSE;
+ }
+ Resource = (STM_RSC *)((UINTN)Resource + Resource->Header.Length);
+ }
+ return TRUE;
+}
+
+/**
+
+ Get size of the resource list.
+ The terminating EndResource node is excluded.
+
+ @param ResourceList A pointer to the resource list
+ @param NumEntries Optional number of entries.
+ If 0, list must be terminated by END_OF_RESOURCES.
+
+ @return Size, in bytes, of the resource list.
+
+**/
+UINTN
+GetResourceSize (
+ IN STM_RSC *ResourceList,
+ IN UINT32 NumEntries OPTIONAL
+ )
+{
+ UINT32 Count;
+ UINTN Index;
+ STM_RSC *Resource;
+
+ Resource = ResourceList;
+
+ //
+ // If NumEntries == 0 make it very big. Scan will be terminated by
+ // END_OF_RESOURCES.
+ //
+ if (NumEntries == 0) {
+ Count = 0xFFFFFFFF;
+ } else {
+ Count = NumEntries;
+ }
+
+ //
+ // Start from beginning of resource list.
+ //
+ Resource = ResourceList;
+
+ for (Index = 0; Index < Count; Index++) {
+ if (Resource->Header.RscType == END_OF_RESOURCES) {
+ break;
+ }
+ Resource = (STM_RSC *)((UINTN)Resource + Resource->Header.Length);
+ }
+
+ return (UINTN)Resource - (UINTN)ResourceList;
+}
+
+/**
+
+ Add resources in list to database. Allocate new memory areas as needed.
+
+ @param ResourceList A pointer to resource list to be added
+ @param NumEntries Optional number of entries.
+ If 0, list must be terminated by END_OF_RESOURCES.
+
+ @retval EFI_SUCCESS If resources are added
+ @retval EFI_INVALID_PARAMETER If nested procedure detected resource failure
+ @retval EFI_OUT_OF_RESOURCES If nested procedure returned it and we cannot allocate more areas.
+
+**/
+EFI_STATUS
+EFIAPI
+AddPiResource (
+ IN STM_RSC *ResourceList,
+ IN UINT32 NumEntries OPTIONAL
+ )
+{
+ EFI_STATUS Status;
+ UINTN ResourceSize;
+ EFI_PHYSICAL_ADDRESS NewResource;
+ UINTN NewResourceSize;
+
+ DEBUG ((DEBUG_INFO, "AddPiResource - Enter\n"));
+
+ if (!ValidateResource (ResourceList, NumEntries)) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ ResourceSize = GetResourceSize (ResourceList, NumEntries);
+ DEBUG ((DEBUG_INFO, "ResourceSize - 0x%08x\n", ResourceSize));
+ if (ResourceSize == 0) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (mStmResourcesPtr == NULL) {
+ //
+ // First time allocation
+ //
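+ // Reserve room for the incoming resources plus the terminating END node,
+ // rounded up to whole 4 KB pages.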
+ NewResourceSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (ResourceSize + sizeof(mRscEndNode)));
+ DEBUG ((DEBUG_INFO, "Allocate - 0x%08x\n", NewResourceSize));
+ Status = gSmst->SmmAllocatePages (
+ AllocateAnyPages,
+ EfiRuntimeServicesData,
+ EFI_SIZE_TO_PAGES (NewResourceSize),
+ &NewResource
+ );
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ //
+ // Copy EndResource for initialization
+ //
+ mStmResourcesPtr = (UINT8 *)(UINTN)NewResource;
+ mStmResourceTotalSize = NewResourceSize;
+ CopyMem (mStmResourcesPtr, &mRscEndNode, sizeof(mRscEndNode));
+ mStmResourceSizeUsed = sizeof(mRscEndNode);
+ mStmResourceSizeAvailable = mStmResourceTotalSize - sizeof(mRscEndNode);
+
+ //
+ // Let SmmCore change resource ptr
+ //
+ NotifyStmResourceChange (mStmResourcesPtr);
+ } else if (mStmResourceSizeAvailable < ResourceSize) {
+ //
+ // Need enlarge
+ //
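+ // Grow the area by the shortfall, rounded up to whole pages; existing
+ // records are copied into the new buffer and the old pages are freed.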
+ NewResourceSize = mStmResourceTotalSize + (ResourceSize - mStmResourceSizeAvailable);
+ NewResourceSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (NewResourceSize));
+ DEBUG ((DEBUG_INFO, "ReAllocate - 0x%08x\n", NewResourceSize));
+ Status = gSmst->SmmAllocatePages (
+ AllocateAnyPages,
+ EfiRuntimeServicesData,
+ EFI_SIZE_TO_PAGES (NewResourceSize),
+ &NewResource
+ );
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+ CopyMem ((VOID *)(UINTN)NewResource, mStmResourcesPtr, mStmResourceSizeUsed);
+ mStmResourceSizeAvailable = NewResourceSize - mStmResourceSizeUsed;
+
+ gSmst->SmmFreePages (
+ (EFI_PHYSICAL_ADDRESS)(UINTN)mStmResourcesPtr,
+ EFI_SIZE_TO_PAGES (mStmResourceTotalSize)
+ );
+
+ mStmResourceTotalSize = NewResourceSize;
+ mStmResourcesPtr = (UINT8 *)(UINTN)NewResource;
+
+ //
+ // Let SmmCore change resource ptr
+ //
+ NotifyStmResourceChange (mStmResourcesPtr);
+ }
+
+ //
+ // Check duplication
+ //
+ AddResource (ResourceList, NumEntries);
+
+ return EFI_SUCCESS;
+}
+
+/**
+
+ Delete resources in list from database.
+
+ @param ResourceList A pointer to resource list to be deleted
+ NULL means delete all resources.
+ @param NumEntries Optional number of entries.
+ If 0, list must be terminated by END_OF_RESOURCES.
+
+ @retval EFI_SUCCESS If resources are deleted
+ @retval EFI_INVALID_PARAMETER If nested procedure detected resource failure
+
+**/
+EFI_STATUS
+EFIAPI
+DeletePiResource (
+ IN STM_RSC *ResourceList,
+ IN UINT32 NumEntries OPTIONAL
+ )
+{
+ if (ResourceList != NULL) {
+ // TBD
+ ASSERT (FALSE);
+ return EFI_UNSUPPORTED;
+ }
+ //
+ // Delete all
+ //
+ CopyMem (mStmResourcesPtr, &mRscEndNode, sizeof(mRscEndNode));
+ mStmResourceSizeUsed = sizeof(mRscEndNode);
+ mStmResourceSizeAvailable = mStmResourceTotalSize - sizeof(mRscEndNode);
+ return EFI_SUCCESS;
+}
+
+/**
+
+ Get BIOS resources.
+
+ @param ResourceList A pointer to resource list to be filled
+ @param ResourceSize On input, the size of the resource list buffer.
+ On output, the size of the resource list filled,
+ or the size required if the buffer is too small.
+
+ @retval EFI_SUCCESS If resources are returned.
+ @retval EFI_BUFFER_TOO_SMALL If resource list buffer is too small to hold the whole resources.
+
+**/
+EFI_STATUS
+EFIAPI
+GetPiResource (
+ OUT STM_RSC *ResourceList,
+ IN OUT UINT32 *ResourceSize
+ )
+{
+ if (*ResourceSize < mStmResourceSizeUsed) {
+ *ResourceSize = (UINT32)mStmResourceSizeUsed;
+ return EFI_BUFFER_TOO_SMALL;
+ }
+
+ CopyMem (ResourceList, mStmResourcesPtr, mStmResourceSizeUsed);
+ *ResourceSize = (UINT32)mStmResourceSizeUsed;
+ return EFI_SUCCESS;
+}
+
+/**
+
+ Set valid bit for MSEG MSR.
+
+ @param Buffer Ap function buffer. (not used)
+
+**/
+VOID
+EFIAPI
+EnableMsegMsr (
+ IN VOID *Buffer
+ )
+{
+ MSR_IA32_SMM_MONITOR_CTL_REGISTER SmmMonitorCtl;
+
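+ //
+ // Setting the Valid bit in IA32_SMM_MONITOR_CTL allows the STM in MSEG to
+ // be activated later via VMCALL (dual-monitor treatment).
+ //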
+ SmmMonitorCtl.Uint64 = AsmReadMsr64 (MSR_IA32_SMM_MONITOR_CTL);
+ SmmMonitorCtl.Bits.Valid = 1;
+ AsmWriteMsr64 (MSR_IA32_SMM_MONITOR_CTL, SmmMonitorCtl.Uint64);
+}
+
+/**
+
+ Get 4K page aligned VMCS size.
+
+ @return 4K page aligned VMCS size
+
+**/
+UINT32
+GetVmcsSize (
+ VOID
+ )
+{
+ MSR_IA32_VMX_BASIC_REGISTER VmxBasic;
+
+ //
+ // Read VMCS size and align to 4KB
+ //
+ VmxBasic.Uint64 = AsmReadMsr64 (MSR_IA32_VMX_BASIC);
+ return ALIGN_VALUE (VmxBasic.Bits.VmcsSize, SIZE_4KB);
+}
+
+/**
+
+ Check STM image size.
+
+ @param StmImage STM image
+ @param StmImageSize STM image size
+
+ @retval TRUE check pass
+ @retval FALSE check fail
+**/
+BOOLEAN
+StmCheckStmImage (
+ IN EFI_PHYSICAL_ADDRESS StmImage,
+ IN UINTN StmImageSize
+ )
+{
+ UINTN MinMsegSize;
+ STM_HEADER *StmHeader;
+ IA32_VMX_MISC_REGISTER VmxMiscMsr;
+
+ //
+ // Check to see if STM image is compatible with CPU
+ //
+ StmHeader = (STM_HEADER *)(UINTN)StmImage;
+ VmxMiscMsr.Uint64 = AsmReadMsr64 (MSR_IA32_VMX_MISC);
+ if (StmHeader->HwStmHdr.MsegHeaderRevision != VmxMiscMsr.Bits.MsegRevisionIdentifier) {
+ DEBUG ((DEBUG_ERROR, "STM Image not compatible with CPU\n"));
+ DEBUG ((DEBUG_ERROR, " StmHeader->HwStmHdr.MsegHeaderRevision = %08x\n", StmHeader->HwStmHdr.MsegHeaderRevision));
+ DEBUG ((DEBUG_ERROR, " VmxMiscMsr.Bits.MsegRevisionIdentifier = %08x\n", VmxMiscMsr.Bits.MsegRevisionIdentifier));
+ return FALSE;
+ }
+
+ //
+ // Get Minimal required Mseg size
+ //
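+ // MSEG must hold the page-aligned static image, the additional global
+ // dynamic memory, and per-processor dynamic memory plus two VMCS-sized
+ // regions for every logical processor.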
+ MinMsegSize = (EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
+ StmHeader->SwStmHdr.AdditionalDynamicMemorySize +
+ (StmHeader->SwStmHdr.PerProcDynamicMemorySize + GetVmcsSize () * 2) * gSmst->NumberOfCpus);
+ if (MinMsegSize < StmImageSize) {
+ MinMsegSize = StmImageSize;
+ }
+
+ if (StmHeader->HwStmHdr.Cr3Offset >= StmHeader->SwStmHdr.StaticImageSize) {
+ //
+ // We will create page tables, just in case SINIT does not create them.
+ //
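+ // The generated page tables occupy up to six 4 KB pages (PML4/PDPT plus
+ // four page directories on X64), so reserve that much beyond Cr3Offset.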
+ if (MinMsegSize < StmHeader->HwStmHdr.Cr3Offset + EFI_PAGES_TO_SIZE(6)) {
+ MinMsegSize = StmHeader->HwStmHdr.Cr3Offset + EFI_PAGES_TO_SIZE(6);
+ }
+ }
+
+ //
+ // Check if it exceeds MSEG size
+ //
+ if (MinMsegSize > mMsegSize) {
+ DEBUG ((DEBUG_ERROR, "MSEG too small. Min MSEG Size = %08x Current MSEG Size = %08x\n", MinMsegSize, mMsegSize));
+ DEBUG ((DEBUG_ERROR, " StmHeader->SwStmHdr.StaticImageSize = %08x\n", StmHeader->SwStmHdr.StaticImageSize));
+ DEBUG ((DEBUG_ERROR, " StmHeader->SwStmHdr.AdditionalDynamicMemorySize = %08x\n", StmHeader->SwStmHdr.AdditionalDynamicMemorySize));
+ DEBUG ((DEBUG_ERROR, " StmHeader->SwStmHdr.PerProcDynamicMemorySize = %08x\n", StmHeader->SwStmHdr.PerProcDynamicMemorySize));
+ DEBUG ((DEBUG_ERROR, " VMCS Size = %08x\n", GetVmcsSize ()));
+ DEBUG ((DEBUG_ERROR, " Max CPUs = %08x\n", gSmst->NumberOfCpus));
+ DEBUG ((DEBUG_ERROR, " StmHeader->HwStmHdr.Cr3Offset = %08x\n", StmHeader->HwStmHdr.Cr3Offset));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/**
+
+ Load STM image to MSEG.
+
+ @param StmImage STM image
+ @param StmImageSize STM image size
+
+**/
+VOID
+StmLoadStmImage (
+ IN EFI_PHYSICAL_ADDRESS StmImage,
+ IN UINTN StmImageSize
+ )
+{
+ MSR_IA32_SMM_MONITOR_CTL_REGISTER SmmMonitorCtl;
+ UINT32 MsegBase;
+ STM_HEADER *StmHeader;
+
+ //
+ // Get MSEG base address from MSR_IA32_SMM_MONITOR_CTL
+ //
+ SmmMonitorCtl.Uint64 = AsmReadMsr64 (MSR_IA32_SMM_MONITOR_CTL);
+ MsegBase = SmmMonitorCtl.Bits.MsegBase << 12;
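+ // The MSR field holds a 4 KB-aligned frame number, so shift it left by 12
+ // to form the physical base address.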
+
+ //
+ // Zero the entire MSEG region
+ //
+ ZeroMem ((VOID *)(UINTN)MsegBase, mMsegSize);
+
+ //
+ // Copy STM Image into MSEG
+ //
+ CopyMem ((VOID *)(UINTN)MsegBase, (VOID *)(UINTN)StmImage, StmImageSize);
+
+ //
+ // STM Header is at the beginning of the STM Image
+ //
+ StmHeader = (STM_HEADER *)(UINTN)StmImage;
+
+ StmGen4GPageTable ((UINTN)MsegBase + StmHeader->HwStmHdr.Cr3Offset);
+}
+
+/**
+
+ Load STM image to MSEG.
+
+ @param StmImage STM image
+ @param StmImageSize STM image size
+
+ @retval EFI_SUCCESS Load STM to MSEG successfully
+ @retval EFI_ALREADY_STARTED STM image is already loaded to MSEG
+ @retval EFI_BUFFER_TOO_SMALL MSEG is smaller than minimal requirement of STM image
+ @retval EFI_UNSUPPORTED MSEG is not enabled
+ @retval EFI_ACCESS_DENIED Loading the monitor has been locked
+
+**/
+EFI_STATUS
+EFIAPI
+LoadMonitor (
+ IN EFI_PHYSICAL_ADDRESS StmImage,
+ IN UINTN StmImageSize
+ )
+{
+ MSR_IA32_SMM_MONITOR_CTL_REGISTER SmmMonitorCtl;
+
+ if (mLockLoadMonitor) {
+ return EFI_ACCESS_DENIED;
+ }
+
+ SmmMonitorCtl.Uint64 = AsmReadMsr64 (MSR_IA32_SMM_MONITOR_CTL);
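+ //
+ // An MSEG base of zero means the platform has not configured MSEG, so the
+ // monitor cannot be loaded.
+ //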
+ if (SmmMonitorCtl.Bits.MsegBase == 0) {
+ return EFI_UNSUPPORTED;
+ }
+
+ if (!StmCheckStmImage (StmImage, StmImageSize)) {
+ return EFI_BUFFER_TOO_SMALL;
+ }
+
+ // Record STM_HASH to PCR 0. Even if this is not a TXT launch, we still need to provide the evidence.
+ TpmMeasureAndLogData(
+ 0, // PcrIndex
+ TXT_EVTYPE_STM_HASH, // EventType
+ NULL, // EventLog
+ 0, // LogLen
+ (VOID *)(UINTN)StmImage, // HashData
+ StmImageSize // HashDataLen
+ );
+
+ StmLoadStmImage (StmImage, StmImageSize);
+
+ mStmState |= EFI_SM_MONITOR_STATE_ENABLED;
+
+ return EFI_SUCCESS;
+}
+
+/**
+ This function returns the BIOS STM resource.
+ Produced by SmmStm.
+ Consumed by SmmMpService during initialization.
+
+ @return BIOS STM resource
+
+**/
+VOID *
+GetStmResource(
+ VOID
+ )
+{
+ return mStmResourcesPtr;
+}
+
+/**
+ This function notifies the STM of a resource change.
+
+ @param StmResource BIOS STM resource
+
+**/
+VOID
+NotifyStmResourceChange (
+ VOID *StmResource
+ )
+{
+ UINTN Index;
+ TXT_PROCESSOR_SMM_DESCRIPTOR *Psd;
+
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
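+ //
+ // CpuSaveState points at SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET; back up to
+ // SMBASE and add TXT_SMM_PSD_OFFSET to reach this processor's PSD.
+ //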
+ Psd = (TXT_PROCESSOR_SMM_DESCRIPTOR *)((UINTN)gSmst->CpuSaveState[Index] - SMRAM_SAVE_STATE_MAP_OFFSET + TXT_SMM_PSD_OFFSET);
+ Psd->BiosHwResourceRequirementsPtr = (UINT64)(UINTN)StmResource;
+ }
+ return ;
+}
+
+
+/**
+ This is STM setup BIOS callback.
+**/
+VOID
+EFIAPI
+SmmStmSetup (
+ VOID
+ )
+{
+ mStmState |= EFI_SM_MONITOR_STATE_ACTIVATED;
+}
+
+/**
+ This is STM teardown BIOS callback.
+**/
+VOID
+EFIAPI
+SmmStmTeardown (
+ VOID
+ )
+{
+ mStmState &= ~EFI_SM_MONITOR_STATE_ACTIVATED;
+}
+
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmStm.h b/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmStm.h
new file mode 100644
index 0000000000..92a4dc00eb
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmStm.h
@@ -0,0 +1,176 @@
+/** @file
+ SMM STM support
+
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _SMM_STM_H_
+#define _SMM_STM_H_
+
+#include <Protocol/SmMonitorInit.h>
+
+/**
+
+ Create 4G page table for STM.
+ 2M PAE page table in X64 version.
+
+ @param PageTableBase The page table base in MSEG
+
+**/
+VOID
+StmGen4GPageTable (
+ IN UINTN PageTableBase
+ );
+
+/**
+ This is the SMM exception handler.
+ Consumed by the STM when an exception happens.
+
+ @param Context STM protection exception stack frame
+
+ @return the EBX value for STM reference.
+ EBX = 0: resume SMM guest using register state found on exception stack.
+ EBX = 1 to 0x0F: EBX contains a BIOS error code which the STM must record in the
+ TXT.ERRORCODE register and subsequently reset the system via
+ TXT.CMD.SYS_RESET. The value of the TXT.ERRORCODE register is calculated as
+ follows: TXT.ERRORCODE = (EBX & 0x0F) | STM_CRASH_BIOS_PANIC
+ EBX = 0x10 to 0xFFFFFFFF - reserved, do not use.
+
+**/
+UINT32
+EFIAPI
+SmmStmExceptionHandler (
+ IN OUT STM_PROTECTION_EXCEPTION_STACK_FRAME Context
+ );
+
+
+/**
+
+ Get STM state.
+
+ @return STM state
+
+**/
+EFI_SM_MONITOR_STATE
+EFIAPI
+GetMonitorState (
+ VOID
+ );
+
+/**
+
+ Load STM image to MSEG.
+
+ @param StmImage STM image
+ @param StmImageSize STM image size
+
+ @retval EFI_SUCCESS Load STM to MSEG successfully
+ @retval EFI_BUFFER_TOO_SMALL MSEG is smaller than minimal requirement of STM image
+
+**/
+EFI_STATUS
+EFIAPI
+LoadMonitor (
+ IN EFI_PHYSICAL_ADDRESS StmImage,
+ IN UINTN StmImageSize
+ );
+
+/**
+
+ Add resources in list to database. Allocate new memory areas as needed.
+
+ @param ResourceList A pointer to resource list to be added
+ @param NumEntries Optional number of entries.
+ If 0, list must be terminated by END_OF_RESOURCES.
+
+ @retval EFI_SUCCESS If resources are added
+ @retval EFI_INVALID_PARAMETER If nested procedure detected resource failure
+ @retval EFI_OUT_OF_RESOURCES If nested procedure returned it and we cannot allocate more areas.
+
+**/
+EFI_STATUS
+EFIAPI
+AddPiResource (
+ IN STM_RSC *ResourceList,
+ IN UINT32 NumEntries OPTIONAL
+ );
+
+/**
+
+ Delete resources in list from database.
+
+ @param ResourceList A pointer to resource list to be deleted
+ NULL means delete all resources.
+ @param NumEntries Optional number of entries.
+ If 0, list must be terminated by END_OF_RESOURCES.
+
+ @retval EFI_SUCCESS If resources are deleted
+ @retval EFI_INVALID_PARAMETER If nested procedure detected resource failure
+
+**/
+EFI_STATUS
+EFIAPI
+DeletePiResource (
+ IN STM_RSC *ResourceList,
+ IN UINT32 NumEntries OPTIONAL
+ );
+
+/**
+
+ Get BIOS resources.
+
+ @param ResourceList A pointer to resource list to be filled
+ @param ResourceSize On input, the size of the resource list buffer.
+ On output, the size of the resource list filled,
+ or the size required if the buffer is too small.
+
+ @retval EFI_SUCCESS If resources are returned.
+ @retval EFI_BUFFER_TOO_SMALL If resource list buffer is too small to hold the whole resources.
+
+**/
+EFI_STATUS
+EFIAPI
+GetPiResource (
+ OUT STM_RSC *ResourceList,
+ IN OUT UINT32 *ResourceSize
+ );
+
+/**
+ This function initializes the STM configuration table.
+**/
+VOID
+StmSmmConfigurationTableInit (
+ VOID
+ );
+
+/**
+ This function notifies the STM of a resource change.
+
+ @param StmResource BIOS STM resource
+
+**/
+VOID
+NotifyStmResourceChange (
+ IN VOID *StmResource
+ );
+
+/**
+ This function returns the BIOS STM resource.
+
+ @return BIOS STM resource
+
+**/
+VOID *
+GetStmResource (
+ VOID
+ );
+
+#endif
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.S b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.S
new file mode 100644
index 0000000000..1f9f91ce10
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.S
@@ -0,0 +1,282 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# SmiEntry.S
+#
+# Abstract:
+#
+# Code template of the SMI handler for a particular processor
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerTemplate)
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerSize)
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerOffset)
+ASM_GLOBAL ASM_PFX(gStmSmiCr3)
+ASM_GLOBAL ASM_PFX(gStmSmiStack)
+ASM_GLOBAL ASM_PFX(gStmSmbase)
+ASM_GLOBAL ASM_PFX(gStmXdSupported)
+ASM_GLOBAL ASM_PFX(gStmSmiHandlerIdtr)
+
+.equ MSR_IA32_MISC_ENABLE, 0x1A0
+.equ MSR_EFER, 0xc0000080
+.equ MSR_EFER_XD, 0x800
+
+#
+# Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+#
+.equ DSC_OFFSET, 0xfb00
+.equ DSC_GDTPTR, 0x48
+.equ DSC_GDTSIZ, 0x50
+.equ DSC_CS, 0x14
+.equ DSC_DS, 0x16
+.equ DSC_SS, 0x18
+.equ DSC_OTHERSEG, 0x1a
+#
+# Constants relating to CPU State Save Area
+#
+.equ SSM_DR6, 0xffd0
+.equ SSM_DR7, 0xffc8
+
+.equ PROTECT_MODE_CS, 0x08
+.equ PROTECT_MODE_DS, 0x20
+.equ LONG_MODE_CS, 0x38
+.equ TSS_SEGMENT, 0x40
+.equ GDT_SIZE, 0x50
+
+ .text
+
+ASM_PFX(gcStmSmiHandlerTemplate):
+
+_StmSmiEntryPoint:
+ #
+ # The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
+ # bit addressing mode. And that coincidence has been used in the following
+ # "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
+ # base address register, it is actually BX that is referenced.
+ #
+ .byte 0xbb # mov bx, imm16
+ .word _StmGdtDesc - _StmSmiEntryPoint + 0x8000
+ #
+ # fix GDT descriptor
+ #
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTSIZ
+ .byte 0x48 # dec ax
+ .byte 0x2e
+ movl %eax, (%rdi) # mov cs:[bx], ax
+ .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTPTR
+ .byte 0x2e
+ movw %ax, 2(%rdi)
+ .byte 0x66,0x2e
+ lgdt (%rdi)
+ #
+ # Patch ProtectedMode Segment
+ #
+ .byte 0xb8
+ .word PROTECT_MODE_CS
+ .byte 0x2e
+ movl %eax, -2(%rdi)
+ #
+ # Patch ProtectedMode entry
+ #
+ .byte 0x66, 0xbf # mov edi, SMBASE
+ASM_PFX(gStmSmbase): .space 4
+ lea ((ProtectedMode - _StmSmiEntryPoint) + 0x8000)(%edi), %ax
+ .byte 0x2e
+ movw %ax, -6(%rdi)
+ #
+ # Switch into ProtectedMode
+ #
+ movq %cr0, %rbx
+ .byte 0x66
+ andl $0x9ffafff3, %ebx
+ .byte 0x66
+ orl $0x00000023, %ebx
+
+ movq %rbx, %cr0
+ .byte 0x66, 0xea
+ .space 6
+
+_StmGdtDesc: .space 6
+
+ProtectedMode:
+ movw $PROTECT_MODE_DS, %ax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %fs
+ movl %eax, %gs
+ movl %eax, %ss
+ .byte 0xbc # mov esp, imm32
+ASM_PFX(gStmSmiStack): .space 4
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ .byte 0xb8
+ASM_PFX(gStmSmiCr3): .space 4
+ movq %rax, %cr3
+ movl $0x668,%eax # as cr4.PGE is not set here, refresh cr3
+ movq %rax, %cr4 # in PreModifyMtrrs() to flush TLB.
+# Load TSS
+ subl $8, %esp # reserve room in stack
+ sgdt (%rsp)
+ movl 2(%rsp), %eax # eax = GDT base
+ addl $8, %esp
+ movb $0x89, %dl
+ movb %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag
+ movl $TSS_SEGMENT, %eax
+ ltr %ax
+
+# enable NXE if supported
+ .byte 0xb0 # mov al, imm8
+ASM_PFX(gStmXdSupported): .byte 1
+ cmpb $0, %al
+ jz SkipXd
+#
+# Check XD disable bit
+#
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ subl $4, %esp
+ pushq %rdx # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L13
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L13:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+ jmp XdDone
+SkipXd:
+ subl $8, %esp
+XdDone:
+
+ #
+ # Switch to LongMode
+ #
+ pushq $LONG_MODE_CS # push cs hardcoded here
+ call Base # push return address for retf later
+Base:
+ addl $(LongMode - Base), (%rsp) # offset for far retf, seg is the 1st arg
+
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orb $1,%ah # enable LME
+ wrmsr
+ movq %cr0, %rbx
+ orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
+ movq %rbx, %cr0
+ retf
+LongMode: # long mode (64-bit code) starts here
+ movabsq $ASM_PFX(gStmSmiHandlerIdtr), %rax
+ lidt (%rax)
+ lea (DSC_OFFSET)(%rdi), %ebx
+ movw DSC_DS(%rbx), %ax
+ movl %eax,%ds
+ movw DSC_OTHERSEG(%rbx), %ax
+ movl %eax,%es
+ movl %eax,%fs
+ movl %eax,%gs
+ movw DSC_SS(%rbx), %ax
+ movl %eax,%ss
+
+CommonHandler:
+ movq 8(%rsp), %rbx
+ # Save FP registers
+
+ subq $0x200, %rsp
+ .byte 0x48 # FXSAVE64
+ fxsave (%rsp)
+
+ addq $-0x20, %rsp
+
+ movq %rbx, %rcx
+ movabsq $ASM_PFX(CpuSmmDebugEntry), %rax
+ call *%rax
+
+ movq %rbx, %rcx
+ movabsq $ASM_PFX(SmiRendezvous), %rax
+ call *%rax
+
+ movq %rbx, %rcx
+ movabsq $ASM_PFX(CpuSmmDebugExit), %rax
+ call *%rax
+
+ addq $0x20, %rsp
+
+ #
+ # Restore FP registers
+ #
+ .byte 0x48 # FXRSTOR64
+ fxrstor (%rsp)
+
+ addq $0x200, %rsp
+
+ movabsq $ASM_PFX(gStmXdSupported), %rax
+ movb (%rax), %al
+ cmpb $0, %al
+ jz L16
+ popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L16
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+L16:
+ rsm
+
+_StmSmiHandler:
+#
+# Check XD disable bit
+#
+ xorq %r8, %r8
+ movabsq $ASM_PFX(gStmXdSupported), %rax
+ movb (%rax), %al
+ cmpb $0, %al
+ jz StmXdDone
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ movq %rdx, %r8 # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L14
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L14:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+StmXdDone:
+ pushq %r8
+
+ # The steps below are needed because the STM does not run the code above;
+ # we have to set up IDT/CR0/CR4 here ourselves.
+ movabsq $ASM_PFX(gStmSmiHandlerIdtr), %rax
+ lidt (%rax)
+
+ movq %cr0, %rax
+ orl $0x80010023, %eax
+ movq %rax, %cr0
+ movq %cr4, %rax
+ movl $0x668, %eax # as cr4.PGE is not set here, refresh cr3
+ movq %rax, %cr4 # in PreModifyMtrrs() to flush TLB.
+ # STM init finish
+ jmp CommonHandler
+
+ASM_PFX(gcStmSmiHandlerSize) : .word . - _StmSmiEntryPoint
+ASM_PFX(gcStmSmiHandlerOffset): .word _StmSmiHandler - _StmSmiEntryPoint
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.asm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.asm
new file mode 100644
index 0000000000..ad51e07079
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.asm
@@ -0,0 +1,281 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiEntry.asm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+;
+; Variables referenced by C code
+;
+EXTERNDEF SmiRendezvous:PROC
+EXTERNDEF CpuSmmDebugEntry:PROC
+EXTERNDEF CpuSmmDebugExit:PROC
+EXTERNDEF gcStmSmiHandlerTemplate:BYTE
+EXTERNDEF gcStmSmiHandlerSize:WORD
+EXTERNDEF gcStmSmiHandlerOffset:WORD
+EXTERNDEF gStmSmiCr3:DWORD
+EXTERNDEF gStmSmiStack:DWORD
+EXTERNDEF gStmSmbase:DWORD
+EXTERNDEF gStmXdSupported:BYTE
+EXTERNDEF gStmSmiHandlerIdtr:FWORD
+
+MSR_IA32_MISC_ENABLE EQU 1A0h
+MSR_EFER EQU 0c0000080h
+MSR_EFER_XD EQU 0800h
+
+;
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+;
+DSC_OFFSET EQU 0fb00h
+DSC_GDTPTR EQU 48h
+DSC_GDTSIZ EQU 50h
+DSC_CS EQU 14h
+DSC_DS EQU 16h
+DSC_SS EQU 18h
+DSC_OTHERSEG EQU 1ah
+;
+; Constants relating to CPU State Save Area
+;
+SSM_DR6 EQU 0ffd0h
+SSM_DR7 EQU 0ffc8h
+
+PROTECT_MODE_CS EQU 08h
+PROTECT_MODE_DS EQU 20h
+LONG_MODE_CS EQU 38h
+TSS_SEGMENT EQU 40h
+GDT_SIZE EQU 50h
+
+ .code
+
+gcStmSmiHandlerTemplate LABEL BYTE
+
+_StmSmiEntryPoint:
+ ;
+ ; The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
+ ; bit addressing mode. And that coincidence has been used in the following
+ ; "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
+ ; base address register, it is actually BX that is referenced.
+ ;
+ DB 0bbh ; mov bx, imm16
+ DW offset _StmGdtDesc - _StmSmiEntryPoint + 8000h ; bx = GdtDesc offset
+; fix GDT descriptor
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTSIZ
+ DB 48h ; dec ax
+ DB 2eh
+ mov [rdi], eax ; mov cs:[bx], ax
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTPTR
+ DB 2eh
+ mov [rdi + 2], ax ; mov cs:[bx + 2], eax
+ DB 66h, 2eh
+ lgdt fword ptr [rdi] ; lgdt fword ptr cs:[bx]
+; Patch ProtectedMode Segment
+ DB 0b8h ; mov ax, imm16
+ DW PROTECT_MODE_CS ; set AX for segment directly
+ DB 2eh
+ mov [rdi - 2], eax ; mov cs:[bx - 2], ax
+; Patch ProtectedMode entry
+ DB 66h, 0bfh ; mov edi, SMBASE
+gStmSmbase DD ?
+ lea ax, [edi + (@ProtectedMode - _StmSmiEntryPoint) + 8000h]
+ DB 2eh
+ mov [rdi - 6], ax ; mov cs:[bx - 6], eax
+; Switch into @ProtectedMode
+ mov rbx, cr0
+ DB 66h
+ and ebx, 9ffafff3h
+ DB 66h
+ or ebx, 00000023h
+
+ mov cr0, rbx
+ DB 66h, 0eah
+ DD ?
+ DW ?
+
+_StmGdtDesc FWORD ?
+@ProtectedMode:
+ mov ax, PROTECT_MODE_DS
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
+ DB 0bch ; mov esp, imm32
+gStmSmiStack DD ?
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ DB 0b8h ; mov eax, offset gStmSmiCr3
+gStmSmiCr3 DD ?
+ mov cr3, rax
+ mov eax, 668h ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
+; Load TSS
+ sub esp, 8 ; reserve room in stack
+ sgdt fword ptr [rsp]
+ mov eax, [rsp + 2] ; eax = GDT base
+ add esp, 8
+ mov dl, 89h
+ mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+
+; enable NXE if supported
+ DB 0b0h ; mov al, imm8
+gStmXdSupported DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ sub esp, 4
+ push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 8
+@XdDone:
+
+; Switch into @LongMode
+ push LONG_MODE_CS ; push cs hardcoded here
+ call Base ; push return address for retf later
+Base:
+ add dword ptr [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
+
+ mov ecx, MSR_EFER
+ rdmsr
+ or ah, 1 ; enable LME
+ wrmsr
+ mov rbx, cr0
+ or ebx, 080010023h ; enable paging + WP + NE + MP + PE
+ mov cr0, rbx
+ retf
+@LongMode: ; long mode (64-bit code) starts here
+ mov rax, offset gStmSmiHandlerIdtr
+ lidt fword ptr [rax]
+ lea ebx, [rdi + DSC_OFFSET]
+ mov ax, [rbx + DSC_DS]
+ mov ds, eax
+ mov ax, [rbx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [rbx + DSC_SS]
+ mov ss, eax
+
+CommonHandler:
+ mov rbx, [rsp + 0x08] ; rbx <- CpuIndex
+
+ ;
+ ; Save FP registers
+ ;
+ sub rsp, 200h
+ DB 48h ; FXSAVE64
+ fxsave [rsp]
+
+ add rsp, -20h
+
+ mov rcx, rbx
+ mov rax, CpuSmmDebugEntry
+ call rax
+
+ mov rcx, rbx
+ mov rax, SmiRendezvous ; rax <- absolute addr of SmiRendezvous
+ call rax
+
+ mov rcx, rbx
+ mov rax, CpuSmmDebugExit
+ call rax
+
+ add rsp, 20h
+
+ ;
+ ; Restore FP registers
+ ;
+ DB 48h ; FXRSTOR64
+ fxrstor [rsp]
+
+ add rsp, 200h
+
+ mov rax, offset gStmXdSupported
+ mov al, [rax]
+ cmp al, 0
+ jz @f
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+@@:
+ rsm
+
+_StmSmiHandler:
+;
+; Check XD disable bit
+;
+ xor r8, r8
+ mov rax, offset gStmXdSupported
+ mov al, [rax]
+ cmp al, 0
+ jz @StmXdDone
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone:
+ push r8
+
+ ; The steps below are needed because the STM does not run the code above;
+ ; we have to set up IDT/CR0/CR4 here ourselves.
+ mov rax, offset gStmSmiHandlerIdtr
+ lidt fword ptr [rax]
+
+ mov rax, cr0
+ or eax, 80010023h ; enable paging + WP + NE + MP + PE
+ mov cr0, rax
+ mov rax, cr4
+ mov eax, 668h ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
+ ; STM init finish
+ jmp CommonHandler
+
+gcStmSmiHandlerSize DW $ - _StmSmiEntryPoint
+gcStmSmiHandlerOffset DW _StmSmiHandler - _StmSmiEntryPoint
+
+ END
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.nasm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.nasm
new file mode 100644
index 0000000000..c801591fc7
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiEntry.nasm
@@ -0,0 +1,263 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiEntry.nasm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+;
+; Variables referenced by C code
+;
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+;
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+;
+%define DSC_OFFSET 0xfb00
+%define DSC_GDTPTR 0x48
+%define DSC_GDTSIZ 0x50
+%define DSC_CS 0x14
+%define DSC_DS 0x16
+%define DSC_SS 0x18
+%define DSC_OTHERSEG 0x1a
+;
+; Constants relating to CPU State Save Area
+;
+%define SSM_DR6 0xffd0
+%define SSM_DR7 0xffc8
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+%define LONG_MODE_CS 0x38
+%define TSS_SEGMENT 0x40
+%define GDT_SIZE 0x50
+
+extern ASM_PFX(SmiRendezvous)
+extern ASM_PFX(gStmSmiHandlerIdtr)
+extern ASM_PFX(CpuSmmDebugEntry)
+extern ASM_PFX(CpuSmmDebugExit)
+
+global ASM_PFX(gStmSmbase)
+global ASM_PFX(gStmXdSupported)
+global ASM_PFX(gStmSmiStack)
+global ASM_PFX(gStmSmiCr3)
+global ASM_PFX(gcStmSmiHandlerTemplate)
+global ASM_PFX(gcStmSmiHandlerSize)
+global ASM_PFX(gcStmSmiHandlerOffset)
+
+ DEFAULT REL
+ SECTION .text
+
+BITS 16
+ASM_PFX(gcStmSmiHandlerTemplate):
+_StmSmiEntryPoint:
+ mov bx, _StmGdtDesc - _StmSmiEntryPoint + 0x8000
+ mov ax,[cs:DSC_OFFSET + DSC_GDTSIZ]
+ dec ax
+ mov [cs:bx], ax
+ mov eax, [cs:DSC_OFFSET + DSC_GDTPTR]
+ mov [cs:bx + 2], eax
+o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
+ mov ax, PROTECT_MODE_CS
+ mov [cs:bx-0x2],ax
+ DB 0x66, 0xbf ; mov edi, SMBASE
+ASM_PFX(gStmSmbase): DD 0
+ lea eax, [edi + (@ProtectedMode - _StmSmiEntryPoint) + 0x8000]
+ mov [cs:bx-0x6],eax
+ mov ebx, cr0
+ and ebx, 0x9ffafff3
+ or ebx, 0x23
+ mov cr0, ebx
+ jmp dword 0x0:0x0
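+ ; The selector and offset of the far jump above are placeholders patched
+ ; at run time through cs:[bx - 2] and cs:[bx - 6] by the code above.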
+_StmGdtDesc:
+ DW 0
+ DD 0
+
+BITS 32
+@ProtectedMode:
+ mov ax, PROTECT_MODE_DS
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax
+ DB 0xbc ; mov esp, imm32
+ASM_PFX(gStmSmiStack): DD 0
+ jmp ProtFlatMode
+
+BITS 64
+ProtFlatMode:
+ DB 0xb8 ; mov eax, offset gStmSmiCr3
+ASM_PFX(gStmSmiCr3): DD 0
+ mov cr3, rax
+ mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
+; Load TSS
+ sub esp, 8 ; reserve room in stack
+ sgdt [rsp]
+ mov eax, [rsp + 2] ; eax = GDT base
+ add esp, 8
+ mov dl, 0x89
+ mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+
+; enable NXE if supported
+ DB 0xb0 ; mov al, imm8
+ASM_PFX(gStmXdSupported): DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ sub esp, 4
+ push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .0
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.0:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 8
+@XdDone:
+
+; Switch into @LongMode
+ push LONG_MODE_CS ; push cs hardcoded here
+ call Base ; push return address for retf later
+Base:
+ add dword [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
+
+ mov ecx, MSR_EFER
+ rdmsr
+ or ah, 1 ; enable LME
+ wrmsr
+ mov rbx, cr0
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, rbx
+ retf
+@LongMode: ; long mode (64-bit code) starts here
+ mov rax, ASM_PFX(gStmSmiHandlerIdtr)
+ lidt [rax]
+ lea ebx, [rdi + DSC_OFFSET]
+ mov ax, [rbx + DSC_DS]
+ mov ds, eax
+ mov ax, [rbx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [rbx + DSC_SS]
+ mov ss, eax
+
+CommonHandler:
+ mov rbx, [rsp + 0x08] ; rbx <- CpuIndex
+
+ ;
+ ; Save FP registers
+ ;
+ sub rsp, 0x200
+ DB 0x48 ; FXSAVE64
+ fxsave [rsp]
+
+ add rsp, -0x20
+
+ mov rcx, rbx
+ mov rax, CpuSmmDebugEntry
+ call rax
+
+ mov rcx, rbx
+ mov rax, SmiRendezvous ; rax <- absolute addr of SmiRendezvous
+ call rax
+
+ mov rcx, rbx
+ mov rax, CpuSmmDebugExit
+ call rax
+
+ add rsp, 0x20
+
+ ;
+ ; Restore FP registers
+ ;
+ DB 0x48 ; FXRSTOR64
+ fxrstor [rsp]
+
+ add rsp, 0x200
+
+ mov rax, ASM_PFX(gStmXdSupported)
+ mov al, [rax]
+ cmp al, 0
+ jz .1
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.1:
+ rsm
+
+_StmSmiHandler:
+;
+; Check XD disable bit
+;
+ xor r8, r8
+ mov rax, ASM_PFX(gStmXdSupported)
+ mov al, [rax]
+ cmp al, 0
+ jz @StmXdDone
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .0
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.0:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone:
+ push r8
+
+ ; The steps below are needed because the STM does not run the code above;
+ ; we have to set up IDT/CR0/CR4 here ourselves.
+
+ mov rax, ASM_PFX(gStmSmiHandlerIdtr)
+ lidt [rax]
+
+ mov rax, cr0
+ or eax, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, rax
+ mov rax, cr4
+ mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
+ ; STM init finish
+ jmp CommonHandler
+
+ASM_PFX(gcStmSmiHandlerSize) : DW $ - _StmSmiEntryPoint
+ASM_PFX(gcStmSmiHandlerOffset) : DW _StmSmiHandler - _StmSmiEntryPoint
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.S b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.S
new file mode 100644
index 0000000000..4d0cd9ac6f
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.S
@@ -0,0 +1,178 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# SmiException.S
+#
+# Abstract:
+#
+# Exception handlers used in SM mode
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(gcStmPsd)
+
+ASM_GLOBAL ASM_PFX(SmmStmExceptionHandler)
+ASM_GLOBAL ASM_PFX(SmmStmSetup)
+ASM_GLOBAL ASM_PFX(SmmStmTeardown)
+
+.equ CODE_SEL, 0x38
+.equ DATA_SEL, 0x20
+.equ TR_SEL, 0x40
+
+.equ MSR_IA32_MISC_ENABLE, 0x1A0
+.equ MSR_EFER, 0x0c0000080
+.equ MSR_EFER_XD, 0x0800
+
+ .data
+
+#
+# This structure serves as a template for all processors.
+#
+ASM_PFX(gcStmPsd):
+ .ascii "TXTPSSIG"
+ .word PSD_SIZE
+ .word 1 # Version
+ .long 0 # LocalApicId
+ .byte 0xF # Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr
+ .byte 0 # BIOS to STM
+ .byte 0 # STM to BIOS
+ .byte 0
+ .word CODE_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word TR_SEL
+ .word 0
+ .quad 0 # SmmCr3
+ .quad ASM_PFX(_OnStmSetup)
+ .quad ASM_PFX(_OnStmTeardown)
+ .quad 0 # SmmSmiHandlerRip - SMM guest entrypoint
+ .quad 0 # SmmSmiHandlerRsp
+ .quad 0
+ .long 0
+ .long 0x80010100 # RequiredStmSmmRevId
+ .quad ASM_PFX(_OnException)
+ .quad 0 # ExceptionStack
+ .word DATA_SEL
+ .word 0x1F # ExceptionFilter
+ .long 0
+ .quad 0
+ .quad 0 # BiosHwResourceRequirementsPtr
+ .quad 0 # AcpiRsdp
+ .byte 0 # PhysicalAddressBits
+.equ PSD_SIZE, . - ASM_PFX(gcStmPsd)
+
+ .text
+#------------------------------------------------------------------------------
+# SMM Exception handlers
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(_OnException)
+ASM_PFX(_OnException):
+ movq %rsp, %rcx
+ subq $0x28, %rsp
+ call ASM_PFX(SmmStmExceptionHandler)
+ addq $0x28, %rsp
+ movl %eax, %ebx
+ movl $4, %eax
+ .byte 0xf, 0x1, 0xc1 # VMCALL
+ jmp .
+
+ASM_GLOBAL ASM_PFX(_OnStmSetup)
+ASM_PFX(_OnStmSetup):
+#
+# Check XD disable bit
+#
+ xorq %r8, %r8
+ movabsq $ASM_PFX(gStmXdSupported), %rax
+ movb (%rax), %al
+ cmpb $0, %al
+ jz StmXdDone1
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ movq %rdx, %r8 # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L13
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L13:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+StmXdDone1:
+ pushq %r8
+
+ subq $0x20, %rsp
+ call ASM_PFX(SmmStmSetup)
+ addq $0x20, %rsp
+
+ movabsq $ASM_PFX(gStmXdSupported), %rax
+ movb (%rax), %al
+ cmpb $0, %al
+ jz L14
+ popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L14
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+L14:
+
+ rsm
+
+ASM_GLOBAL ASM_PFX(_OnStmTeardown)
+ASM_PFX(_OnStmTeardown):
+#
+# Check XD disable bit
+#
+ xorq %r8, %r8
+ movabsq $ASM_PFX(gStmXdSupported), %rax
+ movb (%rax), %al
+ cmpb $0, %al
+ jz StmXdDone2
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ movq %rdx, %r8 # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L15
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L15:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+StmXdDone2:
+ pushq %r8
+
+ subq $0x20, %rsp
+ call ASM_PFX(SmmStmTeardown)
+ addq $0x20, %rsp
+
+ movabsq $ASM_PFX(gStmXdSupported), %rax
+ movb (%rax), %al
+ cmpb $0, %al
+ jz L16
+ popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L16
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+L16:
+
+ rsm
+
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.asm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.asm
new file mode 100644
index 0000000000..33e9860160
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.asm
@@ -0,0 +1,178 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiException.asm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+EXTERNDEF gcStmPsd:BYTE
+
+EXTERNDEF SmmStmExceptionHandler:PROC
+EXTERNDEF SmmStmSetup:PROC
+EXTERNDEF SmmStmTeardown:PROC
+EXTERNDEF gStmXdSupported:BYTE
+
+CODE_SEL EQU 38h
+DATA_SEL EQU 20h
+TR_SEL EQU 40h
+
+MSR_IA32_MISC_ENABLE EQU 1A0h
+MSR_EFER EQU 0c0000080h
+MSR_EFER_XD EQU 0800h
+
+ .data
+
+;
+; This structure serves as a template for all processors.
+;
+gcStmPsd LABEL BYTE
+ DB 'TXTPSSIG'
+ DW PSD_SIZE
+ DW 1 ; Version
+ DD 0 ; LocalApicId
+ DB 0Fh ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr
+ DB 0 ; BIOS to STM
+ DB 0 ; STM to BIOS
+ DB 0
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW TR_SEL
+ DW 0
+ DQ 0 ; SmmCr3
+ DQ _OnStmSetup
+ DQ _OnStmTeardown
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint
+ DQ 0 ; SmmSmiHandlerRsp
+ DQ 0
+ DD 0
+ DD 80010100h ; RequiredStmSmmRevId
+ DQ _OnException
+ DQ 0 ; ExceptionStack
+ DW DATA_SEL
+ DW 01Fh ; ExceptionFilter
+ DD 0
+ DQ 0
+ DQ 0 ; BiosHwResourceRequirementsPtr
+ DQ 0 ; AcpiRsdp
+ DB 0 ; PhysicalAddressBits
+PSD_SIZE = $ - offset gcStmPsd
+
+ .code
+;------------------------------------------------------------------------------
+; SMM Exception handlers
+;------------------------------------------------------------------------------
+_OnException PROC
+ mov rcx, rsp
+ add rsp, -28h
+ call SmmStmExceptionHandler
+ add rsp, 28h
+ mov ebx, eax
+ mov eax, 4
+ DB 0fh, 01h, 0c1h ; VMCALL
+ jmp $
+_OnException ENDP
+
+_OnStmSetup PROC
+;
+; Check XD disable bit
+;
+ xor r8, r8
+ mov rax, offset gStmXdSupported
+ mov al, [rax]
+ cmp al, 0
+ jz @StmXdDone1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone1:
+ push r8
+
+ add rsp, -20h
+ call SmmStmSetup
+ add rsp, 20h
+
+ mov rax, offset gStmXdSupported
+ mov al, [rax]
+ cmp al, 0
+ jz @f
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+@@:
+
+ rsm
+_OnStmSetup ENDP
+
+_OnStmTeardown PROC
+;
+; Check XD disable bit
+;
+ xor r8, r8
+ mov rax, offset gStmXdSupported
+ mov al, [rax]
+ cmp al, 0
+ jz @StmXdDone2
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone2:
+ push r8
+
+ add rsp, -20h
+ call SmmStmTeardown
+ add rsp, 20h
+
+ mov rax, offset gStmXdSupported
+ mov al, [rax]
+ cmp al, 0
+ jz @f
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+@@:
+
+ rsm
+_OnStmTeardown ENDP
+
+ END
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.nasm b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.nasm
new file mode 100644
index 0000000000..fe1bf3f165
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmiException.nasm
@@ -0,0 +1,179 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016, Intel Corporation. All rights reserved.
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiException.nasm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+global ASM_PFX(gcStmPsd)
+
+extern ASM_PFX(SmmStmExceptionHandler)
+extern ASM_PFX(SmmStmSetup)
+extern ASM_PFX(SmmStmTeardown)
+extern ASM_PFX(gStmXdSupported)
+extern ASM_PFX(gStmSmiHandlerIdtr)
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+CODE_SEL equ 0x38
+DATA_SEL equ 0x20
+TR_SEL equ 0x40
+
+ SECTION .data
+
+;
+; This structure serves as a template for all processors.
+;
+ASM_PFX(gcStmPsd):
+ DB 'TXTPSSIG'
+ DW PSD_SIZE
+ DW 1 ; Version
+ DD 0 ; LocalApicId
+ DB 0x0F ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr
+ DB 0 ; BIOS to STM
+ DB 0 ; STM to BIOS
+ DB 0
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW TR_SEL
+ DW 0
+ DQ 0 ; SmmCr3
+ DQ ASM_PFX(OnStmSetup)
+ DQ ASM_PFX(OnStmTeardown)
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint
+ DQ 0 ; SmmSmiHandlerRsp
+ DQ 0
+ DD 0
+ DD 0x80010100 ; RequiredStmSmmRevId
+ DQ ASM_PFX(OnException)
+ DQ 0 ; ExceptionStack
+ DW DATA_SEL
+ DW 0x01F ; ExceptionFilter
+ DD 0
+ DQ 0
+ DQ 0 ; BiosHwResourceRequirementsPtr
+ DQ 0 ; AcpiRsdp
+ DB 0 ; PhysicalAddressBits
+PSD_SIZE equ $ - ASM_PFX(gcStmPsd)
+
+ DEFAULT REL
+ SECTION .text
+;------------------------------------------------------------------------------
+; SMM Exception handlers
+;------------------------------------------------------------------------------
+global ASM_PFX(OnException)
+ASM_PFX(OnException):
+ mov rcx, rsp
+ add rsp, -0x28
+ call ASM_PFX(SmmStmExceptionHandler)
+ add rsp, 0x28
+ mov ebx, eax
+ mov eax, 4
+ DB 0x0f, 0x01, 0x0c1 ; VMCALL
+ jmp $
+
+global ASM_PFX(OnStmSetup)
+ASM_PFX(OnStmSetup):
+;
+; Check XD disable bit
+;
+ xor r8, r8
+ mov rax, ASM_PFX(gStmXdSupported)
+ mov al, [rax]
+ cmp al, 0
+ jz @StmXdDone1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .01
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.01:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone1:
+ push r8
+
+ add rsp, -0x20
+ call ASM_PFX(SmmStmSetup)
+ add rsp, 0x20
+
+ mov rax, ASM_PFX(gStmXdSupported)
+ mov al, [rax]
+ cmp al, 0
+ jz .11
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .11
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.11:
+ rsm
+
+global ASM_PFX(OnStmTeardown)
+ASM_PFX(OnStmTeardown):
+;
+; Check XD disable bit
+;
+ xor r8, r8
+ mov rax, ASM_PFX(gStmXdSupported)
+ mov al, [rax]
+ cmp al, 0
+ jz @StmXdDone2
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .02
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.02:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone2:
+ push r8
+
+ add rsp, -0x20
+ call ASM_PFX(SmmStmTeardown)
+ add rsp, 0x20
+
+ mov rax, ASM_PFX(gStmXdSupported)
+ mov al, [rax]
+ cmp al, 0
+ jz .12
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .12
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.12:
+ rsm
+
diff --git a/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmmStmSupport.c b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmmStmSupport.c
new file mode 100644
index 0000000000..6681234783
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmmStmSupport.c
@@ -0,0 +1,95 @@
+/** @file
+ SMM STM support functions
+
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <PiSmm.h>
+#include <Library/DebugLib.h>
+
+#include "SmmStm.h"
+
+///
+/// Page Table Entry
+///
+#define IA32_PG_P BIT0
+#define IA32_PG_RW BIT1
+#define IA32_PG_PS BIT7
+
+/**
+
+ Create 4G page table for STM.
+ 2M PAE page table in X64 version.
+
+ @param PageTableBase The page table base in MSEG
+
+**/
+VOID
+StmGen4GPageTable (
+ IN UINTN PageTableBase
+ )
+{
+ UINTN Index;
+ UINTN SubIndex;
+ UINT64 *Pde;
+ UINT64 *Pte;
+ UINT64 *Pml4;
+
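+ //
+ // Layout in MSEG: one PML4 page, one page of directory-pointer entries
+ // (filled through Pde below), and four page directories whose 2 MB PS
+ // entries map the low 4 GB - six 4 KB pages in total.
+ //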
+ Pml4 = (UINT64*)(UINTN)PageTableBase;
+ PageTableBase += SIZE_4KB;
+ *Pml4 = PageTableBase | IA32_PG_RW | IA32_PG_P;
+
+ Pde = (UINT64*)(UINTN)PageTableBase;
+ PageTableBase += SIZE_4KB;
+ Pte = (UINT64 *)(UINTN)PageTableBase;
+
+ for (Index = 0; Index < 4; Index++) {
+ *Pde = PageTableBase | IA32_PG_RW | IA32_PG_P;
+ Pde++;
+ PageTableBase += SIZE_4KB;
+
+ for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof (*Pte); SubIndex++) {
+ *Pte = (((Index << 9) + SubIndex) << 21) | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
+ Pte++;
+ }
+ }
+}
+
+/**
+ This is the SMM exception handler.
+ Consumed by the STM when an exception happens.
+
+ @param Context STM protection exception stack frame
+
+ @return the EBX value for STM reference.
+ EBX = 0: resume SMM guest using register state found on exception stack.
+ EBX = 1 to 0x0F: EBX contains a BIOS error code which the STM must record in the
+ TXT.ERRORCODE register and subsequently reset the system via
+ TXT.CMD.SYS_RESET. The value of the TXT.ERRORCODE register is calculated as
+ follows: TXT.ERRORCODE = (EBX & 0x0F) | STM_CRASH_BIOS_PANIC
+ EBX = 0x10 to 0xFFFFFFFF - reserved, do not use.
+
+**/
+UINT32
+EFIAPI
+SmmStmExceptionHandler (
+ IN OUT STM_PROTECTION_EXCEPTION_STACK_FRAME Context
+ )
+{
+ // TBD - SmmStmExceptionHandler, record information
+ DEBUG ((DEBUG_ERROR, "SmmStmExceptionHandler ...\n"));
+ //
+ // Skip this instruction and continue;
+ //
+ Context.X64StackFrame->Rip += Context.X64StackFrame->VmcsExitInstructionLength;
+
+ return 0;
+}
diff --git a/UefiCpuPkg/UefiCpuPkg.dsc b/UefiCpuPkg/UefiCpuPkg.dsc
index 624641f816..3922f2ddc8 100644
--- a/UefiCpuPkg/UefiCpuPkg.dsc
+++ b/UefiCpuPkg/UefiCpuPkg.dsc
@@ -62,6 +62,7 @@
PeCoffGetEntryPointLib|MdePkg/Library/BasePeCoffGetEntryPointLib/BasePeCoffGetEntryPointLib.inf
PeCoffExtraActionLib|MdePkg/Library/BasePeCoffExtraActionLibNull/BasePeCoffExtraActionLibNull.inf
MicrocodeFlashAccessLib|UefiCpuPkg/Feature/Capsule/Library/MicrocodeFlashAccessLibNull/MicrocodeFlashAccessLibNull.inf
+ TpmMeasurementLib|MdeModulePkg/Library/TpmMeasurementLibNull/TpmMeasurementLibNull.inf
[LibraryClasses.common.SEC]
PlatformSecLib|UefiCpuPkg/Library/PlatformSecLibNull/PlatformSecLibNull.inf
@@ -127,10 +128,17 @@
UefiCpuPkg/Library/PlatformSecLibNull/PlatformSecLibNull.inf
UefiCpuPkg/Library/SmmCpuPlatformHookLibNull/SmmCpuPlatformHookLibNull.inf
UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLib.inf
+ UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf
UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationPei.inf
UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationSmm.inf
UefiCpuPkg/SecCore/SecCore.inf
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+ UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf {
+ <Defines>
+ FILE_GUID = D1D74FE9-7A4E-41D3-A0B3-67F13AD34D94
+ <LibraryClasses>
+ SmmCpuFeaturesLib|UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf
+ }
UefiCpuPkg/Universal/Acpi/S3Resume2Pei/S3Resume2Pei.inf
UefiCpuPkg/Feature/Capsule/MicrocodeUpdateDxe/MicrocodeUpdateDxe.inf