From c2fd60f0716a5bd6171329c493daa8df47e99acc Mon Sep 17 00:00:00 2001 From: qhuang8 Date: Thu, 20 Aug 2009 08:04:40 +0000 Subject: [PATCH] Update to make end-of-line consistent for all source files in MdePkg. There are no other updates besides that change. git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@9155 6f19259b-4bc3-4df7-8a09-765794883524 --- UefiCpuPkg/CpuDxe/Ia32/CpuAsm.S | 772 ++++++++++++++++---------------- UefiCpuPkg/CpuDxe/Ia32/IvtAsm.S | 130 +++--- UefiCpuPkg/CpuDxe/X64/CpuAsm.S | 718 ++++++++++++++--------------- 3 files changed, 810 insertions(+), 810 deletions(-) diff --git a/UefiCpuPkg/CpuDxe/Ia32/CpuAsm.S b/UefiCpuPkg/CpuDxe/Ia32/CpuAsm.S index 8ff467dd8c..fc00f4ff2b 100755 --- a/UefiCpuPkg/CpuDxe/Ia32/CpuAsm.S +++ b/UefiCpuPkg/CpuDxe/Ia32/CpuAsm.S @@ -1,386 +1,386 @@ -#------------------------------------------------------------------------------ -#* -#* Copyright 2006 - 2009, Intel Corporation -#* All rights reserved. This program and the accompanying materials -#* are licensed and made available under the terms and conditions of the BSD License -#* which accompanies this distribution. The full text of the license may be found at -#* http://opensource.org/licenses/bsd-license.php -#* -#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, -#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. -#* -#* CpuAsm.S -#* -#* Abstract: -#* -#------------------------------------------------------------------------------ - - -#.MMX -#.XMM - -#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions - - -# -# point to the external interrupt vector table -# -ExternalVectorTablePtr: - .byte 0, 0, 0, 0 - -ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr) -ASM_PFX(InitializeExternalVectorTablePtr): - movl 4(%esp), %eax - movl %eax, ExternalVectorTablePtr - ret - -#------------------------------------------------------------------------------ -# VOID -# SetCodeSelector ( -# UINT16 Selector -# ); -#------------------------------------------------------------------------------ -ASM_GLOBAL ASM_PFX(SetCodeSelector) -ASM_PFX(SetCodeSelector): - movl 4(%esp), %ecx - subl $0x10, %esp - leal setCodeSelectorLongJump, %eax - movl %eax, (%esp) - movw %cx, 4(%esp) - .byte 0xFF, 0x2C, 0x24 # jmp *(%esp) note:(FWORD jmp) -setCodeSelectorLongJump: - addl $0x10, %esp - ret - -#------------------------------------------------------------------------------ -# VOID -# SetDataSelectors ( -# UINT16 Selector -# ); -#------------------------------------------------------------------------------ -ASM_GLOBAL ASM_PFX(SetDataSelectors) -ASM_PFX(SetDataSelectors): - movl 4(%esp), %ecx - movw %cx, %ss - movw %cx, %ds - movw %cx, %es - movw %cx, %fs - movw %cx, %gs - ret - -#---------------------------------------; -# CommonInterruptEntry ; -#---------------------------------------; -# The follow algorithm is used for the common interrupt routine. - -ASM_GLOBAL ASM_PFX(CommonInterruptEntry) -ASM_PFX(CommonInterruptEntry): - cli - # - # All interrupt handlers are invoked through interrupt gates, so - # IF flag automatically cleared at the entry point - # - - # - # Calculate vector number - # - # Get the return address of call, actually, it is the - # address of vector number. - # - xchgl (%esp), %ecx - movw (%ecx), %cx - andl $0x0FFFF, %ecx - cmpl $32, %ecx # Intel reserved vector for exceptions? 
- jae NoErrorCode - bt %ecx, ASM_PFX(mErrorCodeFlag) - jc HasErrorCode - -NoErrorCode: - - # - # Stack: - # +---------------------+ - # + EFlags + - # +---------------------+ - # + CS + - # +---------------------+ - # + EIP + - # +---------------------+ - # + ECX + - # +---------------------+ <-- ESP - # - # Registers: - # ECX - Vector Number - # - - # - # Put Vector Number on stack - # - pushl %ecx - - # - # Put 0 (dummy) error code on stack, and restore ECX - # - xorl %ecx, %ecx # ECX = 0 - xchgl 4(%esp), %ecx - - jmp ErrorCodeAndVectorOnStack - -HasErrorCode: - - # - # Stack: - # +---------------------+ - # + EFlags + - # +---------------------+ - # + CS + - # +---------------------+ - # + EIP + - # +---------------------+ - # + Error Code + - # +---------------------+ - # + ECX + - # +---------------------+ <-- ESP - # - # Registers: - # ECX - Vector Number - # - - # - # Put Vector Number on stack and restore ECX - # - xchgl (%esp), %ecx - - # - # Fall through to join main routine code - # at ErrorCodeAndVectorOnStack - # -CommonInterruptEntry_al_0000: - jmp CommonInterruptEntry_al_0000 - -ErrorCodeAndVectorOnStack: - pushl %ebp - movl %esp, %ebp - - # - # Stack: - # +---------------------+ - # + EFlags + - # +---------------------+ - # + CS + - # +---------------------+ - # + EIP + - # +---------------------+ - # + Error Code + - # +---------------------+ - # + Vector Number + - # +---------------------+ - # + EBP + - # +---------------------+ <-- EBP - # - - # - # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32 - # is 16-byte aligned - # - andl $0x0fffffff0, %esp - subl $12, %esp - -#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax; - pushl %eax - pushl %ecx - pushl %edx - pushl %ebx - leal 24(%ebp), %ecx - pushl %ecx # ESP - pushl (%ebp) # EBP - pushl %esi - pushl %edi - -#; UINT32 Gs, Fs, Es, Ds, Cs, Ss; - movl %ss, %eax - pushl %eax - movzwl 16(%ebp), %eax - pushl %eax - movl %ds, %eax - pushl %eax - movl %es, %eax - pushl %eax - movl %fs, %eax - pushl %eax - movl %gs, %eax - pushl %eax - -#; UINT32 Eip; - movl 12(%ebp), %eax - pushl %eax - -#; UINT32 Gdtr[2], Idtr[2]; - subl $8, %esp - sidt (%esp) - movl 2(%esp), %eax - xchgl (%esp), %eax - andl $0x0FFFF, %eax - movl %eax, 4(%esp) - - subl $8, %esp - sgdt (%esp) - movl 2(%esp), %eax - xchgl (%esp), %eax - andl $0x0FFFF, %eax - movl %eax, 4(%esp) - -#; UINT32 Ldtr, Tr; - xorl %eax, %eax - str %ax - pushl %eax - sldt %ax - pushl %eax - -#; UINT32 EFlags; - movl 20(%ebp), %eax - pushl %eax - -#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4; - movl %cr4, %eax - orl $0x208, %eax - movl %eax, %cr4 - pushl %eax - movl %cr3, %eax - pushl %eax - movl %cr2, %eax - pushl %eax - xorl %eax, %eax - pushl %eax - movl %cr0, %eax - pushl %eax - -#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; - movl %dr7, %eax - pushl %eax -#; clear Dr7 while executing debugger itself - xorl %eax, %eax - movl %eax, %dr7 - - movl %dr6, %eax - pushl %eax -#; insure all status bits in dr6 are clear... - xorl %eax, %eax - movl %eax, %dr6 - - movl %dr3, %eax - pushl %eax - movl %dr2, %eax - pushl %eax - movl %dr1, %eax - pushl %eax - movl %dr0, %eax - pushl %eax - -#; FX_SAVE_STATE_IA32 FxSaveState; - subl $512, %esp - movl %esp, %edi - .byte 0x0f, 0x0ae, 0x07 #fxsave [edi] - -#; UINT32 ExceptionData; - pushl 8(%ebp) - -#; call into exception handler - movl ExternalVectorTablePtr, %eax # get the interrupt vectors base - orl %eax, %eax # NULL? - jz nullExternalExceptionHandler - - mov 4(%ebp), %ecx - movl (%eax,%ecx,4), %eax - orl %eax, %eax # NULL? 
- jz nullExternalExceptionHandler - -#; Prepare parameter and call - movl %esp, %edx - pushl %edx - movl 4(%ebp), %edx - pushl %edx - - # - # Call External Exception Handler - # - call *%eax - addl $8, %esp - -nullExternalExceptionHandler: - - cli -#; UINT32 ExceptionData; - addl $4, %esp - -#; FX_SAVE_STATE_IA32 FxSaveState; - movl %esp, %esi - .byte 0x0f, 0x0ae, 0x0e # fxrstor [esi] - addl $512, %esp - -#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; - popl %eax - movl %eax, %dr0 - popl %eax - movl %eax, %dr1 - popl %eax - movl %eax, %dr2 - popl %eax - movl %eax, %dr3 -#; skip restore of dr6. We cleared dr6 during the context save. - addl $4, %esp - popl %eax - movl %eax, %dr7 - -#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4; - popl %eax - movl %eax, %cr0 - addl $4, %esp # not for Cr1 - popl %eax - movl %eax, %cr2 - popl %eax - movl %eax, %cr3 - popl %eax - movl %eax, %cr4 - -#; UINT32 EFlags; - popl 20(%ebp) - -#; UINT32 Ldtr, Tr; -#; UINT32 Gdtr[2], Idtr[2]; -#; Best not let anyone mess with these particular registers... - addl $24, %esp - -#; UINT32 Eip; - popl 12(%ebp) - -#; UINT32 Gs, Fs, Es, Ds, Cs, Ss; -#; NOTE - modified segment registers could hang the debugger... We -#; could attempt to insulate ourselves against this possibility, -#; but that poses risks as well. -#; - popl %gs - popl %fs - popl %es - popl %ds - popl 16(%ebp) - popl %ss - -#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax; - popl %edi - popl %esi - addl $4, %esp # not for ebp - addl $4, %esp # not for esp - popl %ebx - popl %edx - popl %ecx - popl %eax - - movl %ebp, %esp - popl %ebp - addl $8, %esp - iretl - - -#END - +#------------------------------------------------------------------------------ +#* +#* Copyright 2006 - 2009, Intel Corporation +#* All rights reserved. This program and the accompanying materials +#* are licensed and made available under the terms and conditions of the BSD License +#* which accompanies this distribution. The full text of the license may be found at +#* http://opensource.org/licenses/bsd-license.php +#* +#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
+#* +#* CpuAsm.S +#* +#* Abstract: +#* +#------------------------------------------------------------------------------ + + +#.MMX +#.XMM + +#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions + + +# +# point to the external interrupt vector table +# +ExternalVectorTablePtr: + .byte 0, 0, 0, 0 + +ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr) +ASM_PFX(InitializeExternalVectorTablePtr): + movl 4(%esp), %eax + movl %eax, ExternalVectorTablePtr + ret + +#------------------------------------------------------------------------------ +# VOID +# SetCodeSelector ( +# UINT16 Selector +# ); +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(SetCodeSelector) +ASM_PFX(SetCodeSelector): + movl 4(%esp), %ecx + subl $0x10, %esp + leal setCodeSelectorLongJump, %eax + movl %eax, (%esp) + movw %cx, 4(%esp) + .byte 0xFF, 0x2C, 0x24 # jmp *(%esp) note:(FWORD jmp) +setCodeSelectorLongJump: + addl $0x10, %esp + ret + +#------------------------------------------------------------------------------ +# VOID +# SetDataSelectors ( +# UINT16 Selector +# ); +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(SetDataSelectors) +ASM_PFX(SetDataSelectors): + movl 4(%esp), %ecx + movw %cx, %ss + movw %cx, %ds + movw %cx, %es + movw %cx, %fs + movw %cx, %gs + ret + +#---------------------------------------; +# CommonInterruptEntry ; +#---------------------------------------; +# The follow algorithm is used for the common interrupt routine. + +ASM_GLOBAL ASM_PFX(CommonInterruptEntry) +ASM_PFX(CommonInterruptEntry): + cli + # + # All interrupt handlers are invoked through interrupt gates, so + # IF flag automatically cleared at the entry point + # + + # + # Calculate vector number + # + # Get the return address of call, actually, it is the + # address of vector number. + # + xchgl (%esp), %ecx + movw (%ecx), %cx + andl $0x0FFFF, %ecx + cmpl $32, %ecx # Intel reserved vector for exceptions? 
+ jae NoErrorCode + bt %ecx, ASM_PFX(mErrorCodeFlag) + jc HasErrorCode + +NoErrorCode: + + # + # Stack: + # +---------------------+ + # + EFlags + + # +---------------------+ + # + CS + + # +---------------------+ + # + EIP + + # +---------------------+ + # + ECX + + # +---------------------+ <-- ESP + # + # Registers: + # ECX - Vector Number + # + + # + # Put Vector Number on stack + # + pushl %ecx + + # + # Put 0 (dummy) error code on stack, and restore ECX + # + xorl %ecx, %ecx # ECX = 0 + xchgl 4(%esp), %ecx + + jmp ErrorCodeAndVectorOnStack + +HasErrorCode: + + # + # Stack: + # +---------------------+ + # + EFlags + + # +---------------------+ + # + CS + + # +---------------------+ + # + EIP + + # +---------------------+ + # + Error Code + + # +---------------------+ + # + ECX + + # +---------------------+ <-- ESP + # + # Registers: + # ECX - Vector Number + # + + # + # Put Vector Number on stack and restore ECX + # + xchgl (%esp), %ecx + + # + # Fall through to join main routine code + # at ErrorCodeAndVectorOnStack + # +CommonInterruptEntry_al_0000: + jmp CommonInterruptEntry_al_0000 + +ErrorCodeAndVectorOnStack: + pushl %ebp + movl %esp, %ebp + + # + # Stack: + # +---------------------+ + # + EFlags + + # +---------------------+ + # + CS + + # +---------------------+ + # + EIP + + # +---------------------+ + # + Error Code + + # +---------------------+ + # + Vector Number + + # +---------------------+ + # + EBP + + # +---------------------+ <-- EBP + # + + # + # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32 + # is 16-byte aligned + # + andl $0x0fffffff0, %esp + subl $12, %esp + +#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax; + pushl %eax + pushl %ecx + pushl %edx + pushl %ebx + leal 24(%ebp), %ecx + pushl %ecx # ESP + pushl (%ebp) # EBP + pushl %esi + pushl %edi + +#; UINT32 Gs, Fs, Es, Ds, Cs, Ss; + movl %ss, %eax + pushl %eax + movzwl 16(%ebp), %eax + pushl %eax + movl %ds, %eax + pushl %eax + movl %es, %eax + pushl %eax + movl %fs, %eax + pushl %eax + movl %gs, %eax + pushl %eax + +#; UINT32 Eip; + movl 12(%ebp), %eax + pushl %eax + +#; UINT32 Gdtr[2], Idtr[2]; + subl $8, %esp + sidt (%esp) + movl 2(%esp), %eax + xchgl (%esp), %eax + andl $0x0FFFF, %eax + movl %eax, 4(%esp) + + subl $8, %esp + sgdt (%esp) + movl 2(%esp), %eax + xchgl (%esp), %eax + andl $0x0FFFF, %eax + movl %eax, 4(%esp) + +#; UINT32 Ldtr, Tr; + xorl %eax, %eax + str %ax + pushl %eax + sldt %ax + pushl %eax + +#; UINT32 EFlags; + movl 20(%ebp), %eax + pushl %eax + +#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4; + movl %cr4, %eax + orl $0x208, %eax + movl %eax, %cr4 + pushl %eax + movl %cr3, %eax + pushl %eax + movl %cr2, %eax + pushl %eax + xorl %eax, %eax + pushl %eax + movl %cr0, %eax + pushl %eax + +#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; + movl %dr7, %eax + pushl %eax +#; clear Dr7 while executing debugger itself + xorl %eax, %eax + movl %eax, %dr7 + + movl %dr6, %eax + pushl %eax +#; insure all status bits in dr6 are clear... + xorl %eax, %eax + movl %eax, %dr6 + + movl %dr3, %eax + pushl %eax + movl %dr2, %eax + pushl %eax + movl %dr1, %eax + pushl %eax + movl %dr0, %eax + pushl %eax + +#; FX_SAVE_STATE_IA32 FxSaveState; + subl $512, %esp + movl %esp, %edi + .byte 0x0f, 0x0ae, 0x07 #fxsave [edi] + +#; UINT32 ExceptionData; + pushl 8(%ebp) + +#; call into exception handler + movl ExternalVectorTablePtr, %eax # get the interrupt vectors base + orl %eax, %eax # NULL? + jz nullExternalExceptionHandler + + mov 4(%ebp), %ecx + movl (%eax,%ecx,4), %eax + orl %eax, %eax # NULL? 
+ jz nullExternalExceptionHandler + +#; Prepare parameter and call + movl %esp, %edx + pushl %edx + movl 4(%ebp), %edx + pushl %edx + + # + # Call External Exception Handler + # + call *%eax + addl $8, %esp + +nullExternalExceptionHandler: + + cli +#; UINT32 ExceptionData; + addl $4, %esp + +#; FX_SAVE_STATE_IA32 FxSaveState; + movl %esp, %esi + .byte 0x0f, 0x0ae, 0x0e # fxrstor [esi] + addl $512, %esp + +#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; + popl %eax + movl %eax, %dr0 + popl %eax + movl %eax, %dr1 + popl %eax + movl %eax, %dr2 + popl %eax + movl %eax, %dr3 +#; skip restore of dr6. We cleared dr6 during the context save. + addl $4, %esp + popl %eax + movl %eax, %dr7 + +#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4; + popl %eax + movl %eax, %cr0 + addl $4, %esp # not for Cr1 + popl %eax + movl %eax, %cr2 + popl %eax + movl %eax, %cr3 + popl %eax + movl %eax, %cr4 + +#; UINT32 EFlags; + popl 20(%ebp) + +#; UINT32 Ldtr, Tr; +#; UINT32 Gdtr[2], Idtr[2]; +#; Best not let anyone mess with these particular registers... + addl $24, %esp + +#; UINT32 Eip; + popl 12(%ebp) + +#; UINT32 Gs, Fs, Es, Ds, Cs, Ss; +#; NOTE - modified segment registers could hang the debugger... We +#; could attempt to insulate ourselves against this possibility, +#; but that poses risks as well. +#; + popl %gs + popl %fs + popl %es + popl %ds + popl 16(%ebp) + popl %ss + +#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax; + popl %edi + popl %esi + addl $4, %esp # not for ebp + addl $4, %esp # not for esp + popl %ebx + popl %edx + popl %ecx + popl %eax + + movl %ebp, %esp + popl %ebp + addl $8, %esp + iretl + + +#END + diff --git a/UefiCpuPkg/CpuDxe/Ia32/IvtAsm.S b/UefiCpuPkg/CpuDxe/Ia32/IvtAsm.S index a2862fb800..b513a10079 100755 --- a/UefiCpuPkg/CpuDxe/Ia32/IvtAsm.S +++ b/UefiCpuPkg/CpuDxe/Ia32/IvtAsm.S @@ -1,65 +1,65 @@ -#------------------------------------------------------------------------------ -# -# Copyright (c) 2006 - 2009, Intel Corporation -# All rights reserved. This program and the accompanying materials -# are licensed and made available under the terms and conditions of the BSD License -# which accompanies this distribution. The full text of the license may be found at -# http://opensource.org/licenses/bsd-license.php -# -# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, -# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
-# -# Module Name: -# -# IvtAsm.S -# -# Abstract: -# -# Interrupt Vector Table -# -#------------------------------------------------------------------------------ - -# -# Interrupt Vector Table -# - -.macro SingleIdtVectorMacro vectorNum - call ASM_PFX(CommonInterruptEntry) - .short \vectorNum - nop -.endm - -.macro EightIdtVectors firstVectorNum - SingleIdtVectorMacro \firstVectorNum - SingleIdtVectorMacro "(\firstVectorNum+1)" - SingleIdtVectorMacro "(\firstVectorNum+2)" - SingleIdtVectorMacro "(\firstVectorNum+3)" - SingleIdtVectorMacro "(\firstVectorNum+4)" - SingleIdtVectorMacro "(\firstVectorNum+5)" - SingleIdtVectorMacro "(\firstVectorNum+6)" - SingleIdtVectorMacro "(\firstVectorNum+7)" -.endm - -.macro SixtyFourIdtVectors firstVectorNum - EightIdtVectors \firstVectorNum - EightIdtVectors "(\firstVectorNum+0x08)" - EightIdtVectors "(\firstVectorNum+0x10)" - EightIdtVectors "(\firstVectorNum+0x18)" - EightIdtVectors "(\firstVectorNum+0x20)" - EightIdtVectors "(\firstVectorNum+0x28)" - EightIdtVectors "(\firstVectorNum+0x30)" - EightIdtVectors "(\firstVectorNum+0x38)" -.endm - -ASM_GLOBAL ASM_PFX(AsmIdtVector00) -.align 8 -ASM_PFX(AsmIdtVector00): - SixtyFourIdtVectors 0x00 - SixtyFourIdtVectors 0x40 - SixtyFourIdtVectors 0x80 - SixtyFourIdtVectors 0xC0 -ASM_GLOBAL ASM_PFX(AsmCommonIdtEnd) -ASM_PFX(AsmCommonIdtEnd): - .byte 0 - - +#------------------------------------------------------------------------------ +# +# Copyright (c) 2006 - 2009, Intel Corporation +# All rights reserved. This program and the accompanying materials +# are licensed and made available under the terms and conditions of the BSD License +# which accompanies this distribution. The full text of the license may be found at +# http://opensource.org/licenses/bsd-license.php +# +# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
+# +# Module Name: +# +# IvtAsm.S +# +# Abstract: +# +# Interrupt Vector Table +# +#------------------------------------------------------------------------------ + +# +# Interrupt Vector Table +# + +.macro SingleIdtVectorMacro vectorNum + call ASM_PFX(CommonInterruptEntry) + .short \vectorNum + nop +.endm + +.macro EightIdtVectors firstVectorNum + SingleIdtVectorMacro \firstVectorNum + SingleIdtVectorMacro "(\firstVectorNum+1)" + SingleIdtVectorMacro "(\firstVectorNum+2)" + SingleIdtVectorMacro "(\firstVectorNum+3)" + SingleIdtVectorMacro "(\firstVectorNum+4)" + SingleIdtVectorMacro "(\firstVectorNum+5)" + SingleIdtVectorMacro "(\firstVectorNum+6)" + SingleIdtVectorMacro "(\firstVectorNum+7)" +.endm + +.macro SixtyFourIdtVectors firstVectorNum + EightIdtVectors \firstVectorNum + EightIdtVectors "(\firstVectorNum+0x08)" + EightIdtVectors "(\firstVectorNum+0x10)" + EightIdtVectors "(\firstVectorNum+0x18)" + EightIdtVectors "(\firstVectorNum+0x20)" + EightIdtVectors "(\firstVectorNum+0x28)" + EightIdtVectors "(\firstVectorNum+0x30)" + EightIdtVectors "(\firstVectorNum+0x38)" +.endm + +ASM_GLOBAL ASM_PFX(AsmIdtVector00) +.align 8 +ASM_PFX(AsmIdtVector00): + SixtyFourIdtVectors 0x00 + SixtyFourIdtVectors 0x40 + SixtyFourIdtVectors 0x80 + SixtyFourIdtVectors 0xC0 +ASM_GLOBAL ASM_PFX(AsmCommonIdtEnd) +ASM_PFX(AsmCommonIdtEnd): + .byte 0 + + diff --git a/UefiCpuPkg/CpuDxe/X64/CpuAsm.S b/UefiCpuPkg/CpuDxe/X64/CpuAsm.S index a6885454fc..2f84cf0235 100755 --- a/UefiCpuPkg/CpuDxe/X64/CpuAsm.S +++ b/UefiCpuPkg/CpuDxe/X64/CpuAsm.S @@ -1,359 +1,359 @@ -# TITLE CpuAsm.S: - -#------------------------------------------------------------------------------ -#* -#* Copyright 2008 - 2009, Intel Corporation -#* All rights reserved. This program and the accompanying materials -#* are licensed and made available under the terms and conditions of the BSD License -#* which accompanies this distribution. The full text of the license may be found at -#* http://opensource.org/licenses/bsd-license.php -#* -#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, -#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
-#* -#* CpuAsm.S -#* -#* Abstract: -#* -#------------------------------------------------------------------------------ - - -#text SEGMENT - - -#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions - - -# -# point to the external interrupt vector table -# -ExternalVectorTablePtr: - .byte 0, 0, 0, 0, 0, 0, 0, 0 - -ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr) -ASM_PFX(InitializeExternalVectorTablePtr): - lea ExternalVectorTablePtr(%rip), %rax # save vector number - mov %rcx, (%rax) - ret - - -#------------------------------------------------------------------------------ -# VOID -# SetCodeSelector ( -# UINT16 Selector -# ); -#------------------------------------------------------------------------------ -ASM_GLOBAL ASM_PFX(SetCodeSelector) -ASM_PFX(SetCodeSelector): - subq $0x10, %rsp - leaq setCodeSelectorLongJump(%rip), %rax - movq %rax, (%rsp) - movw %cx, 4(%rsp) - .byte 0xFF, 0x2C, 0x24 # jmp (%rsp) note:fword jmp -setCodeSelectorLongJump: - addq $0x10, %rsp - ret - -#------------------------------------------------------------------------------ -# VOID -# SetDataSelectors ( -# UINT16 Selector -# ); -#------------------------------------------------------------------------------ -ASM_GLOBAL ASM_PFX(SetDataSelectors) -ASM_PFX(SetDataSelectors): - movw %cx, %ss - movw %cx, %ds - movw %cx, %es - movw %cx, %fs - movw %cx, %gs - ret - -#---------------------------------------; -# CommonInterruptEntry ; -#---------------------------------------; -# The follow algorithm is used for the common interrupt routine. - -ASM_GLOBAL ASM_PFX(CommonInterruptEntry) -ASM_PFX(CommonInterruptEntry): - cli - # - # All interrupt handlers are invoked through interrupt gates, so - # IF flag automatically cleared at the entry point - # - # - # Calculate vector number - # - xchgq (%rsp), %rcx # get the return address of call, actually, it is the address of vector number. - movzwl (%rcx), %ecx - cmp $32, %ecx # Intel reserved vector for exceptions? 
- jae NoErrorCode - pushq %rax - leaq ASM_PFX(mErrorCodeFlag)(%rip), %rax - bt %ecx, (%rax) - popq %rax - jc CommonInterruptEntry_al_0000 - -NoErrorCode: - - # - # Push a dummy error code on the stack - # to maintain coherent stack map - # - pushq (%rsp) - movq $0, 8(%rsp) -CommonInterruptEntry_al_0000: - pushq %rbp - movq %rsp, %rbp - - # - # Stack: - # +---------------------+ <-- 16-byte aligned ensured by processor - # + Old SS + - # +---------------------+ - # + Old RSP + - # +---------------------+ - # + RFlags + - # +---------------------+ - # + CS + - # +---------------------+ - # + RIP + - # +---------------------+ - # + Error Code + - # +---------------------+ - # + RCX / Vector Number + - # +---------------------+ - # + RBP + - # +---------------------+ <-- RBP, 16-byte aligned - # - - - # - # Since here the stack pointer is 16-byte aligned, so - # EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64 - # is 16-byte aligned - # - -#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax; -#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15; - pushq %r15 - pushq %r14 - pushq %r13 - pushq %r12 - pushq %r11 - pushq %r10 - pushq %r9 - pushq %r8 - pushq %rax - pushq 8(%rbp) # RCX - pushq %rdx - pushq %rbx - pushq 48(%rbp) # RSP - pushq (%rbp) # RBP - pushq %rsi - pushq %rdi - -#; UINT64 Gs, Fs, Es, Ds, Cs, Ss; insure high 16 bits of each is zero - movzwq 56(%rbp), %rax - pushq %rax # for ss - movzwq 32(%rbp), %rax - pushq %rax # for cs - movq %ds, %rax - pushq %rax - movq %es, %rax - pushq %rax - movq %fs, %rax - pushq %rax - movq %gs, %rax - pushq %rax - - movq %rcx, 8(%rbp) # save vector number - -#; UINT64 Rip; - pushq 24(%rbp) - -#; UINT64 Gdtr[2], Idtr[2]; - xorq %rax, %rax - pushq %rax - pushq %rax - sidt (%rsp) - xchgq 2(%rsp), %rax - xchgq (%rsp), %rax - xchgq 8(%rsp), %rax - - xorq %rax, %rax - pushq %rax - pushq %rax - sgdt (%rsp) - xchgq 2(%rsp), %rax - xchgq (%rsp), %rax - xchgq 8(%rsp), %rax - -#; UINT64 Ldtr, Tr; - xorq %rax, %rax - str %ax - pushq %rax - sldt %ax - pushq %rax - -#; UINT64 RFlags; - pushq 40(%rbp) - -#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8; - movq %cr8, %rax - pushq %rax - movq %cr4, %rax - orq $0x208, %rax - movq %rax, %cr4 - pushq %rax - mov %cr3, %rax - pushq %rax - mov %cr2, %rax - pushq %rax - xorq %rax, %rax - pushq %rax - mov %cr0, %rax - pushq %rax - -#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; - movq %dr7, %rax - pushq %rax -#; clear Dr7 while executing debugger itself - xorq %rax, %rax - movq %rax, %dr7 - - movq %dr6, %rax - pushq %rax -#; insure all status bits in dr6 are clear... - xorq %rax, %rax - movq %rax, %dr6 - - movq %dr3, %rax - pushq %rax - movq %dr2, %rax - pushq %rax - movq %dr1, %rax - pushq %rax - movq %dr0, %rax - pushq %rax - -#; FX_SAVE_STATE_X64 FxSaveState; - subq $512, %rsp - movq %rsp, %rdi - .byte 0x0f, 0x0ae, 0x07 #fxsave [rdi] - -#; UINT32 ExceptionData; - pushq 16(%rbp) - -#; call into exception handler - movq 8(%rbp), %rcx - leaq ExternalVectorTablePtr(%rip), %rax - movl (%eax), %eax - movq (%rax,%rcx,8), %rax - orq %rax, %rax # NULL? 
- - je nonNullValue# - -#; Prepare parameter and call -# mov rcx, [rbp + 8] - mov %rsp, %rdx - # - # Per X64 calling convention, allocate maximum parameter stack space - # and make sure RSP is 16-byte aligned - # - subq $40, %rsp - call *%rax - addq $40, %rsp - -nonNullValue: - cli -#; UINT64 ExceptionData; - addq $8, %rsp - -#; FX_SAVE_STATE_X64 FxSaveState; - - movq %rsp, %rsi - .byte 0x0f, 0x0ae, 0x0E # fxrstor [rsi] - addq $512, %rsp - -#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; - popq %rax - movq %rax, %dr0 - popq %rax - movq %rax, %dr1 - popq %rax - movq %rax, %dr2 - popq %rax - movq %rax, %dr3 -#; skip restore of dr6. We cleared dr6 during the context save. - addq $8, %rsp - popq %rax - movq %rax, %dr7 - -#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8; - popq %rax - movq %rax, %cr0 - addq $8, %rsp # not for Cr1 - popq %rax - movq %rax, %cr2 - popq %rax - movq %rax, %cr3 - popq %rax - movq %rax, %cr4 - popq %rax - movq %rax, %cr8 - -#; UINT64 RFlags; - popq 40(%rbp) - -#; UINT64 Ldtr, Tr; -#; UINT64 Gdtr[2], Idtr[2]; -#; Best not let anyone mess with these particular registers... - addq $48, %rsp - -#; UINT64 Rip; - popq 24(%rbp) - -#; UINT64 Gs, Fs, Es, Ds, Cs, Ss; - popq %rax - # mov %rax, %gs ; not for gs - popq %rax - # mov %rax, %fs ; not for fs - # (X64 will not use fs and gs, so we do not restore it) - popq %rax - movq %rax, %es - popq %rax - movq %rax, %ds - popq 32(%rbp) # for cs - popq 56(%rbp) # for ss - -#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax; -#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15; - popq %rdi - popq %rsi - addq $8, %rsp # not for rbp - popq 48(%rbp) # for rsp - popq %rbx - popq %rdx - popq %rcx - popq %rax - popq %r8 - popq %r9 - popq %r10 - popq %r11 - popq %r12 - popq %r13 - popq %r14 - popq %r15 - - movq %rbp, %rsp - popq %rbp - addq $16, %rsp - iretq - - -#text ENDS - -#END - - +# TITLE CpuAsm.S: + +#------------------------------------------------------------------------------ +#* +#* Copyright 2008 - 2009, Intel Corporation +#* All rights reserved. This program and the accompanying materials +#* are licensed and made available under the terms and conditions of the BSD License +#* which accompanies this distribution. The full text of the license may be found at +#* http://opensource.org/licenses/bsd-license.php +#* +#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
+#* +#* CpuAsm.S +#* +#* Abstract: +#* +#------------------------------------------------------------------------------ + + +#text SEGMENT + + +#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions + + +# +# point to the external interrupt vector table +# +ExternalVectorTablePtr: + .byte 0, 0, 0, 0, 0, 0, 0, 0 + +ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr) +ASM_PFX(InitializeExternalVectorTablePtr): + lea ExternalVectorTablePtr(%rip), %rax # save vector number + mov %rcx, (%rax) + ret + + +#------------------------------------------------------------------------------ +# VOID +# SetCodeSelector ( +# UINT16 Selector +# ); +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(SetCodeSelector) +ASM_PFX(SetCodeSelector): + subq $0x10, %rsp + leaq setCodeSelectorLongJump(%rip), %rax + movq %rax, (%rsp) + movw %cx, 4(%rsp) + .byte 0xFF, 0x2C, 0x24 # jmp (%rsp) note:fword jmp +setCodeSelectorLongJump: + addq $0x10, %rsp + ret + +#------------------------------------------------------------------------------ +# VOID +# SetDataSelectors ( +# UINT16 Selector +# ); +#------------------------------------------------------------------------------ +ASM_GLOBAL ASM_PFX(SetDataSelectors) +ASM_PFX(SetDataSelectors): + movw %cx, %ss + movw %cx, %ds + movw %cx, %es + movw %cx, %fs + movw %cx, %gs + ret + +#---------------------------------------; +# CommonInterruptEntry ; +#---------------------------------------; +# The follow algorithm is used for the common interrupt routine. + +ASM_GLOBAL ASM_PFX(CommonInterruptEntry) +ASM_PFX(CommonInterruptEntry): + cli + # + # All interrupt handlers are invoked through interrupt gates, so + # IF flag automatically cleared at the entry point + # + # + # Calculate vector number + # + xchgq (%rsp), %rcx # get the return address of call, actually, it is the address of vector number. + movzwl (%rcx), %ecx + cmp $32, %ecx # Intel reserved vector for exceptions? 
+ jae NoErrorCode + pushq %rax + leaq ASM_PFX(mErrorCodeFlag)(%rip), %rax + bt %ecx, (%rax) + popq %rax + jc CommonInterruptEntry_al_0000 + +NoErrorCode: + + # + # Push a dummy error code on the stack + # to maintain coherent stack map + # + pushq (%rsp) + movq $0, 8(%rsp) +CommonInterruptEntry_al_0000: + pushq %rbp + movq %rsp, %rbp + + # + # Stack: + # +---------------------+ <-- 16-byte aligned ensured by processor + # + Old SS + + # +---------------------+ + # + Old RSP + + # +---------------------+ + # + RFlags + + # +---------------------+ + # + CS + + # +---------------------+ + # + RIP + + # +---------------------+ + # + Error Code + + # +---------------------+ + # + RCX / Vector Number + + # +---------------------+ + # + RBP + + # +---------------------+ <-- RBP, 16-byte aligned + # + + + # + # Since here the stack pointer is 16-byte aligned, so + # EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64 + # is 16-byte aligned + # + +#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax; +#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15; + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %r11 + pushq %r10 + pushq %r9 + pushq %r8 + pushq %rax + pushq 8(%rbp) # RCX + pushq %rdx + pushq %rbx + pushq 48(%rbp) # RSP + pushq (%rbp) # RBP + pushq %rsi + pushq %rdi + +#; UINT64 Gs, Fs, Es, Ds, Cs, Ss; insure high 16 bits of each is zero + movzwq 56(%rbp), %rax + pushq %rax # for ss + movzwq 32(%rbp), %rax + pushq %rax # for cs + movq %ds, %rax + pushq %rax + movq %es, %rax + pushq %rax + movq %fs, %rax + pushq %rax + movq %gs, %rax + pushq %rax + + movq %rcx, 8(%rbp) # save vector number + +#; UINT64 Rip; + pushq 24(%rbp) + +#; UINT64 Gdtr[2], Idtr[2]; + xorq %rax, %rax + pushq %rax + pushq %rax + sidt (%rsp) + xchgq 2(%rsp), %rax + xchgq (%rsp), %rax + xchgq 8(%rsp), %rax + + xorq %rax, %rax + pushq %rax + pushq %rax + sgdt (%rsp) + xchgq 2(%rsp), %rax + xchgq (%rsp), %rax + xchgq 8(%rsp), %rax + +#; UINT64 Ldtr, Tr; + xorq %rax, %rax + str %ax + pushq %rax + sldt %ax + pushq %rax + +#; UINT64 RFlags; + pushq 40(%rbp) + +#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8; + movq %cr8, %rax + pushq %rax + movq %cr4, %rax + orq $0x208, %rax + movq %rax, %cr4 + pushq %rax + mov %cr3, %rax + pushq %rax + mov %cr2, %rax + pushq %rax + xorq %rax, %rax + pushq %rax + mov %cr0, %rax + pushq %rax + +#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; + movq %dr7, %rax + pushq %rax +#; clear Dr7 while executing debugger itself + xorq %rax, %rax + movq %rax, %dr7 + + movq %dr6, %rax + pushq %rax +#; insure all status bits in dr6 are clear... + xorq %rax, %rax + movq %rax, %dr6 + + movq %dr3, %rax + pushq %rax + movq %dr2, %rax + pushq %rax + movq %dr1, %rax + pushq %rax + movq %dr0, %rax + pushq %rax + +#; FX_SAVE_STATE_X64 FxSaveState; + subq $512, %rsp + movq %rsp, %rdi + .byte 0x0f, 0x0ae, 0x07 #fxsave [rdi] + +#; UINT32 ExceptionData; + pushq 16(%rbp) + +#; call into exception handler + movq 8(%rbp), %rcx + leaq ExternalVectorTablePtr(%rip), %rax + movl (%eax), %eax + movq (%rax,%rcx,8), %rax + orq %rax, %rax # NULL? 
+ + je nonNullValue# + +#; Prepare parameter and call +# mov rcx, [rbp + 8] + mov %rsp, %rdx + # + # Per X64 calling convention, allocate maximum parameter stack space + # and make sure RSP is 16-byte aligned + # + subq $40, %rsp + call *%rax + addq $40, %rsp + +nonNullValue: + cli +#; UINT64 ExceptionData; + addq $8, %rsp + +#; FX_SAVE_STATE_X64 FxSaveState; + + movq %rsp, %rsi + .byte 0x0f, 0x0ae, 0x0E # fxrstor [rsi] + addq $512, %rsp + +#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7; + popq %rax + movq %rax, %dr0 + popq %rax + movq %rax, %dr1 + popq %rax + movq %rax, %dr2 + popq %rax + movq %rax, %dr3 +#; skip restore of dr6. We cleared dr6 during the context save. + addq $8, %rsp + popq %rax + movq %rax, %dr7 + +#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8; + popq %rax + movq %rax, %cr0 + addq $8, %rsp # not for Cr1 + popq %rax + movq %rax, %cr2 + popq %rax + movq %rax, %cr3 + popq %rax + movq %rax, %cr4 + popq %rax + movq %rax, %cr8 + +#; UINT64 RFlags; + popq 40(%rbp) + +#; UINT64 Ldtr, Tr; +#; UINT64 Gdtr[2], Idtr[2]; +#; Best not let anyone mess with these particular registers... + addq $48, %rsp + +#; UINT64 Rip; + popq 24(%rbp) + +#; UINT64 Gs, Fs, Es, Ds, Cs, Ss; + popq %rax + # mov %rax, %gs ; not for gs + popq %rax + # mov %rax, %fs ; not for fs + # (X64 will not use fs and gs, so we do not restore it) + popq %rax + movq %rax, %es + popq %rax + movq %rax, %ds + popq 32(%rbp) # for cs + popq 56(%rbp) # for ss + +#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax; +#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15; + popq %rdi + popq %rsi + addq $8, %rsp # not for rbp + popq 48(%rbp) # for rsp + popq %rbx + popq %rdx + popq %rcx + popq %rax + popq %r8 + popq %r9 + popq %r10 + popq %r11 + popq %r12 + popq %r13 + popq %r14 + popq %r15 + + movq %rbp, %rsp + popq %rbp + addq $16, %rsp + iretq + + +#text ENDS + +#END + +