Update to make end-of-line consistent for all source files in MdePkg. There are no other updates besides that change.

git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@9155 6f19259b-4bc3-4df7-8a09-765794883524
This commit is contained in:
qhuang8 2009-08-20 08:04:40 +00:00
parent 7cd1603d21
commit c2fd60f071
3 changed files with 810 additions and 810 deletions

View File

@ -1,386 +1,386 @@
#------------------------------------------------------------------------------
#*
#*   Copyright 2006 - 2009, Intel Corporation
#*   All rights reserved. This program and the accompanying materials
#*   are licensed and made available under the terms and conditions of the BSD License
#*   which accompanies this distribution.  The full text of the license may be found at
#*   http://opensource.org/licenses/bsd-license.php
#*
#*   THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#*   WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#*   CpuAsm.S
#*
#*   Abstract:
#*
#------------------------------------------------------------------------------

#.MMX
#.XMM

#EXTRN ASM_PFX(mErrorCodeFlag):DWORD  # Error code flags for exceptions

#
# Storage for the pointer to the external interrupt vector table.
# 4 bytes: one IA32 pointer, written by InitializeExternalVectorTablePtr
# and read by CommonInterruptEntry.
#
ExternalVectorTablePtr:
    .byte      0, 0, 0, 0

#------------------------------------------------------------------------------
# VOID
# InitializeExternalVectorTablePtr (
#   EFI_CPU_INTERRUPT_HANDLER  *VectorTable   // first (and only) stack argument
#   );
#
# Records the caller-supplied vector table base for later dispatch.
# Clobbers: eax, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr)
ASM_PFX(InitializeExternalVectorTablePtr):
    movl       4(%esp), %eax             # eax = VectorTable argument
    movl       %eax, ExternalVectorTablePtr
    ret
#------------------------------------------------------------------------------
# VOID
# SetCodeSelector (
#   UINT16 Selector
#   );
#
# Reloads CS with the given selector by building a far pointer
# (offset = setCodeSelectorLongJump, segment = Selector) on the stack
# and performing an indirect far jump through it.
# Clobbers: eax, ecx, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetCodeSelector)
ASM_PFX(SetCodeSelector):
    movl    4(%esp), %ecx                # ecx = Selector argument
    subl    $0x10, %esp                  # scratch space for the far pointer
    leal    setCodeSelectorLongJump, %eax
    movl    %eax, (%esp)                 # far pointer offset
    movw    %cx, 4(%esp)                 # far pointer segment = Selector
    .byte   0xFF, 0x2C, 0x24             # jmp *(%esp)  note:(FWORD jmp) — far jump reloads CS
setCodeSelectorLongJump:
    addl    $0x10, %esp                  # release scratch space
    ret
#------------------------------------------------------------------------------
# VOID
# SetDataSelectors (
#   UINT16 Selector
#   );
#
# Loads the same selector into all data segment registers (SS, DS, ES, FS, GS).
# Clobbers: ecx, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetDataSelectors)
ASM_PFX(SetDataSelectors):
    movl    4(%esp), %ecx                # ecx = Selector argument
    movw    %cx, %ss
    movw    %cx, %ds
    movw    %cx, %es
    movw    %cx, %fs
    movw    %cx, %gs
    ret
#---------------------------------------;
# CommonInterruptEntry                  ;
#---------------------------------------;
# Common interrupt/exception entry point (IA32). Each IDT vector stub does a
# near CALL here followed by a 16-bit vector number, so the "return address"
# on the stack points at the vector number. This routine normalizes the stack
# (pushing a dummy error code when the CPU did not supply one), captures a
# full EFI_SYSTEM_CONTEXT_IA32, dispatches to the registered external handler
# (if any), restores context, and IRETs.
# The follow algorithm is used for the common interrupt routine.

ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
ASM_PFX(CommonInterruptEntry):
    cli
    #
    # All interrupt handlers are invoked through interrupt gates, so
    # IF flag automatically cleared at the entry point
    #

    #
    # Calculate vector number
    #
    # Get the return address of call, actually, it is the
    # address of vector number.
    #
    xchgl   (%esp), %ecx                 # ecx = &vectorNum; saved ECX now on stack
    movw    (%ecx), %cx                  # cx = vector number stored after the CALL
    andl    $0x0FFFF, %ecx               # zero-extend to 32 bits
    cmpl    $32, %ecx                    # Intel reserved vector for exceptions?
    jae     NoErrorCode                  # external interrupts never push an error code
    bt      %ecx, ASM_PFX(mErrorCodeFlag) # per-vector bitmap: CPU pushed an error code?
    jc      HasErrorCode

NoErrorCode:
    #
    # Stack:
    # +---------------------+
    # +    EFlags           +
    # +---------------------+
    # +    CS               +
    # +---------------------+
    # +    EIP              +
    # +---------------------+
    # +    ECX              +
    # +---------------------+ <-- ESP
    #
    # Registers:
    #   ECX - Vector Number
    #

    #
    # Put Vector Number on stack
    #
    pushl   %ecx

    #
    # Put 0 (dummy) error code on stack, and restore ECX
    #
    xorl    %ecx, %ecx                   # ECX = 0
    xchgl   4(%esp), %ecx                # swap dummy error code in, saved ECX out

    jmp     ErrorCodeAndVectorOnStack

HasErrorCode:
    #
    # Stack:
    # +---------------------+
    # +    EFlags           +
    # +---------------------+
    # +    CS               +
    # +---------------------+
    # +    EIP              +
    # +---------------------+
    # +    Error Code       +
    # +---------------------+
    # +    ECX              +
    # +---------------------+ <-- ESP
    #
    # Registers:
    #   ECX - Vector Number
    #

    #
    # Put Vector Number on stack and restore ECX
    #
    xchgl   (%esp), %ecx

    #
    # Fall through to join main routine code
    # at ErrorCodeAndVectorOnStack
    #
CommonInterruptEntry_al_0000:
    # NOTE(review): this self-jump looks like a MASM->GAS conversion artifact
    # ("@@:" anonymous label). As written it makes the HasErrorCode path spin
    # forever instead of falling through as the comment above states — confirm
    # against the original MASM source before relying on the <32 vector path.
    jmp     CommonInterruptEntry_al_0000

ErrorCodeAndVectorOnStack:
    pushl   %ebp
    movl    %esp, %ebp

    #
    # Stack:
    # +---------------------+
    # +    EFlags           +
    # +---------------------+
    # +    CS               +
    # +---------------------+
    # +    EIP              +
    # +---------------------+
    # +    Error Code       +
    # +---------------------+
    # +    Vector Number    +
    # +---------------------+
    # +    EBP              +
    # +---------------------+ <-- EBP
    #

    #
    # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
    # is 16-byte aligned
    #
    andl    $0x0fffffff0, %esp
    subl    $12, %esp

#; UINT32  Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    pushl   %eax
    pushl   %ecx
    pushl   %edx
    pushl   %ebx
    leal    24(%ebp), %ecx               # pre-interrupt ESP (frame + 5 dwords)
    pushl   %ecx                         # ESP
    pushl   (%ebp)                       # EBP
    pushl   %esi
    pushl   %edi

#; UINT32  Gs, Fs, Es, Ds, Cs, Ss;
    movl    %ss, %eax
    pushl   %eax
    movzwl  16(%ebp), %eax               # CS from the interrupt frame
    pushl   %eax
    movl    %ds, %eax
    pushl   %eax
    movl    %es, %eax
    pushl   %eax
    movl    %fs, %eax
    pushl   %eax
    movl    %gs, %eax
    pushl   %eax

#; UINT32  Eip;
    movl    12(%ebp), %eax
    pushl   %eax

#; UINT32  Gdtr[2], Idtr[2];
    subl    $8, %esp
    sidt    (%esp)
    movl    2(%esp), %eax                # split the 6-byte descriptor into
    xchgl   (%esp), %eax                 # base / limit dword pair
    andl    $0x0FFFF, %eax
    movl    %eax, 4(%esp)

    subl    $8, %esp
    sgdt    (%esp)
    movl    2(%esp), %eax
    xchgl   (%esp), %eax
    andl    $0x0FFFF, %eax
    movl    %eax, 4(%esp)

#; UINT32  Ldtr, Tr;
    xorl    %eax, %eax
    str     %ax
    pushl   %eax
    sldt    %ax
    pushl   %eax

#; UINT32  EFlags;
    movl    20(%ebp), %eax
    pushl   %eax

#; UINT32  Cr0, Cr1, Cr2, Cr3, Cr4;
    movl    %cr4, %eax
    orl     $0x208, %eax                 # set OSFXSR | OSXMMEXCPT so fxsave is valid
    movl    %eax, %cr4
    pushl   %eax
    movl    %cr3, %eax
    pushl   %eax
    movl    %cr2, %eax
    pushl   %eax
    xorl    %eax, %eax                   # CR1 does not exist; store 0
    pushl   %eax
    movl    %cr0, %eax
    pushl   %eax

#; UINT32  Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    movl    %dr7, %eax
    pushl   %eax
#; clear Dr7 while executing debugger itself
    xorl    %eax, %eax
    movl    %eax, %dr7

    movl    %dr6, %eax
    pushl   %eax
#; insure all status bits in dr6 are clear...
    xorl    %eax, %eax
    movl    %eax, %dr6

    movl    %dr3, %eax
    pushl   %eax
    movl    %dr2, %eax
    pushl   %eax
    movl    %dr1, %eax
    pushl   %eax
    movl    %dr0, %eax
    pushl   %eax

#; FX_SAVE_STATE_IA32 FxSaveState;
    subl    $512, %esp
    movl    %esp, %edi
    .byte   0x0f, 0x0ae, 0x07            # fxsave [edi]

#; UINT32  ExceptionData;
    pushl   8(%ebp)

#; call into exception handler
    movl    ExternalVectorTablePtr, %eax # get the interrupt vectors base
    orl     %eax, %eax                   # NULL?
    jz      nullExternalExceptionHandler
    mov     4(%ebp), %ecx                # ecx = vector number
    movl    (%eax,%ecx,4), %eax          # eax = handler for this vector
    orl     %eax, %eax                   # NULL?
    jz      nullExternalExceptionHandler

#; Prepare parameter and call
    movl    %esp, %edx
    pushl   %edx                         # arg2: EFI_SYSTEM_CONTEXT
    movl    4(%ebp), %edx
    pushl   %edx                         # arg1: vector number
    #
    # Call External Exception Handler
    #
    call    *%eax
    addl    $8, %esp                     # pop the two cdecl arguments

nullExternalExceptionHandler:
    cli
#; UINT32  ExceptionData;
    addl    $4, %esp

#; FX_SAVE_STATE_IA32 FxSaveState;
    movl    %esp, %esi
    .byte   0x0f, 0x0ae, 0x0e            # fxrstor [esi]
    addl    $512, %esp

#; UINT32  Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    popl    %eax
    movl    %eax, %dr0
    popl    %eax
    movl    %eax, %dr1
    popl    %eax
    movl    %eax, %dr2
    popl    %eax
    movl    %eax, %dr3
#; skip restore of dr6.  We cleared dr6 during the context save.
    addl    $4, %esp
    popl    %eax
    movl    %eax, %dr7

#; UINT32  Cr0, Cr1, Cr2, Cr3, Cr4;
    popl    %eax
    movl    %eax, %cr0
    addl    $4, %esp                     # not for Cr1
    popl    %eax
    movl    %eax, %cr2
    popl    %eax
    movl    %eax, %cr3
    popl    %eax
    movl    %eax, %cr4

#; UINT32  EFlags;
    popl    20(%ebp)                     # write back into the IRET frame

#; UINT32  Ldtr, Tr;
#; UINT32  Gdtr[2], Idtr[2];
#; Best not let anyone mess with these particular registers...
    addl    $24, %esp

#; UINT32  Eip;
    popl    12(%ebp)

#; UINT32  Gs, Fs, Es, Ds, Cs, Ss;
#; NOTE - modified segment registers could hang the debugger...  We
#;        could attempt to insulate ourselves against this possibility,
#;        but that poses risks as well.
#;
    popl    %gs
    popl    %fs
    popl    %es
    popl    %ds
    popl    16(%ebp)                     # CS goes back into the IRET frame
    popl    %ss

#; UINT32  Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    popl    %edi
    popl    %esi
    addl    $4, %esp                     # not for ebp
    addl    $4, %esp                     # not for esp
    popl    %ebx
    popl    %edx
    popl    %ecx
    popl    %eax

    movl    %ebp, %esp
    popl    %ebp
    addl    $8, %esp                     # discard vector number and error code
    iretl

#END

View File

@ -1,65 +1,65 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2009, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
#   IvtAsm.S
#
# Abstract:
#
#   Interrupt Vector Table
#
#------------------------------------------------------------------------------

#
# Interrupt Vector Table
#
# Emits 256 fixed-size IDT entry stubs. Each stub is:
#   call CommonInterruptEntry   (5 bytes)
#   .short vectorNum            (2 bytes, read by CommonInterruptEntry
#                                through the CALL return address)
#   nop                         (1 byte, pads the stub to 8 bytes so
#                                entries can be indexed uniformly)
#

# One 8-byte vector stub.
.macro SingleIdtVectorMacro vectorNum
    call    ASM_PFX(CommonInterruptEntry)
    .short  \vectorNum
    nop
.endm

# Eight consecutive stubs starting at \firstVectorNum.
.macro EightIdtVectors firstVectorNum
    SingleIdtVectorMacro  \firstVectorNum
    SingleIdtVectorMacro  "(\firstVectorNum+1)"
    SingleIdtVectorMacro  "(\firstVectorNum+2)"
    SingleIdtVectorMacro  "(\firstVectorNum+3)"
    SingleIdtVectorMacro  "(\firstVectorNum+4)"
    SingleIdtVectorMacro  "(\firstVectorNum+5)"
    SingleIdtVectorMacro  "(\firstVectorNum+6)"
    SingleIdtVectorMacro  "(\firstVectorNum+7)"
.endm

# Sixty-four consecutive stubs starting at \firstVectorNum.
.macro SixtyFourIdtVectors firstVectorNum
    EightIdtVectors  \firstVectorNum
    EightIdtVectors  "(\firstVectorNum+0x08)"
    EightIdtVectors  "(\firstVectorNum+0x10)"
    EightIdtVectors  "(\firstVectorNum+0x18)"
    EightIdtVectors  "(\firstVectorNum+0x20)"
    EightIdtVectors  "(\firstVectorNum+0x28)"
    EightIdtVectors  "(\firstVectorNum+0x30)"
    EightIdtVectors  "(\firstVectorNum+0x38)"
.endm

ASM_GLOBAL ASM_PFX(AsmIdtVector00)
.align 8
ASM_PFX(AsmIdtVector00):
    SixtyFourIdtVectors 0x00
    SixtyFourIdtVectors 0x40
    SixtyFourIdtVectors 0x80
    SixtyFourIdtVectors 0xC0
ASM_GLOBAL ASM_PFX(AsmCommonIdtEnd)
ASM_PFX(AsmCommonIdtEnd):
    .byte 0

View File

@ -1,359 +1,359 @@
# TITLE   CpuAsm.S:
#------------------------------------------------------------------------------
#*
#*   Copyright 2008 - 2009, Intel Corporation
#*   All rights reserved. This program and the accompanying materials
#*   are licensed and made available under the terms and conditions of the BSD License
#*   which accompanies this distribution.  The full text of the license may be found at
#*   http://opensource.org/licenses/bsd-license.php
#*
#*   THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#*   WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#*   CpuAsm.S
#*
#*   Abstract:
#*
#------------------------------------------------------------------------------

#text  SEGMENT

#EXTRN ASM_PFX(mErrorCodeFlag):DWORD  # Error code flags for exceptions

#
# Storage for the pointer to the external interrupt vector table.
# 8 bytes: one X64 pointer, written by InitializeExternalVectorTablePtr
# and read by CommonInterruptEntry.
#
ExternalVectorTablePtr:
    .byte      0, 0, 0, 0, 0, 0, 0, 0

#------------------------------------------------------------------------------
# VOID
# InitializeExternalVectorTablePtr (
#   EFI_CPU_INTERRUPT_HANDLER  *VectorTable   // rcx (Microsoft x64 ABI)
#   );
#
# Records the caller-supplied vector table base for later dispatch.
# Clobbers: rax, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr)
ASM_PFX(InitializeExternalVectorTablePtr):
    lea     ExternalVectorTablePtr(%rip), %rax  # RIP-relative: PIC-safe
    mov     %rcx, (%rax)                        # store the 64-bit table pointer
    ret
#------------------------------------------------------------------------------
# VOID
# SetCodeSelector (
#   UINT16 Selector    // cx (Microsoft x64 ABI)
#   );
#
# Reloads CS with the given selector by building a far pointer
# (offset = setCodeSelectorLongJump, segment = Selector) on the stack
# and performing an indirect far jump through it.
# Clobbers: rax, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetCodeSelector)
ASM_PFX(SetCodeSelector):
    subq    $0x10, %rsp                  # scratch space for the far pointer
    leaq    setCodeSelectorLongJump(%rip), %rax
    movq    %rax, (%rsp)                 # far pointer offset
    movw    %cx, 4(%rsp)                 # far pointer segment = Selector
    .byte   0xFF, 0x2C, 0x24             # jmp (%rsp)  note:fword jmp — far jump reloads CS
setCodeSelectorLongJump:
    addq    $0x10, %rsp                  # release scratch space
    ret
#------------------------------------------------------------------------------
# VOID
# SetDataSelectors (
#   UINT16 Selector    // cx (Microsoft x64 ABI)
#   );
#
# Loads the same selector into all data segment registers (SS, DS, ES, FS, GS).
# Clobbers: flags only (argument arrives in cx).
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetDataSelectors)
ASM_PFX(SetDataSelectors):
    movw    %cx, %ss
    movw    %cx, %ds
    movw    %cx, %es
    movw    %cx, %fs
    movw    %cx, %gs
    ret
#---------------------------------------;
# CommonInterruptEntry                  ;
#---------------------------------------;
# Common interrupt/exception entry point (X64). Each IDT vector stub does a
# near CALL here followed by a 16-bit vector number, so the "return address"
# on the stack points at the vector number. This routine normalizes the stack
# (pushing a dummy error code when the CPU did not supply one), captures a
# full EFI_SYSTEM_CONTEXT_X64, dispatches to the registered external handler
# (if any), restores context, and IRETs.
# The follow algorithm is used for the common interrupt routine.

ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
ASM_PFX(CommonInterruptEntry):
    cli
    #
    # All interrupt handlers are invoked through interrupt gates, so
    # IF flag automatically cleared at the entry point
    #

    #
    # Calculate vector number
    #
    xchgq   (%rsp), %rcx   # get the return address of call, actually, it is the address of vector number.
    movzwl  (%rcx), %ecx                 # ecx = vector number stored after the CALL
    cmp     $32, %ecx                    # Intel reserved vector for exceptions?
    jae     NoErrorCode                  # external interrupts never push an error code
    pushq   %rax
    leaq    ASM_PFX(mErrorCodeFlag)(%rip), %rax
    bt      %ecx, (%rax)                 # per-vector bitmap: CPU pushed an error code?
    popq    %rax
    jc      CommonInterruptEntry_al_0000

NoErrorCode:
    #
    # Push a dummy error code on the stack
    # to maintain coherent stack map
    #
    pushq   (%rsp)                       # duplicate RIP downward one slot...
    movq    $0, 8(%rsp)                  # ...and write 0 where the error code sits

CommonInterruptEntry_al_0000:
    pushq   %rbp
    movq    %rsp, %rbp

    #
    # Stack:
    # +---------------------+ <-- 16-byte aligned ensured by processor
    # +    Old SS           +
    # +---------------------+
    # +    Old RSP          +
    # +---------------------+
    # +    RFlags           +
    # +---------------------+
    # +    CS               +
    # +---------------------+
    # +    RIP              +
    # +---------------------+
    # +    Error Code       +
    # +---------------------+
    # + RCX / Vector Number +
    # +---------------------+
    # +    RBP              +
    # +---------------------+ <-- RBP, 16-byte aligned
    #

    #
    # Since here the stack pointer is 16-byte aligned, so
    # EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
    # is 16-byte aligned
    #

#; UINT64  Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
#; UINT64  R8, R9, R10, R11, R12, R13, R14, R15;
    pushq   %r15
    pushq   %r14
    pushq   %r13
    pushq   %r12
    pushq   %r11
    pushq   %r10
    pushq   %r9
    pushq   %r8
    pushq   %rax
    pushq   8(%rbp)                      # RCX (original value, saved by the entry xchg)
    pushq   %rdx
    pushq   %rbx
    pushq   48(%rbp)                     # RSP from the interrupt frame
    pushq   (%rbp)                       # RBP
    pushq   %rsi
    pushq   %rdi

#; UINT64  Gs, Fs, Es, Ds, Cs, Ss;  insure high 16 bits of each is zero
    movzwq  56(%rbp), %rax
    pushq   %rax                         # for ss
    movzwq  32(%rbp), %rax
    pushq   %rax                         # for cs
    movq    %ds, %rax
    pushq   %rax
    movq    %es, %rax
    pushq   %rax
    movq    %fs, %rax
    pushq   %rax
    movq    %gs, %rax
    pushq   %rax

    movq    %rcx, 8(%rbp)                # save vector number

#; UINT64  Rip;
    pushq   24(%rbp)

#; UINT64  Gdtr[2], Idtr[2];
    xorq    %rax, %rax
    pushq   %rax
    pushq   %rax
    sidt    (%rsp)
    xchgq   2(%rsp), %rax                # split the 10-byte descriptor into
    xchgq   (%rsp), %rax                 # base / limit qword pair
    xchgq   8(%rsp), %rax

    xorq    %rax, %rax
    pushq   %rax
    pushq   %rax
    sgdt    (%rsp)
    xchgq   2(%rsp), %rax
    xchgq   (%rsp), %rax
    xchgq   8(%rsp), %rax

#; UINT64  Ldtr, Tr;
    xorq    %rax, %rax
    str     %ax
    pushq   %rax
    sldt    %ax
    pushq   %rax

#; UINT64  RFlags;
    pushq   40(%rbp)

#; UINT64  Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
    movq    %cr8, %rax
    pushq   %rax
    movq    %cr4, %rax
    orq     $0x208, %rax                 # set OSFXSR | OSXMMEXCPT so fxsave is valid
    movq    %rax, %cr4
    pushq   %rax
    mov     %cr3, %rax
    pushq   %rax
    mov     %cr2, %rax
    pushq   %rax
    xorq    %rax, %rax                   # CR1 does not exist; store 0
    pushq   %rax
    mov     %cr0, %rax
    pushq   %rax

#; UINT64  Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    movq    %dr7, %rax
    pushq   %rax
#; clear Dr7 while executing debugger itself
    xorq    %rax, %rax
    movq    %rax, %dr7

    movq    %dr6, %rax
    pushq   %rax
#; insure all status bits in dr6 are clear...
    xorq    %rax, %rax
    movq    %rax, %dr6

    movq    %dr3, %rax
    pushq   %rax
    movq    %dr2, %rax
    pushq   %rax
    movq    %dr1, %rax
    pushq   %rax
    movq    %dr0, %rax
    pushq   %rax

#; FX_SAVE_STATE_X64 FxSaveState;
    subq    $512, %rsp
    movq    %rsp, %rdi
    .byte   0x0f, 0x0ae, 0x07            # fxsave [rdi]

#; UINT32  ExceptionData;
    pushq   16(%rbp)

#; call into exception handler
    movq    8(%rbp), %rcx                # rcx = vector number
    leaq    ExternalVectorTablePtr(%rip), %rax
    movq    (%rax), %rax                 # load full 64-bit table pointer
                                         # (was "movl (%eax), %eax", which
                                         # truncated the pointer to 32 bits)
    movq    (%rax,%rcx,8), %rax          # rax = handler for this vector
    orq     %rax, %rax                   # NULL?
    je      nonNullValue                 # skip the call when no handler is registered

#; Prepare parameter and call
#  mov     rcx, [rbp + 8]                ; rcx already holds the vector number
    mov     %rsp, %rdx                   # arg2: EFI_SYSTEM_CONTEXT
    #
    # Per X64 calling convention, allocate maximum parameter stack space
    # and make sure RSP is 16-byte aligned
    #
    subq    $40, %rsp                    # 32-byte shadow space + 8 for alignment
    call    *%rax
    addq    $40, %rsp

nonNullValue:
    cli
#; UINT64  ExceptionData;
    addq    $8, %rsp

#; FX_SAVE_STATE_X64 FxSaveState;
    movq    %rsp, %rsi
    .byte   0x0f, 0x0ae, 0x0E            # fxrstor [rsi]
    addq    $512, %rsp

#; UINT64  Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    popq    %rax
    movq    %rax, %dr0
    popq    %rax
    movq    %rax, %dr1
    popq    %rax
    movq    %rax, %dr2
    popq    %rax
    movq    %rax, %dr3
#; skip restore of dr6.  We cleared dr6 during the context save.
    addq    $8, %rsp
    popq    %rax
    movq    %rax, %dr7

#; UINT64  Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
    popq    %rax
    movq    %rax, %cr0
    addq    $8, %rsp                     # not for Cr1
    popq    %rax
    movq    %rax, %cr2
    popq    %rax
    movq    %rax, %cr3
    popq    %rax
    movq    %rax, %cr4
    popq    %rax
    movq    %rax, %cr8

#; UINT64  RFlags;
    popq    40(%rbp)                     # write back into the IRET frame

#; UINT64  Ldtr, Tr;
#; UINT64  Gdtr[2], Idtr[2];
#; Best not let anyone mess with these particular registers...
    addq    $48, %rsp

#; UINT64  Rip;
    popq    24(%rbp)

#; UINT64  Gs, Fs, Es, Ds, Cs, Ss;
    popq    %rax
    # mov   %rax, %gs ; not for gs
    popq    %rax
    # mov   %rax, %fs ; not for fs
    # (X64 will not use fs and gs, so we do not restore it)
    popq    %rax
    movq    %rax, %es
    popq    %rax
    movq    %rax, %ds
    popq    32(%rbp)                     # for cs
    popq    56(%rbp)                     # for ss

#; UINT64  Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
#; UINT64  R8, R9, R10, R11, R12, R13, R14, R15;
    popq    %rdi
    popq    %rsi
    addq    $8, %rsp                     # not for rbp
    popq    48(%rbp)                     # for rsp
    popq    %rbx
    popq    %rdx
    popq    %rcx
    popq    %rax
    popq    %r8
    popq    %r9
    popq    %r10
    popq    %r11
    popq    %r12
    popq    %r13
    popq    %r14
    popq    %r15

    movq    %rbp, %rsp
    popq    %rbp
    addq    $16, %rsp                    # discard vector number and error code
    iretq

#text  ENDS
#END