OvmfPkg: LoadLinuxLib: Fix kernel entry for 64-bit OVMF

We currently just jump to offset 0x200 in the kernel image, in 64-bit
mode. This is completely broken. If it's a 32-bit kernel, we'll be
jumping into the compressed data payload.

If it's a 64-bit kernel, it'll work... but the 0x200 offset is
explicitly marked as 'may change in the future', has already changed
from 0x100 to 0x200 in the past with no fanfare, and bootloaders are
instructed that they should look at the ELF header to find the offset.
So although it does actually work today, it's still broken in the
"someone needs to whipped for doing it this way" sense of the word.

In fact, the same bug exists in other bootloaders so the 0x200 offset
probably *is* now set in stone. But still it's only valid to use it if
we *know* it's a 64-bit kernel. And we don't. There *is* no ELF header
that we can look at when we're booting a bzImage, and we can't rely on
it having a PE/COFF header either.

The 32-bit entry point is always guaranteed to work, and we need to
support it anyway. So let's just *always* use it, in 32-bit mode, and
then we don't have to make up some horrible heuristics for detecting
32-bit vs. 64-bit kernels.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>

git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@14045 6f19259b-4bc3-4df7-8a09-765794883524
This commit is contained in:
jljusten 2013-01-14 03:10:57 +00:00
parent fda93fc449
commit 61a114ba46
2 changed files with 84 additions and 8 deletions

View File

@ -23,8 +23,47 @@ ASM_GLOBAL ASM_PFX(JumpToKernel)
# );
#------------------------------------------------------------------------------
// NOTE(review): this section is rendered from a unified diff without +/-
// markers.  The four instructions right after the label appear to be the
// PRE-patch body (fixed 0x200 64-bit entry) and everything from the
// "Set up for executing kernel" comment onward appears to be the
// POST-patch replacement -- confirm against version control.
ASM_PFX(JumpToKernel):
// Pre-patch body: assumed a 64-bit kernel and jumped, still in long
// mode, to the 0x200 offset -- the 64-bit entry point the boot protocol
// explicitly marks "may change in the future".
movq %rdx, %rsi
addq $0x200, %rcx
callq %rcx
ret
// Post-patch body: always enter via the 32-bit entry point, which is
// valid for both 32-bit and 64-bit bzImages.
// Set up for executing kernel. BP in %esi, entry point on the stack
// (64-bit when the 'ret' will use it as 32-bit, but we're little-endian)
movq %rdx, %rsi                 // boot params (2nd arg; rdx per UEFI's MS x64 convention)
pushq %rcx                      // kernel entry point; final 'ret' consumes the low dword
// Jump into the compatibility mode CS
pushq $0x10                     // selector 0x10: 32-bit code segment -- assumes firmware GDT layout, confirm
leaq 1f, %rax                   // NOTE(review): absolute (non-RIP-relative) lea; acceptable in non-PIC firmware
pushq %rax
retfq                           // far return: loads CS=0x10, IP=1f -> compatibility mode
1: // Now in compatibility mode
.code32
movl $0x18, %eax                // selector 0x18: 32-bit data segment -- presumably flat; verify against GDT
movl %eax, %ds
movl %eax, %es
movl %eax, %fs
movl %eax, %gs
movl %eax, %ss
// Disable paging: toggle CR0.PG (bit 31) -- btc clears it since PG is set here
movl %cr0, %eax
btcl $31, %eax
movl %eax, %cr0
// Disable long mode in EFER: toggle LME (bit 8) of MSR 0xC0000080
movl $0x0c0000080, %ecx
rdmsr
btcl $8, %eax
wrmsr
// Disable PAE: toggle CR4.PAE (bit 5) -- now in plain 32-bit protected mode
movl %cr4, %eax
btcl $5, %eax
movl %eax, %cr4
// Zero registers and 'return' to kernel
// (32-bit boot protocol: %esi = boot params, %ebp/%edi/%ebx = 0)
xorl %ebp, %ebp
xorl %edi, %edi
xorl %ebx, %ebx
ret                             // pops low dword of the pushed entry point as the 32-bit EIP
.code64

View File

@ -24,10 +24,47 @@
;------------------------------------------------------------------------------
JumpToKernel PROC
; NOTE(review): rendered from a unified diff without +/- markers.  The
; four instructions below appear to be the PRE-patch body (fixed 0x200
; 64-bit entry) and the rest the POST-patch replacement -- confirm
; against version control.
; Pre-patch: assumed a 64-bit kernel, jumped in long mode to the
; officially-unstable 0x200 entry offset.
mov rsi, rdx
add rcx, 200h
call rcx
ret
; Post-patch: always enter via the 32-bit entry point, valid for both
; 32-bit and 64-bit bzImages.
; Set up for executing kernel. BP in %esi, entry point on the stack
; (64-bit when the 'ret' will use it as 32-bit, but we're little-endian)
mov rsi, rdx                            ; boot params (2nd arg, MS x64 ABI)
push rcx                                ; kernel entry point; final 'ret' uses low dword
; Jump into the compatibility mode CS
push 10h                                ; selector 10h: 32-bit code segment -- assumes firmware GDT, confirm
lea rax, @F
push rax
DB 048h, 0cbh ; retfq                   ; far return: CS=10h -> compatibility mode (MASM lacks retfq)
@@:
; Now in compatibility mode.  The 32-bit instructions below are
; hand-encoded as DB bytes because MASM assembles this file as 64-bit.
DB 0b8h, 018h, 000h, 000h, 000h ; movl $0x18, %eax  (selector 18h: 32-bit data segment)
DB 08eh, 0d8h ; movl %eax, %ds
DB 08eh, 0c0h ; movl %eax, %es
DB 08eh, 0e0h ; movl %eax, %fs
DB 08eh, 0e8h ; movl %eax, %gs
DB 08eh, 0d0h ; movl %eax, %ss
; Disable paging: toggle CR0.PG (bit 31); btc clears it since PG is set here
DB 00fh, 020h, 0c0h ; movl %cr0, %eax
DB 00fh, 0bah, 0f8h, 01fh ; btcl $31, %eax
DB 00fh, 022h, 0c0h ; movl %eax, %cr0
; Disable long mode in EFER: toggle LME (bit 8) of MSR 0xC0000080
DB 0b9h, 080h, 000h, 000h, 0c0h ; movl $0x0c0000080, %ecx
DB 00fh, 032h ; rdmsr
DB 00fh, 0bah, 0f8h, 008h ; btcl $8, %eax
DB 00fh, 030h ; wrmsr
; Disable PAE: toggle CR4.PAE (bit 5) -- now in plain 32-bit protected mode
DB 00fh, 020h, 0e0h ; movl %cr4, %eax
DB 00fh, 0bah, 0f8h, 005h ; btcl $5, %eax
DB 00fh, 022h, 0e0h ; movl %eax, %cr4
; Zero registers and 'return' to the kernel's 32-bit entry point
; (32-bit boot protocol: esi = boot params, ebp/edi/ebx = 0)
DB 031h, 0edh ; xor %ebp, %ebp
DB 031h, 0ffh ; xor %edi, %edi
DB 031h, 0dbh ; xor %ebx, %ebx
DB 0c3h ; ret                           ; pops low dword of pushed rcx as 32-bit EIP
JumpToKernel ENDP