/** @file
  CPU MP Initialize helper function for AMD SEV.

  Copyright (c) 2021, AMD Inc. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "MpLib.h"
#include <Library/CcExitLib.h>

/**
  Get Protected mode code segment with 16-bit default addressing
  from current GDT table.

  @return  Protected mode 16-bit code segment value.
**/
STATIC
UINT16
GetProtectedMode16CS (
  VOID
  )
{
  IA32_DESCRIPTOR          GdtrDesc;
  IA32_SEGMENT_DESCRIPTOR  *GdtEntry;
  UINTN                    GdtEntryCount;
  UINT16                   Index;

  Index = (UINT16)-1;
  AsmReadGdtr (&GdtrDesc);
  GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
  GdtEntry      = (IA32_SEGMENT_DESCRIPTOR *)GdtrDesc.Base;
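  //
  // Look for a code segment (Bits.Type > 8) that is not 64-bit
  // (Bits.L == 0) and uses a 16-bit default operand size (Bits.DB == 0).
  // GDT descriptors are 8 bytes each, so the matching selector is Index * 8.
  //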
  for (Index = 0; Index < GdtEntryCount; Index++) {
    if ((GdtEntry->Bits.L == 0) &&
        (GdtEntry->Bits.DB == 0) &&
        (GdtEntry->Bits.Type > 8))
    {
      break;
    }

    GdtEntry++;
  }

  ASSERT (Index != GdtEntryCount);
  return Index * 8;
}

/**
  Get Protected mode code segment with 32-bit default addressing
  from current GDT table.

  @return  Protected mode 32-bit code segment value.
**/
STATIC
UINT16
GetProtectedMode32CS (
  VOID
  )
{
  IA32_DESCRIPTOR          GdtrDesc;
  IA32_SEGMENT_DESCRIPTOR  *GdtEntry;
  UINTN                    GdtEntryCount;
  UINT16                   Index;

  Index = (UINT16)-1;
  AsmReadGdtr (&GdtrDesc);
  GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
  GdtEntry      = (IA32_SEGMENT_DESCRIPTOR *)GdtrDesc.Base;
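  //
  // Look for a code segment (Bits.Type > 8) that is not 64-bit
  // (Bits.L == 0) and uses a 32-bit default operand size (Bits.DB == 1).
  // GDT descriptors are 8 bytes each, so the matching selector is Index * 8.
  //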
  for (Index = 0; Index < GdtEntryCount; Index++) {
    if ((GdtEntry->Bits.L == 0) &&
        (GdtEntry->Bits.DB == 1) &&
        (GdtEntry->Bits.Type > 8))
    {
      break;
    }

    GdtEntry++;
  }

  ASSERT (Index != GdtEntryCount);
  return Index * 8;
}

/**
  Reset an AP when in SEV-ES mode.

  If successful, this function never returns.

  @param[in] Ghcb       Pointer to the GHCB
  @param[in] CpuMpData  Pointer to CPU MP Data

**/
VOID
MpInitLibSevEsAPReset (
  IN GHCB         *Ghcb,
  IN CPU_MP_DATA  *CpuMpData
  )
{
  EFI_STATUS  Status;
  UINTN       ProcessorNumber;
  UINT16      Code16, Code32;
  AP_RESET    *APResetFn;
  UINTN       BufferStart;
  UINTN       StackStart;

  Status = GetProcessorNumber (CpuMpData, &ProcessorNumber);
  ASSERT_EFI_ERROR (Status);

  Code16 = GetProtectedMode16CS ();
  Code32 = GetProtectedMode32CS ();

  APResetFn = (AP_RESET *)(CpuMpData->WakeupBufferHigh + CpuMpData->AddressMap.SwitchToRealNoNxOffset);

  BufferStart = CpuMpData->MpCpuExchangeInfo->BufferStart;
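  //
  // Each AP uses its own AP_RESET_STACK_SIZE slice carved out below
  // SevEsAPResetStackStart, selected by its processor number.
  //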
  StackStart = CpuMpData->SevEsAPResetStackStart -
               (AP_RESET_STACK_SIZE * ProcessorNumber);

  //
  // This call never returns.
  //
  APResetFn (BufferStart, Code16, Code32, StackStart);
}

/**
  Allocate the SEV-ES AP jump table buffer.

  @param[in, out] CpuMpData  The pointer to CPU MP Data structure.
**/
VOID
AllocateSevEsAPMemory (
  IN OUT CPU_MP_DATA  *CpuMpData
  )
{
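  //
  // A SevEsAPBuffer of (UINTN)-1 means the buffer has not been set up yet.
  // Reserve it only when SEV-ES is active; otherwise record 0 so no further
  // allocation is attempted.
  //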
  if (CpuMpData->SevEsAPBuffer == (UINTN)-1) {
    CpuMpData->SevEsAPBuffer =
      CpuMpData->SevEsIsEnabled ? GetSevEsAPMemory () : 0;
  }
}

/**
  Program the SEV-ES AP jump table buffer.

  @param[in] SipiVector  The SIPI vector used for the AP Reset
**/
VOID
SetSevEsJumpTable (
  IN UINTN  SipiVector
  )
{
  SEV_ES_AP_JMP_FAR  *JmpFar;
  UINT32             Offset, InsnByte;
  UINT8              LoNib, HiNib;

  JmpFar = (SEV_ES_AP_JMP_FAR *)(UINTN)FixedPcdGet32 (PcdSevEsWorkAreaBase);
  ASSERT (JmpFar != NULL);

  //
  // Obtain the address of the Segment/Rip location in the workarea.
  // This will be set to a value derived from the SIPI vector and will
  // be the memory address used for the far jump below.
  //
  Offset  = FixedPcdGet32 (PcdSevEsWorkAreaBase);
  Offset += sizeof (JmpFar->InsnBuffer);
  LoNib   = (UINT8)Offset;
  HiNib   = (UINT8)(Offset >> 8);

  //
  // Program the workarea (which is the initial AP boot address) with the
  // far jump to the SIPI vector (where XX and YY represent the address of
  // where the SIPI vector is stored).
  //
  // JMP FAR [CS:XXYY] => 2E FF 2E YY XX
  //
  InsnByte                       = 0;
  JmpFar->InsnBuffer[InsnByte++] = 0x2E;  // CS override prefix
  JmpFar->InsnBuffer[InsnByte++] = 0xFF;  // JMP (FAR)
  JmpFar->InsnBuffer[InsnByte++] = 0x2E;  // ModRM (JMP memory location)
  JmpFar->InsnBuffer[InsnByte++] = LoNib; // YY offset ...
  JmpFar->InsnBuffer[InsnByte++] = HiNib; // XX offset ...
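
  //
  // The displacement programmed above is the address of JmpFar->Rip, so the
  // indirect far jump loads its new Segment:Rip from the fields set below.
  //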

  //
  // Program the Segment/Rip based on the SIPI vector (always at least
  // 16-byte aligned, so Rip is set to 0).
  //
  JmpFar->Rip     = 0;
  JmpFar->Segment = (UINT16)(SipiVector >> 4);
}

/**
  The function puts the AP in halt loop.

  @param[in]  CpuMpData  The pointer to CPU MP Data structure.
**/
VOID
SevEsPlaceApHlt (
  CPU_MP_DATA  *CpuMpData
  )
{
  MSR_SEV_ES_GHCB_REGISTER  Msr;
  GHCB                      *Ghcb;
  UINT64                    Status;
  BOOLEAN                   DoDecrement;
  BOOLEAN                   InterruptState;

  DoDecrement = (BOOLEAN)(CpuMpData->InitFlag == ApInitConfig);

  while (TRUE) {
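    //
    // The GHCB MSR holds the physical address of this vCPU's GHCB page;
    // read it back so the GHCB can be used for the AP_RESET_HOLD VMGEXIT.
    //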
    Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
    Ghcb                    = Msr.Ghcb;

    CcExitVmgInit (Ghcb, &InterruptState);

    if (DoDecrement) {
      DoDecrement = FALSE;

      //
      // Perform the delayed decrement just before issuing the first
      // VMGEXIT with AP_RESET_HOLD.
      //
      InterlockedDecrement ((UINT32 *)&CpuMpData->MpCpuExchangeInfo->NumApsExecuting);
    }

    Status = CcExitVmgExit (Ghcb, SVM_EXIT_AP_RESET_HOLD, 0, 0);
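
    //
    // A non-zero SwExitInfo2 indicates the hypervisor has released the AP
    // from the reset hold (a wakeup request was received), so leave the
    // hold loop and reset the AP.
    //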
    if ((Status == 0) && (Ghcb->SaveArea.SwExitInfo2 != 0)) {
      CcExitVmgDone (Ghcb, InterruptState);
      break;
    }

    CcExitVmgDone (Ghcb, InterruptState);
  }

  //
  // Awakened in a new phase? Use the new CpuMpData
  //
  if (CpuMpData->NewCpuMpData != NULL) {
    CpuMpData = CpuMpData->NewCpuMpData;
  }

  MpInitLibSevEsAPReset (Ghcb, CpuMpData);
}

//
// Use the BSP to do the extended topology check on behalf of the APs.
//
// During AP bring-up, just after switching to long mode, APs make CPUID
// calls to verify that the extended topology leaf (0xB) is available so
// they can fetch their x2 APIC IDs from it. Under SEV-ES these CPUID
// instructions must be handled by direct use of the GHCB MSR protocol,
// since a #VC handler is not yet available before the AP stack is set up.
// Under SEV-SNP the values are expected to come from the SEV-SNP CPUID
// table instead, while the actual x2 APIC IDs (and 8-bit APIC IDs) are
// still fetched from the hypervisor via the GHCB MSR protocol, so adding
// CPUID table support to that part of the AP bring-up code would only
// serve the checks of the extended topology leaf. Rather than introducing
// that complexity, the BSP does the check in advance (it can use the #VC
// handler and need not scan the SNP CPUID table directly) and a flag in
// ExchangeInfo communicates the result to the APs.
//
/**
  The function fills the exchange data for the AP.

  @param[in]  ExchangeInfo  The pointer to CPU Exchange Data structure
**/
VOID
FillExchangeInfoDataSevEs (
  IN volatile MP_CPU_EXCHANGE_INFO  *ExchangeInfo
  )
{
  UINT32  StdRangeMax;

  AsmCpuid (CPUID_SIGNATURE, &StdRangeMax, NULL, NULL, NULL);
  if (StdRangeMax >= CPUID_EXTENDED_TOPOLOGY) {
    CPUID_EXTENDED_TOPOLOGY_EBX  ExtTopoEbx;

    AsmCpuid (CPUID_EXTENDED_TOPOLOGY, NULL, &ExtTopoEbx.Uint32, NULL, NULL);
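    //
    // Record whether the extended topology leaf reports any logical
    // processors; APs consult this flag instead of probing CPUID leaf 0xB
    // themselves.
    //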
    ExchangeInfo->ExtTopoAvail = !!ExtTopoEbx.Bits.LogicalProcessors;
  }
}