MdeModulePkg/NvmExpressDxe: Refine PassThru IO queue creation behavior

REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1260

For the PassThru() service of the NVM Express Pass Through Protocol, the
current implementation (function NvmExpressPassThru()) will only use the
IO Completion/Submission queues created internally by this driver during
the controller initialization process. Any other IO queues created will
not be consumed.

Hence, there is little value in accepting external IO
Completion/Submission queue creation requests. This commit refines the
behavior of the function NvmExpressPassThru(): it now accepts only the
driver's own internal IO queue creation commands and returns
EFI_UNSUPPORTED for external ones.
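
As an illustration (not part of this commit), consider a hypothetical
external caller that locates the NVM Express Pass Through Protocol and
submits an admin Create IO Completion Queue (CRIOCQ) command itself.
The sketch below uses the packet and command structures from
<Protocol/NvmExpressPassthru.h> and the opcode definitions from
<IndustryStandard/Nvme.h>; the helper name and the elided queue-buffer
setup are assumptions for illustration only:

#include <Uefi.h>
#include <Protocol/NvmExpressPassthru.h>
#include <IndustryStandard/Nvme.h>
#include <Library/BaseMemoryLib.h>

//
// Hypothetical external caller: after this commit, an IO queue
// creation command arriving from outside the driver is rejected
// with EFI_UNSUPPORTED.
//
EFI_STATUS
TryExternalCreateIocq (
  IN EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL  *NvmePassThru
  )
{
  EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  Packet;
  EFI_NVM_EXPRESS_COMMAND                   Command;
  EFI_NVM_EXPRESS_COMPLETION                Completion;

  ZeroMem (&Packet, sizeof (Packet));
  ZeroMem (&Command, sizeof (Command));
  ZeroMem (&Completion, sizeof (Completion));

  Command.Cdw0.Opcode   = NVME_ADMIN_CRIOCQ_CMD;   // Create IO Completion Queue
  Packet.NvmeCmd        = &Command;
  Packet.NvmeCompletion = &Completion;
  Packet.QueueType      = NVME_ADMIN_QUEUE;
  Packet.CommandTimeout = EFI_TIMER_PERIOD_SECONDS (5);
  //
  // Queue buffer and CDW10/CDW11 (queue size/ID) setup omitted; they no
  // longer matter, since the command is now rejected up front.
  //
  // NamespaceId 0: the command is not namespace-specific.
  //
  return NvmePassThru->PassThru (NvmePassThru, 0, &Packet, NULL);
}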

Cc: Jiewen Yao <Jiewen.yao@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Hao Wu <hao.a.wu@intel.com>
Reviewed-by: Ruiyu Ni <ruiyu.ni@intel.com>
Author: Hao Wu
Date:   2018-10-23 20:57:43 +08:00
Parent: 5687ae5723
Commit: 8411c9d5c4

3 changed files with 29 additions and 9 deletions

@@ -3,7 +3,7 @@
   NVM Express specification.
 
   (C) Copyright 2016 Hewlett Packard Enterprise Development LP<BR>
-  Copyright (c) 2013 - 2017, Intel Corporation. All rights reserved.<BR>
+  Copyright (c) 2013 - 2018, Intel Corporation. All rights reserved.<BR>
   This program and the accompanying materials
   are licensed and made available under the terms and conditions of the BSD License
   which accompanies this distribution. The full text of the license may be found at
@@ -147,6 +147,11 @@ struct _NVME_CONTROLLER_PRIVATE_DATA {
   NVME_CQHDBL                         CqHdbl[NVME_MAX_QUEUES];
   UINT16                              AsyncSqHead;
+  //
+  // Flag to indicate internal IO queue creation.
+  //
+  BOOLEAN                             CreateIoQueue;
+
   UINT8                               Pt[NVME_MAX_QUEUES];
   UINT16                              Cid[NVME_MAX_QUEUES];

@@ -584,6 +584,7 @@ NvmeCreateIoCompletionQueue (
   UINT16                                   QueueSize;
 
   Status = EFI_SUCCESS;
+  Private->CreateIoQueue = TRUE;
 
   for (Index = 1; Index < NVME_MAX_QUEUES; Index++) {
     ZeroMem (&CommandPacket, sizeof(EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET));
@@ -627,6 +628,8 @@
     }
   }
 
+  Private->CreateIoQueue = FALSE;
+
   return Status;
 }
@@ -653,6 +656,7 @@ NvmeCreateIoSubmissionQueue (
   UINT16                                   QueueSize;
 
   Status = EFI_SUCCESS;
+  Private->CreateIoQueue = TRUE;
 
   for (Index = 1; Index < NVME_MAX_QUEUES; Index++) {
     ZeroMem (&CommandPacket, sizeof(EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET));
@@ -698,6 +702,8 @@
     }
   }
 
+  Private->CreateIoQueue = FALSE;
+
   return Status;
 }

@@ -587,14 +587,23 @@ NvmExpressPassThru (
   }
   Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
 
-  //
-  // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller specific addresses.
-  // Note here we don't handle data buffer for CreateIOSubmitionQueue and CreateIOCompletionQueue cmds because
-  // these two cmds are special which requires their data buffer must support simultaneous access by both the
-  // processor and a PCI Bus Master. It's caller's responsbility to ensure this.
-  //
-  if (((Sq->Opc & (BIT0 | BIT1)) != 0) &&
-      !((Packet->QueueType == NVME_ADMIN_QUEUE) && ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD)))) {
+  if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
+      ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
+    //
+    // Currently, we only use the IO Completion/Submission queues created internally
+    // by this driver during controller initialization. Any other IO queues created
+    // will not be consumed here. The value is little to accept external IO queue
+    // creation requests, so here we will return EFI_UNSUPPORTED for external IO
+    // queue creation request.
+    //
+    if (!Private->CreateIoQueue) {
+      DEBUG ((DEBUG_ERROR, "NvmExpressPassThru: Does not support external IO queues creation request.\n"));
+      return EFI_UNSUPPORTED;
+    }
+  } else if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
+    //
+    // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller specific addresses.
+    //
   if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||
       ((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL))) {
     return EFI_INVALID_PARAMETER;
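
After this hunk, the dispatch at the top of the data-handling path in
NvmExpressPassThru() effectively reads as follows (a condensed sketch;
the surrounding DMA-mapping logic is elided):

  if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
      ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
    //
    // IO queue creation command: only honored while the driver itself
    // is creating its queues (Private->CreateIoQueue is TRUE).
    //
    if (!Private->CreateIoQueue) {
      return EFI_UNSUPPORTED;
    }
  } else if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    //
    // Any other command that transfers data: buffer and length must be
    // consistent before the buffer is mapped for the controller.
    //
    if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||
        ((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL))) {
      return EFI_INVALID_PARAMETER;
    }
  }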