From 844e813087da2dc3c0da98f0ed103fc8e100b8a6 Mon Sep 17 00:00:00 2001 From: Vivian Nowka-Keane Date: Thu, 23 Jan 2025 17:58:39 -0800 Subject: [PATCH] MdeModulePkg: NvmExpressDxe: Request queues from the controller. Instead of assuming we always want to create two sets of data queues, we request two and the controller will determine how many are allocated. Signed-off-by: Vivian Nowka-Keane to squash Remove check for blockIo2 installation Check for both NSQA and NCQA --- .../Bus/Pci/NvmExpressDxe/NvmExpress.c | 106 ++++++++++----- .../Bus/Pci/NvmExpressDxe/NvmExpress.h | 9 +- .../Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c | 12 ++ .../Bus/Pci/NvmExpressDxe/NvmExpressHci.c | 126 ++++++++++++++++-- 4 files changed, 208 insertions(+), 45 deletions(-) diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c index 8b7af26c8a..1c5c393291 100644 --- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c +++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c @@ -172,14 +172,16 @@ EnumerateNvmeDevNamespace ( Device->BlockIo.WriteBlocks = NvmeBlockIoWriteBlocks; Device->BlockIo.FlushBlocks = NvmeBlockIoFlushBlocks; - // - // Create BlockIo2 Protocol instance - // - Device->BlockIo2.Media = &Device->Media; - Device->BlockIo2.Reset = NvmeBlockIoResetEx; - Device->BlockIo2.ReadBlocksEx = NvmeBlockIoReadBlocksEx; - Device->BlockIo2.WriteBlocksEx = NvmeBlockIoWriteBlocksEx; - Device->BlockIo2.FlushBlocksEx = NvmeBlockIoFlushBlocksEx; + if (Private->NSQA > 1) { + // We have multiple data queues, so we can support the BlockIo2 protocol + + // Create BlockIo2 Protocol instance + Device->BlockIo2.Media = &Device->Media; + Device->BlockIo2.Reset = NvmeBlockIoResetEx; + Device->BlockIo2.ReadBlocksEx = NvmeBlockIoReadBlocksEx; + Device->BlockIo2.WriteBlocksEx = NvmeBlockIoWriteBlocksEx; + Device->BlockIo2.FlushBlocksEx = NvmeBlockIoFlushBlocksEx; + } InitializeListHead (&Device->AsyncQueue); // MU_CHANGE Start - Add Media Sanitize @@ -260,8 +262,6 @@ EnumerateNvmeDevNamespace ( Device->DevicePath, &gEfiBlockIoProtocolGuid, &Device->BlockIo, - &gEfiBlockIo2ProtocolGuid, - &Device->BlockIo2, &gEfiDiskInfoProtocolGuid, &Device->DiskInfo, NULL @@ -271,6 +271,19 @@ EnumerateNvmeDevNamespace ( goto Exit; } + if (Private->NSQA > 1) { + // We have multiple data queues, so we can support the BlockIo2 protocol + Status = gBS->InstallProtocolInterface ( + &Device->DeviceHandle, + &gEfiBlockIo2ProtocolGuid, + EFI_NATIVE_INTERFACE, // this is the only option in the enum..? + &Device->BlockIo2 + ); + if (EFI_ERROR (Status)) { + goto Exit; + } + } + // // Check if the NVMe controller supports the Security Send and Security Receive commands // @@ -288,12 +301,20 @@ EnumerateNvmeDevNamespace ( Device->DevicePath, &gEfiBlockIoProtocolGuid, &Device->BlockIo, - &gEfiBlockIo2ProtocolGuid, - &Device->BlockIo2, &gEfiDiskInfoProtocolGuid, &Device->DiskInfo, NULL ); + + if (Private->NSQA > 1) { + // We have multiple data queues, so we need to uninstall the BlockIo2 protocol + gBS->UninstallProtocolInterface ( + Device->DeviceHandle, + &gEfiBlockIo2ProtocolGuid, + &Device->BlockIo2 + ); + } + goto Exit; } } @@ -487,8 +508,6 @@ UnregisterNvmeNamespace ( Device->DevicePath, &gEfiBlockIoProtocolGuid, &Device->BlockIo, - &gEfiBlockIo2ProtocolGuid, - &Device->BlockIo2, &gEfiDiskInfoProtocolGuid, &Device->DiskInfo, NULL @@ -506,6 +525,28 @@ UnregisterNvmeNamespace ( return Status; } + // + // If BlockIo2 is installed, uninstall it. 
+ // + if (Device->Controller->NSQA > 1) { + Status = gBS->UninstallProtocolInterface ( + Handle, + &gEfiBlockIo2ProtocolGuid, + &Device->BlockIo2 + ); + if (EFI_ERROR (Status)) { + gBS->OpenProtocol ( + Controller, + &gEfiNvmExpressPassThruProtocolGuid, + (VOID **)&DummyInterface, + This->DriverBindingHandle, + Handle, + EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER + ); + return Status; + } + } + // // If Storage Security Command Protocol is installed, then uninstall this protocol. // @@ -1112,24 +1153,27 @@ NvmExpressDriverBindingStart ( // // Start the asynchronous I/O completion monitor // - Status = gBS->CreateEvent ( - EVT_TIMER | EVT_NOTIFY_SIGNAL, - TPL_NOTIFY, - ProcessAsyncTaskList, - Private, - &Private->TimerEvent - ); - if (EFI_ERROR (Status)) { - goto Exit; - } + if (Private->NSQA > 1) { + + Status = gBS->CreateEvent ( + EVT_TIMER | EVT_NOTIFY_SIGNAL, + TPL_NOTIFY, + ProcessAsyncTaskList, + Private, + &Private->TimerEvent + ); + if (EFI_ERROR (Status)) { + goto Exit; + } - Status = gBS->SetTimer ( - Private->TimerEvent, - TimerPeriodic, - NVME_HC_ASYNC_TIMER - ); - if (EFI_ERROR (Status)) { - goto Exit; + Status = gBS->SetTimer ( + Private->TimerEvent, + TimerPeriodic, + NVME_HC_ASYNC_TIMER + ); + if (EFI_ERROR (Status)) { + goto Exit; + } } Status = gBS->InstallMultipleProtocolInterfaces ( diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h index 02cf123052..993a5a84b7 100644 --- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h +++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h @@ -78,7 +78,8 @@ extern EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL gNvmExpressDriverSupportedEfiV // #define NVME_ASYNC_CCQ_SIZE 255 -#define NVME_MAX_QUEUES 3 // Number of queues supported by the driver +// Maximum number of queue pairs supported by the driver, including the admin queues. +#define NVME_MAX_QUEUES 3 // MU_CHANGE Start - Add Media Sanitize // @@ -161,6 +162,12 @@ struct _NVME_CONTROLLER_PRIVATE_DATA { // NVME_ADMIN_CONTROLLER_DATA *ControllerData; + // + // Number of Queues Allocated by the controller (0-based where 0 is 1 queue) + // + UINT32 NSQA; // Number of Submission Queues Allocated + UINT32 NCQA; // Number of Completion Queues Allocated + // // 6 x 4kB aligned buffers will be carved out of this buffer. // 1st 4kB boundary is the start of the admin submission queue. diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c index e0a85c1cb3..c9ec15618f 100644 --- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c +++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c @@ -169,6 +169,10 @@ NvmeRead ( BOOLEAN IsEmpty; EFI_TPL OldTpl; + if (&Device->AsyncQueue == NULL) { + return EFI_UNSUPPORTED; + } + // // Wait for the device's asynchronous I/O queue to become empty. // @@ -255,6 +259,10 @@ NvmeWrite ( BOOLEAN IsEmpty; EFI_TPL OldTpl; + if (&Device->AsyncQueue == NULL) { + return EFI_UNSUPPORTED; + } + // // Wait for the device's asynchronous I/O queue to become empty. // @@ -1540,6 +1548,10 @@ NvmeBlockIoFlushBlocksEx ( Device = NVME_DEVICE_PRIVATE_DATA_FROM_BLOCK_IO2 (This); + if (&Device->AsyncQueue == NULL) { + return EFI_UNSUPPORTED; + } + // // Wait for the asynchronous I/O queue to become empty. 
// diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c index 62d0dff491..5554b31778 100644 --- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c +++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c @@ -567,7 +567,89 @@ NvmeIdentifyNamespace ( } /** - Create io completion queue. + Send the Set Features Command to the controller for the number of queues requested. + Note that the number of queues allocated may be different from the number of queues requested. + The number of queues allocated is returned and stored in the controller private data structure + using the NSQA and NCQA fields. + + @param Private The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure. + @param NCQR The number of completion queues requested + @param NSQR The number of submission queues requested + + @return EFI_SUCCESS Successfully set the number of queues. + @return EFI_DEVICE_ERROR Fail to set the number of queues. + +**/ +EFI_STATUS +NvmeSetFeaturesNumberOfQueues ( + IN OUT NVME_CONTROLLER_PRIVATE_DATA *Private, + IN UINT16 NCQR, + IN UINT16 NSQR + ) +{ + EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET CommandPacket; + EFI_NVM_EXPRESS_COMMAND Command; + EFI_NVM_EXPRESS_COMPLETION Completion; + EFI_STATUS Status; + NVME_ADMIN_SET_FEATURES_CDW10 SetFeatures; + NVME_ADMIN_SET_FEATURES_NUM_QUEUES NumberOfQueuesRequested; + NVME_ADMIN_SET_FEATURES_NUM_QUEUES NumberOfQueuesAllocated; + + Status = EFI_SUCCESS; + + ZeroMem (&CommandPacket, sizeof (EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET)); + ZeroMem (&Command, sizeof (EFI_NVM_EXPRESS_COMMAND)); + ZeroMem (&Completion, sizeof (EFI_NVM_EXPRESS_COMPLETION)); + ZeroMem (&SetFeatures, sizeof (NVME_ADMIN_SET_FEATURES)); + ZeroMem (&NumberOfQueuesRequested, sizeof (NVME_ADMIN_SET_FEATURES_NUM_QUEUES)); + ZeroMem (&NumberOfQueuesAllocated, sizeof (NVME_ADMIN_SET_FEATURES_NUM_QUEUES)); + + CommandPacket.NvmeCmd = &Command; + CommandPacket.NvmeCompletion = &Completion; + CommandPacket.CommandTimeout = NVME_GENERIC_TIMEOUT; + CommandPacket.QueueType = NVME_ADMIN_QUEUE; + Command.Nsid = 0; // NSID must be set to 0h or FFFFFFFFh for an admin command + Command.Cdw0.Opcode = NVME_ADMIN_SET_FEATURES_CMD; + + // Populate the Set Features Cdw10 and Cdw11 according to Nvm Express 1.3d Spec + SetFeatures.Bits.Fid = NVME_FEATURE_NUMBER_OF_QUEUES; + NumberOfQueuesRequested.Bits.NCQ = NCQR; + NumberOfQueuesRequested.Bits.NSQ = NSQR; + CommandPacket.NvmeCmd->Cdw10 = SetFeatures.Uint32; + CommandPacket.NvmeCmd->Cdw11 = NumberOfQueuesRequested.Uint32; + + CommandPacket.NvmeCmd->Flags = CDW10_VALID | CDW11_VALID; + + DEBUG((DEBUG_INFO, "Number of Queues Requested: NSQR=%d, NCQR=%d\n", NSQR, NCQR)); + + // Send the Set Features Command for Number of Queues + Status = Private->Passthru.PassThru ( + &Private->Passthru, + 0, + &CommandPacket, + NULL + ); + + if (EFI_ERROR (Status)) { + DEBUG ((DEBUG_ERROR, "Set Features Command for Number of Queues failed with Status %r\n", Status)); + return Status; + } + + // + // Save the number of queues allocated, adding 1 to account for it being a 0-based value. + // E.g. if 1 pair of data queues is allocated NSQ=0, NCQ=0, then NSQA=1, NCQA=1 + // These numbers do not include the admin queues. 
+ // + NumberOfQueuesAllocated.Uint32 = CommandPacket.NvmeCompletion->DW0; + Private->NSQA = NumberOfQueuesAllocated.Bits.NSQ + 1; + Private->NCQA = NumberOfQueuesAllocated.Bits.NCQ + 1; + + DEBUG((DEBUG_INFO, "Number of Queues Allocated: NSQA=%d, NCQA=%d\n", Private->NSQA, Private->NCQA)); + return Status; +} + +/** + Create io completion queue(s). @param Private The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure. @@ -591,7 +673,8 @@ NvmeCreateIoCompletionQueue ( Status = EFI_SUCCESS; Private->CreateIoQueue = TRUE; - for (Index = 1; Index < NVME_MAX_QUEUES; Index++) { + // Start from Index 1 because Index 0 is reserved for admin queue + for (Index = 1; Index <= Private->NCQA; Index++) { ZeroMem (&CommandPacket, sizeof (EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET)); ZeroMem (&Command, sizeof (EFI_NVM_EXPRESS_COMMAND)); ZeroMem (&Completion, sizeof (EFI_NVM_EXPRESS_COMPLETION)); @@ -610,11 +693,9 @@ NvmeCreateIoCompletionQueue ( if (PcdGetBool (PcdSupportAlternativeQueueSize)) { QueueSize = MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes); } else if (Index == 1) { - QueueSize = NVME_CCQ_SIZE; - } else if (Private->Cap.Mqes > NVME_ASYNC_CCQ_SIZE) { - QueueSize = NVME_ASYNC_CCQ_SIZE; + QueueSize = MIN (NVME_CCQ_SIZE, Private->Cap.Mqes); } else { - QueueSize = Private->Cap.Mqes; + QueueSize = MIN (NVME_ASYNC_CCQ_SIZE, Private->Cap.Mqes); } // MU_CHANGE [END] @@ -632,6 +713,7 @@ NvmeCreateIoCompletionQueue ( NULL ); if (EFI_ERROR (Status)) { + DEBUG ((DEBUG_ERROR, "Create Completion Queue Command %d failed with Status %r\n", Index, Status)); break; } } @@ -642,7 +724,7 @@ NvmeCreateIoCompletionQueue ( } /** - Create io submission queue. + Create io submission queue(s). @param Private The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure. @@ -666,7 +748,8 @@ NvmeCreateIoSubmissionQueue ( Status = EFI_SUCCESS; Private->CreateIoQueue = TRUE; - for (Index = 1; Index < NVME_MAX_QUEUES; Index++) { + // Start from Index 1 because Index 0 is reserved for admin queue + for (Index = 1; Index <= Private->NSQA; Index++) { ZeroMem (&CommandPacket, sizeof (EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET)); ZeroMem (&Command, sizeof (EFI_NVM_EXPRESS_COMMAND)); ZeroMem (&Completion, sizeof (EFI_NVM_EXPRESS_COMPLETION)); @@ -685,11 +768,9 @@ NvmeCreateIoSubmissionQueue ( if (PcdGetBool (PcdSupportAlternativeQueueSize)) { QueueSize = MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes); } else if (Index == 1) { - QueueSize = NVME_CCQ_SIZE; - } else if (Private->Cap.Mqes > NVME_ASYNC_CCQ_SIZE) { - QueueSize = NVME_ASYNC_CCQ_SIZE; + QueueSize = MIN (NVME_CCQ_SIZE, Private->Cap.Mqes); } else { - QueueSize = Private->Cap.Mqes; + QueueSize = MIN (NVME_ASYNC_CCQ_SIZE, Private->Cap.Mqes); } // MU_CHANGE [END] @@ -709,6 +790,7 @@ NvmeCreateIoSubmissionQueue ( NULL ); if (EFI_ERROR (Status)) { + DEBUG ((DEBUG_ERROR, "Create Submission Queue Command %d failed with Status %r\n", Index, Status)); break; } } @@ -817,7 +899,7 @@ NvmeControllerInit ( } // MU_CHANGE [END] - Improve NVMe controller init robustness - + // vnk: assumption of 3 queues made here (based on header file/max queues macro) Private->Cid[0] = 0; Private->Cid[1] = 0; Private->Cid[2] = 0; @@ -863,6 +945,8 @@ NvmeControllerInit ( Acq = (UINT64)(UINTN)(Private->BufferPciAddr + EFI_PAGE_SIZE) & ~0xFFF; } + // vnk: assumption of 3 queues made here + // // Address of I/O submission & completion queue. 
 //
@@ -984,6 +1068,22 @@ NvmeControllerInit (
   DEBUG ((DEBUG_INFO, "    CQES      : 0x%x\n", Private->ControllerData->Cqes));
   DEBUG ((DEBUG_INFO, "    NN        : 0x%x\n", Private->ControllerData->Nn));
 
+  //
+  // Send the Set Features Command to request the maximum number of data queues.
+  // The controller is free to allocate a different number of queues from the number requested.
+  // The number of queues allocated is returned and stored in the controller private data structure
+  // using the NSQA and NCQA fields.
+  //
+  Status = NvmeSetFeaturesNumberOfQueues (Private, NVME_MAX_QUEUES - 1, NVME_MAX_QUEUES - 1);
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  // TODO: Do we need to invalidate CqBuffer[3]/SqBuffer[3] by freeing the memory
+  // and setting their addresses to NULL? What happens if we don't?
+  // Can we check the number of queues before allocating the data queues?
+
   //
   // Create two I/O completion queues.
   // One for blocking I/O, one for non-blocking I/O.