NvmExpressPeiPassThru.c
/** @file
  The NvmExpressPei driver is used to manage non-volatile memory subsystem
  which follows NVM Express specification at PEI phase.

  Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "NvmExpressPei.h"

/**
  Create PRP lists for a data transfer which is larger than two memory pages.

  @param[in] Private        The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.
  @param[in] PhysicalAddr   The physical base address of the data buffer.
  @param[in] Pages          The number of pages to be transferred.

  @retval The physical address of the first PRP list.

**/
UINT64
NvmeCreatePrpList (
  IN PEI_NVME_CONTROLLER_PRIVATE_DATA  *Private,
  IN EFI_PHYSICAL_ADDRESS              PhysicalAddr,
  IN UINTN                             Pages
  )
{
  UINTN                 PrpEntryNo;
  UINTN                 PrpListNo;
  UINT64                PrpListBase;
  VOID                  *PrpListHost;
  UINTN                 PrpListIndex;
  UINTN                 PrpEntryIndex;
  UINT64                Remainder;
  EFI_PHYSICAL_ADDRESS  PrpListPhyAddr;
  UINTN                 Bytes;
  UINT8                 *PrpEntry;
  EFI_PHYSICAL_ADDRESS  NewPhyAddr;

  //
  // The number of PRP entries in a memory page.
  //
  PrpEntryNo = EFI_PAGE_SIZE / sizeof (UINT64);

  //
  // Calculate the total number of PRP lists.
  //
  PrpListNo = (UINTN)DivU64x64Remainder ((UINT64)Pages, (UINT64)PrpEntryNo, &Remainder);
  if (Remainder != 0) {
    PrpListNo += 1;
  }

  if (PrpListNo > NVME_PRP_SIZE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: The implementation only supports PrpList number up to 4."
      " But %d are needed here.\n",
      __func__,
      PrpListNo
      ));
    return 0;
  }

  PrpListHost = (VOID *)(UINTN)NVME_PRP_BASE (Private);

  Bytes          = EFI_PAGES_TO_SIZE (PrpListNo);
  PrpListPhyAddr = (UINT64)(UINTN)(PrpListHost);

  //
  // Fill all PRP lists except the last one.
  //
  ZeroMem (PrpListHost, Bytes);
  for (PrpListIndex = 0; PrpListIndex < PrpListNo - 1; ++PrpListIndex) {
    PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;

    for (PrpEntryIndex = 0; PrpEntryIndex < PrpEntryNo; ++PrpEntryIndex) {
      PrpEntry = (UINT8 *)(UINTN)(PrpListBase + PrpEntryIndex * sizeof (UINT64));
      if (PrpEntryIndex != PrpEntryNo - 1) {
        //
        // Fill all PRP entries except the last one.
        //
        CopyMem (PrpEntry, (VOID *)(UINTN)(&PhysicalAddr), sizeof (UINT64));
        PhysicalAddr += EFI_PAGE_SIZE;
      } else {
        //
        // Fill the last PRP entry with the pointer to the next PRP list.
        //
        NewPhyAddr = (PrpListPhyAddr + (PrpListIndex + 1) * EFI_PAGE_SIZE);
        CopyMem (PrpEntry, (VOID *)(UINTN)(&NewPhyAddr), sizeof (UINT64));
      }
    }
  }

  //
  // Fill the last PRP list.
  //
  PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;
  for (PrpEntryIndex = 0; PrpEntryIndex < ((Remainder != 0) ? Remainder : PrpEntryNo); ++PrpEntryIndex) {
    PrpEntry = (UINT8 *)(UINTN)(PrpListBase + PrpEntryIndex * sizeof (UINT64));
    CopyMem (PrpEntry, (VOID *)(UINTN)(&PhysicalAddr), sizeof (UINT64));

    PhysicalAddr += EFI_PAGE_SIZE;
  }

  return PrpListPhyAddr;
}
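
//
// Illustrative sketch, not part of the original file: the sizing performed by
// NvmeCreatePrpList in isolation. With the 4 KiB pages used here, one PRP list
// page holds EFI_PAGE_SIZE / sizeof (UINT64) = 512 eight-byte entries, and in
// every list except the last the final entry chains to the next list instead
// of pointing at data. The helper name is hypothetical.
//
STATIC
UINTN
SketchPrpListCount (
  IN UINTN  Pages
  )
{
  UINTN  PrpEntryNo;

  PrpEntryNo = EFI_PAGE_SIZE / sizeof (UINT64);    // 512 entries per list page
  return (Pages + PrpEntryNo - 1) / PrpEntryNo;    // same rounding as the DivU64x64Remainder + 1 above
}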

/**
  Check the execution status from a given completion queue entry.

  @param[in] Cq  A pointer to the NVME_CQ item.

**/
EFI_STATUS
NvmeCheckCqStatus (
  IN volatile NVME_CQ  *Cq
  )
{
  if ((Cq->Sct == 0x0) && (Cq->Sc == 0x0)) {
    return EFI_SUCCESS;
  }

  DEBUG ((DEBUG_INFO, "Dump NVMe Completion Entry Status from [0x%x]:\n", (UINTN)Cq));
  DEBUG ((
    DEBUG_INFO,
    "  SQ Identifier : [0x%x], Phase Tag : [%d], Cmd Identifier : [0x%x]\n",
    Cq->Sqid,
    Cq->Pt,
    Cq->Cid
    ));
  DEBUG ((DEBUG_INFO, "  Status Code Type : [0x%x], Status Code : [0x%x]\n", Cq->Sct, Cq->Sc));
  DEBUG ((DEBUG_INFO, "  NVMe Cmd Execution Result - "));

  switch (Cq->Sct) {
    case 0x0:
      switch (Cq->Sc) {
        case 0x0:
          DEBUG ((DEBUG_INFO, "Successful Completion\n"));
          return EFI_SUCCESS;
        case 0x1:
          DEBUG ((DEBUG_INFO, "Invalid Command Opcode\n"));
          break;
        case 0x2:
          DEBUG ((DEBUG_INFO, "Invalid Field in Command\n"));
          break;
        case 0x3:
          DEBUG ((DEBUG_INFO, "Command ID Conflict\n"));
          break;
        case 0x4:
          DEBUG ((DEBUG_INFO, "Data Transfer Error\n"));
          break;
        case 0x5:
          DEBUG ((DEBUG_INFO, "Commands Aborted due to Power Loss Notification\n"));
          break;
        case 0x6:
          DEBUG ((DEBUG_INFO, "Internal Device Error\n"));
          break;
        case 0x7:
          DEBUG ((DEBUG_INFO, "Command Abort Requested\n"));
          break;
        case 0x8:
          DEBUG ((DEBUG_INFO, "Command Aborted due to SQ Deletion\n"));
          break;
        case 0x9:
          DEBUG ((DEBUG_INFO, "Command Aborted due to Failed Fused Command\n"));
          break;
        case 0xA:
          DEBUG ((DEBUG_INFO, "Command Aborted due to Missing Fused Command\n"));
          break;
        case 0xB:
          DEBUG ((DEBUG_INFO, "Invalid Namespace or Format\n"));
          break;
        case 0xC:
          DEBUG ((DEBUG_INFO, "Command Sequence Error\n"));
          break;
        case 0xD:
          DEBUG ((DEBUG_INFO, "Invalid SGL Last Segment Descriptor\n"));
          break;
        case 0xE:
          DEBUG ((DEBUG_INFO, "Invalid Number of SGL Descriptors\n"));
          break;
        case 0xF:
          DEBUG ((DEBUG_INFO, "Data SGL Length Invalid\n"));
          break;
        case 0x10:
          DEBUG ((DEBUG_INFO, "Metadata SGL Length Invalid\n"));
          break;
        case 0x11:
          DEBUG ((DEBUG_INFO, "SGL Descriptor Type Invalid\n"));
          break;
        case 0x80:
          DEBUG ((DEBUG_INFO, "LBA Out of Range\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Capacity Exceeded\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "Namespace Not Ready\n"));
          break;
        case 0x83:
          DEBUG ((DEBUG_INFO, "Reservation Conflict\n"));
          break;
      }

      break;

    case 0x1:
      switch (Cq->Sc) {
        case 0x0:
          DEBUG ((DEBUG_INFO, "Completion Queue Invalid\n"));
          break;
        case 0x1:
          DEBUG ((DEBUG_INFO, "Invalid Queue Identifier\n"));
          break;
        case 0x2:
          DEBUG ((DEBUG_INFO, "Maximum Queue Size Exceeded\n"));
          break;
        case 0x3:
          DEBUG ((DEBUG_INFO, "Abort Command Limit Exceeded\n"));
          break;
        case 0x5:
          DEBUG ((DEBUG_INFO, "Asynchronous Event Request Limit Exceeded\n"));
          break;
        case 0x6:
          DEBUG ((DEBUG_INFO, "Invalid Firmware Slot\n"));
          break;
        case 0x7:
          DEBUG ((DEBUG_INFO, "Invalid Firmware Image\n"));
          break;
        case 0x8:
          DEBUG ((DEBUG_INFO, "Invalid Interrupt Vector\n"));
          break;
        case 0x9:
          DEBUG ((DEBUG_INFO, "Invalid Log Page\n"));
          break;
        case 0xA:
          DEBUG ((DEBUG_INFO, "Invalid Format\n"));
          break;
        case 0xB:
          DEBUG ((DEBUG_INFO, "Firmware Application Requires Conventional Reset\n"));
          break;
        case 0xC:
          DEBUG ((DEBUG_INFO, "Invalid Queue Deletion\n"));
          break;
        case 0xD:
          DEBUG ((DEBUG_INFO, "Feature Identifier Not Saveable\n"));
          break;
        case 0xE:
          DEBUG ((DEBUG_INFO, "Feature Not Changeable\n"));
          break;
        case 0xF:
          DEBUG ((DEBUG_INFO, "Feature Not Namespace Specific\n"));
          break;
        case 0x10:
          DEBUG ((DEBUG_INFO, "Firmware Application Requires NVM Subsystem Reset\n"));
          break;
        case 0x80:
          DEBUG ((DEBUG_INFO, "Conflicting Attributes\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Invalid Protection Information\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "Attempted Write to Read Only Range\n"));
          break;
      }

      break;

    case 0x2:
      switch (Cq->Sc) {
        case 0x80:
          DEBUG ((DEBUG_INFO, "Write Fault\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Unrecovered Read Error\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "End-to-end Guard Check Error\n"));
          break;
        case 0x83:
          DEBUG ((DEBUG_INFO, "End-to-end Application Tag Check Error\n"));
          break;
        case 0x84:
          DEBUG ((DEBUG_INFO, "End-to-end Reference Tag Check Error\n"));
          break;
        case 0x85:
          DEBUG ((DEBUG_INFO, "Compare Failure\n"));
          break;
        case 0x86:
          DEBUG ((DEBUG_INFO, "Access Denied\n"));
          break;
      }

      break;

    default:
      DEBUG ((DEBUG_INFO, "Unknown error\n"));
      break;
  }

  return EFI_DEVICE_ERROR;
}
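
//
// For orientation when reading the switch above, a sketch that is not part of
// the original file: Sct and Sc come from the Status Field in DWORD 3 of the
// 16-byte completion entry (phase tag at bit 16, Status Code at bits 24:17,
// Status Code Type at bits 27:25 in NVMe 1.x). The function name is
// hypothetical.
//
STATIC
VOID
SketchDecodeStatusField (
  IN  UINT32  Dw3,
  OUT UINT8   *Sct,
  OUT UINT8   *Sc
  )
{
  *Sct = (UINT8)((Dw3 >> 25) & 0x07);    // Status Code Type
  *Sc  = (UINT8)((Dw3 >> 17) & 0xFF);    // Status Code
}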

/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace.

  @param[in]     Private       The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.
  @param[in]     NamespaceId   A 32-bit namespace ID to which the NVM Express command packet will be sent.
  @param[in,out] Packet        A pointer to the NVM Express PassThru Command Packet to send to the
                               NVMe namespace specified by NamespaceId.

  @retval EFI_SUCCESS            The NVM Express Command Packet was sent by the host.
  @retval EFI_INVALID_PARAMETER  Some fields of the Command Packet are invalid.
  @retval EFI_UNSUPPORTED        The command described by the Command Packet is not supported.
  @retval EFI_TIMEOUT            A timeout occurred while waiting for the Command Packet to execute.
  @retval EFI_DEVICE_ERROR       A device error occurred while attempting to send the Command Packet.

**/
EFI_STATUS
NvmePassThruExecute (
  IN     PEI_NVME_CONTROLLER_PRIVATE_DATA          *Private,
  IN     UINT32                                    NamespaceId,
  IN OUT EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  *Packet
  )
{
  EFI_STATUS             Status;
  NVME_SQ                *Sq;
  volatile NVME_CQ       *Cq;
  UINT8                  QueueId;
  UINTN                  SqSize;
  UINTN                  CqSize;
  EDKII_IOMMU_OPERATION  MapOp;
  UINTN                  MapLength;
  EFI_PHYSICAL_ADDRESS   PhyAddr;
  VOID                   *MapData;
  VOID                   *MapMeta;
  UINT32                 Bytes;
  UINT32                 Offset;
  UINT32                 Data32;
  UINT64                 Timer;

  //
  // Check the data fields in Packet parameter
  //
  if (Packet == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: Packet(%lx)\n",
      __func__,
      (UINTN)Packet
      ));
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: NvmeCmd (%lx)/NvmeCompletion(%lx)\n",
      __func__,
      (UINTN)Packet->NvmeCmd,
      (UINTN)Packet->NvmeCompletion
      ));
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->QueueType != NVME_ADMIN_QUEUE) && (Packet->QueueType != NVME_IO_QUEUE)) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: QueueId(%lx)\n",
      __func__,
      (UINTN)Packet->QueueType
      ));
    return EFI_INVALID_PARAMETER;
  }

  QueueId = Packet->QueueType;
  Sq      = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq      = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  if (QueueId == NVME_ADMIN_QUEUE) {
    SqSize = NVME_ASQ_SIZE + 1;
    CqSize = NVME_ACQ_SIZE + 1;
  } else {
    SqSize = NVME_CSQ_SIZE + 1;
    CqSize = NVME_CCQ_SIZE + 1;
  }

  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: Nsid mismatch (%x, %x)\n",
      __func__,
      Packet->NvmeCmd->Nsid,
      NamespaceId
      ));
    return EFI_INVALID_PARAMETER;
  }

  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = (UINT8)Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = (UINT8)Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Private->Cid[QueueId]++;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((DEBUG_ERROR, "%a: Does not support SGL mechanism.\n", __func__));
    return EFI_UNSUPPORTED;
  }

  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  Sq->Prp[1] = 0;
  MapData    = NULL;
  MapMeta    = NULL;
  Status     = EFI_SUCCESS;
  //
  // If the NVMe command has data in or out, map the user buffer to the
  // PCI-controller-specific addresses.
  //
  if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||
        ((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL)))
    {
      return EFI_INVALID_PARAMETER;
    }

    //
    // Currently, we only support creating IO submission/completion queues that are
    // allocated internally by the driver.
    //
    if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
        ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD)))
    {
      if ((Packet->TransferBuffer != Private->SqBuffer[NVME_IO_QUEUE]) &&
          (Packet->TransferBuffer != Private->CqBuffer[NVME_IO_QUEUE]))
      {
        DEBUG ((
          DEBUG_ERROR,
          "%a: Does not support external IO queues creation request.\n",
          __func__
          ));
        return EFI_UNSUPPORTED;
      }
    } else {
      if ((Sq->Opc & BIT0) != 0) {
        MapOp = EdkiiIoMmuOperationBusMasterRead;
      } else {
        MapOp = EdkiiIoMmuOperationBusMasterWrite;
      }

      if ((Packet->TransferLength != 0) && (Packet->TransferBuffer != NULL)) {
        MapLength = Packet->TransferLength;
        Status    = IoMmuMap (
                      MapOp,
                      Packet->TransferBuffer,
                      &MapLength,
                      &PhyAddr,
                      &MapData
                      );
        if (EFI_ERROR (Status) || (MapLength != Packet->TransferLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map data buffer.\n", __func__));
          goto Exit;
        }

        Sq->Prp[0] = PhyAddr;
      }

      if ((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
        MapLength = Packet->MetadataLength;
        Status    = IoMmuMap (
                      MapOp,
                      Packet->MetadataBuffer,
                      &MapLength,
                      &PhyAddr,
                      &MapMeta
                      );
        if (EFI_ERROR (Status) || (MapLength != Packet->MetadataLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map meta data buffer.\n", __func__));
          goto Exit;
        }

        Sq->Mptr = PhyAddr;
      }
    }
  }

  //
  // If the buffer size spans more than two memory pages (page size as defined
  // in CC.Mps), then build a PRP list in the second PRP submission queue entry.
  //
  Offset = ((UINT32)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create PrpList for remaining Data Buffer.
    //
    PhyAddr    = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Sq->Prp[1] = NvmeCreatePrpList (
                   Private,
                   PhyAddr,
                   EFI_SIZE_TO_PAGES (Offset + Bytes) - 1
                   );
    if (Sq->Prp[1] == 0) {
      Status = EFI_OUT_OF_RESOURCES;
      DEBUG ((DEBUG_ERROR, "%a: Create PRP list fail, Status - %r\n", __func__, Status));
      goto Exit;
    }
  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }
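
  //
  // Worked example for the branch above (illustrative, EFI_PAGE_SIZE == 0x1000):
  //   Sq->Prp[0] = 0x80000800, TransferLength = 0x2800 (10 KiB)
  //   Offset = 0x800, Offset + Bytes = 0x3000         -> three pages touched
  //   PhyAddr = (0x80000800 + 0x1000) & ~0xFFF        = 0x80001000
  //   PRP list pages = EFI_SIZE_TO_PAGES (0x3000) - 1 = 2
  // Between one and two pages, PRP2 points directly at the second page;
  // within one page, PRP2 stays 0.
  //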

  if (Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }

  if (Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }

  if (Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }

  if (Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }

  if (Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }

  if (Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  //
  Private->SqTdbl[QueueId].Sqt++;
  if (Private->SqTdbl[QueueId].Sqt == SqSize) {
    Private->SqTdbl[QueueId].Sqt = 0;
  }

  Data32 = ReadUnaligned32 ((UINT32 *)&Private->SqTdbl[QueueId]);
  Status = NVME_SET_SQTDBL (Private, QueueId, &Data32);
  if (EFI_ERROR (Status)) {
    DEBUG ((DEBUG_ERROR, "%a: NVME_SET_SQTDBL fail, Status - %r\n", __func__, Status));
    goto Exit;
  }

  //
  // Wait for completion queue to get filled in.
  //
  Status = EFI_TIMEOUT;
  Timer  = 0;
  while (Timer < Packet->CommandTimeout) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }

    MicroSecondDelay (NVME_POLL_INTERVAL);
    Timer += NVME_POLL_INTERVAL;
  }

  if (Status == EFI_TIMEOUT) {
    //
    // A timeout occurred for the NVMe command; reset the controller to abort
    // the outstanding command.
    //
    DEBUG ((DEBUG_ERROR, "%a: Timeout occurs for the PassThru command.\n", __func__));
    Status = NvmeControllerInit (Private);
    if (EFI_ERROR (Status)) {
      Status = EFI_DEVICE_ERROR;
    } else {
      //
      // Return EFI_TIMEOUT to indicate that a timeout occurred for the PassThru command.
      //
      Status = EFI_TIMEOUT;
    }

    goto Exit;
  }

  //
  // Move forward the Completion Queue head
  //
  Private->CqHdbl[QueueId].Cqh++;
  if (Private->CqHdbl[QueueId].Cqh == CqSize) {
    Private->CqHdbl[QueueId].Cqh = 0;
    Private->Pt[QueueId] ^= 1;
  }

  //
  // Copy the Response Queue entry for this command to the caller's response buffer
  //
  CopyMem (Packet->NvmeCompletion, (VOID *)Cq, sizeof (EFI_NVM_EXPRESS_COMPLETION));

  //
  // Check the NVMe cmd execution result
  //
  Status = NvmeCheckCqStatus (Cq);
  NVME_SET_CQHDBL (Private, QueueId, &Private->CqHdbl[QueueId]);

Exit:
  if (MapMeta != NULL) {
    IoMmuUnmap (MapMeta);
  }

  if (MapData != NULL) {
    IoMmuUnmap (MapData);
  }

  return Status;
}

/**
  Gets the device path information of the underlying NVM Express host controller.

  @param[in]  This               The PPI instance pointer.
  @param[out] DevicePathLength   The length of the device path in bytes specified by DevicePath.
  @param[out] DevicePath         The device path of the underlying NVM Express host controller.

  @retval EFI_SUCCESS            The operation succeeds.
  @retval EFI_INVALID_PARAMETER  DevicePathLength or DevicePath is NULL.
  @retval EFI_OUT_OF_RESOURCES   The operation fails due to lack of resources.

**/
EFI_STATUS
EFIAPI
NvmePassThruGetDevicePath (
  IN  EDKII_PEI_NVM_EXPRESS_PASS_THRU_PPI  *This,
  OUT UINTN                                *DevicePathLength,
  OUT EFI_DEVICE_PATH_PROTOCOL             **DevicePath
  )
{
  PEI_NVME_CONTROLLER_PRIVATE_DATA  *Private;

  if ((This == NULL) || (DevicePathLength == NULL) || (DevicePath == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  Private = GET_NVME_PEIM_HC_PRIVATE_DATA_FROM_THIS_NVME_PASSTHRU (This);

  *DevicePathLength = Private->DevicePathLength;
  *DevicePath       = AllocateCopyPool (Private->DevicePathLength, Private->DevicePath);
  if (*DevicePath == NULL) {
    *DevicePathLength = 0;
    return EFI_OUT_OF_RESOURCES;
  }

  return EFI_SUCCESS;
}
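
//
// Illustrative usage sketch, not part of the original file: how a caller that
// has already located the PPI (e.g. via PeiServicesLocatePpi) might retrieve
// the controller's device path. The function name is hypothetical; the PPI's
// GetDevicePath field corresponds to NvmePassThruGetDevicePath above.
//
STATIC
EFI_STATUS
SketchGetControllerDevicePath (
  IN EDKII_PEI_NVM_EXPRESS_PASS_THRU_PPI  *NvmePassThruPpi
  )
{
  UINTN                     DevicePathLength;
  EFI_DEVICE_PATH_PROTOCOL  *DevicePath;
  EFI_STATUS                Status;

  Status = NvmePassThruPpi->GetDevicePath (NvmePassThruPpi, &DevicePathLength, &DevicePath);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // DevicePath is an AllocateCopyPool copy of DevicePathLength bytes;
  // the caller owns the buffer.
  //
  return EFI_SUCCESS;
}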

/**
  Used to retrieve the next namespace ID for this NVM Express controller.

  If on input the value pointed to by NamespaceId is 0xFFFFFFFF, then the first
  valid namespace ID defined on the NVM Express controller is returned in the
  location pointed to by NamespaceId and a status of EFI_SUCCESS is returned.

  If on input the value pointed to by NamespaceId is an invalid namespace ID
  other than 0xFFFFFFFF, then EFI_INVALID_PARAMETER is returned.

  If on input the value pointed to by NamespaceId is a valid namespace ID, then
  the next valid namespace ID on the NVM Express controller is returned in the
  location pointed to by NamespaceId, and EFI_SUCCESS is returned.

  If the value pointed to by NamespaceId is the namespace ID of the last
  namespace on the NVM Express controller, then EFI_NOT_FOUND is returned.

  @param[in]     This          The PPI instance pointer.
  @param[in,out] NamespaceId   On input, the previous namespace ID, or 0xFFFFFFFF
                               to retrieve the first namespace ID. On output, the
                               next namespace ID of an NVM Express namespace on
                               the NVM Express controller.

  @retval EFI_SUCCESS            The next namespace ID was returned.
  @retval EFI_NOT_FOUND          There are no more namespaces defined on this controller.
  @retval EFI_INVALID_PARAMETER  NamespaceId is an invalid value other than 0xFFFFFFFF.

**/
EFI_STATUS
EFIAPI
NvmePassThruGetNextNameSpace (
  IN     EDKII_PEI_NVM_EXPRESS_PASS_THRU_PPI  *This,
  IN OUT UINT32                               *NamespaceId
  )
{
  PEI_NVME_CONTROLLER_PRIVATE_DATA  *Private;
  UINT32                            DeviceIndex;
  EFI_STATUS                        Status;

  if ((This == NULL) || (NamespaceId == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  Private = GET_NVME_PEIM_HC_PRIVATE_DATA_FROM_THIS_NVME_PASSTHRU (This);

  Status = EFI_NOT_FOUND;

  //
  // If the number of active namespaces is 0, then no valid namespace ID is available.
  //
  if (Private->ActiveNamespaceNum == 0) {
    return EFI_NOT_FOUND;
  }

  //
  // If the NamespaceId input value is 0xFFFFFFFF, then get the first valid namespace ID
  //
  if (*NamespaceId == 0xFFFFFFFF) {
    //
    // Start with the first namespace ID
    //
    *NamespaceId = Private->NamespaceInfo[0].NamespaceId;
    Status       = EFI_SUCCESS;
  } else {
    if (*NamespaceId > Private->ControllerData->Nn) {
      return EFI_INVALID_PARAMETER;
    }

    if ((*NamespaceId + 1) > Private->ControllerData->Nn) {
      return EFI_NOT_FOUND;
    }

    for (DeviceIndex = 0; DeviceIndex < Private->ActiveNamespaceNum; DeviceIndex++) {
      if (*NamespaceId == Private->NamespaceInfo[DeviceIndex].NamespaceId) {
        if ((DeviceIndex + 1) < Private->ActiveNamespaceNum) {
          *NamespaceId = Private->NamespaceInfo[DeviceIndex + 1].NamespaceId;
          Status       = EFI_SUCCESS;
        }

        break;
      }
    }
  }

  return Status;
}
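
//
// Illustrative enumeration sketch, not part of the original file: start from
// the 0xFFFFFFFF sentinel and call GetNextNameSpace until EFI_NOT_FOUND.
// The function name is hypothetical.
//
STATIC
VOID
SketchEnumerateNamespaces (
  IN EDKII_PEI_NVM_EXPRESS_PASS_THRU_PPI  *NvmePassThruPpi
  )
{
  UINT32      NamespaceId;
  EFI_STATUS  Status;

  NamespaceId = 0xFFFFFFFF;
  for ( ; ; ) {
    Status = NvmePassThruPpi->GetNextNameSpace (NvmePassThruPpi, &NamespaceId);
    if (EFI_ERROR (Status)) {
      break;    // EFI_NOT_FOUND after the last active namespace
    }

    //
    // NamespaceId now holds the next active namespace ID.
    //
  }
}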

/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace.

  @param[in]     This          The PPI instance pointer.
  @param[in]     NamespaceId   A 32-bit namespace ID as defined in the NVMe specification
                               to which the NVM Express command packet will be sent.
                               A value of 0 denotes the NVM Express controller, and a value
                               of all 0xFFs specifies that the command packet should be sent
                               to all valid namespaces.
  @param[in,out] Packet        A pointer to the NVM Express PassThru Command Packet to send
                               to the NVMe namespace specified by NamespaceId.

  @retval EFI_SUCCESS            The NVM Express Command Packet was sent by the host.
  @retval EFI_INVALID_PARAMETER  NamespaceId or the contents of the Command Packet are invalid.
  @retval EFI_UNSUPPORTED        The command described by the Command Packet is not supported.
  @retval EFI_TIMEOUT            A timeout occurred while waiting for the Command Packet to execute.
  @retval EFI_DEVICE_ERROR       A device error occurred while attempting to send the Command Packet.

**/
EFI_STATUS
EFIAPI
NvmePassThru (
  IN     EDKII_PEI_NVM_EXPRESS_PASS_THRU_PPI       *This,
  IN     UINT32                                    NamespaceId,
  IN OUT EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  *Packet
  )
{
  PEI_NVME_CONTROLLER_PRIVATE_DATA  *Private;
  EFI_STATUS                        Status;

  if ((This == NULL) || (Packet == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  Private = GET_NVME_PEIM_HC_PRIVATE_DATA_FROM_THIS_NVME_PASSTHRU (This);
  //
  // Check whether NamespaceId is valid.
  //
  if ((NamespaceId > Private->ControllerData->Nn) &&
      (NamespaceId != (UINT32)-1))
  {
    return EFI_INVALID_PARAMETER;
  }

  Status = NvmePassThruExecute (
             Private,
             NamespaceId,
             Packet
             );

  return Status;
}
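
//
// Illustrative end-to-end sketch, not part of the original file: issue an
// admin Identify Controller command through the PPI. NVME_ADMIN_IDENTIFY_CMD,
// NVME_ADMIN_CONTROLLER_DATA, CDW10_VALID and NVME_ADMIN_QUEUE are standard
// EDK2 NVMe definitions; the function name and the timeout value are
// assumptions made for the example.
//
STATIC
EFI_STATUS
SketchIdentifyController (
  IN  EDKII_PEI_NVM_EXPRESS_PASS_THRU_PPI  *NvmePassThruPpi,
  OUT NVME_ADMIN_CONTROLLER_DATA           *ControllerData
  )
{
  EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  Packet;
  EFI_NVM_EXPRESS_COMMAND                   Command;
  EFI_NVM_EXPRESS_COMPLETION                Completion;

  ZeroMem (&Packet, sizeof (Packet));
  ZeroMem (&Command, sizeof (Command));
  ZeroMem (&Completion, sizeof (Completion));

  Command.Cdw0.Opcode = NVME_ADMIN_IDENTIFY_CMD;
  Command.Nsid        = 0;
  Command.Cdw10       = 1;            // CNS 1: Identify Controller data structure
  Command.Flags       = CDW10_VALID;

  Packet.NvmeCmd        = &Command;
  Packet.NvmeCompletion = &Completion;
  Packet.TransferBuffer = ControllerData;
  Packet.TransferLength = sizeof (NVME_ADMIN_CONTROLLER_DATA);
  Packet.CommandTimeout = 5 * 1000 * 1000;    // assumed 5 s, in the microsecond units this driver polls in
  Packet.QueueType      = NVME_ADMIN_QUEUE;

  //
  // NamespaceId 0 targets the controller itself; it also matches Command.Nsid,
  // as NvmePassThruExecute requires.
  //
  return NvmePassThruPpi->PassThru (NvmePassThruPpi, 0, &Packet);
}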