TianoCore EDK2 master
Loading...
Searching...
No Matches
Commands.c
Go to the documentation of this file.
1
#include <Library/VirtioLib.h>

#include "VirtioGpu.h"
15
34 IN OUT VGPU_DEV *VgpuDev
35 )
36{
37 UINT8 NextDevStat;
38 EFI_STATUS Status;
39 UINT64 Features;
40 UINT16 QueueSize;
41 UINT64 RingBaseShift;
42
43 //
44 // Execute virtio-v1.0-cs04, 3.1.1 Driver Requirements: Device
45 // Initialization.
46 //
47 // 1. Reset the device.
48 //
49 NextDevStat = 0;
50 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
51 if (EFI_ERROR (Status)) {
52 goto Failed;
53 }
54
55 //
56 // 2. Set the ACKNOWLEDGE status bit [...]
57 //
58 NextDevStat |= VSTAT_ACK;
59 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
60 if (EFI_ERROR (Status)) {
61 goto Failed;
62 }
63
64 //
65 // 3. Set the DRIVER status bit [...]
66 //
67 NextDevStat |= VSTAT_DRIVER;
68 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
69 if (EFI_ERROR (Status)) {
70 goto Failed;
71 }
72
73 //
74 // 4. Read device feature bits...
75 //
76 Status = VgpuDev->VirtIo->GetDeviceFeatures (VgpuDev->VirtIo, &Features);
77 if (EFI_ERROR (Status)) {
78 goto Failed;
79 }
80
81 if ((Features & VIRTIO_F_VERSION_1) == 0) {
82 Status = EFI_UNSUPPORTED;
83 goto Failed;
84 }
85
86 //
87 // We only want the most basic 2D features.
88 //
89 Features &= VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM;
90
91 //
92 // ... and write the subset of feature bits understood by the [...] driver to
93 // the device. [...]
94 // 5. Set the FEATURES_OK status bit.
95 // 6. Re-read device status to ensure the FEATURES_OK bit is still set [...]
96 //
97 Status = Virtio10WriteFeatures (VgpuDev->VirtIo, Features, &NextDevStat);
98 if (EFI_ERROR (Status)) {
99 goto Failed;
100 }
101
102 //
103 // 7. Perform device-specific setup, including discovery of virtqueues for
104 // the device [...]
105 //
106 Status = VgpuDev->VirtIo->SetQueueSel (
107 VgpuDev->VirtIo,
108 VIRTIO_GPU_CONTROL_QUEUE
109 );
110 if (EFI_ERROR (Status)) {
111 goto Failed;
112 }
113
114 Status = VgpuDev->VirtIo->GetQueueNumMax (VgpuDev->VirtIo, &QueueSize);
115 if (EFI_ERROR (Status)) {
116 goto Failed;
117 }
118
119 //
120 // We implement each VirtIo GPU command that we use with two descriptors:
121 // request, response.
122 //
123 if (QueueSize < 2) {
124 Status = EFI_UNSUPPORTED;
125 goto Failed;
126 }
127
128 //
129 // [...] population of virtqueues [...]
130 //
131 Status = VirtioRingInit (VgpuDev->VirtIo, QueueSize, &VgpuDev->Ring);
132 if (EFI_ERROR (Status)) {
133 goto Failed;
134 }
135
136 //
137 // If anything fails from here on, we have to release the ring.
138 //
139 Status = VirtioRingMap (
140 VgpuDev->VirtIo,
141 &VgpuDev->Ring,
142 &RingBaseShift,
143 &VgpuDev->RingMap
144 );
145 if (EFI_ERROR (Status)) {
146 goto ReleaseQueue;
147 }
148
149 //
150 // If anything fails from here on, we have to unmap the ring.
151 //
152 Status = VgpuDev->VirtIo->SetQueueAddress (
153 VgpuDev->VirtIo,
154 &VgpuDev->Ring,
155 RingBaseShift
156 );
157 if (EFI_ERROR (Status)) {
158 goto UnmapQueue;
159 }
160
161 //
162 // 8. Set the DRIVER_OK status bit.
163 //
164 NextDevStat |= VSTAT_DRIVER_OK;
165 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
166 if (EFI_ERROR (Status)) {
167 goto UnmapQueue;
168 }
169
170 return EFI_SUCCESS;
171
172UnmapQueue:
173 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);
174
175ReleaseQueue:
176 VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);
177
178Failed:
179 //
180 // If any of these steps go irrecoverably wrong, the driver SHOULD set the
181 // FAILED status bit to indicate that it has given up on the device (it can
182 // reset the device later to restart if desired). [...]
183 //
184 // VirtIo access failure here should not mask the original error.
185 //
186 NextDevStat |= VSTAT_FAILED;
187 VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
188
189 return Status;
190}
191
201VOID
203 IN OUT VGPU_DEV *VgpuDev
204 )
205{
206 //
207 // Resetting the VirtIo device makes it release its resources and forget its
208 // configuration.
209 //
210 VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);
211 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);
212 VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);
213}
214
245 IN VGPU_DEV *VgpuDev,
246 IN UINTN NumberOfPages,
247 OUT VOID **HostAddress,
248 OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
249 OUT VOID **Mapping
250 )
251{
252 EFI_STATUS Status;
253 VOID *NewHostAddress;
254
255 Status = VgpuDev->VirtIo->AllocateSharedPages (
256 VgpuDev->VirtIo,
257 NumberOfPages,
258 &NewHostAddress
259 );
260 if (EFI_ERROR (Status)) {
261 return Status;
262 }
263
264 //
265 // Avoid exposing stale data to the device even temporarily: zero the area
266 // before mapping it.
267 //
268 ZeroMem (NewHostAddress, EFI_PAGES_TO_SIZE (NumberOfPages));
269
271 VgpuDev->VirtIo, // VirtIo
272 VirtioOperationBusMasterCommonBuffer, // Operation
273 NewHostAddress, // HostAddress
274 EFI_PAGES_TO_SIZE (NumberOfPages), // NumberOfBytes
275 DeviceAddress, // DeviceAddress
276 Mapping // Mapping
277 );
278 if (EFI_ERROR (Status)) {
279 goto FreeSharedPages;
280 }
281
282 *HostAddress = NewHostAddress;
283 return EFI_SUCCESS;
284
285FreeSharedPages:
286 VgpuDev->VirtIo->FreeSharedPages (
287 VgpuDev->VirtIo,
288 NumberOfPages,
289 NewHostAddress
290 );
291 return Status;
292}
293
316VOID
318 IN VGPU_DEV *VgpuDev,
319 IN UINTN NumberOfPages,
320 IN VOID *HostAddress,
321 IN VOID *Mapping
322 )
323{
324 VgpuDev->VirtIo->UnmapSharedBuffer (
325 VgpuDev->VirtIo,
326 Mapping
327 );
328 VgpuDev->VirtIo->FreeSharedPages (
329 VgpuDev->VirtIo,
330 NumberOfPages,
331 HostAddress
332 );
333}
334
348VOID
349EFIAPI
351 IN EFI_EVENT Event,
352 IN VOID *Context
353 )
354{
355 VGPU_DEV *VgpuDev;
356
357 DEBUG ((DEBUG_VERBOSE, "%a: Context=0x%p\n", __func__, Context));
358 VgpuDev = Context;
359 VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);
360}
361
413STATIC
416 IN OUT VGPU_DEV *VgpuDev,
417 IN VIRTIO_GPU_CONTROL_TYPE RequestType,
418 IN BOOLEAN Fence,
419 IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,
420 IN UINTN RequestSize,
421 IN VIRTIO_GPU_CONTROL_TYPE ResponseType,
422 IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Response,
423 IN UINTN ResponseSize
424 )
425{
426 DESC_INDICES Indices;
427 EFI_STATUS Status;
428 UINT32 ResponseSizeRet;
429 EFI_PHYSICAL_ADDRESS RequestDeviceAddress;
430 VOID *RequestMap;
431 EFI_PHYSICAL_ADDRESS ResponseDeviceAddress;
432 VOID *ResponseMap;
433
434 //
435 // Initialize Header.
436 //
437 Header->Type = RequestType;
438 if (Fence) {
439 Header->Flags = VIRTIO_GPU_FLAG_FENCE;
440 Header->FenceId = VgpuDev->FenceId++;
441 } else {
442 Header->Flags = 0;
443 Header->FenceId = 0;
444 }
445
446 Header->CtxId = 0;
447 Header->Padding = 0;
448
449 ASSERT (RequestSize >= sizeof *Header);
450 ASSERT (RequestSize <= MAX_UINT32);
451
452 //
453 // Map request and response to bus master device addresses.
454 //
456 VgpuDev->VirtIo,
457 VirtioOperationBusMasterRead,
458 (VOID *)Header,
459 RequestSize,
460 &RequestDeviceAddress,
461 &RequestMap
462 );
463 if (EFI_ERROR (Status)) {
464 return Status;
465 }
466
468 VgpuDev->VirtIo,
469 VirtioOperationBusMasterWrite,
470 (VOID *)Response,
471 ResponseSize,
472 &ResponseDeviceAddress,
473 &ResponseMap
474 );
475 if (EFI_ERROR (Status)) {
476 goto UnmapRequest;
477 }
478
479 //
480 // Compose the descriptor chain.
481 //
482 VirtioPrepare (&VgpuDev->Ring, &Indices);
484 &VgpuDev->Ring,
485 RequestDeviceAddress,
486 (UINT32)RequestSize,
487 VRING_DESC_F_NEXT,
488 &Indices
489 );
491 &VgpuDev->Ring,
492 ResponseDeviceAddress,
493 (UINT32)ResponseSize,
494 VRING_DESC_F_WRITE,
495 &Indices
496 );
497
498 //
499 // Send the command.
500 //
501 Status = VirtioFlush (
502 VgpuDev->VirtIo,
503 VIRTIO_GPU_CONTROL_QUEUE,
504 &VgpuDev->Ring,
505 &Indices,
506 &ResponseSizeRet
507 );
508 if (EFI_ERROR (Status)) {
509 goto UnmapResponse;
510 }
511
512 //
513 // Verify response size.
514 //
515 if (ResponseSize != ResponseSizeRet) {
516 DEBUG ((
517 DEBUG_ERROR,
518 "%a: malformed response to Request=0x%x\n",
519 __func__,
520 (UINT32)RequestType
521 ));
522 Status = EFI_PROTOCOL_ERROR;
523 goto UnmapResponse;
524 }
525
526 //
527 // Unmap response and request, in reverse order of mapping. On error, the
528 // respective mapping is invalidated anyway, only the data may not have been
529 // committed to system memory (in case of VirtioOperationBusMasterWrite).
530 //
531 Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);
532 if (EFI_ERROR (Status)) {
533 goto UnmapRequest;
534 }
535
536 Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);
537 if (EFI_ERROR (Status)) {
538 return Status;
539 }
540
541 //
542 // Parse the response.
543 //
544 if (Response->Type == (UINT32)ResponseType) {
545 return EFI_SUCCESS;
546 }
547
548 DEBUG ((
549 DEBUG_ERROR,
550 "%a: Request=0x%x Response=0x%x (expected 0x%x)\n",
551 __func__,
552 (UINT32)RequestType,
553 Response->Type,
554 ResponseType
555 ));
556 return EFI_DEVICE_ERROR;
557
558UnmapResponse:
559 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);
560
561UnmapRequest:
562 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);
563
564 return Status;
565}
566
571STATIC
574 IN OUT VGPU_DEV *VgpuDev,
575 IN VIRTIO_GPU_CONTROL_TYPE RequestType,
576 IN BOOLEAN Fence,
577 IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,
578 IN UINTN RequestSize
579 )
580{
581 volatile VIRTIO_GPU_CONTROL_HEADER Response;
582
584 VgpuDev,
585 RequestType,
586 Fence,
587 Header,
588 RequestSize,
589 VirtioGpuRespOkNodata,
590 &Response,
591 sizeof (Response)
592 );
593}
594
623 IN OUT VGPU_DEV *VgpuDev,
624 IN UINT32 ResourceId,
625 IN VIRTIO_GPU_FORMATS Format,
626 IN UINT32 Width,
627 IN UINT32 Height
628 )
629{
630 volatile VIRTIO_GPU_RESOURCE_CREATE_2D Request;
631
632 if (ResourceId == 0) {
633 return EFI_INVALID_PARAMETER;
634 }
635
636 Request.ResourceId = ResourceId;
637 Request.Format = (UINT32)Format;
638 Request.Width = Width;
639 Request.Height = Height;
640
641 return VirtioGpuSendCommand (
642 VgpuDev,
643 VirtioGpuCmdResourceCreate2d,
644 FALSE, // Fence
645 &Request.Header,
646 sizeof Request
647 );
648}
649
651VirtioGpuResourceUnref (
652 IN OUT VGPU_DEV *VgpuDev,
653 IN UINT32 ResourceId
654 )
655{
656 volatile VIRTIO_GPU_RESOURCE_UNREF Request;
657
658 if (ResourceId == 0) {
659 return EFI_INVALID_PARAMETER;
660 }
661
662 Request.ResourceId = ResourceId;
663 Request.Padding = 0;
664
665 return VirtioGpuSendCommand (
666 VgpuDev,
667 VirtioGpuCmdResourceUnref,
668 FALSE, // Fence
669 &Request.Header,
670 sizeof Request
671 );
672}
673
675VirtioGpuResourceAttachBacking (
676 IN OUT VGPU_DEV *VgpuDev,
677 IN UINT32 ResourceId,
678 IN EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,
679 IN UINTN NumberOfPages
680 )
681{
682 volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;
683
684 if (ResourceId == 0) {
685 return EFI_INVALID_PARAMETER;
686 }
687
688 Request.ResourceId = ResourceId;
689 Request.NrEntries = 1;
690 Request.Entry.Addr = BackingStoreDeviceAddress;
691 Request.Entry.Length = (UINT32)EFI_PAGES_TO_SIZE (NumberOfPages);
692 Request.Entry.Padding = 0;
693
694 return VirtioGpuSendCommand (
695 VgpuDev,
696 VirtioGpuCmdResourceAttachBacking,
697 FALSE, // Fence
698 &Request.Header,
699 sizeof Request
700 );
701}
702
704VirtioGpuResourceDetachBacking (
705 IN OUT VGPU_DEV *VgpuDev,
706 IN UINT32 ResourceId
707 )
708{
709 volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING Request;
710
711 if (ResourceId == 0) {
712 return EFI_INVALID_PARAMETER;
713 }
714
715 Request.ResourceId = ResourceId;
716 Request.Padding = 0;
717
718 //
719 // In this case, we set Fence to TRUE, because after this function returns,
720 // the caller might reasonably want to repurpose the backing pages
721 // immediately. Thus we should ensure that the host releases all references
722 // to the backing pages before we return.
723 //
724 return VirtioGpuSendCommand (
725 VgpuDev,
726 VirtioGpuCmdResourceDetachBacking,
727 TRUE, // Fence
728 &Request.Header,
729 sizeof Request
730 );
731}
732
734VirtioGpuSetScanout (
735 IN OUT VGPU_DEV *VgpuDev,
736 IN UINT32 X,
737 IN UINT32 Y,
738 IN UINT32 Width,
739 IN UINT32 Height,
740 IN UINT32 ScanoutId,
741 IN UINT32 ResourceId
742 )
743{
744 volatile VIRTIO_GPU_SET_SCANOUT Request;
745
746 //
747 // Unlike for most other commands, ResourceId=0 is valid; it
748 // is used to disable a scanout.
749 //
750 Request.Rectangle.X = X;
751 Request.Rectangle.Y = Y;
752 Request.Rectangle.Width = Width;
753 Request.Rectangle.Height = Height;
754 Request.ScanoutId = ScanoutId;
755 Request.ResourceId = ResourceId;
756
757 return VirtioGpuSendCommand (
758 VgpuDev,
759 VirtioGpuCmdSetScanout,
760 FALSE, // Fence
761 &Request.Header,
762 sizeof Request
763 );
764}
765
767VirtioGpuTransferToHost2d (
768 IN OUT VGPU_DEV *VgpuDev,
769 IN UINT32 X,
770 IN UINT32 Y,
771 IN UINT32 Width,
772 IN UINT32 Height,
773 IN UINT64 Offset,
774 IN UINT32 ResourceId
775 )
776{
777 volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D Request;
778
779 if (ResourceId == 0) {
780 return EFI_INVALID_PARAMETER;
781 }
782
783 Request.Rectangle.X = X;
784 Request.Rectangle.Y = Y;
785 Request.Rectangle.Width = Width;
786 Request.Rectangle.Height = Height;
787 Request.Offset = Offset;
788 Request.ResourceId = ResourceId;
789 Request.Padding = 0;
790
791 return VirtioGpuSendCommand (
792 VgpuDev,
793 VirtioGpuCmdTransferToHost2d,
794 FALSE, // Fence
795 &Request.Header,
796 sizeof Request
797 );
798}
799
801VirtioGpuResourceFlush (
802 IN OUT VGPU_DEV *VgpuDev,
803 IN UINT32 X,
804 IN UINT32 Y,
805 IN UINT32 Width,
806 IN UINT32 Height,
807 IN UINT32 ResourceId
808 )
809{
810 volatile VIRTIO_GPU_RESOURCE_FLUSH Request;
811
812 if (ResourceId == 0) {
813 return EFI_INVALID_PARAMETER;
814 }
815
816 Request.Rectangle.X = X;
817 Request.Rectangle.Y = Y;
818 Request.Rectangle.Width = Width;
819 Request.Rectangle.Height = Height;
820 Request.ResourceId = ResourceId;
821 Request.Padding = 0;
822
823 return VirtioGpuSendCommand (
824 VgpuDev,
825 VirtioGpuCmdResourceFlush,
826 FALSE, // Fence
827 &Request.Header,
828 sizeof Request
829 );
830}
831
833VirtioGpuGetDisplayInfo (
834 IN OUT VGPU_DEV *VgpuDev,
835 volatile VIRTIO_GPU_RESP_DISPLAY_INFO *Response
836 )
837{
838 volatile VIRTIO_GPU_CONTROL_HEADER Request;
839
841 VgpuDev,
842 VirtioGpuCmdGetDisplayInfo,
843 FALSE, // Fence
844 &Request,
845 sizeof Request,
846 VirtioGpuRespOkDisplayInfo,
847 &Response->Header,
848 sizeof *Response
849 );
850}
UINT64 UINTN
VOID *EFIAPI ZeroMem(OUT VOID *Buffer, IN UINTN Length)
STATIC EFI_STATUS VirtioGpuSendCommand(IN OUT VGPU_DEV *VgpuDev, IN VIRTIO_GPU_CONTROL_TYPE RequestType, IN BOOLEAN Fence, IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header, IN UINTN RequestSize)
Definition: Commands.c:573
VOID VirtioGpuUninit(IN OUT VGPU_DEV *VgpuDev)
Definition: Commands.c:202
VOID EFIAPI VirtioGpuExitBoot(IN EFI_EVENT Event, IN VOID *Context)
Definition: Commands.c:350
VOID VirtioGpuUnmapAndFreeBackingStore(IN VGPU_DEV *VgpuDev, IN UINTN NumberOfPages, IN VOID *HostAddress, IN VOID *Mapping)
Definition: Commands.c:317
STATIC EFI_STATUS VirtioGpuSendCommandWithReply(IN OUT VGPU_DEV *VgpuDev, IN VIRTIO_GPU_CONTROL_TYPE RequestType, IN BOOLEAN Fence, IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header, IN UINTN RequestSize, IN VIRTIO_GPU_CONTROL_TYPE ResponseType, IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Response, IN UINTN ResponseSize)
Definition: Commands.c:415
EFI_STATUS VirtioGpuAllocateZeroAndMapBackingStore(IN VGPU_DEV *VgpuDev, IN UINTN NumberOfPages, OUT VOID **HostAddress, OUT EFI_PHYSICAL_ADDRESS *DeviceAddress, OUT VOID **Mapping)
Definition: Commands.c:244
EFI_STATUS VirtioGpuResourceCreate2d(IN OUT VGPU_DEV *VgpuDev, IN UINT32 ResourceId, IN VIRTIO_GPU_FORMATS Format, IN UINT32 Width, IN UINT32 Height)
Definition: Commands.c:622
EFI_STATUS VirtioGpuInit(IN OUT VGPU_DEV *VgpuDev)
Definition: Commands.c:33
#define STATIC
Definition: Base.h:264
#define TRUE
Definition: Base.h:301
#define FALSE
Definition: Base.h:307
#define IN
Definition: Base.h:279
#define OUT
Definition: Base.h:284
#define DEBUG(Expression)
Definition: DebugLib.h:434
UINT64 EFI_PHYSICAL_ADDRESS
Definition: UefiBaseType.h:50
#define EFI_PAGES_TO_SIZE(Pages)
Definition: UefiBaseType.h:213
RETURN_STATUS EFI_STATUS
Definition: UefiBaseType.h:29
VOID * EFI_EVENT
Definition: UefiBaseType.h:37
#define EFI_SUCCESS
Definition: UefiBaseType.h:112
EFI_STATUS EFIAPI VirtioMapAllBytesInSharedBuffer(IN VIRTIO_DEVICE_PROTOCOL *VirtIo, IN VIRTIO_MAP_OPERATION Operation, IN VOID *HostAddress, IN UINTN NumberOfBytes, OUT EFI_PHYSICAL_ADDRESS *DeviceAddress, OUT VOID **Mapping)
Definition: VirtioLib.c:469
VOID EFIAPI VirtioAppendDesc(IN OUT VRING *Ring, IN UINT64 BufferDeviceAddress, IN UINT32 BufferSize, IN UINT16 Flags, IN OUT DESC_INDICES *Indices)
Definition: VirtioLib.c:228
EFI_STATUS EFIAPI VirtioFlush(IN VIRTIO_DEVICE_PROTOCOL *VirtIo, IN UINT16 VirtQueueId, IN OUT VRING *Ring, IN DESC_INDICES *Indices, OUT UINT32 *UsedLen OPTIONAL)
Definition: VirtioLib.c:274
EFI_STATUS EFIAPI VirtioRingMap(IN VIRTIO_DEVICE_PROTOCOL *VirtIo, IN VRING *Ring, OUT UINT64 *RingBaseShift, OUT VOID **Mapping)
Definition: VirtioLib.c:529
EFI_STATUS EFIAPI Virtio10WriteFeatures(IN VIRTIO_DEVICE_PROTOCOL *VirtIo, IN UINT64 Features, IN OUT UINT8 *DeviceStatus)
Definition: VirtioLib.c:391
VOID EFIAPI VirtioPrepare(IN OUT VRING *Ring, OUT DESC_INDICES *Indices)
Definition: VirtioLib.c:167
EFI_STATUS EFIAPI VirtioRingInit(IN VIRTIO_DEVICE_PROTOCOL *VirtIo, IN UINT16 QueueSize, OUT VRING *Ring)
Definition: VirtioLib.c:49
VOID EFIAPI VirtioRingUninit(IN VIRTIO_DEVICE_PROTOCOL *VirtIo, IN OUT VRING *Ring)
Definition: VirtioLib.c:144