MemoryEncryption.c

#include <Uefi.h>
#include <Uefi/UefiBaseType.h>
#include <Library/CpuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/MemEncryptTdxLib.h>
#include "VirtualMemory.h"
#include <IndustryStandard/Tdx.h>
#include <Library/TdxLib.h>
#include <Library/UefiBootServicesTableLib.h>
#include <ConfidentialComputingGuestAttr.h>
#include <Protocol/MemoryAccept.h>

typedef enum {
  SetSharedBit,
  ClearSharedBit
} TDX_PAGETABLE_MODE;

STATIC PAGE_TABLE_POOL  *mPageTablePool = NULL;

#define MAX_RETRIES_PER_PAGE  3

/**
  Returns TRUE when running as a TDX guest, i.e. when the shared-bit
  memory "encryption" scheme managed by this library applies.
**/
BOOLEAN
EFIAPI
MemEncryptTdxIsEnabled (
  VOID
  )
{
  return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr));
}

/**
  Get the memory encryption mask, i.e. the TDX shared-page bit.
**/
STATIC
UINT64
GetMemEncryptionAddressMask (
  VOID
  )
{
  return TdSharedPageMask ();
}

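
//
// Illustrative note (an assumption, not part of the original file): under TDX
// the mask returned above is a single bit -- the top guest physical address
// bit within the GPA width (GPAW) reported by the TDX module. A minimal
// sketch of the derivation TdSharedPageMask () performs:
//
//   UINT64  SharedMask;
//
//   // GPAW is 48 or 52, so the shared bit is bit 47 or bit 51.
//   SharedMask = LShiftU64 (1, Gpaw - 1);  // e.g. 0x0000800000000000 for GPAW 48
//
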
/**
  Initialize a buffer pool for page table use.

  To reduce page splitting, pages for page tables are reserved in units of
  PAGE_TABLE_POOL_UNIT_PAGES and aligned to PAGE_TABLE_POOL_ALIGNMENT, so the
  pool is always created with at least the requested number of pages.

  @param[in]  PoolPages  The minimum number of pages the pool must provide.

  @retval TRUE   The pool was initialized successfully.
  @retval FALSE  No aligned pages were available.
**/
STATIC
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID  *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // the header.
  //
  PoolPages += 1;   // Add one page for the header.
  PoolPages  = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
               PAGE_TABLE_POOL_UNIT_PAGES;
  Buffer     = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a circular list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool           = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool              = Buffer;
    mPageTablePool                        = Buffer;
  }

  //
  // Reserve one page for the pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

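
//
// Worked example (assuming PAGE_TABLE_POOL_UNIT_PAGES is 512, i.e. one 2MB
// unit, per VirtualMemory.h): a request for 5 pages is rounded up as
//
//   PoolPages = 5 + 1;                        // header page -> 6
//   PoolPages = ((6 - 1) / 512 + 1) * 512;    // -> 512
//
// so the pool grows in whole 2MB units, one page of which holds the
// PAGE_TABLE_POOL header and 511 of which are handed out as table pages.
//
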
/**
  Allocate pages for page-table use from the reserved pool, growing the pool
  on demand.

  @param[in]  Pages  Number of pages to allocate.

  @return  Address of the first allocated page, or NULL on failure.
**/
STATIC
VOID *
EFIAPI
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if ((mPageTablePool == NULL) ||
      (Pages > mPageTablePool->FreePages))
  {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset    += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Buffer=0x%Lx Pages=%ld\n",
    gEfiCallerBaseName,
    __func__,
    Buffer,
    Pages
    ));

  return Buffer;
}

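
//
// Usage sketch (illustrative): table pages must come from this bump allocator
// rather than the general pool, so that EnablePageTableProtection () can walk
// the pool list and write-protect every page that backs a page table.
//
//   UINT64  *NewTable;
//
//   NewTable = AllocatePageTableMemory (1);
//   if (NewTable == NULL) {
//     return;                 // out of pool and aligned pages
//   }
//   // ... fill in all 512 entries before installing the table ...
//
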
/**
  Split a 2MB page into 512 4KB pages.

  @param[in]       PhysicalAddress  Start address of the 2MB region.
  @param[in, out]  PageEntry2M      The 2MB page entry to split.
  @param[in]       StackBase        Stack base address.
  @param[in]       StackSize        Stack size.
  @param[in]       AddressEncMask   The shared-bit mask to apply to each entry.
**/
STATIC
VOID
Split2MPageTo4K (
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64        *PageEntry2M,
  IN PHYSICAL_ADDRESS  StackBase,
  IN UINTN             StackSize,
  IN UINT64            AddressEncMask
  )
{
  PHYSICAL_ADDRESS     PhysicalAddress4K;
  UINTN                IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY  *PageTableEntry, *PageTableEntry1;

  PageTableEntry = AllocatePageTableMemory (1);

  PageTableEntry1 = PageTableEntry;

  if (PageTableEntry == NULL) {
    ASSERT (FALSE);
    return;
  }

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0;
       IndexOfPageTableEntries < 512;
       (IndexOfPageTableEntries++,
        PageTableEntry++,
        PhysicalAddress4K += SIZE_4KB))
  {
    //
    // Fill in the page table entries.
    //
    PageTableEntry->Uint64         = (UINT64)PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present   = 1;
    if ((PhysicalAddress4K >= StackBase) &&
        (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set the Nx bit for the stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }

  //
  // Fill in the 2M page entry.
  //
  *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);
}

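
//
// Illustrative note: the loop above emits 512 PTEs covering exactly one 2MB
// region (512 * SIZE_4KB == SIZE_2MB), and the rewritten entry, now without
// IA32_PG_PS, acts as a page-directory entry pointing at the new table. For
// example, for PhysicalAddress == 0x40000000:
//
//   PTE[0]   maps 0x40000000
//   PTE[511] maps 0x401FF000
//
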
/**
  Set the read-only attribute on the page table pool unit that covers
  Address, splitting large pages as needed.

  @param[in]  PageTableBase  Base address of the page table.
  @param[in]  Address        Address within the pool unit to protect.
  @param[in]  Level4Paging   TRUE if 4-level paging is in use.
**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN UINTN                 PageTableBase,
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  UINT64                ActiveAddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  if (PageTableBase == 0) {
    ASSERT (FALSE);
    return;
  }

  //
  // Since page tables always come from the page table pool, which is always
  // allocated at a PAGE_TABLE_POOL_ALIGNMENT boundary, it is enough to set
  // the whole pool unit read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = GetMemEncryptionAddressMask () &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr             = PageTable[Index];
    ActiveAddressEncMask = GetMemEncryptionAddressMask () & PageAttr;

    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to the next level of the table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear the R/W bit if the current page granularity is not larger than
      // the pool unit size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within
          // one page (2MB), so attributes never need to be updated for pages
          // crossing a page directory. The ASSERT below guards that
          // assumption.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // A smaller page granularity is needed; split the large page.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      if (NewPageTable == NULL) {
        ASSERT (FALSE);
        return;
      }

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | ActiveAddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | ActiveAddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

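
//
// Illustrative walk (hypothetical layout): with a 2MB pool unit whose pages
// are currently covered by a 1GB mapping, the loop above first splits the
// 1GB entry into 512 2MB entries (PoolUnitSize < LevelSize[3]), then on the
// next iteration clears IA32_PG_RW on the single 2MB entry covering the unit
// (PoolUnitSize == LevelSize[2]) and stops.
//
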
/**
  Mark the whole page table pool read-only to protect it against tampering.

  @param[in]  PageTableBase  Base address of the page table.
  @param[in]  Level4Paging   TRUE if 4-level paging is in use.
**/
STATIC
VOID
EnablePageTableProtection (
  IN UINTN    PageTableBase,
  IN BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // SetPageTablePoolReadOnly might update mPageTablePool, so remember the
  // original head of the list in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool is a multiple of PAGE_TABLE_POOL_UNIT_SIZE, which
    // matches a page size of the processor (2MB by default). Apply the
    // protection to the units one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);
}

/**
  Split a 1GB page into 512 2MB pages.

  @param[in]       PhysicalAddress  Start address of the 1GB region.
  @param[in, out]  PageEntry1G      The 1GB page entry to split.
  @param[in]       StackBase        Stack base address.
  @param[in]       StackSize        Stack size.
**/
STATIC
VOID
Split1GPageTo2M (
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64        *PageEntry1G,
  IN PHYSICAL_ADDRESS  StackBase,
  IN UINTN             StackSize
  )
{
  PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN             IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY  *PageDirectoryEntry;
  UINT64            AddressEncMask;
  UINT64            ActiveAddressEncMask;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  if (PageDirectoryEntry == NULL) {
    return;
  }

  AddressEncMask = GetMemEncryptionAddressMask ();

  ActiveAddressEncMask = *PageEntry1G & AddressEncMask;
  //
  // Fill in the 1G page entry.
  //
  *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
                  IA32_PG_P | IA32_PG_RW | ActiveAddressEncMask);

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0;
       IndexOfPageDirectoryEntries < 512;
       (IndexOfPageDirectoryEntries++,
        PageDirectoryEntry++,
        PhysicalAddress2M += SIZE_2MB))
  {
    if ((PhysicalAddress2M < StackBase + StackSize) &&
        ((PhysicalAddress2M + SIZE_2MB) > StackBase))
    {
      //
      // This 2M page covers the stack range; split it further into 4K pages.
      //
      Split2MPageTo4K (
        PhysicalAddress2M,
        (UINT64 *)PageDirectoryEntry,
        StackBase,
        StackSize,
        ActiveAddressEncMask
        );
    } else {
      //
      // Fill in the page directory entries.
      //
      PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress2M | ActiveAddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set or clear the TDX shared bit in a page table entry and notify the VMM of
  the conversion via the MapGPA TDVMCALL. When converting shared memory back
  to private, the pages are re-accepted through the
  EDKII_MEMORY_ACCEPT_PROTOCOL.

  @param[in, out]  PageTablePointer  Page table entry to update.
  @param[in]       Mode              SetSharedBit or ClearSharedBit.
  @param[in]       PhysicalAddress   Start of the region being converted.
  @param[in]       Length            Length of the region in bytes.

  @retval EFI_SUCCESS       The shared bit was updated successfully.
  @retval EFI_DEVICE_ERROR  The MapGPA TDVMCALL failed.
  @return                   Errors from locating the memory accept protocol or
                            from AcceptMemory().
**/
STATIC
EFI_STATUS
SetOrClearSharedBit (
  IN OUT UINT64          *PageTablePointer,
  IN TDX_PAGETABLE_MODE  Mode,
  IN PHYSICAL_ADDRESS    PhysicalAddress,
  IN UINT64              Length
  )
{
  UINT64                        AddressEncMask;
  UINT64                        TdStatus;
  EFI_STATUS                    Status;
  EDKII_MEMORY_ACCEPT_PROTOCOL  *MemoryAcceptProtocol;

  UINT64  MapGpaRetryAddr;
  UINT32  RetryCount;
  UINT64  EndAddress;

  MapGpaRetryAddr = 0;
  RetryCount      = 0;

  AddressEncMask = GetMemEncryptionAddressMask ();

  //
  // Set or clear the shared bit in the page table entry, and in the physical
  // address passed to MapGPA below.
  //
  if (Mode == SetSharedBit) {
    *PageTablePointer |= AddressEncMask;
    PhysicalAddress   |= AddressEncMask;
  } else {
    *PageTablePointer &= ~AddressEncMask;
    PhysicalAddress   &= ~AddressEncMask;
  }

  EndAddress = PhysicalAddress + Length;
  while (RetryCount < MAX_RETRIES_PER_PAGE) {
    TdStatus = TdVmCall (TDVMCALL_MAPGPA, PhysicalAddress, Length, 0, 0, &MapGpaRetryAddr);
    if (TdStatus != TDVMCALL_STATUS_RETRY) {
      break;
    }

    DEBUG ((DEBUG_VERBOSE, "%a: TdVmcall(MAPGPA) Retry PhysicalAddress is %llx, MapGpaRetryAddr is %llx\n", __func__, PhysicalAddress, MapGpaRetryAddr));

    if ((MapGpaRetryAddr < PhysicalAddress) || (MapGpaRetryAddr >= EndAddress)) {
      DEBUG ((
        DEBUG_ERROR,
        "%a: TdVmcall(MAPGPA) failed: MapGpaRetryAddr(%llx) is outside the requested range [%llx, %llx)\n",
        __func__,
        MapGpaRetryAddr,
        PhysicalAddress,
        EndAddress
        ));
      break;
    }

    if (MapGpaRetryAddr == PhysicalAddress) {
      RetryCount++;
      continue;
    }

    //
    // The VMM made partial progress; resume the conversion at the retry
    // address and reset the per-page retry counter.
    //
    PhysicalAddress = MapGpaRetryAddr;
    Length          = EndAddress - PhysicalAddress;
    RetryCount      = 0;
  }

  if (TdStatus != 0) {
    DEBUG ((DEBUG_ERROR, "%a: TdVmcall(MAPGPA) failed with %llx\n", __func__, TdStatus));
    ASSERT (FALSE);
    return EFI_DEVICE_ERROR;
  }

  //
  // When changing shared back to private, the memory must be accepted again.
  //
  if (Mode == ClearSharedBit) {
    Status = gBS->LocateProtocol (&gEdkiiMemoryAcceptProtocolGuid, NULL, (VOID **)&MemoryAcceptProtocol);
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_ERROR, "%a: Failed to locate MemoryAcceptProtocol with %r\n", __func__, Status));
      ASSERT (FALSE);
      return Status;
    }

    Status = MemoryAcceptProtocol->AcceptMemory (MemoryAcceptProtocol, PhysicalAddress, Length);
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_ERROR, "%a: Failed to AcceptMemory with %r\n", __func__, Status));
      ASSERT (FALSE);
      return Status;
    }
  }

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%Lx\n",
    gEfiCallerBaseName,
    __func__,
    *PageTablePointer,
    AddressEncMask,
    Mode,
    TdStatus
    ));

  return EFI_SUCCESS;
}

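
//
// Illustrative trace (hypothetical addresses): converting the range
// [0x1000000, 0x1400000) when the VMM asks to resume at 0x1200000:
//
//   TdVmCall (MAPGPA, 0x1000000, 0x400000, ...)  -> TDVMCALL_STATUS_RETRY,
//                                                   MapGpaRetryAddr = 0x1200000
//   PhysicalAddress = 0x1200000; Length = 0x200000; RetryCount = 0;
//   TdVmCall (MAPGPA, 0x1200000, 0x200000, ...)  -> 0 (success)
//
// Only repeated retries at the same address count against
// MAX_RETRIES_PER_PAGE; any forward progress resets the counter.
//
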
/**
  Check whether CR0.WP (supervisor write protection for read-only pages) is
  currently enabled.
**/
STATIC
BOOLEAN
IsReadOnlyPageWriteProtected (
  VOID
  )
{
  return ((AsmReadCr0 () & BIT16) != 0);
}

/**
  Disable CR0.WP so read-only page table pages can be modified.
**/
STATIC
VOID
DisableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () & ~BIT16);
}

/**
  Re-enable CR0.WP.
**/
VOID
EnableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () | BIT16);
}

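
//
// Usage sketch: callers bracket page table edits with these helpers so that
// CR0.WP is only dropped while an edit is in flight, exactly as
// SetMemorySharedOrPrivate () does below:
//
//   IsWpEnabled = IsReadOnlyPageWriteProtected ();
//   if (IsWpEnabled) {
//     DisableReadOnlyPageWriteProtect ();
//   }
//   // ... modify the (otherwise read-only) page tables ...
//   if (IsWpEnabled) {
//     EnableReadOnlyPageWriteProtect ();
//   }
//
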
/**
  Set or clear the TDX shared bit for the memory region specified by
  PhysicalAddress and Length, splitting 1GB and 2MB mappings into smaller
  pages as needed.

  @param[in]  Cr3BaseAddress   CR3 page table base; 0 means use the current CR3.
  @param[in]  PhysicalAddress  Start of the region to convert.
  @param[in]  Length           Length of the region in bytes.
  @param[in]  Mode             SetSharedBit or ClearSharedBit.

  @retval RETURN_SUCCESS            The shared bit was updated for the region.
  @retval RETURN_INVALID_PARAMETER  Length is zero.
  @retval RETURN_NO_MAPPING         Part of the region is not mapped.
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemorySharedOrPrivate (
  IN PHYSICAL_ADDRESS    Cr3BaseAddress,
  IN PHYSICAL_ADDRESS    PhysicalAddress,
  IN UINTN               Length,
  IN TDX_PAGETABLE_MODE  Mode
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  UINT64                          ActiveEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;
  IA32_CR4                        Cr4;
  BOOLEAN                         Page5LevelSupport;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
    gEfiCallerBaseName,
    __func__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetSharedBit) ? "Shared" : "Private"
    ));

  //
  // Check if we have a valid memory encryption mask.
  //
  AddressEncMask = GetMemEncryptionAddressMask ();

  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  //
  // If Cr3BaseAddress is not specified then read the current CR3.
  //
  if (Cr3BaseAddress == 0) {
    Cr3BaseAddress = AsmReadCr3 ();
  }

  //
  // If the CPU is using 5-level paging, LA57 is already enabled, so checking
  // CR4 is sufficient.
  //
  Cr4.UintN         = AsmReadCr4 ();
  Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);

  //
  // With 5-level paging, adjust Cr3BaseAddress to point to the first (and
  // only) 4-level page directory.
  //
  if (Page5LevelSupport) {
    Cr3BaseAddress = *(UINT64 *)(UINTN)Cr3BaseAddress & ~PgTableMask;
  }

  Status = EFI_SUCCESS;

  while (Length != 0) {
    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __func__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __func__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry.
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page: if at least 1GB remains, just update this entry.
      //
      if (!(PhysicalAddress & (BIT30 - 1)) && (Length >= BIT30)) {
        Status = SetOrClearSharedBit (&PageDirectory1GEntry->Uint64, Mode, PhysicalAddress, BIT30);
        if (EFI_ERROR (Status)) {
          goto Done;
        }

        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __func__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length          -= BIT30;
      } else {
        //
        // We must split the page.
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __func__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        continue;
      }
    } else {
      //
      // Actually a PDP entry.
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
                 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                  12) & ~PgTableMask
                 );
      PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __func__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }

      //
      // If the MustBe1 bit is not 1, it's not a 2MB entry.
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page: if at least 2MB remains, just update this entry.
        //
        if (!(PhysicalAddress & (BIT21-1)) && (Length >= BIT21)) {
          Status = SetOrClearSharedBit (&PageDirectory2MEntry->Uint64, Mode, PhysicalAddress, BIT21);
          if (EFI_ERROR (Status)) {
            goto Done;
          }

          PhysicalAddress += BIT21;
          Length          -= BIT21;
        } else {
          //
          // We must split this page into 4K pages.
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __func__,
            PhysicalAddress
            ));

          ActiveEncMask = PageDirectory2MEntry->Uint64 & AddressEncMask;

          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0,
            ActiveEncMask
            );
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
                   (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                    12) & ~PgTableMask
                   );
        PageTableEntry += PTE_OFFSET (PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __func__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }

        Status = SetOrClearSharedBit (&PageTableEntry->Uint64, Mode, PhysicalAddress, EFI_PAGE_SIZE);
        if (EFI_ERROR (Status)) {
          goto Done;
        }

        PhysicalAddress += EFI_PAGE_SIZE;
        Length          -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page tables by marking the memory used for them read-only.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush the TLB.
  //
  CpuFlushTlb ();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}

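
//
// Worked example (assuming the usual 4-level index macros from
// VirtualMemory.h: PML4_OFFSET takes bits 47:39, PDP_OFFSET 38:30,
// PDE_OFFSET 29:21, PTE_OFFSET 20:12): for PhysicalAddress == 0x40201000 the
// walk above visits
//
//   PML4[0] -> PDP[1] -> PD[1] -> PT[1]
//
// and, because the address is only 4KB-aligned, the shared bit is flipped in
// that single PTE with Length == EFI_PAGE_SIZE.
//
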
/**
  Mark a range of memory as shared (visible to the VMM) in the page table,
  notifying the host via MapGPA.

  @param[in]  Cr3BaseAddress  CR3 page table base; 0 means use the current CR3.
  @param[in]  BaseAddress     Start of the range.
  @param[in]  NumPages        Number of pages in the range.

  @return  See SetMemorySharedOrPrivate().
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxSetPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           SetSharedBit
           );
}

/**
  Mark a range of memory as private (encrypted) again in the page table,
  notifying the host via MapGPA and re-accepting the pages.

  @param[in]  Cr3BaseAddress  CR3 page table base; 0 means use the current CR3.
  @param[in]  BaseAddress     Start of the range.
  @param[in]  NumPages        Number of pages in the range.

  @return  See SetMemorySharedOrPrivate().
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxClearPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           ClearSharedBit
           );
}
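
//
// Usage sketch (illustrative driver code, hypothetical variables): flipping a
// DMA buffer to shared for VMM access and back to private afterwards. Passing
// 0 as Cr3BaseAddress makes the library read the current CR3.
//
//   RETURN_STATUS  Status;
//
//   Status = MemEncryptTdxSetPageSharedBit (0, BufferBase, NumPages);
//   ASSERT_RETURN_ERROR (Status);
//   // ... VMM-visible I/O through the buffer ...
//   Status = MemEncryptTdxClearPageSharedBit (0, BufferBase, NumPages);
//   ASSERT_RETURN_ERROR (Status);
//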