/** @file
  PeiDxeVirtualMemory.c

  Virtual Memory Management Services to set or clear the memory encryption
  (C) bit in the x64 page tables. Part of TianoCore EDK2
  BaseMemEncryptSevLib (AMD SEV/SEV-SNP support).
**/
#include <Library/CpuLib.h>
#include <Library/MemEncryptSevLib.h>
#include <Register/Amd/Cpuid.h>
#include <Register/Cpuid.h>

#include "VirtualMemory.h"
#include "SnpPageStateChange.h"

22STATIC BOOLEAN mAddressEncMaskChecked = FALSE;
23STATIC UINT64 mAddressEncMask;
24STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;
25
26STATIC VOID *mPscBuffer = NULL;
27
28typedef enum {
29 SetCBit,
30 ClearCBit
31} MAP_RANGE_MODE;
32
39UINT64
40EFIAPI
42 VOID
43 )
44{
45 UINT64 EncryptionMask;
46
47 if (mAddressEncMaskChecked) {
48 return mAddressEncMask;
49 }
50
51 EncryptionMask = MemEncryptSevGetEncryptionMask ();
52
53 mAddressEncMask = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;
54 mAddressEncMaskChecked = TRUE;
55
56 return mAddressEncMask;
57}
58
78BOOLEAN
80 IN UINTN PoolPages
81 )
82{
83 VOID *Buffer;
84
85 //
86 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
87 // header.
88 //
89 PoolPages += 1; // Add one page for header.
90 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
91 PAGE_TABLE_POOL_UNIT_PAGES;
92 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
93 if (Buffer == NULL) {
94 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
95 return FALSE;
96 }
97
98 //
99 // Link all pools into a list for easier track later.
100 //
101 if (mPageTablePool == NULL) {
102 mPageTablePool = Buffer;
103 mPageTablePool->NextPool = mPageTablePool;
104 } else {
105 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
106 mPageTablePool->NextPool = Buffer;
107 mPageTablePool = Buffer;
108 }
109
110 //
111 // Reserve one page for pool header.
112 //
113 mPageTablePool->FreePages = PoolPages - 1;
114 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
115
116 return TRUE;
117}
118
136STATIC
137VOID *
138EFIAPI
140 IN UINTN Pages
141 )
142{
143 VOID *Buffer;
144
145 if (Pages == 0) {
146 return NULL;
147 }
148
149 //
150 // Renew the pool if necessary.
151 //
152 if ((mPageTablePool == NULL) ||
153 (Pages > mPageTablePool->FreePages))
154 {
155 if (!InitializePageTablePool (Pages)) {
156 return NULL;
157 }
158 }
159
160 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
161
162 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
163 mPageTablePool->FreePages -= Pages;
164
165 DEBUG ((
166 DEBUG_VERBOSE,
167 "%a:%a: Buffer=0x%Lx Pages=%ld\n",
168 gEfiCallerBaseName,
169 __func__,
170 Buffer,
171 Pages
172 ));
173
174 return Buffer;
175}
176
187STATIC
188VOID
190 IN PHYSICAL_ADDRESS PhysicalAddress,
191 IN OUT UINT64 *PageEntry2M,
192 IN PHYSICAL_ADDRESS StackBase,
193 IN UINTN StackSize
194 )
195{
196 PHYSICAL_ADDRESS PhysicalAddress4K;
197 UINTN IndexOfPageTableEntries;
198 PAGE_TABLE_4K_ENTRY *PageTableEntry;
199 PAGE_TABLE_4K_ENTRY *PageTableEntry1;
200 UINT64 AddressEncMask;
201
202 PageTableEntry = AllocatePageTableMemory (1);
203
204 PageTableEntry1 = PageTableEntry;
205
206 AddressEncMask = InternalGetMemEncryptionAddressMask ();
207
208 ASSERT (PageTableEntry != NULL);
209 ASSERT (*PageEntry2M & AddressEncMask);
210
211 PhysicalAddress4K = PhysicalAddress;
212 for (IndexOfPageTableEntries = 0;
213 IndexOfPageTableEntries < 512;
214 (IndexOfPageTableEntries++,
215 PageTableEntry++,
216 PhysicalAddress4K += SIZE_4KB))
217 {
218 //
219 // Fill in the Page Table entries
220 //
221 PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K | AddressEncMask;
222 PageTableEntry->Bits.ReadWrite = 1;
223 PageTableEntry->Bits.Present = 1;
224 if ((PhysicalAddress4K >= StackBase) &&
225 (PhysicalAddress4K < StackBase + StackSize))
226 {
227 //
228 // Set Nx bit for stack.
229 //
230 PageTableEntry->Bits.Nx = 1;
231 }
232 }
233
234 //
235 // Fill in 2M page entry.
236 //
237 // AddressEncMask is not set for non-leaf entries since CpuPageTableLib doesn't consume
238 // encryption mask PCD. The encryption mask is overlapped with the PageTableBaseAddress
239 // field of non-leaf page table entries. If encryption mask is set for non-leaf entries,
240 // issue happens when CpuPageTableLib code use the non-leaf entry PageTableBaseAddress
241 // field with the encryption mask set to find the next level page table.
242 //
243 *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
244 IA32_PG_P | IA32_PG_RW);
245}
246
255STATIC
256VOID
258 IN UINTN PageTableBase,
259 IN EFI_PHYSICAL_ADDRESS Address,
260 IN BOOLEAN Level4Paging
261 )
262{
263 UINTN Index;
264 UINTN EntryIndex;
265 UINT64 AddressEncMask;
266 EFI_PHYSICAL_ADDRESS PhysicalAddress;
267 UINT64 *PageTable;
268 UINT64 *NewPageTable;
269 UINT64 PageAttr;
270 UINT64 LevelSize[5];
271 UINT64 LevelMask[5];
272 UINTN LevelShift[5];
273 UINTN Level;
274 UINT64 PoolUnitSize;
275
276 ASSERT (PageTableBase != 0);
277
278 //
279 // Since the page table is always from page table pool, which is always
280 // located at the boundary of PcdPageTablePoolAlignment, we just need to
281 // set the whole pool unit to be read-only.
282 //
283 Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
284
285 LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
286 LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
287 LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
288 LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;
289
290 LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
291 LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
292 LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
293 LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;
294
295 LevelSize[1] = SIZE_4KB;
296 LevelSize[2] = SIZE_2MB;
297 LevelSize[3] = SIZE_1GB;
298 LevelSize[4] = SIZE_512GB;
299
300 AddressEncMask = InternalGetMemEncryptionAddressMask ();
301 PageTable = (UINT64 *)(UINTN)PageTableBase;
302 PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;
303
304 for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
305 Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
306 Index &= PAGING_PAE_INDEX_MASK;
307
308 PageAttr = PageTable[Index];
309 if ((PageAttr & IA32_PG_PS) == 0) {
310 //
311 // Go to next level of table.
312 //
313 PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
314 PAGING_4K_ADDRESS_MASK_64);
315 continue;
316 }
317
318 if (PoolUnitSize >= LevelSize[Level]) {
319 //
320 // Clear R/W bit if current page granularity is not larger than pool unit
321 // size.
322 //
323 if ((PageAttr & IA32_PG_RW) != 0) {
324 while (PoolUnitSize > 0) {
325 //
326 // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
327 // one page (2MB). Then we don't need to update attributes for pages
328 // crossing page directory. ASSERT below is for that purpose.
329 //
330 ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));
331
332 PageTable[Index] &= ~(UINT64)IA32_PG_RW;
333 PoolUnitSize -= LevelSize[Level];
334
335 ++Index;
336 }
337 }
338
339 break;
340 } else {
341 //
342 // The smaller granularity of page must be needed.
343 //
344 ASSERT (Level > 1);
345
346 NewPageTable = AllocatePageTableMemory (1);
347 ASSERT (NewPageTable != NULL);
348
349 PhysicalAddress = PageAttr & LevelMask[Level];
350 for (EntryIndex = 0;
351 EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
352 ++EntryIndex)
353 {
354 NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
355 IA32_PG_P | IA32_PG_RW;
356 if (Level > 2) {
357 NewPageTable[EntryIndex] |= IA32_PG_PS;
358 }
359
360 PhysicalAddress += LevelSize[Level - 1];
361 }
362
363 //
364 // AddressEncMask is not set for non-leaf entries because of the way CpuPageTableLib works
365 //
366 PageTable[Index] = (UINT64)(UINTN)NewPageTable |
367 IA32_PG_P | IA32_PG_RW;
368 PageTable = NewPageTable;
369 }
370 }
371}
372
380STATIC
381VOID
383 IN UINTN PageTableBase,
384 IN BOOLEAN Level4Paging
385 )
386{
387 PAGE_TABLE_POOL *HeadPool;
388 PAGE_TABLE_POOL *Pool;
389 UINT64 PoolSize;
390 EFI_PHYSICAL_ADDRESS Address;
391
392 if (mPageTablePool == NULL) {
393 return;
394 }
395
396 //
397 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
398 // remember original one in advance.
399 //
400 HeadPool = mPageTablePool;
401 Pool = HeadPool;
402 do {
403 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
404 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
405
406 //
407 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
408 // which is one of page size of the processor (2MB by default). Let's apply
409 // the protection to them one by one.
410 //
411 while (PoolSize > 0) {
412 SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
413 Address += PAGE_TABLE_POOL_UNIT_SIZE;
414 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
415 }
416
417 Pool = Pool->NextPool;
418 } while (Pool != HeadPool);
419}
420
431STATIC
432VOID
434 IN PHYSICAL_ADDRESS PhysicalAddress,
435 IN OUT UINT64 *PageEntry1G,
436 IN PHYSICAL_ADDRESS StackBase,
437 IN UINTN StackSize
438 )
439{
440 PHYSICAL_ADDRESS PhysicalAddress2M;
441 UINTN IndexOfPageDirectoryEntries;
442 PAGE_TABLE_ENTRY *PageDirectoryEntry;
443 UINT64 AddressEncMask;
444
445 PageDirectoryEntry = AllocatePageTableMemory (1);
446
447 AddressEncMask = InternalGetMemEncryptionAddressMask ();
448 ASSERT (PageDirectoryEntry != NULL);
449 ASSERT (*PageEntry1G & AddressEncMask);
450 //
451 // Fill in 1G page entry.
452 //
453 // AddressEncMask is not set for non-leaf entries because of the way CpuPageTableLib works
454 //
455 *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
456 IA32_PG_P | IA32_PG_RW);
457
458 PhysicalAddress2M = PhysicalAddress;
459 for (IndexOfPageDirectoryEntries = 0;
460 IndexOfPageDirectoryEntries < 512;
461 (IndexOfPageDirectoryEntries++,
462 PageDirectoryEntry++,
463 PhysicalAddress2M += SIZE_2MB))
464 {
465 if ((PhysicalAddress2M < StackBase + StackSize) &&
466 ((PhysicalAddress2M + SIZE_2MB) > StackBase))
467 {
468 //
469 // Need to split this 2M page that covers stack range.
470 //
472 PhysicalAddress2M,
473 (UINT64 *)PageDirectoryEntry,
474 StackBase,
475 StackSize
476 );
477 } else {
478 //
479 // Fill in the Page Directory entries
480 //
481 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | AddressEncMask;
482 PageDirectoryEntry->Bits.ReadWrite = 1;
483 PageDirectoryEntry->Bits.Present = 1;
484 PageDirectoryEntry->Bits.MustBe1 = 1;
485 }
486 }
487}
488
496STATIC VOID
498 IN OUT UINT64 *PageTablePointer,
499 IN MAP_RANGE_MODE Mode
500 )
501{
502 UINT64 AddressEncMask;
503
504 AddressEncMask = InternalGetMemEncryptionAddressMask ();
505
506 if (Mode == SetCBit) {
507 *PageTablePointer |= AddressEncMask;
508 } else {
509 *PageTablePointer &= ~AddressEncMask;
510 }
511}
512
520STATIC
521BOOLEAN
523 VOID
524 )
525{
526 return ((AsmReadCr0 () & BIT16) != 0);
527}
528
532STATIC
533VOID
535 VOID
536 )
537{
538 AsmWriteCr0 (AsmReadCr0 () & ~BIT16);
539}
540
544STATIC
545VOID
547 VOID
548 )
549{
550 AsmWriteCr0 (AsmReadCr0 () | BIT16);
551}
552
553RETURN_STATUS
554EFIAPI
556 IN PHYSICAL_ADDRESS Cr3BaseAddress,
557 IN PHYSICAL_ADDRESS PhysicalAddress,
558 IN UINTN Length
559 )
560{
561 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
562 PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
563 UINT64 PgTableMask;
564 UINT64 *NewPageTable;
565 UINT64 AddressEncMask;
566 BOOLEAN IsWpEnabled;
567 RETURN_STATUS Status;
568
569 //
570 // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
571 //
572 PageMapLevel4Entry = NULL;
573
574 DEBUG ((
575 DEBUG_VERBOSE,
576 "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx\n",
577 gEfiCallerBaseName,
578 __func__,
579 Cr3BaseAddress,
580 PhysicalAddress,
581 (UINT64)Length
582 ));
583
584 if (Length == 0) {
586 }
587
588 //
589 // Check if we have a valid memory encryption mask
590 //
591 AddressEncMask = InternalGetMemEncryptionAddressMask ();
592 if (!AddressEncMask) {
594 }
595
596 PgTableMask = AddressEncMask | EFI_PAGE_MASK;
597
598 //
599 // Make sure that the page table is changeable.
600 //
601 IsWpEnabled = IsReadOnlyPageWriteProtected ();
602 if (IsWpEnabled) {
604 }
605
606 Status = EFI_SUCCESS;
607
608 while (Length) {
609 //
610 // If Cr3BaseAddress is not specified then read the current CR3
611 //
612 if (Cr3BaseAddress == 0) {
613 Cr3BaseAddress = AsmReadCr3 ();
614 }
615
616 PageMapLevel4Entry = (VOID *)(Cr3BaseAddress & ~PgTableMask);
617 PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
618 if (!PageMapLevel4Entry->Bits.Present) {
619 NewPageTable = AllocatePageTableMemory (1);
620 if (NewPageTable == NULL) {
621 DEBUG ((
622 DEBUG_ERROR,
623 "%a:%a: failed to allocate a new PML4 entry\n",
624 gEfiCallerBaseName,
625 __func__
626 ));
627 Status = RETURN_NO_MAPPING;
628 goto Done;
629 }
630
631 SetMem (NewPageTable, EFI_PAGE_SIZE, 0);
632
633 //
634 // AddressEncMask is not set for non-leaf entries because of the way CpuPageTableLib works
635 //
636 PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)NewPageTable;
637 PageMapLevel4Entry->Bits.MustBeZero = 0;
638 PageMapLevel4Entry->Bits.ReadWrite = 1;
639 PageMapLevel4Entry->Bits.Present = 1;
640 }
641
642 PageDirectory1GEntry = (VOID *)(
643 (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
644 12) & ~PgTableMask
645 );
646 PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
647 if (!PageDirectory1GEntry->Bits.Present) {
648 PageDirectory1GEntry->Bits.Present = 1;
649 PageDirectory1GEntry->Bits.MustBe1 = 1;
650 PageDirectory1GEntry->Bits.MustBeZero = 0;
651 PageDirectory1GEntry->Bits.ReadWrite = 1;
652 PageDirectory1GEntry->Uint64 |= (UINT64)PhysicalAddress | AddressEncMask;
653 }
654
655 if (Length <= BIT30) {
656 Length = 0;
657 } else {
658 Length -= BIT30;
659 }
660
661 PhysicalAddress += BIT30;
662 }
663
664 //
665 // Flush TLB
666 //
667 CpuFlushTlb ();
668
669Done:
670 //
671 // Restore page table write protection, if any.
672 //
673 if (IsWpEnabled) {
675 }
676
677 return Status;
678}
679
708STATIC
709RETURN_STATUS
710EFIAPI
712 IN PHYSICAL_ADDRESS Cr3BaseAddress,
713 IN PHYSICAL_ADDRESS PhysicalAddress,
714 IN UINTN Length,
715 IN MAP_RANGE_MODE Mode,
716 IN BOOLEAN CacheFlush,
717 IN BOOLEAN Mmio
718 )
719{
720 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
721 PAGE_MAP_AND_DIRECTORY_POINTER *PageUpperDirectoryPointerEntry;
722 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
723 PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
724 PAGE_TABLE_ENTRY *PageDirectory2MEntry;
725 PHYSICAL_ADDRESS OrigPhysicalAddress;
726 PAGE_TABLE_4K_ENTRY *PageTableEntry;
727 UINT64 PgTableMask;
728 UINT64 AddressEncMask;
729 BOOLEAN IsWpEnabled;
730 UINTN OrigLength;
731 RETURN_STATUS Status;
732
733 //
734 // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
735 //
736 PageMapLevel4Entry = NULL;
737
738 DEBUG ((
739 DEBUG_VERBOSE,
740 "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a CacheFlush=%u Mmio=%u\n",
741 gEfiCallerBaseName,
742 __func__,
743 Cr3BaseAddress,
744 PhysicalAddress,
745 (UINT64)Length,
746 (Mode == SetCBit) ? "Encrypt" : "Decrypt",
747 (UINT32)CacheFlush,
748 (UINT32)Mmio
749 ));
750
751 //
752 // Check if we have a valid memory encryption mask
753 //
754 AddressEncMask = InternalGetMemEncryptionAddressMask ();
755 if (!AddressEncMask) {
757 }
758
759 PgTableMask = AddressEncMask | EFI_PAGE_MASK;
760
761 if (Length == 0) {
763 }
764
765 //
766 // We are going to change the memory encryption attribute from C=0 -> C=1 or
767 // vice versa Flush the caches to ensure that data is written into memory
768 // with correct C-bit
769 //
770 if (CacheFlush) {
771 WriteBackInvalidateDataCacheRange ((VOID *)(UINTN)PhysicalAddress, Length);
772 }
773
774 //
775 // Make sure that the page table is changeable.
776 //
777 IsWpEnabled = IsReadOnlyPageWriteProtected ();
778 if (IsWpEnabled) {
780 }
781
782 Status = EFI_SUCCESS;
783
784 //
785 // To maintain the security gurantees we must set the page to shared in the RMP
786 // table before clearing the memory encryption mask from the current page table.
787 //
788 // The InternalSetPageState() is used for setting the page state in the RMP table.
789 //
790 if (!Mmio && (Mode == ClearCBit) && MemEncryptSevSnpIsEnabled ()) {
791 if (mPscBuffer == NULL) {
792 mPscBuffer = AllocateReservedPages (1);
793 ASSERT (mPscBuffer != NULL);
794 }
795
797 PhysicalAddress,
798 EFI_SIZE_TO_PAGES (Length),
799 SevSnpPageShared,
800 FALSE,
801 mPscBuffer,
802 EFI_PAGE_SIZE
803 );
804 }
805
806 //
807 // Save the specified length and physical address (we need it later).
808 //
809 OrigLength = Length;
810 OrigPhysicalAddress = PhysicalAddress;
811
812 while (Length != 0) {
813 //
814 // If Cr3BaseAddress is not specified then read the current CR3
815 //
816 if (Cr3BaseAddress == 0) {
817 Cr3BaseAddress = AsmReadCr3 ();
818 }
819
820 PageMapLevel4Entry = (VOID *)(Cr3BaseAddress & ~PgTableMask);
821 PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
822 if (!PageMapLevel4Entry->Bits.Present) {
823 DEBUG ((
824 DEBUG_ERROR,
825 "%a:%a: bad PML4 for Physical=0x%Lx\n",
826 gEfiCallerBaseName,
827 __func__,
828 PhysicalAddress
829 ));
830 Status = RETURN_NO_MAPPING;
831 goto Done;
832 }
833
834 PageDirectory1GEntry = (VOID *)(
835 (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
836 12) & ~PgTableMask
837 );
838 PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
839 if (!PageDirectory1GEntry->Bits.Present) {
840 DEBUG ((
841 DEBUG_ERROR,
842 "%a:%a: bad PDPE for Physical=0x%Lx\n",
843 gEfiCallerBaseName,
844 __func__,
845 PhysicalAddress
846 ));
847 Status = RETURN_NO_MAPPING;
848 goto Done;
849 }
850
851 //
852 // If the MustBe1 bit is not 1, it's not actually a 1GB entry
853 //
854 if (PageDirectory1GEntry->Bits.MustBe1) {
855 //
856 // Valid 1GB page
857 // If we have at least 1GB to go, we can just update this entry
858 //
859 if (((PhysicalAddress & (BIT30 - 1)) == 0) && (Length >= BIT30)) {
860 SetOrClearCBit (&PageDirectory1GEntry->Uint64, Mode);
861 DEBUG ((
862 DEBUG_VERBOSE,
863 "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
864 gEfiCallerBaseName,
865 __func__,
866 PhysicalAddress
867 ));
868 PhysicalAddress += BIT30;
869 Length -= BIT30;
870 } else {
871 //
872 // We must split the page
873 //
874 DEBUG ((
875 DEBUG_VERBOSE,
876 "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
877 gEfiCallerBaseName,
878 __func__,
879 PhysicalAddress
880 ));
882 (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
883 (UINT64 *)PageDirectory1GEntry,
884 0,
885 0
886 );
887 continue;
888 }
889 } else {
890 //
891 // Actually a PDP
892 //
893 PageUpperDirectoryPointerEntry =
894 (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
895 PageDirectory2MEntry =
896 (VOID *)(
897 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
898 12) & ~PgTableMask
899 );
900 PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
901 if (!PageDirectory2MEntry->Bits.Present) {
902 DEBUG ((
903 DEBUG_ERROR,
904 "%a:%a: bad PDE for Physical=0x%Lx\n",
905 gEfiCallerBaseName,
906 __func__,
907 PhysicalAddress
908 ));
909 Status = RETURN_NO_MAPPING;
910 goto Done;
911 }
912
913 //
914 // If the MustBe1 bit is not a 1, it's not a 2MB entry
915 //
916 if (PageDirectory2MEntry->Bits.MustBe1) {
917 //
918 // Valid 2MB page
919 // If we have at least 2MB left to go, we can just update this entry
920 //
921 if (((PhysicalAddress & (BIT21-1)) == 0) && (Length >= BIT21)) {
922 SetOrClearCBit (&PageDirectory2MEntry->Uint64, Mode);
923 PhysicalAddress += BIT21;
924 Length -= BIT21;
925 } else {
926 //
927 // We must split up this page into 4K pages
928 //
929 DEBUG ((
930 DEBUG_VERBOSE,
931 "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
932 gEfiCallerBaseName,
933 __func__,
934 PhysicalAddress
935 ));
937 (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
938 (UINT64 *)PageDirectory2MEntry,
939 0,
940 0
941 );
942 continue;
943 }
944 } else {
945 PageDirectoryPointerEntry =
946 (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
947 PageTableEntry =
948 (VOID *)(
949 (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
950 12) & ~PgTableMask
951 );
952 PageTableEntry += PTE_OFFSET (PhysicalAddress);
953 if (!PageTableEntry->Bits.Present) {
954 DEBUG ((
955 DEBUG_ERROR,
956 "%a:%a: bad PTE for Physical=0x%Lx\n",
957 gEfiCallerBaseName,
958 __func__,
959 PhysicalAddress
960 ));
961 Status = RETURN_NO_MAPPING;
962 goto Done;
963 }
964
965 SetOrClearCBit (&PageTableEntry->Uint64, Mode);
966 PhysicalAddress += EFI_PAGE_SIZE;
967 Length -= EFI_PAGE_SIZE;
968 }
969 }
970 }
971
972 //
973 // Protect the page table by marking the memory used for page table to be
974 // read-only.
975 //
976 if (IsWpEnabled) {
977 EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
978 }
979
980 //
981 // Flush TLB
982 //
983 CpuFlushTlb ();
984
985 //
986 // SEV-SNP requires that all the private pages (i.e pages mapped encrypted) must be
987 // added in the RMP table before the access.
988 //
989 // The InternalSetPageState() is used for setting the page state in the RMP table.
990 //
991 if ((Mode == SetCBit) && MemEncryptSevSnpIsEnabled ()) {
992 if (mPscBuffer == NULL) {
993 mPscBuffer = AllocateReservedPages (1);
994 ASSERT (mPscBuffer != NULL);
995 }
996
998 OrigPhysicalAddress,
999 EFI_SIZE_TO_PAGES (OrigLength),
1000 SevSnpPagePrivate,
1001 FALSE,
1002 mPscBuffer,
1003 EFI_PAGE_SIZE
1004 );
1005 }
1006
1007Done:
1008 //
1009 // Restore page table write protection, if any.
1010 //
1011 if (IsWpEnabled) {
1013 }
1014
1015 return Status;
1016}
1017
1034RETURN_STATUS
1035EFIAPI
1037 IN PHYSICAL_ADDRESS Cr3BaseAddress,
1038 IN PHYSICAL_ADDRESS PhysicalAddress,
1039 IN UINTN Length
1040 )
1041{
1042 return SetMemoryEncDec (
1043 Cr3BaseAddress,
1044 PhysicalAddress,
1045 Length,
1046 ClearCBit,
1047 TRUE,
1048 FALSE
1049 );
1050}
1051
1068RETURN_STATUS
1069EFIAPI
1071 IN PHYSICAL_ADDRESS Cr3BaseAddress,
1072 IN PHYSICAL_ADDRESS PhysicalAddress,
1073 IN UINTN Length
1074 )
1075{
1076 return SetMemoryEncDec (
1077 Cr3BaseAddress,
1078 PhysicalAddress,
1079 Length,
1080 SetCBit,
1081 TRUE,
1082 FALSE
1083 );
1084}
1085
1102RETURN_STATUS
1103EFIAPI
1105 IN PHYSICAL_ADDRESS Cr3BaseAddress,
1106 IN PHYSICAL_ADDRESS PhysicalAddress,
1107 IN UINTN Length
1108 )
1109{
1110 return SetMemoryEncDec (
1111 Cr3BaseAddress,
1112 PhysicalAddress,
1113 Length,
1114 ClearCBit,
1115 FALSE,
1116 TRUE
1117 );
1118}
UINT64 UINTN
VOID *EFIAPI WriteBackInvalidateDataCacheRange(IN VOID *Address, IN UINTN Length)
UINT64 EFIAPI RShiftU64(IN UINT64 Operand, IN UINTN Count)
Definition: RShiftU64.c:28
VOID *EFIAPI SetMem(OUT VOID *Buffer, IN UINTN Length, IN UINT8 Value)
Definition: SetMemWrapper.c:38
VOID EFIAPI CpuFlushTlb(VOID)
VOID *EFIAPI AllocateReservedPages(IN UINTN Pages)
UINTN EFIAPI AsmReadCr3(VOID)
UINTN EFIAPI AsmReadCr0(VOID)
UINTN EFIAPI AsmWriteCr0(UINTN Cr0)
#define NULL
Definition: Base.h:319
#define STATIC
Definition: Base.h:264
#define RETURN_ACCESS_DENIED
Definition: Base.h:1147
#define RETURN_NO_MAPPING
Definition: Base.h:1157
#define TRUE
Definition: Base.h:301
#define FALSE
Definition: Base.h:307
#define IN
Definition: Base.h:279
#define OUT
Definition: Base.h:284
#define RETURN_INVALID_PARAMETER
Definition: Base.h:1076
#define DEBUG(Expression)
Definition: DebugLib.h:434
UINT64 EFIAPI MemEncryptSevGetEncryptionMask(VOID)
BOOLEAN EFIAPI MemEncryptSevSnpIsEnabled(VOID)
STATIC VOID SetPageTablePoolReadOnly(IN UINTN PageTableBase, IN EFI_PHYSICAL_ADDRESS Address, IN BOOLEAN Level4Paging)
STATIC RETURN_STATUS EFIAPI SetMemoryEncDec(IN PHYSICAL_ADDRESS Cr3BaseAddress, IN PHYSICAL_ADDRESS PhysicalAddress, IN UINTN Length, IN MAP_RANGE_MODE Mode, IN BOOLEAN CacheFlush, IN BOOLEAN Mmio)
STATIC VOID Split1GPageTo2M(IN PHYSICAL_ADDRESS PhysicalAddress, IN OUT UINT64 *PageEntry1G, IN PHYSICAL_ADDRESS StackBase, IN UINTN StackSize)
RETURN_STATUS EFIAPI InternalMemEncryptSevCreateIdentityMap1G(IN PHYSICAL_ADDRESS Cr3BaseAddress, IN PHYSICAL_ADDRESS PhysicalAddress, IN UINTN Length)
RETURN_STATUS EFIAPI InternalMemEncryptSevSetMemoryDecrypted(IN PHYSICAL_ADDRESS Cr3BaseAddress, IN PHYSICAL_ADDRESS PhysicalAddress, IN UINTN Length)
RETURN_STATUS EFIAPI InternalMemEncryptSevClearMmioPageEncMask(IN PHYSICAL_ADDRESS Cr3BaseAddress, IN PHYSICAL_ADDRESS PhysicalAddress, IN UINTN Length)
STATIC BOOLEAN InitializePageTablePool(IN UINTN PoolPages)
STATIC VOID *EFIAPI AllocatePageTableMemory(IN UINTN Pages)
UINT64 EFIAPI InternalGetMemEncryptionAddressMask(VOID)
STATIC VOID Split2MPageTo4K(IN PHYSICAL_ADDRESS PhysicalAddress, IN OUT UINT64 *PageEntry2M, IN PHYSICAL_ADDRESS StackBase, IN UINTN StackSize)
RETURN_STATUS EFIAPI InternalMemEncryptSevSetMemoryEncrypted(IN PHYSICAL_ADDRESS Cr3BaseAddress, IN PHYSICAL_ADDRESS PhysicalAddress, IN UINTN Length)
STATIC VOID DisableReadOnlyPageWriteProtect(VOID)
STATIC VOID SetOrClearCBit(IN OUT UINT64 *PageTablePointer, IN MAP_RANGE_MODE Mode)
STATIC VOID EnablePageTableProtection(IN UINTN PageTableBase, IN BOOLEAN Level4Paging)
STATIC VOID EnableReadOnlyPageWriteProtect(VOID)
STATIC BOOLEAN IsReadOnlyPageWriteProtected(VOID)
VOID *EFIAPI AllocateAlignedPages(IN UINTN Pages, IN UINTN Alignment)
VOID InternalSetPageState(IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINTN NumPages, IN SEV_SNP_PAGE_STATE State, IN BOOLEAN UseLargeEntry, IN VOID *PscBuffer, IN UINTN PscBufferSize)
UINT64 EFI_PHYSICAL_ADDRESS
Definition: UefiBaseType.h:50
#define EFI_PAGES_TO_SIZE(Pages)
Definition: UefiBaseType.h:213
#define EFI_SIZE_TO_PAGES(Size)
Definition: UefiBaseType.h:200
#define EFI_SUCCESS
Definition: UefiBaseType.h:112