/** @file
  x64 virtual memory management services for the DXE IPL PEIM.

  NOTE(recovery): this file was recovered from a Doxygen-generated page of
  TianoCore EDK2's VirtualMemory.c; navigation artifacts were removed and
  extraction-dropped lines were reconstructed.
**/
26#include "DxeIpl.h"
27#include "VirtualMemory.h"
28
29//
30// Global variable to keep track current available memory used as page table.
31//
32PAGE_TABLE_POOL *mPageTablePool = NULL;
33
43VOID
45 IN VOID *HobStart
46 )
47{
50 BOOLEAN DoClear;
51
52 RscHob.Raw = HobStart;
53 MemHob.Raw = HobStart;
54 DoClear = FALSE;
55
56 //
57 // Check if page 0 exists and free
58 //
59 while ((RscHob.Raw = GetNextHob (
60 EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
61 RscHob.Raw
62 )) != NULL)
63 {
64 if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&
65 (RscHob.ResourceDescriptor->PhysicalStart == 0))
66 {
67 DoClear = TRUE;
68 //
69 // Make sure memory at 0-4095 has not been allocated.
70 //
71 while ((MemHob.Raw = GetNextHob (
72 EFI_HOB_TYPE_MEMORY_ALLOCATION,
73 MemHob.Raw
74 )) != NULL)
75 {
76 if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
77 < EFI_PAGE_SIZE)
78 {
79 DoClear = FALSE;
80 break;
81 }
82
83 MemHob.Raw = GET_NEXT_HOB (MemHob);
84 }
85
86 break;
87 }
88
89 RscHob.Raw = GET_NEXT_HOB (RscHob);
90 }
91
92 if (DoClear) {
93 DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
94 SetMem (NULL, EFI_PAGE_SIZE, 0);
95 }
96
97 return;
98}
99
107BOOLEAN
109 VOID
110 )
111{
112 return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
113}
114
122BOOLEAN
124 VOID
125 )
126{
127 UINT32 RegEax;
128 UINT32 RegEdx;
129 BOOLEAN Available;
130
131 Available = FALSE;
132 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
133 if (RegEax >= 0x80000001) {
134 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
135 if ((RegEdx & BIT20) != 0) {
136 //
137 // Bit 20: Execute Disable Bit available.
138 //
139 Available = TRUE;
140 }
141 }
142
143 return Available;
144}
145
153BOOLEAN
155 VOID
156 )
157{
159 return FALSE;
160 }
161
162 //
163 // XD flag (BIT63) in page table entry is only valid if IA32_EFER.NXE is set.
164 // Features controlled by Following PCDs need this feature to be enabled.
165 //
166 return (PcdGetBool (PcdSetNxForStack) ||
167 PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
168 PcdGet32 (PcdImageProtectionPolicy) != 0);
169}
170
175VOID
177 VOID
178 )
179{
180 UINT64 MsrRegisters;
181
182 MsrRegisters = AsmReadMsr64 (0xC0000080);
183 if ((MsrRegisters & BIT11) == 0) {
184 MsrRegisters |= BIT11;
185 AsmWriteMsr64 (0xC0000080, MsrRegisters);
186 }
187}
188
203BOOLEAN
205 IN EFI_PHYSICAL_ADDRESS Address,
206 IN UINTN Size,
207 IN EFI_PHYSICAL_ADDRESS StackBase,
208 IN UINTN StackSize,
209 IN EFI_PHYSICAL_ADDRESS GhcbBase,
210 IN UINTN GhcbSize
211 )
212{
213 if (IsNullDetectionEnabled () && (Address == 0)) {
214 return TRUE;
215 }
216
217 if (PcdGetBool (PcdCpuStackGuard)) {
218 if ((StackBase >= Address) && (StackBase < (Address + Size))) {
219 return TRUE;
220 }
221 }
222
223 if (PcdGetBool (PcdSetNxForStack)) {
224 if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
225 return TRUE;
226 }
227 }
228
229 if (GhcbBase != 0) {
230 if ((Address < GhcbBase + GhcbSize) && ((Address + Size) > GhcbBase)) {
231 return TRUE;
232 }
233 }
234
235 return FALSE;
236}
237
255BOOLEAN
257 IN UINTN PoolPages
258 )
259{
260 VOID *Buffer;
261
262 //
263 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
264 // header.
265 //
266 PoolPages += 1; // Add one page for header.
267 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
268 PAGE_TABLE_POOL_UNIT_PAGES;
269 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
270 if (Buffer == NULL) {
271 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
272 return FALSE;
273 }
274
275 //
276 // Link all pools into a list for easier track later.
277 //
278 if (mPageTablePool == NULL) {
279 mPageTablePool = Buffer;
280 mPageTablePool->NextPool = mPageTablePool;
281 } else {
282 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
283 mPageTablePool->NextPool = Buffer;
284 mPageTablePool = Buffer;
285 }
286
287 //
288 // Reserve one page for pool header.
289 //
290 mPageTablePool->FreePages = PoolPages - 1;
291 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
292
293 return TRUE;
294}
295
313VOID *
315 IN UINTN Pages
316 )
317{
318 VOID *Buffer;
319
320 if (Pages == 0) {
321 return NULL;
322 }
323
324 //
325 // Renew the pool if necessary.
326 //
327 if ((mPageTablePool == NULL) ||
328 (Pages > mPageTablePool->FreePages))
329 {
330 if (!InitializePageTablePool (Pages)) {
331 return NULL;
332 }
333 }
334
335 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
336
337 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
338 mPageTablePool->FreePages -= Pages;
339
340 return Buffer;
341}
342
354VOID
356 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
357 IN OUT UINT64 *PageEntry2M,
358 IN EFI_PHYSICAL_ADDRESS StackBase,
359 IN UINTN StackSize,
360 IN EFI_PHYSICAL_ADDRESS GhcbBase,
361 IN UINTN GhcbSize
362 )
363{
364 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;
365 UINTN IndexOfPageTableEntries;
366 PAGE_TABLE_4K_ENTRY *PageTableEntry;
367 UINT64 AddressEncMask;
368
369 //
370 // Make sure AddressEncMask is contained to smallest supported address field
371 //
372 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
373
374 PageTableEntry = AllocatePageTableMemory (1);
375 ASSERT (PageTableEntry != NULL);
376
377 //
378 // Fill in 2M page entry.
379 //
380 *PageEntry2M = (UINT64)(UINTN)PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
381
382 PhysicalAddress4K = PhysicalAddress;
383 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
384 //
385 // Fill in the Page Table entries
386 //
387 PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K;
388
389 //
390 // The GHCB range consists of two pages per CPU, the GHCB and a
391 // per-CPU variable page. The GHCB page needs to be mapped as an
392 // unencrypted page while the per-CPU variable page needs to be
393 // mapped encrypted. These pages alternate in assignment.
394 //
395 if ( (GhcbBase == 0)
396 || (PhysicalAddress4K < GhcbBase)
397 || (PhysicalAddress4K >= GhcbBase + GhcbSize)
398 || (((PhysicalAddress4K - GhcbBase) & SIZE_4KB) != 0))
399 {
400 PageTableEntry->Uint64 |= AddressEncMask;
401 }
402
403 PageTableEntry->Bits.ReadWrite = 1;
404
405 if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||
406 (PcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))
407 {
408 PageTableEntry->Bits.Present = 0;
409 } else {
410 PageTableEntry->Bits.Present = 1;
411 }
412
413 if ( PcdGetBool (PcdSetNxForStack)
414 && (PhysicalAddress4K >= StackBase)
415 && (PhysicalAddress4K < StackBase + StackSize))
416 {
417 //
418 // Set Nx bit for stack.
419 //
420 PageTableEntry->Bits.Nx = 1;
421 }
422 }
423}
424
436VOID
438 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
439 IN OUT UINT64 *PageEntry1G,
440 IN EFI_PHYSICAL_ADDRESS StackBase,
441 IN UINTN StackSize,
442 IN EFI_PHYSICAL_ADDRESS GhcbBase,
443 IN UINTN GhcbSize
444 )
445{
446 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;
447 UINTN IndexOfPageDirectoryEntries;
448 PAGE_TABLE_ENTRY *PageDirectoryEntry;
449 UINT64 AddressEncMask;
450
451 //
452 // Make sure AddressEncMask is contained to smallest supported address field
453 //
454 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
455
456 PageDirectoryEntry = AllocatePageTableMemory (1);
457 ASSERT (PageDirectoryEntry != NULL);
458
459 //
460 // Fill in 1G page entry.
461 //
462 *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;
463
464 PhysicalAddress2M = PhysicalAddress;
465 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
466 if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
467 //
468 // Need to split this 2M page that covers NULL or stack range.
469 //
470 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
471 } else {
472 //
473 // Fill in the Page Directory entries
474 //
475 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | AddressEncMask;
476 PageDirectoryEntry->Bits.ReadWrite = 1;
477 PageDirectoryEntry->Bits.Present = 1;
478 PageDirectoryEntry->Bits.MustBe1 = 1;
479 }
480 }
481}
482
491VOID
493 IN UINTN PageTableBase,
494 IN EFI_PHYSICAL_ADDRESS Address,
495 IN BOOLEAN Level4Paging
496 )
497{
498 UINTN Index;
499 UINTN EntryIndex;
500 UINT64 AddressEncMask;
501 EFI_PHYSICAL_ADDRESS PhysicalAddress;
502 UINT64 *PageTable;
503 UINT64 *NewPageTable;
504 UINT64 PageAttr;
505 UINT64 LevelSize[5];
506 UINT64 LevelMask[5];
507 UINTN LevelShift[5];
508 UINTN Level;
509 UINT64 PoolUnitSize;
510
511 ASSERT (PageTableBase != 0);
512
513 //
514 // Since the page table is always from page table pool, which is always
515 // located at the boundary of PcdPageTablePoolAlignment, we just need to
516 // set the whole pool unit to be read-only.
517 //
518 Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
519
520 LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
521 LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
522 LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
523 LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;
524
525 LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
526 LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
527 LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
528 LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;
529
530 LevelSize[1] = SIZE_4KB;
531 LevelSize[2] = SIZE_2MB;
532 LevelSize[3] = SIZE_1GB;
533 LevelSize[4] = SIZE_512GB;
534
535 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
536 PAGING_1G_ADDRESS_MASK_64;
537 PageTable = (UINT64 *)(UINTN)PageTableBase;
538 PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;
539
540 for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
541 Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
542 Index &= PAGING_PAE_INDEX_MASK;
543
544 PageAttr = PageTable[Index];
545 if ((PageAttr & IA32_PG_PS) == 0) {
546 //
547 // Go to next level of table.
548 //
549 PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
550 PAGING_4K_ADDRESS_MASK_64);
551 continue;
552 }
553
554 if (PoolUnitSize >= LevelSize[Level]) {
555 //
556 // Clear R/W bit if current page granularity is not larger than pool unit
557 // size.
558 //
559 if ((PageAttr & IA32_PG_RW) != 0) {
560 while (PoolUnitSize > 0) {
561 //
562 // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
563 // one page (2MB). Then we don't need to update attributes for pages
564 // crossing page directory. ASSERT below is for that purpose.
565 //
566 ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));
567
568 PageTable[Index] &= ~(UINT64)IA32_PG_RW;
569 PoolUnitSize -= LevelSize[Level];
570
571 ++Index;
572 }
573 }
574
575 break;
576 } else {
577 //
578 // The smaller granularity of page must be needed.
579 //
580 ASSERT (Level > 1);
581
582 NewPageTable = AllocatePageTableMemory (1);
583 ASSERT (NewPageTable != NULL);
584
585 PhysicalAddress = PageAttr & LevelMask[Level];
586 for (EntryIndex = 0;
587 EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
588 ++EntryIndex)
589 {
590 NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
591 IA32_PG_P | IA32_PG_RW;
592 if (Level > 2) {
593 NewPageTable[EntryIndex] |= IA32_PG_PS;
594 }
595
596 PhysicalAddress += LevelSize[Level - 1];
597 }
598
599 PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
600 IA32_PG_P | IA32_PG_RW;
601 PageTable = NewPageTable;
602 }
603 }
604}
605
613VOID
615 IN UINTN PageTableBase,
616 IN BOOLEAN Level4Paging
617 )
618{
619 PAGE_TABLE_POOL *HeadPool;
620 PAGE_TABLE_POOL *Pool;
621 UINT64 PoolSize;
622 EFI_PHYSICAL_ADDRESS Address;
623
624 if (mPageTablePool == NULL) {
625 return;
626 }
627
628 //
629 // No need to clear CR0.WP since PageTableBase has't been written to CR3 yet.
630 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
631 // remember original one in advance.
632 //
633 HeadPool = mPageTablePool;
634 Pool = HeadPool;
635 do {
636 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
637 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
638
639 //
640 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE, which
641 // is one of page size of the processor (2MB by default). Let's apply the
642 // protection to them one by one.
643 //
644 while (PoolSize > 0) {
645 SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
646 Address += PAGE_TABLE_POOL_UNIT_SIZE;
647 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
648 }
649
650 Pool = Pool->NextPool;
651 } while (Pool != HeadPool);
652
653 //
654 // Enable write protection, after page table attribute updated.
655 //
656 AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
657}
658
671UINTN
673 IN EFI_PHYSICAL_ADDRESS StackBase,
674 IN UINTN StackSize,
675 IN EFI_PHYSICAL_ADDRESS GhcbBase,
676 IN UINTN GhcbSize
677 )
678{
679 UINT32 RegEax;
681 UINT32 RegEdx;
682 UINT8 PhysicalAddressBits;
683 EFI_PHYSICAL_ADDRESS PageAddress;
684 UINTN IndexOfPml5Entries;
685 UINTN IndexOfPml4Entries;
686 UINTN IndexOfPdpEntries;
687 UINTN IndexOfPageDirectoryEntries;
688 UINT32 NumberOfPml5EntriesNeeded;
689 UINT32 NumberOfPml4EntriesNeeded;
690 UINT32 NumberOfPdpEntriesNeeded;
691 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel5Entry;
692 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
694 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
695 PAGE_TABLE_ENTRY *PageDirectoryEntry;
696 UINTN TotalPagesNum;
697 UINTN BigPageAddress;
698 VOID *Hob;
699 BOOLEAN Page5LevelEnabled;
700 BOOLEAN Page1GSupport;
701 PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
702 UINT64 AddressEncMask;
703 IA32_CR4 Cr4;
704
705 //
706 // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings
707 //
708 PageMapLevel5Entry = NULL;
709
710 //
711 // Make sure AddressEncMask is contained to smallest supported address field
712 //
713 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
714
715 Page1GSupport = FALSE;
716 if (PcdGetBool (PcdUse1GPageTable)) {
717 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
718 if (RegEax >= 0x80000001) {
719 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
720 if ((RegEdx & BIT26) != 0) {
721 Page1GSupport = TRUE;
722 }
723 }
724 }
725
726 //
727 // Get physical address bits supported.
728 //
729 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
730 if (Hob != NULL) {
731 PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
732 } else {
733 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
734 if (RegEax >= 0x80000008) {
735 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
736 PhysicalAddressBits = (UINT8)RegEax;
737 } else {
738 PhysicalAddressBits = 36;
739 }
740 }
741
742 if (sizeof (UINTN) == sizeof (UINT64)) {
743 //
744 // If cpu has already run in 64bit long mode PEI, Page table Level in DXE must align with previous level.
745 //
746 Cr4.UintN = AsmReadCr4 ();
747 Page5LevelEnabled = (Cr4.Bits.LA57 != 0);
748 if (Page5LevelEnabled) {
749 ASSERT (PcdGetBool (PcdUse5LevelPageTable));
750 }
751 } else {
752 //
753 // If cpu runs in 32bit protected mode PEI, Page table Level in DXE is decided by PCD and feature capability.
754 //
755 Page5LevelEnabled = FALSE;
756 if (PcdGetBool (PcdUse5LevelPageTable)) {
757 AsmCpuidEx (
760 NULL,
761 NULL,
762 &EcxFlags.Uint32,
763 NULL
764 );
765 if (EcxFlags.Bits.FiveLevelPage != 0) {
766 Page5LevelEnabled = TRUE;
767 }
768 }
769 }
770
771 DEBUG ((DEBUG_INFO, "AddressBits=%u 5LevelPaging=%u 1GPage=%u\n", PhysicalAddressBits, Page5LevelEnabled, Page1GSupport));
772
773 //
774 // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
775 // when 5-Level Paging is disabled,
776 // due to either unsupported by HW, or disabled by PCD.
777 //
778 ASSERT (PhysicalAddressBits <= 52);
779 if (!Page5LevelEnabled && (PhysicalAddressBits > 48)) {
780 PhysicalAddressBits = 48;
781 }
782
783 //
784 // Calculate the table entries needed.
785 //
786 NumberOfPml5EntriesNeeded = 1;
787 if (PhysicalAddressBits > 48) {
788 NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);
789 PhysicalAddressBits = 48;
790 }
791
792 NumberOfPml4EntriesNeeded = 1;
793 if (PhysicalAddressBits > 39) {
794 NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);
795 PhysicalAddressBits = 39;
796 }
797
798 NumberOfPdpEntriesNeeded = 1;
799 ASSERT (PhysicalAddressBits > 30);
800 NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);
801
802 //
803 // Pre-allocate big pages to avoid later allocations.
804 //
805 if (!Page1GSupport) {
806 TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
807 } else {
808 TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
809 }
810
811 //
812 // Substract the one page occupied by PML5 entries if 5-Level Paging is disabled.
813 //
814 if (!Page5LevelEnabled) {
815 TotalPagesNum--;
816 }
817
818 DEBUG ((
819 DEBUG_INFO,
820 "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
821 NumberOfPml5EntriesNeeded,
822 NumberOfPml4EntriesNeeded,
823 NumberOfPdpEntriesNeeded,
824 (UINT64)TotalPagesNum
825 ));
826
827 BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum);
828 ASSERT (BigPageAddress != 0);
829
830 //
831 // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
832 //
833 PageMap = (VOID *)BigPageAddress;
834 if (Page5LevelEnabled) {
835 //
836 // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
837 //
838 PageMapLevel5Entry = PageMap;
839 BigPageAddress += SIZE_4KB;
840 }
841
842 PageAddress = 0;
843
844 for ( IndexOfPml5Entries = 0
845 ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
846 ; IndexOfPml5Entries++)
847 {
848 //
849 // Each PML5 entry points to a page of PML4 entires.
850 // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
851 // When 5-Level Paging is disabled, below allocation happens only once.
852 //
853 PageMapLevel4Entry = (VOID *)BigPageAddress;
854 BigPageAddress += SIZE_4KB;
855
856 if (Page5LevelEnabled) {
857 //
858 // Make a PML5 Entry
859 //
860 PageMapLevel5Entry->Uint64 = (UINT64)(UINTN)PageMapLevel4Entry | AddressEncMask;
861 PageMapLevel5Entry->Bits.ReadWrite = 1;
862 PageMapLevel5Entry->Bits.Present = 1;
863 PageMapLevel5Entry++;
864 }
865
866 for ( IndexOfPml4Entries = 0
867 ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
868 ; IndexOfPml4Entries++, PageMapLevel4Entry++)
869 {
870 //
871 // Each PML4 entry points to a page of Page Directory Pointer entires.
872 // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
873 //
874 PageDirectoryPointerEntry = (VOID *)BigPageAddress;
875 BigPageAddress += SIZE_4KB;
876
877 //
878 // Make a PML4 Entry
879 //
880 PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
881 PageMapLevel4Entry->Bits.ReadWrite = 1;
882 PageMapLevel4Entry->Bits.Present = 1;
883
884 if (Page1GSupport) {
885 PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;
886
887 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
888 if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize, GhcbBase, GhcbSize)) {
889 Split1GPageTo2M (PageAddress, (UINT64 *)PageDirectory1GEntry, StackBase, StackSize, GhcbBase, GhcbSize);
890 } else {
891 //
892 // Fill in the Page Directory entries
893 //
894 PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
895 PageDirectory1GEntry->Bits.ReadWrite = 1;
896 PageDirectory1GEntry->Bits.Present = 1;
897 PageDirectory1GEntry->Bits.MustBe1 = 1;
898 }
899 }
900 } else {
901 for ( IndexOfPdpEntries = 0
902 ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
903 ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)
904 {
905 //
906 // Each Directory Pointer entries points to a page of Page Directory entires.
907 // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
908 //
909 PageDirectoryEntry = (VOID *)BigPageAddress;
910 BigPageAddress += SIZE_4KB;
911
912 //
913 // Fill in a Page Directory Pointer Entries
914 //
915 PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
916 PageDirectoryPointerEntry->Bits.ReadWrite = 1;
917 PageDirectoryPointerEntry->Bits.Present = 1;
918
919 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
920 if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize, GhcbBase, GhcbSize)) {
921 //
922 // Need to split this 2M page that covers NULL or stack range.
923 //
924 Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, GhcbBase, GhcbSize);
925 } else {
926 //
927 // Fill in the Page Directory entries
928 //
929 PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
930 PageDirectoryEntry->Bits.ReadWrite = 1;
931 PageDirectoryEntry->Bits.Present = 1;
932 PageDirectoryEntry->Bits.MustBe1 = 1;
933 }
934 }
935 }
936
937 //
938 // Fill with null entry for unused PDPTE
939 //
940 ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
941 }
942 }
943
944 //
945 // For the PML4 entries we are not using fill in a null entry.
946 //
947 ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
948 }
949
950 if (Page5LevelEnabled) {
951 Cr4.UintN = AsmReadCr4 ();
952 Cr4.Bits.LA57 = 1;
953 AsmWriteCr4 (Cr4.UintN);
954 //
955 // For the PML5 entries we are not using fill in a null entry.
956 //
957 ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
958 }
959
960 //
961 // Protect the page table by marking the memory used for page table to be
962 // read-only.
963 //
965
966 //
967 // Set IA32_EFER.NXE if necessary.
968 //
969 if (IsEnableNonExecNeeded ()) {
971 }
972
973 return (UINTN)PageMap;
974}
//
// End of file.
//
// NOTE(recovery): Doxygen cross-reference residue removed here. The symbols
// it listed (GetFirstHob/GetNextHob, LShiftU64/RShiftU64, SetMem/ZeroMem,
// AsmCpuid/AsmCpuidEx, MSR/CR0/CR4 intrinsics, PCD accessor macros,
// AllocateAlignedPages, the CPUID structured-extended-feature definitions,
// and this file's own function prototypes) are declared in DxeIpl.h,
// VirtualMemory.h, and the MdePkg library headers included at the top.
//