/** @file
  UEFI Heap Guard functions.

  Tracks guarded heap memory with a multi-level bitmap and drives the CPU
  architecture protocol to mark Guard pages not-present.

**/
#include "DxeMain.h"
#include "Imem.h"
#include "HeapGuard.h"
12
13//
14// Global to avoid infinite reentrance of memory allocation when updating
15// page table attributes, which may need allocate pages for new PDE/PTE.
16//
17GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
18
19//
20// Pointer to table tracking the Guarded memory with bitmap, in which '1'
21// is used to indicate memory guarded. '0' might be free memory or Guard
22// page itself, depending on status of memory adjacent to it.
23//
24GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
25
26//
27// Current depth level of map table pointed by mGuardedMemoryMap.
28// mMapLevel must be initialized at least by 1. It will be automatically
29// updated according to the address of memory just tracked.
30//
32
33//
34// Shift and mask for each level of map table
35//
36GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
37 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
38GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
39 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
40
41//
42// Used for promoting freed but not used pages.
43//
44GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;
45
56VOID
59 IN UINTN BitNumber,
60 IN UINT64 *BitMap
61 )
62{
63 UINTN Lsbs;
64 UINTN Qwords;
65 UINTN Msbs;
66 UINTN StartBit;
67 UINTN EndBit;
68
69 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
70 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
71
72 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
73 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
74 GUARDED_HEAP_MAP_ENTRY_BITS;
75 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
76 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
77 } else {
78 Msbs = BitNumber;
79 Lsbs = 0;
80 Qwords = 0;
81 }
82
83 if (Msbs > 0) {
84 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
85 BitMap += 1;
86 }
87
88 if (Qwords > 0) {
89 SetMem64 (
90 (VOID *)BitMap,
91 Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
92 (UINT64)-1
93 );
94 BitMap += Qwords;
95 }
96
97 if (Lsbs > 0) {
98 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
99 }
100}
101
111STATIC
112VOID
114 IN EFI_PHYSICAL_ADDRESS Address,
115 IN UINTN BitNumber,
116 IN UINT64 *BitMap
117 )
118{
119 UINTN Lsbs;
120 UINTN Qwords;
121 UINTN Msbs;
122 UINTN StartBit;
123 UINTN EndBit;
124
125 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
126 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
127
128 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
129 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
130 GUARDED_HEAP_MAP_ENTRY_BITS;
131 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
132 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
133 } else {
134 Msbs = BitNumber;
135 Lsbs = 0;
136 Qwords = 0;
137 }
138
139 if (Msbs > 0) {
140 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
141 BitMap += 1;
142 }
143
144 if (Qwords > 0) {
145 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
146 BitMap += Qwords;
147 }
148
149 if (Lsbs > 0) {
150 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
151 }
152}
153
166STATIC
167UINT64
169 IN EFI_PHYSICAL_ADDRESS Address,
170 IN UINTN BitNumber,
171 IN UINT64 *BitMap
172 )
173{
174 UINTN StartBit;
175 UINTN EndBit;
176 UINTN Lsbs;
177 UINTN Msbs;
178 UINT64 Result;
179
180 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
181
182 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
183 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
184
185 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
186 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
187 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
188 } else {
189 Msbs = BitNumber;
190 Lsbs = 0;
191 }
192
193 if ((StartBit == 0) && (BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS)) {
194 Result = *BitMap;
195 } else {
196 Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
197 if (Lsbs > 0) {
198 BitMap += 1;
199 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
200 }
201 }
202
203 return Result;
204}
205
216UINTN
218 IN EFI_PHYSICAL_ADDRESS Address,
219 IN BOOLEAN AllocMapUnit,
220 OUT UINT64 **BitMap
221 )
222{
223 UINTN Level;
224 UINT64 *GuardMap;
225 UINT64 MapMemory;
226 UINTN Index;
227 UINTN Size;
228 UINTN BitsToUnitEnd;
229 EFI_STATUS Status;
230
231 MapMemory = 0;
232
233 //
234 // Adjust current map table depth according to the address to access
235 //
236 while (AllocMapUnit &&
237 mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
238 RShiftU64 (
239 Address,
240 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
241 ) != 0)
242 {
243 if (mGuardedMemoryMap != 0) {
244 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
245 * GUARDED_HEAP_MAP_ENTRY_BYTES;
249 EFI_SIZE_TO_PAGES (Size),
250 &MapMemory,
251 FALSE
252 );
253 ASSERT_EFI_ERROR (Status);
254 ASSERT (MapMemory != 0);
255
256 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
257
258 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
259 mGuardedMemoryMap = MapMemory;
260 }
261
262 mMapLevel++;
263 }
264
265 GuardMap = &mGuardedMemoryMap;
266 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
267 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
268 ++Level)
269 {
270 if (*GuardMap == 0) {
271 if (!AllocMapUnit) {
272 GuardMap = NULL;
273 break;
274 }
275
276 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
280 EFI_SIZE_TO_PAGES (Size),
281 &MapMemory,
282 FALSE
283 );
284 ASSERT_EFI_ERROR (Status);
285 ASSERT (MapMemory != 0);
286
287 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
288 *GuardMap = MapMemory;
289 }
290
291 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
292 Index &= mLevelMask[Level];
293 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
294 }
295
296 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
297 *BitMap = GuardMap;
298
299 return BitsToUnitEnd;
300}
301
310VOID
311EFIAPI
313 IN EFI_PHYSICAL_ADDRESS Address,
314 IN UINTN NumberOfPages
315 )
316{
317 UINT64 *BitMap;
318 UINTN Bits;
319 UINTN BitsToUnitEnd;
320
321 while (NumberOfPages > 0) {
322 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
323 ASSERT (BitMap != NULL);
324
325 if (NumberOfPages > BitsToUnitEnd) {
326 // Cross map unit
327 Bits = BitsToUnitEnd;
328 } else {
329 Bits = NumberOfPages;
330 }
331
332 SetBits (Address, Bits, BitMap);
333
334 NumberOfPages -= Bits;
335 Address += EFI_PAGES_TO_SIZE (Bits);
336 }
337}
338
347VOID
348EFIAPI
350 IN EFI_PHYSICAL_ADDRESS Address,
351 IN UINTN NumberOfPages
352 )
353{
354 UINT64 *BitMap;
355 UINTN Bits;
356 UINTN BitsToUnitEnd;
357
358 while (NumberOfPages > 0) {
359 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
360 ASSERT (BitMap != NULL);
361
362 if (NumberOfPages > BitsToUnitEnd) {
363 // Cross map unit
364 Bits = BitsToUnitEnd;
365 } else {
366 Bits = NumberOfPages;
367 }
368
369 ClearBits (Address, Bits, BitMap);
370
371 NumberOfPages -= Bits;
372 Address += EFI_PAGES_TO_SIZE (Bits);
373 }
374}
375
384UINT64
386 IN EFI_PHYSICAL_ADDRESS Address,
387 IN UINTN NumberOfPages
388 )
389{
390 UINT64 *BitMap;
391 UINTN Bits;
392 UINT64 Result;
393 UINTN Shift;
394 UINTN BitsToUnitEnd;
395
396 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
397
398 Result = 0;
399 Shift = 0;
400 while (NumberOfPages > 0) {
401 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
402
403 if (NumberOfPages > BitsToUnitEnd) {
404 // Cross map unit
405 Bits = BitsToUnitEnd;
406 } else {
407 Bits = NumberOfPages;
408 }
409
410 if (BitMap != NULL) {
411 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
412 }
413
414 Shift += Bits;
415 NumberOfPages -= Bits;
416 Address += EFI_PAGES_TO_SIZE (Bits);
417 }
418
419 return Result;
420}
421
429UINTN
430EFIAPI
433 )
434{
435 UINT64 *GuardMap;
436
437 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
438 if (GuardMap != NULL) {
439 if (RShiftU64 (
440 *GuardMap,
441 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)
442 ) & 1)
443 {
444 return 1;
445 }
446 }
447
448 return 0;
449}
450
459BOOLEAN
460EFIAPI
463 )
464{
465 UINT64 BitMap;
466
467 //
468 // There must be at least one guarded page before and/or after given
469 // address if it's a Guard page. The bitmap pattern should be one of
470 // 001, 100 and 101
471 //
472 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
473 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
474}
475
484BOOLEAN
485EFIAPI
488 )
489{
490 return (GetGuardMapBit (Address) == 1);
491}
492
502VOID
503EFIAPI
505 IN EFI_PHYSICAL_ADDRESS BaseAddress
506 )
507{
508 EFI_STATUS Status;
509
510 if (gCpu == NULL) {
511 return;
512 }
513
514 //
515 // Set flag to make sure allocating memory without GUARD for page table
516 // operation; otherwise infinite loops could be caused.
517 //
518 mOnGuarding = TRUE;
519 //
520 // Note: This might overwrite other attributes needed by other features,
521 // such as NX memory protection.
522 //
523 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
524 ASSERT_EFI_ERROR (Status);
525 mOnGuarding = FALSE;
526}
527
537VOID
538EFIAPI
540 IN EFI_PHYSICAL_ADDRESS BaseAddress
541 )
542{
543 UINT64 Attributes;
544 EFI_STATUS Status;
545
546 if (gCpu == NULL) {
547 return;
548 }
549
550 //
551 // Once the Guard page is unset, it will be freed back to memory pool. NX
552 // memory protection must be restored for this page if NX is enabled for free
553 // memory.
554 //
555 Attributes = 0;
556 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
557 Attributes |= EFI_MEMORY_XP;
558 }
559
560 //
561 // Set flag to make sure allocating memory without GUARD for page table
562 // operation; otherwise infinite loops could be caused.
563 //
564 mOnGuarding = TRUE;
565 //
566 // Note: This might overwrite other attributes needed by other features,
567 // such as memory protection (NX). Please make sure they are not enabled
568 // at the same time.
569 //
570 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
571 ASSERT_EFI_ERROR (Status);
572 mOnGuarding = FALSE;
573}
574
586BOOLEAN
588 IN EFI_MEMORY_TYPE MemoryType,
589 IN EFI_ALLOCATE_TYPE AllocateType,
590 IN UINT8 PageOrPool
591 )
592{
593 UINT64 TestBit;
594 UINT64 ConfigBit;
595
596 if (AllocateType == AllocateAddress) {
597 return FALSE;
598 }
599
600 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
601 return FALSE;
602 }
603
604 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
605 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
606 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
607 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
608 } else {
609 ConfigBit = (UINT64)-1;
610 }
611
612 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
613 TestBit = BIT63;
614 } else if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
615 TestBit = BIT62;
616 } else if (MemoryType < EfiMaxMemoryType) {
617 TestBit = LShiftU64 (1, MemoryType);
618 } else if (MemoryType == EfiMaxMemoryType) {
619 TestBit = (UINT64)-1;
620 } else {
621 TestBit = 0;
622 }
623
624 return ((ConfigBit & TestBit) != 0);
625}
626
636BOOLEAN
638 IN EFI_MEMORY_TYPE MemoryType
639 )
640{
641 return IsMemoryTypeToGuard (
642 MemoryType,
644 GUARD_HEAP_TYPE_POOL
645 );
646}
647
657BOOLEAN
659 IN EFI_MEMORY_TYPE MemoryType,
660 IN EFI_ALLOCATE_TYPE AllocateType
661 )
662{
663 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
664}
665
673BOOLEAN
675 UINT8 GuardType
676 )
677{
678 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
679}
680
689VOID
692 IN UINTN NumberOfPages
693 )
694{
695 EFI_PHYSICAL_ADDRESS GuardPage;
696
697 //
698 // Set tail Guard
699 //
700 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
701 if (!IsGuardPage (GuardPage)) {
702 SetGuardPage (GuardPage);
703 }
704
705 // Set head Guard
706 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
707 if (!IsGuardPage (GuardPage)) {
708 SetGuardPage (GuardPage);
709 }
710
711 //
712 // Mark the memory range as Guarded
713 //
714 SetGuardedMemoryBits (Memory, NumberOfPages);
715}
716
725VOID
728 IN UINTN NumberOfPages
729 )
730{
731 EFI_PHYSICAL_ADDRESS GuardPage;
732 UINT64 GuardBitmap;
733
734 if (NumberOfPages == 0) {
735 return;
736 }
737
738 //
739 // Head Guard must be one page before, if any.
740 //
741 // MSB-> 1 0 <-LSB
742 // -------------------
743 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
744 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
745 // 1 X -> Don't free first page (need a new Guard)
746 // (it'll be turned into a Guard page later)
747 // -------------------
748 // Start -> -1 -2
749 //
750 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
751 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
752 if ((GuardBitmap & BIT1) == 0) {
753 //
754 // Head Guard exists.
755 //
756 if ((GuardBitmap & BIT0) == 0) {
757 //
758 // If the head Guard is not a tail Guard of adjacent memory block,
759 // unset it.
760 //
761 UnsetGuardPage (GuardPage);
762 }
763 } else {
764 //
765 // Pages before memory to free are still in Guard. It's a partial free
766 // case. Turn first page of memory block to free into a new Guard.
767 //
768 SetGuardPage (Memory);
769 }
770
771 //
772 // Tail Guard must be the page after this memory block to free, if any.
773 //
774 // MSB-> 1 0 <-LSB
775 // --------------------
776 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
777 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
778 // X 1 -> Don't free last page (need a new Guard)
779 // (it'll be turned into a Guard page later)
780 // --------------------
781 // +1 +0 <- End
782 //
783 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
784 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
785 if ((GuardBitmap & BIT0) == 0) {
786 //
787 // Tail Guard exists.
788 //
789 if ((GuardBitmap & BIT1) == 0) {
790 //
791 // If the tail Guard is not a head Guard of adjacent memory block,
792 // free it; otherwise, keep it.
793 //
794 UnsetGuardPage (GuardPage);
795 }
796 } else {
797 //
798 // Pages after memory to free are still in Guard. It's a partial free
799 // case. We need to keep one page to be a head Guard.
800 //
801 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
802 }
803
804 //
805 // No matter what, we just clear the mark of the Guarded memory.
806 //
807 ClearGuardedMemoryBits (Memory, NumberOfPages);
808}
809
824UINT64
826 IN UINT64 Start,
827 IN UINT64 Size,
828 IN UINT64 SizeRequested
829 )
830{
831 UINT64 Target;
832
833 //
834 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
835 // indicated to put the pool near the Tail Guard, we need extra bytes to
836 // make sure alignment of the returned pool address.
837 //
838 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
839 SizeRequested = ALIGN_VALUE (SizeRequested, 8);
840 }
841
842 Target = Start + Size - SizeRequested;
843 ASSERT (Target >= Start);
844 if (Target == 0) {
845 return 0;
846 }
847
848 if (!IsGuardPage (Start + Size)) {
849 // No Guard at tail to share. One more page is needed.
850 Target -= EFI_PAGES_TO_SIZE (1);
851 }
852
853 // Out of range?
854 if (Target < Start) {
855 return 0;
856 }
857
858 // At the edge?
859 if (Target == Start) {
860 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
861 // No enough space for a new head Guard if no Guard at head to share.
862 return 0;
863 }
864 }
865
866 // OK, we have enough pages for memory and its Guards. Return the End of the
867 // free space.
868 return Target + SizeRequested - 1;
869}
870
883VOID
886 IN OUT UINTN *NumberOfPages
887 )
888{
890 EFI_PHYSICAL_ADDRESS MemoryToTest;
891 UINTN PagesToFree;
892 UINT64 GuardBitmap;
893
894 if ((Memory == NULL) || (NumberOfPages == NULL) || (*NumberOfPages == 0)) {
895 return;
896 }
897
898 Start = *Memory;
899 PagesToFree = *NumberOfPages;
900
901 //
902 // Head Guard must be one page before, if any.
903 //
904 // MSB-> 1 0 <-LSB
905 // -------------------
906 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
907 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
908 // 1 X -> Don't free first page (need a new Guard)
909 // (it'll be turned into a Guard page later)
910 // -------------------
911 // Start -> -1 -2
912 //
913 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
914 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
915 if ((GuardBitmap & BIT1) == 0) {
916 //
917 // Head Guard exists.
918 //
919 if ((GuardBitmap & BIT0) == 0) {
920 //
921 // If the head Guard is not a tail Guard of adjacent memory block,
922 // free it; otherwise, keep it.
923 //
924 Start -= EFI_PAGES_TO_SIZE (1);
925 PagesToFree += 1;
926 }
927 } else {
928 //
929 // No Head Guard, and pages before memory to free are still in Guard. It's a
930 // partial free case. We need to keep one page to be a tail Guard.
931 //
932 Start += EFI_PAGES_TO_SIZE (1);
933 PagesToFree -= 1;
934 }
935
936 //
937 // Tail Guard must be the page after this memory block to free, if any.
938 //
939 // MSB-> 1 0 <-LSB
940 // --------------------
941 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
942 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
943 // X 1 -> Don't free last page (need a new Guard)
944 // (it'll be turned into a Guard page later)
945 // --------------------
946 // +1 +0 <- End
947 //
948 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
949 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
950 if ((GuardBitmap & BIT0) == 0) {
951 //
952 // Tail Guard exists.
953 //
954 if ((GuardBitmap & BIT1) == 0) {
955 //
956 // If the tail Guard is not a head Guard of adjacent memory block,
957 // free it; otherwise, keep it.
958 //
959 PagesToFree += 1;
960 }
961 } else if (PagesToFree > 0) {
962 //
963 // No Tail Guard, and pages after memory to free are still in Guard. It's a
964 // partial free case. We need to keep one page to be a head Guard.
965 //
966 PagesToFree -= 1;
967 }
968
969 *Memory = Start;
970 *NumberOfPages = PagesToFree;
971}
972
981VOID
984 IN OUT UINTN *NumberOfPages
985 )
986{
987 //
988 // FindFreePages() has already taken the Guard into account. It's safe to
989 // adjust the start address and/or number of pages here, to make sure that
990 // the Guards are also "allocated".
991 //
992 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
993 // No tail Guard, add one.
994 *NumberOfPages += 1;
995 }
996
997 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
998 // No head Guard, add one.
999 *Memory -= EFI_PAGE_SIZE;
1000 *NumberOfPages += 1;
1001 }
1002}
1003
1015VOID *
1017 IN EFI_PHYSICAL_ADDRESS Memory,
1018 IN UINTN NoPages,
1019 IN UINTN Size
1020 )
1021{
1022 if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
1023 //
1024 // Pool head is put near the head Guard
1025 //
1026 return (VOID *)(UINTN)Memory;
1027 }
1028
1029 //
1030 // Pool head is put near the tail Guard
1031 //
1032 Size = ALIGN_VALUE (Size, 8);
1033 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1034}
1035
1046VOID *
1048 IN EFI_PHYSICAL_ADDRESS Memory,
1049 IN UINTN NoPages,
1050 IN UINTN Size
1051 )
1052{
1053 if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
1054 //
1055 // Pool head is put near the head Guard
1056 //
1057 return (VOID *)(UINTN)Memory;
1058 }
1059
1060 //
1061 // Pool head is put near the tail Guard. We need to exactly undo the addition done in AdjustPoolHeadA
1062 // because we may not have allocated the pool head on the first allocated page, since we are aligned to
1063 // the tail and on some architectures, the runtime page allocation granularity is > one page. So we allocate
1064 // more pages than we need and put the pool head somewhere past the first page.
1065 //
1066 return (VOID *)(UINTN)(Memory + Size - EFI_PAGES_TO_SIZE (NoPages));
1067}
1068
1080 IN UINT64 Start,
1081 IN UINTN NumberOfPages,
1082 IN EFI_MEMORY_TYPE NewType
1083 )
1084{
1085 UINT64 OldStart;
1086 UINTN OldPages;
1087
1088 if (NewType == EfiConventionalMemory) {
1089 OldStart = Start;
1090 OldPages = NumberOfPages;
1091
1092 AdjustMemoryF (&Start, &NumberOfPages);
1093 //
1094 // It's safe to unset Guard page inside memory lock because there should
1095 // be no memory allocation occurred in updating memory page attribute at
1096 // this point. And unsetting Guard page before free will prevent Guard
1097 // page just freed back to pool from being allocated right away before
1098 // marking it usable (from non-present to present).
1099 //
1100 UnsetGuardForMemory (OldStart, OldPages);
1101 if (NumberOfPages == 0) {
1102 return EFI_SUCCESS;
1103 }
1104 } else {
1105 AdjustMemoryA (&Start, &NumberOfPages);
1106 }
1107
1108 return CoreConvertPages (Start, NumberOfPages, NewType);
1109}
1110
1114VOID
1116 VOID
1117 )
1118{
1119 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1120 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1121 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1122 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1123 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1124 UINT64 TableEntry;
1125 UINT64 Address;
1126 UINT64 GuardPage;
1127 INTN Level;
1128 UINTN Index;
1129 BOOLEAN OnGuarding;
1130
1131 if ((mGuardedMemoryMap == 0) ||
1132 (mMapLevel == 0) ||
1133 (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
1134 {
1135 return;
1136 }
1137
1138 CopyMem (Entries, mLevelMask, sizeof (Entries));
1139 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1140
1141 SetMem (Tables, sizeof (Tables), 0);
1142 SetMem (Addresses, sizeof (Addresses), 0);
1143 SetMem (Indices, sizeof (Indices), 0);
1144
1145 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1146 Tables[Level] = mGuardedMemoryMap;
1147 Address = 0;
1148 OnGuarding = FALSE;
1149
1150 DEBUG_CODE (
1152 );
1153
1154 while (TRUE) {
1155 if (Indices[Level] > Entries[Level]) {
1156 Tables[Level] = 0;
1157 Level -= 1;
1158 } else {
1159 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1160 Address = Addresses[Level];
1161
1162 if (TableEntry == 0) {
1163 OnGuarding = FALSE;
1164 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1165 Level += 1;
1166 Tables[Level] = TableEntry;
1167 Addresses[Level] = Address;
1168 Indices[Level] = 0;
1169
1170 continue;
1171 } else {
1172 Index = 0;
1173 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
1174 if ((TableEntry & 1) == 1) {
1175 if (OnGuarding) {
1176 GuardPage = 0;
1177 } else {
1178 GuardPage = Address - EFI_PAGE_SIZE;
1179 }
1180
1181 OnGuarding = TRUE;
1182 } else {
1183 if (OnGuarding) {
1184 GuardPage = Address;
1185 } else {
1186 GuardPage = 0;
1187 }
1188
1189 OnGuarding = FALSE;
1190 }
1191
1192 if (GuardPage != 0) {
1193 SetGuardPage (GuardPage);
1194 }
1195
1196 if (TableEntry == 0) {
1197 break;
1198 }
1199
1200 TableEntry = RShiftU64 (TableEntry, 1);
1201 Address += EFI_PAGE_SIZE;
1202 Index += 1;
1203 }
1204 }
1205 }
1206
1207 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1208 break;
1209 }
1210
1211 Indices[Level] += 1;
1212 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1213 Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
1214 }
1215}
1216
1224VOID
1226 OUT EFI_PHYSICAL_ADDRESS *Address
1227 )
1228{
1229 EFI_PHYSICAL_ADDRESS AddressGranularity;
1230 EFI_PHYSICAL_ADDRESS BaseAddress;
1231 UINTN Level;
1232 UINT64 Map;
1233 INTN Index;
1234
1235 ASSERT (mMapLevel >= 1);
1236
1237 BaseAddress = 0;
1238 Map = mGuardedMemoryMap;
1239 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1240 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
1241 ++Level)
1242 {
1243 AddressGranularity = LShiftU64 (1, mLevelShift[Level]);
1244
1245 //
1246 // Find the non-NULL entry at largest index.
1247 //
1248 for (Index = (INTN)mLevelMask[Level]; Index >= 0; --Index) {
1249 if (((UINT64 *)(UINTN)Map)[Index] != 0) {
1250 BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
1251 Map = ((UINT64 *)(UINTN)Map)[Index];
1252 break;
1253 }
1254 }
1255 }
1256
1257 //
1258 // Find the non-zero MSB then get the page address.
1259 //
1260 while (Map != 0) {
1261 Map = RShiftU64 (Map, 1);
1262 BaseAddress += EFI_PAGES_TO_SIZE (1);
1263 }
1264
1265 *Address = BaseAddress;
1266}
1267
1276VOID
1278 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1279 IN UINTN Pages
1280 )
1281{
1282 SetGuardedMemoryBits (BaseAddress, Pages);
1283}
1284
1293VOID
1294EFIAPI
1296 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1297 IN UINTN Pages
1298 )
1299{
1300 EFI_STATUS Status;
1301
1302 //
1303 // Legacy memory lower than 1MB might be accessed with no allocation. Leave
1304 // them alone.
1305 //
1306 if (BaseAddress < BASE_1MB) {
1307 return;
1308 }
1309
1310 MarkFreedPages (BaseAddress, Pages);
1311 if (gCpu != NULL) {
1312 //
1313 // Set flag to make sure allocating memory without GUARD for page table
1314 // operation; otherwise infinite loops could be caused.
1315 //
1316 mOnGuarding = TRUE;
1317 //
1318 // Note: This might overwrite other attributes needed by other features,
1319 // such as NX memory protection.
1320 //
1321 Status = gCpu->SetMemoryAttributes (
1322 gCpu,
1323 BaseAddress,
1324 EFI_PAGES_TO_SIZE (Pages),
1325 EFI_MEMORY_RP
1326 );
1327 //
1328 // Normally we should ASSERT the returned Status. But there might be memory
1329 // alloc/free involved in SetMemoryAttributes(), which might fail this
1330 // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.
1331 //
1332 if (EFI_ERROR (Status)) {
1333 DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
1334 }
1335
1336 mOnGuarding = FALSE;
1337 }
1338}
1339
1348VOID
1349EFIAPI
1351 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1352 IN UINTN Pages
1353 )
1354{
1355 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1356 GuardFreedPages (BaseAddress, Pages);
1357 }
1358}
1359
1364VOID
1366 VOID
1367 )
1368{
1369 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1370 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1371 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1372 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1373 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1374 UINT64 TableEntry;
1375 UINT64 Address;
1376 UINT64 GuardPage;
1377 INTN Level;
1378 UINT64 BitIndex;
1379 UINTN GuardPageNumber;
1380
1381 if ((mGuardedMemoryMap == 0) ||
1382 (mMapLevel == 0) ||
1383 (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
1384 {
1385 return;
1386 }
1387
1388 CopyMem (Entries, mLevelMask, sizeof (Entries));
1389 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1390
1391 SetMem (Tables, sizeof (Tables), 0);
1392 SetMem (Addresses, sizeof (Addresses), 0);
1393 SetMem (Indices, sizeof (Indices), 0);
1394
1395 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1396 Tables[Level] = mGuardedMemoryMap;
1397 Address = 0;
1398 GuardPage = (UINT64)-1;
1399 GuardPageNumber = 0;
1400
1401 while (TRUE) {
1402 if (Indices[Level] > Entries[Level]) {
1403 Tables[Level] = 0;
1404 Level -= 1;
1405 } else {
1406 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1407 Address = Addresses[Level];
1408
1409 if (TableEntry == 0) {
1410 GuardPageNumber = 0;
1411 GuardPage = (UINT64)-1;
1412 } else {
1413 if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1414 Level += 1;
1415 Tables[Level] = TableEntry;
1416 Addresses[Level] = Address;
1417 Indices[Level] = 0;
1418
1419 continue;
1420 } else {
1421 BitIndex = 1;
1422 while (BitIndex != 0) {
1423 if ((TableEntry & BitIndex) != 0) {
1424 if (GuardPage == (UINT64)-1) {
1425 GuardPage = Address;
1426 }
1427
1428 ++GuardPageNumber;
1429 } else if (GuardPageNumber > 0) {
1430 GuardFreedPages (GuardPage, GuardPageNumber);
1431 GuardPageNumber = 0;
1432 GuardPage = (UINT64)-1;
1433 }
1434
1435 if (TableEntry == 0) {
1436 break;
1437 }
1438
1439 Address += EFI_PAGES_TO_SIZE (1);
1440 BitIndex = LShiftU64 (BitIndex, 1);
1441 }
1442 }
1443 }
1444 }
1445
1446 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1447 break;
1448 }
1449
1450 Indices[Level] += 1;
1451 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1452 Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
1453 }
1454
1455 //
1456 // Update the maximum address of freed page which can be used for memory
1457 // promotion upon out-of-memory-space.
1458 //
1460 if (Address != 0) {
1461 mLastPromotedPage = Address;
1462 }
1463}
1464
1475VOID
1477 IN EFI_MEMORY_DESCRIPTOR *MemoryMapEntry,
1478 IN EFI_PHYSICAL_ADDRESS MaxAddress
1479 )
1480{
1481 EFI_PHYSICAL_ADDRESS EndAddress;
1482 UINT64 Bitmap;
1483 INTN Pages;
1484
1485 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
1486 (MemoryMapEntry->Type >= EfiMemoryMappedIO))
1487 {
1488 return;
1489 }
1490
1491 Bitmap = 0;
1492 Pages = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
1493 Pages -= (INTN)MemoryMapEntry->NumberOfPages;
1494 while (Pages > 0) {
1495 if (Bitmap == 0) {
1496 EndAddress = MemoryMapEntry->PhysicalStart +
1497 EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
1498 Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
1499 }
1500
1501 if ((Bitmap & 1) == 0) {
1502 break;
1503 }
1504
1505 Pages--;
1506 MemoryMapEntry->NumberOfPages++;
1507 Bitmap = RShiftU64 (Bitmap, 1);
1508 }
1509}
1510
1530BOOLEAN
1532 OUT EFI_PHYSICAL_ADDRESS *StartAddress,
1533 OUT EFI_PHYSICAL_ADDRESS *EndAddress
1534 )
1535{
1536 EFI_STATUS Status;
1537 UINTN AvailablePages;
1538 UINT64 Bitmap;
1540
1541 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1542 return FALSE;
1543 }
1544
1545 //
1546 // Similar to memory allocation service, always search the freed pages in
1547 // descending direction.
1548 //
1549 Start = mLastPromotedPage;
1550 AvailablePages = 0;
1551 while (AvailablePages == 0) {
1552 Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
1553 //
1554 // If the address wraps around, try the really freed pages at top.
1555 //
1556 if (Start > mLastPromotedPage) {
1558 ASSERT (Start != 0);
1559 Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
1560 }
1561
1562 Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
1563 while (Bitmap > 0) {
1564 if ((Bitmap & 1) != 0) {
1565 ++AvailablePages;
1566 } else if (AvailablePages == 0) {
1567 Start += EFI_PAGES_TO_SIZE (1);
1568 } else {
1569 break;
1570 }
1571
1572 Bitmap = RShiftU64 (Bitmap, 1);
1573 }
1574 }
1575
1576 if (AvailablePages != 0) {
1577 DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
1578 ClearGuardedMemoryBits (Start, AvailablePages);
1579
1580 if (gCpu != NULL) {
1581 //
1582 // Set flag to make sure allocating memory without GUARD for page table
1583 // operation; otherwise infinite loops could be caused.
1584 //
1585 mOnGuarding = TRUE;
1586 Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE (AvailablePages), 0);
1587 ASSERT_EFI_ERROR (Status);
1588 mOnGuarding = FALSE;
1589 }
1590
1591 mLastPromotedPage = Start;
1592 *StartAddress = Start;
1593 *EndAddress = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
1594 return TRUE;
1595 }
1596
1597 return FALSE;
1598}
1599
1603VOID
1605 VOID
1606 )
1607{
1608 ASSERT (gCpu != NULL);
1609
1610 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&
1611 IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED))
1612 {
1613 DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
1614 CpuDeadLoop ();
1615 }
1616
1617 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {
1619 }
1620
1621 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1623 }
1624}
1625
1634VOID
1636 IN UINT64 Value,
1637 OUT CHAR8 *BinString
1638 )
1639{
1640 UINTN Index;
1641
1642 if (BinString == NULL) {
1643 return;
1644 }
1645
1646 for (Index = 64; Index > 0; --Index) {
1647 BinString[Index - 1] = '0' + (Value & 1);
1648 Value = RShiftU64 (Value, 1);
1649 }
1650
1651 BinString[64] = '\0';
1652}
1653
1657VOID
1658EFIAPI
1660 VOID
1661 )
1662{
1663 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1664 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1665 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1666 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1667 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1668 UINT64 TableEntry;
1669 UINT64 Address;
1670 INTN Level;
1671 UINTN RepeatZero;
1672 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
1673 CHAR8 *Ruler1;
1674 CHAR8 *Ruler2;
1675
1676 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
1677 return;
1678 }
1679
1680 if ((mGuardedMemoryMap == 0) ||
1681 (mMapLevel == 0) ||
1682 (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
1683 {
1684 return;
1685 }
1686
1687 Ruler1 = " 3 2 1 0";
1688 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1689
1690 DEBUG ((
1691 HEAP_GUARD_DEBUG_LEVEL,
1692 "============================="
1693 " Guarded Memory Bitmap "
1694 "==============================\r\n"
1695 ));
1696 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
1697 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
1698
1699 CopyMem (Entries, mLevelMask, sizeof (Entries));
1700 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1701
1702 SetMem (Indices, sizeof (Indices), 0);
1703 SetMem (Tables, sizeof (Tables), 0);
1704 SetMem (Addresses, sizeof (Addresses), 0);
1705
1706 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1707 Tables[Level] = mGuardedMemoryMap;
1708 Address = 0;
1709 RepeatZero = 0;
1710
1711 while (TRUE) {
1712 if (Indices[Level] > Entries[Level]) {
1713 Tables[Level] = 0;
1714 Level -= 1;
1715 RepeatZero = 0;
1716
1717 DEBUG ((
1718 HEAP_GUARD_DEBUG_LEVEL,
1719 "========================================="
1720 "=========================================\r\n"
1721 ));
1722 } else {
1723 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
1724 Address = Addresses[Level];
1725
1726 if (TableEntry == 0) {
1727 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1728 if (RepeatZero == 0) {
1729 Uint64ToBinString (TableEntry, String);
1730 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1731 } else if (RepeatZero == 1) {
1732 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
1733 }
1734
1735 RepeatZero += 1;
1736 }
1737 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1738 Level += 1;
1739 Tables[Level] = TableEntry;
1740 Addresses[Level] = Address;
1741 Indices[Level] = 0;
1742 RepeatZero = 0;
1743
1744 continue;
1745 } else {
1746 RepeatZero = 0;
1747 Uint64ToBinString (TableEntry, String);
1748 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1749 }
1750 }
1751
1752 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1753 break;
1754 }
1755
1756 Indices[Level] += 1;
1757 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1758 Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
1759 }
1760}
UINT64 UINTN
INT64 INTN
VOID EFIAPI CpuDeadLoop(VOID)
Definition: CpuDeadLoop.c:25
UINT64 EFIAPI RShiftU64(IN UINT64 Operand, IN UINTN Count)
Definition: RShiftU64.c:28
UINT64 EFIAPI MultU64x32(IN UINT64 Multiplicand, IN UINT32 Multiplier)
Definition: MultU64x32.c:27
UINT64 EFIAPI LShiftU64(IN UINT64 Operand, IN UINTN Count)
Definition: LShiftU64.c:28
VOID *EFIAPI SetMem64(OUT VOID *Buffer, IN UINTN Length, IN UINT64 Value)
VOID *EFIAPI CopyMem(OUT VOID *DestinationBuffer, IN CONST VOID *SourceBuffer, IN UINTN Length)
VOID *EFIAPI SetMem(OUT VOID *Buffer, IN UINTN Length, IN UINT8 Value)
Definition: SetMemWrapper.c:38
VOID GetLastGuardedFreePageAddress(OUT EFI_PHYSICAL_ADDRESS *Address)
Definition: HeapGuard.c:1225
VOID SetGuardForMemory(IN EFI_PHYSICAL_ADDRESS Memory, IN UINTN NumberOfPages)
Definition: HeapGuard.c:690
UINTN EFIAPI GetGuardMapBit(IN EFI_PHYSICAL_ADDRESS Address)
Definition: HeapGuard.c:431
VOID AdjustMemoryF(IN OUT EFI_PHYSICAL_ADDRESS *Memory, IN OUT UINTN *NumberOfPages)
Definition: HeapGuard.c:884
VOID EFIAPI GuardFreedPagesChecked(IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINTN Pages)
Definition: HeapGuard.c:1350
VOID * AdjustPoolHeadF(IN EFI_PHYSICAL_ADDRESS Memory, IN UINTN NoPages, IN UINTN Size)
Definition: HeapGuard.c:1047
VOID SetAllGuardPages(VOID)
Definition: HeapGuard.c:1115
VOID EFIAPI DumpGuardedMemoryBitmap(VOID)
Definition: HeapGuard.c:1659
BOOLEAN PromoteGuardedFreePages(OUT EFI_PHYSICAL_ADDRESS *StartAddress, OUT EFI_PHYSICAL_ADDRESS *EndAddress)
Definition: HeapGuard.c:1531
STATIC UINT64 GetBits(IN EFI_PHYSICAL_ADDRESS Address, IN UINTN BitNumber, IN UINT64 *BitMap)
Definition: HeapGuard.c:168
BOOLEAN EFIAPI IsGuardPage(IN EFI_PHYSICAL_ADDRESS Address)
Definition: HeapGuard.c:461
STATIC VOID ClearBits(IN EFI_PHYSICAL_ADDRESS Address, IN UINTN BitNumber, IN UINT64 *BitMap)
Definition: HeapGuard.c:113
VOID EFIAPI SetGuardPage(IN EFI_PHYSICAL_ADDRESS BaseAddress)
Definition: HeapGuard.c:504
VOID EFIAPI UnsetGuardPage(IN EFI_PHYSICAL_ADDRESS BaseAddress)
Definition: HeapGuard.c:539
VOID Uint64ToBinString(IN UINT64 Value, OUT CHAR8 *BinString)
Definition: HeapGuard.c:1635
VOID MergeGuardPages(IN EFI_MEMORY_DESCRIPTOR *MemoryMapEntry, IN EFI_PHYSICAL_ADDRESS MaxAddress)
Definition: HeapGuard.c:1476
UINT64 GetGuardedMemoryBits(IN EFI_PHYSICAL_ADDRESS Address, IN UINTN NumberOfPages)
Definition: HeapGuard.c:385
BOOLEAN IsHeapGuardEnabled(UINT8 GuardType)
Definition: HeapGuard.c:674
VOID GuardAllFreedPages(VOID)
Definition: HeapGuard.c:1365
VOID AdjustMemoryA(IN OUT EFI_PHYSICAL_ADDRESS *Memory, IN OUT UINTN *NumberOfPages)
Definition: HeapGuard.c:982
VOID UnsetGuardForMemory(IN EFI_PHYSICAL_ADDRESS Memory, IN UINTN NumberOfPages)
Definition: HeapGuard.c:726
VOID EFIAPI GuardFreedPages(IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINTN Pages)
Definition: HeapGuard.c:1295
UINT64 AdjustMemoryS(IN UINT64 Start, IN UINT64 Size, IN UINT64 SizeRequested)
Definition: HeapGuard.c:825
VOID MarkFreedPages(IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINTN Pages)
Definition: HeapGuard.c:1277
STATIC VOID SetBits(IN EFI_PHYSICAL_ADDRESS Address, IN UINTN BitNumber, IN UINT64 *BitMap)
Definition: HeapGuard.c:57
BOOLEAN IsPoolTypeToGuard(IN EFI_MEMORY_TYPE MemoryType)
Definition: HeapGuard.c:637
VOID EFIAPI ClearGuardedMemoryBits(IN EFI_PHYSICAL_ADDRESS Address, IN UINTN NumberOfPages)
Definition: HeapGuard.c:349
BOOLEAN EFIAPI IsMemoryGuarded(IN EFI_PHYSICAL_ADDRESS Address)
Definition: HeapGuard.c:486
VOID EFIAPI SetGuardedMemoryBits(IN EFI_PHYSICAL_ADDRESS Address, IN UINTN NumberOfPages)
Definition: HeapGuard.c:312
BOOLEAN IsPageTypeToGuard(IN EFI_MEMORY_TYPE MemoryType, IN EFI_ALLOCATE_TYPE AllocateType)
Definition: HeapGuard.c:658
UINTN FindGuardedMemoryMap(IN EFI_PHYSICAL_ADDRESS Address, IN BOOLEAN AllocMapUnit, OUT UINT64 **BitMap)
Definition: HeapGuard.c:217
EFI_STATUS CoreConvertPagesWithGuard(IN UINT64 Start, IN UINTN NumberOfPages, IN EFI_MEMORY_TYPE NewType)
Definition: HeapGuard.c:1079
VOID HeapGuardCpuArchProtocolNotify(VOID)
Definition: HeapGuard.c:1604
VOID * AdjustPoolHeadA(IN EFI_PHYSICAL_ADDRESS Memory, IN UINTN NoPages, IN UINTN Size)
Definition: HeapGuard.c:1016
BOOLEAN IsMemoryTypeToGuard(IN EFI_MEMORY_TYPE MemoryType, IN EFI_ALLOCATE_TYPE AllocateType, IN UINT8 PageOrPool)
Definition: HeapGuard.c:587
EFI_STATUS CoreConvertPages(IN UINT64 Start, IN UINT64 NumberOfPages, IN EFI_MEMORY_TYPE NewType)
Definition: Page.c:1080
EFI_STATUS EFIAPI CoreInternalAllocatePages(IN EFI_ALLOCATE_TYPE Type, IN EFI_MEMORY_TYPE MemoryType, IN UINTN NumberOfPages, IN OUT EFI_PHYSICAL_ADDRESS *Memory, IN BOOLEAN NeedGuard)
Definition: Page.c:1381
#define NULL
Definition: Base.h:319
#define STATIC
Definition: Base.h:264
#define ALIGN_VALUE(Value, Alignment)
Definition: Base.h:948
#define TRUE
Definition: Base.h:301
#define FALSE
Definition: Base.h:307
#define IN
Definition: Base.h:279
#define OUT
Definition: Base.h:284
#define GLOBAL_REMOVE_IF_UNREFERENCED
Definition: Base.h:48
#define ASSERT_EFI_ERROR(StatusParameter)
Definition: DebugLib.h:462
#define DEBUG(Expression)
Definition: DebugLib.h:434
#define DEBUG_CODE(Expression)
Definition: DebugLib.h:590
#define PcdGet64(TokenName)
Definition: PcdLib.h:375
#define PcdGet8(TokenName)
Definition: PcdLib.h:336
UINT64 EFI_PHYSICAL_ADDRESS
Definition: UefiBaseType.h:50
#define EFI_PAGES_TO_SIZE(Pages)
Definition: UefiBaseType.h:213
RETURN_STATUS EFI_STATUS
Definition: UefiBaseType.h:29
#define EFI_SIZE_TO_PAGES(Size)
Definition: UefiBaseType.h:200
#define EFI_SUCCESS
Definition: UefiBaseType.h:112
EFI_MEMORY_TYPE
@ EfiBootServicesData
@ EfiConventionalMemory
@ EfiMemoryMappedIO
EFI_ALLOCATE_TYPE
Definition: UefiSpec.h:29
@ AllocateAddress
Definition: UefiSpec.h:42
@ AllocateAnyPages
Definition: UefiSpec.h:33