// HeapGuard.h

#ifndef _HEAPGUARD_H_
#define _HEAPGUARD_H_

//
// The following macros are used to define and access the guarded memory
// bitmap table.
//
// To simplify access and reduce the memory used for this table, the table
// is constructed in a similar way to a page table structure, but in the
// reverse direction, i.e. growing from bottom up to top.
//
//    - 1 bit tracks 1 page (4KB)
//    - 1 UINT64 map entry tracks 256KB of memory
//    - 1K UINT64s (one map table) track 256MB of memory
//    - Five levels of tables can track any address of a 64-bit system,
//      as below.
//
//     512   *   512   *   512   *   512   *    1K    *  64b  *     4K
//   111111111 111111111 111111111 111111111 1111111111 111111 111111111111
//   63        54        45        36        27         17     11         0
//      9b        9b        9b        9b        10b        6b       12b
//      L0   ->   L1   ->   L2   ->   L3   ->   L4    ->  bits ->   page
//     1FF       1FF       1FF       1FF       3FF        3F        FFF
//
// An L4 table has 1K * sizeof(UINT64) = 8KB (2 pages) and can track 256MB
// of memory. Each L0-L3 table is allocated only when its memory address
// range needs to be tracked, and only one page is allocated each time.
// This keeps the memory used to establish the map table small.
//
// For a typical system configuration with 4GB of memory, two levels of
// tables can track all of it, because two levels of map tables (L3 + L4)
// already cover a 37-bit memory address range. And since a typical UEFI
// BIOS consumes less than 128MB of memory during boot, only
//
//    1 page (L3) + 2 pages (L4)
//
// of memory (3 pages) is needed to track all memory allocations. In this
// case there is no need to set up the L0-L2 tables.
//

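//
// Worked example (illustrative addition, assuming the standard 4KB EFI page,
// i.e. EFI_PAGE_SHIFT == 12): the page-aligned address 0x12345000 decomposes
// into the table indices below.
//
//    L0 index  = (0x12345000 >> 55) & 0x1FF = 0
//    L1 index  = (0x12345000 >> 46) & 0x1FF = 0
//    L2 index  = (0x12345000 >> 37) & 0x1FF = 0
//    L3 index  = (0x12345000 >> 28) & 0x1FF = 1
//    L4 index  = (0x12345000 >> 18) & 0x3FF = 0x08D
//    bit index = (0x12345000 >> 12) & 0x3F  = 0x05
//
// That is, the page at 0x12345000 is tracked by bit 5 of UINT64 entry 0x8D
// in the L4 table selected by L3 entry 1 (the table covering [256MB, 512MB)).
//
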
//
// Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
// bits in address. (512 = 1 << 9)
//
#define BYTE_LENGTH_SHIFT  3 // (8 = 1 << 3)

#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT \
        (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)

#define GUARDED_HEAP_MAP_TABLE_DEPTH  5

// Use UINT64_index + bit_index_of_UINT64 to locate the bit in the map
#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT  6 // (64 = 1 << 6)

#define GUARDED_HEAP_MAP_ENTRY_BITS \
        (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)

#define GUARDED_HEAP_MAP_ENTRY_BYTES \
        (GUARDED_HEAP_MAP_ENTRY_BITS / 8)

// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
#define GUARDED_HEAP_MAP_ENTRY_SHIFT              \
        (GUARDED_HEAP_MAP_ENTRY_BITS              \
         - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
         - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT       \
         - EFI_PAGE_SHIFT)

// L4 table address mask: ((1 << 10) - 1) = 0x3FF
#define GUARDED_HEAP_MAP_ENTRY_MASK \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)

// Size of each L4 table: (1 << 10) * 8 = 8KB = 2 pages
#define GUARDED_HEAP_MAP_SIZE \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)

// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
#define GUARDED_HEAP_MAP_UNIT_SIZE \
        (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)

// L4 table entry number: 8KB / 8 = 1024
#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT \
        (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)

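//
// Compile-time sanity checks (illustrative addition, not part of the
// original interface): these restate the constants derived in the comments
// above, assuming the standard 4KB EFI page (EFI_PAGE_SHIFT == 12).
//
STATIC_ASSERT (GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT == 9, "One map table page holds 512 entries");
STATIC_ASSERT (GUARDED_HEAP_MAP_ENTRY_SHIFT == 10, "L4 tables are indexed by 10 address bits");
STATIC_ASSERT (GUARDED_HEAP_MAP_SIZE == 0x2000, "Each L4 table occupies 8KB (2 pages)");
STATIC_ASSERT (GUARDED_HEAP_MAP_ENTRIES_PER_UNIT == 1024, "Each L4 table holds 1024 UINT64 entries");
STATIC_ASSERT (GUARDED_HEAP_MAP_UNIT_SIZE == 0x10000000, "One L4 table tracks 256MB of memory");
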
// L4 table entry indexing
#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address)        \
  (RShiftU64 (Address, EFI_PAGE_SHIFT                \
              + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)    \
   & GUARDED_HEAP_MAP_ENTRY_MASK)

// L4 table entry bit indexing
#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address)    \
  (RShiftU64 (Address, EFI_PAGE_SHIFT)               \
   & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))

//
// Total bits (pages) tracked by one L4 table (65536-bit)
//
#define GUARDED_HEAP_MAP_BITS                 \
  (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT         \
         + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))

//
// Bit indexing inside the whole L4 table (0 - 65535)
//
#define GUARDED_HEAP_MAP_BIT_INDEX(Address)          \
  (RShiftU64 (Address, EFI_PAGE_SHIFT)               \
   & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT            \
             + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))

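//
// Illustrative cross-check (not from the original header): for any address,
// the whole-table bit index equals the entry index scaled by 64 plus the bit
// index within that entry.  Using the sample address 0x12345000 again:
//
//    GUARDED_HEAP_MAP_BIT_INDEX (0x12345000)       = 0x2345
//    GUARDED_HEAP_MAP_ENTRY_INDEX (0x12345000)     = 0x08D
//    GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (0x12345000) = 0x05
//
//    0x08D * 64 + 0x05 = 0x2345
//
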
//
// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
//
#define GUARDED_HEAP_MAP_TABLE_SHIFT                                  \
  (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT    \
   + EFI_PAGE_SHIFT)

//
// Macro used to initialize the local array variable for map table traversing
// {55, 46, 37, 28, 18}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS                                 \
  {                                                                         \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3,  \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2,  \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT,      \
    GUARDED_HEAP_MAP_TABLE_SHIFT,                                           \
    EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT                       \
  }

//
// Masks used to extract address range of each level of table
// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS              \
  {                                                     \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,      \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,      \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,      \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,      \
    (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1             \
  }

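//
// Illustrative sketch (an assumed usage pattern, not code from this header):
// the two arrays above are meant to initialize per-level shift/mask tables so
// that a lookup can peel one index off the address per level, from L0 down to
// L4.  The variable names below are illustrative only.
//
//   UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH] = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
//   UINTN   Masks[GUARDED_HEAP_MAP_TABLE_DEPTH]  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
//   UINTN   Level;
//   UINT64  Index;
//
//   for (Level = 0; Level < GUARDED_HEAP_MAP_TABLE_DEPTH; Level++) {
//     Index = RShiftU64 (Address, Shifts[Level]) & Masks[Level];
//     // ... use Index to select the entry of the table at this level ...
//   }
//
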
//
// Memory type to guard (matching the related PCD definition)
//
#define GUARD_HEAP_TYPE_PAGE   BIT0
#define GUARD_HEAP_TYPE_POOL   BIT1
#define GUARD_HEAP_TYPE_FREED  BIT4
#define GUARD_HEAP_TYPE_ALL    \
  (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_FREED)

//
// Debug message level
//
#define HEAP_GUARD_DEBUG_LEVEL  (DEBUG_POOL|DEBUG_PAGE)

// Node type of the heap guard global data structure
typedef struct {
  UINT32                  TailMark;
  UINT32                  HeadMark;
  EFI_PHYSICAL_ADDRESS    Address;
  LIST_ENTRY              Link;
} HEAP_GUARD_NODE;

// Internal page-state conversion worker, defined in Page.c.
EFI_STATUS
CoreConvertPages (
  IN UINT64           Start,
  IN UINT64           NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

// Wrapper of CoreConvertPages() that adjusts the page range for Guard pages.
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

// Set head and tail Guard pages for the given memory range.
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

// Unset head and tail Guard pages for the given memory range.
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

// Adjust the start address and number of pages to free according to Guard.
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );

// Adjust the start address and number of pages to allocate according to Guard.
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );

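//
// Illustrative sketch (an assumption about typical usage, not a declaration
// from this header): the DXE core's page conversion path is expected to
// combine the helpers above roughly as below, adjusting the range with
// AdjustMemoryF() when pages are freed back to EfiConventionalMemory, or
// with AdjustMemoryA() when pages are allocated, before handing the adjusted
// range to CoreConvertPages().  The function name is hypothetical and
// details of the real implementation are omitted.
//
//   EFI_STATUS
//   ConvertPagesWithGuardSketch (
//     IN UINT64           Start,
//     IN UINTN            NumberOfPages,
//     IN EFI_MEMORY_TYPE  NewType
//     )
//   {
//     if (NewType == EfiConventionalMemory) {
//       // Freeing: adjust the range so shared Guard pages are handled.
//       AdjustMemoryF (&Start, &NumberOfPages);
//       if (NumberOfPages == 0) {
//         return EFI_SUCCESS;
//       }
//     } else {
//       // Allocating: adjust the range so Guard pages are accounted for.
//       AdjustMemoryA (&Start, &NumberOfPages);
//     }
//
//     return CoreConvertPages (Start, NumberOfPages, NewType);
//   }
//
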
// Adjust a free memory block [Start, Start + Size) for Guard pages when an
// allocation of SizeRequested is taken from it.
UINT64
AdjustMemoryS (
  IN UINT64  Start,
  IN UINT64  Size,
  IN UINT64  SizeRequested
  );

// Check whether pool allocations of the given memory type are to be guarded.
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  );

// Check whether page allocations of the given memory type and allocation
// type are to be guarded.
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType
  );

// Check whether the memory at the given address is guarded.
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

// Check whether the page at the given address is a Guard page.
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

// Dump the guarded memory bitmap (debug aid).
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  );

// Adjust the returned pool buffer address within the guarded pages at
// allocation time.
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  );

// Undo the head adjustment made by AdjustPoolHeadA() when the pool buffer is
// freed.
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  );

// Check whether the given Guard type(s) (GUARD_HEAP_TYPE_*) are enabled.
BOOLEAN
IsHeapGuardEnabled (
  UINT8  GuardType
  );

// Callback run once the CPU Arch Protocol is available, to apply page
// attributes to Guard pages set up earlier.
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  );

// Merge Guard pages (up to MaxAddress) into the given memory map entry.
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS   MaxAddress
  );

// Mark the given range of freed pages as guarded, if freed-memory guarding
// is enabled.
VOID
EFIAPI
GuardFreedPagesChecked (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  );

// Promote a range of guarded free pages back to usable free memory; returns
// TRUE and the promoted range on success.
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS  *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS  *EndAddress
  );

extern BOOLEAN  mOnGuarding;

//
// The heap guard system does not support non-EFI_PAGE_SIZE alignments.
// Architectures that require a larger RUNTIME_PAGE_ALLOCATION_GRANULARITY
// cannot have EfiRuntimeServicesCode, EfiRuntimeServicesData,
// EfiReservedMemoryType, and EfiACPIMemoryNVS guarded. OSes do not map guard
// pages anyway, so this is a minimal loss. Not guarding prevents alignment
// mismatches.
//
STATIC_ASSERT (
  RUNTIME_PAGE_ALLOCATION_GRANULARITY == EFI_PAGE_SIZE ||
  (((FixedPcdGet64 (PcdHeapGuardPageType) & 0x461) == 0) &&
   ((FixedPcdGet64 (PcdHeapGuardPoolType) & 0x461) == 0)),
  "Unsupported Heap Guard configuration on system with greater than EFI_PAGE_SIZE RUNTIME_PAGE_ALLOCATION_GRANULARITY"
  );

#endif