/** @file
  X64 processor-specific functions to enable the SMM profile feature:
  page-table construction for the S3 path and on-demand page-table
  creation/restoration in the SMM page fault handler.

  Part of TianoCore EDK2 (UefiCpuPkg, PiSmmCpu driver).
**/
11#include "PiSmmCpuCommon.h"
12#include "SmmProfileInternal.h"
13
14//
15// Current page index.
16//
17UINTN mPFPageIndex;
18
19//
20// Pool for dynamically creating page table in page fault handler.
21//
22UINT64 mPFPageBuffer;
23
24//
25// Store the uplink information for each page being used.
26//
27UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
28
35VOID
37 OUT UINTN *Cr3
38 )
39{
40 ASSERT (Cr3 != NULL);
41
42 //
43 // Generate level4 page table for the first 4GB memory space
44 // Return the address of PML4 (to set CR3)
45 //
46 *Cr3 = GenSmmPageTable (Paging4Level, 32);
47
48 return;
49}
50
55VOID
57 VOID
58 )
59{
60 VOID *Address;
61
62 //
63 // Pre-Allocate memory for page fault handler
64 //
65 Address = NULL;
66 Address = AllocatePages (MAX_PF_PAGE_COUNT);
67 ASSERT (Address != NULL);
68
69 mPFPageBuffer = (UINT64)(UINTN)Address;
70 mPFPageIndex = 0;
71 ZeroMem ((VOID *)(UINTN)mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
72 ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
73
74 return;
75}
76
83VOID
85 UINT64 *Uplink
86 )
87{
88 UINT64 Address;
89
90 //
91 // Get the buffer
92 //
93 Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
94 ZeroMem ((VOID *)(UINTN)Address, EFI_PAGE_SIZE);
95
96 //
97 // Cut the previous uplink if it exists and wasn't overwritten
98 //
99 if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
100 *mPFPageUplink[mPFPageIndex] = 0;
101 }
102
103 //
104 // Link & Record the current uplink
105 //
106 *Uplink = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
107 mPFPageUplink[mPFPageIndex] = Uplink;
108
109 mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
110}
111
116VOID
118 VOID
119 )
120{
121 UINT64 *PageTable;
122 UINT64 *PageTableTop;
123 UINT64 PFAddress;
124 UINTN StartBit;
125 UINTN EndBit;
126 UINT64 PTIndex;
127 UINTN Index;
128 SMM_PAGE_SIZE_TYPE PageSize;
129 UINTN NumOfPages;
130 UINTN PageAttribute;
131 EFI_STATUS Status;
132 UINT64 *UpperEntry;
133 BOOLEAN Enable5LevelPaging;
134 IA32_CR4 Cr4;
135
136 //
137 // Set default SMM page attribute
138 //
139 PageSize = SmmPageSize2M;
140 NumOfPages = 1;
141 PageAttribute = 0;
142
143 EndBit = 0;
144 PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
145 PFAddress = AsmReadCr2 ();
146
147 Cr4.UintN = AsmReadCr4 ();
148 Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);
149
150 Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
151 //
152 // If platform not support page table attribute, set default SMM page attribute
153 //
154 if (Status != EFI_SUCCESS) {
155 PageSize = SmmPageSize2M;
156 NumOfPages = 1;
157 PageAttribute = 0;
158 }
159
160 if (PageSize >= MaxSmmPageSizeType) {
161 PageSize = SmmPageSize2M;
162 }
163
164 if (NumOfPages > 512) {
165 NumOfPages = 512;
166 }
167
168 switch (PageSize) {
169 case SmmPageSize4K:
170 //
171 // BIT12 to BIT20 is Page Table index
172 //
173 EndBit = 12;
174 break;
175 case SmmPageSize2M:
176 //
177 // BIT21 to BIT29 is Page Directory index
178 //
179 EndBit = 21;
180 PageAttribute |= (UINTN)IA32_PG_PS;
181 break;
182 case SmmPageSize1G:
183 if (!m1GPageTableSupport) {
184 DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
185 ASSERT (FALSE);
186 }
187
188 //
189 // BIT30 to BIT38 is Page Directory Pointer Table index
190 //
191 EndBit = 30;
192 PageAttribute |= (UINTN)IA32_PG_PS;
193 break;
194 default:
195 ASSERT (FALSE);
196 }
197
198 //
199 // If execute-disable is enabled, set NX bit
200 //
201 if (mXdEnabled) {
202 PageAttribute |= IA32_PG_NX;
203 }
204
205 for (Index = 0; Index < NumOfPages; Index++) {
206 PageTable = PageTableTop;
207 UpperEntry = NULL;
208 for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > 12; StartBit -= 9) {
209 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
210
211 //
212 // Iterate through the page table to find the appropriate page table entry for page creation if one of the following cases is met:
213 // 1) StartBit > EndBit: The PageSize of current entry is bigger than the platform-specified PageSize granularity.
214 // 2) IA32_PG_P bit is 0 & IA32_PG_PS bit is not 0: The current entry is present and it's a non-leaf entry.
215 //
216 if ((StartBit > EndBit) || ((((PageTable[PTIndex] & IA32_PG_P) != 0) && ((PageTable[PTIndex] & IA32_PG_PS) == 0)))) {
217 if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
218 //
219 // If the entry is not present, allocate one page from page pool for it
220 //
221 PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
222 } else {
223 //
224 // Save the upper entry address
225 //
226 UpperEntry = PageTable + PTIndex;
227 }
228
229 //
230 // BIT9 to BIT11 of entry is used to save access record,
231 // initialize value is 7
232 //
233 PageTable[PTIndex] |= (UINT64)IA32_PG_A;
234 SetAccNum (PageTable + PTIndex, 7);
235 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
236 } else {
237 //
238 // Found the appropriate entry.
239 //
240 break;
241 }
242 }
243
244 PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
245
246 //
247 // Fill the new entry
248 //
249 PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << StartBit) - 1)) |
250 PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
251 if (UpperEntry != NULL) {
252 SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
253 }
254
255 //
256 // Get the next page address if we need to create more page tables
257 //
258 PFAddress += (1ull << StartBit);
259 }
260}
261
274VOID
276 UINT64 *PageTable,
277 UINT64 PFAddress,
278 UINTN CpuIndex,
279 UINTN ErrorCode,
280 BOOLEAN *IsValidPFAddress
281 )
282{
283 UINTN PTIndex;
284 UINT64 Address;
285 BOOLEAN Nx;
286 BOOLEAN Existed;
287 UINTN Index;
288 UINTN PFIndex;
289 IA32_CR4 Cr4;
290 BOOLEAN Enable5LevelPaging;
291
292 ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
293
294 Cr4.UintN = AsmReadCr4 ();
295 Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
296
297 //
298 // If page fault address is 4GB above.
299 //
300
301 //
302 // Check if page fault address has existed in page table.
303 // If it exists in page table but page fault is generated,
304 // there are 2 possible reasons: 1. present flag is set to 0; 2. instruction fetch in protected memory range.
305 //
306 Existed = FALSE;
307 PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
308 PTIndex = 0;
309 if (Enable5LevelPaging) {
310 PTIndex = BitFieldRead64 (PFAddress, 48, 56);
311 }
312
313 if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
314 // PML5E
315 if (Enable5LevelPaging) {
316 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
317 }
318
319 PTIndex = BitFieldRead64 (PFAddress, 39, 47);
320 if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
321 // PML4E
322 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
323 PTIndex = BitFieldRead64 (PFAddress, 30, 38);
324 if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
325 // PDPTE
326 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
327 PTIndex = BitFieldRead64 (PFAddress, 21, 29);
328 // PD
329 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
330 //
331 // 2MB page
332 //
333 Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
334 if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
335 Existed = TRUE;
336 }
337 } else {
338 //
339 // 4KB page
340 //
341 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
342 if (PageTable != 0) {
343 //
344 // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
345 //
346 PTIndex = BitFieldRead64 (PFAddress, 12, 20);
347 Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
348 if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
349 Existed = TRUE;
350 }
351 }
352 }
353 }
354 }
355 }
356
357 //
358 // If page entry does not existed in page table at all, create a new entry.
359 //
360 if (!Existed) {
361 if (IsSmmProfilePFAddressAbove4GValid (PFAddress, &Nx)) {
362 //
363 // If page fault address above 4GB is in protected range but it causes a page fault exception,
364 // Will create a page entry for this page fault address, make page table entry as present/rw and execution-disable.
365 // this access is not saved into SMM profile data.
366 //
367 *IsValidPFAddress = TRUE;
368 }
369
370 //
371 // Create one entry in page table for page fault address.
372 //
374 //
375 // Find the page table entry created just now.
376 //
377 PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
378 PFAddress = AsmReadCr2 ();
379 // PML5E
380 if (Enable5LevelPaging) {
381 PTIndex = BitFieldRead64 (PFAddress, 48, 56);
382 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
383 }
384
385 // PML4E
386 PTIndex = BitFieldRead64 (PFAddress, 39, 47);
387 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
388 // PDPTE
389 PTIndex = BitFieldRead64 (PFAddress, 30, 38);
390 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
391 // PD
392 PTIndex = BitFieldRead64 (PFAddress, 21, 29);
393 Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
394 //
395 // Check if 2MB-page entry need be changed to 4KB-page entry.
396 //
397 if (IsAddressSplit (Address)) {
398 AcquirePage (&PageTable[PTIndex]);
399
400 // PTE
401 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
402 for (Index = 0; Index < 512; Index++) {
403 PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
404 if (!IsSmmProfilePFAddressAbove4GValid (Address, &Nx)) {
405 PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
406 }
407
408 if (Nx && mXdSupported) {
409 PageTable[Index] = PageTable[Index] | IA32_PG_NX;
410 }
411
412 if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
413 PTIndex = Index;
414 }
415
416 Address += SIZE_4KB;
417 } // end for PT
418 } else {
419 //
420 // Update 2MB page entry.
421 //
422 if (!IsSmmProfilePFAddressAbove4GValid (Address, &Nx)) {
423 //
424 // Patch to remove present flag and rw flag.
425 //
426 PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
427 }
428
429 //
430 // Set XD bit to 1
431 //
432 if (Nx && mXdSupported) {
433 PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
434 }
435 }
436 }
437
438 //
439 // Record old entries with non-present status
440 // Old entries include the memory which instruction is at and the memory which instruction access.
441 //
442 //
443 ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
444 if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
445 PFIndex = mPFEntryCount[CpuIndex];
446 mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
447 mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
448 mPFEntryCount[CpuIndex]++;
449 }
450
451 //
452 // Add present flag or clear XD flag to make page fault handler succeed.
453 //
454 PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
455 if ((ErrorCode & IA32_PF_EC_ID) != 0) {
456 //
457 // If page fault is caused by instruction fetch, clear XD bit in the entry.
458 //
459 PageTable[PTIndex] &= ~IA32_PG_NX;
460 }
461
462 return;
463}
464
472VOID
474 IN OUT EFI_SYSTEM_CONTEXT SystemContext
475 )
476{
477 SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
478}
UINT64 UINTN
INT64 INTN
UINT64 EFIAPI BitFieldRead64(IN UINT64 Operand, IN UINTN StartBit, IN UINTN EndBit)
Definition: BitField.c:719
VOID *EFIAPI ZeroMem(OUT VOID *Buffer, IN UINTN Length)
UINTN EFIAPI AsmReadCr3(VOID)
UINTN EFIAPI AsmReadCr2(VOID)
UINTN EFIAPI AsmReadCr4(VOID)
UINT64 AllocPage(VOID)
Definition: PageTbl.c:76
VOID ClearTrapFlag(IN OUT EFI_SYSTEM_CONTEXT SystemContext)
VOID SmmProfileMapPFAddress(VOID)
VOID InitPagesForPFHandler(VOID)
VOID RestorePageTableAbove4G(UINT64 *PageTable, UINT64 PFAddress, UINTN CpuIndex, UINTN ErrorCode, BOOLEAN *IsValidPFAddress)
VOID InitSmmS3Cr3(OUT UINTN *Cr3)
#define NULL
Definition: Base.h:319
#define TRUE
Definition: Base.h:301
#define FALSE
Definition: Base.h:307
#define IN
Definition: Base.h:279
#define OUT
Definition: Base.h:284
#define DEBUG(Expression)
Definition: DebugLib.h:434
UINTN GenSmmPageTable(IN PAGING_MODE PagingMode, IN UINT8 PhysicalAddressBits)
VOID *EFIAPI AllocatePages(IN UINTN Pages)
SMM_PAGE_SIZE_TYPE
EFI_STATUS EFIAPI GetPlatformPageTableAttribute(IN UINT64 Address, IN OUT SMM_PAGE_SIZE_TYPE *PageSize, IN OUT UINTN *NumOfPages, IN OUT UINTN *PageAttribute)
BOOLEAN IsSmmProfilePFAddressAbove4GValid(IN EFI_PHYSICAL_ADDRESS Address, OUT BOOLEAN *Nx)
Definition: SmmProfile.c:328
BOOLEAN IsAddressSplit(IN EFI_PHYSICAL_ADDRESS Address)
Definition: SmmProfile.c:356
#define EFI_PAGES_TO_SIZE(Pages)
Definition: UefiBaseType.h:213
RETURN_STATUS EFI_STATUS
Definition: UefiBaseType.h:29
#define EFI_SUCCESS
Definition: UefiBaseType.h:112
UINT64 GetSubEntriesNum(IN UINT64 *Entry)
Definition: PageTbl.c:126
VOID SetAccNum(IN OUT UINT64 *Entry, IN UINT64 Acc)
Definition: PageTbl.c:333
VOID SetSubEntriesNum(IN OUT UINT64 *Entry, IN UINT64 SubEntryNum)
Definition: PageTbl.c:105
VOID AcquirePage(UINT64 *Uplink)