TianoCore EDK2 master
Loading...
Searching...
No Matches
PiSmmCpuCommon.c
Go to the documentation of this file.
1
12#include "PiSmmCpuCommon.h"
13
14//
15// SMM CPU Private Data structure that contains SMM Configuration Protocol
16// along with its supporting fields.
17//
// Statically initialized here; the NULL pointer members are populated later
// during driver initialization (see the allocation code further down in this
// file, e.g. gSmmCpuPrivate->Operation / CpuSaveStateSize / CpuSaveState).
18SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
19 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
20 NULL, // SmmCpuHandle
21 NULL, // Pointer to ProcessorInfo array
22 NULL, // Pointer to Operation array
23 NULL, // Pointer to CpuSaveStateSize array
24 NULL, // Pointer to CpuSaveState array
25 {
26 { 0 }
27 }, // SmmReservedSmramRegion
28 {
29 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
30 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
31 0, // SmmCoreEntryContext.NumberOfCpus
32 NULL, // SmmCoreEntryContext.CpuSaveStateSize
33 NULL // SmmCoreEntryContext.CpuSaveState
34 },
35 NULL, // SmmCoreEntry
36 {
// Self-referential initializer: the configuration protocol's reserved-region
// pointer refers back into this same structure's SmmReservedSmramRegion.
37 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
38 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
39 },
40 NULL, // pointer to Ap Wrapper Func array
41 { NULL, NULL }, // List_Entry for Tokens.
42};
43
//
// CPU hot-plug bookkeeping shared with the SMM CPU feature code.
// ApicId/SmBase arrays and ArrayLength are allocated/filled later during
// initialization; SmrrBase/SmrrSize are discovered via FindSmramInfo().
//
44CPU_HOT_PLUG_DATA mCpuHotPlugData = {
45 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
46 0, // Array Length of SmBase and APIC ID
47 NULL, // Pointer to APIC ID array
48 NULL, // Pointer to SMBASE array
49 0, // Reserved
50 0, // SmrrBase
51 0 // SmrrSize
52};
53
54//
55// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
56//
57SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
58
63
70};
71
79};
80
//
// Table of externally registered interrupt handlers, one slot per exception
// vector.
//
81EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
82
//
// Per-CPU "first SMI completed" flags (allocated to mMaxNumberOfCpus entries
// in the first-SMI-init routine below) and the BSP's local APIC ID captured
// there via GetApicId().
//
83volatile BOOLEAN *mSmmInitialized = NULL;
84UINT32 mBspApicId = 0;
85
86//
87// SMM stack information
88//
89UINTN mSmmStackArrayBase;
90UINTN mSmmStackArrayEnd;
91UINTN mSmmStackSize;
92
// Shadow-stack size (CET); mCetSupported defaults to TRUE and is cleared at
// init time when CPUID reports no CET_SS support.
93UINTN mSmmShadowStackSize;
94BOOLEAN mCetSupported = TRUE;
95
// Maximum CPUs resources are sized for (may exceed mNumberOfCpus when CPU
// hot plug is supported) vs. the number of CPUs actually present.
96UINTN mMaxNumberOfCpus = 0;
97UINTN mNumberOfCpus = 0;
98
99//
100// Global used to cache PCD for SMM Code Access Check enable
101//
102BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
103
104//
105// Global used to cache SMM Debug Agent Supported or not
106//
107BOOLEAN mSmmDebugAgentSupport = FALSE;
108
109//
110// Global copy of the PcdPteMemoryEncryptionAddressOrMask
111//
112UINT64 mAddressEncMask = 0;
113
114//
115// Spin lock used to serialize setting of SMM Code Access Check feature
116//
117SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
118
119//
120// Saved SMM ranges information
121//
122EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
123UINTN mSmmCpuSmramRangeCount;
124
// NOTE(review): presumably the platform's physical address width (CPUID-derived);
// not set anywhere in the visible portion of this file — confirm in full source.
125UINT8 mPhysicalAddressBits;
126
// Sets up the SMM IDT: allocates a page-aligned IDT with 32 exception gates,
// temporarily loads it (with interrupts disabled) so that
// InitializeCpuExceptionHandlers() populates gcSmiIdtr with default handlers,
// then restores the DXE IDT and interrupt state.
// NOTE(review): the function name line (doxygen line 132) was elided by the
// documentation scrape; presumably InitializeSmmIdt — confirm in full source.
131VOID
133 VOID
134 )
135{
136 EFI_STATUS Status;
137 BOOLEAN InterruptState;
138 IA32_DESCRIPTOR DxeIdtr;
139
140 //
141 // There are 32 (not 255) entries in it since only processor
142 // generated exceptions will be handled.
143 //
144 gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
145 //
146 // Allocate page aligned IDT, because it might be set as read only.
147 //
148 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
149 ASSERT (gcSmiIdtr.Base != 0);
150 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
151
152 //
153 // Disable Interrupt and save DXE IDT table
154 //
155 InterruptState = SaveAndDisableInterrupts ();
156 AsmReadIdtr (&DxeIdtr);
157 //
158 // Load SMM temporary IDT table
159 //
160 AsmWriteIdtr (&gcSmiIdtr);
161 //
162 // Setup SMM default exception handlers, SMM IDT table
163 // will be updated and saved in gcSmiIdtr
164 //
165 Status = InitializeCpuExceptionHandlers (NULL);
166 ASSERT_EFI_ERROR (Status);
167 //
168 // Restore DXE IDT table and CPU interrupt
169 //
170 AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
171 SetInterruptState (InterruptState);
172}
173
// Debug helper: given an instruction pointer, locates the containing PE/COFF
// image base and prints the faulting IP plus, when available, the module's
// PDB (debug file) name.
// NOTE(review): the function name line (doxygen line 181) was elided by the
// scrape; presumably DumpModuleInfoByIp — confirm in full source.
180VOID
182 IN UINTN CallerIpAddress
183 )
184{
185 UINTN Pe32Data;
186 VOID *PdbPointer;
187
188 //
189 // Find Image Base
190 //
191 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
192 if (Pe32Data != 0) {
193 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
194 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
195 if (PdbPointer != NULL) {
196 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
197 }
198 }
199}
200
// Reads a register from the specified CPU's SMM save state.
// Validates CpuIndex/Buffer, handles the PROCESSOR_ID pseudo-register
// specially (64-bit only; EFI_NOT_FOUND when the CPU is not in SMM), then
// delegates to MmSaveStateReadRegister().
// NOTE(review): the scrape elided the return type / function name (doxygen
// lines 217-218, presumably SmmReadSaveState) and the Register parameter
// declaration (line 220) — confirm against full source.
216EFIAPI
219 IN UINTN Width,
221 IN UINTN CpuIndex,
222 OUT VOID *Buffer
223 )
224{
225 EFI_STATUS Status;
226
227 //
228 // Retrieve pointer to the specified CPU's SMM Save State buffer
229 //
230 if ((CpuIndex >= gMmst->NumberOfCpus) || (Buffer == NULL)) {
231 return EFI_INVALID_PARAMETER;
232 }
233
234 //
235 // The SpeculationBarrier() call here is to ensure the above check for the
236 // CpuIndex has been completed before the execution of subsequent codes.
237 //
// NOTE(review): the SpeculationBarrier() call itself (doxygen line 238) was
// elided by the scrape.
239
240 //
241 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
242 //
243 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
244 //
245 // The pseudo-register only supports the 64-bit size specified by Width.
246 //
247 if (Width != sizeof (UINT64)) {
248 return EFI_INVALID_PARAMETER;
249 }
250
251 //
252 // If the processor is in SMM at the time the SMI occurred,
253 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
254 // Otherwise, EFI_NOT_FOUND is returned.
255 //
256 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
257 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
258 return EFI_SUCCESS;
259 } else {
260 return EFI_NOT_FOUND;
261 }
262 }
263
// Reject CPUs that are not currently present in SMM (Present is a pointer to
// a per-CPU flag; it is dereferenced here).
264 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
265 return EFI_INVALID_PARAMETER;
266 }
267
268 Status = MmSaveStateReadRegister (CpuIndex, Register, Width, Buffer);
269
270 return Status;
271}
272
// Writes a register into the specified CPU's SMM save state.
// Validates CpuIndex/Buffer, silently ignores writes to the PROCESSOR_ID
// pseudo-register, then delegates to MmSaveStateWriteRegister().
// NOTE(review): the scrape elided the return type / function name (doxygen
// lines 289-290, presumably SmmWriteSaveState) and the Register parameter
// declaration (line 292) — confirm against full source.
288EFIAPI
291 IN UINTN Width,
293 IN UINTN CpuIndex,
294 IN CONST VOID *Buffer
295 )
296{
297 EFI_STATUS Status;
298
299 //
300 // Retrieve pointer to the specified CPU's SMM Save State buffer
301 //
302 if ((CpuIndex >= gMmst->NumberOfCpus) || (Buffer == NULL)) {
303 return EFI_INVALID_PARAMETER;
304 }
305
306 //
307 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
308 //
309 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
310 return EFI_SUCCESS;
311 }
312
// NOTE(review): unlike the read path, which dereferences the per-CPU flag as
// *(mSmmMpSyncData->CpuData[CpuIndex].Present), this tests the Present
// pointer itself. If Present is a pointer to a BOOLEAN (as the read path
// implies), this only checks that the flag storage exists, not that the CPU
// is actually in SMM — confirm whether a dereference was intended.
313 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
314 return EFI_INVALID_PARAMETER;
315 }
316
317 Status = MmSaveStateWriteRegister (CpuIndex, Register, Width, Buffer);
318
319 return Status;
320}
321
// Per-CPU first-SMI initialization: identifies the executing CPU by APIC ID,
// runs SMM feature initialization for it, performs normal-boot feature checks
// (XD/BTS) or S3-resume BSP work, and records MP performance markers.
// ASSERTs if the executing CPU's APIC ID is not found in ProcessorInfo.
// NOTE(review): the function name line (doxygen line 327, presumably
// InitializeSmm) was elided by the scrape.
326VOID
328 VOID
329 )
330{
331 UINT32 ApicId;
332 UINTN Index;
333 BOOLEAN IsBsp;
334
335 ApicId = GetApicId ();
336
// The BSP recorded its APIC ID before broadcasting the init SMI; compare to
// decide whether this CPU is the BSP.
337 IsBsp = (BOOLEAN)(mBspApicId == ApicId);
338
339 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
340
341 for (Index = 0; Index < mNumberOfCpus; Index++) {
342 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
343 PERF_CODE (
344 MpPerfBegin (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));
345 );
346 //
347 // Initialize SMM specific features on the currently executing CPU
348 //
// NOTE(review): the call's function name (doxygen line 349, presumably
// SmmCpuFeaturesInitializeProcessor) was elided by the scrape.
350 Index,
351 IsBsp,
352 gSmmCpuPrivate->ProcessorInfo,
353 &mCpuHotPlugData
354 );
355
356 if (!mSmmS3Flag) {
357 //
358 // Check XD and BTS features on each processor on normal boot
359 //
360 CheckFeatureSupported (Index);
361
362 if (mIsStandaloneMm) {
363 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
364
365 //
366 // Standalone MM does not allow call out to DXE at anytime.
367 // Code Access check can be enabled in the first SMI.
368 // While SMM needs to defer the enabling to EndOfDxe.
369 //
370 // Enable SMM Code Access Check feature.
371 //
// NOTE(review): the enabling call (doxygen line 372) was elided by the scrape.
373 }
374 } else if (IsBsp) {
375 //
376 // BSP rebase is already done above.
377 // Initialize private data during S3 resume
378 //
// NOTE(review): the S3 init call (doxygen line 379) was elided by the scrape.
380 }
381
382 PERF_CODE (
383 MpPerfEnd (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));
384 );
385
386 return;
387 }
388 }
389
// Executing CPU's APIC ID was not found in the processor table — fatal.
390 ASSERT (FALSE);
391}
392
// Orchestrates the first SMI initialization for all processors: allocates and
// clears the per-CPU mSmmInitialized flags, initializes the code-access-check
// spin lock, records the BSP APIC ID, broadcasts the init SMI IPI, and
// busy-waits until every CPU has completed its first SMI.
// NOTE(review): the function name line (doxygen line 398, presumably
// ExecuteFirstSmiInit) and several interior lines (404, 412, 435, 445 —
// likely perf markers / an SMI-IPI-all call) were elided by the scrape.
397VOID
399 VOID
400 )
401{
402 UINTN Index;
403
405
406 if (mSmmInitialized == NULL) {
407 mSmmInitialized = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
408 }
409
410 ASSERT (mSmmInitialized != NULL);
411 if (mSmmInitialized == NULL) {
413 return;
414 }
415
416 //
417 // Reset the mSmmInitialized to false.
418 //
419 ZeroMem ((VOID *)mSmmInitialized, sizeof (BOOLEAN) * mMaxNumberOfCpus);
420
421 //
422 // Initialize the lock used to serialize the MSR programming in BSP and all APs
423 //
424 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
425
426 //
427 // Get the BSP ApicId.
428 //
429 mBspApicId = GetApicId ();
430
431 //
432 // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) for SMM init
433 //
434 SendSmiIpi (mBspApicId);
436
437 //
438 // Wait for all processors to finish its 1st SMI
439 //
// Busy-wait on the volatile per-CPU flags set by each CPU's first-SMI handler.
440 for (Index = 0; Index < mNumberOfCpus; Index++) {
441 while (!(BOOLEAN)mSmmInitialized[Index]) {
442 }
443 }
444
446}
447
// QuickSort comparator for SMM_BASE_HOB_DATA pointers: orders ascending by
// ProcessorIndex. Buffer1/Buffer2 point at SMM_BASE_HOB_DATA* elements.
// Returns 1 / -1 / 0 for greater / less / equal.
// NOTE(review): the function name line (doxygen line 460, presumably
// SmBaseHobCompare) was elided by the scrape.
458INTN
459EFIAPI
461 IN CONST VOID *Buffer1,
462 IN CONST VOID *Buffer2
463 )
464{
465 if ((*(SMM_BASE_HOB_DATA **)Buffer1)->ProcessorIndex > (*(SMM_BASE_HOB_DATA **)Buffer2)->ProcessorIndex) {
466 return 1;
467 } else if ((*(SMM_BASE_HOB_DATA **)Buffer1)->ProcessorIndex < (*(SMM_BASE_HOB_DATA **)Buffer2)->ProcessorIndex) {
468 return -1;
469 }
470
471 return 0;
472}
473
// Collects the relocated SMBASE addresses for all CPUs from the chain of
// gSmmBaseHobGuid HOBs: counts HOBs until they cover MaxNumberOfCpus CPUs
// (dead-loops on mismatch), caches the HOB pointers, sorts them by
// ProcessorIndex, and flattens the per-HOB SmBase arrays into a single
// caller-owned UINTN array (*AllocatedSmBaseBuffer, freed by the caller).
// Returns EFI_NOT_FOUND if no HOB exists, EFI_OUT_OF_RESOURCES on allocation
// failure, EFI_SUCCESS otherwise.
// NOTE(review): the return type / function name lines (doxygen 488-489,
// presumably EFI_STATUS GetSmBase) were elided by the scrape.
487STATIC
490 IN UINTN MaxNumberOfCpus,
491 OUT UINTN **AllocatedSmBaseBuffer
492 )
493{
494 UINTN HobCount;
495 EFI_HOB_GUID_TYPE *GuidHob;
496 SMM_BASE_HOB_DATA *SmmBaseHobData;
497 UINTN NumberOfProcessors;
498 SMM_BASE_HOB_DATA **SmBaseHobs;
499 UINTN *SmBaseBuffer;
500 UINTN HobIndex;
501 UINTN SortBuffer;
502 UINTN ProcessorIndex;
503 UINT64 PrevProcessorIndex;
504 EFI_HOB_GUID_TYPE *FirstSmmBaseGuidHob;
505
506 SmmBaseHobData = NULL;
507 HobIndex = 0;
508 ProcessorIndex = 0;
509 HobCount = 0;
510 NumberOfProcessors = 0;
511
512 FirstSmmBaseGuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
513 if (FirstSmmBaseGuidHob == NULL) {
514 return EFI_NOT_FOUND;
515 }
516
// First pass: count HOBs and accumulate the CPU coverage until it reaches
// MaxNumberOfCpus.
517 GuidHob = FirstSmmBaseGuidHob;
518 while (GuidHob != NULL) {
519 HobCount++;
520 SmmBaseHobData = GET_GUID_HOB_DATA (GuidHob);
521 NumberOfProcessors += SmmBaseHobData->NumberOfProcessors;
522
523 if (NumberOfProcessors >= MaxNumberOfCpus) {
524 break;
525 }
526
527 GuidHob = GetNextGuidHob (&gSmmBaseHobGuid, GET_NEXT_HOB (GuidHob));
528 }
529
// The HOBs must cover exactly MaxNumberOfCpus; anything else is fatal.
530 ASSERT (NumberOfProcessors == MaxNumberOfCpus);
531 if (NumberOfProcessors != MaxNumberOfCpus) {
532 CpuDeadLoop ();
533 }
534
535 SmBaseHobs = AllocatePool (sizeof (SMM_BASE_HOB_DATA *) * HobCount);
536 if (SmBaseHobs == NULL) {
537 return EFI_OUT_OF_RESOURCES;
538 }
539
540 //
541 // Record each SmmBaseHob pointer in the SmBaseHobs.
542 // The FirstSmmBaseGuidHob is to speed up this while-loop
543 // without needing to look for SmBaseHob from beginning.
544 //
545 GuidHob = FirstSmmBaseGuidHob;
546 while (HobIndex < HobCount) {
547 SmBaseHobs[HobIndex++] = GET_GUID_HOB_DATA (GuidHob);
548 GuidHob = GetNextGuidHob (&gSmmBaseHobGuid, GET_NEXT_HOB (GuidHob));
549 }
550
551 SmBaseBuffer = (UINTN *)AllocatePool (sizeof (UINTN) * (MaxNumberOfCpus));
552 ASSERT (SmBaseBuffer != NULL);
553 if (SmBaseBuffer == NULL) {
554 FreePool (SmBaseHobs);
555 return EFI_OUT_OF_RESOURCES;
556 }
557
// Sort HOB pointers by ProcessorIndex; SortBuffer is scratch space for the
// sort's element swaps (one UINTN, matching the pointer element size).
558 QuickSort (SmBaseHobs, HobCount, sizeof (SMM_BASE_HOB_DATA *), (BASE_SORT_COMPARE)SmBaseHobCompare, &SortBuffer);
559 PrevProcessorIndex = 0;
560 for (HobIndex = 0; HobIndex < HobCount; HobIndex++) {
561 //
562 // Make sure no overlap and no gap in the CPU range covered by each HOB
563 //
564 ASSERT (SmBaseHobs[HobIndex]->ProcessorIndex == PrevProcessorIndex);
565
566 //
567 // Cache each SmBase in order.
568 //
569 for (ProcessorIndex = 0; ProcessorIndex < SmBaseHobs[HobIndex]->NumberOfProcessors; ProcessorIndex++) {
570 SmBaseBuffer[PrevProcessorIndex + ProcessorIndex] = (UINTN)SmBaseHobs[HobIndex]->SmBase[ProcessorIndex];
571 }
572
573 PrevProcessorIndex += SmBaseHobs[HobIndex]->NumberOfProcessors;
574 }
575
576 FreePool (SmBaseHobs);
577 *AllocatedSmBaseBuffer = SmBaseBuffer;
578 return EFI_SUCCESS;
579}
580
// QuickSort comparator for MP_INFORMATION2_HOB_DATA pointers: orders
// ascending by ProcessorIndex. Buffer1/Buffer2 point at
// MP_INFORMATION2_HOB_DATA* elements. Returns 1 / -1 / 0.
// NOTE(review): the function name line (doxygen line 593, presumably
// MpInformation2HobCompare) was elided by the scrape.
591INTN
592EFIAPI
594 IN CONST VOID *Buffer1,
595 IN CONST VOID *Buffer2
596 )
597{
598 if ((*(MP_INFORMATION2_HOB_DATA **)Buffer1)->ProcessorIndex > (*(MP_INFORMATION2_HOB_DATA **)Buffer2)->ProcessorIndex) {
599 return 1;
600 } else if ((*(MP_INFORMATION2_HOB_DATA **)Buffer1)->ProcessorIndex < (*(MP_INFORMATION2_HOB_DATA **)Buffer2)->ProcessorIndex) {
601 return -1;
602 }
603
604 return 0;
605}
606
// Builds the EFI_PROCESSOR_INFORMATION array for all CPUs from the chain of
// gMpInformation2HobGuid HOBs (falling back to the MP Services protocol when
// no HOB exists and we are not Standalone MM). Outputs the present CPU count
// and the maximum CPU count (inflated to the platform max when CPU hot plug
// is supported). Returns a caller-owned array, or NULL on allocation failure.
// NOTE(review): the doc comment, return type, and function name (doxygen
// lines before 617, presumably EFI_PROCESSOR_INFORMATION *GetMpInformation)
// were elided by the scrape — confirm in full source.
617 OUT UINTN *NumberOfCpus,
618 OUT UINTN *MaxNumberOfCpus
619 )
620{
621 EFI_HOB_GUID_TYPE *GuidHob;
622 EFI_HOB_GUID_TYPE *FirstMpInfo2Hob;
623 MP_INFORMATION2_HOB_DATA *MpInformation2HobData;
624 UINTN HobCount;
625 UINTN HobIndex;
626 MP_INFORMATION2_HOB_DATA **MpInfo2Hobs;
627 UINTN SortBuffer;
628 UINTN ProcessorIndex;
629 UINT64 PrevProcessorIndex;
630 MP_INFORMATION2_ENTRY *MpInformation2Entry;
631 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
632
633 GuidHob = NULL;
634 MpInformation2HobData = NULL;
635 FirstMpInfo2Hob = NULL;
636 MpInfo2Hobs = NULL;
637 HobIndex = 0;
638 HobCount = 0;
639
640 FirstMpInfo2Hob = GetFirstGuidHob (&gMpInformation2HobGuid);
641
642 if (mIsStandaloneMm) {
643 ASSERT (FirstMpInfo2Hob != NULL);
644 } else {
645 if (FirstMpInfo2Hob == NULL) {
646 DEBUG ((DEBUG_INFO, "%a: [INFO] gMpInformation2HobGuid HOB not found.\n", __func__));
647 return GetMpInformationFromMpServices (NumberOfCpus, MaxNumberOfCpus);
648 }
649 }
650
// First pass: count HOBs and accumulate the CPU count.
// NOTE(review): *NumberOfCpus is accumulated with += but is not zeroed in the
// visible code — presumably initialized by the caller or on an elided line;
// confirm in full source.
651 GuidHob = FirstMpInfo2Hob;
652 while (GuidHob != NULL) {
653 MpInformation2HobData = GET_GUID_HOB_DATA (GuidHob);
654
655 //
656 // This is the last MpInformationHob in the HOB list.
657 //
658 if (MpInformation2HobData->NumberOfProcessors == 0) {
659 ASSERT (HobCount != 0);
660 break;
661 }
662
663 HobCount++;
664 *NumberOfCpus += MpInformation2HobData->NumberOfProcessors;
665 GuidHob = GetNextGuidHob (&gMpInformation2HobGuid, GET_NEXT_HOB (GuidHob));
666 }
667
668 *MaxNumberOfCpus = *NumberOfCpus;
669
670 if (!mIsStandaloneMm) {
671 ASSERT (*NumberOfCpus <= GetSupportedMaxLogicalProcessorNumber ());
672
673 //
674 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
675 //
676 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
677 *MaxNumberOfCpus = GetSupportedMaxLogicalProcessorNumber ();
678 }
679 }
680
681 MpInfo2Hobs = AllocatePool (sizeof (MP_INFORMATION2_HOB_DATA *) * HobCount);
682 ASSERT (MpInfo2Hobs != NULL);
683 if (MpInfo2Hobs == NULL) {
684 return NULL;
685 }
686
687 //
688 // Record each MpInformation2Hob pointer in the MpInfo2Hobs.
689 // The FirstMpInfo2Hob is to speed up this while-loop without
690 // needing to look for MpInfo2Hob from beginning.
691 //
692 GuidHob = FirstMpInfo2Hob;
693 while (HobIndex < HobCount) {
694 MpInfo2Hobs[HobIndex++] = GET_GUID_HOB_DATA (GuidHob);
695 GuidHob = GetNextGuidHob (&gMpInformation2HobGuid, GET_NEXT_HOB (GuidHob));
696 }
697
698 ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * (*MaxNumberOfCpus));
699 ASSERT (ProcessorInfo != NULL);
700 if (ProcessorInfo == NULL) {
701 FreePool (MpInfo2Hobs);
702 return NULL;
703 }
704
// Sort HOB pointers by ProcessorIndex; SortBuffer is the sort's swap scratch.
705 QuickSort (MpInfo2Hobs, HobCount, sizeof (MP_INFORMATION2_HOB_DATA *), (BASE_SORT_COMPARE)MpInformation2HobCompare, &SortBuffer);
706 PrevProcessorIndex = 0;
707 for (HobIndex = 0; HobIndex < HobCount; HobIndex++) {
708 //
709 // Make sure no overlap and no gap in the CPU range covered by each HOB
710 //
711 ASSERT (MpInfo2Hobs[HobIndex]->ProcessorIndex == PrevProcessorIndex);
712
713 //
714 // Cache each EFI_PROCESSOR_INFORMATION in order.
715 //
716 for (ProcessorIndex = 0; ProcessorIndex < MpInfo2Hobs[HobIndex]->NumberOfProcessors; ProcessorIndex++) {
717 MpInformation2Entry = GET_MP_INFORMATION_ENTRY (MpInfo2Hobs[HobIndex], ProcessorIndex);
718 CopyMem (
719 &ProcessorInfo[PrevProcessorIndex + ProcessorIndex],
720 &MpInformation2Entry->ProcessorInfo,
// NOTE(review): the CopyMem size argument (doxygen line 721, presumably
// sizeof (EFI_PROCESSOR_INFORMATION)) was elided by the scrape.
722 );
723 }
724
725 PrevProcessorIndex += MpInfo2Hobs[HobIndex]->NumberOfProcessors;
726 }
727
728 FreePool (MpInfo2Hobs);
729 return ProcessorInfo;
730}
731
741 VOID
742 )
743{
744 EFI_STATUS Status;
745 UINTN Index;
746 UINTN TileCodeSize;
747 UINTN TileDataSize;
748 UINTN TileSize;
749 UINT8 *Stacks;
750 UINT32 RegEax;
751 UINT32 RegEbx;
752 UINT32 RegEcx;
753 UINT32 RegEdx;
754 CPUID_EXTENDED_CPU_SIG_EDX ExtendedRegEdx;
755 UINTN FamilyId;
756 UINTN ModelId;
757 UINT32 Cr3;
758
760
761 //
762 // Initialize address fixup
763 //
765
766 //
767 // Initialize Debug Agent to support source level debug in SMM code
768 //
769 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, &mSmmDebugAgentSupport, NULL);
770
771 //
772 // Report the start of CPU SMM initialization.
773 //
776 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
777 );
778
779 //
780 // Find out SMRR Base and SMRR Size
781 //
782 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
783
784 //
785 // Retrieve NumberOfProcessors, MaxNumberOfCpus and EFI_PROCESSOR_INFORMATION for all CPU from MpInformation2 HOB.
786 //
787 gSmmCpuPrivate->ProcessorInfo = GetMpInformation (&mNumberOfCpus, &mMaxNumberOfCpus);
788 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
789
790 //
791 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
792 // A constant BSP index makes no sense because it may be hot removed.
793 //
795 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
796 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
797 }
798
800
801 //
802 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
803 //
804 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
805 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
806
807 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
808
809 PERF_CODE (
810 InitializeMpPerf (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
811 );
812
813 //
814 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
815 // allocated buffer. The minimum size of this buffer for a uniprocessor system
816 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
817 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
818 // then the SMI entry point and the CPU save state areas can be tiles to minimize
819 // the total amount SMRAM required for all the CPUs. The tile size can be computed
820 // by adding the // CPU save state size, any extra CPU specific context, and
821 // the size of code that must be placed at the SMI entry point to transfer
822 // control to a C function in the native SMM execution mode. This size is
823 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
824 // The total amount of memory required is the maximum number of CPUs that
825 // platform supports times the tile size. The picture below shows the tiling,
826 // where m is the number of tiles that fit in 32KB.
827 //
828 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
829 // | CPU m+1 Save State |
830 // +-----------------------------+
831 // | CPU m+1 Extra Data |
832 // +-----------------------------+
833 // | Padding |
834 // +-----------------------------+
835 // | CPU 2m SMI Entry |
836 // +#############################+ <-- Base of allocated buffer + 64 KB
837 // | CPU m-1 Save State |
838 // +-----------------------------+
839 // | CPU m-1 Extra Data |
840 // +-----------------------------+
841 // | Padding |
842 // +-----------------------------+
843 // | CPU 2m-1 SMI Entry |
844 // +=============================+ <-- 2^n offset from Base of allocated buffer
845 // | . . . . . . . . . . . . |
846 // +=============================+ <-- 2^n offset from Base of allocated buffer
847 // | CPU 2 Save State |
848 // +-----------------------------+
849 // | CPU 2 Extra Data |
850 // +-----------------------------+
851 // | Padding |
852 // +-----------------------------+
853 // | CPU m+1 SMI Entry |
854 // +=============================+ <-- Base of allocated buffer + 32 KB
855 // | CPU 1 Save State |
856 // +-----------------------------+
857 // | CPU 1 Extra Data |
858 // +-----------------------------+
859 // | Padding |
860 // +-----------------------------+
861 // | CPU m SMI Entry |
862 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
863 // | CPU 0 Save State |
864 // +-----------------------------+
865 // | CPU 0 Extra Data |
866 // +-----------------------------+
867 // | Padding |
868 // +-----------------------------+
869 // | CPU m-1 SMI Entry |
870 // +=============================+ <-- 2^n offset from Base of allocated buffer
871 // | . . . . . . . . . . . . |
872 // +=============================+ <-- 2^n offset from Base of allocated buffer
873 // | Padding |
874 // +-----------------------------+
875 // | CPU 1 SMI Entry |
876 // +=============================+ <-- 2^n offset from Base of allocated buffer
877 // | Padding |
878 // +-----------------------------+
879 // | CPU 0 SMI Entry |
880 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
881 //
882
883 //
884 // Retrieve CPU Family
885 //
887 FamilyId = (RegEax >> 8) & 0xf;
888 ModelId = (RegEax >> 4) & 0xf;
889 if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
890 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
891 }
892
893 RegEdx = 0;
894 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
895 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
897 }
898
899 //
900 // Determine the mode of the CPU at the time an SMI occurs
901 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
902 // Volume 3C, Section 34.4.1.1
903 //
904 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
905 if ((RegEdx & BIT29) != 0) {
906 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
907 }
908
909 if (FamilyId == 0x06) {
910 if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
911 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
912 }
913 }
914
915 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
916 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
917 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
920 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
921 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
922 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
923 if ((RegEcx & CPUID_CET_SS) == 0) {
924 mCetSupported = FALSE;
925 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
926 }
927
928 if (mCetSupported) {
930 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
931 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
932 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
933 AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
934 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
935 }
936 } else {
937 mCetSupported = FALSE;
938 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
939 }
940 } else {
941 mCetSupported = FALSE;
942 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
943 }
944
945 //
946 // Check XD supported or not.
947 //
948 RegEax = 0;
949 ExtendedRegEdx.Uint32 = 0;
950 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
951 if (RegEax <= CPUID_EXTENDED_FUNCTION) {
952 //
953 // Extended CPUID functions are not supported on this processor.
954 //
955 mXdSupported = FALSE;
956 PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
957 }
958
959 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &ExtendedRegEdx.Uint32);
960 if (ExtendedRegEdx.Bits.NX == 0) {
961 //
962 // Execute Disable Bit feature is not supported on this processor.
963 //
964 mXdSupported = FALSE;
965 PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
966 }
967
969 //
970 // AMD processors do not support MSR_IA32_MISC_ENABLE
971 //
972 PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
973 }
974
975 //
976 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
977 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
978 // This size is rounded up to nearest power of 2.
979 //
980 TileCodeSize = GetSmiHandlerSize ();
981 TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
982 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
983 TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
984 TileSize = TileDataSize + TileCodeSize - 1;
985 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
986 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
987
988 //
989 // If the TileSize is larger than space available for the SMI Handler of
990 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
991 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
992 // the SMI Handler size must be reduced or the size of the extra CPU specific
993 // context must be reduced.
994 //
995 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
996
997 //
998 // Check whether the Required TileSize is enough.
999 //
1000 if (TileSize > SIZE_8KB) {
1001 DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));
1002 FreePool (gSmmCpuPrivate->ProcessorInfo);
1003 CpuDeadLoop ();
1005 }
1006
1007 //
1008 // Retrieve the allocated SmmBase from gSmmBaseHobGuid. If found,
1009 // means the SmBase relocation has been done.
1010 //
1011 mCpuHotPlugData.SmBase = NULL;
1012 Status = GetSmBase (mMaxNumberOfCpus, &mCpuHotPlugData.SmBase);
1013 ASSERT (!EFI_ERROR (Status));
1014 if (EFI_ERROR (Status)) {
1015 CpuDeadLoop ();
1016 }
1017
1018 //
1019 // ASSERT SmBase has been relocated.
1020 //
1021 ASSERT (mCpuHotPlugData.SmBase != NULL);
1022
1023 //
1024 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
1025 //
1026 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
1027 ASSERT (gSmmCpuPrivate->Operation != NULL);
1028
1029 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
1030 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
1031
1032 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
1033 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
1034
1035 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
1036 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
1037
1038 //
1039 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
1040 //
1041 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
1042 ASSERT (mCpuHotPlugData.ApicId != NULL);
1043 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
1044
1045 //
1046 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
1047 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
1048 // size for each CPU in the platform
1049 //
1050 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1051 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
1052 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
1053 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
1054
1055 if (Index < mNumberOfCpus) {
1056 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
1057
1058 DEBUG ((
1059 DEBUG_INFO,
1060 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
1061 Index,
1062 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
1063 mCpuHotPlugData.SmBase[Index],
1064 gSmmCpuPrivate->CpuSaveState[Index],
1065 gSmmCpuPrivate->CpuSaveStateSize[Index]
1066 ));
1067 } else {
1068 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
1069 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
1070 }
1071 }
1072
1073 //
1074 // Allocate SMI stacks for all processors.
1075 //
1076 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
1077 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1078 //
1079 // SMM Stack Guard Enabled
1080 // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.
1081 //
1082 // +--------------------------------------------------+-----+--------------------------------------------------+
1083 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
1084 // +--------------------------------------------------+-----+--------------------------------------------------+
1085 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
1086 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
1087 // | | | |
1088 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
1089 //
1090 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
1091 }
1092
1093 mSmmShadowStackSize = 0;
1094 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1095 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
1096
1097 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1098 //
1099 // SMM Stack Guard Enabled
1100 // Append Shadow Stack after normal stack
1101 // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
1102 //
1103 // |= Stacks
1104 // +--------------------------------------------------+---------------------------------------------------------------+
1105 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
1106 // +--------------------------------------------------+---------------------------------------------------------------+
1107 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
1108 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
1109 // | |
1110 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
1111 //
1112 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
1113 } else {
1114 //
1115 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)
1116 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.
1117 // 1 more pages is allocated for each processor, it is known good stack.
1118 //
1119 //
1120 // |= Stacks
1121 // +-------------------------------------+--------------------------------------------------+
1122 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
1123 // +-------------------------------------+--------------------------------------------------+
1124 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
1125 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
1126 // | |
1127 // |<-------------------------------- Processor N ----------------------------------------->|
1128 //
1129 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
1130 mSmmStackSize += EFI_PAGES_TO_SIZE (1);
1131 }
1132 }
1133
1134 Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
1135 ASSERT (Stacks != NULL);
1136 mSmmStackArrayBase = (UINTN)Stacks;
1137 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
1138
1139 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
1140 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
1141 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
1142 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1143 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
1144 }
1145
1146 //
1147 // Initialize IDT
1148 //
1150
1151 //
1152 // SMM Time initialization
1153 //
1155
1156 //
1157 // Initialize mSmmProfileEnabled
1158 //
1159 mSmmProfileEnabled = IsSmmProfileEnabled ();
1160
1161 //
1162 // Initialize MP globals
1163 //
1164 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
1165
1166 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1167 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
1169 Cr3,
1170 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
1171 mSmmShadowStackSize
1172 );
1173 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1175 Cr3,
1176 mPagingMode,
1177 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
1179 EFI_MEMORY_RP,
1180 TRUE,
1181 NULL
1182 );
1183 }
1184 }
1185 }
1186
1187 //
1188 // For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.
1189 // Those MSRs & CSRs must be configured before normal SMI sources happen.
1190 // So, here is to issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
1191 //
1193
1194 //
1195 // Call hook for BSP to perform extra actions in normal mode after all
1196 // SMM base addresses have been relocated on all CPUs
1197 //
1199
1200 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
1201
1202 //
1203 // Fill in SMM Reserved Regions
1204 //
1205 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
1206 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
1207
1208 //
1209 // Install the SMM CPU Protocol into SMM protocol database
1210 //
1211 Status = gMmst->MmInstallProtocolInterface (
1213 &gEfiSmmCpuProtocolGuid,
1215 &mSmmCpu
1216 );
1217 ASSERT_EFI_ERROR (Status);
1218
1219 //
1220 // Install the SMM Memory Attribute Protocol into SMM protocol database
1221 //
1222 Status = gMmst->MmInstallProtocolInterface (
1224 &gEdkiiSmmMemoryAttributeProtocolGuid,
1227 );
1228 ASSERT_EFI_ERROR (Status);
1229
1230 //
1231 // Initialize global buffer for MM MP.
1232 //
1234
1235 //
1236 // Initialize Package First Thread Index Info.
1237 //
1239
1240 //
1241 // Install the SMM Mp Protocol into SMM protocol database
1242 //
1243 Status = gMmst->MmInstallProtocolInterface (
1245 &gEfiMmMpProtocolGuid,
1247 &mSmmMp
1248 );
1249 ASSERT_EFI_ERROR (Status);
1250
1251 //
1252 // Initialize SMM CPU Services Support
1253 //
1255 ASSERT_EFI_ERROR (Status);
1256
1257 //
1258 // Initialize SMM Profile feature
1259 //
1260 InitSmmProfile (Cr3);
1261
1264
1265 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1266
1268 return EFI_SUCCESS;
1269}
1270
1281INTN
1282EFIAPI
1284 IN CONST VOID *Buffer1,
1285 IN CONST VOID *Buffer2
1286 )
1287{
1288 if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart > ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
1289 return 1;
1290 } else if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart < ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
1291 return -1;
1292 }
1293
1294 return 0;
1295}
1296
1304VOID
1306 OUT UINT32 *SmrrBase,
1307 OUT UINT32 *SmrrSize
1308 )
1309{
1310 VOID *GuidHob;
1311 EFI_SMRAM_HOB_DESCRIPTOR_BLOCK *DescriptorBlock;
1312 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1313 UINTN Index;
1314 UINT64 MaxSize;
1315 BOOLEAN Found;
1316 EFI_SMRAM_DESCRIPTOR SmramDescriptor;
1317
1318 ASSERT (SmrrBase != NULL && SmrrSize != NULL);
1319
1320 //
1321 // Get SMRAM information
1322 //
1323 GuidHob = GetFirstGuidHob (&gEfiSmmSmramMemoryGuid);
1324 ASSERT (GuidHob != NULL);
1325 DescriptorBlock = (EFI_SMRAM_HOB_DESCRIPTOR_BLOCK *)GET_GUID_HOB_DATA (GuidHob);
1326 mSmmCpuSmramRangeCount = DescriptorBlock->NumberOfSmmReservedRegions;
1327 mSmmCpuSmramRanges = DescriptorBlock->Descriptor;
1328
1329 //
1330 // Sort the mSmmCpuSmramRanges
1331 //
1332 QuickSort (mSmmCpuSmramRanges, mSmmCpuSmramRangeCount, sizeof (EFI_SMRAM_DESCRIPTOR), (BASE_SORT_COMPARE)CpuSmramRangeCompare, &SmramDescriptor);
1333
1334 //
1335 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1336 //
1337 CurrentSmramRange = NULL;
1338 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1339 //
1340 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1341 //
1342 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1343 continue;
1344 }
1345
1346 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1347 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1348 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1349 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1350 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1351 }
1352 }
1353 }
1354 }
1355
1356 ASSERT (CurrentSmramRange != NULL);
1357
1358 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1359 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1360
1361 do {
1362 Found = FALSE;
1363 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1364 if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
1365 (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
1366 {
1367 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1368 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1369 Found = TRUE;
1370 } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
1371 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1372 Found = TRUE;
1373 }
1374 }
1375 } while (Found);
1376
1377 DEBUG ((DEBUG_INFO, "%a: SMRR Base = 0x%x, SMRR Size = 0x%x\n", __func__, *SmrrBase, *SmrrSize));
1378}
1379
1386VOID
1387EFIAPI
1389 IN OUT VOID *Buffer
1390 )
1391{
1392 UINTN CpuIndex;
1393 UINT64 SmmFeatureControlMsr;
1394 UINT64 NewSmmFeatureControlMsr;
1395
1396 //
1397 // Retrieve the CPU Index from the context passed in
1398 //
1399 CpuIndex = *(UINTN *)Buffer;
1400
1401 //
1402 // Get the current SMM Feature Control MSR value
1403 //
1404 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1405
1406 //
1407 // Compute the new SMM Feature Control MSR value
1408 //
1409 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1410 if (mSmmCodeAccessCheckEnable) {
1411 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1412 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1413 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1414 }
1415 }
1416
1417 //
1418 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1419 //
1420 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1421 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1422 }
1423
1424 //
1425 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1426 //
1427 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1428}
1429
1434VOID
1436 VOID
1437 )
1438{
1439 UINTN Index;
1440 EFI_STATUS Status;
1441
1443
1444 //
1445 // Check to see if the Feature Control MSR is supported on this CPU
1446 //
1447 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1448
1449 //
1450 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1451 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1452 //
1453 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1454
1455 //
1456 // Enable SMM Code Access Check feature on the BSP.
1457 //
1459
1460 //
1461 // Enable SMM Code Access Check feature for the APs.
1462 //
1463 for (Index = 0; Index < gMmst->NumberOfCpus; Index++) {
1464 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1465 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
1466 //
1467 // If this processor does not exist
1468 //
1469 continue;
1470 }
1471
1472 //
1473 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1474 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1475 //
1476 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1477
1478 //
1479 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1480 //
1481 Status = gMmst->MmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1482 ASSERT_EFI_ERROR (Status);
1483
1484 //
1485 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1486 //
1487 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1488 CpuPause ();
1489 }
1490
1491 //
1492 // Release the Config SMM Code Access Check spin lock.
1493 //
1494 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1495 }
1496 }
1497
1499}
1500
1508VOID *
1510 IN UINTN Pages
1511 )
1512{
1513 EFI_STATUS Status;
1514 EFI_PHYSICAL_ADDRESS Memory;
1515
1516 if (Pages == 0) {
1517 return NULL;
1518 }
1519
1520 Status = gMmst->MmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1521 if (EFI_ERROR (Status)) {
1522 return NULL;
1523 }
1524
1525 return (VOID *)(UINTN)Memory;
1526}
1527
1532VOID
1534 VOID
1535 )
1536{
1538}
UINT64 UINTN
INT64 INTN
STATIC UINT8 mSmmSaveStateRegisterLma
VOID *EFIAPI GetFirstGuidHob(IN CONST EFI_GUID *Guid)
Definition: HobLib.c:215
VOID *EFIAPI GetNextGuidHob(IN CONST EFI_GUID *Guid, IN CONST VOID *HobStart)
Definition: HobLib.c:176
BOOLEAN EFIAPI SetInterruptState(IN BOOLEAN InterruptState)
Definition: Cpu.c:48
INTN(EFIAPI * BASE_SORT_COMPARE)(IN CONST VOID *Buffer1, IN CONST VOID *Buffer2)
Definition: BaseLib.h:3285
BOOLEAN EFIAPI SaveAndDisableInterrupts(VOID)
Definition: Cpu.c:21
UINT32 EFIAPI GetPowerOfTwo32(IN UINT32 Operand)
VOID EFIAPI CpuDeadLoop(VOID)
Definition: CpuDeadLoop.c:25
VOID EFIAPI QuickSort(IN OUT VOID *BufferToSort, IN CONST UINTN Count, IN CONST UINTN ElementSize, IN BASE_SORT_COMPARE CompareFunction, OUT VOID *BufferOneElement)
Definition: QuickSort.c:36
VOID EFIAPI CpuPause(VOID)
VOID EFIAPI SpeculationBarrier(VOID)
VOID *EFIAPI CopyMem(OUT VOID *DestinationBuffer, IN CONST VOID *SourceBuffer, IN UINTN Length)
VOID *EFIAPI ZeroMem(OUT VOID *Buffer, IN UINTN Length)
VOID(EFIAPI * EFI_CPU_INTERRUPT_HANDLER)(IN CONST EFI_EXCEPTION_TYPE InterruptType, IN CONST EFI_SYSTEM_CONTEXT SystemContext)
Definition: Cpu.h:52
UINT32 EFIAPI AsmCpuidEx(IN UINT32 Index, IN UINT32 SubIndex, OUT UINT32 *RegisterEax OPTIONAL, OUT UINT32 *RegisterEbx OPTIONAL, OUT UINT32 *RegisterEcx OPTIONAL, OUT UINT32 *RegisterEdx OPTIONAL)
Definition: CpuIdEx.c:43
RETURN_STATUS ConvertMemoryPageAttributes(IN PAGE_TABLE_LIB_PAGING_CONTEXT *PagingContext OPTIONAL, IN PHYSICAL_ADDRESS BaseAddress, IN UINT64 Length, IN UINT64 Attributes, IN PAGE_ACTION PageAction, IN PAGE_TABLE_LIB_ALLOCATE_PAGES AllocatePagesFunc OPTIONAL, OUT BOOLEAN *IsSplitted OPTIONAL, OUT BOOLEAN *IsModified OPTIONAL)
Definition: CpuPageTable.c:719
VOID InitSmmS3ResumeState(VOID)
Definition: CpuS3.c:192
VOID RestoreSmmConfigurationInS3(VOID)
Definition: CpuS3.c:31
EFI_STATUS InitializeSmmCpuServices(IN EFI_HANDLE Handle)
Definition: CpuService.c:373
VOID EFIAPI InitializeDebugAgent(IN UINT32 InitFlag, IN VOID *Context OPTIONAL, IN DEBUG_AGENT_CONTINUE Function OPTIONAL)
VOID EFIAPI FreePool(IN VOID *Buffer)
VOID EFIAPI SendSmiIpi(IN UINT32 ApicId)
Definition: BaseXApicLib.c:427
UINT32 EFIAPI GetApicId(VOID)
Definition: BaseXApicLib.c:337
VOID EFIAPI SendSmiIpiAllExcludingSelf(VOID)
Definition: BaseXApicLib.c:446
#define NULL
Definition: Base.h:319
#define CONST
Definition: Base.h:259
#define RETURN_BUFFER_TOO_SMALL
Definition: Base.h:1093
#define STATIC
Definition: Base.h:264
#define ALIGN_VALUE(Value, Alignment)
Definition: Base.h:948
#define TRUE
Definition: Base.h:301
#define FALSE
Definition: Base.h:307
#define IN
Definition: Base.h:279
#define OUT
Definition: Base.h:284
#define ASSERT_EFI_ERROR(StatusParameter)
Definition: DebugLib.h:462
#define DEBUG_CODE_BEGIN()
Definition: DebugLib.h:564
#define DEBUG(Expression)
Definition: DebugLib.h:434
#define DEBUG_CODE_END()
Definition: DebugLib.h:578
#define REPORT_STATUS_CODE(Type, Value)
#define SMM_HANDLER_OFFSET
#define CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS
Definition: Cpuid.h:1301
#define CPUID_SIGNATURE
Definition: Cpuid.h:45
#define CPUID_VERSION_INFO
Definition: Cpuid.h:81
#define CPUID_EXTENDED_STATE
Definition: Cpuid.h:1918
#define CPUID_EXTENDED_STATE_SUB_LEAF
Definition: Cpuid.h:2033
#define CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO
Definition: Cpuid.h:1306
#define CPUID_EXTENDED_CPU_SIG
Definition: Cpuid.h:3768
#define SMRAM_SAVE_STATE_MAP_OFFSET
UINT32 EFIAPI AsmCpuid(IN UINT32 Index, OUT UINT32 *RegisterEax OPTIONAL, OUT UINT32 *RegisterEbx OPTIONAL, OUT UINT32 *RegisterEcx OPTIONAL, OUT UINT32 *RegisterEdx OPTIONAL)
Definition: CpuId.c:36
EFI_MM_SAVE_STATE_REGISTER
Definition: MmCpu.h:25
EFI_STATUS EFIAPI MmSaveStateWriteRegister(IN UINTN CpuIndex, IN EFI_MM_SAVE_STATE_REGISTER Register, IN UINTN Width, IN CONST VOID *Buffer)
EFI_STATUS EFIAPI MmSaveStateReadRegister(IN UINTN CpuIndex, IN EFI_MM_SAVE_STATE_REGISTER Register, IN UINTN Width, OUT VOID *Buffer)
UINT64 EFIAPI SmmCpuFeaturesGetSmmRegister(IN UINTN CpuIndex, IN SMM_REG_NAME RegName)
VOID EFIAPI SmmCpuFeaturesSmmRelocationComplete(VOID)
VOID EFIAPI SmmCpuFeaturesInitializeProcessor(IN UINTN CpuIndex, IN BOOLEAN IsMonarch, IN EFI_PROCESSOR_INFORMATION *ProcessorInfo, IN CPU_HOT_PLUG_DATA *CpuHotPlugData)
VOID EFIAPI SmmCpuFeaturesSetSmmRegister(IN UINTN CpuIndex, IN SMM_REG_NAME RegName, IN UINT64 Value)
#define PcdGet32(TokenName)
Definition: PcdLib.h:362
#define PcdGetBool(TokenName)
Definition: PcdLib.h:401
#define FeaturePcdGet(TokenName)
Definition: PcdLib.h:50
UINTN EFIAPI PeCoffSearchImageBase(IN UINTN Address)
#define PERF_FUNCTION_END()
#define PERF_FUNCTION_BEGIN()
#define PERF_CODE(Expression)
VOID FindSmramInfo(OUT UINT32 *SmrrBase, OUT UINT32 *SmrrSize)
VOID ConfigSmmCodeAccessCheck(VOID)
EFI_PROCESSOR_INFORMATION * GetMpInformation(OUT UINTN *NumberOfCpus, OUT UINTN *MaxNumberOfCpus)
EFI_STATUS EFIAPI SmmWriteSaveState(IN CONST EFI_SMM_CPU_PROTOCOL *This, IN UINTN Width, IN EFI_SMM_SAVE_STATE_REGISTER Register, IN UINTN CpuIndex, IN CONST VOID *Buffer)
VOID InitializeSmmIdt(VOID)
EFI_STATUS PiSmmCpuEntryCommon(VOID)
VOID EFIAPI ConfigSmmCodeAccessCheckOnCurrentProcessor(IN OUT VOID *Buffer)
INTN EFIAPI CpuSmramRangeCompare(IN CONST VOID *Buffer1, IN CONST VOID *Buffer2)
STATIC EFI_STATUS GetSmBase(IN UINTN MaxNumberOfCpus, OUT UINTN **AllocatedSmBaseBuffer)
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute
VOID * AllocateCodePages(IN UINTN Pages)
VOID ExecuteFirstSmiInit(VOID)
EFI_HANDLE mSmmCpuHandle
VOID InitializeSmm(VOID)
VOID DumpModuleInfoByIp(IN UINTN CallerIpAddress)
VOID PerformPreTasks(VOID)
EFI_SMM_CPU_PROTOCOL mSmmCpu
INTN EFIAPI MpInformation2HobCompare(IN CONST VOID *Buffer1, IN CONST VOID *Buffer2)
EFI_STATUS EFIAPI SmmReadSaveState(IN CONST EFI_SMM_CPU_PROTOCOL *This, IN UINTN Width, IN EFI_SMM_SAVE_STATE_REGISTER Register, IN UINTN CpuIndex, OUT VOID *Buffer)
INTN EFIAPI SmBaseHobCompare(IN CONST VOID *Buffer1, IN CONST VOID *Buffer2)
VOID GetAcpiS3EnableFlag(VOID)
EFI_STATUS EFIAPI EdkiiSmmGetMemoryAttributes(IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This, IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINT64 Length, IN UINT64 *Attributes)
EFI_PROCESSOR_INFORMATION * GetMpInformationFromMpServices(OUT UINTN *NumberOfCpus, OUT UINTN *MaxNumberOfCpus)
VOID EFIAPI PiSmmCpuSmiEntryFixupAddress()
EFI_STATUS SetShadowStack(IN UINTN Cr3, IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINT64 Length)
EFI_STATUS EFIAPI EdkiiSmmSetMemoryAttributes(IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This, IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINT64 Length, IN UINT64 Attributes)
UINTN EFIAPI GetSmiHandlerSize(VOID)
VOID InitializeSmmTimer(VOID)
Definition: SyncTimer.c:29
BOOLEAN IsSmmProfileEnabled(VOID)
UINTN GetSupportedMaxLogicalProcessorNumber(VOID)
EFI_MM_MP_PROTOCOL mSmmMp
Definition: SmmMp.c:16
EFI_STATUS EFIAPI EdkiiSmmClearMemoryAttributes(IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This, IN EFI_PHYSICAL_ADDRESS BaseAddress, IN UINT64 Length, IN UINT64 Attributes)
#define EFI_PROGRESS_CODE
Definition: PiStatusCode.h:43
VOID *EFIAPI AllocatePool(IN UINTN AllocationSize)
VOID *EFIAPI AllocatePages(IN UINTN Pages)
EFI_STATUS EFIAPI Register(IN EFI_PEI_RSC_HANDLER_CALLBACK Callback)
@ SmmRegFeatureControl
VOID MpPerfBegin(IN UINTN CpuIndex, IN UINTN MpProcedureId)
Definition: SmmMpPerf.c:78
VOID MpPerfEnd(IN UINTN CpuIndex, IN UINTN MpProcedureId)
Definition: SmmMpPerf.c:93
VOID InitializeMpPerf(UINTN NumberofCpus)
Definition: SmmMpPerf.c:29
VOID CheckFeatureSupported(IN UINTN CpuIndex)
Definition: SmmProfile.c:859
VOID InitSmmProfile(UINT32 Cr3)
Definition: SmmProfile.c:1036
SPIN_LOCK *EFIAPI AcquireSpinLock(IN OUT SPIN_LOCK *SpinLock)
SPIN_LOCK *EFIAPI InitializeSpinLock(OUT SPIN_LOCK *SpinLock)
volatile UINTN SPIN_LOCK
SPIN_LOCK *EFIAPI ReleaseSpinLock(IN OUT SPIN_LOCK *SpinLock)
BOOLEAN EFIAPI AcquireSpinLockOrFail(IN OUT SPIN_LOCK *SpinLock)
UINT64 EFI_PHYSICAL_ADDRESS
Definition: UefiBaseType.h:50
#define EFI_PAGES_TO_SIZE(Pages)
Definition: UefiBaseType.h:213
RETURN_STATUS EFI_STATUS
Definition: UefiBaseType.h:29
#define EFI_SIZE_TO_PAGES(Size)
Definition: UefiBaseType.h:200
VOID * EFI_HANDLE
Definition: UefiBaseType.h:33
#define EFI_SUCCESS
Definition: UefiBaseType.h:112
VOID InitializeDataForMmMp(VOID)
Definition: MpService.c:1791
EFI_STATUS EFIAPI SmmStartupThisAp(IN EFI_AP_PROCEDURE Procedure, IN UINTN CpuIndex, IN OUT VOID *ProcArguments OPTIONAL)
Definition: MpService.c:1434
VOID InitPackageFirstThreadIndexInfo(VOID)
Definition: MpService.c:1752
UINT32 InitializeMpServiceData(IN VOID *Stacks, IN UINTN StackSize, IN UINTN ShadowStackSize)
Definition: MpService.c:1940
EFI_STATUS EFIAPI RegisterSmmEntry(IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This, IN EFI_SMM_ENTRY_POINT SmmEntryPoint)
Definition: MpService.c:2041
VOID EFIAPI InitializeMpSyncData(VOID)
Definition: MpService.c:1865
@ EfiRuntimeServicesCode
@ EFI_NATIVE_INTERFACE
Definition: UefiSpec.h:1193
@ AllocateAnyPages
Definition: UefiSpec.h:33
BOOLEAN EFIAPI StandardSignatureIsAuthenticAMD(VOID)
Definition: X86BaseCpuLib.c:27
VOID EFIAPI PatchInstructionX86(OUT X86_ASSEMBLY_PATCH_LABEL *InstructionEnd, IN UINT64 PatchValue, IN UINTN ValueSize)
VOID EFIAPI AsmReadIdtr(OUT IA32_DESCRIPTOR *Idtr)
Definition: X86ReadIdtr.c:24
VOID EFIAPI AsmWriteIdtr(IN CONST IA32_DESCRIPTOR *Idtr)
EFI_INSTALL_PROTOCOL_INTERFACE MmInstallProtocolInterface
Definition: PiMmCis.h:327
EFI_MM_STARTUP_THIS_AP MmStartupThisAp
Definition: PiMmCis.h:282
UINTN CurrentlyExecutingCpu
Definition: PiSmmCis.h:69
UINTN * CpuSaveStateSize
Definition: PiSmmCis.h:80
EFI_PHYSICAL_ADDRESS SmramReservedStart
EFI_PHYSICAL_ADDRESS CpuStart
Definition: PiMultiPhase.h:127
EFI_SMRAM_DESCRIPTOR Descriptor[1]
UINT32 NumberOfProcessors
Definition: SmmBaseHob.h:65
struct CPUID_EXTENDED_CPU_SIG_EDX::@750 Bits