/** @file
  Code for Processor S3 restoration.

  (Recovered from a Doxygen-rendered copy of TianoCore EDK2
  UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c — verify against the upstream source.)
**/
9#include "PiSmmCpuDxeSmm.h"
10
11#pragma pack(1)
12typedef struct {
13 UINTN Lock;
14 VOID *StackStart;
15 UINTN StackSize;
16 VOID *ApFunction;
17 IA32_DESCRIPTOR GdtrProfile;
18 IA32_DESCRIPTOR IdtrProfile;
19 UINT32 BufferStart;
20 UINT32 Cr3;
21 UINTN InitializeFloatingPointUnitsAddress;
23#pragma pack()
24
25typedef struct {
26 UINT8 *RendezvousFunnelAddress;
27 UINTN PModeEntryOffset;
28 UINTN FlatJumpOffset;
29 UINTN Size;
30 UINTN LModeEntryOffset;
31 UINTN LongJumpOffset;
33
34//
35// Flags used when program the register.
36//
37typedef struct {
38 volatile UINTN MemoryMappedLock; // Spinlock used to program mmio
39 volatile UINT32 *CoreSemaphoreCount; // Semaphore container used to program
40 // core level semaphore.
41 volatile UINT32 *PackageSemaphoreCount; // Semaphore container used to program
42 // package level semaphore.
44
45//
46// Signal that SMM BASE relocation is complete.
47//
48volatile BOOLEAN mInitApsAfterSmmBaseReloc;
49
56VOID *
57EFIAPI
59 MP_ASSEMBLY_ADDRESS_MAP *AddressMap
60 );
61
//
// Legacy (below 1 MB) region reserved for real-mode AP startup code:
// two 4 KB pages ending just below the 0xA0000 video area.
//
#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)
64
66ACPI_CPU_DATA mAcpiCpuData;
67volatile UINT32 mNumberToFinish;
68MP_CPU_EXCHANGE_INFO *mExchangeInfo;
69BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
70
71//
72// S3 boot flag
73//
74BOOLEAN mSmmS3Flag = FALSE;
75
76//
77// Pointer to structure used during S3 Resume
78//
79SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;
80
81BOOLEAN mAcpiS3Enable = TRUE;
82
83UINT8 *mApHltLoopCode = NULL;
84UINT8 mApHltLoopCodeTemplate[] = {
85 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
86 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
87 0xFA, // cli
88 0xF4, // hlt
89 0xEB, 0xFC // jmp $-2
90};
91
97VOID
98EFIAPI
100 EFI_PHYSICAL_ADDRESS MtrrTable
101 )
102
103/*++
104
105Routine Description:
106
107 Sync up the MTRR values for all processors.
108
109Arguments:
110
111Returns:
112 None
113
114--*/
115{
116 MTRR_SETTINGS *MtrrSettings;
117
118 MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;
119 MtrrSetAllMtrrs (MtrrSettings);
120}
121
128VOID
130 IN OUT volatile UINT32 *Sem
131 )
132{
134}
135
146VOID
148 IN OUT volatile UINT32 *Sem
149 )
150{
151 UINT32 Value;
152
153 do {
154 Value = *Sem;
155 } while (Value == 0 ||
157 Sem,
158 Value,
159 Value - 1
160 ) != Value);
161}
162
172UINTN
174 IN UINT32 CrIndex,
175 IN BOOLEAN Read,
176 IN OUT UINTN *CrValue
177 )
178{
179 switch (CrIndex) {
180 case 0:
181 if (Read) {
182 *CrValue = AsmReadCr0 ();
183 } else {
184 AsmWriteCr0 (*CrValue);
185 }
186
187 break;
188 case 2:
189 if (Read) {
190 *CrValue = AsmReadCr2 ();
191 } else {
192 AsmWriteCr2 (*CrValue);
193 }
194
195 break;
196 case 3:
197 if (Read) {
198 *CrValue = AsmReadCr3 ();
199 } else {
200 AsmWriteCr3 (*CrValue);
201 }
202
203 break;
204 case 4:
205 if (Read) {
206 *CrValue = AsmReadCr4 ();
207 } else {
208 AsmWriteCr4 (*CrValue);
209 }
210
211 break;
212 default:
213 return EFI_UNSUPPORTED;
214 }
215
216 return EFI_SUCCESS;
217}
218
229VOID
231 IN CPU_REGISTER_TABLE *RegisterTable,
232 IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,
233 IN CPU_STATUS_INFORMATION *CpuStatus,
235 )
236{
237 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
238 UINTN Index;
239 UINTN Value;
240 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntryHead;
241 volatile UINT32 *SemaphorePtr;
242 UINT32 FirstThread;
243 UINT32 CurrentThread;
244 UINT32 CurrentCore;
245 UINTN ProcessorIndex;
246 UINT32 *ThreadCountPerPackage;
247 UINT8 *ThreadCountPerCore;
248 EFI_STATUS Status;
249 UINT64 CurrentValue;
250
251 //
252 // Traverse Register Table of this logical processor
253 //
254 RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;
255
256 for (Index = 0; Index < RegisterTable->TableLength; Index++) {
257 RegisterTableEntry = &RegisterTableEntryHead[Index];
258
259 //
260 // Check the type of specified register
261 //
262 switch (RegisterTableEntry->RegisterType) {
263 //
264 // The specified register is Control Register
265 //
266 case ControlRegister:
267 Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
268 if (EFI_ERROR (Status)) {
269 break;
270 }
271
272 if (RegisterTableEntry->TestThenWrite) {
273 CurrentValue = BitFieldRead64 (
274 Value,
275 RegisterTableEntry->ValidBitStart,
276 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
277 );
278 if (CurrentValue == RegisterTableEntry->Value) {
279 break;
280 }
281 }
282
283 Value = (UINTN)BitFieldWrite64 (
284 Value,
285 RegisterTableEntry->ValidBitStart,
286 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
287 RegisterTableEntry->Value
288 );
289 ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
290 break;
291 //
292 // The specified register is Model Specific Register
293 //
294 case Msr:
295 if (RegisterTableEntry->TestThenWrite) {
296 Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
297 if (RegisterTableEntry->ValidBitLength >= 64) {
298 if (Value == RegisterTableEntry->Value) {
299 break;
300 }
301 } else {
302 CurrentValue = BitFieldRead64 (
303 Value,
304 RegisterTableEntry->ValidBitStart,
305 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
306 );
307 if (CurrentValue == RegisterTableEntry->Value) {
308 break;
309 }
310 }
311 }
312
313 //
314 // If this function is called to restore register setting after INIT signal,
315 // there is no need to restore MSRs in register table.
316 //
317 if (RegisterTableEntry->ValidBitLength >= 64) {
318 //
319 // If length is not less than 64 bits, then directly write without reading
320 //
322 RegisterTableEntry->Index,
323 RegisterTableEntry->Value
324 );
325 } else {
326 //
327 // Set the bit section according to bit start and length
328 //
330 RegisterTableEntry->Index,
331 RegisterTableEntry->ValidBitStart,
332 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
333 RegisterTableEntry->Value
334 );
335 }
336
337 break;
338 //
339 // MemoryMapped operations
340 //
341 case MemoryMapped:
342 AcquireSpinLock (&CpuFlags->MemoryMappedLock);
344 (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
345 RegisterTableEntry->ValidBitStart,
346 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
347 (UINT32)RegisterTableEntry->Value
348 );
349 ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
350 break;
351 //
352 // Enable or disable cache
353 //
354 case CacheControl:
355 //
356 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
357 //
358 if (RegisterTableEntry->Value == 0) {
360 } else {
362 }
363
364 break;
365
366 case Semaphore:
367 // Semaphore works logic like below:
368 //
369 // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
370 // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
371 //
372 // All threads (T0...Tn) waits in P() line and continues running
373 // together.
374 //
375 //
376 // T0 T1 ... Tn
377 //
378 // V(0...n) V(0...n) ... V(0...n)
379 // n * P(0) n * P(1) ... n * P(n)
380 //
381 ASSERT (
382 (ApLocation != NULL) &&
383 (CpuStatus->ThreadCountPerPackage != 0) &&
384 (CpuStatus->ThreadCountPerCore != 0) &&
385 (CpuFlags->CoreSemaphoreCount != NULL) &&
386 (CpuFlags->PackageSemaphoreCount != NULL)
387 );
388 switch (RegisterTableEntry->Value) {
389 case CoreDepType:
390 SemaphorePtr = CpuFlags->CoreSemaphoreCount;
391 ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;
392
393 CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
394 //
395 // Get Offset info for the first thread in the core which current thread belongs to.
396 //
397 FirstThread = CurrentCore * CpuStatus->MaxThreadCount;
398 CurrentThread = FirstThread + ApLocation->Thread;
399
400 //
401 // Different cores may have different valid threads in them. If driver maintail clearly
402 // thread index in different cores, the logic will be much complicated.
403 // Here driver just simply records the max thread number in all cores and use it as expect
404 // thread number for all cores.
405 // In below two steps logic, first current thread will Release semaphore for each thread
406 // in current core. Maybe some threads are not valid in this core, but driver don't
407 // care. Second, driver will let current thread wait semaphore for all valid threads in
408 // current core. Because only the valid threads will do release semaphore for this
409 // thread, driver here only need to wait the valid thread count.
410 //
411
412 //
413 // First Notify ALL THREADs in current Core that this thread is ready.
414 //
415 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
416 S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
417 }
418
419 //
420 // Second, check whether all VALID THREADs (not all threads) in current core are ready.
421 //
422 for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {
423 S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
424 }
425
426 break;
427
428 case PackageDepType:
429 SemaphorePtr = CpuFlags->PackageSemaphoreCount;
430 ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
431 //
432 // Get Offset info for the first thread in the package which current thread belongs to.
433 //
434 FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
435 //
436 // Get the possible threads count for current package.
437 //
438 CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
439
440 //
441 // Different packages may have different valid threads in them. If driver maintail clearly
442 // thread index in different packages, the logic will be much complicated.
443 // Here driver just simply records the max thread number in all packages and use it as expect
444 // thread number for all packages.
445 // In below two steps logic, first current thread will Release semaphore for each thread
446 // in current package. Maybe some threads are not valid in this package, but driver don't
447 // care. Second, driver will let current thread wait semaphore for all valid threads in
448 // current package. Because only the valid threads will do release semaphore for this
449 // thread, driver here only need to wait the valid thread count.
450 //
451
452 //
453 // First Notify ALL THREADS in current package that this thread is ready.
454 //
455 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {
456 S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
457 }
458
459 //
460 // Second, check whether VALID THREADS (not all threads) in current package are ready.
461 //
462 for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {
463 S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
464 }
465
466 break;
467
468 default:
469 break;
470 }
471
472 break;
473
474 default:
475 break;
476 }
477 }
478}
479
487VOID
489 IN BOOLEAN PreSmmRegisterTable
490 )
491{
492 CPU_FEATURE_INIT_DATA *FeatureInitData;
493 CPU_REGISTER_TABLE *RegisterTable;
494 CPU_REGISTER_TABLE *RegisterTables;
495 UINT32 InitApicId;
496 UINTN ProcIndex;
497 UINTN Index;
498
499 FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;
500
501 if (PreSmmRegisterTable) {
502 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;
503 } else {
504 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;
505 }
506
507 if (RegisterTables == NULL) {
508 return;
509 }
510
511 InitApicId = GetInitialApicId ();
512 RegisterTable = NULL;
513 ProcIndex = (UINTN)-1;
514 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
515 if (RegisterTables[Index].InitialApicId == InitApicId) {
516 RegisterTable = &RegisterTables[Index];
517 ProcIndex = Index;
518 break;
519 }
520 }
521
522 ASSERT (RegisterTable != NULL);
523
524 if (FeatureInitData->ApLocation != 0) {
526 RegisterTable,
527 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,
528 &FeatureInitData->CpuStatus,
529 &mCpuFlags
530 );
531 } else {
533 RegisterTable,
534 NULL,
535 &FeatureInitData->CpuStatus,
536 &mCpuFlags
537 );
538 }
539}
540
544VOID
546 VOID
547 )
548{
549 UINTN TopOfStack;
550 UINT8 Stack[128];
551
552 LoadMtrrData (mAcpiCpuData.MtrrTable);
553
555
556 //
557 // Count down the number with lock mechanism.
558 //
559 InterlockedDecrement (&mNumberToFinish);
560
561 //
562 // Wait for BSP to signal SMM Base relocation done.
563 //
564 while (!mInitApsAfterSmmBaseReloc) {
565 CpuPause ();
566 }
567
570
572
573 //
574 // Place AP into the safe code, count down the number with lock mechanism in the safe code.
575 //
576 TopOfStack = (UINTN)Stack + sizeof (Stack);
577 TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);
578 CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
579 TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
580}
581
589VOID
591 EFI_PHYSICAL_ADDRESS WorkingBuffer
592 )
593{
594 EFI_PHYSICAL_ADDRESS StartupVector;
595 MP_ASSEMBLY_ADDRESS_MAP AddressMap;
596
597 //
598 // Get the address map of startup code for AP,
599 // including code size, and offset of long jump instructions to redirect.
600 //
601 ZeroMem (&AddressMap, sizeof (AddressMap));
602 AsmGetAddressMap (&AddressMap);
603
604 StartupVector = WorkingBuffer;
605
606 //
607 // Copy AP startup code to startup vector, and then redirect the long jump
608 // instructions for mode switching.
609 //
610 CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
611 *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);
612 if (AddressMap.LongJumpOffset != 0) {
613 *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);
614 }
615
616 //
617 // Get the start address of exchange data between BSP and AP.
618 //
619 mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);
620 ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));
621
622 CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
623 CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));
624
625 mExchangeInfo->StackStart = (VOID *)(UINTN)mAcpiCpuData.StackAddress;
626 mExchangeInfo->StackSize = mAcpiCpuData.StackSize;
627 mExchangeInfo->BufferStart = (UINT32)StartupVector;
628 mExchangeInfo->Cr3 = (UINT32)(AsmReadCr3 ());
629 mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
630}
631
639VOID
641 VOID
642 )
643{
644 LoadMtrrData (mAcpiCpuData.MtrrTable);
645
647
649
650 PrepareApStartupVector (mAcpiCpuData.StartupVector);
651
652 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
653 ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
654 } else {
655 ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
656 }
657
658 mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
659 mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeAp;
660
661 //
662 // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
663 //
664 mInitApsAfterSmmBaseReloc = FALSE;
665
666 //
667 // Send INIT IPI - SIPI to all APs
668 //
669 SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
670
671 while (mNumberToFinish > 0) {
672 CpuPause ();
673 }
674}
675
683VOID
685 VOID
686 )
687{
688 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
689 ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
690 } else {
691 ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
692 }
693
694 mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
695
696 //
697 // Signal that SMM base relocation is complete and to continue initialization for all APs.
698 //
699 mInitApsAfterSmmBaseReloc = TRUE;
700
701 //
702 // Must begin set register after all APs have continue their initialization.
703 // This is a requirement to support semaphore mechanism in register table.
704 // Because if semaphore's dependence type is package type, semaphore will wait
705 // for all Aps in one package finishing their tasks before set next register
706 // for all APs. If the Aps not begin its task during BSP doing its task, the
707 // BSP thread will hang because it is waiting for other Aps in the same
708 // package finishing their task.
709 //
711
712 while (mNumberToFinish > 0) {
713 CpuPause ();
714 }
715}
716
721VOID
723 VOID
724 )
725{
726 if (!mAcpiS3Enable) {
727 return;
728 }
729
730 //
731 // Restore SMM Configuration in S3 boot path.
732 //
733 if (mRestoreSmmConfigurationInS3) {
734 //
735 // Need make sure gSmst is correct because below function may use them.
736 //
737 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
738 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
739 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
740 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
741 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;
742
743 //
744 // Configure SMM Code Access Check feature if available.
745 //
747
749
750 mRestoreSmmConfigurationInS3 = FALSE;
751 }
752}
753
759VOID
760EFIAPI
762 VOID
763 )
764{
765 SMM_S3_RESUME_STATE *SmmS3ResumeState;
766 IA32_DESCRIPTOR Ia32Idtr;
767 IA32_DESCRIPTOR X64Idtr;
768 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
769 EFI_STATUS Status;
770
771 DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));
772
773 mSmmS3Flag = TRUE;
774
775 //
776 // See if there is enough context to resume PEI Phase
777 //
778 if (mSmmS3ResumeState == NULL) {
779 DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
780 CpuDeadLoop ();
781 }
782
783 SmmS3ResumeState = mSmmS3ResumeState;
784 ASSERT (SmmS3ResumeState != NULL);
785
786 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
787 //
788 // Save the IA32 IDT Descriptor
789 //
790 AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);
791
792 //
793 // Setup X64 IDT table
794 //
795 ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
796 X64Idtr.Base = (UINTN)IdtEntryTable;
797 X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
798 AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);
799
800 //
801 // Setup the default exception handler
802 //
804 ASSERT_EFI_ERROR (Status);
805
806 //
807 // Initialize Debug Agent to support source level debug
808 //
809 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
810 }
811
812 //
813 // Skip initialization if mAcpiCpuData is not valid
814 //
815 if (mAcpiCpuData.NumberOfCpus > 0) {
816 //
817 // First time microcode load and restore MTRRs
818 //
820 }
821
822 //
823 // Restore SMBASE for BSP and all APs
824 //
826
827 //
828 // Skip initialization if mAcpiCpuData is not valid
829 //
830 if (mAcpiCpuData.NumberOfCpus > 0) {
831 //
832 // Restore MSRs for BSP and all APs
833 //
835 }
836
837 //
838 // Set a flag to restore SMM configuration in S3 path.
839 //
840 mRestoreSmmConfigurationInS3 = TRUE;
841
842 DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
843 DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
844 DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
845 DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
846 DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));
847
848 //
849 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
850 //
851 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
852 DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
853
855 (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
856 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
857 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
858 (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
859 );
860 }
861
862 //
863 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
864 //
865 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
866 DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
867 //
868 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
869 //
871 //
872 // Restore IA32 IDT table
873 //
874 AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);
876 SmmS3ResumeState->ReturnCs,
877 (UINT32)SmmS3ResumeState->ReturnEntryPoint,
878 (UINT32)SmmS3ResumeState->ReturnContext1,
879 (UINT32)SmmS3ResumeState->ReturnContext2,
880 (UINT32)SmmS3ResumeState->ReturnStackPointer
881 );
882 }
883
884 //
885 // Can not resume PEI Phase
886 //
887 DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
888 CpuDeadLoop ();
889}
890
897VOID
899 IN UINT32 Cr3
900 )
901{
902 VOID *GuidHob;
903 EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
904 SMM_S3_RESUME_STATE *SmmS3ResumeState;
905 EFI_PHYSICAL_ADDRESS Address;
906 EFI_STATUS Status;
907
908 if (!mAcpiS3Enable) {
909 return;
910 }
911
912 GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
913 if (GuidHob == NULL) {
914 DEBUG ((
915 DEBUG_ERROR,
916 "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
917 __FUNCTION__,
918 &gEfiAcpiVariableGuid
919 ));
920 CpuDeadLoop ();
921 } else {
922 SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);
923
924 DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
925 DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));
926
927 SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
928 ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));
929
930 mSmmS3ResumeState = SmmS3ResumeState;
931 SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;
932
933 SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;
934
935 SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
936 SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
937 if (SmmS3ResumeState->SmmS3StackBase == 0) {
938 SmmS3ResumeState->SmmS3StackSize = 0;
939 }
940
941 SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
942 SmmS3ResumeState->SmmS3Cr3 = Cr3;
943 SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;
944
945 if (sizeof (UINTN) == sizeof (UINT64)) {
946 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
947 }
948
949 if (sizeof (UINTN) == sizeof (UINT32)) {
950 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
951 }
952
953 //
954 // Patch SmmS3ResumeState->SmmS3Cr3
955 //
956 InitSmmS3Cr3 ();
957 }
958
959 //
960 // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
961 // protected mode on S3 path
962 //
963 Address = BASE_4GB - 1;
964 Status = gBS->AllocatePages (
967 EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
968 &Address
969 );
970 ASSERT_EFI_ERROR (Status);
971 mApHltLoopCode = (UINT8 *)(UINTN)Address;
972}
973
982VOID
984 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
985 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
986 IN UINT32 NumberOfCpus
987 )
988{
989 UINTN Index;
990 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
991
992 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
993 for (Index = 0; Index < NumberOfCpus; Index++) {
994 if (DestinationRegisterTableList[Index].TableLength != 0) {
995 DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
996 RegisterTableEntry = AllocateCopyPool (
997 DestinationRegisterTableList[Index].AllocatedSize,
998 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
999 );
1000 ASSERT (RegisterTableEntry != NULL);
1001 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
1002 }
1003 }
1004}
1005
1015BOOLEAN
1017 IN CPU_REGISTER_TABLE *RegisterTable,
1018 IN UINT32 NumberOfCpus
1019 )
1020{
1021 UINTN Index;
1022
1023 if (RegisterTable != NULL) {
1024 for (Index = 0; Index < NumberOfCpus; Index++) {
1025 if (RegisterTable[Index].TableLength != 0) {
1026 return FALSE;
1027 }
1028 }
1029 }
1030
1031 return TRUE;
1032}
1033
1041VOID
1043 IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,
1044 IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc
1045 )
1046{
1047 CPU_STATUS_INFORMATION *CpuStatus;
1048
1049 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
1050 CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1051 ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);
1052
1054 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
1055 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
1056 mAcpiCpuData.NumberOfCpus
1057 );
1058 }
1059
1060 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
1061 CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1062 ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);
1063
1065 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
1066 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
1067 mAcpiCpuData.NumberOfCpus
1068 );
1069 }
1070
1071 CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
1072 CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
1073
1074 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
1075 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1076 sizeof (UINT32) * CpuStatus->PackageCount,
1077 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
1078 );
1079 ASSERT (CpuStatus->ThreadCountPerPackage != 0);
1080 }
1081
1082 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
1083 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1084 sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
1085 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
1086 );
1087 ASSERT (CpuStatus->ThreadCountPerCore != 0);
1088 }
1089
1090 if (CpuFeatureInitDataSrc->ApLocation != 0) {
1091 CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1092 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
1093 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
1094 );
1095 ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
1096 }
1097}
1098
1103VOID
1105 VOID
1106 )
1107{
1108 ACPI_CPU_DATA *AcpiCpuData;
1109 IA32_DESCRIPTOR *Gdtr;
1110 IA32_DESCRIPTOR *Idtr;
1111 VOID *GdtForAp;
1112 VOID *IdtForAp;
1113 VOID *MachineCheckHandlerForAp;
1114 CPU_STATUS_INFORMATION *CpuStatus;
1115
1116 if (!mAcpiS3Enable) {
1117 return;
1118 }
1119
1120 //
1121 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
1122 //
1123 mAcpiCpuData.NumberOfCpus = 0;
1124
1125 //
1126 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
1127 //
1128 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
1129 if (AcpiCpuData == 0) {
1130 return;
1131 }
1132
1133 //
1134 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
1135 //
1136 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
1137
1138 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
1139 ASSERT (mAcpiCpuData.MtrrTable != 0);
1140
1141 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
1142
1143 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1144 ASSERT (mAcpiCpuData.GdtrProfile != 0);
1145
1146 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
1147
1148 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1149 ASSERT (mAcpiCpuData.IdtrProfile != 0);
1150
1151 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
1152
1153 //
1154 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1155 //
1156 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
1157 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
1158
1159 GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
1160 ASSERT (GdtForAp != NULL);
1161 IdtForAp = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));
1162 MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));
1163
1164 CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
1165 CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
1166 CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
1167
1168 Gdtr->Base = (UINTN)GdtForAp;
1169 Idtr->Base = (UINTN)IdtForAp;
1170 mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
1171
1172 ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));
1173
1174 if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
1175 //
1176 // If the CPU features will not be initialized by CpuFeaturesPei module during
1177 // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
1178 // which will be consumed in SmmRestoreCpu during next S3 resume.
1179 //
1180 CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);
1181
1182 CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;
1183
1184 mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
1185 sizeof (UINT32) * CpuStatus->PackageCount *
1186 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1187 );
1188 ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
1189
1190 mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
1191 sizeof (UINT32) * CpuStatus->PackageCount *
1192 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1193 );
1194 ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
1195
1196 InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
1197 }
1198}
1199
1204VOID
1206 VOID
1207 )
1208{
1209 mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
1210}
UINT64 UINTN
#define CPU_STACK_ALIGNMENT
VOID *EFIAPI GetFirstGuidHob(IN CONST EFI_GUID *Guid)
Definition: HobLib.c:215
#define NULL
Definition: Base.h:312
#define TRUE
Definition: Base.h:301
#define FALSE
Definition: Base.h:307
#define IN
Definition: Base.h:279
#define OUT
Definition: Base.h:284
VOID EFIAPI SwitchStack(IN SWITCH_STACK_ENTRY_POINT EntryPoint, IN VOID *Context1 OPTIONAL, IN VOID *Context2 OPTIONAL, IN VOID *NewStack,...)
Definition: SwitchStack.c:42
UINT64 EFIAPI BitFieldWrite64(IN UINT64 Operand, IN UINTN StartBit, IN UINTN EndBit, IN UINT64 Value)
Definition: BitField.c:755
VOID EFIAPI CpuDeadLoop(VOID)
Definition: CpuDeadLoop.c:23
VOID EFIAPI CpuPause(VOID)
UINT64 EFIAPI BitFieldRead64(IN UINT64 Operand, IN UINTN StartBit, IN UINTN EndBit)
Definition: BitField.c:719
UINT64 EFIAPI LShiftU64(IN UINT64 Operand, IN UINTN Count)
Definition: LShiftU64.c:28
VOID(EFIAPI * SWITCH_STACK_ENTRY_POINT)(IN VOID *Context1 OPTIONAL, IN VOID *Context2 OPTIONAL)
Definition: BaseLib.h:4519
VOID *EFIAPI CopyMem(OUT VOID *DestinationBuffer, IN CONST VOID *SourceBuffer, IN UINTN Length)
VOID *EFIAPI ZeroMem(OUT VOID *Buffer, IN UINTN Length)
EFI_STATUS EFIAPI InitializeCpuExceptionHandlers(IN EFI_VECTOR_HANDOFF_INFO *VectorInfo OPTIONAL)
VOID GetAcpiS3EnableFlag(VOID)
Definition: CpuS3.c:1205
VOID ProgramProcessorRegister(IN CPU_REGISTER_TABLE *RegisterTable, IN EFI_CPU_PHYSICAL_LOCATION *ApLocation, IN CPU_STATUS_INFORMATION *CpuStatus, IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags)
Definition: CpuS3.c:230
VOID EFIAPI SmmRestoreCpu(VOID)
Definition: CpuS3.c:761
UINTN ReadWriteCr(IN UINT32 CrIndex, IN BOOLEAN Read, IN OUT UINTN *CrValue)
Definition: CpuS3.c:173
VOID S3WaitForSemaphore(IN OUT volatile UINT32 *Sem)
Definition: CpuS3.c:147
VOID RestoreSmmConfigurationInS3(VOID)
Definition: CpuS3.c:722
VOID GetAcpiCpuData(VOID)
Definition: CpuS3.c:1104
VOID *EFIAPI AsmGetAddressMap(MP_ASSEMBLY_ADDRESS_MAP *AddressMap)
VOID CopyCpuFeatureInitDatatoSmram(IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst, IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc)
Definition: CpuS3.c:1042
VOID InitializeCpuAfterRebase(VOID)
Definition: CpuS3.c:684
VOID S3ReleaseSemaphore(IN OUT volatile UINT32 *Sem)
Definition: CpuS3.c:129
VOID PrepareApStartupVector(EFI_PHYSICAL_ADDRESS WorkingBuffer)
Definition: CpuS3.c:590
VOID InitSmmS3ResumeState(IN UINT32 Cr3)
Definition: CpuS3.c:898
VOID SetRegister(IN BOOLEAN PreSmmRegisterTable)
Definition: CpuS3.c:488
VOID InitializeAp(VOID)
Definition: CpuS3.c:545
VOID EFIAPI LoadMtrrData(EFI_PHYSICAL_ADDRESS MtrrTable)
Definition: CpuS3.c:99
VOID CopyRegisterTable(IN CPU_REGISTER_TABLE *DestinationRegisterTableList, IN CPU_REGISTER_TABLE *SourceRegisterTableList, IN UINT32 NumberOfCpus)
Definition: CpuS3.c:983
BOOLEAN IsRegisterTableEmpty(IN CPU_REGISTER_TABLE *RegisterTable, IN UINT32 NumberOfCpus)
Definition: CpuS3.c:1016
VOID InitializeCpuBeforeRebase(VOID)
Definition: CpuS3.c:640
BOOLEAN EFIAPI SaveAndSetDebugTimerInterrupt(IN BOOLEAN EnableStatus)
VOID EFIAPI InitializeDebugAgent(IN UINT32 InitFlag, IN VOID *Context OPTIONAL, IN DEBUG_AGENT_CONTINUE Function OPTIONAL)
#define ASSERT_EFI_ERROR(StatusParameter)
Definition: DebugLib.h:440
#define DEBUG(Expression)
Definition: DebugLib.h:417
#define ASSERT(Expression)
Definition: DebugLib.h:391
VOID EFIAPI AsmDisableCache(VOID)
Definition: DisableCache.c:18
VOID *EFIAPI AllocateZeroPool(IN UINTN AllocationSize)
VOID *EFIAPI AllocateCopyPool(IN UINTN AllocationSize, IN CONST VOID *Buffer)
VOID EFIAPI AsmEnableCache(VOID)
Definition: EnableCache.c:18
#define GET_GUID_HOB_DATA(HobStart)
Definition: HobLib.h:544
UINTN EFIAPI AsmReadCr3(VOID)
UINTN EFIAPI AsmWriteCr2(UINTN Cr2)
UINT64 EFIAPI AsmReadMsr64(IN UINT32 Index)
Definition: GccInlinePriv.c:60
UINTN EFIAPI AsmWriteCr3(UINTN Cr3)
UINTN EFIAPI AsmWriteCr4(UINTN Cr4)
UINTN EFIAPI AsmReadCr0(VOID)
UINTN EFIAPI AsmWriteCr0(UINTN Cr0)
UINTN EFIAPI AsmReadCr2(VOID)
UINT64 EFIAPI AsmWriteMsr64(IN UINT32 Index, IN UINT64 Value)
UINTN EFIAPI AsmReadCr4(VOID)
VOID TransferApToSafeState(IN UINTN ApHltLoopCode, IN UINTN TopOfStack, IN UINTN NumberToFinishAddress)
Definition: SmmFuncsArch.c:154
VOID InitSmmS3Cr3(VOID)
UINT32 EFIAPI MmioBitFieldWrite32(IN UINTN Address, IN UINTN StartBit, IN UINTN EndBit, IN UINT32 Value)
Definition: IoHighLevel.c:1912
VOID EFIAPI SendInitSipiSipiAllExcludingSelf(IN UINT32 StartupRoutine)
Definition: BaseXApicLib.c:551
VOID EFIAPI DisableLvtInterrupts(VOID)
Definition: BaseXApicLib.c:660
UINT32 EFIAPI GetInitialApicId(VOID)
Definition: BaseXApicLib.c:299
VOID EFIAPI ProgramVirtualWireMode(VOID)
Definition: BaseXApicLib.c:617
MTRR_SETTINGS *EFIAPI MtrrSetAllMtrrs(IN MTRR_SETTINGS *MtrrSetting)
Definition: MtrrLib.c:2800
VOID EFIAPI SmmCpuFeaturesCompleteSmmReadyToLock(VOID)
#define PcdGet64(TokenName)
Definition: PcdLib.h:375
#define PcdGetBool(TokenName)
Definition: PcdLib.h:401
#define FeaturePcdGet(TokenName)
Definition: PcdLib.h:50
EFI_SMM_SYSTEM_TABLE2 * gSmst
VOID ConfigSmmCodeAccessCheck(VOID)
VOID EFIAPI SmmRelocateBases(VOID)
VOID *EFIAPI AllocatePool(IN UINTN AllocationSize)
VOID *EFIAPI AllocatePages(IN UINTN Pages)
EFI_STATUS EFIAPI Lock(IN EFI_SMM_ACCESS2_PROTOCOL *This)
Definition: SmmAccessDxe.c:133
UINT32 EFIAPI InterlockedIncrement(IN volatile UINT32 *Value)
SPIN_LOCK *EFIAPI AcquireSpinLock(IN OUT SPIN_LOCK *SpinLock)
UINT32 EFIAPI InterlockedCompareExchange32(IN OUT volatile UINT32 *Value, IN UINT32 CompareValue, IN UINT32 ExchangeValue)
UINT32 EFIAPI InterlockedDecrement(IN volatile UINT32 *Value)
SPIN_LOCK *EFIAPI InitializeSpinLock(OUT SPIN_LOCK *SpinLock)
volatile UINTN SPIN_LOCK
SPIN_LOCK *EFIAPI ReleaseSpinLock(IN OUT SPIN_LOCK *SpinLock)
UINT64 EFI_PHYSICAL_ADDRESS
Definition: UefiBaseType.h:49
RETURN_STATUS EFI_STATUS
Definition: UefiBaseType.h:28
#define EFI_SIZE_TO_PAGES(Size)
Definition: UefiBaseType.h:197
#define EFI_SUCCESS
Definition: UefiBaseType.h:111
EFI_BOOT_SERVICES * gBS
VOID EFIAPI InitializeFloatingPointUnits(VOID)
@ EfiACPIMemoryNVS
@ AllocateMaxAddress
Definition: UefiSpec.h:37
VOID EFIAPI AsmDisablePaging64(IN UINT16 Cs, IN UINT32 EntryPoint, IN UINT32 Context1 OPTIONAL, IN UINT32 Context2 OPTIONAL, IN UINT32 NewStack)
UINT64 EFIAPI AsmMsrBitFieldWrite64(IN UINT32 Index, IN UINTN StartBit, IN UINTN EndBit, IN UINT64 Value)
Definition: X86Msr.c:505
VOID EFIAPI AsmReadIdtr(OUT IA32_DESCRIPTOR *Idtr)
Definition: X86ReadIdtr.c:24
VOID EFIAPI AsmWriteIdtr(IN CONST IA32_DESCRIPTOR *Idtr)
UINTN CurrentlyExecutingCpu
Definition: PiSmmCis.h:69
UINTN * CpuSaveStateSize
Definition: PiSmmCis.h:80
EFI_SMM_STARTUP_THIS_AP SmmStartupThisAp
Definition: PiSmmCis.h:140
UINTN * CpuSaveStateSize
Definition: PiSmmCis.h:160
EFI_PHYSICAL_ADDRESS CpuStart
Definition: PiMultiPhase.h:120