MdePkg: Clean up source files

1. Do not use tab characters
2. No trailing white space at the end of any line
3. All files must end with CRLF

Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Liming Gao <liming.gao@intel.com>
This commit is contained in:
Liming Gao
2018-06-27 21:11:33 +08:00
parent d1102dba72
commit 9095d37b8f
729 changed files with 15683 additions and 15683 deletions

View File

@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -44,19 +44,19 @@ ASM_PFX(AsmCpuidEx):
test %r10, %r10
jz L1
mov %ecx,(%r10)
L1:
L1:
mov %r8, %rcx
jrcxz L2
movl %eax,(%rcx)
L2:
L2:
mov %r9, %rcx
jrcxz L3
mov %ebx, (%rcx)
L3:
L3:
mov 0x40(%rsp), %rcx
jrcxz L4
mov %edx, (%rcx)
L4:
L4:
pop %rax # restore Index to rax as return value
pop %rbx
ret

View File

@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -21,7 +21,7 @@
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# VOID
@@ -37,29 +37,29 @@
ASM_GLOBAL ASM_PFX(InternalX86DisablePaging64)
ASM_PFX(InternalX86DisablePaging64):
cli
cli
lea L1(%rip), %rsi # rsi <- The start address of transition code
mov 0x28(%rsp), %edi # rdi <- New stack
lea _mTransitionEnd(%rip), %rax # rax <- end of transition code
sub %rsi, %rax # rax <- The size of transition piece code
add $4, %rax # round rax up to the next 4 byte boundary
and $0xfc, %al
sub %rax, %rdi # rdi <- use stack to hold transition code
sub %rax, %rdi # rdi <- use stack to hold transition code
mov %edi, %r10d # r10 <- The start address of transition code below 4G
push %rcx # save rcx to stack
mov %rax, %rcx # rcx <- The size of transition piece code
rep
movsb # copy transition code to (new stack - 64byte) below 4G
pop %rcx # restore rcx
mov %r8d, %esi
mov %r9d, %edi
mov %r8d, %esi
mov %r9d, %edi
mov %r10d, %eax
sub $4, %eax
push %rcx # push Cs to stack
push %r10 # push address of transition code on stack
push %r10 # push address of transition code on stack
.byte 0x48, 0xcb # retq: Use far return to load CS register from stack
# (Use raw byte code since some GNU assemblers generate incorrect code for "retq")
# (Use raw byte code since some GNU assemblers generate incorrect code for "retq")
L1:
mov %eax,%esp # set up new stack
mov %cr0,%rax
@@ -68,9 +68,9 @@ L1:
mov %edx,%ebx # save EntryPoint to ebx, for rdmsr will overwrite edx
mov $0xc0000080,%ecx
rdmsr
rdmsr
and $0xfe,%ah # clear LME
wrmsr
wrmsr
mov %cr4,%rax
and $0xdf,%al # clear PAE
mov %rax,%cr4

View File

@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -15,7 +15,7 @@
#
# Abstract:
#
# Flush all caches with a WBINVD instruction, clear the CD bit of CR0 to 0, and clear
# Flush all caches with a WBINVD instruction, clear the CD bit of CR0 to 0, and clear
# the NW bit of CR0 to 0
#
# Notes:

View File

@@ -1,8 +1,8 @@
/** @file
GCC inline implementation of BaseLib processor specific functions.
Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>
Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -33,7 +33,7 @@ MemoryFence (
)
{
// This is a little bit of overkill and it is more about the compiler that it is
// actually processor synchronization. This is like the _ReadWriteBarrier
// actually processor synchronization. This is like the _ReadWriteBarrier
// Microsoft specific intrinsic
__asm__ __volatile__ ("":::"memory");
}
@@ -66,7 +66,7 @@ EFIAPI
DisableInterrupts (
VOID
)
{
{
__asm__ __volatile__ ("cli"::: "memory");
}
@@ -130,14 +130,14 @@ AsmReadMsr64 (
{
UINT32 LowData;
UINT32 HighData;
__asm__ __volatile__ (
"rdmsr"
: "=a" (LowData), // %0
"=d" (HighData) // %1
: "c" (Index) // %2
);
return (((UINT64)HighData) << 32) | LowData;
}
@@ -170,7 +170,7 @@ AsmWriteMsr64 (
LowData = (UINT32)(Value);
HighData = (UINT32)(Value >> 32);
__asm__ __volatile__ (
"wrmsr"
:
@@ -178,7 +178,7 @@ AsmWriteMsr64 (
"a" (LowData),
"d" (HighData)
);
return Value;
}
@@ -201,13 +201,13 @@ AsmReadEflags (
)
{
UINTN Eflags;
__asm__ __volatile__ (
"pushfq \n\t"
"pop %0 "
: "=r" (Eflags) // %0
);
return Eflags;
}
@@ -230,12 +230,12 @@ AsmReadCr0 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%cr0,%0"
"mov %%cr0,%0"
: "=r" (Data) // %0
);
return Data;
}
@@ -257,12 +257,12 @@ AsmReadCr2 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%cr2, %0"
"mov %%cr2, %0"
: "=r" (Data) // %0
);
return Data;
}
@@ -283,12 +283,12 @@ AsmReadCr3 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%cr3, %0"
"mov %%cr3, %0"
: "=r" (Data) // %0
);
return Data;
}
@@ -310,12 +310,12 @@ AsmReadCr4 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%cr4, %0"
"mov %%cr4, %0"
: "=r" (Data) // %0
);
return Data;
}
@@ -441,12 +441,12 @@ AsmReadDr0 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr0, %0"
: "=r" (Data)
);
return Data;
}
@@ -468,12 +468,12 @@ AsmReadDr1 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr1, %0"
: "=r" (Data)
);
return Data;
}
@@ -495,12 +495,12 @@ AsmReadDr2 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr2, %0"
: "=r" (Data)
);
return Data;
}
@@ -522,12 +522,12 @@ AsmReadDr3 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr3, %0"
: "=r" (Data)
);
return Data;
}
@@ -549,12 +549,12 @@ AsmReadDr4 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr4, %0"
: "=r" (Data)
);
return Data;
}
@@ -576,12 +576,12 @@ AsmReadDr5 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr5, %0"
: "=r" (Data)
);
return Data;
}
@@ -603,12 +603,12 @@ AsmReadDr6 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr6, %0"
: "=r" (Data)
);
return Data;
}
@@ -630,12 +630,12 @@ AsmReadDr7 (
)
{
UINTN Data;
__asm__ __volatile__ (
"mov %%dr7, %0"
: "=r" (Data)
);
return Data;
}
@@ -864,12 +864,12 @@ AsmReadCs (
)
{
UINT16 Data;
__asm__ __volatile__ (
"mov %%cs, %0"
:"=a" (Data)
);
return Data;
}
@@ -890,12 +890,12 @@ AsmReadDs (
)
{
UINT16 Data;
__asm__ __volatile__ (
"mov %%ds, %0"
:"=a" (Data)
);
return Data;
}
@@ -916,12 +916,12 @@ AsmReadEs (
)
{
UINT16 Data;
__asm__ __volatile__ (
"mov %%es, %0"
:"=a" (Data)
);
return Data;
}
@@ -942,12 +942,12 @@ AsmReadFs (
)
{
UINT16 Data;
__asm__ __volatile__ (
"mov %%fs, %0"
:"=a" (Data)
);
return Data;
}
@@ -968,12 +968,12 @@ AsmReadGs (
)
{
UINT16 Data;
__asm__ __volatile__ (
"mov %%gs, %0"
:"=a" (Data)
);
return Data;
}
@@ -994,12 +994,12 @@ AsmReadSs (
)
{
UINT16 Data;
__asm__ __volatile__ (
"mov %%ds, %0"
:"=a" (Data)
);
return Data;
}
@@ -1020,12 +1020,12 @@ AsmReadTr (
)
{
UINT16 Data;
__asm__ __volatile__ (
"str %0"
: "=r" (Data)
);
return Data;
}
@@ -1072,7 +1072,7 @@ InternalX86WriteGdtr (
:
: "m" (*Gdtr)
);
}
@@ -1137,12 +1137,12 @@ AsmReadLdtr (
)
{
UINT16 Data;
__asm__ __volatile__ (
"sldt %0"
: "=g" (Data) // %0
);
return Data;
}
@@ -1190,7 +1190,7 @@ InternalX86FxSave (
"fxsave %0"
:
: "m" (*Buffer) // %0
);
);
}
@@ -1239,7 +1239,7 @@ AsmReadMm0 (
"movd %%mm0, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1265,7 +1265,7 @@ AsmReadMm1 (
"movd %%mm1, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1291,7 +1291,7 @@ AsmReadMm2 (
"movd %%mm2, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1317,7 +1317,7 @@ AsmReadMm3 (
"movd %%mm3, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1343,7 +1343,7 @@ AsmReadMm4 (
"movd %%mm4, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1369,7 +1369,7 @@ AsmReadMm5 (
"movd %%mm5, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1395,7 +1395,7 @@ AsmReadMm6 (
"movd %%mm6, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1421,7 +1421,7 @@ AsmReadMm7 (
"movd %%mm7, %0 \n\t"
: "=r" (Data) // %0
);
return Data;
}
@@ -1443,7 +1443,7 @@ AsmWriteMm0 (
{
__asm__ __volatile__ (
"movd %0, %%mm0" // %0
:
:
: "m" (Value)
);
}
@@ -1466,7 +1466,7 @@ AsmWriteMm1 (
{
__asm__ __volatile__ (
"movd %0, %%mm1" // %0
:
:
: "m" (Value)
);
}
@@ -1489,7 +1489,7 @@ AsmWriteMm2 (
{
__asm__ __volatile__ (
"movd %0, %%mm2" // %0
:
:
: "m" (Value)
);
}
@@ -1512,7 +1512,7 @@ AsmWriteMm3 (
{
__asm__ __volatile__ (
"movd %0, %%mm3" // %0
:
:
: "m" (Value)
);
}
@@ -1535,7 +1535,7 @@ AsmWriteMm4 (
{
__asm__ __volatile__ (
"movd %0, %%mm4" // %0
:
:
: "m" (Value)
);
}
@@ -1558,7 +1558,7 @@ AsmWriteMm5 (
{
__asm__ __volatile__ (
"movd %0, %%mm5" // %0
:
:
: "m" (Value)
);
}
@@ -1581,7 +1581,7 @@ AsmWriteMm6 (
{
__asm__ __volatile__ (
"movd %0, %%mm6" // %0
:
:
: "m" (Value)
);
}
@@ -1604,7 +1604,7 @@ AsmWriteMm7 (
{
__asm__ __volatile__ (
"movd %0, %%mm7" // %0
:
:
: "m" (Value)
);
}
@@ -1627,14 +1627,14 @@ AsmReadTsc (
{
UINT32 LowData;
UINT32 HiData;
__asm__ __volatile__ (
"rdtsc"
: "=a" (LowData),
"=d" (HiData)
);
return (((UINT64)HiData) << 32) | LowData;
return (((UINT64)HiData) << 32) | LowData;
}
@@ -1657,15 +1657,15 @@ AsmReadPmc (
{
UINT32 LowData;
UINT32 HiData;
__asm__ __volatile__ (
"rdpmc"
: "=a" (LowData),
"=d" (HiData)
: "c" (Index)
);
return (((UINT64)HiData) << 32) | LowData;
return (((UINT64)HiData) << 32) | LowData;
}
@@ -1700,7 +1700,7 @@ AsmMonitor (
"c" (Ecx),
"d" (Edx)
);
return Eax;
}
@@ -1728,12 +1728,12 @@ AsmMwait (
{
__asm__ __volatile__ (
"mwait"
:
:
: "a" (Eax),
"c" (Ecx)
);
return Eax;
return Eax;
}
@@ -1768,7 +1768,7 @@ AsmInvd (
)
{
__asm__ __volatile__ ("invd":::"memory");
}
@@ -1796,10 +1796,10 @@ AsmFlushCacheLine (
__asm__ __volatile__ (
"clflush (%0)"
:
: "r" (LinearAddress)
: "r" (LinearAddress)
: "memory"
);
return LinearAddress;
}

View File

@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -49,6 +49,6 @@ ASM_PFX(InternalLongJump):
movdqu 0xB8(%rcx), %xmm12
movdqu 0xC8(%rcx), %xmm13
movdqu 0xD8(%rcx), %xmm14
movdqu 0xE8(%rcx), %xmm15
movdqu 0xE8(%rcx), %xmm15
mov %rdx, %rax # set return value
jmp *0x48(%rcx)

View File

@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -39,7 +39,7 @@ ASM_PFX(SetJump):
mov %rdx,0x48(%rcx)
# save non-volatile fp registers
stmxcsr 0x50(%rcx)
movdqu %xmm6, 0x58(%rcx)
movdqu %xmm6, 0x58(%rcx)
movdqu %xmm7, 0x68(%rcx)
movdqu %xmm8, 0x78(%rcx)
movdqu %xmm9, 0x88(%rcx)
@@ -48,6 +48,6 @@ ASM_PFX(SetJump):
movdqu %xmm12, 0xB8(%rcx)
movdqu %xmm13, 0xC8(%rcx)
movdqu %xmm14, 0xD8(%rcx)
movdqu %xmm15, 0xE8(%rcx)
movdqu %xmm15, 0xE8(%rcx)
xor %rax,%rax
jmpq *%rdx

View File

@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -37,9 +37,9 @@
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InternalSwitchStack)
ASM_PFX(InternalSwitchStack):
pushq %rbp
movq %rsp, %rbp
pushq %rbp
movq %rsp, %rbp
mov %rcx, %rax // Shift registers for new call
mov %rdx, %rcx
mov %r8, %rdx

View File

@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2013, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -49,7 +49,7 @@ ASM_GLOBAL ASM_PFX(InternalAsmThunk16)
.set IA32_REGS_SIZE, 56
.data
.set Lm16Size, ASM_PFX(InternalAsmThunk16) - ASM_PFX(m16Start)
ASM_PFX(m16Size): .word Lm16Size
.set LmThunk16Attr, L_ThunkAttr - ASM_PFX(m16Start)
@@ -85,7 +85,7 @@ ASM_PFX(BackFromUserCode):
.byte 0xe # push cs
.byte 0x66
call L_Base # push eip
L_Base:
L_Base:
.byte 0x66
pushq $0 # reserved high order 32 bits of EFlags
.byte 0x66, 0x9c # pushfd actually
@@ -102,13 +102,13 @@ L_ThunkAttr: .space 4
movl $0x15cd2401,%eax # mov ax, 2401h & int 15h
cli # disable interrupts
jnc L_2
L_1:
L_1:
testb $THUNK_ATTRIBUTE_DISABLE_A20_MASK_KBD_CTRL, %dl
jz L_2
inb $0x92,%al
orb $2,%al
outb %al, $0x92 # deactivate A20M#
L_2:
L_2:
xorw %ax, %ax # xor eax, eax
movl %ss, %eax # mov ax, ss
lea IA32_REGS_SIZE(%esp), %bp
@@ -180,13 +180,13 @@ ASM_PFX(ToUserCode):
movw %bx,%sp # set up 16-bit stack pointer
.byte 0x66 # make the following call 32-bit
call L_Base1 # push eip
L_Base1:
L_Base1:
popw %bp # ebp <- address of L_Base1
pushq (IA32_REGS_SIZE + 2)(%esp)
lea 0x0c(%rsi), %eax
pushq %rax
lret # execution begins at next instruction
L_RealMode:
L_RealMode:
.byte 0x66,0x2e # CS and operand size override
lidt (_16Idtr - L_Base1)(%rsi)
.byte 0x66,0x61 # popad
@@ -243,7 +243,7 @@ ASM_PFX(InternalAsmThunk16):
pushq %rbx
pushq %rsi
pushq %rdi
movl %ds, %ebx
pushq %rbx # Save ds segment register on the stack
movl %es, %ebx
@@ -257,7 +257,7 @@ ASM_PFX(InternalAsmThunk16):
movzwl _SS(%rsi), %r8d
movl _ESP(%rsi), %edi
lea -(IA32_REGS_SIZE + 4)(%edi), %rdi
imul $16, %r8d, %eax
imul $16, %r8d, %eax
movl %edi,%ebx # ebx <- stack for 16-bit code
pushq $(IA32_REGS_SIZE / 4)
addl %eax,%edi # edi <- linear address of 16-bit stack
@@ -268,26 +268,26 @@ ASM_PFX(InternalAsmThunk16):
movl %edx,%eax # eax <- transition code address
andl $0xf,%edx
shll $12,%eax # segment address in high order 16 bits
.set LBackFromUserCodeDelta, ASM_PFX(BackFromUserCode) - ASM_PFX(m16Start)
.set LBackFromUserCodeDelta, ASM_PFX(BackFromUserCode) - ASM_PFX(m16Start)
lea (LBackFromUserCodeDelta)(%rdx), %ax
stosl # [edi] <- return address of user code
sgdt 0x60(%rsp) # save GDT stack in argument space
movzwq 0x60(%rsp), %r10 # r10 <- GDT limit
lea ((ASM_PFX(InternalAsmThunk16) - L_SavedCr4) + 0xf)(%rcx), %r11
andq $0xfffffffffffffff0, %r11 # r11 <- 16-byte aligned shadowed GDT table in real mode buffer
movzwq 0x60(%rsp), %r10 # r10 <- GDT limit
lea ((ASM_PFX(InternalAsmThunk16) - L_SavedCr4) + 0xf)(%rcx), %r11
andq $0xfffffffffffffff0, %r11 # r11 <- 16-byte aligned shadowed GDT table in real mode buffer
movw %r10w, (SavedGdt - L_SavedCr4)(%rcx) # save the limit of shadowed GDT table
movq %r11, (SavedGdt - L_SavedCr4 + 0x2)(%rcx) # save the base address of shadowed GDT table
movq 0x62(%rsp) ,%rsi # rsi <- the original GDT base address
xchg %r10, %rcx # save rcx to r10 and initialize rcx to be the limit of GDT table
xchg %r10, %rcx # save rcx to r10 and initialize rcx to be the limit of GDT table
incq %rcx # rcx <- the size of memory to copy
xchg %r11, %rdi # save rdi to r11 and initialize rdi to the base address of shadowed GDT table
rep
movsb # perform memory copy to shadow GDT table
movq %r10, %rcx # restore the original rcx before memory copy
movq %r11, %rdi # restore the original rdi before memory copy
sidt 0x50(%rsp)
movq %cr0, %rax
.set LSavedCrDelta, L_SavedCr0 - L_SavedCr4
@@ -311,21 +311,21 @@ ASM_PFX(InternalAsmThunk16):
.byte 0xff, 0x69 # jmp (_EntryPoint - L_SavedCr4)(%rcx)
.set Ltemp1, _EntryPoint - L_SavedCr4
.byte Ltemp1
L_RetFromRealMode:
L_RetFromRealMode:
popfq
lgdt 0x60(%rsp) # restore protected mode GDTR
lidt 0x50(%rsp) # restore protected mode IDTR
lea -IA32_REGS_SIZE(%rbp), %eax
.byte 0x0f, 0xa9 # pop gs
.byte 0x0f, 0xa1 # pop fs
popq %rbx
movl %ebx, %ss
popq %rbx
movl %ebx, %es
popq %rbx
movl %ebx, %ds
popq %rdi
popq %rsi
popq %rbx

View File

@@ -3,7 +3,7 @@
;------------------------------------------------------------------------------
;
; Copyright (c) 2006 - 2013, Intel Corporation. All rights reserved.<BR>
; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -240,14 +240,14 @@ BITS 64
push rbx
push rsi
push rdi
mov ebx, ds
push rbx ; Save ds segment register on the stack
mov ebx, es
push rbx ; Save es segment register on the stack
mov ebx, ss
push rbx ; Save ss segment register on the stack
push fs
push gs
mov rsi, rcx
@@ -266,15 +266,15 @@ BITS 64
shl eax, 12 ; segment address in high order 16 bits
lea ax, [rdx + (_BackFromUserCode - ASM_PFX(m16Start))] ; offset address
stosd ; [edi] <- return address of user code
sgdt [rsp + 60h] ; save GDT stack in argument space
movzx r10, word [rsp + 60h] ; r10 <- GDT limit
movzx r10, word [rsp + 60h] ; r10 <- GDT limit
lea r11, [rcx + (ASM_PFX(InternalAsmThunk16) - _BackFromUserCode.SavedCr4End) + 0xf]
and r11, ~0xf ; r11 <- 16-byte aligned shadowed GDT table in real mode buffer
mov [rcx + (SavedGdt - _BackFromUserCode.SavedCr4End)], r10w ; save the limit of shadowed GDT table
mov [rcx + (SavedGdt - _BackFromUserCode.SavedCr4End) + 2], r11 ; save the base address of shadowed GDT table
mov rsi, [rsp + 62h] ; rsi <- the original GDT base address
xchg rcx, r10 ; save rcx to r10 and initialize rcx to be the limit of GDT table
inc rcx ; rcx <- the size of memory to copy
@@ -282,7 +282,7 @@ BITS 64
rep movsb ; perform memory copy to shadow GDT table
mov rcx, r10 ; restore the original rcx before memory copy
mov rdi, r11 ; restore the original rdi before memory copy
sidt [rsp + 50h] ; save IDT stack in argument space
mov rax, cr0
mov [rcx + (_BackFromUserCode.SavedCr0End - 4 - _BackFromUserCode.SavedCr4End)], eax