BaseMemoryLibSse2: Take advantage of write combining buffers
The current SSE2 implementation of the ZeroMem(), SetMem(), SetMem16(), SetMem32() and SetMem64() functions is writing 16 bytes per 16 bytes. It hurts performance so badly that this is even slower than a simple 'rep stos' (4% slower) in regular DRAM. To take full advantage of the 'movntdq' instruction, it is better to "queue" a total of 64 bytes in the write combining buffers. This patch implements such a change. Below is a table where I measured (with 'rdtsc') the time to write an entire 100MB RAM buffer. These functions operate almost two times faster. | Function | Arch | Untouched | 64 bytes | Result | |----------+------+-----------+----------+--------| | ZeroMem | Ia32 | 17765947 | 9136062 | 1.945x | | ZeroMem | X64 | 17525170 | 9233391 | 1.898x | | SetMem | Ia32 | 17522291 | 9137272 | 1.918x | | SetMem | X64 | 17949261 | 9176978 | 1.956x | | SetMem16 | Ia32 | 18219673 | 9372062 | 1.944x | | SetMem16 | X64 | 17523331 | 9275184 | 1.889x | | SetMem32 | Ia32 | 18495036 | 9273053 | 1.994x | | SetMem32 | X64 | 17368864 | 9285885 | 1.870x | | SetMem64 | Ia32 | 18564473 | 9241362 | 2.009x | | SetMem64 | X64 | 17506951 | 9280148 | 1.886x | Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com> Reviewed-by: Liming Gao <gaoliming@byosoft.com.cn>
This commit is contained in:
committed by
mergify[bot]
parent
19c87b7d44
commit
d25fd8710d
@@ -42,8 +42,8 @@ ASM_PFX(InternalMemSetMem):
|
||||
rep stosb
|
||||
.0:
|
||||
mov rcx, rdx
|
||||
and rdx, 15
|
||||
shr rcx, 4
|
||||
and rdx, 63
|
||||
shr rcx, 6
|
||||
jz @SetBytes
|
||||
mov ah, al ; ax <- Value repeats twice
|
||||
movdqa [rsp + 0x10], xmm0 ; save xmm0
|
||||
@@ -52,7 +52,10 @@ ASM_PFX(InternalMemSetMem):
|
||||
movlhps xmm0, xmm0 ; xmm0 <- Value repeats 16 times
|
||||
.1:
|
||||
movntdq [rdi], xmm0 ; rdi should be 16-byte aligned
|
||||
add rdi, 16
|
||||
movntdq [rdi + 16], xmm0
|
||||
movntdq [rdi + 32], xmm0
|
||||
movntdq [rdi + 48], xmm0
|
||||
add rdi, 64
|
||||
loop .1
|
||||
mfence
|
||||
movdqa xmm0, [rsp + 0x10] ; restore xmm0
|
||||
|
@@ -33,7 +33,7 @@ ASM_PFX(InternalMemSetMem16):
|
||||
mov r9, rdi
|
||||
xor rcx, rcx
|
||||
sub rcx, rdi
|
||||
and rcx, 15
|
||||
and rcx, 63
|
||||
mov rax, r8
|
||||
jz .0
|
||||
shr rcx, 1
|
||||
@@ -43,15 +43,18 @@ ASM_PFX(InternalMemSetMem16):
|
||||
rep stosw
|
||||
.0:
|
||||
mov rcx, rdx
|
||||
and edx, 7
|
||||
shr rcx, 3
|
||||
and edx, 31
|
||||
shr rcx, 5
|
||||
jz @SetWords
|
||||
movd xmm0, eax
|
||||
pshuflw xmm0, xmm0, 0
|
||||
movlhps xmm0, xmm0
|
||||
.1:
|
||||
movntdq [rdi], xmm0
|
||||
add rdi, 16
|
||||
movntdq [rdi + 16], xmm0
|
||||
movntdq [rdi + 32], xmm0
|
||||
movntdq [rdi + 48], xmm0
|
||||
add rdi, 64
|
||||
loop .1
|
||||
mfence
|
||||
@SetWords:
|
||||
|
@@ -43,14 +43,17 @@ ASM_PFX(InternalMemSetMem32):
|
||||
rep stosd
|
||||
.0:
|
||||
mov rcx, rdx
|
||||
and edx, 3
|
||||
shr rcx, 2
|
||||
and edx, 15
|
||||
shr rcx, 4
|
||||
jz @SetDwords
|
||||
movd xmm0, eax
|
||||
pshufd xmm0, xmm0, 0
|
||||
.1:
|
||||
movntdq [rdi], xmm0
|
||||
add rdi, 16
|
||||
movntdq [rdi + 16], xmm0
|
||||
movntdq [rdi + 32], xmm0
|
||||
movntdq [rdi + 48], xmm0
|
||||
add rdi, 64
|
||||
loop .1
|
||||
mfence
|
||||
@SetDwords:
|
||||
|
@@ -37,17 +37,28 @@ ASM_PFX(InternalMemSetMem64):
|
||||
add rdx, 8
|
||||
dec rcx
|
||||
.0:
|
||||
shr rcx, 1
|
||||
push rbx
|
||||
mov rbx, rcx
|
||||
and rbx, 7
|
||||
shr rcx, 3
|
||||
jz @SetQwords
|
||||
movlhps xmm0, xmm0
|
||||
.1:
|
||||
movntdq [rdx], xmm0
|
||||
lea rdx, [rdx + 16]
|
||||
movntdq [rdx + 16], xmm0
|
||||
movntdq [rdx + 32], xmm0
|
||||
movntdq [rdx + 48], xmm0
|
||||
lea rdx, [rdx + 64]
|
||||
loop .1
|
||||
mfence
|
||||
@SetQwords:
|
||||
jnc .2
|
||||
mov [rdx], r8
|
||||
push rdi
|
||||
mov rcx, rbx
|
||||
mov rax, r8
|
||||
mov rdi, rdx
|
||||
rep stosq
|
||||
pop rdi
|
||||
.2:
|
||||
pop rbx
|
||||
ret
|
||||
|
||||
|
@@ -32,7 +32,7 @@ ASM_PFX(InternalMemZeroMem):
|
||||
xor rcx, rcx
|
||||
xor eax, eax
|
||||
sub rcx, rdi
|
||||
and rcx, 15
|
||||
and rcx, 63
|
||||
mov r8, rdi
|
||||
jz .0
|
||||
cmp rcx, rdx
|
||||
@@ -41,13 +41,16 @@ ASM_PFX(InternalMemZeroMem):
|
||||
rep stosb
|
||||
.0:
|
||||
mov rcx, rdx
|
||||
and edx, 15
|
||||
shr rcx, 4
|
||||
and edx, 63
|
||||
shr rcx, 6
|
||||
jz @ZeroBytes
|
||||
pxor xmm0, xmm0
|
||||
.1:
|
||||
movntdq [rdi], xmm0 ; rdi should be 16-byte aligned
|
||||
add rdi, 16
|
||||
movntdq [rdi], xmm0
|
||||
movntdq [rdi + 16], xmm0
|
||||
movntdq [rdi + 32], xmm0
|
||||
movntdq [rdi + 48], xmm0
|
||||
add rdi, 64
|
||||
loop .1
|
||||
mfence
|
||||
@ZeroBytes:
|
||||
|
Reference in New Issue
Block a user