The current SSE2 implementation of the ZeroMem(), SetMem(), SetMem16(),
SetMem32() and SetMem64() functions writes only 16 bytes per iteration.
This hurts performance so much that it is even slower (by about 4%) than
a simple 'rep stos' in regular DRAM. To take full advantage of the
'movntdq' instruction, it is better to "queue" a total of 64 bytes in
the write-combining buffers. This patch implements that change.

The table below lists the TSC cycle counts, measured with 'rdtsc', to
write an entire 100 MB RAM buffer. With this patch these functions run
almost twice as fast.

| Function | Arch | Untouched | 64 bytes | Result |
|----------+------+-----------+----------+--------|
| ZeroMem  | Ia32 |  17765947 |  9136062 | 1.945x |
| ZeroMem  | X64  |  17525170 |  9233391 | 1.898x |
| SetMem   | Ia32 |  17522291 |  9137272 | 1.918x |
| SetMem   | X64  |  17949261 |  9176978 | 1.956x |
| SetMem16 | Ia32 |  18219673 |  9372062 | 1.944x |
| SetMem16 | X64  |  17523331 |  9275184 | 1.889x |
| SetMem32 | Ia32 |  18495036 |  9273053 | 1.994x |
| SetMem32 | X64  |  17368864 |  9285885 | 1.870x |
| SetMem64 | Ia32 |  18564473 |  9241362 | 2.009x |
| SetMem64 | X64  |  17506951 |  9280148 | 1.886x |

Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com>
Reviewed-by: Liming Gao <gaoliming@byosoft.com.cn>
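For illustration only (not part of this patch), the 64-byte batching
corresponds roughly to the C intrinsics sketch below. The function name
is made up and the head/tail handling is omitted; the actual
implementation is the NASM code that follows.

  #include <emmintrin.h>   /* SSE2: _mm_set1_epi8, _mm_stream_si128, _mm_mfence */
  #include <stddef.h>
  #include <stdint.h>

  /*
   * Hypothetical sketch: Buffer is assumed 16-byte aligned and Length a
   * multiple of 64; the real code covers the unaligned head and the
   * remaining tail bytes with 'rep stosb'.
   */
  static void
  SetMemWcBatched (void *Buffer, size_t Length, uint8_t Value)
  {
    __m128i  Pattern = _mm_set1_epi8 ((char) Value);  /* Value repeated 16 times */
    char    *Ptr     = (char *) Buffer;
    char    *End     = Ptr + Length;

    while (Ptr < End) {
      /* Four 16-byte 'movntdq' stores queue one full 64-byte write-combining buffer. */
      _mm_stream_si128 ((__m128i *) (Ptr +  0), Pattern);
      _mm_stream_si128 ((__m128i *) (Ptr + 16), Pattern);
      _mm_stream_si128 ((__m128i *) (Ptr + 32), Pattern);
      _mm_stream_si128 ((__m128i *) (Ptr + 48), Pattern);
      Ptr += 64;
    }
    _mm_mfence ();   /* make the weakly-ordered stores globally visible */
  }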
;------------------------------------------------------------------------------
;
; Copyright (c) 2006, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
;
; SetMem.nasm
;
; Abstract:
;
; SetMem function
;
; Notes:
;
;------------------------------------------------------------------------------
DEFAULT REL
SECTION .text
;------------------------------------------------------------------------------
; VOID *
; InternalMemSetMem (
; IN VOID *Buffer,
; IN UINTN Count,
; IN UINT8 Value
; )
;------------------------------------------------------------------------------
global ASM_PFX(InternalMemSetMem)
ASM_PFX(InternalMemSetMem):
push rdi
mov rdi, rcx ; rdi <- Buffer
mov al, r8b ; al <- Value
mov r9, rdi ; r9 <- Buffer as return value
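    ; Head: store up to 15 bytes with 'rep stosb' to 16-byte align rdi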
xor rcx, rcx
sub rcx, rdi
and rcx, 15 ; rcx + rdi aligns on 16-byte boundary
jz .0
cmp rcx, rdx
cmova rcx, rdx
sub rdx, rcx
rep stosb
.0:
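    ; rcx <- number of 64-byte blocks, rdx <- tail byte count (0..63)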
mov rcx, rdx
and rdx, 63
shr rcx, 6
jz @SetBytes
mov ah, al ; ax <- Value repeats twice
movdqa [rsp + 0x10], xmm0 ; save xmm0
movd xmm0, eax ; xmm0[0..16] <- Value repeats twice
pshuflw xmm0, xmm0, 0 ; xmm0[0..63] <- Value repeats 8 times
movlhps xmm0, xmm0 ; xmm0 <- Value repeats 16 times
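    ; Body: each iteration queues 4 x 16 bytes, one full 64-byte write-combining buffer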
.1:
movntdq [rdi], xmm0 ; rdi should be 16-byte aligned
movntdq [rdi + 16], xmm0
movntdq [rdi + 32], xmm0
movntdq [rdi + 48], xmm0
add rdi, 64
loop .1
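    ; Make the weakly-ordered 'movntdq' stores globally visible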
mfence
movdqa xmm0, [rsp + 0x10] ; restore xmm0
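    ; Tail: store the remaining 0..63 bytes with 'rep stosb'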
@SetBytes:
mov ecx, edx ; high 32 bits of rcx are always zero
rep stosb
mov rax, r9 ; rax <- Return value
pop rdi
ret