The current SSE2 implementation of the ZeroMem(), SetMem(), SetMem16(), SetMem32() and SetMem64() functions writes 16 bytes at a time. This hurts performance so badly that it is even slower than a simple 'rep stos' (4% slower) in regular DRAM. To take full advantage of the 'movntdq' instruction, it is better to "queue" a total of 64 bytes in the write-combining buffers. This patch implements such a change. Below is a table where I measured (with 'rdtsc') the time to write an entire 100 MB RAM buffer. These functions now operate almost two times faster. | Function | Arch | Untouched | 64 bytes | Result | |----------+------+-----------+----------+--------| | ZeroMem | Ia32 | 17765947 | 9136062 | 1.945x | | ZeroMem | X64 | 17525170 | 9233391 | 1.898x | | SetMem | Ia32 | 17522291 | 9137272 | 1.918x | | SetMem | X64 | 17949261 | 9176978 | 1.956x | | SetMem16 | Ia32 | 18219673 | 9372062 | 1.944x | | SetMem16 | X64 | 17523331 | 9275184 | 1.889x | | SetMem32 | Ia32 | 18495036 | 9273053 | 1.994x | | SetMem32 | X64 | 17368864 | 9285885 | 1.870x | | SetMem64 | Ia32 | 18564473 | 9241362 | 2.009x | | SetMem64 | X64 | 17506951 | 9280148 | 1.886x | Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com> Reviewed-by: Liming Gao <gaoliming@byosoft.com.cn>
71 lines
2.1 KiB
NASM
;------------------------------------------------------------------------------
;
; Copyright (c) 2006, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
;
;   SetMem.nasm
;
; Abstract:
;
;   SetMem function
;
; Notes:
;
;------------------------------------------------------------------------------
    SECTION .text
;------------------------------------------------------------------------------
|
|
; VOID *
|
|
; EFIAPI
|
|
; InternalMemSetMem (
|
|
; IN VOID *Buffer,
|
|
; IN UINTN Count,
|
|
; IN UINT8 Value
|
|
; );
|
|
;------------------------------------------------------------------------------
|
|
global ASM_PFX(InternalMemSetMem)
|
|
ASM_PFX(InternalMemSetMem):
|
|
push edi
|
|
mov edx, [esp + 12] ; edx <- Count
|
|
mov edi, [esp + 8] ; edi <- Buffer
|
|
mov al, [esp + 16] ; al <- Value
|
|
xor ecx, ecx
|
|
sub ecx, edi
|
|
and ecx, 63 ; ecx + edi aligns on 16-byte boundary
|
|
jz .0
|
|
cmp ecx, edx
|
|
cmova ecx, edx
|
|
sub edx, ecx
|
|
rep stosb
|
|
.0:
|
|
mov ecx, edx
|
|
and edx, 63
|
|
shr ecx, 6 ; ecx <- # of DQwords to set
|
|
jz @SetBytes
|
|
mov ah, al ; ax <- Value | (Value << 8)
|
|
add esp, -16
|
|
movdqu [esp], xmm0 ; save xmm0
|
|
movd xmm0, eax
|
|
pshuflw xmm0, xmm0, 0 ; xmm0[0..63] <- Value repeats 8 times
|
|
movlhps xmm0, xmm0 ; xmm0 <- Value repeats 16 times
|
|
.1:
|
|
movntdq [edi], xmm0 ; edi should be 16-byte aligned
|
|
movntdq [edi + 16], xmm0
|
|
movntdq [edi + 32], xmm0
|
|
movntdq [edi + 48], xmm0
|
|
add edi, 64
|
|
loop .1
|
|
mfence
|
|
movdqu xmm0, [esp] ; restore xmm0
|
|
add esp, 16 ; stack cleanup
|
|
@SetBytes:
|
|
mov ecx, edx
|
|
rep stosb
|
|
mov eax, [esp + 8] ; eax <- Buffer as return value
|
|
pop edi
|
|
ret
|
|
|