The current SSE2 implementation of the ZeroMem(), SetMem(), SetMem16(), SetMem32() and SetMem64() functions issues its non-temporal stores only 16 bytes at a time. This hurts performance so badly that it is even slower (about 4% slower) than a simple 'rep stos' in regular DRAM. To take full advantage of the 'movntdq' instruction, it is better to "queue" a total of 64 bytes, i.e. one full write-combining buffer, before moving on. This patch implements that change. The table below shows the time, measured with 'rdtsc', to fill an entire 100 MB RAM buffer; these functions now run almost twice as fast.

| Function | Arch | Untouched (ticks) | 64 bytes (ticks) | Speed-up |
|----------+------+-------------------+------------------+----------|
| ZeroMem  | Ia32 |          17765947 |          9136062 |   1.945x |
| ZeroMem  | X64  |          17525170 |          9233391 |   1.898x |
| SetMem   | Ia32 |          17522291 |          9137272 |   1.918x |
| SetMem   | X64  |          17949261 |          9176978 |   1.956x |
| SetMem16 | Ia32 |          18219673 |          9372062 |   1.944x |
| SetMem16 | X64  |          17523331 |          9275184 |   1.889x |
| SetMem32 | Ia32 |          18495036 |          9273053 |   1.994x |
| SetMem32 | X64  |          17368864 |          9285885 |   1.870x |
| SetMem64 | Ia32 |          18564473 |          9241362 |   2.009x |
| SetMem64 | X64  |          17506951 |          9280148 |   1.886x |

Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com>
Reviewed-by: Liming Gao <gaoliming@byosoft.com.cn>
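For context, a minimal, illustrative sketch of the inner-loop change (not the exact pre-patch code): the previous shape issued a single 16-byte 'movntdq' per iteration, while the patched shape issues four, so each iteration fills one 64-byte write-combining buffer before advancing. The labels OldStyleLoop/NewStyleLoop are made up for this sketch, and edx (destination), xmm0 (16-byte pattern) and ecx (iteration count) are assumed to be set up as in the routines this patch touches.

        SECTION .text
    OldStyleLoop:                           ; pre-patch shape: 16 bytes per iteration
        movntdq [edx], xmm0
        add     edx, 16
        loop    OldStyleLoop

    NewStyleLoop:                           ; patched shape: 64 bytes per iteration
        movntdq [edx], xmm0
        movntdq [edx + 16], xmm0
        movntdq [edx + 32], xmm0
        movntdq [edx + 48], xmm0
        lea     edx, [edx + 64]
        loop    NewStyleLoop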
;------------------------------------------------------------------------------
;
; Copyright (c) 2006, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
;
;   SetMem64.nasm
;
; Abstract:
;
;   SetMem64 function
;
; Notes:
;
;------------------------------------------------------------------------------

    SECTION .text

;------------------------------------------------------------------------------
;  VOID *
;  EFIAPI
;  InternalMemSetMem64 (
;    IN VOID   *Buffer,
;    IN UINTN  Count,
;    IN UINT64 Value
;    )
;------------------------------------------------------------------------------
global ASM_PFX(InternalMemSetMem64)
ASM_PFX(InternalMemSetMem64):
    mov     eax, [esp + 4]              ; eax <- Buffer
    mov     ecx, [esp + 8]              ; ecx <- Count
    test    al, 8                       ; Buffer already 16-byte aligned?
    mov     edx, eax
    movq    xmm0, qword [esp + 12]      ; xmm0[63:0] <- Value
    jz      .0
    movq    qword [edx], xmm0           ; write one qword to reach 16-byte alignment
    add     edx, 8
    dec     ecx
.0:
    push    ebx
    mov     ebx, ecx
    and     ebx, 7                      ; ebx <- qwords left over after the 64-byte loop
    shr     ecx, 3                      ; ecx <- number of 64-byte (8-qword) blocks
    jz      @SetQwords
    movlhps xmm0, xmm0                  ; duplicate Value into the upper half of xmm0
.1:
    movntdq [edx], xmm0                 ; four 16-byte non-temporal stores fill a
    movntdq [edx + 16], xmm0            ; 64-byte write-combining buffer per iteration
    movntdq [edx + 32], xmm0
    movntdq [edx + 48], xmm0
    lea     edx, [edx + 64]
    loop    .1
    mfence                              ; make the non-temporal stores globally visible
@SetQwords:
    test    ebx, ebx
    jz      .3
    mov     ecx, ebx
.2:
    movq    qword [edx], xmm0           ; write the remaining 1..7 qwords
    lea     edx, [edx + 8]
    loop    .2
.3:
    pop     ebx
    ret                                 ; eax still holds Buffer
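As a quick illustration of the Ia32 EFIAPI (cdecl) calling convention documented in the header above, here is a hypothetical call site; the ScratchBuffer symbol, the FillScratch label, the element count and the 64-bit pattern are made up for this sketch, and depending on the toolchain the symbol may carry the ASM_PFX underscore prefix:

        SECTION .bss
        alignb  16                              ; SetMem64 requires at least 8-byte alignment
    ScratchBuffer:  resb    256                 ; room for 32 UINT64 elements

        SECTION .text
        extern  InternalMemSetMem64             ; may need a leading underscore (ASM_PFX)

    FillScratch:
        push    dword 0xDEADBEEF                ; Value, upper 32 bits
        push    dword 0xBAADF00D                ; Value, lower 32 bits
        push    dword 32                        ; Count, in UINT64 elements
        push    dword ScratchBuffer             ; Buffer
        call    InternalMemSetMem64
        add     esp, 16                         ; cdecl: caller removes the arguments
        ret                                     ; eax returns Buffer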