[RFC] Improve rawmemchr implementation.
- From: Ondřej Bílka <neleai at seznam dot cz>
- To: libc-alpha at sourceware dot org
- Date: Mon, 23 Sep 2013 21:37:40 +0200
- Subject: [RFC] Improve rawmemchr implementation.
- Authentication-results: sourceware.org; auth=none
- References: <20130816120314 dot GA25879 at domone dot kolej dot mff dot cuni dot cz> <20130816121256 dot GA26328 at domone dot kolej dot mff dot cuni dot cz> <20130902095530 dot GF11034 at domone dot kolej dot mff dot cuni dot cz>
On Mon, Sep 02, 2013 at 11:55:30AM +0200, Ondřej Bílka wrote:
> Ping,
>
> I noticed that we use a strong alias instead of a weak one; is that intentional?
>
> strong_alias (rawmemchr, __rawmemchr)
>
Hi,
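(On the quoted question: the two macros differ only in the binding of the
alias symbol. Roughly, and from memory rather than verbatim from
include/libc-symbols.h, the C versions expand to something like:

/* Alias with ordinary (strong) binding.  */
#define strong_alias(name, aliasname) \
  extern __typeof (name) aliasname __attribute__ ((alias (#name)));

/* Same, but the alias is a weak symbol.  */
#define weak_alias(name, aliasname) \
  extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));

so the only difference is that the weak form marks the alias as a weak
symbol, which a strong definition of the same name may override at link
time. The assembler versions used from .S files expand differently but
make the same distinction.)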
I used an evolver to optimize my rawmemchr; for most architectures the
better scheduling gave us an extra 2%.
There is a catch: on Bulldozer the loop is slower than the original, so if
we want this speedup we need to reintroduce an ifunc.
http://kam.mff.cuni.cz/~ondra/benchmark_string/rawmemchr_profile.html
http://kam.mff.cuni.cz/~ondra/benchmark_string/rawmemchr_profile230913.tar.bz2
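To make the ifunc point concrete, here is a rough stand-alone sketch of the
kind of dispatch I mean, using GCC's ifunc attribute. The variant names,
the plain-C stand-in bodies and the vendor-only CPU check are made up for
illustration; the real multiarch code would select the actual assembly
routines and would inspect family/model to single out Bulldozer.

#include <cpuid.h>

/* Plain-C stand-in used for both "assembly" variants below.  */
static void *rawmemchr_byte_loop (const void *s, int c)
{
  const unsigned char *p = s;
  while (*p != (unsigned char) c)
    p++;
  return (void *) p;
}

/* Stand-in for the new SSE2 loop (faster on most CPUs).  */
static void *rawmemchr_new (const void *s, int c)
{
  return rawmemchr_byte_loop (s, c);
}

/* Stand-in for the current loop, kept for Bulldozer.  */
static void *rawmemchr_old (const void *s, int c)
{
  return rawmemchr_byte_loop (s, c);
}

/* The resolver runs once, at relocation time, and returns the routine
   that all later calls will use.  Here only the CPU vendor is checked
   ("AuthenticAMD" in ebx/edx/ecx of cpuid leaf 0).  */
static void *(*resolve_rawmemchr (void)) (const void *, int)
{
  unsigned int a, b, c, d;
  if (__get_cpuid (0, &a, &b, &c, &d)
      && b == 0x68747541 && d == 0x69746e65 && c == 0x444d4163)
    return rawmemchr_old;
  return rawmemchr_new;
}

/* Exported symbol; the name is hypothetical so it does not clash with
   the libc rawmemchr.  */
void *my_rawmemchr (const void *s, int c)
     __attribute__ ((ifunc ("resolve_rawmemchr")));

With that in place the symbol is resolved once at load time rather than
branching on every call, so the per-CPU split costs nothing on the fast
path.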
diff --git a/sysdeps/x86_64/rawmemchr.S b/sysdeps/x86_64/rawmemchr.S
index f4d5591..29419c9 100644
--- a/sysdeps/x86_64/rawmemchr.S
+++ b/sysdeps/x86_64/rawmemchr.S
@@ -1,7 +1,6 @@
-/* fast SSE2 memchr with 64 byte loop and pmaxub instruction using
+/* fast SSE2 rawmemchr
- Copyright (C) 2011-2013 Free Software Foundation, Inc.
- Contributed by Intel Corporation.
+ Copyright (C) 2013 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,186 +19,107 @@
#include <sysdep.h>
+#define ALIGN(n) .p2align n
+
.text
ENTRY (rawmemchr)
- movd %rsi, %xmm1
- mov %rdi, %rcx
-
- punpcklbw %xmm1, %xmm1
+ movd %esi, %xmm1
+ movq %rdi, %rax
+ andl $4095, %eax
punpcklbw %xmm1, %xmm1
-
- and $63, %rcx
+ cmpl $4032, %eax
+ punpcklwd %xmm1, %xmm1
pshufd $0, %xmm1, %xmm1
-
- cmp $48, %rcx
- ja L(crosscache)
-
+ jg L(cross_page)
movdqu (%rdi), %xmm0
pcmpeqb %xmm1, %xmm0
-/* Check if there is a match. */
- pmovmskb %xmm0, %eax
- test %eax, %eax
-
- jnz L(matches)
- add $16, %rdi
- and $-16, %rdi
- jmp L(loop_prolog)
-
- .p2align 4
-L(crosscache):
- and $15, %rcx
- and $-16, %rdi
- movdqa (%rdi), %xmm0
-
- pcmpeqb %xmm1, %xmm0
-/* Check if there is a match. */
pmovmskb %xmm0, %eax
-/* Remove the leading bytes. */
- sar %cl, %eax
test %eax, %eax
- je L(unaligned_no_match)
-/* Check which byte is a match. */
- bsf %eax, %eax
-
- add %rdi, %rax
- add %rcx, %rax
+ je L(next_48_bytes)
+ bsf %ax, %ax
+ addq %rdi, %rax
ret
- .p2align 4
-L(unaligned_no_match):
- add $16, %rdi
-
- .p2align 4
-L(loop_prolog):
- movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches)
-
- movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
- pmovmskb %xmm2, %eax
- test %eax, %eax
- jnz L(matches16)
-
- movdqa 32(%rdi), %xmm3
+L(next_48_bytes):
+ movdqu 48(%rdi), %xmm4
+ movdqu 32(%rdi), %xmm3
pcmpeqb %xmm1, %xmm3
- pmovmskb %xmm3, %eax
- test %eax, %eax
- jnz L(matches32)
-
- movdqa 48(%rdi), %xmm4
pcmpeqb %xmm1, %xmm4
- add $64, %rdi
- pmovmskb %xmm4, %eax
- test %eax, %eax
- jnz L(matches0)
-
- test $0x3f, %rdi
- jz L(align64_loop)
+ pmovmskb %xmm4, %ecx
+ movdqu 16(%rdi), %xmm2
+ pmovmskb %xmm3, %eax
+ salq $32, %rcx
+ pcmpeqb %xmm1, %xmm2
+ salq $16, %rax
+ pmovmskb %xmm2, %edx
+ orq %rdx, %rax
+ orq %rcx, %rax
+ je L(loop_start)
+ bsfq %rax, %rax
+ lea 16(%rdi, %rax), %rax
+ ret
- movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
- pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches)
+L(loop_start):
+ andq $-64, %rdi
- movdqa 16(%rdi), %xmm2
+ ALIGN (4)
+L(loop64):
+ movdqa 64(%rdi), %xmm2
+ movdqa 80(%rdi), %xmm3
pcmpeqb %xmm1, %xmm2
- pmovmskb %xmm2, %eax
- test %eax, %eax
- jnz L(matches16)
-
- movdqa 32(%rdi), %xmm3
+ movdqa 112(%rdi), %xmm5
+ movdqa 96(%rdi), %xmm4
+ addq $64, %rdi
+ pcmpeqb %xmm1, %xmm5
+ pmaxub %xmm2, %xmm5
pcmpeqb %xmm1, %xmm3
+ pcmpeqb %xmm1, %xmm4
+ pmaxub %xmm3, %xmm5
+ pmaxub %xmm4, %xmm5
+ pmovmskb %xmm5, %edx
+ testl %edx, %edx
+ je L(loop64)
+ pmovmskb %xmm4, %esi
+ salq $48, %rdx
+ salq $32, %rsi
pmovmskb %xmm3, %eax
- test %eax, %eax
- jnz L(matches32)
+ pmovmskb %xmm2, %ecx
+ salq $16, %rax
+ orq %rsi, %rax
+ orq %rcx, %rax
+ orq %rdx, %rax
+ bsfq %rax, %rax
+ addq %rdi, %rax
+ ret
- movdqa 48(%rdi), %xmm3
+ ALIGN (4)
+L(cross_page):
+ movq %rdi, %rcx
+ andq $-64, %rdi
+ movdqa (%rdi), %xmm2
+ movdqa 16(%rdi), %xmm3
+ movdqa 48(%rdi), %xmm5
+ movdqa 32(%rdi), %xmm4
pcmpeqb %xmm1, %xmm3
- pmovmskb %xmm3, %eax
-
- add $64, %rdi
- test %eax, %eax
- jnz L(matches0)
-
- and $-64, %rdi
-
- .p2align 4
-L(align64_loop):
- movdqa (%rdi), %xmm0
- movdqa 16(%rdi), %xmm2
- movdqa 32(%rdi), %xmm3
- movdqa 48(%rdi), %xmm4
-
- pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm1, %xmm2
- pcmpeqb %xmm1, %xmm3
+ pmovmskb %xmm2, %r8d
pcmpeqb %xmm1, %xmm4
-
- pmaxub %xmm0, %xmm3
- pmaxub %xmm2, %xmm4
- pmaxub %xmm3, %xmm4
- pmovmskb %xmm4, %eax
-
- add $64, %rdi
-
- test %eax, %eax
- jz L(align64_loop)
-
- sub $64, %rdi
-
- pmovmskb %xmm0, %eax
- test %eax, %eax
- jnz L(matches)
-
- pmovmskb %xmm2, %eax
- test %eax, %eax
- jnz L(matches16)
-
- movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
-
- pcmpeqb 48(%rdi), %xmm1
+ pcmpeqb %xmm1, %xmm5
+ pmovmskb %xmm5, %edx
+ pmovmskb %xmm4, %esi
pmovmskb %xmm3, %eax
- test %eax, %eax
- jnz L(matches32)
-
- pmovmskb %xmm1, %eax
- bsf %eax, %eax
- lea 48(%rdi, %rax), %rax
- ret
-
- .p2align 4
-L(matches0):
- bsf %eax, %eax
- lea -16(%rax, %rdi), %rax
- ret
-
- .p2align 4
-L(matches):
- bsf %eax, %eax
- add %rdi, %rax
- ret
-
- .p2align 4
-L(matches16):
- bsf %eax, %eax
- lea 16(%rax, %rdi), %rax
- ret
-
- .p2align 4
-L(matches32):
- bsf %eax, %eax
- lea 32(%rax, %rdi), %rax
- ret
-
- .p2align 4
-L(return_null):
- xor %rax, %rax
- ret
+ salq $32, %rsi
+ salq $16, %rax
+ salq $48, %rdx
+ orq %rsi, %rax
+ orq %r8, %rax
+ orq %rdx, %rax
+ shrq %cl, %rax
+ testq %rax, %rax
+ je L(loop64)
+ bsfq %rax, %rax
+ addq %rcx, %rax
+ ret
END (rawmemchr)