/* strlen used for beginning of str{n}cat using AVX2.
   Copyright (C) 2011-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */


/* NOTE: This file is meant to be included by strcat-avx2 or
   strncat-avx2 and does not stand alone.  Before including it, %rdi
   must be saved in %rax.  */

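/* Illustrative sketch of the include contract (assumed here, not the
   literal strcat-avx2 source; the file name is also an assumption):

	movq	%rdi, %rax	# Save return value before strlen runs.
	# include "strcat-strlen-avx2.h.S"

   When the included code falls through L(strcat_strlen_done), %rdi
   points at the null terminator of the destination string.  */
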

/* Simple strlen implementation that ends at
   L(strcat_strlen_done).  */
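/* Round %rdi down to a VEC_SIZE boundary so the first load cannot
   cross a page, then shift the compare mask right by the
   misalignment (shrx uses only the low bits of %edi) so that bytes
   before the start of the string are ignored.  */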
	movq	%rdi, %r8
	andq	$(VEC_SIZE * -1), %r8
	VPCMPEQ	(%r8), %VZERO, %VMM(0)
	vpmovmskb %VMM(0), %ecx
	shrxl	%edi, %ecx, %ecx
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v0)

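/* No null byte in the first (partial) vector.  Check the next
   aligned vector and advance %rdi to it so the L(bsf_and_done_v0)
   tail computes the address relative to the new base.  */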
	VPCMPEQ	VEC_SIZE(%r8), %VZERO, %VMM(0)
	vpmovmskb %VMM(0), %ecx
	leaq	(VEC_SIZE)(%r8), %rdi
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v0)

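/* Check the next three aligned vectors at VEC_SIZE * 1..3 from the
   new base in %rdi before entering the main four-vector loop.  */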
	VPCMPEQ	(VEC_SIZE * 1)(%rdi), %VZERO, %VMM(0)
	vpmovmskb %VMM(0), %ecx
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v1)

	VPCMPEQ	(VEC_SIZE * 2)(%rdi), %VZERO, %VMM(0)
	vpmovmskb %VMM(0), %ecx
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v2)

	VPCMPEQ	(VEC_SIZE * 3)(%rdi), %VZERO, %VMM(0)
	vpmovmskb %VMM(0), %ecx
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v3)

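/* Set %rdi to (VEC_SIZE * 4) - 1 past the checked region so the +1
   displacements in the loop below produce (VEC_SIZE * 4)-aligned
   loads.  Each iteration checks four vectors: VPMIN folds them
   together, so the combined vector has a zero byte iff any of the
   four does, and a single compare/movemask detects it.  */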
	orq	$(VEC_SIZE * 4 - 1), %rdi
	.p2align 4,, 8
L(loop_2x_vec):
	VMOVA	(VEC_SIZE * 0 + 1)(%rdi), %VMM(0)
	VPMIN	(VEC_SIZE * 1 + 1)(%rdi), %VMM(0), %VMM(1)
	VMOVA	(VEC_SIZE * 2 + 1)(%rdi), %VMM(2)
	VPMIN	(VEC_SIZE * 3 + 1)(%rdi), %VMM(2), %VMM(3)
	VPMIN	%VMM(1), %VMM(3), %VMM(3)
	VPCMPEQ	%VMM(3), %VZERO, %VMM(3)
	vpmovmskb %VMM(3), %r8d
	subq	$(VEC_SIZE * -4), %rdi
	testl	%r8d, %r8d
	jz	L(loop_2x_vec)

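/* A null byte is somewhere in the last four vectors.  Undo the loop
   increment (keeping the +1 bias) and retest the saved vectors in
   order to find which one holds the first null.  Note that %VMM(1)
   is min(v0, v1), so once v0 is known null-free its zeros identify
   the first null in v1.  */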
	addq	$(VEC_SIZE * -4 + 1), %rdi

	VPCMPEQ	%VMM(0), %VZERO, %VMM(0)
	vpmovmskb %VMM(0), %ecx
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v0)

	VPCMPEQ	%VMM(1), %VZERO, %VMM(1)
	vpmovmskb %VMM(1), %ecx
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v1)

	VPCMPEQ	%VMM(2), %VZERO, %VMM(2)
	vpmovmskb %VMM(2), %ecx
	testl	%ecx, %ecx
	jnz	L(bsf_and_done_v2)

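/* The null must be in the fourth vector.  %r8d already holds the
   combined compare mask from the loop, and since the first three
   vectors are null-free its set bits all come from vector 3.  */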
	movl	%r8d, %ecx
L(bsf_and_done_v3):
	addq	$VEC_SIZE, %rdi
L(bsf_and_done_v2):
	bsfl	%ecx, %ecx
	leaq	(VEC_SIZE * 2)(%rdi, %rcx), %rdi
	jmp	L(strcat_strlen_done)

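/* Tails for vectors 0 and 1.  As with v3/v2 above, the v1 entry
   biases %rdi by VEC_SIZE and falls through; bsf then yields the
   byte index of the first null within the vector.  */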
	.p2align 4,, 4
L(bsf_and_done_v1):
	addq	$VEC_SIZE, %rdi
L(bsf_and_done_v0):
	bsfl	%ecx, %ecx
	addq	%rcx, %rdi
L(strcat_strlen_done):
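/* Execution continues in the including file with %rdi pointing at
   the destination's null terminator and the original destination
   pointer still saved in %rax.  */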