/* memset optimized with AVX512 for KNL hardware.
   Copyright (C) 2015-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>

#if IS_IN (libc)

#include "asm-syntax.h"
#ifndef MEMSET
# define MEMSET __memset_avx512_no_vzeroupper
# define MEMSET_CHK __memset_chk_avx512_no_vzeroupper
#endif

	.section .text.avx512,"ax",@progbits
#if defined PIC
ENTRY (MEMSET_CHK)
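	/* __memset_chk (dest, byte, count, destlen): %rcx carries destlen;
	   abort via __chk_fail if count exceeds it, otherwise execution
	   continues into MEMSET below.  */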
	cmpq	%rdx, %rcx
	jb	HIDDEN_JUMPTARGET (__chk_fail)
END (MEMSET_CHK)
#endif

ENTRY (MEMSET)
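	/* memset (dest = %rdi, byte = %esi, count = %rdx).  Zero %xmm0 so it
	   can act as an all-zero VPSHUFB shuffle mask, move the fill byte
	   into %xmm1, and let VPSHUFB replicate that byte across all 16
	   bytes of %xmm0.  %rsi is repointed at the end of the buffer so
	   tail stores can be addressed from the end, and %rax keeps the
	   original dest as the return value.  */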
	vpxor	%xmm0, %xmm0, %xmm0
	vmovd	%esi, %xmm1
	lea	(%rdi, %rdx), %rsi
	mov	%rdi, %rax
	vpshufb	%xmm0, %xmm1, %xmm0
	cmp	$16, %rdx
	jb	L(less_16bytes)
	cmp	$512, %rdx
	vbroadcastss	%xmm0, %zmm2
	ja	L(512bytesormore)
	cmp	$256, %rdx
	jb	L(less_256bytes)
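	/* 256 <= count <= 512: four unaligned 64-byte stores from the start
	   and four from the end cover the whole range, overlapping in the
	   middle when count < 512.  */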
	vmovups	%zmm2, (%rdi)
	vmovups	%zmm2, 0x40(%rdi)
	vmovups	%zmm2, 0x80(%rdi)
	vmovups	%zmm2, 0xC0(%rdi)
	vmovups	%zmm2, -0x100(%rsi)
	vmovups	%zmm2, -0xC0(%rsi)
	vmovups	%zmm2, -0x80(%rsi)
	vmovups	%zmm2, -0x40(%rsi)
	ret

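/* 16 <= count < 256: walk down halving size classes; each class writes the
   pattern from both ends of the buffer so any remainder in the middle is
   covered by the overlap.  */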
L(less_256bytes):
	cmp	$128, %dl
	jb	L(less_128bytes)
	vmovups	%zmm2, (%rdi)
	vmovups	%zmm2, 0x40(%rdi)
	vmovups	%zmm2, -0x80(%rsi)
	vmovups	%zmm2, -0x40(%rsi)
	ret

L(less_128bytes):
	cmp	$64, %dl
	jb	L(less_64bytes)
	vmovups	%zmm2, (%rdi)
	vmovups	%zmm2, -0x40(%rsi)
	ret

L(less_64bytes):
	cmp	$32, %dl
	jb	L(less_32bytes)
	vmovdqu	%ymm2, (%rdi)
	vmovdqu	%ymm2, -0x20(%rsi)
	ret

L(less_32bytes):
	vmovdqu	%xmm0, (%rdi)
	vmovdqu	%xmm0, -0x10(%rsi)
	ret

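/* count < 16: the 16-byte pattern is still in %xmm0; finish with
   progressively narrower stores from both ends of the buffer.  */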
L(less_16bytes):
	cmp	$8, %dl
	jb	L(less_8bytes)
	vmovq	%xmm0, (%rdi)
	vmovq	%xmm0, -0x08(%rsi)
	ret

L(less_8bytes):
	vmovd	%xmm0, %ecx
	cmp	$4, %dl
	jb	L(less_4bytes)
	mov	%ecx, (%rdi)
	mov	%ecx, -0x04(%rsi)
	ret

L(less_4bytes):
	cmp	$2, %dl
	jb	L(less_2bytes)
	mov	%cx, (%rdi)
	mov	%cx, -0x02(%rsi)
	ret

L(less_2bytes):
	cmp	$1, %dl
	jb	L(less_1bytes)
	mov	%cl, (%rdi)
L(less_1bytes):
	ret

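/* count > 512: requests larger than half of the shared cache size take the
   non-temporal path so the fill does not evict the working set; smaller
   ones stay with regular cached stores.  */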
L(512bytesormore):
	mov	__x86_shared_cache_size_half(%rip), %rcx
	cmp	%rcx, %rdx
	ja	L(preloop_large)
	cmp	$1024, %rdx
	ja	L(1024bytesormore)
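
	/* 512 < count <= 1024: sixteen unaligned 64-byte stores, eight from
	   the start and eight from the end.  */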
	vmovups	%zmm2, (%rdi)
	vmovups	%zmm2, 0x40(%rdi)
	vmovups	%zmm2, 0x80(%rdi)
	vmovups	%zmm2, 0xC0(%rdi)
	vmovups	%zmm2, 0x100(%rdi)
	vmovups	%zmm2, 0x140(%rdi)
	vmovups	%zmm2, 0x180(%rdi)
	vmovups	%zmm2, 0x1C0(%rdi)
	vmovups	%zmm2, -0x200(%rsi)
	vmovups	%zmm2, -0x1C0(%rsi)
	vmovups	%zmm2, -0x180(%rsi)
	vmovups	%zmm2, -0x140(%rsi)
	vmovups	%zmm2, -0x100(%rsi)
	vmovups	%zmm2, -0xC0(%rsi)
	vmovups	%zmm2, -0x80(%rsi)
	vmovups	%zmm2, -0x40(%rsi)
	ret

/* Align on 64 and loop with aligned stores.  */
L(1024bytesormore):
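	/* %rax still holds the original dest: one unaligned store covers the
	   possibly misaligned head while %rdi is advanced to the next
	   64-byte boundary.  %rsi is pulled back by 256 bytes so the loop
	   exits with a full 256-byte tail left over.  */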
	sub	$0x100, %rsi
	vmovups	%zmm2, (%rax)
	and	$-0x40, %rdi
	add	$0x40, %rdi

L(gobble_256bytes_loop):
	vmovaps	%zmm2, (%rdi)
	vmovaps	%zmm2, 0x40(%rdi)
	vmovaps	%zmm2, 0x80(%rdi)
	vmovaps	%zmm2, 0xC0(%rdi)
	add	$0x100, %rdi
	cmp	%rsi, %rdi
	jb	L(gobble_256bytes_loop)
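	/* Write the last 256 bytes with unaligned stores at end - 256; they
	   may overlap the final loop iteration.  */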
	vmovups	%zmm2, (%rsi)
	vmovups	%zmm2, 0x40(%rsi)
	vmovups	%zmm2, 0x80(%rsi)
	vmovups	%zmm2, 0xC0(%rsi)
	ret

/* Align on 128 and loop with non-temporal stores.  */
L(preloop_large):
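	/* Two unaligned stores from %rax cover the first 128 bytes while
	   %rdi is advanced to the next 128-byte boundary; %rsi is pulled
	   back by 512 bytes so a full tail remains after the loop.  */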
	and	$-0x80, %rdi
	add	$0x80, %rdi
	vmovups	%zmm2, (%rax)
	vmovups	%zmm2, 0x40(%rax)
	sub	$0x200, %rsi

L(gobble_512bytes_nt_loop):
	vmovntdq %zmm2, (%rdi)
	vmovntdq %zmm2, 0x40(%rdi)
	vmovntdq %zmm2, 0x80(%rdi)
	vmovntdq %zmm2, 0xC0(%rdi)
	vmovntdq %zmm2, 0x100(%rdi)
	vmovntdq %zmm2, 0x140(%rdi)
	vmovntdq %zmm2, 0x180(%rdi)
	vmovntdq %zmm2, 0x1C0(%rdi)
	add	$0x200, %rdi
	cmp	%rsi, %rdi
	jb	L(gobble_512bytes_nt_loop)
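	/* SFENCE makes the weakly ordered non-temporal stores globally
	   visible before returning; the final 512 bytes at end - 512 are
	   then written with regular unaligned stores, possibly overlapping
	   the last loop iteration.  */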
	sfence
	vmovups	%zmm2, (%rsi)
	vmovups	%zmm2, 0x40(%rsi)
	vmovups	%zmm2, 0x80(%rsi)
	vmovups	%zmm2, 0xC0(%rsi)
	vmovups	%zmm2, 0x100(%rsi)
	vmovups	%zmm2, 0x140(%rsi)
	vmovups	%zmm2, 0x180(%rsi)
	vmovups	%zmm2, 0x1C0(%rsi)
	ret
END (MEMSET)
#endif