--- cipher/crc-intel-pclmul.c.orig 2016-04-07 15:30:08 UTC
+++ cipher/crc-intel-pclmul.c
@@ -143,7 +143,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                       [inbuf_2] "m" (inbuf[2 * 16]),
                       [inbuf_3] "m" (inbuf[3 * 16]),
                       [crc] "m" (*pcrc)
-                    : );
+                    );
 
       inbuf += 4 * 16;
       inlen -= 4 * 16;
@@ -151,7 +151,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
       asm volatile ("movdqa %[k1k2], %%xmm4\n\t"
                     :
                     : [k1k2] "m" (consts->k[1 - 1])
-                    : );
+                    );
 
       /* Fold by 4. */
       while (inlen >= 4 * 16)
@@ -188,7 +188,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                           [inbuf_1] "m" (inbuf[1 * 16]),
                           [inbuf_2] "m" (inbuf[2 * 16]),
                           [inbuf_3] "m" (inbuf[3 * 16])
-                        : );
+                        );
 
           inbuf += 4 * 16;
           inlen -= 4 * 16;
@@ -199,7 +199,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                     :
                     : [k3k4] "m" (consts->k[3 - 1]),
                       [my_p] "m" (consts->my_p[0])
-                    : );
+                    );
 
       /* Fold 4 to 1. */
 
@@ -222,7 +222,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                     "pxor %%xmm4, %%xmm0\n\t"
                     :
                     :
-                    : );
+                    );
     }
   else
     {
@@ -236,7 +236,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                       [crc] "m" (*pcrc),
                       [k3k4] "m" (consts->k[3 - 1]),
                       [my_p] "m" (consts->my_p[0])
-                    : );
+                    );
 
       inbuf += 16;
       inlen -= 16;
@@ -256,7 +256,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                     "pxor %%xmm1, %%xmm0\n\t"
                     :
                     : [inbuf] "m" (*inbuf)
-                    : );
+                    );
 
       inbuf += 16;
       inlen -= 16;
@@ -288,7 +288,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                       [mask] "m" (crc32_partial_fold_input_mask[inlen]),
                       [shl_shuf] "m" (crc32_refl_shuf_shift[inlen]),
                       [shr_shuf] "m" (crc32_refl_shuf_shift[inlen + 16])
-                    : );
+                    );
 
       inbuf += inlen;
       inlen -= inlen;
@@ -318,7 +318,7 @@ crc32_reflected_bulk (u32 *pcrc, const b
                 "pextrd $2, %%xmm0, %[out]\n\t"
                 : [out] "=m" (*pcrc)
                 : [k5] "m" (consts->k[5 - 1])
-                : );
+                );
 }
 
 static inline void
@@ -333,7 +333,7 @@ crc32_reflected_less_than_16 (u32 *pcrc,
   asm volatile ("movdqa %[my_p], %%xmm5\n\t"
                 :
                 : [my_p] "m" (consts->my_p[0])
-                : );
+                );
 
   if (inlen == 1)
     {
@@ -372,7 +372,7 @@ crc32_reflected_less_than_16 (u32 *pcrc,
                     : [out] "=m" (*pcrc)
                     : [in] "rm" (data),
                       [crc] "rm" (crc)
-                    : );
+                    );
     }
   else if (inlen == 4)
     {
@@ -391,7 +391,7 @@ crc32_reflected_less_than_16 (u32 *pcrc,
                     : [in] "m" (*inbuf),
                       [crc] "m" (*pcrc),
                       [my_p] "m" (consts->my_p[0])
-                    : );
+                    );
     }
   else
     {
@@ -404,14 +404,14 @@ crc32_reflected_less_than_16 (u32 *pcrc,
                       [crc] "m" (*pcrc),
                       [my_p] "m" (consts->my_p[0]),
                       [k3k4] "m" (consts->k[3 - 1])
-                    : );
+                    );
 
       if (inlen >= 8)
         {
           asm volatile ("movq %[inbuf], %%xmm0\n\t"
                         :
                         : [inbuf] "m" (*inbuf)
-                        : );
+                        );
           if (inlen > 8)
             {
               asm volatile (/*"pinsrq $1, %[inbuf_tail], %%xmm0\n\t"*/
@@ -422,7 +422,7 @@ crc32_reflected_less_than_16 (u32 *pcrc,
                             : [inbuf_tail] "m" (inbuf[inlen - 8]),
                               [merge_shuf] "m"
                                 (*crc32_merge9to15_shuf[inlen - 9])
-                            : );
+                            );
             }
         }
       else
@@ -435,7 +435,7 @@ crc32_reflected_less_than_16 (u32 *pcrc,
                           [inbuf_tail] "m" (inbuf[inlen - 4]),
                           [merge_shuf] "m"
                             (*crc32_merge5to7_shuf[inlen - 5])
-                        : );
+                        );
         }
 
       /* Final fold. */
@@ -465,7 +465,7 @@ crc32_reflected_less_than_16 (u32 *pcrc,
                     "pextrd $2, %%xmm0, %[out]\n\t"
                     : [out] "=m" (*pcrc)
                     : [k5] "m" (consts->k[5 - 1])
-                    : );
+                    );
     }
 }
 
@@ -477,7 +477,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
   asm volatile ("movdqa %[bswap], %%xmm7\n\t"
                 :
                 : [bswap] "m" (*crc32_bswap_shuf)
-                : );
+                );
 
   if (inlen >= 8 * 16)
     {
@@ -497,7 +497,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
                       [inbuf_2] "m" (inbuf[2 * 16]),
                       [inbuf_3] "m" (inbuf[3 * 16]),
                       [crc] "m" (*pcrc)
-                    : );
+                    );
 
       inbuf += 4 * 16;
       inlen -= 4 * 16;
@@ -505,7 +505,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
       asm volatile ("movdqa %[k1k2], %%xmm4\n\t"
                     :
                     : [k1k2] "m" (consts->k[1 - 1])
-                    : );
+                    );
 
       /* Fold by 4. */
       while (inlen >= 4 * 16)
@@ -546,7 +546,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
                           [inbuf_1] "m" (inbuf[1 * 16]),
                           [inbuf_2] "m" (inbuf[2 * 16]),
                           [inbuf_3] "m" (inbuf[3 * 16])
-                        : );
+                        );
 
           inbuf += 4 * 16;
           inlen -= 4 * 16;
@@ -557,7 +557,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
                     :
                     : [k3k4] "m" (consts->k[3 - 1]),
                       [my_p] "m" (consts->my_p[0])
-                    : );
+                    );
 
       /* Fold 4 to 1. */
 
@@ -580,7 +580,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
                     "pxor %%xmm4, %%xmm0\n\t"
                     :
                     :
-                    : );
+                    );
     }
   else
     {
@@ -595,7 +595,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
                       [crc] "m" (*pcrc),
                       [k3k4] "m" (consts->k[3 - 1]),
                       [my_p] "m" (consts->my_p[0])
-                    : );
+                    );
 
       inbuf += 16;
       inlen -= 16;
@@ -616,7 +616,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
                     "pxor %%xmm1, %%xmm0\n\t"
                     :
                     : [inbuf] "m" (*inbuf)
-                    : );
+                    );
 
       inbuf += 16;
       inlen -= 16;
@@ -650,7 +650,7 @@ crc32_bulk (u32 *pcrc, const byte *inbuf
                       [mask] "m" (crc32_partial_fold_input_mask[inlen]),
                       [shl_shuf] "m" (crc32_refl_shuf_shift[32 - inlen]),
                       [shr_shuf] "m" (crc32_shuf_shift[inlen + 16])
-                    : );
+                    );
 
       inbuf += inlen;
       inlen -= inlen;
@@ -697,7 +697,7 @@ crc32_less_than_16 (u32 *pcrc, const byt
   asm volatile ("movdqa %[my_p], %%xmm5\n\t"
                 :
                 : [my_p] "m" (consts->my_p[0])
-                : );
+                );
 
   if (inlen == 1)
     {
@@ -774,14 +774,14 @@ crc32_less_than_16 (u32 *pcrc, const byt
                       [crc] "m" (*pcrc),
                       [my_p] "m" (consts->my_p[0]),
                       [k3k4] "m" (consts->k[3 - 1])
-                    : );
+                    );
 
       if (inlen >= 8)
         {
          asm volatile ("movq %[inbuf], %%xmm0\n\t"
                        :
                        : [inbuf] "m" (*inbuf)
-                        : );
+                        );
          if (inlen > 8)
            {
              asm volatile (/*"pinsrq $1, %[inbuf_tail], %%xmm0\n\t"*/
@@ -792,7 +792,7 @@ crc32_less_than_16 (u32 *pcrc, const byt
                            : [inbuf_tail] "m" (inbuf[inlen - 8]),
                              [merge_shuf] "m"
                                (*crc32_merge9to15_shuf[inlen - 9])
-                            : );
+                            );
            }
        }
      else
@@ -805,7 +805,7 @@ crc32_less_than_16 (u32 *pcrc, const byt
                          [inbuf_tail] "m" (inbuf[inlen - 4]),
                          [merge_shuf] "m"
                            (*crc32_merge5to7_shuf[inlen - 5])
-                        : );
+                        );
        }
 
      /* Final fold. */