1 |
/*- |
2 |
* SPDX-License-Identifier: BSD-2-Clause |
3 |
* |
4 |
* Copyright (c) 2014-2018 Netflix Inc. |
5 |
* All rights reserved. |
6 |
* |
7 |
* Redistribution and use in source and binary forms, with or without |
8 |
* modification, are permitted provided that the following conditions |
9 |
* are met: |
10 |
* 1. Redistributions of source code must retain the above copyright |
11 |
* notice, this list of conditions and the following disclaimer. |
12 |
* 2. Redistributions in binary form must reproduce the above copyright |
13 |
* notice, this list of conditions and the following disclaimer in the |
14 |
* documentation and/or other materials provided with the distribution. |
15 |
* |
16 |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
17 |
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
20 |
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
21 |
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
22 |
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
23 |
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
24 |
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
25 |
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
26 |
* SUCH DAMAGE. |
27 |
* |
28 |
* |
29 |
*/ |
30 |
#include <sys/cdefs.h> |
31 |
__FBSDID("$FreeBSD$"); |
32 |
|
33 |
#include <sys/types.h> |
34 |
#include <sys/param.h> |
35 |
#include <sys/kernel.h> |
36 |
#include <sys/ktls.h> |
37 |
#include <sys/lock.h> |
38 |
#include <sys/malloc.h> |
39 |
#include <sys/mutex.h> |
40 |
#include <sys/proc.h> |
41 |
#include <sys/sockbuf.h> |
42 |
#include <sys/filedesc.h> |
43 |
#include <sys/sysctl.h> |
44 |
#include <sys/counter.h> |
45 |
#include <sys/uio.h> |
46 |
#include <sys/module.h> |
47 |
#include <opencrypto/xform.h> |
48 |
#include <machine/fpu.h> |
49 |
|
50 |
#include "aes_gcm.h" |
51 |
|
52 |
#define KTLS_INTELISA_AEAD_TAGLEN 16 |
53 |
|
54 |
/*
 * Per-session cipher state for the ISA-L GCM backend: the expanded key
 * schedule, the running GCM context, and the key-size specific ISA-L
 * entry points chosen at session setup (128- or 256-bit variants).
 */
struct isa_gcm_struct {
	struct gcm_key_data key_data;	/* expanded AES round keys + hash keys */
	struct gcm_context_data ctx_data;	/* running per-record GCM state */
	void (*gcm_pre) (const void *key, struct gcm_key_data *); /* Done once per key */
	void (*gcm_init) (const struct gcm_key_data *key_data,
	    struct gcm_context_data *context_data,
	    uint8_t *iv,
	    uint8_t const *aad,
	    uint64_t aad_len);		/* Done at start of crypt */
	void (*gcm_upd) (const struct gcm_key_data *key_data,
	    struct gcm_context_data *context_data,
	    uint8_t *out,
	    const uint8_t *in,
	    uint64_t len);		/* With each block of data */
	/* Non-temporal-store variant; used only for aligned, block-multiple chunks. */
	void (*gcm_upd_nt) (const struct gcm_key_data *key_data,
	    struct gcm_context_data *context_data,
	    uint8_t *out,
	    const uint8_t *in,
	    uint64_t len);		/* With each block of data */
	void (*gcm_final) (const struct gcm_key_data *key_data,
	    struct gcm_context_data *context_data,
	    uint8_t *tag,
	    uint64_t tag_len);		/* Pulls out the tag */
};
78 |
|
79 |
SYSCTL_DECL(_kern_ipc_tls);

/* Tunable: when zero, ktls_intelisa_try() refuses all sessions. */
static int ktls_use_intel_isa_gcm = 1;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, isa_gcm, CTLFLAG_RW,
    &ktls_use_intel_isa_gcm, 1,
    "Should we use the Intel ISA GCM if available");

SYSCTL_DECL(_kern_ipc_tls_stats);

/* Statistics counters, allocated in intelisa_init() with M_WAITOK. */
static counter_u64_t ktls_offload_isa_aead;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_aead_crypts,
    CTLFLAG_RD, &ktls_offload_isa_aead,
    "Total number of Intel ISA TLS AEAD encrypts called");

/* Bytes encrypted via the regular (unaligned) update path. */
static counter_u64_t intelisa_unaligned_mem_b;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_unaligned_bytes,
    CTLFLAG_RD, &intelisa_unaligned_mem_b,
    "Byte cnt of intel isa unaligned");

/* Bytes encrypted via the non-temporal (aligned) update path. */
static counter_u64_t intelisa_aligned_mem_b;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_aligned_bytes,
    CTLFLAG_RD, &intelisa_aligned_mem_b,
    "Byte cnt of intel isa aligned");

/* Call counts corresponding to the byte counters above. */
static counter_u64_t intelisa_unaligned_mem;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_unaligned,
    CTLFLAG_RD, &intelisa_unaligned_mem,
    "Call cnt of intel isa unaligned");

static counter_u64_t intelisa_aligned_mem;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_aligned,
    CTLFLAG_RD, &intelisa_aligned_mem,
    "Call cnt of intel isa aligned");

/* Malloc zone for struct isa_gcm_struct session state. */
static MALLOC_DEFINE(M_INTEL_ISA, "isal_tls", "Intel ISA-L TLS");
114 |
|
115 |
/*
 * Encrypt one TLS record with ISA-L AES-GCM.
 *
 * The caller-supplied nonce (nd, noncelen <= 28 bytes) is terminated
 * with GCM_IV_END_MARK in a local 32-byte IV buffer.  The payload in
 * iniov[0..numiovs-1] is encrypted into the matching outiov entries;
 * 16-byte aligned, block-multiple chunks go through the non-temporal
 * update routine (gcm_upd_nt), everything else through the regular one
 * (gcm_upd).  The authentication tag (*taglen bytes) is written to
 * tagout.  Returns 0 on success, -1 if the nonce is too long.
 */
static int
intel_isa_seal(struct isa_gcm_struct *isa,
    struct iovec *outiov, int numiovs,
    uint8_t * nd, int noncelen,
    struct iovec *iniov,
    uint8_t * ad, int adlen,
    uint8_t * tagout, size_t *taglen)
{
	int i;
	bool nt = true;
	bool misaligned_len, misaligned_start;
	int fixup = 0;
	uint8_t *in;
	uint8_t *out;
	uint64_t len;
	uint8_t iv[32];
	uint8_t const IVend[] = GCM_IV_END_MARK;

	/* iv[] must hold the nonce plus the IVend terminator. */
	if (noncelen > 28) {
		return (-1);
	}
	memcpy(iv, nd, noncelen);
	memcpy(&iv[noncelen], IVend, sizeof(IVend));
	isa->gcm_init(&isa->key_data, &isa->ctx_data, iv, ad, (size_t)adlen);
	for (i = 0; i < numiovs; i++) {
		in = iniov[i].iov_base;
		out = outiov[i].iov_base;
		len = iniov[i].iov_len;

		/* 16-byte (AES block) alignment of source and length. */
		misaligned_start = ((uintptr_t)in & 0xf) != 0;
		misaligned_len = (len & 0xf) != 0;

		if (misaligned_start || misaligned_len) {
			/*
			 * Try to do as much of a page using
			 * non-temporals as we possibly can, and leave
			 * a ragged tail as a separate chunk.
			 */
			if (nt && !misaligned_start && len > 0xf) {
				/* Round down to a block multiple; the
				 * remainder becomes the fixup chunk. */
				len = len & ~0xf;
				fixup = iniov[i].iov_len - len;
			} else {
				/*
				 * Once off the aligned path we stay off
				 * it for the rest of the record.
				 * NOTE(review): presumably the _nt
				 * routine requires block-aligned stream
				 * position — confirm against ISA-L.
				 */
				nt = false;
			}
		}
fixup_done:
		if (nt) {
			isa->gcm_upd_nt(&isa->key_data, &isa->ctx_data, out, in, len);
			counter_u64_add(intelisa_aligned_mem, 1);
			counter_u64_add(intelisa_aligned_mem_b, len);
		} else {
			isa->gcm_upd(&isa->key_data, &isa->ctx_data, out, in, len);
			counter_u64_add(intelisa_unaligned_mem, 1);
			counter_u64_add(intelisa_unaligned_mem_b, len);
		}
		if (fixup) {
			/* Process the ragged tail via the regular path. */
			in += len;
			out += len;
			len = fixup;
			fixup = 0;
			nt = false;
			goto fixup_done;
		}
	}
	isa->gcm_final(&isa->key_data, &isa->ctx_data, tagout, *taglen);
	return (0);
}
182 |
|
183 |
static int |
184 |
ktls_intelisa_aead_encrypt(struct ktls_session *tls, |
185 |
const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov, |
186 |
struct iovec *outiov, int iovcnt, uint64_t seqno) |
187 |
{ |
188 |
struct isa_gcm_struct *isa; |
189 |
struct tls_aead_data ad; |
190 |
struct tls_nonce_data nd; |
191 |
size_t noncelen, adlen, taglen; |
192 |
int ret; |
193 |
uint16_t tls_comp_len; |
194 |
|
195 |
isa = (struct isa_gcm_struct *)tls->cipher; |
196 |
|
197 |
KASSERT(isa != NULL, ("Null cipher")); |
198 |
counter_u64_add(ktls_offload_isa_aead, 1); |
199 |
taglen = KTLS_INTELISA_AEAD_TAGLEN; |
200 |
|
201 |
/* Setup the nonce */ |
202 |
memcpy(nd.fixed, tls->params.iv, TLS_AEAD_GCM_LEN); |
203 |
memcpy(&nd.seq, hdr + 1, sizeof(nd.seq)); |
204 |
noncelen = sizeof(nd); |
205 |
/* Setup the associated data */ |
206 |
tls_comp_len = ntohs(hdr->tls_length) - |
207 |
(KTLS_INTELISA_AEAD_TAGLEN + sizeof(nd.seq)); |
208 |
ad.seq = htobe64(seqno); |
209 |
ad.type = hdr->tls_type; |
210 |
ad.tls_vmajor = hdr->tls_vmajor; |
211 |
ad.tls_vminor = hdr->tls_vminor; |
212 |
ad.tls_length = htons(tls_comp_len); |
213 |
adlen = sizeof(ad); |
214 |
ret = intel_isa_seal(isa, outiov, iovcnt, |
215 |
(uint8_t *) & nd, noncelen, iniov, |
216 |
(uint8_t *) & ad, adlen, trailer, &taglen); |
217 |
|
218 |
return(ret); |
219 |
} |
220 |
|
221 |
|
222 |
static int |
223 |
ktls_intelisa_setup_cipher(struct isa_gcm_struct *isa, uint8_t *key) |
224 |
{ |
225 |
struct fpu_kern_ctx *fpu_ctx; |
226 |
|
227 |
if (key == NULL) { |
228 |
return (EINVAL); |
229 |
} |
230 |
fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NOWAIT); |
231 |
if (fpu_ctx == NULL) { |
232 |
return (ENOMEM); |
233 |
} |
234 |
fpu_kern_enter(curthread, fpu_ctx, FPU_KERN_NORMAL); |
235 |
isa->gcm_pre(key, &isa->key_data); |
236 |
fpu_kern_leave(curthread, fpu_ctx); |
237 |
fpu_kern_free_ctx(fpu_ctx); |
238 |
return (0); |
239 |
} |
240 |
|
241 |
static void |
242 |
ktls_intelisa_free(struct ktls_session *tls) |
243 |
{ |
244 |
struct isa_gcm_struct *isa; |
245 |
|
246 |
isa = tls->cipher; |
247 |
explicit_bzero(isa, sizeof(*isa)); |
248 |
free(isa, M_INTEL_ISA); |
249 |
} |
250 |
|
251 |
static int |
252 |
ktls_intelisa_try(struct socket *so, struct ktls_session *tls) |
253 |
{ |
254 |
struct isa_gcm_struct *isa; |
255 |
int error; |
256 |
|
257 |
if (ktls_use_intel_isa_gcm && |
258 |
tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) { |
259 |
isa = malloc(sizeof (*isa), M_INTEL_ISA, M_NOWAIT | M_ZERO); |
260 |
if (isa == NULL) { |
261 |
return (ENOMEM); |
262 |
} |
263 |
switch (tls->params.cipher_key_len) { |
264 |
case 16: |
265 |
isa->gcm_pre = aes_gcm_pre_128; |
266 |
isa->gcm_init = aes_gcm_init_128; |
267 |
isa->gcm_upd = aes_gcm_enc_128_update; |
268 |
isa->gcm_upd_nt = aes_gcm_enc_128_update_nt; |
269 |
isa->gcm_final = aes_gcm_enc_128_finalize; |
270 |
break; |
271 |
case 32: |
272 |
isa->gcm_pre = aes_gcm_pre_256; |
273 |
isa->gcm_init = aes_gcm_init_256; |
274 |
isa->gcm_upd = aes_gcm_enc_256_update; |
275 |
isa->gcm_upd_nt = aes_gcm_enc_256_update_nt; |
276 |
isa->gcm_final = aes_gcm_enc_256_finalize; |
277 |
break; |
278 |
default: |
279 |
free(isa, M_INTEL_ISA); |
280 |
return (EOPNOTSUPP); |
281 |
} |
282 |
|
283 |
error = ktls_intelisa_setup_cipher(isa, tls->params.cipher_key); |
284 |
if (error) { |
285 |
free(isa, M_INTEL_ISA); |
286 |
return (error); |
287 |
} |
288 |
|
289 |
tls->cipher = isa; |
290 |
tls->sw_encrypt = ktls_intelisa_aead_encrypt; |
291 |
tls->free = ktls_intelisa_free; |
292 |
return (0); |
293 |
} |
294 |
return (EOPNOTSUPP); |
295 |
} |
296 |
|
297 |
/* Backend descriptor registered with the kTLS core in intelisa_init(). */
struct ktls_crypto_backend intelisa_backend = {
	.name = "Intel ISA-L",
	.prio = 20,		/* selection priority among backends */
	.api_version = KTLS_API_VERSION,
	.try = ktls_intelisa_try,
};
303 |
|
304 |
static int |
305 |
intelisa_init(void) |
306 |
{ |
307 |
ktls_offload_isa_aead = counter_u64_alloc(M_WAITOK); |
308 |
intelisa_aligned_mem = counter_u64_alloc(M_WAITOK); |
309 |
intelisa_aligned_mem_b = counter_u64_alloc(M_WAITOK); |
310 |
intelisa_unaligned_mem = counter_u64_alloc(M_WAITOK); |
311 |
intelisa_unaligned_mem_b = counter_u64_alloc(M_WAITOK); |
312 |
return (ktls_crypto_backend_register(&intelisa_backend)); |
313 |
} |
314 |
|
315 |
static int |
316 |
intelisa_unload(void) |
317 |
{ |
318 |
int error; |
319 |
|
320 |
error = ktls_crypto_backend_deregister(&intelisa_backend); |
321 |
if (error) |
322 |
return (error); |
323 |
counter_u64_free(ktls_offload_isa_aead); |
324 |
counter_u64_free(intelisa_aligned_mem); |
325 |
counter_u64_free(intelisa_aligned_mem_b); |
326 |
counter_u64_free(intelisa_unaligned_mem); |
327 |
counter_u64_free(intelisa_unaligned_mem_b); |
328 |
return (0); |
329 |
} |
330 |
|
331 |
static int |
332 |
intelisa_module_event_handler(module_t mod, int evt, void *arg) |
333 |
{ |
334 |
switch (evt) { |
335 |
case MOD_LOAD: |
336 |
return (intelisa_init()); |
337 |
case MOD_UNLOAD: |
338 |
return (intelisa_unload()); |
339 |
default: |
340 |
return (EOPNOTSUPP); |
341 |
} |
342 |
} |
343 |
|
344 |
/* Module glue tying the event handler to the "intelisa" module name. */
static moduledata_t intelisa_moduledata = {
	"intelisa",			/* module name */
	intelisa_module_event_handler,	/* event handler */
	NULL				/* extra data (unused) */
};

/* Register at SI_SUB_PROTO_END so protocol stacks are up first. */
DECLARE_MODULE(intelisa, intelisa_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);