/* $OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/dkio.h>

#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>
#include <dev/rndvar.h>
/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	TAILQ_ENTRY(sr_crypto_wu)	 cr_link;
	struct uio			 cr_uio;
	struct iovec			 cr_iov;
	struct cryptop			*cr_crp;
	struct cryptodesc		*cr_descs;
	struct sr_workunit		*cr_wu;
	void				*cr_dmabuf;
};
struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
void		sr_crypto_wu_put(struct sr_crypto_wu *);
int		sr_crypto_create_keys(struct sr_discipline *);
int		sr_crypto_get_kdf(struct bioc_createraid *,
		    struct sr_discipline *);
int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_decrypt_key(struct sr_discipline *);
int		sr_crypto_change_maskkey(struct sr_discipline *,
		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
int		sr_crypto_create(struct sr_discipline *,
		    struct bioc_createraid *, int, int64_t);
int		sr_crypto_assemble(struct sr_discipline *,
		    struct bioc_createraid *, int, void *);
int		sr_crypto_alloc_resources(struct sr_discipline *);
void		sr_crypto_free_resources(struct sr_discipline *);
int		sr_crypto_ioctl(struct sr_discipline *,
		    struct bioc_discipline *);
int		sr_crypto_meta_opt_handler(struct sr_discipline *,
		    struct sr_meta_opt_hdr *);
int		sr_crypto_write(struct cryptop *);
int		sr_crypto_rw(struct sr_workunit *);
int		sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
void		sr_crypto_done(struct sr_workunit *);
int		sr_crypto_read(struct cryptop *);
void		sr_crypto_finish_io(struct sr_workunit *);
void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
		    u_int8_t *, int, u_char *);
void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);

#ifdef SR_DEBUG0
void		sr_crypto_dumpkeys(struct sr_discipline *);
#endif
/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int i;

	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
	sd->sd_scsi_done = sr_crypto_done;
}
int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	struct sr_meta_opt_item	*omi;
	int			rv = EINVAL;

	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "%s requires exactly one chunk",
		    sd->sd_name);
		goto done;
	}

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	sd->sd_meta->ssdi.ssd_size = coerced_size;

	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	rv = 0;
done:
	return (rv);
}
int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}
struct sr_crypto_wu *
sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr64_t		blk = 0;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
	if (crwu == NULL)
		panic("sr_crypto_wu_get: out of wus");

	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;
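
	/*
	 * Extract the starting block from the CDB: 6-, 10- and 16-byte
	 * READ/WRITE commands carry the LBA as 3-, 4- and 8-byte
	 * big-endian fields respectively.
	 */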
	if (xs->cmdlen == 10)
		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
	else if (xs->cmdlen == 16)
		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
	else if (xs->cmdlen == 6)
		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);

	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/* Select crypto session based on block number */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	if (keyndx >= SR_CRYPTO_MAXKEYS)
		goto unwind;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
		goto unwind;
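
	/*
	 * Note that each key covers a zone of 2^SR_CRYPTO_KEY_BLKSHIFT
	 * 512-byte blocks, so the starting block alone determines which
	 * preallocated session, and hence which disk key, this transfer
	 * uses.
	 */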
	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = CRYPTO_AES_XTS;

		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
		case SR_CRYPTOA_AES_XTS_128:
			crd->crd_klen = 256;
			break;
		case SR_CRYPTOA_AES_XTS_256:
			crd->crd_klen = 512;
			break;
		default:
			goto unwind;
		}
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
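		/*
		 * The 512-byte block number doubles as the per-sector IV:
		 * CRD_F_IV_EXPLICIT tells the crypto layer to take the
		 * AES-XTS tweak from crd_iv rather than from the buffer.
		 */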
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}
	crwu->cr_wu = wu;
	crwu->cr_crp->crp_opaque = crwu;

	return (crwu);

unwind:
	/* steal the descriptors back from the cryptop */
	crwu->cr_crp->crp_desc = NULL;

	return (NULL);
}
void
sr_crypto_wu_put(struct sr_crypto_wu *crwu)
{
	struct cryptop		*crp = crwu->cr_crp;
	struct sr_workunit	*wu = crwu->cr_wu;
	struct sr_discipline	*sd = wu->swu_dis;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), crwu);

	/* steal the descriptors back from the cryptop */
	crp->crp_desc = NULL;

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
}
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}
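
/*
 * sr_crypto_encrypt()/sr_crypto_decrypt() are only ever used to mask and
 * unmask the disk key material itself, never disk data; the plaintext fed
 * to AES-ECB here is randomly generated key material, not user data.
 */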
int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}
int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}
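
/*
 * Compute the key decryption check code: an HMAC-SHA1 over the plaintext
 * disk keys, keyed with SHA1(mask_key). Comparing this against the MAC
 * stored in the metadata verifies a candidate mask key without keeping a
 * plaintext copy of the keys around.
 */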
void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char			check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX		hmacctx;
	SHA1_CTX		shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}
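
/*
 * Key hierarchy: a mask key, either derived from the passphrase KDF in
 * userland or stored on a key disk, encrypts the randomly generated disk
 * keys. Only the masked keys, the HMAC check code and the KDF hint are
 * written to the crypto optional metadata.
 */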
int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item	*omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto fail;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto fail;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto fail;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)\n",
		    devname, label.d_partitions[part].p_fstype);
		goto fail;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);
	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	if (key_disk)
		free(key_disk, M_DEVBUF);
	key_disk = NULL;

done:
	if (omi)
		free(omi, M_DEVBUF);
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
	if (fakesd)
		free(fakesd, M_DEVBUF);
	if (sm)
		free(sm, M_DEVBUF);
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}
struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt_item	*omi, *omi_next;
	struct sr_meta_opt_hdr	*omh;
	struct sr_meta_keydisk	*skm;
	struct sr_meta_opt_head som;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Load a key disk and load keying material into memory.
	 */

	SLIST_INIT(&som);

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto done;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)\n",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		sr_error(sc, "native bootprobe could not read native metadata");
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		sr_error(sc, "%s is not a key disk", devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
	    sizeof(key_disk->src_meta));

	/* Read mask key from optional metadata. */
	sr_meta_opt_load(sc, sm, &som);
	SLIST_FOREACH(omi, &som, omi_link) {
		omh = omi->omi_som;
		if (omh->som_type == SR_OPT_KEYDISK) {
			skm = (struct sr_meta_keydisk *)omh;
			bcopy(&skm->skm_maskkey,
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		} else if (omh->som_type == SR_OPT_CRYPTO) {
			/* Original keydisk format with key in crypto area. */
			bcopy((u_char *)omh + sizeof(struct sr_meta_opt_hdr),
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		}
	}

	open = 0;

done:
	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
		omi_next = SLIST_NEXT(omi, omi_link);
		if (omi->omi_som)
			free(omi->omi_som, M_DEVBUF);
		free(omi, M_DEVBUF);
	}

	if (sm)
		free(sm, M_DEVBUF);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}
int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct cryptoini	cri;
	struct sr_crypto_wu	*crwu;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each wu allocate the uio, iovec and crypto structures.
	 * these have to be allocated now because during runtime we can't
	 * fail an allocation without failing the io (which can cause real
	 * problems).
	 */
	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
	for (i = 0; i < sd->sd_max_wu; i++) {
		crwu = malloc(sizeof(*crwu), M_DEVBUF,
		    M_WAITOK | M_ZERO | M_CANFAIL);
		if (crwu == NULL)
			return (ENOMEM);
		/* put it on the list now so if we fail it'll be freed */
		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

		crwu->cr_uio.uio_iov = &crwu->cr_iov;
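		/*
		 * A MAXPHYS-sized DMA buffer serves as the bounce buffer
		 * that writes are encrypted into, and the crypto request
		 * is preallocated with one descriptor per 512-byte block
		 * of a maximal transfer.
		 */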
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		/* steal the list of cryptodescs */
		crwu->cr_descs = crwu->cr_crp->crp_desc;
		crwu->cr_crp->crp_desc = NULL;
	}
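
	/*
	 * AES-XTS takes two keys of equal size, so cri_klen is twice the
	 * nominal AES key length: 256 bits for XTS-128, 512 for XTS-256.
	 */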
	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		cri.cri_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		cri.cri_klen = 512;
		break;
	default:
		return (EINVAL);
	}

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			for (i = 0;
			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			    i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}
void
sr_crypto_free_resources(struct sr_discipline *sd)
{
	struct sr_crypto_wu	*crwu;
	u_int			i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(struct sr_chunk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);

		if (crwu->cr_dmabuf != NULL)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp) {
			/* twiddle cryptoreq back */
			crwu->cr_crp->crp_desc = crwu->cr_descs;
			crypto_freereq(crwu->cr_crp);
		}
		free(crwu, M_DEVBUF);
	}
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

	sr_wu_free(sd);
	sr_ccb_free(sd);
}
int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}
int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
{
	int rv = EINVAL;

	if (om->som_type == SR_OPT_CRYPTO) {
		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
		rv = 0;
	}

	return (rv);
}
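
/*
 * I/O flow: writes are encrypted into the bounce buffer first
 * (sr_crypto_rw) and the disk I/O is issued from the crypto callback
 * (sr_crypto_write -> sr_crypto_rw2). Reads issue the disk I/O directly
 * (sr_crypto_rw2) and decrypt in place once it completes
 * (sr_crypto_done -> sr_crypto_read).
 */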
int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	int			s, rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crwu = sr_crypto_wu_get(wu, 1);
		if (crwu == NULL)
			return (1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		s = splvm();
		if (crypto_invoke(crwu->cr_crp))
			rv = 1;
		else
			rv = crwu->cr_crp->crp_etype;
		splx(s);
	} else
		rv = sr_crypto_rw2(wu, NULL);

	return (rv);
}
int
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_crypto_finish_io(wu);
		splx(s);
	}

	return (sr_crypto_rw2(wu, crwu));
}
int
sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	int			s;
	daddr64_t		blk;

	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
		goto bad;

	blk += sd->sd_meta->ssd_data_offset;

	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);

	s = splbio();

	if (sr_check_io_collision(wu))
		goto queued;

	sr_raid_startwu(wu);

queued:
	splx(s);
	return (0);
bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}
void
sr_crypto_done(struct sr_workunit *wu)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_crypto_wu	*crwu;
	struct sr_ccb		*ccb;
	int			s;

	/* If this was a successful read, initiate decryption of the data. */
	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
		/* only fails on implementation error */
		crwu = sr_crypto_wu_get(wu, 0);
		if (crwu == NULL)
			panic("sr_crypto_intr: no wu");
		crwu->cr_crp->crp_callback = sr_crypto_read;
		ccb = TAILQ_FIRST(&wu->swu_ccb);
		if (ccb == NULL)
			panic("sr_crypto_done: no ccbs on workunit");
		ccb->ccb_opaque = crwu;
		DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
		s = splvm();
		crypto_invoke(crwu->cr_crp);
		splx(s);
		return;
	}

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);
}
void
sr_crypto_finish_io(struct sr_workunit *wu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
#ifdef SR_DEBUG
	struct sr_softc		*sc = sd->sd_sc;
#endif /* SR_DEBUG */

	splassert(IPL_BIO);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
	    DEVNAME(sc), wu, xs);

	if (wu->swu_cb_active == 1)
		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
		if (ccb->ccb_opaque == NULL)
			continue;
		sr_crypto_wu_put(ccb->ccb_opaque);
	}

	sr_scsi_done(sd, xs);
}
int
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);

	return (0);
}
void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}
#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int	i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif /* SR_DEBUG */