Blob


1 /* $OpenBSD: softraid_crypto.c,v 1.139 2020/07/13 00:06:22 kn Exp $ */
2 /*
3 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
4 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
5 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
6 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
21 #include "bio.h"
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/buf.h>
26 #include <sys/device.h>
27 #include <sys/ioctl.h>
28 #include <sys/malloc.h>
29 #include <sys/pool.h>
30 #include <sys/kernel.h>
31 #include <sys/disk.h>
32 #include <sys/rwlock.h>
33 #include <sys/queue.h>
34 #include <sys/fcntl.h>
35 #include <sys/disklabel.h>
36 #include <sys/vnode.h>
37 #include <sys/mount.h>
38 #include <sys/sensors.h>
39 #include <sys/stat.h>
40 #include <sys/conf.h>
41 #include <sys/uio.h>
42 #include <sys/dkio.h>
44 #include <crypto/cryptodev.h>
45 #include <crypto/rijndael.h>
46 #include <crypto/md5.h>
47 #include <crypto/sha1.h>
48 #include <crypto/sha2.h>
49 #include <crypto/hmac.h>
51 #include <scsi/scsi_all.h>
52 #include <scsi/scsiconf.h>
53 #include <scsi/scsi_disk.h>
55 #include <dev/softraidvar.h>
57 struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
58 int sr_crypto_create_keys(struct sr_discipline *);
59 int sr_crypto_get_kdf(struct bioc_createraid *,
60 struct sr_discipline *);
61 int sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
62 int sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
63 int sr_crypto_decrypt_key(struct sr_discipline *);
64 int sr_crypto_change_maskkey(struct sr_discipline *,
65 struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
66 int sr_crypto_create(struct sr_discipline *,
67 struct bioc_createraid *, int, int64_t);
68 int sr_crypto_assemble(struct sr_discipline *,
69 struct bioc_createraid *, int, void *);
70 int sr_crypto_alloc_resources(struct sr_discipline *);
71 void sr_crypto_free_resources(struct sr_discipline *);
72 int sr_crypto_ioctl(struct sr_discipline *,
73 struct bioc_discipline *);
74 int sr_crypto_meta_opt_handler(struct sr_discipline *,
75 struct sr_meta_opt_hdr *);
76 void sr_crypto_write(struct cryptop *);
77 int sr_crypto_rw(struct sr_workunit *);
78 int sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
79 void sr_crypto_done(struct sr_workunit *);
80 void sr_crypto_read(struct cryptop *);
81 void sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
82 u_int8_t *, int, u_char *);
83 void sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
85 #ifdef SR_DEBUG0
86 void sr_crypto_dumpkeys(struct sr_discipline *);
87 #endif
89 /* Discipline initialisation. */
90 void
91 sr_crypto_discipline_init(struct sr_discipline *sd)
92 {
93 int i;
95 /* Fill out discipline members. */
96 sd->sd_wu_size = sizeof(struct sr_crypto_wu);
97 sd->sd_type = SR_MD_CRYPTO;
98 strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
99 sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
100 sd->sd_max_wu = SR_CRYPTO_NOWU;
102 for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
103 sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
105 /* Setup discipline specific function pointers. */
106 sd->sd_alloc_resources = sr_crypto_alloc_resources;
107 sd->sd_assemble = sr_crypto_assemble;
108 sd->sd_create = sr_crypto_create;
109 sd->sd_free_resources = sr_crypto_free_resources;
110 sd->sd_ioctl_handler = sr_crypto_ioctl;
111 sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
112 sd->sd_scsi_rw = sr_crypto_rw;
113 sd->sd_scsi_done = sr_crypto_done;
/*
 * Create a new CRYPTO volume: allocate the crypto optional metadata,
 * obtain keying material (from a key disk or from userland via the KDF
 * interface) and generate the disk keys.
 *
 * Returns 0 on success, EAGAIN when userland must supply a KDF first,
 * EINVAL on any other failure.
 */
int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
    struct sr_meta_opt_item *omi;
    int rv = EINVAL;

    /* CRYPTO is a single-chunk discipline. */
    if (no_chunk != 1) {
        sr_error(sd->sd_sc, "%s requires exactly one chunk",
            sd->sd_name);
        goto done;
    }

    if (coerced_size > SR_CRYPTO_MAXSIZE) {
        sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
            sd->sd_name, coerced_size, SR_CRYPTO_MAXSIZE);
        goto done;
    }

    /* Create crypto optional metadata. */
    omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
        M_WAITOK | M_ZERO);
    omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
        M_WAITOK | M_ZERO);
    omi->omi_som->som_type = SR_OPT_CRYPTO;
    omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
    /* Ownership of omi passes to the discipline's opt metadata list. */
    SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
    sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
    sd->sd_meta->ssdi.ssd_opt_no++;

    sd->mds.mdd_crypto.key_disk = NULL;

    if (bc->bc_key_disk != NODEV) {

        /* Create a key disk. */
        if (sr_crypto_get_kdf(bc, sd))
            goto done;
        sd->mds.mdd_crypto.key_disk =
            sr_crypto_create_key_disk(sd, bc->bc_key_disk);
        if (sd->mds.mdd_crypto.key_disk == NULL)
            goto done;
        /* Key-disk volumes can be assembled without a passphrase. */
        sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

    } else if (bc->bc_opaque_flags & BIOC_SOOUT) {

        /* No hint available yet. */
        bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
        rv = EAGAIN;
        goto done;

    } else if (sr_crypto_get_kdf(bc, sd))
        goto done;

    /* Passphrase volumes cannot be automatically assembled. */
    if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
        goto done;

    sd->sd_meta->ssdi.ssd_size = coerced_size;

    /* NOTE(review): return value ignored here — confirm failure is benign. */
    sr_crypto_create_keys(sd);

    sd->sd_max_ccb_per_wu = no_chunk;

    rv = 0;
done:
    return (rv);
}
/*
 * Assemble an existing CRYPTO volume.  The mask key is recovered from
 * one of three sources: kernel-supplied data, a key disk, or userland
 * via the KDF interface.
 *
 * Returns 0 on success, EAGAIN after exporting the KDF hint (userland
 * must come back with the mask key), EINVAL otherwise.
 */
int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
    int rv = EINVAL;

    sd->mds.mdd_crypto.key_disk = NULL;

    /* Crypto optional metadata must already exist... */
    if (sd->mds.mdd_crypto.scr_meta == NULL)
        goto done;

    if (data != NULL) {
        /* Kernel already has mask key. */
        memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
            sizeof(sd->mds.mdd_crypto.scr_maskkey));
    } else if (bc->bc_key_disk != NODEV) {
        /* Read the mask key from the key disk. */
        sd->mds.mdd_crypto.key_disk =
            sr_crypto_read_key_disk(sd, bc->bc_key_disk);
        if (sd->mds.mdd_crypto.key_disk == NULL)
            goto done;
    } else if (bc->bc_opaque_flags & BIOC_SOOUT) {
        /* provide userland with kdf hint */
        if (bc->bc_opaque == NULL)
            goto done;

        /* Never copy out more than the stored hint. */
        if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
            bc->bc_opaque_size)
            goto done;

        if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
            bc->bc_opaque, bc->bc_opaque_size))
            goto done;

        /* we're done */
        bc->bc_opaque_status = BIOC_SOINOUT_OK;
        rv = EAGAIN;
        goto done;
    } else if (bc->bc_opaque_flags & BIOC_SOIN) {
        /* get kdf with maskkey from userland */
        if (sr_crypto_get_kdf(bc, sd))
            goto done;
    } else
        goto done;

    sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

    rv = 0;
done:
    return (rv);
}
/*
 * Prepare the crypto request embedded in a work unit for encryption
 * (writes) or decryption (reads).  One crypto descriptor is set up per
 * 512-byte disk block, with the block number used as the explicit IV.
 *
 * For writes the data is first copied into the work unit's DMA buffer
 * so the caller's buffer is not modified in place.
 */
struct sr_crypto_wu *
sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
{
    struct scsi_xfer *xs = wu->swu_xs;
    struct sr_discipline *sd = wu->swu_dis;
    struct sr_crypto_wu *crwu;
    struct cryptodesc *crd;
    int flags, i, n;
    daddr_t blkno;
    u_int keyndx;

    DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
        DEVNAME(sd->sd_sc), wu, encrypt);

    crwu = (struct sr_crypto_wu *)wu;
    crwu->cr_uio.uio_iovcnt = 1;
    crwu->cr_uio.uio_iov->iov_len = xs->datalen;
    if (xs->flags & SCSI_DATA_OUT) {
        /* Write: encrypt out of a private copy of the data. */
        crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
        memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
    } else
        /* Read: decrypt in place in the caller's buffer. */
        crwu->cr_uio.uio_iov->iov_base = xs->data;

    blkno = wu->swu_blk_start;
    n = xs->datalen >> DEV_BSHIFT;

    /*
     * We preallocated enough crypto descs for up to MAXPHYS of I/O.
     * Since there may be less than that we need to tweak the amount
     * of crypto desc structures to be just long enough for our needs.
     */
    KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
    crwu->cr_crp->crp_ndesc = n;
    flags = (encrypt ? CRD_F_ENCRYPT : 0) |
        CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

    /*
     * Select crypto session based on block number.
     *
     * XXX - this does not handle the case where the read/write spans
     * across a different key blocks (e.g. 0.5TB boundary). Currently
     * this is already broken by the use of scr_key[0] below.
     */
    keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
    crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];

    crwu->cr_crp->crp_opaque = crwu;
    crwu->cr_crp->crp_ilen = xs->datalen;
    crwu->cr_crp->crp_alloctype = M_DEVBUF;
    crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
    crwu->cr_crp->crp_buf = &crwu->cr_uio;
    for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
        crd = &crwu->cr_crp->crp_desc[i];
        crd->crd_skip = i << DEV_BSHIFT;
        crd->crd_len = DEV_BSIZE;
        crd->crd_inject = 0;
        crd->crd_flags = flags;
        crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
        crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
        crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
        /* Per-block IV: the disk block number (see XXX above). */
        memcpy(crd->crd_iv, &blkno, sizeof(blkno));
    }

    return (crwu);
}
/*
 * Copy KDF information from userland: the generic KDF hint is stored in
 * the on-disk metadata, the mask key in the run-time metadata only.
 * The kernel copy of the kdfinfo is scrubbed before being freed.
 *
 * Returns 0 on success, EINVAL on any validation or copyin failure.
 */
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
    int rv = EINVAL;
    struct sr_crypto_kdfinfo *kdfinfo;

    if (!(bc->bc_opaque_flags & BIOC_SOIN))
        return (rv);
    if (bc->bc_opaque == NULL)
        return (rv);
    if (bc->bc_opaque_size != sizeof(*kdfinfo))
        return (rv);

    kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
    if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
        goto out;

    /* Internal length must agree with the size userland passed in. */
    if (kdfinfo->len != bc->bc_opaque_size)
        goto out;

    /* copy KDF hint to disk meta data */
    if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
        if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
            kdfinfo->genkdf.len)
            goto out;
        memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
            &kdfinfo->genkdf, kdfinfo->genkdf.len);
    }

    /* copy mask key to run-time meta data */
    if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
        if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
            sizeof(kdfinfo->maskkey))
            goto out;
        memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
            sizeof(kdfinfo->maskkey));
    }

    bc->bc_opaque_status = BIOC_SOINOUT_OK;
    rv = 0;
out:
    /* Scrub key material before freeing. */
    explicit_bzero(kdfinfo, bc->bc_opaque_size);
    free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);

    return (rv);
}
350 int
351 sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
353 rijndael_ctx ctx;
354 int i, rv = 1;
356 switch (alg) {
357 case SR_CRYPTOM_AES_ECB_256:
358 if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
359 goto out;
360 for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
361 rijndael_encrypt(&ctx, &p[i], &c[i]);
362 rv = 0;
363 break;
364 default:
365 DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
366 "softraid", alg);
367 rv = -1;
368 goto out;
371 out:
372 explicit_bzero(&ctx, sizeof(ctx));
373 return (rv);
376 int
377 sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
379 rijndael_ctx ctx;
380 int i, rv = 1;
382 switch (alg) {
383 case SR_CRYPTOM_AES_ECB_256:
384 if (rijndael_set_key(&ctx, key, 256) != 0)
385 goto out;
386 for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
387 rijndael_decrypt(&ctx, &c[i], &p[i]);
388 rv = 0;
389 break;
390 default:
391 DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
392 "softraid", alg);
393 rv = -1;
394 goto out;
397 out:
398 explicit_bzero(&ctx, sizeof(ctx));
399 return (rv);
402 void
403 sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
404 u_int8_t *key, int key_size, u_char *check_digest)
406 u_char check_key[SHA1_DIGEST_LENGTH];
407 HMAC_SHA1_CTX hmacctx;
408 SHA1_CTX shactx;
410 bzero(check_key, sizeof(check_key));
411 bzero(&hmacctx, sizeof(hmacctx));
412 bzero(&shactx, sizeof(shactx));
414 /* k = SHA1(mask_key) */
415 SHA1Init(&shactx);
416 SHA1Update(&shactx, maskkey, maskkey_size);
417 SHA1Final(check_key, &shactx);
419 /* mac = HMAC_SHA1_k(unencrypted key) */
420 HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
421 HMAC_SHA1_Update(&hmacctx, key, key_size);
422 HMAC_SHA1_Final(check_digest, &hmacctx);
424 explicit_bzero(check_key, sizeof(check_key));
425 explicit_bzero(&hmacctx, sizeof(hmacctx));
426 explicit_bzero(&shactx, sizeof(shactx));
/*
 * Unmask the disk keys: decrypt scm_key from the on-disk metadata into
 * the run-time scr_key array using the mask key, then verify the result
 * against the stored HMAC-SHA1 check code.  The mask key is scrubbed
 * regardless of outcome.
 *
 * Returns 0 on success, 1 on failure (bad check algorithm, decryption
 * error or HMAC mismatch).
 */
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
    u_char check_digest[SHA1_DIGEST_LENGTH];
    int rv = 1;

    DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

    if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
        goto out;

    if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
        (u_char *)sd->mds.mdd_crypto.scr_key,
        sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
        sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
        goto out;

#ifdef SR_DEBUG0
    sr_crypto_dumpkeys(sd);
#endif

    /* Check that the key decrypted properly. */
    sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
        sizeof(sd->mds.mdd_crypto.scr_maskkey),
        (u_int8_t *)sd->mds.mdd_crypto.scr_key,
        sizeof(sd->mds.mdd_crypto.scr_key),
        check_digest);
    if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
        check_digest, sizeof(check_digest)) != 0) {
        /* Wrong mask key: do not leave garbage keys behind. */
        explicit_bzero(sd->mds.mdd_crypto.scr_key,
            sizeof(sd->mds.mdd_crypto.scr_key));
        goto out;
    }

    rv = 0; /* Success */
out:
    /* we don't need the mask key anymore */
    explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
        sizeof(sd->mds.mdd_crypto.scr_maskkey));

    explicit_bzero(check_digest, sizeof(check_digest));

    return rv;
}
/*
 * Generate fresh random disk keys, store them masked (encrypted with
 * the mask key) in the on-disk metadata together with an HMAC-SHA1
 * check code, then scrub the plaintext keys from memory.
 *
 * Returns 0 on success, 1 if the mask key is too large for the masking
 * cipher.
 */
int
sr_crypto_create_keys(struct sr_discipline *sd)
{

    DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
        DEVNAME(sd->sd_sc));

    /* Mask key must fit the AES key schedule used for masking. */
    if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
        return (1);

    /* XXX allow user to specify */
    sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

    /* generate crypto keys */
    arc4random_buf(sd->mds.mdd_crypto.scr_key,
        sizeof(sd->mds.mdd_crypto.scr_key));

    /* Mask the disk keys. */
    /* NOTE(review): sr_crypto_encrypt() return value is ignored here. */
    sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
    sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
        (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
        sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
        sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

    /* Prepare key decryption check code. */
    sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
    sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
        sizeof(sd->mds.mdd_crypto.scr_maskkey),
        (u_int8_t *)sd->mds.mdd_crypto.scr_key,
        sizeof(sd->mds.mdd_crypto.scr_key),
        sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

    /* Erase the plaintext disk keys */
    explicit_bzero(sd->mds.mdd_crypto.scr_key,
        sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
    sr_crypto_dumpkeys(sd);
#endif

    sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
        SR_CRYPTOF_KDFHINT;

    return (0);
}
/*
 * Change the mask key (passphrase): verify kdfinfo1's mask key against
 * the stored check code, then re-mask the disk keys with kdfinfo2's
 * mask key and update the KDF hint and check code in the metadata.
 * Both supplied mask keys are scrubbed before returning.
 *
 * Returns 0 on success, EPERM for a wrong key/passphrase, 1 otherwise.
 */
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
    u_char check_digest[SHA1_DIGEST_LENGTH];
    u_char *c, *p = NULL;
    size_t ksz;
    int rv = 1;

    DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
        DEVNAME(sd->sd_sc));

    if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
        goto out;

    c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
    ksz = sizeof(sd->mds.mdd_crypto.scr_key);
    p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
    if (p == NULL)
        goto out;

    /* Unmask the disk keys with the old mask key into p. */
    if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
        sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
        goto out;

#ifdef SR_DEBUG0
    sr_crypto_dumpkeys(sd);
#endif

    /* Verify the old mask key before accepting the change. */
    sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
        sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
    if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
        check_digest, sizeof(check_digest)) != 0) {
        sr_error(sd->sd_sc, "incorrect key or passphrase");
        rv = EPERM;
        goto out;
    }

    /* Copy new KDF hint to metadata, if supplied. */
    if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
        if (kdfinfo2->genkdf.len >
            sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
            goto out;
        explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
            sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
        memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
            &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
    }

    /* Mask the disk keys. */
    c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
    if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
        sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
        goto out;

    /* Prepare key decryption check code. */
    /*
     * NOTE(review): the new check code is computed over scr_key (the
     * run-time keys), not over p — this assumes scr_key holds the same
     * plaintext keys just decrypted into p; confirm against callers.
     */
    sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
    sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
        sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
        sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

    /* Copy new encrypted key and HMAC to metadata. */
    memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
        sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

    rv = 0; /* Success */

out:
    if (p) {
        explicit_bzero(p, ksz);
        free(p, M_DEVBUF, ksz);
    }

    explicit_bzero(check_digest, sizeof(check_digest));
    explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
    explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

    return (rv);
}
/*
 * Create a key disk on device `dev': generate a fresh mask key, build a
 * fake single-chunk KEYDISK discipline around the device and save
 * softraid metadata with the mask key in the optional metadata area.
 *
 * Returns the allocated chunk describing the key disk (owned by the
 * caller's discipline) or NULL on failure.
 */
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
    struct sr_softc *sc = sd->sd_sc;
    struct sr_discipline *fakesd = NULL;
    struct sr_metadata *sm = NULL;
    struct sr_meta_chunk *km;
    struct sr_meta_opt_item *omi = NULL;
    struct sr_meta_keydisk *skm;
    struct sr_chunk *key_disk = NULL;
    struct disklabel label;
    struct vnode *vn;
    char devname[32];
    int c, part, open = 0;

    /*
     * Create a metadata structure on the key disk and store
     * keying material in the optional metadata.
     */

    sr_meta_getdevname(sc, dev, devname, sizeof(devname));

    /* Make sure chunk is not already in use. */
    c = sr_chunk_in_use(sc, dev);
    if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
        sr_error(sc, "%s is already in use", devname);
        goto done;
    }

    /* Open device. */
    if (bdevvp(dev, &vn)) {
        sr_error(sc, "cannot open key disk %s", devname);
        goto done;
    }
    if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
        DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
            "open %s\n", DEVNAME(sc), devname);
        vput(vn);
        goto done;
    }
    open = 1; /* close dev on error */

    /* Get partition details. */
    part = DISKPART(dev);
    if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
        FREAD, NOCRED, curproc)) {
        DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
            "failed\n", DEVNAME(sc));
        goto done;
    }
    if (label.d_partitions[part].p_fstype != FS_RAID) {
        sr_error(sc, "%s partition not of type RAID (%d)",
            devname, label.d_partitions[part].p_fstype);
        goto done;
    }

    /*
     * Create and populate chunk metadata.
     */

    key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
    km = &key_disk->src_meta;

    key_disk->src_dev_mm = dev;
    key_disk->src_vn = vn;
    strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
    key_disk->src_size = 0;

    km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
    km->scmi.scm_chunk_id = 0;
    km->scmi.scm_size = 0;
    km->scmi.scm_coerced_size = 0;
    strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
    memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
        sizeof(struct sr_uuid));

    sr_checksum(sc, km, &km->scm_checksum,
        sizeof(struct sr_meta_chunk_invariant));

    km->scm_status = BIOC_SDONLINE;

    /*
     * Create and populate our own discipline and metadata.
     */

    sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
    sm->ssdi.ssd_magic = SR_MAGIC;
    sm->ssdi.ssd_version = SR_META_VERSION;
    sm->ssd_ondisk = 0;
    sm->ssdi.ssd_vol_flags = 0;
    memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
        sizeof(struct sr_uuid));
    sm->ssdi.ssd_chunk_no = 1;
    sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
    sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
    sm->ssdi.ssd_size = 0;
    strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
    snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
        "SR %s", "KEYDISK");
    snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
        "%03d", SR_META_VERSION);

    fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
        M_WAITOK | M_ZERO);
    fakesd->sd_sc = sd->sd_sc;
    fakesd->sd_meta = sm;
    fakesd->sd_meta_type = SR_META_F_NATIVE;
    fakesd->sd_vol_status = BIOC_SVONLINE;
    strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
    SLIST_INIT(&fakesd->sd_meta_opt);

    /* Add chunk to volume. */
    fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
        M_WAITOK | M_ZERO);
    fakesd->sd_vol.sv_chunks[0] = key_disk;
    SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
    SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

    /* Generate mask key. */
    arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
        sizeof(sd->mds.mdd_crypto.scr_maskkey));

    /* Copy mask key to optional metadata area. */
    omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
        M_WAITOK | M_ZERO);
    omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
        M_WAITOK | M_ZERO);
    omi->omi_som->som_type = SR_OPT_KEYDISK;
    omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
    skm = (struct sr_meta_keydisk *)omi->omi_som;
    memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
        sizeof(skm->skm_maskkey));
    SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
    fakesd->sd_meta->ssdi.ssd_opt_no++;

    /* Save metadata. */
    if (sr_meta_save(fakesd, SR_META_DIRTY)) {
        sr_error(sc, "could not save metadata to %s", devname);
        goto fail;
    }

    goto done;

fail:
    free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
    key_disk = NULL;

done:
    /* The fake discipline and its scaffolding are only needed on-disk. */
    free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
    if (fakesd && fakesd->sd_vol.sv_chunks)
        free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
            sizeof(struct sr_chunk *));
    free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
    free(sm, M_DEVBUF, sizeof(struct sr_metadata));
    /*
     * NOTE(review): `open' is never cleared on success, so the vnode is
     * closed here even when key_disk (holding src_vn) is returned —
     * confirm callers reopen the device before using src_vn.
     */
    if (open) {
        VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
        vput(vn);
    }

    return key_disk;
}
762 struct sr_chunk *
763 sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
765 struct sr_softc *sc = sd->sd_sc;
766 struct sr_metadata *sm = NULL;
767 struct sr_meta_opt_item *omi, *omi_next;
768 struct sr_meta_opt_hdr *omh;
769 struct sr_meta_keydisk *skm;
770 struct sr_meta_opt_head som;
771 struct sr_chunk *key_disk = NULL;
772 struct disklabel label;
773 struct vnode *vn = NULL;
774 char devname[32];
775 int c, part, open = 0;
777 /*
778 * Load a key disk and load keying material into memory.
779 */
781 SLIST_INIT(&som);
783 sr_meta_getdevname(sc, dev, devname, sizeof(devname));
785 /* Make sure chunk is not already in use. */
786 c = sr_chunk_in_use(sc, dev);
787 if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
788 sr_error(sc, "%s is already in use", devname);
789 goto done;
792 /* Open device. */
793 if (bdevvp(dev, &vn)) {
794 sr_error(sc, "cannot open key disk %s", devname);
795 goto done;
797 if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
798 DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
799 "open %s\n", DEVNAME(sc), devname);
800 vput(vn);
801 goto done;
803 open = 1; /* close dev on error */
805 /* Get partition details. */
806 part = DISKPART(dev);
807 if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
808 NOCRED, curproc)) {
809 DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
810 "failed\n", DEVNAME(sc));
811 goto done;
813 if (label.d_partitions[part].p_fstype != FS_RAID) {
814 sr_error(sc, "%s partition not of type RAID (%d)",
815 devname, label.d_partitions[part].p_fstype);
816 goto done;
819 /*
820 * Read and validate key disk metadata.
821 */
822 sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
823 if (sr_meta_native_read(sd, dev, sm, NULL)) {
824 sr_error(sc, "native bootprobe could not read native metadata");
825 goto done;
828 if (sr_meta_validate(sd, dev, sm, NULL)) {
829 DNPRINTF(SR_D_META, "%s: invalid metadata\n",
830 DEVNAME(sc));
831 goto done;
834 /* Make sure this is a key disk. */
835 if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
836 sr_error(sc, "%s is not a key disk", devname);
837 goto done;
840 /* Construct key disk chunk. */
841 key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
842 key_disk->src_dev_mm = dev;
843 key_disk->src_vn = vn;
844 key_disk->src_size = 0;
846 memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
847 sizeof(key_disk->src_meta));
849 /* Read mask key from optional metadata. */
850 sr_meta_opt_load(sc, sm, &som);
851 SLIST_FOREACH(omi, &som, omi_link) {
852 omh = omi->omi_som;
853 if (omh->som_type == SR_OPT_KEYDISK) {
854 skm = (struct sr_meta_keydisk *)omh;
855 memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
856 sizeof(sd->mds.mdd_crypto.scr_maskkey));
857 } else if (omh->som_type == SR_OPT_CRYPTO) {
858 /* Original keydisk format with key in crypto area. */
859 memcpy(sd->mds.mdd_crypto.scr_maskkey,
860 omh + sizeof(struct sr_meta_opt_hdr),
861 sizeof(sd->mds.mdd_crypto.scr_maskkey));
865 open = 0;
867 done:
868 for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
869 omi_next = SLIST_NEXT(omi, omi_link);
870 free(omi->omi_som, M_DEVBUF, 0);
871 free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
874 free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
876 if (vn && open) {
877 VOP_CLOSE(vn, FREAD, NOCRED, curproc);
878 vput(vn);
881 return key_disk;
884 static void
885 sr_crypto_free_sessions(struct sr_discipline *sd)
887 u_int i;
889 for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
890 if (sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1) {
891 crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
892 sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
/*
 * Allocate all run-time resources for a CRYPTO volume: work units,
 * CCBs, per-work-unit DMA buffers and crypto requests, and one crypto
 * framework session per 2^SR_CRYPTO_KEY_BLKSHIFT disk blocks.  Also
 * unmasks the disk keys.
 *
 * Returns 0 on success or an errno (EINVAL, ENOMEM, EPERM, EFBIG).
 */
int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
    struct sr_workunit *wu;
    struct sr_crypto_wu *crwu;
    struct cryptoini cri;
    u_int num_keys, i;

    DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
        DEVNAME(sd->sd_sc));

    /* Map the metadata algorithm onto a crypto framework key length. */
    sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
    switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
    case SR_CRYPTOA_AES_XTS_128:
        sd->mds.mdd_crypto.scr_klen = 256;
        break;
    case SR_CRYPTOA_AES_XTS_256:
        sd->mds.mdd_crypto.scr_klen = 512;
        break;
    default:
        sr_error(sd->sd_sc, "unknown crypto algorithm");
        return (EINVAL);
    }

    for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
        sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

    if (sr_wu_alloc(sd)) {
        sr_error(sd->sd_sc, "unable to allocate work units");
        return (ENOMEM);
    }
    if (sr_ccb_alloc(sd)) {
        sr_error(sd->sd_sc, "unable to allocate CCBs");
        return (ENOMEM);
    }
    if (sr_crypto_decrypt_key(sd)) {
        sr_error(sd->sd_sc, "incorrect key or passphrase");
        return (EPERM);
    }

    /*
     * For each work unit allocate the uio, iovec and crypto structures.
     * These have to be allocated now because during runtime we cannot
     * fail an allocation without failing the I/O (which can cause real
     * problems).
     */
    TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
        crwu = (struct sr_crypto_wu *)wu;
        crwu->cr_uio.uio_iov = &crwu->cr_iov;
        crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
        crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
        if (crwu->cr_crp == NULL)
            return (ENOMEM);
    }

    memset(&cri, 0, sizeof(cri));
    cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
    cri.cri_klen = sd->mds.mdd_crypto.scr_klen;

    /* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
    num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
        SR_CRYPTO_KEY_BLKSHIFT) + 1;
    if (num_keys > SR_CRYPTO_MAXKEYS)
        return (EFBIG);
    for (i = 0; i < num_keys; i++) {
        cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
        if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
            &cri, 0) != 0) {
            /* Partial failure: release the sessions created so far. */
            sr_crypto_free_sessions(sd);
            return (EINVAL);
        }
    }

    sr_hotplug_register(sd, sr_crypto_hotplug);

    return (0);
}
/*
 * Release all run-time resources of a CRYPTO volume: the key disk
 * chunk (scrubbed first), hotplug registration, crypto sessions and
 * the per-work-unit DMA buffers and crypto requests, then the generic
 * work units and CCBs.
 */
void
sr_crypto_free_resources(struct sr_discipline *sd)
{
    struct sr_workunit *wu;
    struct sr_crypto_wu *crwu;

    DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
        DEVNAME(sd->sd_sc));

    if (sd->mds.mdd_crypto.key_disk != NULL) {
        /* Scrub the chunk (it holds key material) before freeing. */
        explicit_bzero(sd->mds.mdd_crypto.key_disk,
            sizeof(*sd->mds.mdd_crypto.key_disk));
        free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
            sizeof(*sd->mds.mdd_crypto.key_disk));
    }

    sr_hotplug_unregister(sd, sr_crypto_hotplug);

    sr_crypto_free_sessions(sd);

    TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
        crwu = (struct sr_crypto_wu *)wu;
        if (crwu->cr_dmabuf)
            dma_free(crwu->cr_dmabuf, MAXPHYS);
        if (crwu->cr_crp)
            crypto_freereq(crwu->cr_crp);
    }

    sr_wu_free(sd);
    sr_ccb_free(sd);
}
/*
 * Discipline-specific ioctl handler: export the KDF hint to userland
 * or change the volume passphrase.  Stack copies of the KDF material
 * are scrubbed on every exit path (the switch falls through to `bad').
 *
 * Returns 0 on success, non-zero on failure.
 */
int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
    struct sr_crypto_kdfpair kdfpair;
    struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
    int size, rv = 1;

    DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
        DEVNAME(sd->sd_sc), bd->bd_cmd);

    switch (bd->bd_cmd) {
    case SR_IOCTL_GET_KDFHINT:

        /* Get KDF hint for userland. */
        size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
        if (bd->bd_data == NULL || bd->bd_size > size)
            goto bad;
        if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
            bd->bd_data, bd->bd_size))
            goto bad;

        rv = 0;

        break;

    case SR_IOCTL_CHANGE_PASSPHRASE:

        /* Attempt to change passphrase. */

        size = sizeof(kdfpair);
        if (bd->bd_data == NULL || bd->bd_size > size)
            goto bad;
        if (copyin(bd->bd_data, &kdfpair, size))
            goto bad;

        size = sizeof(kdfinfo1);
        if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
            goto bad;
        if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
            goto bad;

        size = sizeof(kdfinfo2);
        if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
            goto bad;
        if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
            goto bad;

        if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
            goto bad;

        /* Save metadata to disk. */
        rv = sr_meta_save(sd, SR_META_DIRTY);

        break;
    }

bad:
    /* Scrub key material from the stack on all paths. */
    explicit_bzero(&kdfpair, sizeof(kdfpair));
    explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
    explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

    return (rv);
}
1071 int
1072 sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
1074 int rv = EINVAL;
1076 if (om->som_type == SR_OPT_CRYPTO) {
1077 sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
1078 rv = 0;
1081 return (rv);
/*
 * SCSI read/write entry point.  Writes are encrypted first (the disk
 * I/O is issued from the sr_crypto_write() callback); reads go straight
 * to the disk and are decrypted on completion in sr_crypto_done().
 *
 * Returns 0 on success, non-zero on failure.
 */
int
sr_crypto_rw(struct sr_workunit *wu)
{
    struct sr_crypto_wu *crwu;
    daddr_t blkno;
    int rv = 0;

    DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
        DEVNAME(wu->swu_dis->sd_sc), wu);

    if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
        return (1);

    if (wu->swu_xs->flags & SCSI_DATA_OUT) {
        /* Encrypt the data, then write from the callback. */
        crwu = sr_crypto_prepare(wu, 1);
        crwu->cr_crp->crp_callback = sr_crypto_write;
        rv = crypto_dispatch(crwu->cr_crp);
        if (rv == 0)
            rv = crwu->cr_crp->crp_etype;
    } else
        rv = sr_crypto_dev_rw(wu, NULL);

    return (rv);
}
1109 void
1110 sr_crypto_write(struct cryptop *crp)
1112 struct sr_crypto_wu *crwu = crp->crp_opaque;
1113 struct sr_workunit *wu = &crwu->cr_wu;
1114 int s;
1116 DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
1117 DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1119 if (crp->crp_etype) {
1120 /* fail io */
1121 wu->swu_xs->error = XS_DRIVER_STUFFUP;
1122 s = splbio();
1123 sr_scsi_done(wu->swu_dis, wu->swu_xs);
1124 splx(s);
1127 sr_crypto_dev_rw(wu, crwu);
/*
 * Issue the actual disk I/O for a work unit.  For writes, crwu carries
 * the encrypted data and the CCB is pointed at its buffer; for reads
 * crwu is NULL and the caller's buffer is used directly.
 *
 * Returns 0 on success, 1 if no CCB could be obtained.
 */
int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
    struct sr_discipline *sd = wu->swu_dis;
    struct scsi_xfer *xs = wu->swu_xs;
    struct sr_ccb *ccb;
    struct uio *uio;
    daddr_t blkno;

    blkno = wu->swu_blk_start;

    ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
    if (!ccb) {
        /* should never happen but handle more gracefully */
        printf("%s: %s: too many ccbs queued\n",
            DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
        goto bad;
    }
    if (!ISSET(xs->flags, SCSI_DATA_IN)) {
        /* Write: point the CCB at the encrypted copy, not xs->data. */
        uio = crwu->cr_crp->crp_buf;
        ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
        ccb->ccb_opaque = crwu;
    }
    sr_wu_enqueue_ccb(wu, ccb);
    sr_schedule_wu(wu);

    return (0);

bad:
    /* wu is unwound by sr_wu_put */
    if (crwu)
        crwu->cr_crp->crp_etype = EINVAL;
    return (1);
}
/*
 * Disk I/O completion: successful reads are handed to the crypto
 * framework for decryption (completion continues in sr_crypto_read());
 * everything else is completed immediately.
 */
void
sr_crypto_done(struct sr_workunit *wu)
{
    struct scsi_xfer *xs = wu->swu_xs;
    struct sr_crypto_wu *crwu;
    int s;

    /* If this was a successful read, initiate decryption of the data. */
    if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
        crwu = sr_crypto_prepare(wu, 0);
        crwu->cr_crp->crp_callback = sr_crypto_read;
        DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
            DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
        crypto_dispatch(crwu->cr_crp);
        return;
    }

    s = splbio();
    sr_scsi_done(wu->swu_dis, wu->swu_xs);
    splx(s);
}
1187 void
1188 sr_crypto_read(struct cryptop *crp)
1190 struct sr_crypto_wu *crwu = crp->crp_opaque;
1191 struct sr_workunit *wu = &crwu->cr_wu;
1192 int s;
1194 DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
1195 DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1197 if (crp->crp_etype)
1198 wu->swu_xs->error = XS_DRIVER_STUFFUP;
1200 s = splbio();
1201 sr_scsi_done(wu->swu_dis, wu->swu_xs);
1202 splx(s);
/*
 * Hotplug notification for the CRYPTO discipline.  Currently only logs
 * the event under SR_D_MISC debugging; no state is changed.
 */
void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
    DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
        DEVNAME(sd->sd_sc), diskp->dk_name, action);
}
#ifdef SR_DEBUG0
/*
 * Debug helper: hex-dump the masked on-disk keys (scm_key) followed by
 * the unmasked run-time keys (scr_key).  Compiled only with SR_DEBUG0.
 */
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
    int key, byte;

    printf("sr_crypto_dumpkeys:\n");
    for (key = 0; key < SR_CRYPTO_MAXKEYS; key++) {
        printf("\tscm_key[%d]: 0x", key);
        for (byte = 0; byte < SR_CRYPTO_KEYBYTES; byte++)
            printf("%02x",
                sd->mds.mdd_crypto.scr_meta->scm_key[key][byte]);
        printf("\n");
    }
    printf("sr_crypto_dumpkeys: runtime data keys:\n");
    for (key = 0; key < SR_CRYPTO_MAXKEYS; key++) {
        printf("\tscr_key[%d]: 0x", key);
        for (byte = 0; byte < SR_CRYPTO_KEYBYTES; byte++)
            printf("%02x",
                sd->mds.mdd_crypto.scr_key[key][byte]);
        printf("\n");
    }
}
#endif /* SR_DEBUG */