/* $OpenBSD: softraid_crypto.c,v 1.104 2014/01/21 04:23:14 jsing Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
21 #include "bio.h"
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/buf.h>
26 #include <sys/device.h>
27 #include <sys/ioctl.h>
28 #include <sys/proc.h>
29 #include <sys/malloc.h>
30 #include <sys/pool.h>
31 #include <sys/kernel.h>
32 #include <sys/disk.h>
33 #include <sys/rwlock.h>
34 #include <sys/queue.h>
35 #include <sys/fcntl.h>
36 #include <sys/disklabel.h>
37 #include <sys/vnode.h>
38 #include <sys/mount.h>
39 #include <sys/sensors.h>
40 #include <sys/stat.h>
41 #include <sys/conf.h>
42 #include <sys/uio.h>
43 #include <sys/dkio.h>
45 #include <crypto/cryptodev.h>
46 #include <crypto/cryptosoft.h>
47 #include <crypto/rijndael.h>
48 #include <crypto/md5.h>
49 #include <crypto/sha1.h>
50 #include <crypto/sha2.h>
51 #include <crypto/hmac.h>
53 #include <scsi/scsi_all.h>
54 #include <scsi/scsiconf.h>
55 #include <scsi/scsi_disk.h>
57 #include <dev/softraidvar.h>
58 #include <dev/rndvar.h>

/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	TAILQ_ENTRY(sr_crypto_wu)	 cr_link;
	struct uio			 cr_uio;
	struct iovec			 cr_iov;
	struct cryptop			*cr_crp;
	struct cryptodesc		*cr_descs;
	struct sr_workunit		*cr_wu;
	void				*cr_dmabuf;
};
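
/*
 * Lifecycle: sr_crypto_wu_get() dequeues a preallocated crwu and binds it
 * to a work unit; sr_crypto_wu_put() returns it to the freelist once the
 * crypto operation and disk I/O have completed.
 */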

struct sr_crypto_wu	*sr_crypto_wu_get(struct sr_workunit *, int);
void			sr_crypto_wu_put(struct sr_crypto_wu *);
int			sr_crypto_create_keys(struct sr_discipline *);
struct sr_chunk		*sr_crypto_create_key_disk(struct sr_discipline *,
			    dev_t);
struct sr_chunk		*sr_crypto_read_key_disk(struct sr_discipline *,
			    dev_t);
int			sr_crypto_get_kdf(struct bioc_createraid *,
			    struct sr_discipline *);
int			sr_crypto_decrypt(u_char *, u_char *, u_char *,
			    size_t, int);
int			sr_crypto_encrypt(u_char *, u_char *, u_char *,
			    size_t, int);
int			sr_crypto_decrypt_key(struct sr_discipline *);
int			sr_crypto_change_maskkey(struct sr_discipline *,
			    struct sr_crypto_kdfinfo *,
			    struct sr_crypto_kdfinfo *);
int			sr_crypto_create(struct sr_discipline *,
			    struct bioc_createraid *, int, int64_t);
int			sr_crypto_assemble(struct sr_discipline *,
			    struct bioc_createraid *, int, void *);
int			sr_crypto_alloc_resources(struct sr_discipline *);
void			sr_crypto_free_resources(struct sr_discipline *);
int			sr_crypto_ioctl(struct sr_discipline *,
			    struct bioc_discipline *);
int			sr_crypto_meta_opt_handler(struct sr_discipline *,
			    struct sr_meta_opt_hdr *);
int			sr_crypto_write(struct cryptop *);
int			sr_crypto_rw(struct sr_workunit *);
int			sr_crypto_dev_rw(struct sr_workunit *,
			    struct sr_crypto_wu *);
void			sr_crypto_done(struct sr_workunit *);
int			sr_crypto_read(struct cryptop *);
void			sr_crypto_finish_io(struct sr_workunit *);
void			sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
			    u_int8_t *, int, u_char *);
void			sr_crypto_hotplug(struct sr_discipline *,
			    struct disk *, int);

#ifdef SR_DEBUG0
void			sr_crypto_dumpkeys(struct sr_discipline *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int i;

	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
	sd->sd_scsi_done = sr_crypto_done;
}

int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	struct sr_meta_opt_item	*omi;
	int			rv = EINVAL;

	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "%s requires exactly one chunk",
		    sd->sd_name);
		goto done;
	}

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	sd->sd_meta->ssdi.ssd_size = coerced_size;

	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	rv = 0;
done:
	return (rv);
}

int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* Provide userland with the KDF hint. */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* We're done. */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* Get KDF info, with mask key, from userland. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}

struct sr_crypto_wu *
sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr_t			blk;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu %p encrypt %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
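	/*
	 * The freelist holds sd_max_wu entries and each WU uses at most
	 * one crwu, so an empty list indicates a programming error.
	 */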
	if (crwu == NULL)
		panic("sr_crypto_wu_get: out of work units");

	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;
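
	/* blk is the volume-relative start sector; n counts DEV_BSIZE sectors. */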
	blk = wu->swu_blk_start;
	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/*
	 * Select crypto session based on block number.
	 *
	 * XXX - this does not handle the case where the read/write spans
	 * different key blocks (e.g. the 0.5TB boundary). Currently this is
	 * already broken by the use of scr_key[0] below.
	 */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];

	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
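		/* The sector number serves as the explicit IV/XTS tweak. */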
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}
	crwu->cr_wu = wu;
	crwu->cr_crp->crp_opaque = crwu;

	return (crwu);
}

void
sr_crypto_wu_put(struct sr_crypto_wu *crwu)
{
	struct sr_workunit	*wu = crwu->cr_wu;
	struct sr_discipline	*sd = wu->swu_dis;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), crwu);

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
}

int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			 rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}
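
/*
 * sr_crypto_encrypt()/sr_crypto_decrypt() (un)mask the disk key array
 * with the mask key. AES-256-ECB is only ever applied to this random
 * key material, never to user data.
 */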
int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx	ctx;
	int		i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx	ctx;
	int		i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}
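
/*
 * Key-check MAC: k = SHA1(mask_key), mac = HMAC-SHA1_k(plaintext keys).
 * This allows a decryption attempt to be verified without ever storing
 * the plaintext keys on disk.
 */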
void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char		check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX	hmacctx;
	SHA1_CTX	shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}

int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char	check_digest[SHA1_DIGEST_LENGTH];
	int	rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}

int
sr_crypto_create_keys(struct sr_discipline *sd)
{
	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}
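
/*
 * Re-keying: decrypt the disk keys with the old mask key, verify them
 * against the stored HMAC, then re-encrypt with the new mask key and
 * record a fresh HMAC.
 */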
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char		check_digest[SHA1_DIGEST_LENGTH];
	u_char		*c, *p = NULL;
	size_t		ksz;
	int		rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}

struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item	*omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto fail;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		/* The done path closes and releases the vnode. */
		goto fail;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto fail;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto fail;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	if (key_disk)
		free(key_disk, M_DEVBUF);
	key_disk = NULL;

done:
	if (omi)
		free(omi, M_DEVBUF);
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
	if (fakesd)
		free(fakesd, M_DEVBUF);
	if (sm)
		free(sm, M_DEVBUF);
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt_item	*omi, *omi_next;
	struct sr_meta_opt_hdr	*omh;
	struct sr_meta_keydisk	*skm;
	struct sr_meta_opt_head som;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Load a key disk and load keying material into memory.
	 */

	SLIST_INIT(&som);

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		/* The done path closes and releases the vnode. */
		goto done;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		sr_error(sc, "native bootprobe could not read native metadata");
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		sr_error(sc, "%s is not a key disk", devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
	    sizeof(key_disk->src_meta));

	/* Read mask key from optional metadata. */
	sr_meta_opt_load(sc, sm, &som);
	SLIST_FOREACH(omi, &som, omi_link) {
		omh = omi->omi_som;
		if (omh->som_type == SR_OPT_KEYDISK) {
			skm = (struct sr_meta_keydisk *)omh;
			bcopy(&skm->skm_maskkey,
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		} else if (omh->som_type == SR_OPT_CRYPTO) {
			/* Original keydisk format with key in crypto area. */
			bcopy((u_int8_t *)omh + sizeof(struct sr_meta_opt_hdr),
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		}
	}
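
	/* Leave the device open: the vnode now belongs to the key disk chunk. */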
	open = 0;

done:
	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
		omi_next = SLIST_NEXT(omi, omi_link);
		if (omi->omi_som)
			free(omi->omi_som, M_DEVBUF);
		free(omi, M_DEVBUF);
	}

	if (sm)
		free(sm, M_DEVBUF);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct cryptoini	cri;
	struct sr_crypto_wu	*crwu;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		sd->mds.mdd_crypto.scr_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		sd->mds.mdd_crypto.scr_klen = 512;
		break;
	default:
		sr_error(sd->sd_sc, "unknown crypto algorithm");
		return (EINVAL);
	}

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd, sizeof(struct sr_workunit))) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each wu allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
	for (i = 0; i < sd->sd_max_wu; i++) {
		crwu = malloc(sizeof(*crwu), M_DEVBUF,
		    M_WAITOK | M_ZERO | M_CANFAIL);
		if (crwu == NULL)
			return (ENOMEM);
		/* Put it on the list now so if we fail it'll be freed. */
		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		crwu->cr_descs = crwu->cr_crp->crp_desc;
	}

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
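	/* With DEV_BSIZE sectors this is one key per ~0.5TB of data. */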
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			for (i = 0;
			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			    i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}

void
sr_crypto_free_resources(struct sr_discipline *sd)
{
	struct sr_crypto_wu	*crwu;
	u_int			i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);

		if (crwu->cr_dmabuf != NULL)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp) {
			crwu->cr_crp->crp_desc = crwu->cr_descs;
			crypto_freereq(crwu->cr_crp);
		}
		free(crwu, M_DEVBUF);
	}
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

	sr_wu_free(sd);
	sr_ccb_free(sd);
}

int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			 size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}

int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
{
	int rv = EINVAL;

	if (om->som_type == SR_OPT_CRYPTO) {
		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
		rv = 0;
	}

	return (rv);
}
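
/*
 * Write path: encrypt into the preallocated DMA buffer first, then issue
 * the disk I/O from the sr_crypto_write() callback. Read path: issue the
 * disk I/O first; sr_crypto_done() then kicks off decryption in place.
 */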
int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	daddr_t			blk;
	int			s, rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (sr_validate_io(wu, &blk, "sr_crypto_rw"))
		return (1);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crwu = sr_crypto_wu_get(wu, 1);
		if (crwu == NULL)
			return (1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		s = splvm();
		rv = crypto_invoke(crwu->cr_crp);
		if (rv == 0)
			rv = crwu->cr_crp->crp_etype;
		splx(s);
	} else
		rv = sr_crypto_dev_rw(wu, NULL);

	return (rv);
}

int
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_crypto_finish_io(wu);
		splx(s);
	}

	return (sr_crypto_dev_rw(wu, crwu));
}

int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	daddr_t			blk;

	blk = wu->swu_blk_start;
	blk += sd->sd_meta->ssd_data_offset;

	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* Should never happen but handle more gracefully. */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);
	sr_schedule_wu(wu);

	return (0);

bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}

void
sr_crypto_done(struct sr_workunit *wu)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_crypto_wu	*crwu;
	struct sr_ccb		*ccb;
	int			s;

	/* If this was a successful read, initiate decryption of the data. */
	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
		/* only fails on implementation error */
		crwu = sr_crypto_wu_get(wu, 0);
		if (crwu == NULL)
			panic("sr_crypto_done: no work unit");
		crwu->cr_crp->crp_callback = sr_crypto_read;
		ccb = TAILQ_FIRST(&wu->swu_ccb);
		if (ccb == NULL)
			panic("sr_crypto_done: no ccbs on workunit");
		ccb->ccb_opaque = crwu;
		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_invoke %p\n",
		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
		s = splvm();
		crypto_invoke(crwu->cr_crp);
		splx(s);
		return;
	}

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);
}

void
sr_crypto_finish_io(struct sr_workunit *wu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
#ifdef SR_DEBUG
	struct sr_softc		*sc = sd->sd_sc;
#endif /* SR_DEBUG */

	splassert(IPL_BIO);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
	    DEVNAME(sc), wu, xs);

	if (wu->swu_cb_active == 1)
		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
		if (ccb->ccb_opaque == NULL)
			continue;
		sr_crypto_wu_put(ccb->ccb_opaque);
	}

	sr_scsi_done(sd, xs);
}

int
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);

	return (0);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int	i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif /* SR_DEBUG0 */