--- test021.left.txt
+++ test021.right.txt
@@ -1,4 +1,4 @@
-/* $OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $ */
+/* $OpenBSD: softraid_crypto.c,v 1.139 2020/07/13 00:06:22 kn Exp $ */
 /*
  * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
  * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
@@ -25,7 +25,6 @@
 #include <sys/buf.h>
 #include <sys/device.h>
 #include <sys/ioctl.h>
-#include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
 #include <sys/kernel.h>
@@ -34,6 +33,7 @@
 #include <sys/queue.h>
 #include <sys/fcntl.h>
 #include <sys/disklabel.h>
+#include <sys/vnode.h>
 #include <sys/mount.h>
 #include <sys/sensors.h>
 #include <sys/stat.h>
@@ -42,7 +42,6 @@
 #include <sys/dkio.h>
 
 #include <crypto/cryptodev.h>
-#include <crypto/cryptosoft.h>
 #include <crypto/rijndael.h>
 #include <crypto/md5.h>
 #include <crypto/sha1.h>
@@ -54,7 +53,6 @@
 #include <scsi/scsi_disk.h>
 
 #include <dev/softraidvar.h>
-#include <dev/rndvar.h>
 
 /*
  * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
@@ -62,18 +60,15 @@
  * because we assert that only one ccb per WU will ever be active.
  */
 struct sr_crypto_wu {
-	TAILQ_ENTRY(sr_crypto_wu)	cr_link;
+	struct sr_workunit	cr_wu;		/* Must be first. */
 	struct uio		cr_uio;
 	struct iovec		cr_iov;
 	struct cryptop		*cr_crp;
-	struct cryptodesc	*cr_descs;
-	struct sr_workunit	*cr_wu;
 	void			*cr_dmabuf;
 };
 
 
-struct sr_crypto_wu	*sr_crypto_wu_get(struct sr_workunit *, int);
-void			sr_crypto_wu_put(struct sr_crypto_wu *);
+struct sr_crypto_wu	*sr_crypto_prepare(struct sr_workunit *, int);
 int			sr_crypto_create_keys(struct sr_discipline *);
 int			sr_crypto_get_kdf(struct bioc_createraid *,
 			    struct sr_discipline *);
@@ -92,12 +87,11 @@
 			    struct bioc_discipline *);
 int			sr_crypto_meta_opt_handler(struct sr_discipline *,
 			    struct sr_meta_opt_hdr *);
-int			sr_crypto_write(struct cryptop *);
+void			sr_crypto_write(struct cryptop *);
 int			sr_crypto_rw(struct sr_workunit *);
-int			sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
+int			sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
 void			sr_crypto_done(struct sr_workunit *);
-int			sr_crypto_read(struct cryptop *);
-void			sr_crypto_finish_io(struct sr_workunit *);
+void			sr_crypto_read(struct cryptop *);
 void			sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
 			    u_int8_t *, int, u_char *);
 void			sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
@@ -113,6 +107,7 @@
 	int			i;
 
 	/* Fill out discipline members. */
+	sd->sd_wu_size = sizeof(struct sr_crypto_wu);
 	sd->sd_type = SR_MD_CRYPTO;
 	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
 	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
@@ -143,8 +138,14 @@
 		sr_error(sd->sd_sc, "%s requires exactly one chunk",
 		    sd->sd_name);
 		goto done;
-	}
+	}
+
+	if (coerced_size > SR_CRYPTO_MAXSIZE) {
+		sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
+		    sd->sd_name, coerced_size, SR_CRYPTO_MAXSIZE);
+		goto done;
+	}
+
 	/* Create crypto optional metadata. */
 	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
 	    M_WAITOK | M_ZERO);
@@ -208,7 +209,7 @@
 
 	if (data != NULL) {
 		/* Kernel already has mask key. */
-		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
 		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 	} else if (bc->bc_key_disk != NODEV) {
 		/* Read the mask key from the key disk. */
@@ -248,117 +249,69 @@
 }
 
 struct sr_crypto_wu *
-sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
+sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
 {
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_discipline	*sd = wu->swu_dis;
 	struct sr_crypto_wu	*crwu;
 	struct cryptodesc	*crd;
 	int			flags, i, n;
-	daddr64_t		blk = 0;
+	daddr_t			blkno;
 	u_int			keyndx;
 
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
 	    DEVNAME(sd->sd_sc), wu, encrypt);
 
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
-	if (crwu == NULL)
-		panic("sr_crypto_wu_get: out of wus");
-
+	crwu = (struct sr_crypto_wu *)wu;
 	crwu->cr_uio.uio_iovcnt = 1;
 	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
 	if (xs->flags & SCSI_DATA_OUT) {
 		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
-		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
+		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
 	} else
 		crwu->cr_uio.uio_iov->iov_base = xs->data;
 
-	if (xs->cmdlen == 10)
-		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
-	else if (xs->cmdlen == 16)
-		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
-	else if (xs->cmdlen == 6)
-		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);
-
+	blkno = wu->swu_blk_start;
 	n = xs->datalen >> DEV_BSHIFT;
 
 	/*
 	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
-	 * Since there may be less than that we need to tweak the linked list
+	 * Since there may be less than that we need to tweak the amount
	 * of crypto desc structures to be just long enough for our needs.
 	 */
-	crd = crwu->cr_descs;
-	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
-		crd = crd->crd_next;
-		KASSERT(crd);
-	}
-	crwu->cr_crp->crp_desc = crd;
+	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
+	crwu->cr_crp->crp_ndesc = n;
 	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
 	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
 
-	/* Select crypto session based on block number */
-	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
-	if (keyndx >= SR_CRYPTO_MAXKEYS)
-		goto unwind;
+	/*
+	 * Select crypto session based on block number.
+	 *
+	 * XXX - this does not handle the case where the read/write spans
+	 * across a different key blocks (e.g. 0.5TB boundary). Currently
+	 * this is already broken by the use of scr_key[0] below.
+	 */
+	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
 	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
-	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
-		goto unwind;
 
+	crwu->cr_crp->crp_opaque = crwu;
 	crwu->cr_crp->crp_ilen = xs->datalen;
 	crwu->cr_crp->crp_alloctype = M_DEVBUF;
+	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
 	crwu->cr_crp->crp_buf = &crwu->cr_uio;
-	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
-	    i++, blk++, crd = crd->crd_next) {
+	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
+		crd = &crwu->cr_crp->crp_desc[i];
 		crd->crd_skip = i << DEV_BSHIFT;
 		crd->crd_len = DEV_BSIZE;
 		crd->crd_inject = 0;
 		crd->crd_flags = flags;
-		crd->crd_alg = CRYPTO_AES_XTS;
-
-		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
-		case SR_CRYPTOA_AES_XTS_128:
-			crd->crd_klen = 256;
-			break;
-		case SR_CRYPTOA_AES_XTS_256:
-			crd->crd_klen = 512;
-			break;
-		default:
-			goto unwind;
-		}
+		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
+		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
 		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
-		bcopy(&blk, crd->crd_iv, sizeof(blk));
+		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
 	}
-	crwu->cr_wu = wu;
-	crwu->cr_crp->crp_opaque = crwu;
 
 	return (crwu);
 
-unwind:
-	/* steal the descriptors back from the cryptop */
-	crwu->cr_crp->crp_desc = NULL;
-
-	return (NULL);
-}
-
-void
-sr_crypto_wu_put(struct sr_crypto_wu *crwu)
-{
-	struct cryptop		*crp = crwu->cr_crp;
-	struct sr_workunit	*wu = crwu->cr_wu;
-	struct sr_discipline	*sd = wu->swu_dis;
-
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
-	    DEVNAME(wu->swu_dis->sd_sc), crwu);
-
-	/* steal the descriptors back from the cryptop */
-	crp->crp_desc = NULL;
-
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
 }
 
 int
@@ -386,9 +339,8 @@
 		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
 		    kdfinfo->genkdf.len)
 			goto out;
-		bcopy(&kdfinfo->genkdf,
-		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
-		    kdfinfo->genkdf.len);
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
 	}
 
 	/* copy mask key to run-time meta data */
@@ -396,7 +348,7 @@
 		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
 		    sizeof(kdfinfo->maskkey))
 			goto out;
-		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
 		    sizeof(kdfinfo->maskkey));
 	}
 
@@ -404,7 +356,7 @@
 	rv = 0;
 out:
 	explicit_bzero(kdfinfo, bc->bc_opaque_size);
-	free(kdfinfo, M_DEVBUF);
+	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);
 
 	return (rv);
 }
@@ -424,7 +376,7 @@
 		rv = 0;
 		break;
 	default:
-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
 		    "softraid", alg);
 		rv = -1;
 		goto out;
@@ -450,7 +402,7 @@
 		rv = 0;
 		break;
 	default:
-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
 		    "softraid", alg);
 		rv = -1;
 		goto out;
@@ -615,6 +567,17 @@
 		sr_error(sd->sd_sc, "incorrect key or passphrase");
 		rv = EPERM;
 		goto out;
+	}
+
+	/* Copy new KDF hint to metadata, if supplied. */
+	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
+		if (kdfinfo2->genkdf.len >
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
+			goto out;
+		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
 	}
 
 	/* Mask the disk keys. */
@@ -630,7 +593,7 @@
 	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);
 
 	/* Copy new encrypted key and HMAC to metadata. */
-	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
 	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));
 
 	rv = 0; /* Success */
@@ -638,7 +601,7 @@
 out:
 	if (p) {
 		explicit_bzero(p, ksz);
-		free(p, M_DEVBUF);
+		free(p, M_DEVBUF, ksz);
 	}
 
 	explicit_bzero(check_digest, sizeof(check_digest));
@@ -686,7 +649,7 @@
 		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
 		    "open %s\n", DEVNAME(sc), devname);
 		vput(vn);
-		goto fail;
+		goto done;
 	}
 	open = 1; /* close dev on error */
 
@@ -696,19 +659,12 @@
 	    FREAD, NOCRED, curproc)) {
 		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
 		    "failed\n", DEVNAME(sc));
-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
-		vput(vn);
-		goto fail;
+		goto done;
 	}
-	if (label.d_secsize != DEV_BSIZE) {
-		sr_error(sc, "%s has unsupported sector size (%d)",
-		    devname, label.d_secsize);
-		goto fail;
-	}
 	if (label.d_partitions[part].p_fstype != FS_RAID) {
-		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		sr_error(sc, "%s partition not of type RAID (%d)",
 		    devname, label.d_partitions[part].p_fstype);
-		goto fail;
+		goto done;
 	}
 
 	/*
@@ -728,7 +684,7 @@
 	km->scmi.scm_size = 0;
 	km->scmi.scm_coerced_size = 0;
 	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
+	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
 	    sizeof(struct sr_uuid));
 
 	sr_checksum(sc, km, &km->scm_checksum,
@@ -745,7 +701,7 @@
 	sm->ssdi.ssd_version = SR_META_VERSION;
 	sm->ssd_ondisk = 0;
 	sm->ssdi.ssd_vol_flags = 0;
-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
+	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
 	    sizeof(struct sr_uuid));
 	sm->ssdi.ssd_chunk_no = 1;
 	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
@@ -785,7 +741,7 @@
 	omi->omi_som->som_type = SR_OPT_KEYDISK;
 	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
 	skm = (struct sr_meta_keydisk *)omi->omi_som;
-	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
+	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
 	    sizeof(skm->skm_maskkey));
 	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
 	fakesd->sd_meta->ssdi.ssd_opt_no++;
@@ -799,19 +755,16 @@
 	goto done;
 
 fail:
-	if (key_disk)
-		free(key_disk, M_DEVBUF);
+	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
 	key_disk = NULL;
 
done:
-	if (omi)
-		free(omi, M_DEVBUF);
+	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
 	if (fakesd && fakesd->sd_vol.sv_chunks)
-		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
-	if (fakesd)
-		free(fakesd, M_DEVBUF);
-	if (sm)
-		free(sm, M_DEVBUF);
+		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
+		    sizeof(struct sr_chunk *));
+	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
+	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
 	if (open) {
 		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
 		vput(vn);
@@ -855,7 +808,7 @@
 		sr_error(sc, "cannot open key disk %s", devname);
 		goto done;
 	}
-	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
+	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
 		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
 		    "open %s\n", DEVNAME(sc), devname);
 		vput(vn);
@@ -869,17 +822,10 @@
 	    NOCRED, curproc)) {
 		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
 		    "failed\n", DEVNAME(sc));
-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
-		vput(vn);
 		goto done;
 	}
-	if (label.d_secsize != DEV_BSIZE) {
-		sr_error(sc, "%s has unsupported sector size (%d)",
-		    devname, label.d_secsize);
-		goto done;
-	}
 	if (label.d_partitions[part].p_fstype != FS_RAID) {
-		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		sr_error(sc, "%s partition not of type RAID (%d)",
 		    devname, label.d_partitions[part].p_fstype);
 		goto done;
 	}
@@ -887,7 +833,7 @@
 	/*
 	 * Read and validate key disk metadata.
 	 */
-	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
+	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
 	if (sr_meta_native_read(sd, dev, sm, NULL)) {
 		sr_error(sc, "native bootprobe could not read native metadata");
 		goto done;
@@ -911,7 +857,7 @@
 	key_disk->src_vn = vn;
 	key_disk->src_size = 0;
 
-	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
+	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
 	    sizeof(key_disk->src_meta));
 
 	/* Read mask key from optional metadata. */
@@ -920,13 +866,12 @@
 		omh = omi->omi_som;
 		if (omh->som_type == SR_OPT_KEYDISK) {
 			skm = (struct sr_meta_keydisk *)omh;
-			bcopy(&skm->skm_maskkey,
-			    sd->mds.mdd_crypto.scr_maskkey,
+			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 		} else if (omh->som_type == SR_OPT_CRYPTO) {
 			/* Original keydisk format with key in crypto area. */
-			bcopy(omh + sizeof(struct sr_meta_opt_hdr),
-			    sd->mds.mdd_crypto.scr_maskkey,
+			memcpy(sd->mds.mdd_crypto.scr_maskkey,
+			    omh + sizeof(struct sr_meta_opt_hdr),
 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 		}
 	}
@@ -934,15 +879,13 @@
 	open = 0;
 
done:
-	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
+	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
 		omi_next = SLIST_NEXT(omi, omi_link);
-		if (omi->omi_som)
-			free(omi->omi_som, M_DEVBUF);
-		free(omi, M_DEVBUF);
+		free(omi->omi_som, M_DEVBUF, 0);
+		free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
 	}
 
-	if (sm)
-		free(sm, M_DEVBUF);
+	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
 
 	if (vn && open) {
 		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
@@ -950,18 +893,45 @@
 	}
 
 	return key_disk;
+}
+
+static void
+sr_crypto_free_sessions(struct sr_discipline *sd)
+{
+	u_int			i;
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		if (sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1) {
+			crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
+			sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+		}
+	}
 }
 
 int
 sr_crypto_alloc_resources(struct sr_discipline *sd)
 {
-	struct cryptoini	cri;
+	struct sr_workunit	*wu;
 	struct sr_crypto_wu	*crwu;
+	struct cryptoini	cri;
 	u_int			num_keys, i;
 
 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
 	    DEVNAME(sd->sd_sc));
 
+	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
+	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+	case SR_CRYPTOA_AES_XTS_128:
+		sd->mds.mdd_crypto.scr_klen = 256;
+		break;
+	case SR_CRYPTOA_AES_XTS_256:
+		sd->mds.mdd_crypto.scr_klen = 512;
+		break;
+	default:
+		sr_error(sd->sd_sc, "unknown crypto algorithm");
+		return (EINVAL);
+	}
+
 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
 
@@ -979,61 +949,34 @@
 	}
 
 	/*
-	 * For each wu allocate the uio, iovec and crypto structures.
-	 * these have to be allocated now because during runtime we can't
-	 * fail an allocation without failing the io (which can cause real
+	 * For each work unit allocate the uio, iovec and crypto structures.
+	 * These have to be allocated now because during runtime we cannot
+	 * fail an allocation without failing the I/O (which can cause real
 	 * problems).
 	 */
-	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
-	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
-	for (i = 0; i < sd->sd_max_wu; i++) {
-		crwu = malloc(sizeof(*crwu), M_DEVBUF,
-		    M_WAITOK | M_ZERO | M_CANFAIL);
-		if (crwu == NULL)
-			return (ENOMEM);
-		/* put it on the list now so if we fail it'll be freed */
-		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
-
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
 		crwu->cr_uio.uio_iov = &crwu->cr_iov;
 		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
 		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
 		if (crwu->cr_crp == NULL)
 			return (ENOMEM);
-		/* steal the list of cryptodescs */
-		crwu->cr_descs = crwu->cr_crp->crp_desc;
-		crwu->cr_crp->crp_desc = NULL;
 	}
 
-	bzero(&cri, sizeof(cri));
-	cri.cri_alg = CRYPTO_AES_XTS;
-	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
-	case SR_CRYPTOA_AES_XTS_128:
-		cri.cri_klen = 256;
-		break;
-	case SR_CRYPTOA_AES_XTS_256:
-		cri.cri_klen = 512;
-		break;
-	default:
-		return (EINVAL);
-	}
+	memset(&cri, 0, sizeof(cri));
+	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
+	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;
 
-	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
-	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
-	if (num_keys >= SR_CRYPTO_MAXKEYS)
+	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
+	num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
+	    SR_CRYPTO_KEY_BLKSHIFT) + 1;
+	if (num_keys > SR_CRYPTO_MAXKEYS)
 		return (EFBIG);
-	for (i = 0; i <= num_keys; i++) {
+	for (i = 0; i < num_keys; i++) {
 		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
 		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
 		    &cri, 0) != 0) {
-			for (i = 0;
-			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
-			     i++) {
-				crypto_freesession(
-				    sd->mds.mdd_crypto.scr_sid[i]);
-				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
-			}
+			sr_crypto_free_sessions(sd);
 			return (EINVAL);
 		}
 	}
@@ -1046,39 +989,30 @@
 void
 sr_crypto_free_resources(struct sr_discipline *sd)
 {
+	struct sr_workunit	*wu;
 	struct sr_crypto_wu	*crwu;
-	u_int			i;
 
 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
 	    DEVNAME(sd->sd_sc));
 
 	if (sd->mds.mdd_crypto.key_disk != NULL) {
-		explicit_bzero(sd->mds.mdd_crypto.key_disk, sizeof
-		    sd->mds.mdd_crypto.key_disk);
-		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
+		explicit_bzero(sd->mds.mdd_crypto.key_disk,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
+		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
 	}
 
 	sr_hotplug_unregister(sd, sr_crypto_hotplug);
 
-	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
-		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
-		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
-	}
+	sr_crypto_free_sessions(sd);
 
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-
-		if (crwu->cr_dmabuf != NULL)
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
+		if (crwu->cr_dmabuf)
 			dma_free(crwu->cr_dmabuf, MAXPHYS);
-		if (crwu->cr_crp) {
-			/* twiddle cryptoreq back */
-			crwu->cr_crp->crp_desc = crwu->cr_descs;
+		if (crwu->cr_crp)
 			crypto_freereq(crwu->cr_crp);
-		}
-		free(crwu, M_DEVBUF);
 	}
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
 
 	sr_wu_free(sd);
 	sr_ccb_free(sd);
@@ -1165,65 +1099,60 @@
 sr_crypto_rw(struct sr_workunit *wu)
 {
 	struct sr_crypto_wu	*crwu;
-	int			s, rv = 0;
+	daddr_t			blkno;
+	int			rv = 0;
 
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu);
 
-	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
-		crwu = sr_crypto_wu_get(wu, 1);
-		if (crwu == NULL)
-			return (1);
+	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
+		return (1);
+
+	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
+		crwu = sr_crypto_prepare(wu, 1);
 		crwu->cr_crp->crp_callback = sr_crypto_write;
-		s = splvm();
-		if (crypto_invoke(crwu->cr_crp))
-			rv = 1;
-		else
+		rv = crypto_dispatch(crwu->cr_crp);
+		if (rv == 0)
 			rv = crwu->cr_crp->crp_etype;
-		splx(s);
 	} else
-		rv = sr_crypto_rw2(wu, NULL);
+		rv = sr_crypto_dev_rw(wu, NULL);
 
 	return (rv);
 }
 
-int
+void
 sr_crypto_write(struct cryptop *crp)
 {
 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
-	struct sr_workunit	*wu = crwu->cr_wu;
+	struct sr_workunit	*wu = &crwu->cr_wu;
 	int			s;
 
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
 
 	if (crp->crp_etype) {
 		/* fail io */
 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
 		s = splbio();
-		sr_crypto_finish_io(wu);
+		sr_scsi_done(wu->swu_dis, wu->swu_xs);
 		splx(s);
 	}
 
-	return (sr_crypto_rw2(wu, crwu));
+	sr_crypto_dev_rw(wu, crwu);
 }
 
 int
-sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
+sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
 {
 	struct sr_discipline	*sd = wu->swu_dis;
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_ccb		*ccb;
 	struct uio		*uio;
-	int			s;
-	daddr64_t		blk;
+	daddr_t			blkno;
 
-	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
-		goto bad;
+	blkno = wu->swu_blk_start;
 
-	blk += sd->sd_meta->ssd_data_offset;
-
-	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
+	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
 	if (!ccb) {
 		/* should never happen but handle more gracefully */
 		printf("%s: %s: too many ccbs queued\n",
@@ -1236,17 +1165,10 @@
 	ccb->ccb_opaque = crwu;
 
 	sr_wu_enqueue_ccb(wu, ccb);
+	sr_schedule_wu(wu);
 
-	s = splbio();
-
-	if (sr_check_io_collision(wu))
-		goto queued;
-
-	sr_raid_startwu(wu);
-
-queued:
-	splx(s);
 	return (0);
 
bad:
 	/* wu is unwound by sr_wu_put */
 	if (crwu)
@@ -1259,77 +1181,39 @@
 {
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_crypto_wu	*crwu;
-	struct sr_ccb		*ccb;
 	int			s;
 
 	/* If this was a successful read, initiate decryption of the data. */
 	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
-		/* only fails on implementation error */
-		crwu = sr_crypto_wu_get(wu, 0);
-		if (crwu == NULL)
-			panic("sr_crypto_intr: no wu");
+		crwu = sr_crypto_prepare(wu, 0);
 		crwu->cr_crp->crp_callback = sr_crypto_read;
-		ccb = TAILQ_FIRST(&wu->swu_ccb);
-		if (ccb == NULL)
-			panic("sr_crypto_done: no ccbs on workunit");
-		ccb->ccb_opaque = crwu;
-		DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
+		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
 		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
-		s = splvm();
-		crypto_invoke(crwu->cr_crp);
-		splx(s);
+		crypto_dispatch(crwu->cr_crp);
 		return;
 	}
 
 	s = splbio();
-	sr_crypto_finish_io(wu);
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
 	splx(s);
 }
 
 void
-sr_crypto_finish_io(struct sr_workunit *wu)
-{
-	struct sr_discipline	*sd = wu->swu_dis;
-	struct scsi_xfer	*xs = wu->swu_xs;
-	struct sr_ccb		*ccb;
-#ifdef SR_DEBUG
-	struct sr_softc		*sc = sd->sd_sc;
-#endif /* SR_DEBUG */
-
-	splassert(IPL_BIO);
-
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
-	    DEVNAME(sc), wu, xs);
-
-	if (wu->swu_cb_active == 1)
-		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
-	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
-		if (ccb->ccb_opaque == NULL)
-			continue;
-		sr_crypto_wu_put(ccb->ccb_opaque);
-	}
-
-	sr_scsi_done(sd, xs);
-}
-
-int
 sr_crypto_read(struct cryptop *crp)
 {
 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
-	struct sr_workunit	*wu = crwu->cr_wu;
+	struct sr_workunit	*wu = &crwu->cr_wu;
 	int			s;
 
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
 
 	if (crp->crp_etype)
 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
 
 	s = splbio();
-	sr_crypto_finish_io(wu);
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
 	splx(s);
-
-	return (0);
 }
 
 void