Commit Diff
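
This commit (e8eedebc to c285a1f8) adds two files for test 021: test/expect021.diff, the expected diff output, and test/test021.left.txt, the left-hand input (OpenBSD's softraid_crypto.c, revision 1.91; the expected diff compares it against revision 1.139).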


commit - e8eedebc88e40698d3d763e6f00601484d8cabf3
commit + c285a1f8b9de1be75e044892c4c7af139e5156bc
blob - /dev/null
blob + 0260dddb49db5bd1e6042ecec6c939392ba07ae1 (mode 644)
--- /dev/null
+++ test/expect021.diff
@@ -0,0 +1,867 @@
+--- test021.left.txt
++++ test021.right.txt
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $ */
++/* $OpenBSD: softraid_crypto.c,v 1.139 2020/07/13 00:06:22 kn Exp $ */
+ /*
+  * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
+  * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
+@@ -25,7 +25,6 @@
+ #include <sys/buf.h>
+ #include <sys/device.h>
+ #include <sys/ioctl.h>
+-#include <sys/proc.h>
+ #include <sys/malloc.h>
+ #include <sys/pool.h>
+ #include <sys/kernel.h>
+@@ -34,6 +33,7 @@
+ #include <sys/queue.h>
+ #include <sys/fcntl.h>
+ #include <sys/disklabel.h>
++#include <sys/vnode.h>
+ #include <sys/mount.h>
+ #include <sys/sensors.h>
+ #include <sys/stat.h>
+@@ -42,7 +42,6 @@
+ #include <sys/dkio.h>
+ 
+ #include <crypto/cryptodev.h>
+-#include <crypto/cryptosoft.h>
+ #include <crypto/rijndael.h>
+ #include <crypto/md5.h>
+ #include <crypto/sha1.h>
+@@ -54,7 +53,6 @@
+ #include <scsi/scsi_disk.h>
+ 
+ #include <dev/softraidvar.h>
+-#include <dev/rndvar.h>
+ 
+ /*
+  * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
+@@ -62,18 +60,15 @@
+  * because we assert that only one ccb per WU will ever be active.
+  */
+ struct sr_crypto_wu {
+-	TAILQ_ENTRY(sr_crypto_wu)	 cr_link;
++	struct sr_workunit		 cr_wu;		/* Must be first. */
+ 	struct uio			 cr_uio;
+ 	struct iovec			 cr_iov;
+ 	struct cryptop	 		*cr_crp;
+-	struct cryptodesc		*cr_descs;
+-	struct sr_workunit		*cr_wu;
+ 	void				*cr_dmabuf;
+ };
+ 
+ 
+-struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
+-void		sr_crypto_wu_put(struct sr_crypto_wu *);
++struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
+ int		sr_crypto_create_keys(struct sr_discipline *);
+ int		sr_crypto_get_kdf(struct bioc_createraid *,
+ 		    struct sr_discipline *);
+@@ -92,12 +87,11 @@
+ 		    struct bioc_discipline *);
+ int		sr_crypto_meta_opt_handler(struct sr_discipline *,
+ 		    struct sr_meta_opt_hdr *);
+-int		sr_crypto_write(struct cryptop *);
++void		sr_crypto_write(struct cryptop *);
+ int		sr_crypto_rw(struct sr_workunit *);
+-int		sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
++int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
+ void		sr_crypto_done(struct sr_workunit *);
+-int		sr_crypto_read(struct cryptop *);
+-void		sr_crypto_finish_io(struct sr_workunit *);
++void		sr_crypto_read(struct cryptop *);
+ void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
+ 		   u_int8_t *, int, u_char *);
+ void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
+@@ -113,6 +107,7 @@
+ 	int i;
+ 
+ 	/* Fill out discipline members. */
++	sd->sd_wu_size = sizeof(struct sr_crypto_wu);
+ 	sd->sd_type = SR_MD_CRYPTO;
+ 	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
+ 	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
+@@ -143,8 +138,14 @@
+ 		sr_error(sd->sd_sc, "%s requires exactly one chunk",
+ 		    sd->sd_name);
+ 		goto done;
+-        }
++	}
+ 
++	if (coerced_size > SR_CRYPTO_MAXSIZE) {
++		sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
++		    sd->sd_name, coerced_size, SR_CRYPTO_MAXSIZE);
++		goto done;
++	}
++
+ 	/* Create crypto optional metadata. */
+ 	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
+ 	    M_WAITOK | M_ZERO);
+@@ -208,7 +209,7 @@
+ 
+ 	if (data != NULL) {
+ 		/* Kernel already has mask key. */
+-		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
++		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
+ 		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+ 	} else if (bc->bc_key_disk != NODEV) {
+ 		/* Read the mask key from the key disk. */
+@@ -248,119 +249,71 @@
+ }
+ 
+ struct sr_crypto_wu *
+-sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
++sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
+ {
+ 	struct scsi_xfer	*xs = wu->swu_xs;
+ 	struct sr_discipline	*sd = wu->swu_dis;
+ 	struct sr_crypto_wu	*crwu;
+ 	struct cryptodesc	*crd;
+ 	int			flags, i, n;
+-	daddr64_t		blk = 0;
++	daddr_t			blkno;
+ 	u_int			keyndx;
+ 
+-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
++	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
+ 	    DEVNAME(sd->sd_sc), wu, encrypt);
+ 
+-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+-	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
+-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+-	if (crwu == NULL)
+-		panic("sr_crypto_wu_get: out of wus");
+-
++	crwu = (struct sr_crypto_wu *)wu;
+ 	crwu->cr_uio.uio_iovcnt = 1;
+ 	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
+ 	if (xs->flags & SCSI_DATA_OUT) {
+ 		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
+-		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
++		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
+ 	} else
+ 		crwu->cr_uio.uio_iov->iov_base = xs->data;
+ 
+-	if (xs->cmdlen == 10)
+-		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
+-	else if (xs->cmdlen == 16)
+-		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
+-	else if (xs->cmdlen == 6)
+-		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);
+-
++	blkno = wu->swu_blk_start;
+ 	n = xs->datalen >> DEV_BSHIFT;
+ 
+ 	/*
+ 	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
+-	 * Since there may be less than that we need to tweak the linked list
++	 * Since there may be less than that we need to tweak the amount
+ 	 * of crypto desc structures to be just long enough for our needs.
+ 	 */
+-	crd = crwu->cr_descs;
+-	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
+-		crd = crd->crd_next;
+-		KASSERT(crd);
+-	}
+-	crwu->cr_crp->crp_desc = crd;
++	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
++	crwu->cr_crp->crp_ndesc = n;
+ 	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
+ 	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
+ 
+-	/* Select crypto session based on block number */
+-	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
+-	if (keyndx >= SR_CRYPTO_MAXKEYS)
+-		goto unwind;
++	/*
++	 * Select crypto session based on block number.
++	 *
++	 * XXX - this does not handle the case where the read/write spans
++	 * across a different key blocks (e.g. 0.5TB boundary). Currently
++	 * this is already broken by the use of scr_key[0] below.
++	 */
++	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
+ 	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
+-	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
+-		goto unwind;
+ 
++	crwu->cr_crp->crp_opaque = crwu;
+ 	crwu->cr_crp->crp_ilen = xs->datalen;
+ 	crwu->cr_crp->crp_alloctype = M_DEVBUF;
++	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
+ 	crwu->cr_crp->crp_buf = &crwu->cr_uio;
+-	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
+-	    i++, blk++, crd = crd->crd_next) {
++	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
++		crd = &crwu->cr_crp->crp_desc[i];
+ 		crd->crd_skip = i << DEV_BSHIFT;
+ 		crd->crd_len = DEV_BSIZE;
+ 		crd->crd_inject = 0;
+ 		crd->crd_flags = flags;
+-		crd->crd_alg = CRYPTO_AES_XTS;
+-
+-		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+-		case SR_CRYPTOA_AES_XTS_128:
+-			crd->crd_klen = 256;
+-			break;
+-		case SR_CRYPTOA_AES_XTS_256:
+-			crd->crd_klen = 512;
+-			break;
+-		default:
+-			goto unwind;
+-		}
++		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
++		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
+ 		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
+-		bcopy(&blk, crd->crd_iv, sizeof(blk));
++		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
+ 	}
+-	crwu->cr_wu = wu;
+-	crwu->cr_crp->crp_opaque = crwu;
+ 
+ 	return (crwu);
+-
+-unwind:
+-	/* steal the descriptors back from the cryptop */
+-	crwu->cr_crp->crp_desc = NULL;
+-
+-	return (NULL);
+ }
+ 
+-void
+-sr_crypto_wu_put(struct sr_crypto_wu *crwu)
+-{
+-	struct cryptop		*crp = crwu->cr_crp;
+-	struct sr_workunit	*wu = crwu->cr_wu;
+-	struct sr_discipline	*sd = wu->swu_dis;
+-
+-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
+-	    DEVNAME(wu->swu_dis->sd_sc), crwu);
+-
+-	/* steal the descriptors back from the cryptop */
+-	crp->crp_desc = NULL;
+-
+-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+-	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+-}
+-
+ int
+ sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
+ {
+@@ -386,9 +339,8 @@
+ 		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
+ 		    kdfinfo->genkdf.len)
+ 			goto out;
+-		bcopy(&kdfinfo->genkdf,
+-		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+-		    kdfinfo->genkdf.len);
++		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
++		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
+ 	}
+ 
+ 	/* copy mask key to run-time meta data */
+@@ -396,7 +348,7 @@
+ 		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
+ 		    sizeof(kdfinfo->maskkey))
+ 			goto out;
+-		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
++		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
+ 		    sizeof(kdfinfo->maskkey));
+ 	}
+ 
+@@ -404,7 +356,7 @@
+ 	rv = 0;
+ out:
+ 	explicit_bzero(kdfinfo, bc->bc_opaque_size);
+-	free(kdfinfo, M_DEVBUF);
++	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);
+ 
+ 	return (rv);
+ }
+@@ -424,7 +376,7 @@
+ 		rv = 0;
+ 		break;
+ 	default:
+-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
++		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
+ 		    "softraid", alg);
+ 		rv = -1;
+ 		goto out;
+@@ -450,7 +402,7 @@
+ 		rv = 0;
+ 		break;
+ 	default:
+-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
++		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
+ 		    "softraid", alg);
+ 		rv = -1;
+ 		goto out;
+@@ -617,6 +569,17 @@
+ 		goto out;
+ 	}
+ 
++	/* Copy new KDF hint to metadata, if supplied. */
++	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
++		if (kdfinfo2->genkdf.len >
++		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
++			goto out;
++		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
++		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
++		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
++		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
++	}
++
+ 	/* Mask the disk keys. */
+ 	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
+ 	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
+@@ -630,7 +593,7 @@
+ 	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);
+ 
+ 	/* Copy new encrypted key and HMAC to metadata. */
+-	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
++	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
+ 	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));
+ 
+ 	rv = 0; /* Success */
+@@ -638,7 +601,7 @@
+ out:
+ 	if (p) {
+ 		explicit_bzero(p, ksz);
+-		free(p, M_DEVBUF);
++		free(p, M_DEVBUF, ksz);
+ 	}
+ 
+ 	explicit_bzero(check_digest, sizeof(check_digest));
+@@ -686,7 +649,7 @@
+ 		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
+ 		    "open %s\n", DEVNAME(sc), devname);
+ 		vput(vn);
+-		goto fail;
++		goto done;
+ 	}
+ 	open = 1; /* close dev on error */
+ 
+@@ -696,19 +659,12 @@
+ 	    FREAD, NOCRED, curproc)) {
+ 		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
+ 		    "failed\n", DEVNAME(sc));
+-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
+-		vput(vn);
+-		goto fail;
++		goto done;
+ 	}
+-	if (label.d_secsize != DEV_BSIZE) {
+-		sr_error(sc, "%s has unsupported sector size (%d)",
+-		    devname, label.d_secsize);
+-		goto fail;
+-	}
+ 	if (label.d_partitions[part].p_fstype != FS_RAID) {
+-		sr_error(sc, "%s partition not of type RAID (%d)\n",
++		sr_error(sc, "%s partition not of type RAID (%d)",
+ 		    devname, label.d_partitions[part].p_fstype);
+-		goto fail;
++		goto done;
+ 	}
+ 
+ 	/*
+@@ -728,7 +684,7 @@
+ 	km->scmi.scm_size = 0;
+ 	km->scmi.scm_coerced_size = 0;
+ 	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
+-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
++	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
+ 	    sizeof(struct sr_uuid));
+ 
+ 	sr_checksum(sc, km, &km->scm_checksum,
+@@ -745,7 +701,7 @@
+ 	sm->ssdi.ssd_version = SR_META_VERSION;
+ 	sm->ssd_ondisk = 0;
+ 	sm->ssdi.ssd_vol_flags = 0;
+-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
++	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
+ 	    sizeof(struct sr_uuid));
+ 	sm->ssdi.ssd_chunk_no = 1;
+ 	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
+@@ -785,7 +741,7 @@
+ 	omi->omi_som->som_type = SR_OPT_KEYDISK;
+ 	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
+ 	skm = (struct sr_meta_keydisk *)omi->omi_som;
+-	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
++	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
+ 	    sizeof(skm->skm_maskkey));
+ 	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
+ 	fakesd->sd_meta->ssdi.ssd_opt_no++;
+@@ -799,19 +755,16 @@
+ 	goto done;
+ 
+ fail:
+-	if (key_disk)
+-		free(key_disk, M_DEVBUF);
++	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
+ 	key_disk = NULL;
+ 
+ done:
+-	if (omi)
+-		free(omi, M_DEVBUF);
++	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
+ 	if (fakesd && fakesd->sd_vol.sv_chunks)
+-		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
+-	if (fakesd)
+-		free(fakesd, M_DEVBUF);
+-	if (sm)
+-		free(sm, M_DEVBUF);
++		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
++		    sizeof(struct sr_chunk *));
++	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
++	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
+ 	if (open) {
+ 		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
+ 		vput(vn);
+@@ -855,7 +808,7 @@
+ 		sr_error(sc, "cannot open key disk %s", devname);
+ 		goto done;
+ 	}
+-	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
++	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
+ 		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
+ 		    "open %s\n", DEVNAME(sc), devname);
+ 		vput(vn);
+@@ -869,17 +822,10 @@
+ 	    NOCRED, curproc)) {
+ 		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
+ 		    "failed\n", DEVNAME(sc));
+-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
+-		vput(vn);
+ 		goto done;
+ 	}
+-	if (label.d_secsize != DEV_BSIZE) {
+-		sr_error(sc, "%s has unsupported sector size (%d)",
+-		    devname, label.d_secsize);
+-		goto done;
+-	}
+ 	if (label.d_partitions[part].p_fstype != FS_RAID) {
+-		sr_error(sc, "%s partition not of type RAID (%d)\n",
++		sr_error(sc, "%s partition not of type RAID (%d)",
+ 		    devname, label.d_partitions[part].p_fstype);
+ 		goto done;
+ 	}
+@@ -887,7 +833,7 @@
+ 	/*
+ 	 * Read and validate key disk metadata.
+ 	 */
+-	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
++	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
+ 	if (sr_meta_native_read(sd, dev, sm, NULL)) {
+ 		sr_error(sc, "native bootprobe could not read native metadata");
+ 		goto done;
+@@ -911,7 +857,7 @@
+ 	key_disk->src_vn = vn;
+ 	key_disk->src_size = 0;
+ 
+-	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
++	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
+ 	    sizeof(key_disk->src_meta));
+ 
+ 	/* Read mask key from optional metadata. */
+@@ -920,13 +866,12 @@
+ 		omh = omi->omi_som;
+ 		if (omh->som_type == SR_OPT_KEYDISK) {
+ 			skm = (struct sr_meta_keydisk *)omh;
+-			bcopy(&skm->skm_maskkey,
+-			    sd->mds.mdd_crypto.scr_maskkey,
++			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
+ 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+ 		} else if (omh->som_type == SR_OPT_CRYPTO) {
+ 			/* Original keydisk format with key in crypto area. */
+-			bcopy(omh + sizeof(struct sr_meta_opt_hdr),
+-			    sd->mds.mdd_crypto.scr_maskkey,
++			memcpy(sd->mds.mdd_crypto.scr_maskkey,
++			    omh + sizeof(struct sr_meta_opt_hdr),
+ 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+ 		}
+ 	}
+@@ -934,15 +879,13 @@
+ 	open = 0;
+ 
+ done:
+-	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
++	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
+ 		omi_next = SLIST_NEXT(omi, omi_link);
+-		if (omi->omi_som)
+-			free(omi->omi_som, M_DEVBUF);
+-		free(omi, M_DEVBUF);
++		free(omi->omi_som, M_DEVBUF, 0);
++		free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
+ 	}
+ 
+-	if (sm)
+-		free(sm, M_DEVBUF);
++	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
+ 
+ 	if (vn && open) {
+ 		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
+@@ -952,16 +895,43 @@
+ 	return key_disk;
+ }
+ 
++static void
++sr_crypto_free_sessions(struct sr_discipline *sd)
++{
++	u_int			i;
++
++	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
++		if (sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1) {
++			crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
++			sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
++		}
++	}
++}
++
+ int
+ sr_crypto_alloc_resources(struct sr_discipline *sd)
+ {
+-	struct cryptoini	cri;
++	struct sr_workunit	*wu;
+ 	struct sr_crypto_wu	*crwu;
++	struct cryptoini	cri;
+ 	u_int			num_keys, i;
+ 
+ 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
+ 	    DEVNAME(sd->sd_sc));
+ 
++	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
++	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
++	case SR_CRYPTOA_AES_XTS_128:
++		sd->mds.mdd_crypto.scr_klen = 256;
++		break;
++	case SR_CRYPTOA_AES_XTS_256:
++		sd->mds.mdd_crypto.scr_klen = 512;
++		break;
++	default:
++		sr_error(sd->sd_sc, "unknown crypto algorithm");
++		return (EINVAL);
++	}
++
+ 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
+ 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+ 
+@@ -979,61 +949,34 @@
+ 	}
+ 
+ 	/*
+-	 * For each wu allocate the uio, iovec and crypto structures.
+-	 * these have to be allocated now because during runtime we can't
+-	 * fail an allocation without failing the io (which can cause real
++	 * For each work unit allocate the uio, iovec and crypto structures.
++	 * These have to be allocated now because during runtime we cannot
++	 * fail an allocation without failing the I/O (which can cause real
+ 	 * problems).
+ 	 */
+-	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
+-	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
+-	for (i = 0; i < sd->sd_max_wu; i++) {
+-		crwu = malloc(sizeof(*crwu), M_DEVBUF,
+-		    M_WAITOK | M_ZERO | M_CANFAIL);
+-		if (crwu == NULL)
+-		    return (ENOMEM);
+-		/* put it on the list now so if we fail it'll be freed */
+-		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+-		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+-		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+-
++	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
++		crwu = (struct sr_crypto_wu *)wu;
+ 		crwu->cr_uio.uio_iov = &crwu->cr_iov;
+ 		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
+ 		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
+ 		if (crwu->cr_crp == NULL)
+ 			return (ENOMEM);
+-		/* steal the list of cryptodescs */
+-		crwu->cr_descs = crwu->cr_crp->crp_desc;
+-		crwu->cr_crp->crp_desc = NULL;
+ 	}
+ 
+-	bzero(&cri, sizeof(cri));
+-	cri.cri_alg = CRYPTO_AES_XTS;
+-	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+-	case SR_CRYPTOA_AES_XTS_128:
+-		cri.cri_klen = 256;
+-		break;
+-	case SR_CRYPTOA_AES_XTS_256:
+-		cri.cri_klen = 512;
+-		break;
+-	default:
+-		return (EINVAL);
+-	}
++	memset(&cri, 0, sizeof(cri));
++	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
++	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;
+ 
+-	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
+-	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
+-	if (num_keys >= SR_CRYPTO_MAXKEYS)
++	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
++	num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
++	    SR_CRYPTO_KEY_BLKSHIFT) + 1;
++	if (num_keys > SR_CRYPTO_MAXKEYS)
+ 		return (EFBIG);
+-	for (i = 0; i <= num_keys; i++) {
++	for (i = 0; i < num_keys; i++) {
+ 		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
+ 		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
+ 		    &cri, 0) != 0) {
+-			for (i = 0;
+-			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
+-			     i++) {
+-				crypto_freesession(
+-				    sd->mds.mdd_crypto.scr_sid[i]);
+-				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+-			}
++			sr_crypto_free_sessions(sd);
+ 			return (EINVAL);
+ 		}
+ 	}
+@@ -1046,39 +989,30 @@
+ void
+ sr_crypto_free_resources(struct sr_discipline *sd)
+ {
++	struct sr_workunit	*wu;
+ 	struct sr_crypto_wu	*crwu;
+-	u_int			i;
+ 
+ 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
+ 	    DEVNAME(sd->sd_sc));
+ 
+ 	if (sd->mds.mdd_crypto.key_disk != NULL) {
+-		explicit_bzero(sd->mds.mdd_crypto.key_disk, sizeof
+-		    sd->mds.mdd_crypto.key_disk);
+-		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
++		explicit_bzero(sd->mds.mdd_crypto.key_disk,
++		    sizeof(*sd->mds.mdd_crypto.key_disk));
++		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
++		    sizeof(*sd->mds.mdd_crypto.key_disk));
+ 	}
+ 
+ 	sr_hotplug_unregister(sd, sr_crypto_hotplug);
+ 
+-	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
+-		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
+-		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+-	}
++	sr_crypto_free_sessions(sd);
+ 
+-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+-	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
+-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+-
+-		if (crwu->cr_dmabuf != NULL)
++	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
++		crwu = (struct sr_crypto_wu *)wu;
++		if (crwu->cr_dmabuf)
+ 			dma_free(crwu->cr_dmabuf, MAXPHYS);
+-		if (crwu->cr_crp) {
+-			/* twiddle cryptoreq back */
+-			crwu->cr_crp->crp_desc = crwu->cr_descs;
++		if (crwu->cr_crp)
+ 			crypto_freereq(crwu->cr_crp);
+-		}
+-		free(crwu, M_DEVBUF);
+ 	}
+-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+ 
+ 	sr_wu_free(sd);
+ 	sr_ccb_free(sd);
+@@ -1165,65 +1099,60 @@
+ sr_crypto_rw(struct sr_workunit *wu)
+ {
+ 	struct sr_crypto_wu	*crwu;
+-	int			s, rv = 0;
++	daddr_t			blkno;
++	int			rv = 0;
+ 
+-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
++	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
+ 	    DEVNAME(wu->swu_dis->sd_sc), wu);
+ 
++	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
++		return (1);
++
+ 	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
+-		crwu = sr_crypto_wu_get(wu, 1);
+-		if (crwu == NULL)
+-			return (1);
++		crwu = sr_crypto_prepare(wu, 1);
+ 		crwu->cr_crp->crp_callback = sr_crypto_write;
+-		s = splvm();
+-		if (crypto_invoke(crwu->cr_crp))
+-			rv = 1;
+-		else
++		rv = crypto_dispatch(crwu->cr_crp);
++		if (rv == 0)
+ 			rv = crwu->cr_crp->crp_etype;
+-		splx(s);
+ 	} else
+-		rv = sr_crypto_rw2(wu, NULL);
++		rv = sr_crypto_dev_rw(wu, NULL);
+ 
+ 	return (rv);
+ }
+ 
+-int
++void
+ sr_crypto_write(struct cryptop *crp)
+ {
+ 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
+-	struct sr_workunit	*wu = crwu->cr_wu;
++	struct sr_workunit	*wu = &crwu->cr_wu;
+ 	int			s;
+ 
+-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
++	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
+ 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
+ 
+ 	if (crp->crp_etype) {
+ 		/* fail io */
+ 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
+ 		s = splbio();
+-		sr_crypto_finish_io(wu);
++		sr_scsi_done(wu->swu_dis, wu->swu_xs);
+ 		splx(s);
+ 	}
+ 
+-	return (sr_crypto_rw2(wu, crwu));
++	sr_crypto_dev_rw(wu, crwu);
+ }
+ 
+ int
+-sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
++sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
+ {
+ 	struct sr_discipline	*sd = wu->swu_dis;
+ 	struct scsi_xfer	*xs = wu->swu_xs;
+ 	struct sr_ccb		*ccb;
+ 	struct uio		*uio;
+-	int			s;
+-	daddr64_t		blk;
++	daddr_t			blkno;
+ 
+-	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
+-		goto bad;
++	blkno = wu->swu_blk_start;
+ 
+-	blk += sd->sd_meta->ssd_data_offset;
+-
+-	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
++	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
+ 	if (!ccb) {
+ 		/* should never happen but handle more gracefully */
+ 		printf("%s: %s: too many ccbs queued\n",
+@@ -1236,17 +1165,10 @@
+ 		ccb->ccb_opaque = crwu;
+ 	}
+ 	sr_wu_enqueue_ccb(wu, ccb);
++	sr_schedule_wu(wu);
+ 
+-	s = splbio();
+-
+-	if (sr_check_io_collision(wu))
+-		goto queued;
+-
+-	sr_raid_startwu(wu);
+-
+-queued:
+-	splx(s);
+ 	return (0);
++
+ bad:
+ 	/* wu is unwound by sr_wu_put */
+ 	if (crwu)
+@@ -1259,77 +1181,39 @@
+ {
+ 	struct scsi_xfer	*xs = wu->swu_xs;
+ 	struct sr_crypto_wu	*crwu;
+-	struct sr_ccb		*ccb;
+ 	int			s;
+ 
+ 	/* If this was a successful read, initiate decryption of the data. */
+ 	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
+-		/* only fails on implementation error */
+-		crwu = sr_crypto_wu_get(wu, 0);
+-		if (crwu == NULL)
+-			panic("sr_crypto_intr: no wu");
++		crwu = sr_crypto_prepare(wu, 0);
+ 		crwu->cr_crp->crp_callback = sr_crypto_read;
+-		ccb = TAILQ_FIRST(&wu->swu_ccb);
+-		if (ccb == NULL)
+-			panic("sr_crypto_done: no ccbs on workunit");
+-		ccb->ccb_opaque = crwu;
+-		DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
++		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
+ 		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
+-		s = splvm();
+-		crypto_invoke(crwu->cr_crp);
+-		splx(s);
++		crypto_dispatch(crwu->cr_crp);
+ 		return;
+ 	}
+ 
+ 	s = splbio();
+-	sr_crypto_finish_io(wu);
++	sr_scsi_done(wu->swu_dis, wu->swu_xs);
+ 	splx(s);
+ }
+ 
+ void
+-sr_crypto_finish_io(struct sr_workunit *wu)
+-{
+-	struct sr_discipline	*sd = wu->swu_dis;
+-	struct scsi_xfer	*xs = wu->swu_xs;
+-	struct sr_ccb		*ccb;
+-#ifdef SR_DEBUG
+-	struct sr_softc		*sc = sd->sd_sc;
+-#endif /* SR_DEBUG */
+-
+-	splassert(IPL_BIO);
+-
+-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
+-	    DEVNAME(sc), wu, xs);
+-
+-	if (wu->swu_cb_active == 1)
+-		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
+-	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
+-		if (ccb->ccb_opaque == NULL)
+-			continue;
+-		sr_crypto_wu_put(ccb->ccb_opaque);
+-	}
+-
+-	sr_scsi_done(sd, xs);
+-}
+-
+-int
+ sr_crypto_read(struct cryptop *crp)
+ {
+ 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
+-	struct sr_workunit	*wu = crwu->cr_wu;
++	struct sr_workunit	*wu = &crwu->cr_wu;
+ 	int			s;
+ 
+-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
++	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
+ 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
+ 
+ 	if (crp->crp_etype)
+ 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
+ 
+ 	s = splbio();
+-	sr_crypto_finish_io(wu);
++	sr_scsi_done(wu->swu_dis, wu->swu_xs);
+ 	splx(s);
+-
+-	return (0);
+ }
+ 
+ void
blob - /dev/null
blob + 952d2852309492b6536c616cf5f2aa28d20cbb71 (mode 644)
--- /dev/null
+++ test/test021.left.txt
@@ -0,0 +1,1367 @@
+/* $OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $ */
+/*
+ * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
+ * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
+ * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
+ * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bio.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/device.h>
+#include <sys/ioctl.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/pool.h>
+#include <sys/kernel.h>
+#include <sys/disk.h>
+#include <sys/rwlock.h>
+#include <sys/queue.h>
+#include <sys/fcntl.h>
+#include <sys/disklabel.h>
+#include <sys/mount.h>
+#include <sys/sensors.h>
+#include <sys/stat.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/dkio.h>
+
+#include <crypto/cryptodev.h>
+#include <crypto/cryptosoft.h>
+#include <crypto/rijndael.h>
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/hmac.h>
+
+#include <scsi/scsi_all.h>
+#include <scsi/scsiconf.h>
+#include <scsi/scsi_disk.h>
+
+#include <dev/softraidvar.h>
+#include <dev/rndvar.h>
+
+/*
+ * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
+ * to start failing when memory pressure kicks in. We can store this in the WU
+ * because we assert that only one ccb per WU will ever be active.
+ */
+struct sr_crypto_wu {
+	TAILQ_ENTRY(sr_crypto_wu)	 cr_link;
+	struct uio			 cr_uio;
+	struct iovec			 cr_iov;
+	struct cryptop	 		*cr_crp;
+	struct cryptodesc		*cr_descs;
+	struct sr_workunit		*cr_wu;
+	void				*cr_dmabuf;
+};
+
+
+struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
+void		sr_crypto_wu_put(struct sr_crypto_wu *);
+int		sr_crypto_create_keys(struct sr_discipline *);
+int		sr_crypto_get_kdf(struct bioc_createraid *,
+		    struct sr_discipline *);
+int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
+int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
+int		sr_crypto_decrypt_key(struct sr_discipline *);
+int		sr_crypto_change_maskkey(struct sr_discipline *,
+		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
+int		sr_crypto_create(struct sr_discipline *,
+		    struct bioc_createraid *, int, int64_t);
+int		sr_crypto_assemble(struct sr_discipline *,
+		    struct bioc_createraid *, int, void *);
+int		sr_crypto_alloc_resources(struct sr_discipline *);
+void		sr_crypto_free_resources(struct sr_discipline *);
+int		sr_crypto_ioctl(struct sr_discipline *,
+		    struct bioc_discipline *);
+int		sr_crypto_meta_opt_handler(struct sr_discipline *,
+		    struct sr_meta_opt_hdr *);
+int		sr_crypto_write(struct cryptop *);
+int		sr_crypto_rw(struct sr_workunit *);
+int		sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
+void		sr_crypto_done(struct sr_workunit *);
+int		sr_crypto_read(struct cryptop *);
+void		sr_crypto_finish_io(struct sr_workunit *);
+void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
+		   u_int8_t *, int, u_char *);
+void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
+
+#ifdef SR_DEBUG0
+void		 sr_crypto_dumpkeys(struct sr_discipline *);
+#endif
+
+/* Discipline initialisation. */
+void
+sr_crypto_discipline_init(struct sr_discipline *sd)
+{
+	int i;
+
+	/* Fill out discipline members. */
+	sd->sd_type = SR_MD_CRYPTO;
+	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
+	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
+	sd->sd_max_wu = SR_CRYPTO_NOWU;
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
+		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+
+	/* Setup discipline specific function pointers. */
+	sd->sd_alloc_resources = sr_crypto_alloc_resources;
+	sd->sd_assemble = sr_crypto_assemble;
+	sd->sd_create = sr_crypto_create;
+	sd->sd_free_resources = sr_crypto_free_resources;
+	sd->sd_ioctl_handler = sr_crypto_ioctl;
+	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
+	sd->sd_scsi_rw = sr_crypto_rw;
+	sd->sd_scsi_done = sr_crypto_done;
+}
+
+int
+sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
+    int no_chunk, int64_t coerced_size)
+{
+	struct sr_meta_opt_item	*omi;
+	int			rv = EINVAL;
+
+	if (no_chunk != 1) {
+		sr_error(sd->sd_sc, "%s requires exactly one chunk",
+		    sd->sd_name);
+		goto done;
+        }
+
+	/* Create crypto optional metadata. */
+	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som->som_type = SR_OPT_CRYPTO;
+	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
+	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
+	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
+	sd->sd_meta->ssdi.ssd_opt_no++;
+
+	sd->mds.mdd_crypto.key_disk = NULL;
+
+	if (bc->bc_key_disk != NODEV) {
+
+		/* Create a key disk. */
+		if (sr_crypto_get_kdf(bc, sd))
+			goto done;
+		sd->mds.mdd_crypto.key_disk =
+		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
+		if (sd->mds.mdd_crypto.key_disk == NULL)
+			goto done;
+		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;
+
+	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
+
+		/* No hint available yet. */
+		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
+		rv = EAGAIN;
+		goto done;
+
+	} else if (sr_crypto_get_kdf(bc, sd))
+		goto done;
+
+	/* Passphrase volumes cannot be automatically assembled. */
+	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
+		goto done;
+
+	sd->sd_meta->ssdi.ssd_size = coerced_size;
+
+	sr_crypto_create_keys(sd);
+
+	sd->sd_max_ccb_per_wu = no_chunk;
+
+	rv = 0;
+done:
+	return (rv);
+}
+
+int
+sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
+    int no_chunk, void *data)
+{
+	int	rv = EINVAL;
+
+	sd->mds.mdd_crypto.key_disk = NULL;
+
+	/* Crypto optional metadata must already exist... */
+	if (sd->mds.mdd_crypto.scr_meta == NULL)
+		goto done;
+
+	if (data != NULL) {
+		/* Kernel already has mask key. */
+		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
+		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+	} else if (bc->bc_key_disk != NODEV) {
+		/* Read the mask key from the key disk. */
+		sd->mds.mdd_crypto.key_disk =
+		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
+		if (sd->mds.mdd_crypto.key_disk == NULL)
+			goto done;
+	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
+		/* provide userland with kdf hint */
+		if (bc->bc_opaque == NULL)
+			goto done;
+
+		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
+		    bc->bc_opaque_size)
+			goto done;
+
+		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    bc->bc_opaque, bc->bc_opaque_size))
+			goto done;
+
+		/* we're done */
+		bc->bc_opaque_status = BIOC_SOINOUT_OK;
+		rv = EAGAIN;
+		goto done;
+	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
+		/* get kdf with maskkey from userland */
+		if (sr_crypto_get_kdf(bc, sd))
+			goto done;
+	} else
+		goto done;
+
+	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;
+
+	rv = 0;
+done:
+	return (rv);
+}
+
+struct sr_crypto_wu *
+sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
+{
+	struct scsi_xfer	*xs = wu->swu_xs;
+	struct sr_discipline	*sd = wu->swu_dis;
+	struct sr_crypto_wu	*crwu;
+	struct cryptodesc	*crd;
+	int			flags, i, n;
+	daddr64_t		blk = 0;
+	u_int			keyndx;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
+	    DEVNAME(sd->sd_sc), wu, encrypt);
+
+	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
+		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+	if (crwu == NULL)
+		panic("sr_crypto_wu_get: out of wus");
+
+	crwu->cr_uio.uio_iovcnt = 1;
+	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
+	if (xs->flags & SCSI_DATA_OUT) {
+		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
+		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
+	} else
+		crwu->cr_uio.uio_iov->iov_base = xs->data;
+
+	if (xs->cmdlen == 10)
+		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
+	else if (xs->cmdlen == 16)
+		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
+	else if (xs->cmdlen == 6)
+		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);
+
+	n = xs->datalen >> DEV_BSHIFT;
+
+	/*
+	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
+	 * Since there may be less than that we need to tweak the linked list
+	 * of crypto desc structures to be just long enough for our needs.
+	 */
+	crd = crwu->cr_descs;
+	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
+		crd = crd->crd_next;
+		KASSERT(crd);
+	}
+	crwu->cr_crp->crp_desc = crd;
+	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
+	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
+
+	/* Select crypto session based on block number */
+	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
+	if (keyndx >= SR_CRYPTO_MAXKEYS)
+		goto unwind;
+	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
+	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
+		goto unwind;
+
+	crwu->cr_crp->crp_ilen = xs->datalen;
+	crwu->cr_crp->crp_alloctype = M_DEVBUF;
+	crwu->cr_crp->crp_buf = &crwu->cr_uio;
+	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
+	    i++, blk++, crd = crd->crd_next) {
+		crd->crd_skip = i << DEV_BSHIFT;
+		crd->crd_len = DEV_BSIZE;
+		crd->crd_inject = 0;
+		crd->crd_flags = flags;
+		crd->crd_alg = CRYPTO_AES_XTS;
+
+		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+		case SR_CRYPTOA_AES_XTS_128:
+			crd->crd_klen = 256;
+			break;
+		case SR_CRYPTOA_AES_XTS_256:
+			crd->crd_klen = 512;
+			break;
+		default:
+			goto unwind;
+		}
+		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
+		bcopy(&blk, crd->crd_iv, sizeof(blk));
+	}
+	crwu->cr_wu = wu;
+	crwu->cr_crp->crp_opaque = crwu;
+
+	return (crwu);
+
+unwind:
+	/* steal the descriptors back from the cryptop */
+	crwu->cr_crp->crp_desc = NULL;
+
+	return (NULL);
+}
+
+void
+sr_crypto_wu_put(struct sr_crypto_wu *crwu)
+{
+	struct cryptop		*crp = crwu->cr_crp;
+	struct sr_workunit	*wu = crwu->cr_wu;
+	struct sr_discipline	*sd = wu->swu_dis;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
+	    DEVNAME(wu->swu_dis->sd_sc), crwu);
+
+	/* steal the descriptors back from the cryptop */
+	crp->crp_desc = NULL;
+
+	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+}
+
+int
+sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
+{
+	int			rv = EINVAL;
+	struct sr_crypto_kdfinfo *kdfinfo;
+
+	if (!(bc->bc_opaque_flags & BIOC_SOIN))
+		return (rv);
+	if (bc->bc_opaque == NULL)
+		return (rv);
+	if (bc->bc_opaque_size != sizeof(*kdfinfo))
+		return (rv);
+
+	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
+	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
+		goto out;
+
+	if (kdfinfo->len != bc->bc_opaque_size)
+		goto out;
+
+	/* copy KDF hint to disk meta data */
+	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
+		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
+		    kdfinfo->genkdf.len)
+			goto out;
+		bcopy(&kdfinfo->genkdf,
+		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    kdfinfo->genkdf.len);
+	}
+
+	/* copy mask key to run-time meta data */
+	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
+		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
+		    sizeof(kdfinfo->maskkey))
+			goto out;
+		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
+		    sizeof(kdfinfo->maskkey));
+	}
+
+	bc->bc_opaque_status = BIOC_SOINOUT_OK;
+	rv = 0;
+out:
+	explicit_bzero(kdfinfo, bc->bc_opaque_size);
+	free(kdfinfo, M_DEVBUF);
+
+	return (rv);
+}
+
+int
+sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
+{
+	rijndael_ctx		ctx;
+	int			i, rv = 1;
+
+	switch (alg) {
+	case SR_CRYPTOM_AES_ECB_256:
+		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
+			goto out;
+		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
+			rijndael_encrypt(&ctx, &p[i], &c[i]);
+		rv = 0;
+		break;
+	default:
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		    "softraid", alg);
+		rv = -1;
+		goto out;
+	}
+
+out:
+	explicit_bzero(&ctx, sizeof(ctx));
+	return (rv);
+}
+
+int
+sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
+{
+	rijndael_ctx		ctx;
+	int			i, rv = 1;
+
+	switch (alg) {
+	case SR_CRYPTOM_AES_ECB_256:
+		if (rijndael_set_key(&ctx, key, 256) != 0)
+			goto out;
+		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
+			rijndael_decrypt(&ctx, &c[i], &p[i]);
+		rv = 0;
+		break;
+	default:
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		    "softraid", alg);
+		rv = -1;
+		goto out;
+	}
+
+out:
+	explicit_bzero(&ctx, sizeof(ctx));
+	return (rv);
+}
+
+void
+sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
+    u_int8_t *key, int key_size, u_char *check_digest)
+{
+	u_char			check_key[SHA1_DIGEST_LENGTH];
+	HMAC_SHA1_CTX		hmacctx;
+	SHA1_CTX		shactx;
+
+	bzero(check_key, sizeof(check_key));
+	bzero(&hmacctx, sizeof(hmacctx));
+	bzero(&shactx, sizeof(shactx));
+
+	/* k = SHA1(mask_key) */
+	SHA1Init(&shactx);
+	SHA1Update(&shactx, maskkey, maskkey_size);
+	SHA1Final(check_key, &shactx);
+
+	/* mac = HMAC_SHA1_k(unencrypted key) */
+	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
+	HMAC_SHA1_Update(&hmacctx, key, key_size);
+	HMAC_SHA1_Final(check_digest, &hmacctx);
+
+	explicit_bzero(check_key, sizeof(check_key));
+	explicit_bzero(&hmacctx, sizeof(hmacctx));
+	explicit_bzero(&shactx, sizeof(shactx));
+}
+
+int
+sr_crypto_decrypt_key(struct sr_discipline *sd)
+{
+	u_char			check_digest[SHA1_DIGEST_LENGTH];
+	int			rv = 1;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));
+
+	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
+		goto out;
+
+	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
+	    (u_char *)sd->mds.mdd_crypto.scr_key,
+	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
+		goto out;
+
+#ifdef SR_DEBUG0
+	sr_crypto_dumpkeys(sd);
+#endif
+
+	/* Check that the key decrypted properly. */
+	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
+	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key),
+	    check_digest);
+	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	    check_digest, sizeof(check_digest)) != 0) {
+		explicit_bzero(sd->mds.mdd_crypto.scr_key,
+		    sizeof(sd->mds.mdd_crypto.scr_key));
+		goto out;
+	}
+
+	rv = 0; /* Success */
+out:
+	/* we don't need the mask key anymore */
+	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+
+	explicit_bzero(check_digest, sizeof(check_digest));
+
+	return rv;
+}
+
+int
+sr_crypto_create_keys(struct sr_discipline *sd)
+{
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
+	    DEVNAME(sd->sd_sc));
+
+	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
+		return (1);
+
+	/* XXX allow user to specify */
+	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;
+
+	/* generate crypto keys */
+	arc4random_buf(sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key));
+
+	/* Mask the disk keys. */
+	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
+	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
+	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
+	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);
+
+	/* Prepare key decryption check code. */
+	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
+	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
+	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key),
+	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);
+
+	/* Erase the plaintext disk keys */
+	explicit_bzero(sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key));
+
+#ifdef SR_DEBUG0
+	sr_crypto_dumpkeys(sd);
+#endif
+
+	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
+	    SR_CRYPTOF_KDFHINT;
+
+	return (0);
+}
+
+int
+sr_crypto_change_maskkey(struct sr_discipline *sd,
+  struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
+{
+	u_char			check_digest[SHA1_DIGEST_LENGTH];
+	u_char			*c, *p = NULL;
+	size_t			ksz;
+	int			rv = 1;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
+	    DEVNAME(sd->sd_sc));
+
+	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
+		goto out;
+
+	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
+	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
+	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
+	if (p == NULL)
+		goto out;
+
+	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
+		goto out;
+
+#ifdef SR_DEBUG0
+	sr_crypto_dumpkeys(sd);
+#endif
+
+	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
+	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
+	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	    check_digest, sizeof(check_digest)) != 0) {
+		sr_error(sd->sd_sc, "incorrect key or passphrase");
+		rv = EPERM;
+		goto out;
+	}
+
+	/* Mask the disk keys. */
+	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
+	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
+		goto out;
+
+	/* Prepare key decryption check code. */
+	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
+	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
+	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);
+
+	/* Copy new encrypted key and HMAC to metadata. */
+	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));
+
+	rv = 0; /* Success */
+
+out:
+	if (p) {
+		explicit_bzero(p, ksz);
+		free(p, M_DEVBUF);
+	}
+
+	explicit_bzero(check_digest, sizeof(check_digest));
+	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
+	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));
+
+	return (rv);
+}
+
+struct sr_chunk *
+sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
+{
+	struct sr_softc		*sc = sd->sd_sc;
+	struct sr_discipline	*fakesd = NULL;
+	struct sr_metadata	*sm = NULL;
+	struct sr_meta_chunk    *km;
+	struct sr_meta_opt_item *omi = NULL;
+	struct sr_meta_keydisk	*skm;
+	struct sr_chunk		*key_disk = NULL;
+	struct disklabel	label;
+	struct vnode		*vn;
+	char			devname[32];
+	int			c, part, open = 0;
+
+	/*
+	 * Create a metadata structure on the key disk and store
+	 * keying material in the optional metadata.
+	 */
+
+	sr_meta_getdevname(sc, dev, devname, sizeof(devname));
+
+	/* Make sure chunk is not already in use. */
+	c = sr_chunk_in_use(sc, dev);
+	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
+		sr_error(sc, "%s is already in use", devname);
+		goto done;
+	}
+
+	/* Open device. */
+	if (bdevvp(dev, &vn)) {
+		sr_error(sc, "cannot open key disk %s", devname);
+		goto done;
+	}
+	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
+		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
+		    "open %s\n", DEVNAME(sc), devname);
+		vput(vn);
+		goto fail;
+	}
+	open = 1; /* close dev on error */
+
+	/* Get partition details. */
+	part = DISKPART(dev);
+	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
+	    FREAD, NOCRED, curproc)) {
+		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
+		    "failed\n", DEVNAME(sc));
+		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
+		vput(vn);
+		goto fail;
+	}
+	if (label.d_secsize != DEV_BSIZE) {
+		sr_error(sc, "%s has unsupported sector size (%d)",
+		    devname, label.d_secsize);
+		goto fail;
+	}
+	if (label.d_partitions[part].p_fstype != FS_RAID) {
+		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		    devname, label.d_partitions[part].p_fstype);
+		goto fail;
+	}
+
+	/*
+	 * Create and populate chunk metadata.
+	 */
+
+	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
+	km = &key_disk->src_meta;
+
+	key_disk->src_dev_mm = dev;
+	key_disk->src_vn = vn;
+	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
+	key_disk->src_size = 0;
+
+	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
+	km->scmi.scm_chunk_id = 0;
+	km->scmi.scm_size = 0;
+	km->scmi.scm_coerced_size = 0;
+	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
+	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
+	    sizeof(struct sr_uuid));
+
+	sr_checksum(sc, km, &km->scm_checksum,
+	    sizeof(struct sr_meta_chunk_invariant));
+
+	km->scm_status = BIOC_SDONLINE;
+
+	/*
+	 * Create and populate our own discipline and metadata.
+	 */
+
+	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
+	sm->ssdi.ssd_magic = SR_MAGIC;
+	sm->ssdi.ssd_version = SR_META_VERSION;
+	sm->ssd_ondisk = 0;
+	sm->ssdi.ssd_vol_flags = 0;
+	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
+	    sizeof(struct sr_uuid));
+	sm->ssdi.ssd_chunk_no = 1;
+	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
+	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
+	sm->ssdi.ssd_size = 0;
+	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
+	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
+	    "SR %s", "KEYDISK");
+	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
+	    "%03d", SR_META_VERSION);
+
+	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	fakesd->sd_sc = sd->sd_sc;
+	fakesd->sd_meta = sm;
+	fakesd->sd_meta_type = SR_META_F_NATIVE;
+	fakesd->sd_vol_status = BIOC_SVONLINE;
+	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
+	SLIST_INIT(&fakesd->sd_meta_opt);
+
+	/* Add chunk to volume. */
+	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	fakesd->sd_vol.sv_chunks[0] = key_disk;
+	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
+	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);
+
+	/* Generate mask key. */
+	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+
+	/* Copy mask key to optional metadata area. */
+	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som->som_type = SR_OPT_KEYDISK;
+	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
+	skm = (struct sr_meta_keydisk *)omi->omi_som;
+	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
+	    sizeof(skm->skm_maskkey));
+	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
+	fakesd->sd_meta->ssdi.ssd_opt_no++;
+
+	/* Save metadata. */
+	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
+		sr_error(sc, "could not save metadata to %s", devname);
+		goto fail;
+	}
+
+	goto done;
+
+fail:
+	if (key_disk)
+		free(key_disk, M_DEVBUF);
+	key_disk = NULL;
+
+done:
+	if (omi)
+		free(omi, M_DEVBUF);
+	if (fakesd && fakesd->sd_vol.sv_chunks)
+		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
+	if (fakesd)
+		free(fakesd, M_DEVBUF);
+	if (sm)
+		free(sm, M_DEVBUF);
+	if (open) {
+		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
+		vput(vn);
+	}
+
+	return key_disk;
+}
+
+struct sr_chunk *
+sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
+{
+	struct sr_softc		*sc = sd->sd_sc;
+	struct sr_metadata	*sm = NULL;
+	struct sr_meta_opt_item *omi, *omi_next;
+	struct sr_meta_opt_hdr	*omh;
+	struct sr_meta_keydisk	*skm;
+	struct sr_meta_opt_head som;
+	struct sr_chunk		*key_disk = NULL;
+	struct disklabel	label;
+	struct vnode		*vn = NULL;
+	char			devname[32];
+	int			c, part, open = 0;
+
+	/*
+	 * Load a key disk and load keying material into memory.
+	 */
+
+	SLIST_INIT(&som);
+
+	sr_meta_getdevname(sc, dev, devname, sizeof(devname));
+
+	/* Make sure chunk is not already in use. */
+	c = sr_chunk_in_use(sc, dev);
+	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
+		sr_error(sc, "%s is already in use", devname);
+		goto done;
+	}
+
+	/* Open device. */
+	if (bdevvp(dev, &vn)) {
+		sr_error(sc, "cannot open key disk %s", devname);
+		goto done;
+	}
+	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
+		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
+		    "open %s\n", DEVNAME(sc), devname);
+		vput(vn);
+		goto done;
+	}
+	open = 1; /* close dev on error */
+
+	/* Get partition details. */
+	part = DISKPART(dev);
+	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
+	    NOCRED, curproc)) {
+		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
+		    "failed\n", DEVNAME(sc));
+		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
+		vput(vn);
+		goto done;
+	}
+	if (label.d_secsize != DEV_BSIZE) {
+		sr_error(sc, "%s has unsupported sector size (%d)",
+		    devname, label.d_secsize);
+		goto done;
+	}
+	if (label.d_partitions[part].p_fstype != FS_RAID) {
+		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		    devname, label.d_partitions[part].p_fstype);
+		goto done;
+	}
+
+	/*
+	 * Read and validate key disk metadata.
+	 */
+	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
+	if (sr_meta_native_read(sd, dev, sm, NULL)) {
+		sr_error(sc, "native bootprobe could not read native metadata");
+		goto done;
+	}
+
+	if (sr_meta_validate(sd, dev, sm, NULL)) {
+		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
+		    DEVNAME(sc));
+		goto done;
+	}
+
+	/* Make sure this is a key disk. */
+	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
+		sr_error(sc, "%s is not a key disk", devname);
+		goto done;
+	}
+
+	/* Construct key disk chunk. */
+	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
+	key_disk->src_dev_mm = dev;
+	key_disk->src_vn = vn;
+	key_disk->src_size = 0;
+
+	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
+	    sizeof(key_disk->src_meta));
+
+	/* Read mask key from optional metadata. */
+	sr_meta_opt_load(sc, sm, &som);
+	SLIST_FOREACH(omi, &som, omi_link) {
+		omh = omi->omi_som;
+		if (omh->som_type == SR_OPT_KEYDISK) {
+			skm = (struct sr_meta_keydisk *)omh;
+			bcopy(&skm->skm_maskkey,
+			    sd->mds.mdd_crypto.scr_maskkey,
+			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+		} else if (omh->som_type == SR_OPT_CRYPTO) {
+			/* Original keydisk format with key in crypto area. */
+			bcopy(omh + sizeof(struct sr_meta_opt_hdr),
+			    sd->mds.mdd_crypto.scr_maskkey,
+			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+		}
+	}
+
+	open = 0;
+
+done:
+	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
+		omi_next = SLIST_NEXT(omi, omi_link);
+		if (omi->omi_som)
+			free(omi->omi_som, M_DEVBUF);
+		free(omi, M_DEVBUF);
+	}
+
+	if (sm)
+		free(sm, M_DEVBUF);
+
+	if (vn && open) {
+		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
+		vput(vn);
+	}
+
+	return key_disk;
+}
+
+int
+sr_crypto_alloc_resources(struct sr_discipline *sd)
+{
+	struct cryptoini	cri;
+	struct sr_crypto_wu	*crwu;
+	u_int			num_keys, i;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
+	    DEVNAME(sd->sd_sc));
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
+		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+
+	if (sr_wu_alloc(sd)) {
+		sr_error(sd->sd_sc, "unable to allocate work units");
+		return (ENOMEM);
+	}
+	if (sr_ccb_alloc(sd)) {
+		sr_error(sd->sd_sc, "unable to allocate CCBs");
+		return (ENOMEM);
+	}
+	if (sr_crypto_decrypt_key(sd)) {
+		sr_error(sd->sd_sc, "incorrect key or passphrase");
+		return (EPERM);
+	}
+
+	/*
+	 * For each wu allocate the uio, iovec and crypto structures.
+	 * these have to be allocated now because during runtime we can't
+	 * fail an allocation without failing the io (which can cause real
+	 * problems).
+	 */
+	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
+	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
+	for (i = 0; i < sd->sd_max_wu; i++) {
+		crwu = malloc(sizeof(*crwu), M_DEVBUF,
+		    M_WAITOK | M_ZERO | M_CANFAIL);
+		if (crwu == NULL)
+		    return (ENOMEM);
+		/* put it on the list now so if we fail it'll be freed */
+		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+
+		crwu->cr_uio.uio_iov = &crwu->cr_iov;
+		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
+		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
+		if (crwu->cr_crp == NULL)
+			return (ENOMEM);
+		/* steal the list of cryptodescs */
+		crwu->cr_descs = crwu->cr_crp->crp_desc;
+		crwu->cr_crp->crp_desc = NULL;
+	}
+
+	bzero(&cri, sizeof(cri));
+	cri.cri_alg = CRYPTO_AES_XTS;
+	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+	case SR_CRYPTOA_AES_XTS_128:
+		cri.cri_klen = 256;
+		break;
+	case SR_CRYPTOA_AES_XTS_256:
+		cri.cri_klen = 512;
+		break;
+	default:
+		return (EINVAL);
+	}
+
+	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
+	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
+	if (num_keys >= SR_CRYPTO_MAXKEYS)
+		return (EFBIG);
+	for (i = 0; i <= num_keys; i++) {
+		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
+		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
+		    &cri, 0) != 0) {
+			for (i = 0;
+			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
+			     i++) {
+				crypto_freesession(
+				    sd->mds.mdd_crypto.scr_sid[i]);
+				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+			}
+			return (EINVAL);
+		}
+	}
+
+	sr_hotplug_register(sd, sr_crypto_hotplug);
+
+	return (0);
+}
+
+void
+sr_crypto_free_resources(struct sr_discipline *sd)
+{
+	struct sr_crypto_wu	*crwu;
+	u_int			i;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
+	    DEVNAME(sd->sd_sc));
+
+	if (sd->mds.mdd_crypto.key_disk != NULL) {
+		explicit_bzero(sd->mds.mdd_crypto.key_disk, sizeof
+		    sd->mds.mdd_crypto.key_disk);
+		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
+	}
+
+	sr_hotplug_unregister(sd, sr_crypto_hotplug);
+
+	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
+		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
+		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+	}
+
+	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
+	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
+		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
+
+		if (crwu->cr_dmabuf != NULL)
+			dma_free(crwu->cr_dmabuf, MAXPHYS);
+		if (crwu->cr_crp) {
+			/* twiddle cryptoreq back */
+			crwu->cr_crp->crp_desc = crwu->cr_descs;
+			crypto_freereq(crwu->cr_crp);
+		}
+		free(crwu, M_DEVBUF);
+	}
+	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
+
+	sr_wu_free(sd);
+	sr_ccb_free(sd);
+}
+
+int
+sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
+{
+	struct sr_crypto_kdfpair kdfpair;
+	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
+	int			size, rv = 1;
+
+	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
+	    DEVNAME(sd->sd_sc), bd->bd_cmd);
+
+	switch (bd->bd_cmd) {
+	case SR_IOCTL_GET_KDFHINT:
+
+		/* Get KDF hint for userland. */
+		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
+		if (bd->bd_data == NULL || bd->bd_size > size)
+			goto bad;
+		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    bd->bd_data, bd->bd_size))
+			goto bad;
+
+		rv = 0;
+
+		break;
+
+	case SR_IOCTL_CHANGE_PASSPHRASE:
+
+		/* Attempt to change passphrase. */
+
+		size = sizeof(kdfpair);
+		if (bd->bd_data == NULL || bd->bd_size > size)
+			goto bad;
+		if (copyin(bd->bd_data, &kdfpair, size))
+			goto bad;
+
+		size = sizeof(kdfinfo1);
+		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
+			goto bad;
+		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
+			goto bad;
+
+		size = sizeof(kdfinfo2);
+		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
+			goto bad;
+		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
+			goto bad;
+
+		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
+			goto bad;
+
+		/* Save metadata to disk. */
+		rv = sr_meta_save(sd, SR_META_DIRTY);
+
+		break;
+	}
+
+bad:
+	explicit_bzero(&kdfpair, sizeof(kdfpair));
+	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
+	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));
+
+	return (rv);
+}
+
+int
+sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
+{
+	int rv = EINVAL;
+
+	if (om->som_type == SR_OPT_CRYPTO) {
+		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
+		rv = 0;
+	}
+
+	return (rv);
+}
+
+int
+sr_crypto_rw(struct sr_workunit *wu)
+{
+	struct sr_crypto_wu	*crwu;
+	int			s, rv = 0;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
+	    DEVNAME(wu->swu_dis->sd_sc), wu);
+
+	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
+		crwu = sr_crypto_wu_get(wu, 1);
+		if (crwu == NULL)
+			return (1);
+		crwu->cr_crp->crp_callback = sr_crypto_write;
+		s = splvm();
+		if (crypto_invoke(crwu->cr_crp))
+			rv = 1;
+		else
+			rv = crwu->cr_crp->crp_etype;
+		splx(s);
+	} else
+		rv = sr_crypto_rw2(wu, NULL);
+
+	return (rv);
+}
+
+int
+sr_crypto_write(struct cryptop *crp)
+{
+	struct sr_crypto_wu	*crwu = crp->crp_opaque;
+	struct sr_workunit	*wu = crwu->cr_wu;
+	int			s;
+
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
+	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
+
+	if (crp->crp_etype) {
+		/* fail io */
+		wu->swu_xs->error = XS_DRIVER_STUFFUP;
+		s = splbio();
+		sr_crypto_finish_io(wu);
+		splx(s);
+	}
+
+	return (sr_crypto_rw2(wu, crwu));
+}
+
+int
+sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
+{
+	struct sr_discipline	*sd = wu->swu_dis;
+	struct scsi_xfer	*xs = wu->swu_xs;
+	struct sr_ccb		*ccb;
+	struct uio		*uio;
+	int			s;
+	daddr64_t		blk;
+
+	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
+		goto bad;
+
+	blk += sd->sd_meta->ssd_data_offset;
+
+	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
+	if (!ccb) {
+		/* should never happen but handle more gracefully */
+		printf("%s: %s: too many ccbs queued\n",
+		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
+		goto bad;
+	}
+	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
+		uio = crwu->cr_crp->crp_buf;
+		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
+		ccb->ccb_opaque = crwu;
+	}
+	sr_wu_enqueue_ccb(wu, ccb);
+
+	s = splbio();
+
+	if (sr_check_io_collision(wu))
+		goto queued;
+
+	sr_raid_startwu(wu);
+
+queued:
+	splx(s);
+	return (0);
+bad:
+	/* wu is unwound by sr_wu_put */
+	if (crwu)
+		crwu->cr_crp->crp_etype = EINVAL;
+	return (1);
+}
+
+void
+sr_crypto_done(struct sr_workunit *wu)
+{
+	struct scsi_xfer	*xs = wu->swu_xs;
+	struct sr_crypto_wu	*crwu;
+	struct sr_ccb		*ccb;
+	int			s;
+
+	/* If this was a successful read, initiate decryption of the data. */
+	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
+		/* only fails on implementation error */
+		crwu = sr_crypto_wu_get(wu, 0);
+		if (crwu == NULL)
+			panic("sr_crypto_intr: no wu");
+		crwu->cr_crp->crp_callback = sr_crypto_read;
+		ccb = TAILQ_FIRST(&wu->swu_ccb);
+		if (ccb == NULL)
+			panic("sr_crypto_done: no ccbs on workunit");
+		ccb->ccb_opaque = crwu;
+		DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
+		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
+		s = splvm();
+		crypto_invoke(crwu->cr_crp);
+		splx(s);
+		return;
+	}
+
+	s = splbio();
+	sr_crypto_finish_io(wu);
+	splx(s);
+}
+
+void
+sr_crypto_finish_io(struct sr_workunit *wu)
+{
+	struct sr_discipline	*sd = wu->swu_dis;
+	struct scsi_xfer	*xs = wu->swu_xs;
+	struct sr_ccb		*ccb;
+#ifdef SR_DEBUG
+	struct sr_softc		*sc = sd->sd_sc;
+#endif /* SR_DEBUG */
+
+	splassert(IPL_BIO);
+
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
+	    DEVNAME(sc), wu, xs);
+
+	if (wu->swu_cb_active == 1)
+		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
+	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
+		if (ccb->ccb_opaque == NULL)
+			continue;
+		sr_crypto_wu_put(ccb->ccb_opaque);
+	}
+
+	sr_scsi_done(sd, xs);
+}
+
+int
+sr_crypto_read(struct cryptop *crp)
+{
+	struct sr_crypto_wu	*crwu = crp->crp_opaque;
+	struct sr_workunit	*wu = crwu->cr_wu;
+	int			s;
+
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
+	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
+
+	if (crp->crp_etype)
+		wu->swu_xs->error = XS_DRIVER_STUFFUP;
+
+	s = splbio();
+	sr_crypto_finish_io(wu);
+	splx(s);
+
+	return (0);
+}
+
+void
+sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
+{
+	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
+	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
+}
+
+#ifdef SR_DEBUG0
+void
+sr_crypto_dumpkeys(struct sr_discipline *sd)
+{
+	int			i, j;
+
+	printf("sr_crypto_dumpkeys:\n");
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		printf("\tscm_key[%d]: 0x", i);
+		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
+			printf("%02x",
+			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
+		}
+		printf("\n");
+	}
+	printf("sr_crypto_dumpkeys: runtime data keys:\n");
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		printf("\tscr_key[%d]: 0x", i);
+		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
+			printf("%02x",
+			    sd->mds.mdd_crypto.scr_key[i][j]);
+		}
+		printf("\n");
+	}
+}
+#endif	/* SR_DEBUG0 */
blob - /dev/null
blob + 7e2cc400cddbd07b35b31e747b7aa0cfadde3a73 (mode 644)
--- /dev/null
+++ test/test021.right.txt
@@ -0,0 +1,1251 @@
+/* $OpenBSD: softraid_crypto.c,v 1.139 2020/07/13 00:06:22 kn Exp $ */
+/*
+ * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
+ * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
+ * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
+ * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bio.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/device.h>
+#include <sys/ioctl.h>
+#include <sys/malloc.h>
+#include <sys/pool.h>
+#include <sys/kernel.h>
+#include <sys/disk.h>
+#include <sys/rwlock.h>
+#include <sys/queue.h>
+#include <sys/fcntl.h>
+#include <sys/disklabel.h>
+#include <sys/vnode.h>
+#include <sys/mount.h>
+#include <sys/sensors.h>
+#include <sys/stat.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/dkio.h>
+
+#include <crypto/cryptodev.h>
+#include <crypto/rijndael.h>
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/hmac.h>
+
+#include <scsi/scsi_all.h>
+#include <scsi/scsiconf.h>
+#include <scsi/scsi_disk.h>
+
+#include <dev/softraidvar.h>
+
+/*
+ * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
+ * to start failing when memory pressure kicks in. We can store this in the WU
+ * because we assert that only one ccb per WU will ever be active.
+ */
+struct sr_crypto_wu {
+	struct sr_workunit		 cr_wu;		/* Must be first. */
+	struct uio			 cr_uio;
+	struct iovec			 cr_iov;
+	struct cryptop	 		*cr_crp;
+	void				*cr_dmabuf;
+};
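+
+/*
+ * Because cr_wu is the first member, a struct sr_workunit pointer and a
+ * struct sr_crypto_wu pointer name the same storage, so a plain cast
+ * converts between them (see sr_crypto_prepare()).  A minimal sketch of
+ * the idiom, with a hypothetical caller:
+ *
+ *	struct sr_workunit *wu;		   allocated with sd->sd_wu_size
+ *	struct sr_crypto_wu *crwu = (struct sr_crypto_wu *)wu;
+ *	crwu->cr_uio.uio_iovcnt = 1;	   crypto-specific fields usable
+ */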
+
+
+struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
+int		sr_crypto_create_keys(struct sr_discipline *);
+int		sr_crypto_get_kdf(struct bioc_createraid *,
+		    struct sr_discipline *);
+int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
+int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
+int		sr_crypto_decrypt_key(struct sr_discipline *);
+int		sr_crypto_change_maskkey(struct sr_discipline *,
+		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
+int		sr_crypto_create(struct sr_discipline *,
+		    struct bioc_createraid *, int, int64_t);
+int		sr_crypto_assemble(struct sr_discipline *,
+		    struct bioc_createraid *, int, void *);
+int		sr_crypto_alloc_resources(struct sr_discipline *);
+void		sr_crypto_free_resources(struct sr_discipline *);
+int		sr_crypto_ioctl(struct sr_discipline *,
+		    struct bioc_discipline *);
+int		sr_crypto_meta_opt_handler(struct sr_discipline *,
+		    struct sr_meta_opt_hdr *);
+void		sr_crypto_write(struct cryptop *);
+int		sr_crypto_rw(struct sr_workunit *);
+int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
+void		sr_crypto_done(struct sr_workunit *);
+void		sr_crypto_read(struct cryptop *);
+void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
+		   u_int8_t *, int, u_char *);
+void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
+
+#ifdef SR_DEBUG0
+void		 sr_crypto_dumpkeys(struct sr_discipline *);
+#endif
+
+/* Discipline initialisation. */
+void
+sr_crypto_discipline_init(struct sr_discipline *sd)
+{
+	int i;
+
+	/* Fill out discipline members. */
+	sd->sd_wu_size = sizeof(struct sr_crypto_wu);
+	sd->sd_type = SR_MD_CRYPTO;
+	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
+	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
+	sd->sd_max_wu = SR_CRYPTO_NOWU;
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
+		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+
+	/* Setup discipline specific function pointers. */
+	sd->sd_alloc_resources = sr_crypto_alloc_resources;
+	sd->sd_assemble = sr_crypto_assemble;
+	sd->sd_create = sr_crypto_create;
+	sd->sd_free_resources = sr_crypto_free_resources;
+	sd->sd_ioctl_handler = sr_crypto_ioctl;
+	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
+	sd->sd_scsi_rw = sr_crypto_rw;
+	sd->sd_scsi_done = sr_crypto_done;
+}
+
+int
+sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
+    int no_chunk, int64_t coerced_size)
+{
+	struct sr_meta_opt_item	*omi;
+	int			rv = EINVAL;
+
+	if (no_chunk != 1) {
+		sr_error(sd->sd_sc, "%s requires exactly one chunk",
+		    sd->sd_name);
+		goto done;
+	}
+
+	if (coerced_size > SR_CRYPTO_MAXSIZE) {
+		sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
+		    sd->sd_name, coerced_size, SR_CRYPTO_MAXSIZE);
+		goto done;
+	}
+
+	/* Create crypto optional metadata. */
+	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som->som_type = SR_OPT_CRYPTO;
+	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
+	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
+	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
+	sd->sd_meta->ssdi.ssd_opt_no++;
+
+	sd->mds.mdd_crypto.key_disk = NULL;
+
+	if (bc->bc_key_disk != NODEV) {
+
+		/* Create a key disk. */
+		if (sr_crypto_get_kdf(bc, sd))
+			goto done;
+		sd->mds.mdd_crypto.key_disk =
+		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
+		if (sd->mds.mdd_crypto.key_disk == NULL)
+			goto done;
+		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;
+
+	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
+
+		/* No hint available yet. */
+		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
+		rv = EAGAIN;
+		goto done;
+
+	} else if (sr_crypto_get_kdf(bc, sd))
+		goto done;
+
+	/* Passphrase volumes cannot be automatically assembled. */
+	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
+		goto done;
+
+	sd->sd_meta->ssdi.ssd_size = coerced_size;
+
+	sr_crypto_create_keys(sd);
+
+	sd->sd_max_ccb_per_wu = no_chunk;
+
+	rv = 0;
+done:
+	return (rv);
+}
+
+int
+sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
+    int no_chunk, void *data)
+{
+	int	rv = EINVAL;
+
+	sd->mds.mdd_crypto.key_disk = NULL;
+
+	/* Crypto optional metadata must already exist... */
+	if (sd->mds.mdd_crypto.scr_meta == NULL)
+		goto done;
+
+	if (data != NULL) {
+		/* Kernel already has mask key. */
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
+		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+	} else if (bc->bc_key_disk != NODEV) {
+		/* Read the mask key from the key disk. */
+		sd->mds.mdd_crypto.key_disk =
+		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
+		if (sd->mds.mdd_crypto.key_disk == NULL)
+			goto done;
+	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
+		/* provide userland with kdf hint */
+		if (bc->bc_opaque == NULL)
+			goto done;
+
+		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
+		    bc->bc_opaque_size)
+			goto done;
+
+		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    bc->bc_opaque, bc->bc_opaque_size))
+			goto done;
+
+		/* we're done */
+		bc->bc_opaque_status = BIOC_SOINOUT_OK;
+		rv = EAGAIN;
+		goto done;
+	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
+		/* get kdf with maskkey from userland */
+		if (sr_crypto_get_kdf(bc, sd))
+			goto done;
+	} else
+		goto done;
+
+	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;
+
+	rv = 0;
+done:
+	return (rv);
+}
+
+struct sr_crypto_wu *
+sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
+{
+	struct scsi_xfer	*xs = wu->swu_xs;
+	struct sr_discipline	*sd = wu->swu_dis;
+	struct sr_crypto_wu	*crwu;
+	struct cryptodesc	*crd;
+	int			flags, i, n;
+	daddr_t			blkno;
+	u_int			keyndx;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
+	    DEVNAME(sd->sd_sc), wu, encrypt);
+
+	crwu = (struct sr_crypto_wu *)wu;
+	crwu->cr_uio.uio_iovcnt = 1;
+	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
+	if (xs->flags & SCSI_DATA_OUT) {
+		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
+		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
+	} else
+		crwu->cr_uio.uio_iov->iov_base = xs->data;
+
+	blkno = wu->swu_blk_start;
+	n = xs->datalen >> DEV_BSHIFT;
+
+	/*
+	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
+	 * Since this transfer may be smaller than that, trim the number
+	 * of crypto desc structures to exactly what this request needs.
+	 */
+	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
+	crwu->cr_crp->crp_ndesc = n;
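+	/*
+	 * For example, assuming MAXPHYS is 64KB and DEV_BSHIFT is 9
+	 * (512-byte blocks), a full-sized 64KB transfer uses all
+	 * MAXPHYS >> DEV_BSHIFT == 128 preallocated descs, while an
+	 * 8KB transfer trims crp_ndesc to 16.
+	 */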
+	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
+	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
+
+	/*
+	 * Select crypto session based on block number.
+	 *
+	 * XXX - this does not handle the case where the read/write spans
+	 * across a different key blocks (e.g. 0.5TB boundary). Currently
+	 * this is already broken by the use of scr_key[0] below.
+	 */
+	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
+	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
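+	/*
+	 * Assuming SR_CRYPTO_KEY_BLKSHIFT is 30 (which matches the 0.5TB
+	 * boundary mentioned above), blkno 0 through 2^30 - 1 selects
+	 * session 0, the next 2^30 blocks select session 1, and so on.
+	 */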
+
+	crwu->cr_crp->crp_opaque = crwu;
+	crwu->cr_crp->crp_ilen = xs->datalen;
+	crwu->cr_crp->crp_alloctype = M_DEVBUF;
+	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
+	crwu->cr_crp->crp_buf = &crwu->cr_uio;
+	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
+		crd = &crwu->cr_crp->crp_desc[i];
+		crd->crd_skip = i << DEV_BSHIFT;
+		crd->crd_len = DEV_BSIZE;
+		crd->crd_inject = 0;
+		crd->crd_flags = flags;
+		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
+		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
+		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
+		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
+	}
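+	/*
+	 * Each DEV_BSIZE block is thus its own descriptor with its disk
+	 * block number as IV, so blocks can be encrypted and decrypted
+	 * independently of their neighbours.
+	 */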
+
+	return (crwu);
+}
+
+int
+sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
+{
+	int			rv = EINVAL;
+	struct sr_crypto_kdfinfo *kdfinfo;
+
+	if (!(bc->bc_opaque_flags & BIOC_SOIN))
+		return (rv);
+	if (bc->bc_opaque == NULL)
+		return (rv);
+	if (bc->bc_opaque_size != sizeof(*kdfinfo))
+		return (rv);
+
+	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
+	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
+		goto out;
+
+	if (kdfinfo->len != bc->bc_opaque_size)
+		goto out;
+
+	/* copy KDF hint to disk meta data */
+	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
+		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
+		    kdfinfo->genkdf.len)
+			goto out;
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
+	}
+
+	/* copy mask key to run-time meta data */
+	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
+		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
+		    sizeof(kdfinfo->maskkey))
+			goto out;
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
+		    sizeof(kdfinfo->maskkey));
+	}
+
+	bc->bc_opaque_status = BIOC_SOINOUT_OK;
+	rv = 0;
+out:
+	explicit_bzero(kdfinfo, bc->bc_opaque_size);
+	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);
+
+	return (rv);
+}
+
+int
+sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
+{
+	rijndael_ctx		ctx;
+	int			i, rv = 1;
+
+	switch (alg) {
+	case SR_CRYPTOM_AES_ECB_256:
+		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
+			goto out;
+		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
+			rijndael_encrypt(&ctx, &p[i], &c[i]);
+		rv = 0;
+		break;
+	default:
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
+		    "softraid", alg);
+		rv = -1;
+		goto out;
+	}
+
+out:
+	explicit_bzero(&ctx, sizeof(ctx));
+	return (rv);
+}
+
+int
+sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
+{
+	rijndael_ctx		ctx;
+	int			i, rv = 1;
+
+	switch (alg) {
+	case SR_CRYPTOM_AES_ECB_256:
+		if (rijndael_set_key(&ctx, key, 256) != 0)
+			goto out;
+		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
+			rijndael_decrypt(&ctx, &c[i], &p[i]);
+		rv = 0;
+		break;
+	default:
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
+		    "softraid", alg);
+		rv = -1;
+		goto out;
+	}
+
+out:
+	explicit_bzero(&ctx, sizeof(ctx));
+	return (rv);
+}
+
+void
+sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
+    u_int8_t *key, int key_size, u_char *check_digest)
+{
+	u_char			check_key[SHA1_DIGEST_LENGTH];
+	HMAC_SHA1_CTX		hmacctx;
+	SHA1_CTX		shactx;
+
+	bzero(check_key, sizeof(check_key));
+	bzero(&hmacctx, sizeof(hmacctx));
+	bzero(&shactx, sizeof(shactx));
+
+	/* k = SHA1(mask_key) */
+	SHA1Init(&shactx);
+	SHA1Update(&shactx, maskkey, maskkey_size);
+	SHA1Final(check_key, &shactx);
+
+	/* mac = HMAC_SHA1_k(unencrypted key) */
+	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
+	HMAC_SHA1_Update(&hmacctx, key, key_size);
+	HMAC_SHA1_Final(check_digest, &hmacctx);
+
+	explicit_bzero(check_key, sizeof(check_key));
+	explicit_bzero(&hmacctx, sizeof(hmacctx));
+	explicit_bzero(&shactx, sizeof(shactx));
+}
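+
+/*
+ * The digest computed above is stored in the crypto metadata as
+ * chk_hmac_sha1.sch_mac when keys are created or re-masked, and is
+ * recomputed in sr_crypto_decrypt_key() to verify the mask key (and
+ * hence the passphrase or key disk) before the disk keys are trusted.
+ */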
+
+int
+sr_crypto_decrypt_key(struct sr_discipline *sd)
+{
+	u_char			check_digest[SHA1_DIGEST_LENGTH];
+	int			rv = 1;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));
+
+	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
+		goto out;
+
+	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
+	    (u_char *)sd->mds.mdd_crypto.scr_key,
+	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
+		goto out;
+
+#ifdef SR_DEBUG0
+	sr_crypto_dumpkeys(sd);
+#endif
+
+	/* Check that the key decrypted properly. */
+	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
+	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key),
+	    check_digest);
+	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	    check_digest, sizeof(check_digest)) != 0) {
+		explicit_bzero(sd->mds.mdd_crypto.scr_key,
+		    sizeof(sd->mds.mdd_crypto.scr_key));
+		goto out;
+	}
+
+	rv = 0; /* Success */
+out:
+	/* we don't need the mask key anymore */
+	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+
+	explicit_bzero(check_digest, sizeof(check_digest));
+
+	return rv;
+}
+
+int
+sr_crypto_create_keys(struct sr_discipline *sd)
+{
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
+	    DEVNAME(sd->sd_sc));
+
+	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
+		return (1);
+
+	/* XXX allow user to specify */
+	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;
+
+	/* generate crypto keys */
+	arc4random_buf(sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key));
+
+	/* Mask the disk keys. */
+	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
+	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
+	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
+	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);
+
+	/* Prepare key decryption check code. */
+	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
+	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
+	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key),
+	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);
+
+	/* Erase the plaintext disk keys */
+	explicit_bzero(sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key));
+
+#ifdef SR_DEBUG0
+	sr_crypto_dumpkeys(sd);
+#endif
+
+	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
+	    SR_CRYPTOF_KDFHINT;
+
+	return (0);
+}
+
+int
+sr_crypto_change_maskkey(struct sr_discipline *sd,
+    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
+{
+	u_char			check_digest[SHA1_DIGEST_LENGTH];
+	u_char			*c, *p = NULL;
+	size_t			ksz;
+	int			rv = 1;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
+	    DEVNAME(sd->sd_sc));
+
+	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
+		goto out;
+
+	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
+	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
+	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
+	if (p == NULL)
+		goto out;
+
+	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
+		goto out;
+
+#ifdef SR_DEBUG0
+	sr_crypto_dumpkeys(sd);
+#endif
+
+	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
+	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
+	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	    check_digest, sizeof(check_digest)) != 0) {
+		sr_error(sd->sd_sc, "incorrect key or passphrase");
+		rv = EPERM;
+		goto out;
+	}
+
+	/* Copy new KDF hint to metadata, if supplied. */
+	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
+		if (kdfinfo2->genkdf.len >
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
+			goto out;
+		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
+	}
+
+	/* Mask the disk keys. */
+	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
+	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
+	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
+		goto out;
+
+	/* Prepare key decryption check code. */
+	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
+	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
+	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
+	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);
+
+	/* Copy new encrypted key and HMAC to metadata. */
+	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
+	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));
+
+	rv = 0; /* Success */
+
+out:
+	if (p) {
+		explicit_bzero(p, ksz);
+		free(p, M_DEVBUF, ksz);
+	}
+
+	explicit_bzero(check_digest, sizeof(check_digest));
+	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
+	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));
+
+	return (rv);
+}
+
+struct sr_chunk *
+sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
+{
+	struct sr_softc		*sc = sd->sd_sc;
+	struct sr_discipline	*fakesd = NULL;
+	struct sr_metadata	*sm = NULL;
+	struct sr_meta_chunk    *km;
+	struct sr_meta_opt_item *omi = NULL;
+	struct sr_meta_keydisk	*skm;
+	struct sr_chunk		*key_disk = NULL;
+	struct disklabel	label;
+	struct vnode		*vn;
+	char			devname[32];
+	int			c, part, open = 0;
+
+	/*
+	 * Create a metadata structure on the key disk and store
+	 * keying material in the optional metadata.
+	 */
+
+	sr_meta_getdevname(sc, dev, devname, sizeof(devname));
+
+	/* Make sure chunk is not already in use. */
+	c = sr_chunk_in_use(sc, dev);
+	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
+		sr_error(sc, "%s is already in use", devname);
+		goto done;
+	}
+
+	/* Open device. */
+	if (bdevvp(dev, &vn)) {
+		sr_error(sc, "cannot open key disk %s", devname);
+		goto done;
+	}
+	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
+		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
+		    "open %s\n", DEVNAME(sc), devname);
+		vput(vn);
+		goto done;
+	}
+	open = 1; /* close dev on error */
+
+	/* Get partition details. */
+	part = DISKPART(dev);
+	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
+	    FREAD, NOCRED, curproc)) {
+		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
+		    "failed\n", DEVNAME(sc));
+		goto done;
+	}
+	if (label.d_partitions[part].p_fstype != FS_RAID) {
+		sr_error(sc, "%s partition not of type RAID (%d)",
+		    devname, label.d_partitions[part].p_fstype);
+		goto done;
+	}
+
+	/*
+	 * Create and populate chunk metadata.
+	 */
+
+	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
+	km = &key_disk->src_meta;
+
+	key_disk->src_dev_mm = dev;
+	key_disk->src_vn = vn;
+	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
+	key_disk->src_size = 0;
+
+	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
+	km->scmi.scm_chunk_id = 0;
+	km->scmi.scm_size = 0;
+	km->scmi.scm_coerced_size = 0;
+	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
+	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
+	    sizeof(struct sr_uuid));
+
+	sr_checksum(sc, km, &km->scm_checksum,
+	    sizeof(struct sr_meta_chunk_invariant));
+
+	km->scm_status = BIOC_SDONLINE;
+
+	/*
+	 * Create and populate our own discipline and metadata.
+	 */
+
+	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
+	sm->ssdi.ssd_magic = SR_MAGIC;
+	sm->ssdi.ssd_version = SR_META_VERSION;
+	sm->ssd_ondisk = 0;
+	sm->ssdi.ssd_vol_flags = 0;
+	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
+	    sizeof(struct sr_uuid));
+	sm->ssdi.ssd_chunk_no = 1;
+	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
+	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
+	sm->ssdi.ssd_size = 0;
+	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
+	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
+	    "SR %s", "KEYDISK");
+	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
+	    "%03d", SR_META_VERSION);
+
+	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	fakesd->sd_sc = sd->sd_sc;
+	fakesd->sd_meta = sm;
+	fakesd->sd_meta_type = SR_META_F_NATIVE;
+	fakesd->sd_vol_status = BIOC_SVONLINE;
+	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
+	SLIST_INIT(&fakesd->sd_meta_opt);
+
+	/* Add chunk to volume. */
+	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	fakesd->sd_vol.sv_chunks[0] = key_disk;
+	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
+	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);
+
+	/* Generate mask key. */
+	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+
+	/* Copy mask key to optional metadata area. */
+	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+	omi->omi_som->som_type = SR_OPT_KEYDISK;
+	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
+	skm = (struct sr_meta_keydisk *)omi->omi_som;
+	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
+	    sizeof(skm->skm_maskkey));
+	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
+	fakesd->sd_meta->ssdi.ssd_opt_no++;
+
+	/* Save metadata. */
+	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
+		sr_error(sc, "could not save metadata to %s", devname);
+		goto fail;
+	}
+
+	goto done;
+
+fail:
+	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
+	key_disk = NULL;
+
+done:
+	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
+	if (fakesd && fakesd->sd_vol.sv_chunks)
+		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
+		    sizeof(struct sr_chunk *));
+	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
+	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
+	if (open) {
+		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
+		vput(vn);
+	}
+
+	return key_disk;
+}
+
+struct sr_chunk *
+sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
+{
+	struct sr_softc		*sc = sd->sd_sc;
+	struct sr_metadata	*sm = NULL;
+	struct sr_meta_opt_item *omi, *omi_next;
+	struct sr_meta_opt_hdr	*omh;
+	struct sr_meta_keydisk	*skm;
+	struct sr_meta_opt_head som;
+	struct sr_chunk		*key_disk = NULL;
+	struct disklabel	label;
+	struct vnode		*vn = NULL;
+	char			devname[32];
+	int			c, part, open = 0;
+
+	/*
+	 * Load a key disk and load keying material into memory.
+	 */
+
+	SLIST_INIT(&som);
+
+	sr_meta_getdevname(sc, dev, devname, sizeof(devname));
+
+	/* Make sure chunk is not already in use. */
+	c = sr_chunk_in_use(sc, dev);
+	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
+		sr_error(sc, "%s is already in use", devname);
+		goto done;
+	}
+
+	/* Open device. */
+	if (bdevvp(dev, &vn)) {
+		sr_error(sc, "cannot open key disk %s", devname);
+		goto done;
+	}
+	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
+		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
+		    "open %s\n", DEVNAME(sc), devname);
+		vput(vn);
+		goto done;
+	}
+	open = 1; /* close dev on error */
+
+	/* Get partition details. */
+	part = DISKPART(dev);
+	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
+	    NOCRED, curproc)) {
+		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
+		    "failed\n", DEVNAME(sc));
+		goto done;
+	}
+	if (label.d_partitions[part].p_fstype != FS_RAID) {
+		sr_error(sc, "%s partition not of type RAID (%d)",
+		    devname, label.d_partitions[part].p_fstype);
+		goto done;
+	}
+
+	/*
+	 * Read and validate key disk metadata.
+	 */
+	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
+	if (sr_meta_native_read(sd, dev, sm, NULL)) {
+		sr_error(sc, "native bootprobe could not read native metadata");
+		goto done;
+	}
+
+	if (sr_meta_validate(sd, dev, sm, NULL)) {
+		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
+		    DEVNAME(sc));
+		goto done;
+	}
+
+	/* Make sure this is a key disk. */
+	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
+		sr_error(sc, "%s is not a key disk", devname);
+		goto done;
+	}
+
+	/* Construct key disk chunk. */
+	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
+	key_disk->src_dev_mm = dev;
+	key_disk->src_vn = vn;
+	key_disk->src_size = 0;
+
+	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
+	    sizeof(key_disk->src_meta));
+
+	/* Read mask key from optional metadata. */
+	sr_meta_opt_load(sc, sm, &som);
+	SLIST_FOREACH(omi, &som, omi_link) {
+		omh = omi->omi_som;
+		if (omh->som_type == SR_OPT_KEYDISK) {
+			skm = (struct sr_meta_keydisk *)omh;
+			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
+			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+		} else if (omh->som_type == SR_OPT_CRYPTO) {
+			/* Original keydisk format with key in crypto area. */
+			memcpy(sd->mds.mdd_crypto.scr_maskkey,
+			    omh + sizeof(struct sr_meta_opt_hdr),
+			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
+		}
+	}
+
+	open = 0;
+
+done:
+	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
+		omi_next = SLIST_NEXT(omi, omi_link);
+		free(omi->omi_som, M_DEVBUF, 0);
+		free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
+	}
+
+	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
+
+	if (vn && open) {
+		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
+		vput(vn);
+	}
+
+	return key_disk;
+}
+
+static void
+sr_crypto_free_sessions(struct sr_discipline *sd)
+{
+	u_int			i;
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		if (sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1) {
+			crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
+			sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+		}
+	}
+}
+
+int
+sr_crypto_alloc_resources(struct sr_discipline *sd)
+{
+	struct sr_workunit	*wu;
+	struct sr_crypto_wu	*crwu;
+	struct cryptoini	cri;
+	u_int			num_keys, i;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
+	    DEVNAME(sd->sd_sc));
+
+	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
+	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+	case SR_CRYPTOA_AES_XTS_128:
+		sd->mds.mdd_crypto.scr_klen = 256;
+		break;
+	case SR_CRYPTOA_AES_XTS_256:
+		sd->mds.mdd_crypto.scr_klen = 512;
+		break;
+	default:
+		sr_error(sd->sd_sc, "unknown crypto algorithm");
+		return (EINVAL);
+	}
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
+		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+
+	if (sr_wu_alloc(sd)) {
+		sr_error(sd->sd_sc, "unable to allocate work units");
+		return (ENOMEM);
+	}
+	if (sr_ccb_alloc(sd)) {
+		sr_error(sd->sd_sc, "unable to allocate CCBs");
+		return (ENOMEM);
+	}
+	if (sr_crypto_decrypt_key(sd)) {
+		sr_error(sd->sd_sc, "incorrect key or passphrase");
+		return (EPERM);
+	}
+
+	/*
+	 * For each work unit allocate the uio, iovec and crypto structures.
+	 * These have to be allocated now because during runtime we cannot
+	 * fail an allocation without failing the I/O (which can cause real
+	 * problems).
+	 */
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
+		crwu->cr_uio.uio_iov = &crwu->cr_iov;
+		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
+		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
+		if (crwu->cr_crp == NULL)
+			return (ENOMEM);
+	}
+
+	memset(&cri, 0, sizeof(cri));
+	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
+	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;
+
+	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
+	num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
+	    SR_CRYPTO_KEY_BLKSHIFT) + 1;
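+	/*
+	 * The subtract-one/add-one form rounds up: a volume of exactly
+	 * 2^SR_CRYPTO_KEY_BLKSHIFT blocks needs one session, one block
+	 * more needs two.
+	 */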
+	if (num_keys > SR_CRYPTO_MAXKEYS)
+		return (EFBIG);
+	for (i = 0; i < num_keys; i++) {
+		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
+		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
+		    &cri, 0) != 0) {
+			sr_crypto_free_sessions(sd);
+			return (EINVAL);
+		}
+	}
+
+	sr_hotplug_register(sd, sr_crypto_hotplug);
+
+	return (0);
+}
+
+void
+sr_crypto_free_resources(struct sr_discipline *sd)
+{
+	struct sr_workunit	*wu;
+	struct sr_crypto_wu	*crwu;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
+	    DEVNAME(sd->sd_sc));
+
+	if (sd->mds.mdd_crypto.key_disk != NULL) {
+		explicit_bzero(sd->mds.mdd_crypto.key_disk,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
+		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
+	}
+
+	sr_hotplug_unregister(sd, sr_crypto_hotplug);
+
+	sr_crypto_free_sessions(sd);
+
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
+		if (crwu->cr_dmabuf)
+			dma_free(crwu->cr_dmabuf, MAXPHYS);
+		if (crwu->cr_crp)
+			crypto_freereq(crwu->cr_crp);
+	}
+
+	sr_wu_free(sd);
+	sr_ccb_free(sd);
+}
+
+int
+sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
+{
+	struct sr_crypto_kdfpair kdfpair;
+	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
+	int			size, rv = 1;
+
+	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
+	    DEVNAME(sd->sd_sc), bd->bd_cmd);
+
+	switch (bd->bd_cmd) {
+	case SR_IOCTL_GET_KDFHINT:
+
+		/* Get KDF hint for userland. */
+		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
+		if (bd->bd_data == NULL || bd->bd_size > size)
+			goto bad;
+		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    bd->bd_data, bd->bd_size))
+			goto bad;
+
+		rv = 0;
+
+		break;
+
+	case SR_IOCTL_CHANGE_PASSPHRASE:
+
+		/* Attempt to change passphrase. */
+
+		size = sizeof(kdfpair);
+		if (bd->bd_data == NULL || bd->bd_size > size)
+			goto bad;
+		if (copyin(bd->bd_data, &kdfpair, size))
+			goto bad;
+
+		size = sizeof(kdfinfo1);
+		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
+			goto bad;
+		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
+			goto bad;
+
+		size = sizeof(kdfinfo2);
+		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
+			goto bad;
+		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
+			goto bad;
+
+		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
+			goto bad;
+
+		/* Save metadata to disk. */
+		rv = sr_meta_save(sd, SR_META_DIRTY);
+
+		break;
+	}
+
+bad:
+	explicit_bzero(&kdfpair, sizeof(kdfpair));
+	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
+	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));
+
+	return (rv);
+}
+
+int
+sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
+{
+	int rv = EINVAL;
+
+	if (om->som_type == SR_OPT_CRYPTO) {
+		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
+		rv = 0;
+	}
+
+	return (rv);
+}
+
+int
+sr_crypto_rw(struct sr_workunit *wu)
+{
+	struct sr_crypto_wu	*crwu;
+	daddr_t			blkno;
+	int			rv = 0;
+
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
+	    DEVNAME(wu->swu_dis->sd_sc), wu);
+
+	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
+		return (1);
+
+	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
+		crwu = sr_crypto_prepare(wu, 1);
+		crwu->cr_crp->crp_callback = sr_crypto_write;
+		rv = crypto_dispatch(crwu->cr_crp);
+		if (rv == 0)
+			rv = crwu->cr_crp->crp_etype;
+	} else
+		rv = sr_crypto_dev_rw(wu, NULL);
+
+	return (rv);
+}
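+
+/*
+ * I/O flow: for a write, sr_crypto_rw() stages the data in cr_dmabuf
+ * and dispatches the encryption; the crypto callback sr_crypto_write()
+ * then issues the disk I/O via sr_crypto_dev_rw().  For a read,
+ * sr_crypto_dev_rw() issues the disk I/O directly and sr_crypto_done()
+ * later dispatches the decryption, whose callback sr_crypto_read()
+ * completes the transfer with sr_scsi_done().
+ */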
+
+void
+sr_crypto_write(struct cryptop *crp)
+{
+	struct sr_crypto_wu	*crwu = crp->crp_opaque;
+	struct sr_workunit	*wu = &crwu->cr_wu;
+	int			s;
+
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
+	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
+
+	if (crp->crp_etype) {
+		/* fail io */
+		wu->swu_xs->error = XS_DRIVER_STUFFUP;
+		s = splbio();
+		sr_scsi_done(wu->swu_dis, wu->swu_xs);
+		splx(s);
+	}
+
+	sr_crypto_dev_rw(wu, crwu);
+}
+
+int
+sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
+{
+	struct sr_discipline	*sd = wu->swu_dis;
+	struct scsi_xfer	*xs = wu->swu_xs;
+	struct sr_ccb		*ccb;
+	struct uio		*uio;
+	daddr_t			blkno;
+
+	blkno = wu->swu_blk_start;
+
+	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
+	if (!ccb) {
+		/* should never happen but handle more gracefully */
+		printf("%s: %s: too many ccbs queued\n",
+		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
+		goto bad;
+	}
+	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
+		uio = crwu->cr_crp->crp_buf;
+		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
+		ccb->ccb_opaque = crwu;
+	}
+	sr_wu_enqueue_ccb(wu, ccb);
+	sr_schedule_wu(wu);
+
+	return (0);
+
+bad:
+	/* wu is unwound by sr_wu_put */
+	if (crwu)
+		crwu->cr_crp->crp_etype = EINVAL;
+	return (1);
+}
+
+void
+sr_crypto_done(struct sr_workunit *wu)
+{
+	struct scsi_xfer	*xs = wu->swu_xs;
+	struct sr_crypto_wu	*crwu;
+	int			s;
+
+	/* If this was a successful read, initiate decryption of the data. */
+	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
+		crwu = sr_crypto_prepare(wu, 0);
+		crwu->cr_crp->crp_callback = sr_crypto_read;
+		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
+		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
+		crypto_dispatch(crwu->cr_crp);
+		return;
+	}
+
+	s = splbio();
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
+	splx(s);
+}
+
+void
+sr_crypto_read(struct cryptop *crp)
+{
+	struct sr_crypto_wu	*crwu = crp->crp_opaque;
+	struct sr_workunit	*wu = &crwu->cr_wu;
+	int			s;
+
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
+	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
+
+	if (crp->crp_etype)
+		wu->swu_xs->error = XS_DRIVER_STUFFUP;
+
+	s = splbio();
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
+	splx(s);
+}
+
+void
+sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
+{
+	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
+	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
+}
+
+#ifdef SR_DEBUG0
+void
+sr_crypto_dumpkeys(struct sr_discipline *sd)
+{
+	int			i, j;
+
+	printf("sr_crypto_dumpkeys:\n");
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		printf("\tscm_key[%d]: 0x", i);
+		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
+			printf("%02x",
+			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
+		}
+		printf("\n");
+	}
+	printf("sr_crypto_dumpkeys: runtime data keys:\n");
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		printf("\tscr_key[%d]: 0x", i);
+		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
+			printf("%02x",
+			    sd->mds.mdd_crypto.scr_key[i][j]);
+		}
+		printf("\n");
+	}
+}
+#endif	/* SR_DEBUG0 */