mirror of https://github.com/OpenIPC/firmware.git
diff -drupN a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
--- a/drivers/md/dm-crypt.c	2018-08-06 17:23:04.000000000 +0300
+++ b/drivers/md/dm-crypt.c	2022-06-12 05:28:14.000000000 +0300
@@ -178,6 +178,7 @@ struct crypt_config {
 	unsigned int key_parts;      /* independent parts in key buffer */
 	unsigned int key_extra_size; /* additional keys length */
+	bool emmc_sup_crypto;	/* device is ciphered by the eMMC inline crypto engine */
 	u8 key[0];
 };
 
 #define MIN_IOS 64
@@ -185,6 +186,10 @@ struct crypt_config {
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
+#ifdef CONFIG_SUNXI_EMCE
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async);
+extern int sunxi_emce_set_key(const u8 *mkey, u32 len);
+#endif
 
 /*
  * Use this to access cipher attributes that are the same for each CPU.
@@ -1111,13 +1116,20 @@ static void crypt_endio(struct bio *clon
 	/*
 	 * free the processed pages
 	 */
+#ifndef CONFIG_SUNXI_EMCE
 	if (rw == WRITE)
-		crypt_free_buffer_pages(cc, clone);
-
+#else
+	if ((!(cc->emmc_sup_crypto)) && (rw == WRITE))
+#endif
+		crypt_free_buffer_pages(cc, clone);
 	error = clone->bi_error;
 	bio_put(clone);
 
+#ifndef CONFIG_SUNXI_EMCE
 	if (rw == READ && !error) {
+#else
+	if ((!(cc->emmc_sup_crypto)) && (rw == READ && !error)) {
+#endif
 		kcryptd_queue_crypt(io);
 		return;
 	}
@@ -1138,7 +1150,11 @@ static void clone_init(struct dm_crypt_i
 	bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
 }
 
+#ifndef CONFIG_SUNXI_EMCE
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+#else
+static int kcryptd_io_only(struct dm_crypt_io *io, gfp_t gfp)
+#endif
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
@@ -1161,6 +1177,12 @@ static int kcryptd_io_read(struct dm_cry
 	generic_make_request(clone);
 	return 0;
 }
+#ifdef CONFIG_SUNXI_EMCE
+static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+{
+	return kcryptd_io_only(io, gfp);
+}
+#endif
 
 static void kcryptd_io_read_work(struct work_struct *work)
 {
@@ -1182,9 +1204,19 @@ static void kcryptd_queue_read(struct dm
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
+#ifdef CONFIG_SUNXI_EMCE
+	if ((io->cc->emmc_sup_crypto)) {
+		crypt_inc_pending(io);
+		if (kcryptd_io_only(io, GFP_NOIO))
+			io->error = -ENOMEM;
+		crypt_dec_pending(io);
+	} else
+#endif
+	{
 	struct bio *clone = io->ctx.bio_out;
 
 	generic_make_request(clone);
+	}
 }
 
 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ -1727,6 +1759,25 @@ bad_mem:
 	return -ENOMEM;
 }
 
+#ifdef CONFIG_SUNXI_EMCE
+static void kcryptd_io_read_handle(struct dm_crypt_io *io)
+{
+	if (kcryptd_io_read(io, GFP_NOWAIT))
+		kcryptd_queue_read(io);
+}
+static void kcryptd_io_write_handle(struct dm_crypt_io *io)
+{
+	if (io->cc->emmc_sup_crypto)
+		kcryptd_queue_read(io);
+	else
+		kcryptd_queue_crypt(io);
+}
+int sunxi_crypt_need_crypt(struct bio *bio)
+{
+	return bio->bi_end_io == crypt_endio;
+}
+EXPORT_SYMBOL_GPL(sunxi_crypt_need_crypt);
+#endif
 /*
  * Construct an encryption mapping:
  * <cipher> <key> <iv_offset> <dev_path> <start>
@@ -1737,7 +1788,7 @@ static int crypt_ctr(struct dm_target *t
 	unsigned int key_size, opt_params;
 	unsigned long long tmpll;
 	int ret;
-	size_t iv_size_padding;
+	size_t iv_size_padding = 0;
 	struct dm_arg_set as;
 	const char *opt_string;
 	char dummy;
@@ -1761,11 +1812,54 @@ static int crypt_ctr(struct dm_target *t
 	cc->key_size = key_size;
 
 	ti->private = cc;
-	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
-	if (ret < 0)
-		goto bad;
+	if (strcmp(argv[3], "/dev/block/by-name/UDISK")) {
+		ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
+		if (ret < 0)
+			goto bad;
+	} else {
+#ifndef CONFIG_SUNXI_EMCE
+		ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
+		if (ret < 0)
+			goto bad;
+#endif
+	}
 
-	cc->dmreq_start = sizeof(struct skcipher_request);
+#ifdef CONFIG_SUNXI_EMCE
+	if (!strcmp(argv[3], "/dev/block/by-name/UDISK")) {
+		cc->emmc_sup_crypto = 1;
+		/* set key for EMCE */
+		if (cc->key_size &&
+		    crypt_decode_key(cc->key, argv[1], cc->key_size) < 0) {
+			ti->error = "crypt decode key fail";
+			goto bad;
+		}
+		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+		if (sunxi_emce_set_key(cc->key, cc->key_size)) {
+			ti->error = "sunxi emce set key fail";
+			goto bad;
+		}
+	} else {
+		cc->emmc_sup_crypto = 0;
+		cc->dmreq_start = sizeof(struct skcipher_request);
+		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
+		cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+		if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN)
+			/* Allocate the padding exactly */
+			iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+				& crypto_skcipher_alignmask(any_tfm(cc));
+		else {
+			/*
+			 * If the cipher requires greater alignment than kmalloc
+			 * alignment, we don't know the exact position of the
+			 * initialization vector. We must assume worst case.
+			 */
+			iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
+		}
+	}
+#else
+	cc->dmreq_start = sizeof(struct skcipher_request);
 	cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
 	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
 
@@ -1781,6 +1875,7 @@ static int crypt_ctr(struct dm_target *t
 	 */
 		iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
 	}
+#endif
 
 	ret = -ENOMEM;
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1866,21 +1961,48 @@ static int crypt_ctr(struct dm_target *t
 	}
 
 	ret = -ENOMEM;
-	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
+	cc->io_queue = alloc_workqueue("kcryptd_io",
+				       WQ_HIGHPRI |
+				       WQ_MEM_RECLAIM,
+				       1);
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
+#ifdef CONFIG_MMC_EMCE
+	if (!(cc->emmc_sup_crypto)) {
+		if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+			cc->crypt_queue = alloc_workqueue("kcryptd",
+							  WQ_HIGHPRI |
+							  WQ_MEM_RECLAIM, 1);
+		else
+			cc->crypt_queue = alloc_workqueue("kcryptd",
+							  WQ_HIGHPRI |
+							  WQ_MEM_RECLAIM |
+							  WQ_UNBOUND,
+							  num_online_cpus());
+		if (!cc->crypt_queue) {
+			ti->error = "Couldn't create kcryptd queue";
+			goto bad;
+		}
+	}
+#else
 	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
-		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+		cc->crypt_queue = alloc_workqueue("kcryptd",
+						  WQ_HIGHPRI |
+						  WQ_MEM_RECLAIM, 1);
 	else
-		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+		cc->crypt_queue = alloc_workqueue("kcryptd",
+						  WQ_HIGHPRI |
+						  WQ_MEM_RECLAIM |
+						  WQ_UNBOUND,
 						  num_online_cpus());
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
 	}
+#endif
 
 	init_waitqueue_head(&cc->write_thread_wait);
 	cc->write_tree = RB_ROOT;
@@ -1934,11 +2056,18 @@ static int crypt_map(struct dm_target *t
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 	io->ctx.req = (struct skcipher_request *)(io + 1);
 
+#ifdef CONFIG_SUNXI_EMCE
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_io_read_handle(io);
+	else
+		kcryptd_io_write_handle(io);
+#else
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
 			kcryptd_queue_read(io);
 	} else
 		kcryptd_queue_crypt(io);
+#endif
 
 	return DM_MAPIO_SUBMITTED;
 }
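
Two stand-alone sketches for readers of this patch. Both are user-space models, not kernel code; the names pad_to_align, toy_io and route are illustrative and do not exist in dm-crypt or the sunxi tree.

First, the iv_size_padding arithmetic the patch keeps in the non-EMCE branch: -(x) & alignmask rounds x up to the next (alignmask + 1) boundary and yields the number of padding bytes needed to get there, so the per-request IV lands on a cipher-aligned address. A minimal check of that identity:

/* Model of the iv_size_padding expression above; pad_to_align() is an
 * illustrative helper, not a kernel function. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static size_t pad_to_align(size_t end, size_t alignmask)
{
	return -end & alignmask;	/* same form as the dm-crypt code */
}

int main(void)
{
	/* a request area ending at byte 92 with alignmask 15 (16-byte
	 * alignment) needs 4 padding bytes so the IV starts at byte 96 */
	assert(pad_to_align(92, 15) == 4);
	assert((92 + pad_to_align(92, 15)) % 16 == 0);
	assert(pad_to_align(96, 15) == 0);	/* already aligned */
	printf("padding for 92 @ mask 15: %zu\n", pad_to_align(92, 15));
	return 0;
}

Second, the bio routing crypt_map() gains under CONFIG_SUNXI_EMCE: when cc->emmc_sup_crypto is set (the UDISK target), both reads and writes are submitted as plain I/O and the eMMC Embedded Crypto Engine ciphers the data in flight, while crypt_endio skips the software decrypt queueing; otherwise the stock software paths run. A toy model of that decision:

/* Toy model of the crypt_map() routing added by this patch.
 * Types and names are simplified stand-ins for the kernel structures. */
#include <stdbool.h>
#include <stdio.h>

enum bio_dir { DIR_READ, DIR_WRITE };

struct toy_io {
	enum bio_dir dir;
	bool emmc_sup_crypto;	/* mirrors cc->emmc_sup_crypto */
};

static const char *route(const struct toy_io *io)
{
	if (io->emmc_sup_crypto)
		/* EMCE: pass-through submit; hardware ciphers the data */
		return io->dir == DIR_READ ? "read: plain submit"
					   : "write: plain submit via io_queue";
	return io->dir == DIR_READ ? "read: submit, then software decrypt"
				   : "write: software encrypt, then submit";
}

int main(void)
{
	const struct toy_io cases[] = {
		{ DIR_READ, false }, { DIR_WRITE, false },
		{ DIR_READ, true },  { DIR_WRITE, true },
	};
	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("emce=%d %-5s -> %s\n", cases[i].emmc_sup_crypto,
		       cases[i].dir == DIR_READ ? "READ" : "WRITE",
		       route(&cases[i]));
	return 0;
}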