diff -drupN a/block/blk-merge.c b/block/blk-merge.c
--- a/block/blk-merge.c	2018-08-06 17:23:04.000000000 +0300
+++ b/block/blk-merge.c	2022-06-12 05:28:14.000000000 +0300
@@ -11,6 +11,12 @@
 
 #include "blk.h"
 
+/*
+ * Supplied by the SUNXI EMCE code; assumed (from its use below) to
+ * return nonzero when the data in @bio must go through the inline
+ * crypto engine.
+ */
+extern int sunxi_crypt_need_crypt(struct bio *bio);
+
 static struct bio *blk_bio_discard_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs,
@@ -492,6 +494,85 @@ int blk_rq_map_sg(struct request_queue *
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
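+/*
+ * sunxi_blk_rq_map_sg - blk_rq_map_sg() variant that additionally tags
+ * requests whose data must pass through the SUNXI EMCE inline crypto
+ * engine.  Apart from the crypt_flags handling, the body mirrors
+ * blk_rq_map_sg() above.
+ */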
+int sunxi_blk_rq_map_sg(struct request_queue *q, struct request *rq,
+		  struct scatterlist *sglist)
+{
+	struct scatterlist *sg = NULL;
+	int nsegs = 0;
+	int crypt_flags = 0;
+
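+	/*
+	 * Decide up front whether this request's bio needs EMCE
+	 * encryption; compiled out unless both dm-crypt and the
+	 * EMCE driver are enabled.
+	 */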
+	if (rq->bio) {
+#if defined(CONFIG_DM_CRYPT) && defined(CONFIG_SUNXI_EMCE)
+		crypt_flags = sunxi_crypt_need_crypt(rq->bio);
+#endif
+		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
+	}
+
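+	/*
+	 * Tag the request for inline encryption by setting the most
+	 * significant bit of the first scatterlist entry's offset.  A
+	 * real page offset never reaches that bit, and the EMCE driver
+	 * is assumed to strip it again before the segment is mapped.
+	 */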
+	if (sg && crypt_flags)
+		sglist->offset |= (1U << ((sizeof(sglist->offset) << 3) - 1));
+
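+	/*
+	 * From here on this mirrors blk_rq_map_sg(): extend the last
+	 * segment so the transfer honours the queue's DMA pad mask.
+	 */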
+	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+		unsigned int pad_len =
+			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+
+		sg->length += pad_len;
+		rq->extra_len += pad_len;
+	}
+
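+	/*
+	 * Append the queue's drain buffer as one extra trailing
+	 * segment when the driver asks for it, zeroing it first on
+	 * writes so no stale data reaches the device.
+	 */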
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+		if (op_is_write(req_op(rq)))
+			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
+		sg_unmark_end(sg);
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+		rq->extra_len += q->dma_drain_size;
+	}
+
+	if (sg)
+		sg_mark_end(sg);
+
+	/*
+	 * Something must have gone wrong if the computed number of
+	 * segments exceeds the request's count of physical segments.
+	 */
+	WARN_ON(nsegs > rq->nr_phys_segments);
+
+	return nsegs;
+}
+EXPORT_SYMBOL(sunxi_blk_rq_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)