firmware/br-ext-chip-allwinner/board/v83x/kernel/patches/00000-arch_arm_mm_dma-mappi...

diff -drupN a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
--- a/arch/arm/mm/dma-mapping.c 2018-08-06 17:23:04.000000000 +0300
+++ b/arch/arm/mm/dma-mapping.c 2022-06-12 05:28:14.000000000 +0300
@@ -519,7 +519,7 @@ static int __dma_update_pte(pte_t *pte,
 	return 0;
 }
 
-static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+static void __maybe_unused __dma_remap(struct page *page, size_t size, pgprot_t prot)
 {
 	unsigned long start = (unsigned long) page_address(page);
 	unsigned end = start + size;
@@ -617,8 +617,8 @@ static void *__alloc_from_contiguous(str
 			return NULL;
 		}
 	} else {
-		__dma_remap(page, size, prot);
-		ptr = page_address(page);
+		ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+					prot, caller);
 	}
 
 out:
@@ -633,7 +633,7 @@ static void __free_from_contiguous(struc
 		if (PageHighMem(page))
 			__dma_free_remap(cpu_addr, size);
 		else
-			__dma_remap(page, size, PAGE_KERNEL);
+			dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	}
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
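
Note: the three hunks above stop rewriting the kernel linear mapping when a low-memory CMA buffer needs non-cacheable attributes. Instead of calling __dma_remap() to change page attributes in place, the allocation path now builds a separate VM_USERMAP alias with dma_common_contiguous_remap() and the free path tears it down with dma_common_free_remap(); __dma_remap() itself is kept but marked __maybe_unused. A minimal sketch of the helper pair being relied on, assuming the mainline ~4.9 signatures (the wrapper names below are illustrative, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

/* Illustrative wrappers, not in the patch: create and destroy a vmalloc-space
 * alias of already-allocated CMA pages with the requested pgprot, leaving the
 * linear mapping untouched. */
static void *cma_remap_example(struct page *page, size_t size, pgprot_t prot,
			       const void *caller)
{
	return dma_common_contiguous_remap(page, size, VM_USERMAP, prot, caller);
}

static void cma_unmap_example(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size, VM_USERMAP);
}
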
@@ -1720,43 +1720,33 @@ static int __map_sg_chunk(struct device
 			  bool is_coherent)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova, iova_base;
+	dma_addr_t iova;
 	int ret = 0;
-	unsigned int count;
-	struct scatterlist *s;
-	int prot;
+	int prot = __dma_direction_to_prot(dir);
 
 	size = PAGE_ALIGN(size);
 	*handle = DMA_ERROR_CODE;
 
-	iova_base = iova = __alloc_iova(mapping, size);
+	iova = __alloc_iova(mapping, size);
 	if (iova == DMA_ERROR_CODE)
 		return -ENOMEM;
 
-	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = page_to_phys(sg_page(s));
-		unsigned int len = PAGE_ALIGN(s->offset + s->length);
-
-		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
-
-		prot = __dma_direction_to_prot(dir);
-
-		ret = iommu_map(mapping->domain, iova, phys, len, prot);
-		if (ret < 0)
-			goto fail;
-		count += len >> PAGE_SHIFT;
-		iova += len;
+	if (iommu_map_sg(mapping->domain, iova, sg, size, prot) < size) {
+		pr_err("%s, %d, iommu_map_sg failed\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto fail;
 	}
-	*handle = iova_base;
+	*handle = iova;
 
 	return 0;
 fail:
-	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
-	__free_iova(mapping, iova_base, size);
+	__free_iova(mapping, iova, size);
 	return ret;
 }
 
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir);
+
 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		     enum dma_data_direction dir, unsigned long attrs,
 		     bool is_coherent)
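
Note: this hunk collapses the old per-page loop (cache maintenance, __dma_direction_to_prot(), one iommu_map() per page) into a single iommu_map_sg() call covering the whole chunk, and on failure simply releases the IOVA range. For reference, in the mainline ~4.9 API the fourth argument of iommu_map_sg() is the number of scatterlist entries and the return value is the number of bytes actually mapped; the sketch below uses that convention with an illustrative helper name, and the vendor IOMMU driver may differ:

#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <asm/dma-iommu.h>

/* Illustrative helper, not in the patch: map a scatterlist into an already
 * allocated IOVA window and unwind any partial mapping on failure. */
static int map_sg_chunk_example(struct dma_iommu_mapping *mapping,
				dma_addr_t iova, struct scatterlist *sg,
				unsigned int nents, size_t size, int prot)
{
	size_t mapped = iommu_map_sg(mapping->domain, iova, sg, nents, prot);

	if (mapped < size) {
		if (mapped)
			iommu_unmap(mapping->domain, iova, mapped);
		return -ENOMEM;
	}
	return 0;
}
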
@@ -1767,6 +1757,9 @@ static int __iommu_map_sg(struct device
 	unsigned int size = s->offset + s->length;
 	unsigned int max = dma_get_max_seg_size(dev);
 
+	if (!is_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arm_iommu_sync_sg_for_device(dev, sg, nents, dir);
+
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
 
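
Note: the per-page __dma_page_cpu_to_dev() calls removed from __map_sg_chunk() are replaced by one arm_iommu_sync_sg_for_device() pass over the whole list before mapping, which is why the forward declaration was added above (the function is defined later in this file). For context, its mainline ~4.9 shape is roughly the loop below; this is shown only to make the equivalence clear and the vendor tree may differ:

/* Mainline-style arm_iommu_sync_sg_for_device(): the same cpu-to-device
 * cache maintenance the deleted loop used to perform per segment. */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
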
@@ -2342,6 +2335,7 @@ static struct dma_map_ops *arm_get_iommu
 	return coherent ? &iommu_coherent_ops : &iommu_ops;
 }
 
+static struct dma_iommu_mapping *sunxi_mapping;
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 				    const struct iommu_ops *iommu)
 {
@@ -2350,12 +2344,16 @@ static bool arm_setup_iommu_dma_ops(stru
 	if (!iommu)
 		return false;
 
-	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
-	if (IS_ERR(mapping)) {
-		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
-				size, dev_name(dev));
-		return false;
-	}
+	if (!sunxi_mapping) {
+		mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+		if (IS_ERR(mapping)) {
+			pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
+				size, dev_name(dev));
+			return false;
+		}
+		sunxi_mapping = mapping;
+	} else
+		mapping = sunxi_mapping;
 
 	if (__arm_iommu_attach_device(dev, mapping)) {
 		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
@@ -2415,3 +2413,4 @@ void arch_teardown_dma_ops(struct device
 {
 	arm_teardown_iommu_dma_ops(dev);
 }
+EXPORT_SYMBOL(v7_dma_flush_range);
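
Note: the final hunk exports the raw ARMv7 cache routine so loadable (typically vendor) modules can clean and invalidate the D-cache over a virtual address range themselves. A possible use from an out-of-tree module, assuming the usual dmac_flush_range()-style prototype of start and end virtual addresses with the end exclusive; the extern declaration and function below are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/types.h>

/* Assumed prototype: v7_dma_flush_range lives in arch/arm/mm/cache-v7.S and
 * is not normally declared for drivers; check the vendor headers. */
extern void v7_dma_flush_range(const void *start, const void *end);

static void flush_dma_buffer_example(void *buf, size_t len)
{
	/* Clean + invalidate the D-cache lines covering [buf, buf + len). */
	v7_dma_flush_range(buf, (char *)buf + len);
}
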