// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * The generic direct mapping code is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (see [1] for how to
 * declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
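
/*
 * Illustrative sketch only (not taken from [1]): one way to declare such a
 * pool is a "shared-dma-pool" reserved-memory node marked as the default
 * coherent pool. The node name, address and size below are placeholders.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_pool: linux,dma@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 */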

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	void *ret;

	/*
	 * Try the generic allocator first if the caller indicates that
	 * consistency is not required.
	 */

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return dma_direct_alloc_pages(dev, size, dma_handle, gfp,
					      attrs);

	ret = dma_alloc_from_global_coherent(size, dma_handle);

	/*
	 * dma_alloc_from_global_coherent() may fail because:
	 *
	 * - no consistent DMA region has been defined, so we can't
	 *   continue.
	 * - there is no space left in the consistent DMA region, so we
	 *   can only fall back to the generic allocator if the caller
	 *   indicates that consistency is not required.
	 */

	WARN_ON_ONCE(ret == NULL);
	return ret;
}

static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
	} else {
		int ret = dma_release_from_global_coherent(get_order(size),
							   cpu_addr);

		WARN_ON_ONCE(ret == 0);
	}
}

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	int ret;

	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

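/*
 * Cache maintenance helpers for non-coherent devices: before a transfer the
 * CPU caches are cleaned (or invalidated for DMA_FROM_DEVICE) so the device
 * sees up-to-date data; after a device-to-CPU transfer they are invalidated
 * so the CPU does not read stale lines.
 */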
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

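/*
 * Without an MMU there is no remapping: the DMA address handed to the device
 * is simply the physical address of the buffer, so map/unmap only need to do
 * the cache maintenance above.
 */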
static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

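/*
 * Scatterlist variants: each segment maps 1:1 to its physical address and
 * gets the same per-segment cache maintenance.
 */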
static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.mmap			= arm_nommu_dma_mmap,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

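/*
 * arch_setup_dma_ops() is invoked while a device is being configured for DMA
 * (for example from of_dma_configure() on DT-based systems) and decides
 * whether the device needs the non-coherent ops above.
 */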
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so the device can be
		 * treated as coherent if no cache has been detected. Note
		 * that it is not enough to check whether the MPU is in use
		 * or not, since the system memory map is used in the
		 * absence of an MPU anyway.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case MMU/MPU has not been set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	if (!dev->archdata.dma_coherent)
		set_dma_ops(dev, &arm_nommu_dma_ops);
}
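
/*
 * Usage sketch (illustrative only): a driver on a non-coherent !MMU system
 * uses the regular DMA API, and the calls end up in arm_nommu_dma_ops via the
 * per-device ops set above. "dev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t dma, addr;
 *	void *vaddr;
 *
 *	vaddr = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *				// arm_nommu_dma_alloc(): served from the
 *				// global coherent pool declared via [1]
 *
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *				// arm_nommu_dma_map_page(): cleans the cache
 *				// before the device reads "buf"
 *
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 *	dma_free_coherent(dev, SZ_4K, vaddr, dma);
 */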