// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 * - hardware IOC not available (or "dma-coherent" not set for device in DT)
 * - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */
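
/*
 * For illustration only (not part of the original file): the "dma-coherent"
 * check mentioned above refers to the standard DT boolean property of that
 * name. A hypothetical peripheral advertising hardware-coherent DMA would
 * carry it in its node, roughly:
 *
 *	ethernet@f0008000 {
 *		compatible = "snps,arc-emac";
 *		dma-coherent;
 *	};
 *
 * Such a device is handed to arch_setup_dma_ops() below with
 * coherent == true and (with IOC enabled) skips the explicit cache
 * maintenance implemented in this file.
 */
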
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp | __GFP_ZERO, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/*
	 * A coherent buffer needs an MMU mapping to enforce non-cacheability.
	 * kvaddr is the kernel virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it is handed kvaddr
	 * and hence can't be used to efficiently flush L1 and/or L2, which
	 * need paddr. Currently flush_cache_vmap() nukes the L1 cache
	 * completely; optimizing that is left to a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
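
/*
 * Illustration only (assumed driver-side usage, not part of the original
 * file): the two helpers above are not called directly; a driver reaches
 * them through the generic coherent-allocation API, e.g.
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, cpu, dma);
 *
 * dma_alloc_coherent() passes no attrs, so DMA_ATTR_NON_CONSISTENT is not
 * set and the allocation takes the ioremap_nocache() +
 * dma_cache_wback_inv() path above.
 */
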
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_addr);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          == for_device     |   unmap       == for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback     |   none           none
 * FROM_DEV |   invalidate       invalidate    |   invalidate*    invalidate*
 * BIDIR    |   writeback+inv    writeback+inv |   invalidate     invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */
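
/*
 * Illustration only (assumed driver-side usage, not part of the original
 * file): for streaming DMA the hooks below are invoked around the actual
 * transfer, e.g. for a device-to-memory (receive) buffer:
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_device(): dma_cache_inv(paddr, len)
 *	... device writes the buffer via DMA ...
 *	dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_cpu(): dma_cache_inv(paddr, len)
 *
 * which matches the FROM_DEV row of the table above.
 */
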

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE: invalidate needed only for speculative CPU prefetch */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in the direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding the need for any explicit cache maintenance
	 * of DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
Eugeniy Paltsev2820a702018-07-30 19:26:34 +0300163}