/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */
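/*
 * Rough call flow (a sketch of the generic layer, not code in this file):
 * a driver request such as
 *
 *	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *
 * reaches arch_dma_alloc() below via the generic kernel/dma code whenever
 * the device has not been set up as dma-coherent; streaming maps
 * (dma_map_single() and friends) likewise end up in
 * arch_sync_dma_for_device() / arch_sync_dma_for_cpu().
 */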
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/*
	 * A coherent buffer needs an MMU mapping to enforce non-cacheability.
	 * kvaddr is kernel Virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		/* Non-consistent request: the cached linear address will do */
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

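/*
 * Used by the generic dma-mapping mmap helpers to turn a coherent handle
 * back into a pfn: per arch_dma_alloc() above, the handle on ARC is simply
 * the buffer's physical address.
 */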
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_addr);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

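/*
 * Worked example of the table above (the driver side is only a sketch; the
 * buffer and length names are made up):
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_device(): dma_cache_inv(paddr, len)
 *	... device DMAs into the buffer ...
 *	dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_cpu(): dma_cache_inv(paddr, len)
 *
 * The second invalidate guards against lines pulled in by speculative
 * CPU prefetches while the device owned the buffer.
 */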
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in direct dma map ops.
 */
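/*
 * The 'coherent' argument normally comes from the DT: a hypothetical
 * peripheral node carrying the standard "dma-coherent" property, e.g.
 *
 *	some-device@f0000000 {
 *		...
 *		dma-coherent;
 *	};
 *
 * is what makes of_dma_configure() call this hook with coherent == true.
 */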
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}