// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp | __GFP_ZERO, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/*
	 * A coherent buffer needs an MMU mapping to enforce non-cacheability.
	 * kvaddr is the kernel virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets kvaddr and
	 * hence can't be used to efficiently flush L1 and/or L2, which need
	 * paddr. Currently flush_cache_vmap() nukes the L1 cache completely,
	 * which will be optimized as a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
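
/*
 * Minimal usage sketch (hypothetical driver code, for illustration only):
 * a consistent buffer allocated through the generic API lands in
 * arch_dma_alloc() above when the device is not IO-coherent:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *
 *	if (buf) {
 *		... use buf / handle for descriptor rings etc. ...
 *		dma_free_coherent(dev, SZ_4K, buf, handle);
 *	}
 */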

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

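/*
 * Translate a coherent allocation back to a page frame number; the generic
 * dma-mapping code uses this when user space mmap()s the buffer (e.g. via
 * dma_mmap_coherent()). Since *dma_handle is the physical address on ARC,
 * a plain phys-to-pfn conversion suffices.
 */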
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_addr);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument as it is done
 * in upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed only for speculative CPU prefetches */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
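
/*
 * Minimal streaming-DMA sketch (hypothetical driver code, simplified) showing
 * where the two sync hooks above come into play for a device-to-memory
 * transfer:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		// map   -> arch_sync_dma_for_device(): cache invalidate
 *	... kick off the transfer and wait for it to complete ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *		// unmap -> arch_sync_dma_for_cpu(): invalidate again, in case
 *		//          the CPU speculatively prefetched in the meantime
 */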

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
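
/*
 * Illustrative device tree fragment (node name, address and compatible are
 * placeholders): the "dma-coherent" property is what makes the DT core pass
 * coherent == true into arch_setup_dma_ops() above, so that an ARCv2 system
 * with the IOC enabled can skip the explicit cache maintenance entirely:
 *
 *	ethernet@f0008000 {
 *		compatible = "snps,arc-emac";
 *		dma-coherent;
 *	};
 */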