/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
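
/*
 * Usage sketch (illustrative only): drivers don't call arch_dma_alloc() /
 * arch_dma_free() directly; they go through the generic DMA API, which
 * lands here on ARC. Roughly:
 *
 *	void *cpu_buf;
 *	dma_addr_t dma_handle;
 *
 *	cpu_buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
 *	if (!cpu_buf)
 *		return -ENOMEM;
 *	... program the device with dma_handle, CPU accesses cpu_buf ...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_buf, dma_handle);
 *
 * 'dev' above is an assumed driver-side struct device pointer.
 */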

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides need for cache maintenance, saving
	 *    cycles in flush code, and bus bandwidth as all the lines of a
	 *    buffer need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *    (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cachability
	 * - A highmem page needs a virtual handle (hence MMU mapping)
	 *   independent of cachability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
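
/*
 * Note (illustrative, describing assumed typical usage): the
 * DMA_ATTR_NON_CONSISTENT case above is hit when a driver explicitly asks
 * for memory it will maintain itself, e.g.:
 *
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 *
 * The buffer then stays cacheable and the driver is responsible for its own
 * cache maintenance around device accesses.
 */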

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);
	int is_non_coh = 1;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
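
/*
 * Usage sketch (illustrative only): arch_dma_mmap() is normally reached
 * from a driver's ->mmap() file operation via dma_mmap_coherent(), e.g.:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_buf,
 *					 fd->dma_handle, fd->size);
 *	}
 *
 * 'foo_dev' and its members are hypothetical driver-side names.
 */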

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */
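
/*
 * Usage sketch (illustrative only): the table above is exercised by the
 * streaming DMA API, e.g. for a buffer the device writes into:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... let the device DMA into the buffer ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *
 * map invalidates the buffer's cache lines (for_device); unmap invalidates
 * again (for_cpu) to drop lines pulled in by speculative prefetch while the
 * DMA was in flight.
 */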

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}