/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, it necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

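/*
 * Illustrative example (not taken from this file, numbers made up): a guest
 * may see PFN 0x100 and PFN 0x101 as adjacent, while the hypervisor backs
 * them with MFN 0x8765 and MFN 0x1234.  A device doing DMA only understands
 * machine addresses, so a buffer that is contiguous in PFN space can be
 * scattered in machine memory and/or sit above the device's DMA mask.  The
 * code below handles both cases by bouncing such buffers through a pool
 * that has been made machine-contiguous and placed under 4GB by exchanging
 * pages with the hypervisor.
 */
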
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit, leading to a loss of
 * information if the shift is done before casting to 64bit.
 */
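/*
 * Note (added for clarity): the helpers below work at XEN_PAGE_SIZE (4KB)
 * granularity rather than the kernel's PAGE_SIZE, so they remain correct
 * when the kernel uses larger pages (e.g. 64KB pages on arm64).  The "bfn"
 * is the frame number a device sees on the bus: for x86 PV guests that is
 * the machine frame number, while for auto-translated guests it usually
 * equals the pfn unless the page was mapped in from another domain.
 */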
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
        dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

        dma |= paddr & ~XEN_PAGE_MASK;

        return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
        dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
        phys_addr_t paddr = dma;

        paddr |= baddr & ~XEN_PAGE_MASK;

        return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

static int check_pages_physically_contiguous(unsigned long xen_pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_bfn;
        int i;
        int nr_pages;

        next_bfn = pfn_to_bfn(xen_pfn);
        nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
                        return 0;
        }
        return 1;
}

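/*
 * Returns 1 only when the buffer spans more than one Xen page AND the
 * underlying machine frames are not contiguous; in that case the device
 * would see a hole where the guest sees contiguous memory, so the buffer
 * has to be bounced.  A buffer that fits in a single Xen page, or whose
 * backing frames happen to be machine-contiguous, can be handed to the
 * device as-is (mask permitting).
 */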
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long xen_pfn = XEN_PFN_DOWN(p);
        unsigned int offset = p & ~XEN_PAGE_MASK;

        if (offset + size <= XEN_PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(xen_pfn, offset, size))
                return 0;
        return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long bfn = XEN_PFN_DOWN(dma_addr);
        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check addresses within our domain.
         */
        if (pfn_valid(PFN_DOWN(paddr))) {
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

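/*
 * Exchange the bounce pool with the hypervisor, one IO_TLB_SEGSIZE-sized
 * chunk at a time, so that each chunk ends up machine-contiguous and
 * addressable with dma_bits bits.  If the hypervisor cannot satisfy a
 * request, the address width is relaxed step by step up to max_dma_bits
 * (32, i.e. anywhere under 4GB).
 */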
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}
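/*
 * Set up the Xen software IO TLB.  When "early" the buffer comes from the
 * memblock allocator, otherwise from the page allocator with progressively
 * smaller orders.  If the hypervisor cannot make the buffer contiguous, the
 * requested size is halved and the whole procedure retried up to three
 * times; only then do we give up (and panic on the early boot path, where
 * there is no way to recover).
 */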
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
                                                  PAGE_SIZE);
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        memblock_free(__pa(xen_io_tlb_start),
                                      PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

        if (!rc)
                swiotlb_set_max_segment(PAGE_SIZE);

        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}

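/*
 * Coherent allocations are taken from the normal allocator first; only when
 * the resulting machine address range would exceed the device's coherent
 * DMA mask, or would straddle discontiguous machine frames, is the region
 * exchanged with the hypervisor for one that satisfies both constraints.
 */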
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* Convert the size to the size actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        /* On ARM this function returns an ioremap'ped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* At this point dma_handle is the physical address, next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = *dma_handle;
        dev_addr = xen_phys_to_bus(phys);
        if ((dev_addr + size - 1 <= dma_mask) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
        }
        memset(ret, 0, size);
        return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, unsigned long attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* Do not use virt_to_phys because on ARM it doesn't return the
         * physical address. */
        phys = xen_bus_to_phys(dev_addr);

        /* Convert the size to the size actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        if ((dev_addr + size - 1 <= dma_mask) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
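/*
 * Fast path: the machine address is returned unchanged when the device can
 * reach it, the buffer does not straddle discontiguous machine frames, the
 * architecture does not insist on bouncing, and swiotlb=force was not set;
 * xen_dma_map_page() is still called for its cache maintenance side effects
 * on ARM.  Otherwise the data is bounced through the pool set up in
 * xen_swiotlb_init().
 */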
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir,
                                       unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
            (swiotlb_force != SWIOTLB_FORCE)) {
                /* we are not interested in the dma_addr returned by
                 * xen_dma_map_page, only in the potential cache flushes executed
                 * by the function. */
                xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
                return dev_addr;
        }

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
                                     attrs);
        if (map == SWIOTLB_MAP_ERROR)
                return XEN_SWIOTLB_ERROR_CODE;

        dev_addr = xen_phys_to_bus(map);
        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                         dev_addr, map & ~PAGE_MASK, size, dir, attrs);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (dma_capable(dev, dev_addr, size))
                return dev_addr;

        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

        return XEN_SWIOTLB_ERROR_CODE;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             unsigned long attrs)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (target == SYNC_FOR_CPU)
                xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

        if (target == SYNC_FOR_DEVICE)
                xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
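/*
 * Each scatterlist entry is checked and, where necessary, bounced on its
 * own.  If the bounce pool runs out part-way through, everything mapped so
 * far is unwound and 0 is returned; callers must treat that as a mapping
 * failure, not a partial success.
 */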
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force == SWIOTLB_FORCE ||
                    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir, attrs);
                        if (map == SWIOTLB_MAP_ERROR) {
                                dev_warn(hwdev, "swiotlb buffer is full\n");
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
                        dev_addr = xen_phys_to_bus(map);
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                         dev_addr,
                                         map & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = dev_addr;
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
                         * by the function. */
                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
                                         dev_addr,
                                         paddr & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = dev_addr;
                }
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}

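/*
 * Note (added for clarity): on ARM/ARM64 the coherent buffer handed out by
 * xen_alloc_coherent_pages() is a remapped region that the generic
 * dma_common_* helpers do not know how to translate back to pages, so mmap
 * and get_sgtable below defer to the architecture's dma_map_ops when those
 * provide the corresponding callbacks.
 */
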
/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                     void *cpu_addr, dma_addr_t dma_addr, size_t size,
                     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
        if (xen_get_dma_ops(dev)->mmap)
                return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
                                                  dma_addr, size, attrs);
#endif
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size,
                        unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
        if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
                /*
                 * This check verifies that the page belongs to the current
                 * domain and is not one mapped from another domain.
                 * This check is for debug only, and should not go into a
                 * production build.
                 */
                unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
                BUG_ON(!page_is_ram(bfn));
#endif
                return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
                                                         handle, size, attrs);
        }
#endif
        return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}

static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg_attrs,
        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .mmap = xen_swiotlb_dma_mmap,
        .get_sgtable = xen_swiotlb_get_sgtable,
        .mapping_error = xen_swiotlb_mapping_error,
};
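
/*
 * A rough usage sketch (added for illustration; not part of the original
 * file): these ops are installed as the DMA ops while running under Xen --
 * on x86, for example, pci_xen_swiotlb_init() calls xen_swiotlb_init() and
 * then points dma_ops at xen_swiotlb_dma_ops -- so ordinary driver calls
 * are what end up in the functions above:
 *
 *      dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... hand "handle" to the device and start the transfer ...
 *      dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */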