/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
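
/*
 * Example (editor's sketch, not part of the kernel API): a platform's
 * early setup code typically points the port base at an uncached mapping
 * of its I/O window.  The 0x18000000 window below is a hypothetical
 * address chosen for illustration only:
 *
 *	void __init plat_mem_setup(void)
 *	{
 *		set_io_port_base(CKSEG1ADDR(0x18000000));
 *	}
 */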

/*
 * Provide the necessary definitions for generic iomap. We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses for
 * use with I/O ports.
 */

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/* Some callers use this older API instead.  */
#define mmiowb() iobarrier_w()
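
/*
 * Usage sketch (illustrative only): mmiowb() orders MMIO writes against
 * a subsequent unlock, e.g. in a hypothetical driver:
 *
 *	spin_lock(&dev_lock);
 *	writel(val, dev_regs + REG_CTRL);
 *	mmiowb();
 *	spin_unlock(&dev_lock);
 *
 * dev_lock, dev_regs and REG_CTRL are made-up names for the example.
 */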

/*
 * virt_to_phys	-	map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}
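
/*
 * Illustrative example (assumes a directly mapped buffer, e.g. from
 * kmalloc):
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 * Pointers into vmalloc() or ioremap() space must not be passed here.
 */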

/*
 * phys_to_virt	-	map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
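
/*
 * Typical use (editor's sketch; the register block address and offsets
 * are hypothetical):
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	u32 val = readl(regs + 0x10);
 *	iounmap(regs);
 */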

/*
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
#define ioremap_cache ioremap_cachable

/*
 * ioremap_wc     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by write-combining. It is specifically useful for
 * PCIe prefetchable windows, where it may vastly improve communication
 * performance. If it is determined at boot that the CPU CCA doesn't
 * support UCA, this method falls back to the _CACHE_UNCACHED option
 * (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
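
/*
 * Sketch of intended use (the BAR index and pdev are hypothetical):
 * write-combined mappings suit prefetchable PCIe BARs such as frame
 * buffers, where extra write bandwidth matters and strict ordering
 * does not:
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 2),
 *				      pci_resource_len(pdev, 2));
 */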

static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, irq)		\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __readq" "\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32	%M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, p)		\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
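
/*
 * Note (editorial): the builder macros above generate the standard MMIO
 * and port I/O accessors.  For instance, BUILDIO_MEM(l, u32) expands to
 * readl()/writel() plus the __raw_ and __mem_ variants, and
 * BUILDIO_IOPORT(l, u32) provides inl()/outl() and inl_p()/outl_p().
 */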

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0)

__BUILDIO(q, u64)

#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

#define writeb_relaxed			writeb
#define writew_relaxed			writew
#define writel_relaxed			writel
#define writeq_relaxed			writeq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
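
/*
 * Example (illustrative; the register offset is made up): these helpers
 * access registers that are wired big-endian regardless of the CPU's
 * endianness, e.g.
 *
 *	u32 id = readl_be(regs + 0x04);
 *	writel_be(0x1, regs + 0x04);
 */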

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
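
/*
 * Usage sketch (names are hypothetical): the string accessors repeatedly
 * access a single register, which is the usual way to drain or fill a
 * device FIFO:
 *
 *	readsl(regs + REG_FIFO, buffer, words);
 *	writesl(regs + REG_FIFO, buffer, words);
 */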

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
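
/*
 * Example (illustrative; names are made up): bulk copies to or from a
 * mapped device region, e.g. loading firmware into device SRAM:
 *
 *	memcpy_toio(sram, fw->data, fw->size);
 *	memcpy_fromio(shadow, sram, fw->size);
 */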

/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start, size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start, size)		\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start, size)		\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
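
/*
 * Example (hypothetical address): the register must already be reachable
 * as a virtual address, e.g. through KSEG1.  On a big-endian kernel the
 * +4 adjustment selects the word of the 64-bit-wide slot that actually
 * holds the 32-bit value:
 *
 *	u32 v = csr_in32(CKSEG1ADDR(0x1fbf0000));
 *	csr_out32(v | 0x1, CKSEG1ADDR(0x1fbf0000));
 */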

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */