/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/iomap.h>
#include "internal.h"

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

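/*
 * dax_map_atomic() pins @bdev's request queue and uses bdev_direct_access()
 * to translate the sector range described by @dax into a kernel virtual
 * address and pfn.  On success the caller must release the mapping with
 * dax_unmap_atomic(); on failure dax->addr holds an ERR_PTR and the queue
 * reference has already been dropped.
 */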
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

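/*
 * read_dax_sector() copies the PAGE_SIZE-aligned block containing sector @n
 * from the DAX device into a freshly allocated page and returns that page,
 * or an ERR_PTR() on allocation or mapping failure.
 */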
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0) {
		/* don't leak the freshly allocated page on error */
		__free_pages(page, 0);
		return ERR_PTR(rc);
	}
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

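/*
 * dax_io() walks the byte range [start, end), mapping each extent returned
 * by @get_block through dax_map_atomic() and copying data between that
 * mapping and @iter.  Reads from holes are satisfied with zeroes.  Returns
 * the number of bytes transferred, or a negative errno if nothing was copied.
 */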
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			/*
			 * pos + size is one past the last offset for IO,
			 * so pos + size can overflow loff_t at extreme offsets.
			 * Cast to u64 to catch this and get the true minimum.
			 */
			max = min_t(u64, pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is a locked exceptional
 * entry, wait for it to become unlocked before returning it.  The caller
 * must call put_unlocked_mapping_entry() if it decides not to lock the
 * entry, or put_locked_mapping_entry() once it has locked the entry and
 * later wants to unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry, return
 * with the radix tree entry locked.  If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry) {
		int err;

		spin_unlock_irq(&mapping->tree_lock);
		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err)
			return ERR_PTR(err);
		entry = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
			       RADIX_DAX_ENTRY_LOCK);
		spin_lock_irq(&mapping->tree_lock);
		err = radix_tree_insert(&mapping->page_tree, index, entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/* Someone already created the entry? */
			if (err == -EEXIST)
				goto restart;
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay, though, because all we really need to do is to
 * find the correct waitqueue where tasks might be sleeping waiting for that
 * old 'entry' and wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from the truncate / punch_hole path.  As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually the fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we'd
	 * better find it at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, true);

	return 1;
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
		return VM_FAULT_OOM;
	}
	vmf->page = page;
	return VM_FAULT_LOCKED;
}

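/*
 * copy_user_dax() copies @size bytes starting at @sector on the DAX device
 * into the page cache page @to.  It is used when a write fault has to
 * populate a copy-on-write page (vmf->cow_page) from existing file data.
 */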
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

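/*
 * dax_insert_mapping_entry() installs a locked DAX radix tree entry for
 * @sector at vmf->pgoff.  If the index currently holds a hole page, that
 * page is unmapped and dropped from the page cache first; on write faults
 * the new entry is also tagged dirty so a later fsync/msync can find it.
 */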
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
		       RADIX_DAX_ENTRY_LOCK);
	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = radix_tree_insert(page_tree, index, new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else {
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
		WARN_ON_ONCE(ret != entry);
		radix_tree_replace_slot(slot, new_entry);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need the hole page anymore, it has been replaced
		 * with a locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

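/*
 * dax_writeback_one() flushes a single dirty DAX radix tree entry to the
 * persistent domain with wb_cache_pmem() and clears its TOWRITE tag.  The
 * entry is revalidated under mapping->tree_lock before being written back.
 */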
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end].  This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

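/*
 * dax_insert_mapping() resolves @sector to a pfn, records the block in the
 * radix tree via dax_insert_mapping_entry(), and then installs the pfn in
 * the faulting process's page tables with vm_insert_mixed().
 */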
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	/*
	 * Check whether the offset is beyond the end of the file.  The caller
	 * is supposed to hold locks serializing us with truncate / punch hole,
	 * so this is a reliable test.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_entry;

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode),
					bh.b_size, new_page, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}
		vmf->entry = entry;
		return VM_FAULT_DAX_LOCKED;
	}

	if (!buffer_mapped(&bh)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_entry;
		} else {
			return dax_load_hole(mapping, entry, vmf);
		}
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode),
			bh.b_size, &entry, vma, vmf);
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_fault);

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

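/*
 * __dax_zero_page_range() zeroes a sub-page byte range.  When the range is
 * aligned to the device's logical block size it punts to
 * blkdev_issue_zeroout(); otherwise it maps the page through dax and clears
 * the bytes with clear_pmem().
 */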
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

Boaz Harrosh | 0e3b210 | 2015-04-15 16:15:14 -0700 | [diff] [blame] | 964 | /** |
Matthew Wilcox | 25726bc | 2015-02-16 15:59:35 -0800 | [diff] [blame] | 965 | * dax_zero_page_range - zero a range within a page of a DAX file |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 966 | * @inode: The file being truncated |
| 967 | * @from: The file offset that is being truncated to |
Matthew Wilcox | 25726bc | 2015-02-16 15:59:35 -0800 | [diff] [blame] | 968 | * @length: The number of bytes to zero |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 969 | * @get_block: The filesystem method used to translate file offsets to blocks |
| 970 | * |
Matthew Wilcox | 25726bc | 2015-02-16 15:59:35 -0800 | [diff] [blame] | 971 | * This function can be called by a filesystem when it is zeroing part of a |
| 972 | * page in a DAX file. This is intended for hole-punch operations. If |
| 973 | * you are truncating a file, the helper function dax_truncate_page() may be |
| 974 | * more convenient. |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 975 | */ |
Matthew Wilcox | 25726bc | 2015-02-16 15:59:35 -0800 | [diff] [blame] | 976 | int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, |
| 977 | get_block_t get_block) |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 978 | { |
| 979 | struct buffer_head bh; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 980 | pgoff_t index = from >> PAGE_SHIFT; |
| 981 | unsigned offset = from & (PAGE_SIZE-1); |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 982 | int err; |
| 983 | |
| 984 | /* Block boundary? Nothing to do */ |
| 985 | if (!length) |
| 986 | return 0; |
Ross Zwisler | aada54f | 2016-11-08 11:32:00 +1100 | [diff] [blame] | 987 | if (WARN_ON_ONCE((offset + length) > PAGE_SIZE)) |
| 988 | return -EINVAL; |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 989 | |
| 990 | memset(&bh, 0, sizeof(bh)); |
Ross Zwisler | eab95db | 2016-01-22 15:10:59 -0800 | [diff] [blame] | 991 | bh.b_bdev = inode->i_sb->s_bdev; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 992 | bh.b_size = PAGE_SIZE; |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 993 | err = get_block(inode, index, &bh, 0); |
Christoph Hellwig | 679c8bd | 2016-05-09 10:47:04 +0200 | [diff] [blame] | 994 | if (err < 0 || !buffer_written(&bh)) |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 995 | return err; |
Dan Williams | b2e0d16 | 2016-01-15 16:55:59 -0800 | [diff] [blame] | 996 | |
Christoph Hellwig | 679c8bd | 2016-05-09 10:47:04 +0200 | [diff] [blame] | 997 | return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode), |
| 998 | offset, length); |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 999 | } |
Matthew Wilcox | 25726bc | 2015-02-16 15:59:35 -0800 | [diff] [blame] | 1000 | EXPORT_SYMBOL_GPL(dax_zero_page_range); |
| 1001 | |
| 1002 | /** |
| 1003 | * dax_truncate_page - handle a partial page being truncated in a DAX file |
| 1004 | * @inode: The file being truncated |
| 1005 | * @from: The file offset that is being truncated to |
| 1006 | * @get_block: The filesystem method used to translate file offsets to blocks |
| 1007 | * |
| 1008 | * Similar to block_truncate_page(), this function can be called by a |
| 1009 | * filesystem when it is truncating a DAX file to handle the partial page. |
Matthew Wilcox | 25726bc | 2015-02-16 15:59:35 -0800 | [diff] [blame] | 1010 | */ |
| 1011 | int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) |
| 1012 | { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1013 | unsigned length = PAGE_ALIGN(from) - from; |
Matthew Wilcox | 25726bc | 2015-02-16 15:59:35 -0800 | [diff] [blame] | 1014 | return dax_zero_page_range(inode, from, length, get_block); |
| 1015 | } |
Matthew Wilcox | 4c0ccfe | 2015-02-16 15:59:06 -0800 | [diff] [blame] | 1016 | EXPORT_SYMBOL_GPL(dax_truncate_page); |
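A hedged sketch of a filesystem truncate path built on dax_truncate_page(); the helper name example_dax_setsize() and the example_get_block() callback are hypothetical stand-ins for the filesystem's own code, loosely following the pattern used by in-tree callers.

static int example_dax_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	/* Zero the tail of the (now partial) page at the new EOF. */
	error = dax_truncate_page(inode, newsize, example_get_block);
	if (error)
		return error;

	/* Shrink i_size and tear down pagecache/mappings beyond the new EOF. */
	truncate_setsize(inode, newsize);
	return 0;
}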
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1017 | |
| 1018 | #ifdef CONFIG_FS_IOMAP |
Ross Zwisler | 333ccc9 | 2016-11-08 11:33:09 +1100 | [diff] [blame] | 1019 | static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) |
| 1020 | { |
| 1021 | return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); |
| 1022 | } |
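As a worked example of the arithmetic above (with illustrative values and a 4096-byte PAGE_SIZE): given iomap->offset == 0 and iomap->blkno == 8192, a position pos == 0x3204 is first rounded down to the page boundary 0x3000 (12288 bytes); the shift by 9 converts bytes to 512-byte sectors, 12288 >> 9 == 24, so the function returns 8192 + 24 = 8216.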
| 1023 | |
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1024 | static loff_t |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1025 | dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, |
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1026 | struct iomap *iomap) |
| 1027 | { |
| 1028 | struct iov_iter *iter = data; |
| 1029 | loff_t end = pos + length, done = 0; |
| 1030 | ssize_t ret = 0; |
| 1031 | |
| 1032 | if (iov_iter_rw(iter) == READ) { |
| 1033 | end = min(end, i_size_read(inode)); |
| 1034 | if (pos >= end) |
| 1035 | return 0; |
| 1036 | |
| 1037 | if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) |
| 1038 | return iov_iter_zero(min(length, end - pos), iter); |
| 1039 | } |
| 1040 | |
| 1041 | if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) |
| 1042 | return -EIO; |
| 1043 | |
| 1044 | while (pos < end) { |
| 1045 | unsigned offset = pos & (PAGE_SIZE - 1); |
| 1046 | struct blk_dax_ctl dax = { 0 }; |
| 1047 | ssize_t map_len; |
| 1048 | |
Ross Zwisler | 333ccc9 | 2016-11-08 11:33:09 +1100 | [diff] [blame] | 1049 | dax.sector = dax_iomap_sector(iomap, pos); |
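/*
 * Ask for whole pages: round the remaining length, plus the offset into
 * the first page, up to a multiple of PAGE_SIZE before calling
 * dax_map_atomic().
 */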
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1050 | dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; |
| 1051 | map_len = dax_map_atomic(iomap->bdev, &dax); |
| 1052 | if (map_len < 0) { |
| 1053 | ret = map_len; |
| 1054 | break; |
| 1055 | } |
| 1056 | |
| 1057 | dax.addr += offset; |
| 1058 | map_len -= offset; |
| 1059 | if (map_len > end - pos) |
| 1060 | map_len = end - pos; |
| 1061 | |
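/*
 * Writes go through the pmem-aware copy helper so the stores bypass or
 * flush the CPU cache on their way to persistent memory; reads can use a
 * plain copy_to_iter().
 */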
| 1062 | if (iov_iter_rw(iter) == WRITE) |
| 1063 | map_len = copy_from_iter_pmem(dax.addr, map_len, iter); |
| 1064 | else |
| 1065 | map_len = copy_to_iter(dax.addr, map_len, iter); |
| 1066 | dax_unmap_atomic(iomap->bdev, &dax); |
| 1067 | if (map_len <= 0) { |
| 1068 | ret = map_len ? map_len : -EFAULT; |
| 1069 | break; |
| 1070 | } |
| 1071 | |
| 1072 | pos += map_len; |
| 1073 | length -= map_len; |
| 1074 | done += map_len; |
| 1075 | } |
| 1076 | |
| 1077 | return done ? done : ret; |
| 1078 | } |
| 1079 | |
| 1080 | /** |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1081 | * dax_iomap_rw - Perform I/O to a DAX file |
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1082 | * @iocb: The control block for this I/O |
| 1083 | * @iter: The addresses to do I/O from or to |
| 1084 | * @ops: iomap ops passed from the file system |
| 1085 | * |
| 1086 | * This function performs read and write operations to directly mapped |
| 1087 | * persistent memory. The caller needs to take care of read/write exclusion |
| 1088 | * and evicting any page cache pages in the region under I/O. |
| 1089 | */ |
| 1090 | ssize_t |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1091 | dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, |
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1092 | struct iomap_ops *ops) |
| 1093 | { |
| 1094 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
| 1095 | struct inode *inode = mapping->host; |
| 1096 | loff_t pos = iocb->ki_pos, ret = 0, done = 0; |
| 1097 | unsigned flags = 0; |
| 1098 | |
| 1099 | if (iov_iter_rw(iter) == WRITE) |
| 1100 | flags |= IOMAP_WRITE; |
| 1101 | |
| 1102 | /* |
| 1103 | * Yes, even DAX files can have page cache attached to them: A zeroed |
| 1104 | * page is inserted into the pagecache when we have to serve a write |
| 1105 | * fault on a hole. It should never be dirtied and can simply be |
| 1106 | * dropped from the pagecache once we get real data for the page. |
| 1107 | * |
| 1108 | * XXX: This is racy against mmap, and there's nothing we can do about |
| 1109 | * it. We'll eventually need to shift this down even further so that |
| 1110 | * we can check if we allocated blocks over a hole first. |
| 1111 | */ |
| 1112 | if (mapping->nrpages) { |
| 1113 | ret = invalidate_inode_pages2_range(mapping, |
| 1114 | pos >> PAGE_SHIFT, |
| 1115 | (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT); |
| 1116 | WARN_ON_ONCE(ret); |
| 1117 | } |
| 1118 | |
| 1119 | while (iov_iter_count(iter)) { |
| 1120 | ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1121 | iter, dax_iomap_actor); |
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1122 | if (ret <= 0) |
| 1123 | break; |
| 1124 | pos += ret; |
| 1125 | done += ret; |
| 1126 | } |
| 1127 | |
| 1128 | iocb->ki_pos += done; |
| 1129 | return done ? done : ret; |
| 1130 | } |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1131 | EXPORT_SYMBOL_GPL(dax_iomap_rw); |
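A hedged sketch of how a filesystem ->read_iter method might be built on dax_iomap_rw(); example_dax_read_iter() and example_iomap_ops are hypothetical stand-ins for the filesystem's own method and iomap_ops, and the shared inode lock is just one way of providing the read/write exclusion described above.

static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0; /* skip atime update for zero-length reads */

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}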
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1132 | |
| 1133 | /** |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1134 | * dax_iomap_fault - handle a page fault on a DAX file |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1135 | * @vma: The virtual memory area where the fault occurred |
| 1136 | * @vmf: The description of the fault |
| 1137 | * @ops: iomap ops passed from the file system |
| 1138 | * |
| 1139 | * When a page fault occurs, filesystems may call this helper in their fault |
| 1140 | * or mkwrite handler for DAX files. Assumes the caller has done all the |
| 1141 | * necessary locking for the page fault to proceed successfully. |
| 1142 | */ |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1143 | int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1144 | struct iomap_ops *ops) |
| 1145 | { |
| 1146 | struct address_space *mapping = vma->vm_file->f_mapping; |
| 1147 | struct inode *inode = mapping->host; |
| 1148 | unsigned long vaddr = (unsigned long)vmf->virtual_address; |
| 1149 | loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; |
| 1150 | sector_t sector; |
| 1151 | struct iomap iomap = { 0 }; |
| 1152 | unsigned flags = 0; |
| 1153 | int error, major = 0; |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1154 | int locked_status = 0; |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1155 | void *entry; |
| 1156 | |
| 1157 | /* |
| 1158 | * Check whether the offset is beyond the end of the file now. The caller |
| 1159 | * is supposed to hold locks serializing us with truncate / punch hole so |
| 1160 | * this is a reliable test. |
| 1161 | */ |
| 1162 | if (pos >= i_size_read(inode)) |
| 1163 | return VM_FAULT_SIGBUS; |
| 1164 | |
| 1165 | entry = grab_mapping_entry(mapping, vmf->pgoff); |
| 1166 | if (IS_ERR(entry)) { |
| 1167 | error = PTR_ERR(entry); |
| 1168 | goto out; |
| 1169 | } |
| 1170 | |
| 1171 | if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) |
| 1172 | flags |= IOMAP_WRITE; |
| 1173 | |
| 1174 | /* |
| 1175 | * Note that we don't bother to use iomap_apply here: DAX requires |
| 1176 | * the file system block size to be equal to the page size, which means |
| 1177 | * that we never have to deal with more than a single extent here. |
| 1178 | */ |
| 1179 | error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); |
| 1180 | if (error) |
| 1181 | goto unlock_entry; |
| 1182 | if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { |
| 1183 | error = -EIO; /* fs corruption? */ |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1184 | goto finish_iomap; |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1185 | } |
| 1186 | |
Ross Zwisler | 333ccc9 | 2016-11-08 11:33:09 +1100 | [diff] [blame] | 1187 | sector = dax_iomap_sector(&iomap, pos); |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1188 | |
| 1189 | if (vmf->cow_page) { |
| 1190 | switch (iomap.type) { |
| 1191 | case IOMAP_HOLE: |
| 1192 | case IOMAP_UNWRITTEN: |
| 1193 | clear_user_highpage(vmf->cow_page, vaddr); |
| 1194 | break; |
| 1195 | case IOMAP_MAPPED: |
| 1196 | error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE, |
| 1197 | vmf->cow_page, vaddr); |
| 1198 | break; |
| 1199 | default: |
| 1200 | WARN_ON_ONCE(1); |
| 1201 | error = -EIO; |
| 1202 | break; |
| 1203 | } |
| 1204 | |
| 1205 | if (error) |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1206 | goto finish_iomap; |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1207 | if (!radix_tree_exceptional_entry(entry)) { |
| 1208 | vmf->page = entry; |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1209 | locked_status = VM_FAULT_LOCKED; |
| 1210 | } else { |
| 1211 | vmf->entry = entry; |
| 1212 | locked_status = VM_FAULT_DAX_LOCKED; |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1213 | } |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1214 | goto finish_iomap; |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1215 | } |
| 1216 | |
| 1217 | switch (iomap.type) { |
| 1218 | case IOMAP_MAPPED: |
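/*
 * IOMAP_F_NEW means the filesystem allocated new blocks for this fault,
 * so account it as a major fault.
 */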
| 1219 | if (iomap.flags & IOMAP_F_NEW) { |
| 1220 | count_vm_event(PGMAJFAULT); |
| 1221 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); |
| 1222 | major = VM_FAULT_MAJOR; |
| 1223 | } |
| 1224 | error = dax_insert_mapping(mapping, iomap.bdev, sector, |
| 1225 | PAGE_SIZE, &entry, vma, vmf); |
| 1226 | break; |
| 1227 | case IOMAP_UNWRITTEN: |
| 1228 | case IOMAP_HOLE: |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1229 | if (!(vmf->flags & FAULT_FLAG_WRITE)) { |
| 1230 | locked_status = dax_load_hole(mapping, entry, vmf); |
| 1231 | break; |
| 1232 | } |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1233 | /*FALLTHRU*/ |
| 1234 | default: |
| 1235 | WARN_ON_ONCE(1); |
| 1236 | error = -EIO; |
| 1237 | break; |
| 1238 | } |
| 1239 | |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1240 | finish_iomap: |
| 1241 | if (ops->iomap_end) { |
| 1242 | if (error) { |
| 1243 | /* keep previous error */ |
| 1244 | ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags, |
| 1245 | &iomap); |
| 1246 | } else { |
| 1247 | error = ops->iomap_end(inode, pos, PAGE_SIZE, |
| 1248 | PAGE_SIZE, flags, &iomap); |
| 1249 | } |
| 1250 | } |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1251 | unlock_entry: |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1252 | if (!locked_status || error) |
| 1253 | put_locked_mapping_entry(mapping, vmf->pgoff, entry); |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1254 | out: |
| 1255 | if (error == -ENOMEM) |
| 1256 | return VM_FAULT_OOM | major; |
| 1257 | /* -EBUSY is fine, somebody else faulted on the same PTE */ |
| 1258 | if (error < 0 && error != -EBUSY) |
| 1259 | return VM_FAULT_SIGBUS | major; |
Ross Zwisler | 1550290 | 2016-11-08 11:33:26 +1100 | [diff] [blame] | 1260 | if (locked_status) { |
| 1261 | WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */ |
| 1262 | return locked_status; |
| 1263 | } |
Christoph Hellwig | a7d73fe | 2016-09-19 11:24:50 +1000 | [diff] [blame] | 1264 | return VM_FAULT_NOPAGE | major; |
| 1265 | } |
Ross Zwisler | 11c59c9 | 2016-11-08 11:32:46 +1100 | [diff] [blame] | 1266 | EXPORT_SYMBOL_GPL(dax_iomap_fault); |
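A hedged sketch of a DAX ->fault handler wired up to dax_iomap_fault(); example_dax_fault() and example_iomap_ops are hypothetical stand-ins for the filesystem's own handler and iomap_ops, and the sb_start_pagefault()/sb_end_pagefault() pairing around write faults mirrors the pattern used by existing callers rather than being required by this API.

static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	int ret;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	ret = dax_iomap_fault(vma, vmf, &example_iomap_ops);

	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}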
Christoph Hellwig | a254e56 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 1267 | #endif /* CONFIG_FS_IOMAP */ |