/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
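
/*
 * Worked example (illustrative, not part of the original source): on
 * x86-64 with 4K pages, PAGE_SHIFT == 12, PMD_SHIFT == 21 and
 * PUD_SHIFT == 30, so pe_order() returns 0, 9 and 18 respectively,
 * i.e. a PMD covers 1 << 9 == 512 base pages and a PUD 1 << 18.
 */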

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}
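
/*
 * Illustrative round trip (not from the original source): for pfn 0x1234,
 * dax_make_entry(pfn_to_pfn_t(0x1234), DAX_PMD) stores the value
 * (0x1234 << DAX_SHIFT) | DAX_PMD in the XArray, and dax_to_pfn() on that
 * entry shifts the four flag bits back out to recover 0x1234.
 */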

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
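
/*
 * Example (illustrative): with 2MB PMDs a PMD entry at index 0x200 covers
 * indices 0x200-0x3ff.  A waiter looking at xa_index 0x203 masks off
 * PG_PMD_COLOUR first, so every index within that PMD hashes to the same
 * wait queue and carries the same entry_start in its wake-up key.
 */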

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
				!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}
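
/*
 * A minimal sketch of the calling convention, assembled from the callers
 * below (illustrative only):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas);
 *	... inspect or modify the entry ...
 *	put_unlocked_entry(&xas, entry);	// entry was not locked
 *	xas_unlock_irq(&xas);
 *
 * A caller that locks the entry with dax_lock_entry() must instead drop
 * it later with dax_unlock_entry().
 */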

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry)
		dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned long size_flag)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas);

	if (entry) {
		if (!xa_is_value(entry)) {
			xas_set_err(xas, EIO);
			goto out_unlock;
		}

		if (size_flag & DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_entry(xas, entry);
				goto fallback;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
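
/*
 * A sketch of how a filesystem might drive this interface while it holds
 * the locks described above (illustrative, modelled on the break-layouts
 * pattern rather than copied from any one filesystem):
 *
 *	struct page *page;
 *
 *	while ((page = dax_layout_busy_page(mapping))) {
 *		// wait for the pin (e.g. in-flight DMA) to drop
 *		wait_var_event(&page->_refcount,
 *			       page_ref_count(page) == 1);
 *	}
 */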

static int __dax_invalidate_entry(struct address_space *mapping,
					pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		void *old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	size = PAGE_SIZE << dax_entry_order(entry);

	dax_entry_mkclean(mapping, xas->xa_index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, xas->xa_index,
			size >> PAGE_SHIFT);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	struct dax_device *dax_dev;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
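
/*
 * Filesystems typically wire this up through ->writepages(); a minimal
 * sketch ("example_dax_writepages" is a placeholder name, illustrative
 * only):
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */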

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
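
/*
 * Example of the two paths above (illustrative): on a bdev with 512-byte
 * logical sectors, zeroing 512 bytes at a 512-aligned offset takes the
 * blkdev_issue_zeroout() branch, while zeroing 100 bytes at offset 50 is
 * sub-sector, so it maps the page with dax_direct_access(), memset()s the
 * range and flushes it back with dax_flush().
 */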
1084
Christoph Hellwiga254e562016-09-19 11:24:49 +10001085static loff_t
Ross Zwisler11c59c92016-11-08 11:32:46 +11001086dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
Christoph Hellwiga254e562016-09-19 11:24:49 +10001087 struct iomap *iomap)
1088{
Dan Williamscccbce62017-01-27 13:31:42 -08001089 struct block_device *bdev = iomap->bdev;
1090 struct dax_device *dax_dev = iomap->dax_dev;
Christoph Hellwiga254e562016-09-19 11:24:49 +10001091 struct iov_iter *iter = data;
1092 loff_t end = pos + length, done = 0;
1093 ssize_t ret = 0;
Dan Williamsa77d4782018-03-16 17:36:44 -07001094 size_t xfer;
Dan Williamscccbce62017-01-27 13:31:42 -08001095 int id;
Christoph Hellwiga254e562016-09-19 11:24:49 +10001096
1097 if (iov_iter_rw(iter) == READ) {
1098 end = min(end, i_size_read(inode));
1099 if (pos >= end)
1100 return 0;
1101
1102 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1103 return iov_iter_zero(min(length, end - pos), iter);
1104 }
1105
1106 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1107 return -EIO;
1108
Jan Karae3fce682016-08-10 17:10:28 +02001109 /*
1110 * Write can allocate block for an area which has a hole page mapped
1111 * into page tables. We have to tear down these mappings so that data
1112 * written by write(2) is visible in mmap.
1113 */
Jan Karacd656372017-05-12 15:46:50 -07001114 if (iomap->flags & IOMAP_F_NEW) {
Jan Karae3fce682016-08-10 17:10:28 +02001115 invalidate_inode_pages2_range(inode->i_mapping,
1116 pos >> PAGE_SHIFT,
1117 (end - 1) >> PAGE_SHIFT);
1118 }
1119
Dan Williamscccbce62017-01-27 13:31:42 -08001120 id = dax_read_lock();
Christoph Hellwiga254e562016-09-19 11:24:49 +10001121 while (pos < end) {
1122 unsigned offset = pos & (PAGE_SIZE - 1);
Dan Williamscccbce62017-01-27 13:31:42 -08001123 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1124 const sector_t sector = dax_iomap_sector(iomap, pos);
Christoph Hellwiga254e562016-09-19 11:24:49 +10001125 ssize_t map_len;
Dan Williamscccbce62017-01-27 13:31:42 -08001126 pgoff_t pgoff;
1127 void *kaddr;
Christoph Hellwiga254e562016-09-19 11:24:49 +10001128
Michal Hockod1908f52017-02-03 13:13:26 -08001129 if (fatal_signal_pending(current)) {
1130 ret = -EINTR;
1131 break;
1132 }
1133
Dan Williamscccbce62017-01-27 13:31:42 -08001134 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1135 if (ret)
1136 break;
1137
1138 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
Huaisheng Ye86ed9132018-07-30 15:15:48 +08001139 &kaddr, NULL);
Christoph Hellwiga254e562016-09-19 11:24:49 +10001140 if (map_len < 0) {
1141 ret = map_len;
1142 break;
1143 }
1144
Dan Williamscccbce62017-01-27 13:31:42 -08001145 map_len = PFN_PHYS(map_len);
1146 kaddr += offset;
Christoph Hellwiga254e562016-09-19 11:24:49 +10001147 map_len -= offset;
1148 if (map_len > end - pos)
1149 map_len = end - pos;
1150
Ross Zwislera2e050f2017-09-06 16:18:54 -07001151 /*
1152 * The userspace address for the memory copy has already been
1153 * validated via access_ok() in either vfs_read() or
1154 * vfs_write(), depending on which operation we are doing.
1155 */
Christoph Hellwiga254e562016-09-19 11:24:49 +10001156 if (iov_iter_rw(iter) == WRITE)
Dan Williamsa77d4782018-03-16 17:36:44 -07001157 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
Dan Williamsfec53772017-05-29 21:56:49 -07001158 map_len, iter);
Christoph Hellwiga254e562016-09-19 11:24:49 +10001159 else
Dan Williamsa77d4782018-03-16 17:36:44 -07001160 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
Dan Williamsb3a9a0c2018-05-02 06:46:33 -07001161 map_len, iter);
Christoph Hellwiga254e562016-09-19 11:24:49 +10001162
Dan Williamsa77d4782018-03-16 17:36:44 -07001163 pos += xfer;
1164 length -= xfer;
1165 done += xfer;
1166
1167 if (xfer == 0)
1168 ret = -EFAULT;
1169 if (xfer < map_len)
1170 break;
Christoph Hellwiga254e562016-09-19 11:24:49 +10001171 }
Dan Williamscccbce62017-01-27 13:31:42 -08001172 dax_read_unlock(id);
Christoph Hellwiga254e562016-09-19 11:24:49 +10001173
1174 return done ? done : ret;
1175}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory. The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
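
/*
 * Example (an illustrative sketch, not part of this file): a typical
 * DAX-aware ->read_iter() takes the inode lock shared and hands the
 * iteration off to dax_iomap_rw().  Everything prefixed "example_" is a
 * hypothetical stand-in; example_iomap_ops would be the filesystem's own
 * iomap_ops (e.g. ext4_iomap_ops):
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */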

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
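
/*
 * For reference (userspace side; a sketch assuming the filesystem and
 * device support MAP_SYNC): an application requests synchronous faults
 * by combining MAP_SYNC with MAP_SHARED_VALIDATE, so the flag is rejected
 * with EOPNOTSUPP rather than silently ignored when unsupported:
 *
 *	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * On such a mapping a write fault must not complete until the metadata
 * needed to reach the faulted range is durable, which is the case
 * dax_fault_is_synchronous() above detects via IOMAP_F_DIRTY.
 */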

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether the offset is beyond the end of the file now. The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole, so this is a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which
	 * means that we never have to deal with more than a single extent
	 * here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing a synchronous page fault and the inode
		 * needs fsync, we can insert the PTE into the page tables
		 * only after that happens. Skip the insertion for now and
		 * return the pfn so that the caller can insert it after
		 * fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	ret = dax_fault_return(error);
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PTE we have
		 * installed). Just ignore the error from ->iomap_end since
		 * we cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether the offset is beyond the end of the file now. The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole, so this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing a synchronous page fault and the inode
		 * needs fsync, we can insert the PMD into the page tables
		 * only after that happens. Skip the insertion for now and
		 * return the pfn so that the caller can insert it after
		 * fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PMD we have
		 * installed). Just ignore the error from ->iomap_end since
		 * we cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
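
/*
 * Example (an illustrative sketch only): a filesystem's ->fault() /
 * ->huge_fault() handler typically wraps dax_iomap_fault() roughly as
 * below, completing synchronous faults with dax_finish_sync_fault()
 * (defined further down).  Names prefixed "example_" - the iomap ops and
 * the lock serializing faults against truncate - are hypothetical
 * stand-ins for fs-specific pieces:
 *
 *	static vm_fault_t example_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		bool write = vmf->flags & FAULT_FLAG_WRITE;
 *		vm_fault_t ret;
 *		pfn_t pfn;
 *
 *		if (write) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vmf->vma->vm_file);
 *		}
 *		example_fault_lock(inode);
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				&example_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		example_fault_unlock(inode);
 *		if (write)
 *			sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */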

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmapped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (order == 0 && !dax_is_pte_entry(entry)) ||
	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
				pfn, true);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and then inserts the appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);