/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use lowest available bit in exceptional entry for locking, one bit for
 * the entry size (PMD) and two more to tell us if the entry is a zero page or
 * an empty entry that is just used for locking. In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

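/*
 * Illustrative sketch (an assumption for exposition, not code used by this
 * file): with the bit layout above, a locked PTE-sized entry for pfn 0x1234
 * round-trips through the helpers as follows:
 *
 *	void *entry = dax_radix_locked_entry(0x1234, 0);
 *
 *	// the exceptional-entry marker and RADIX_DAX_ENTRY_LOCK live in the
 *	// low bits, the pfn sits at RADIX_DAX_SHIFT and above
 *	WARN_ON(dax_radix_pfn(entry) != 0x1234);
 *	WARN_ON(dax_radix_order(entry) != 0);
 *
 * A PMD-sized entry is built with dax_radix_locked_entry(pfn, RADIX_DAX_PMD)
 * and reports an order of PMD_SHIFT - PAGE_SHIFT instead.
 */
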
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD. This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
		int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked. Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * exceptional entry and return it. The caller must call
 * put_unlocked_mapping_entry() when he decided not to lock the entry or
 * put_locked_mapping_entry() when he locked the entry and now wants to
 * unlock it.
 *
 * Must be called with the i_pages lock held.
 */
static void *__get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		bool revalidate;

		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
					  &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xa_unlock_irq(&mapping->i_pages);
		revalidate = wait_fn();
		finish_wait(wq, &ewait.wait);
		xa_lock_irq(&mapping->i_pages);
		if (revalidate)
			return ERR_PTR(-EAGAIN);
	}
}

static bool entry_wait(void)
{
	schedule();
	/*
	 * Never return an ERR_PTR() from
	 * __get_unlocked_mapping_entry(), just keep looping.
	 */
	return false;
}

static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
}

static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	xa_lock_irq(&mapping->i_pages);
	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	unlock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

static bool entry_wait_revalidate(void)
{
	rcu_read_unlock();
	schedule();
	rcu_read_lock();

	/*
	 * Tell __get_unlocked_mapping_entry() to take a break, we need
	 * to revalidate page->mapping after dropping locks
	 */
	return true;
}

bool dax_lock_mapping_entry(struct page *page)
{
	pgoff_t index;
	struct inode *inode;
	bool did_lock = false;
	void *entry = NULL, **slot;
	struct address_space *mapping;

	rcu_read_lock();
	for (;;) {
		mapping = READ_ONCE(page->mapping);

		if (!dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		inode = mapping->host;
		if (S_ISCHR(inode->i_mode)) {
			did_lock = true;
			break;
		}

		xa_lock_irq(&mapping->i_pages);
		if (mapping != page->mapping) {
			xa_unlock_irq(&mapping->i_pages);
			continue;
		}
		index = page->index;

		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
				entry_wait_revalidate);
		if (!entry) {
			xa_unlock_irq(&mapping->i_pages);
			break;
		} else if (IS_ERR(entry)) {
			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
			continue;
		}
		lock_slot(mapping, slot);
		did_lock = true;
		xa_unlock_irq(&mapping->i_pages);
		break;
	}
	rcu_read_unlock();

	return did_lock;
}

void dax_unlock_mapping_entry(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	if (S_ISCHR(inode->i_mode))
		return;

	unlock_mapping_entry(mapping, page->index);
}

/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * the i_pages lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		xa_unlock_irq(&mapping->i_pages);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		xa_lock_irq(&mapping->i_pages);

		if (!entry) {
			/*
			 * We needed to drop the i_pages lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->i_pages, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				xa_unlock_irq(&mapping->i_pages);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->i_pages, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->i_pages, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			xa_unlock_irq(&mapping->i_pages);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		xa_unlock_irq(&mapping->i_pages);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	xa_unlock_irq(&mapping->i_pages);
	return entry;
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct page *page = NULL;
	struct pagevec pvec;
	pgoff_t index, end;
	unsigned i;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	pagevec_init(&pvec);
	index = 0;
	end = -1;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the pagevec_lookup and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *pvec_ent = pvec.pages[i];
			void *entry;

			index = indices[i];
			if (index >= end)
				break;

			if (!radix_tree_exceptional_entry(pvec_ent))
				continue;

			xa_lock_irq(&mapping->i_pages);
			entry = get_unlocked_mapping_entry(mapping, index, NULL);
			if (entry)
				page = dax_busy_page(entry);
			put_unlocked_mapping_entry(mapping, index, entry);
			xa_unlock_irq(&mapping->i_pages);
			if (page)
				break;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;

		if (page)
			break;
	}
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
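
/*
 * Illustrative sketch of a caller (an assumption modelled on how a
 * filesystem might drain DMA references before truncating; the wait
 * primitive shown is one possible choice, not code from this file):
 *
 *	struct page *page;
 *
 *	// the caller already holds the locks that block new mappings
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		// wait for the elevated reference to be dropped
 *		wait_var_event(&page->_refcount,
 *			       page_ref_count(page) == 1);
 *	}
 */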

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *pages = &mapping->i_pages;

	xa_lock_irq(pages);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(pages, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	xa_unlock_irq(pages);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	xa_lock_irq(pages);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(pages, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(pages, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

	xa_unlock_irq(pages);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	xa_lock_irq(pages);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
	xa_unlock_irq(pages);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xa_lock_irq(pages);
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(pages);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
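
/*
 * Illustrative sketch (an assumption showing how a filesystem typically
 * wires this helper into its ->writepages() method; the "foo" names are
 * hypothetical):
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */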

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	void *kaddr;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   &kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	pfn_t pfn;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	pfn = page_to_pfn_t(zero_page);
	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
			false);
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
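
/*
 * Illustrative sketch of a caller (an assumption, not code from this file):
 * zeroing 'bytes' bytes that start 'offset' bytes into the DAX block at
 * 'sector' might look like:
 *
 *	ret = __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
 *				    offset, bytes);
 *	if (ret)
 *		return ret;
 */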
1182
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * A write can allocate a block for an area which has a hole page
	 * mapped into page tables. We have to tear down these mappings so
	 * that data written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory. The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
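
/*
 * Hedged usage sketch, not part of this file: a filesystem's ->read_iter()
 * typically takes the inode lock shared and hands the iterator straight to
 * dax_iomap_rw() together with its own iomap_ops.  The names below
 * (example_read_iter, example_iomap_ops) are hypothetical:
 *
 *	static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */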

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
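
/*
 * For reference, a vma only carries VM_SYNC (and can therefore take the
 * synchronous fault path checked above) when userspace created the mapping
 * with MAP_SYNC.  A minimal sketch of such a caller, assuming "fd" refers to
 * a file on a DAX-capable filesystem and "len" is the mapping size:
 *
 *	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *	if (addr == MAP_FAILED)
 *		fall back to MAP_SHARED plus explicit fsync()/msync()
 */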

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether the offset isn't beyond the end of file now. The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing a synchronous page fault and the inode
		 * needs fsync, we can insert the PTE into the page tables
		 * only after that happens. Skip the insertion for now and
		 * return the pfn so that the caller can insert it after
		 * fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PTE we have
		 * installed). Just ignore any error from ->iomap_end since
		 * we cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether the offset isn't beyond the end of file now. The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (colour) matches
	 * the PMD offset from the start of the file. This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD. If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here. We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);

		/*
		 * If we are doing a synchronous page fault and the inode
		 * needs fsync, we can insert the PMD into the page tables
		 * only after that happens. Skip the insertion for now and
		 * return the pfn so that the caller can insert it after
		 * fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PMD we have
		 * installed). Just ignore any error from ->iomap_end since
		 * we cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
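
/*
 * Hedged usage sketch, not part of this file: a filesystem's ->fault handler
 * typically wraps dax_iomap_fault() roughly as below, serializing against
 * truncate with its own lock and bracketing write faults with
 * sb_start_pagefault()/sb_end_pagefault().  The names example_fault,
 * example_dax_sem and example_iomap_ops are hypothetical; a filesystem that
 * does not advertise MAP_SYNC support can pass a NULL pfnp (see the sketch
 * after dax_finish_sync_fault() for the synchronous-fault variant):
 *
 *	static vm_fault_t example_fault(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		bool write = vmf->flags & FAULT_FLAG_WRITE;
 *		vm_fault_t ret;
 *
 *		if (write) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vmf->vma->vm_file);
 *		}
 *		down_read(&example_dax_sem);
 *		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL,
 *				&example_iomap_ops);
 *		up_read(&example_dax_sem);
 *		if (write)
 *			sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */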

/**
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file. It also marks the corresponding radix tree entry
 * as dirty.
 */
static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	vm_fault_t ret;

	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting the entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		xa_unlock_irq(&mapping->i_pages);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	switch (pe_size) {
	case PE_SIZE_PTE:
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
		break;
#endif
	default:
		ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and then inserts the appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
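
/*
 * Hedged caller-side sketch, not part of this file: a filesystem that does
 * advertise MAP_SYNC support passes a pfn_t to dax_iomap_fault() and, when
 * the fault needs a metadata sync before the writeable entry may be
 * installed, completes it with dax_finish_sync_fault().  example_iomap_ops
 * is a hypothetical name:
 *
 *	pfn_t pfn;
 *	vm_fault_t ret;
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 */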