// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: returns an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm;

	lockdep_assert_held_exclusive(&mm->mmap_sem);

	/* Abuse the page_table_lock to also protect mm->hmm. */
	spin_lock(&mm->page_table_lock);
	hmm = mm->hmm;
	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
		goto out_unlock;
	spin_unlock(&mm->page_table_lock);

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->mm = mm;

	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	mmgrab(hmm->mm);

	/*
	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
	 * still NULL or 0 kref, and is safe to update.
	 */
	spin_lock(&mm->page_table_lock);
	mm->hmm = hmm;

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
	struct hmm *hmm = container_of(rcu, struct hmm, rcu);

	mmdrop(hmm->mm);
	kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);

	spin_lock(&hmm->mm->page_table_lock);
	if (hmm->mm->hmm == hmm)
		hmm->mm->hmm = NULL;
	spin_unlock(&hmm->mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/* Bail out if hmm is in the process of being freed */
	if (!kref_get_unless_zero(&hmm->kref))
		return;

	/*
	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	if (!kref_get_unless_zero(&hmm->kref))
		return 0;

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (rc) {
			if (WARN_ON(update.blockable || rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	if (!kref_get_unless_zero(&hmm->kref))
		return;

	notifiers_decrement(hmm);
	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	lockdep_assert_held_exclusive(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
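
/*
 * Illustrative sketch of how a device driver might register a mirror; this is
 * not built as part of this file and the example_* names are hypothetical.
 * Only hmm_mirror_register()/hmm_mirror_unregister() and the hmm_mirror_ops
 * callbacks are real API. Note that registration must be done with the
 * exclusive mmap_sem held, as asserted above:
 *
 *	static const struct hmm_mirror_ops example_mirror_ops = {
 *		.sync_cpu_device_pagetables = example_sync_cpu_device_pagetables,
 *		.release = example_release,
 *	};
 *
 *	static int example_mirror_setup(struct example_device *edev,
 *					struct mm_struct *mm)
 *	{
 *		int ret;
 *
 *		edev->mirror.ops = &example_mirror_ops;
 *		down_write(&mm->mmap_sem);
 *		ret = hmm_mirror_register(&edev->mirror, mm);
 *		up_write(&mm->mmap_sem);
 *		return ret;
 *	}
 *
 * The mirror stays active until the driver calls hmm_mirror_unregister().
 */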

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used in two fashions. In the first one the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * each of those faults. In the second one the HMM user wants to
	 * pre-fault a range with specific flags. For the latter it would be
	 * a waste to have the user pre-fill the pfn array with a default
	 * flags value. (See the illustrative sketch after this function.)
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
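
/*
 * Illustrative sketch of the two ways a caller can drive the logic above; the
 * values shown are an assumption about a possible caller and are not built as
 * part of this file. Either the caller pre-fills range->pfns[] with per-page
 * request flags, or it uses default_flags/pfn_flags_mask to pre-fault a whole
 * range with one setting:
 *
 *	// Pre-fault every page of the range for read, ignoring any per-pfn
 *	// request flags the caller may have left in range.pfns[]:
 *	range.default_flags = range.flags[HMM_PFN_VALID];
 *	range.pfn_flags_mask = 0;
 *
 *	// Or honour only the per-pfn request flags, with no range-wide
 *	// default:
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = ~0ULL;
 */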

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
						swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;


again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, ie either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap the pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}


	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking changes to CPU page table over a range
 * @range: range
 * @mirror: the mirror struct (and thus the mm) to register the range against
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h.
 */
int hmm_range_register(struct hmm_range *range,
		       struct hmm_mirror *mirror,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	kref_get(&hmm->kref);
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
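
/*
 * Illustrative sketch of a typical registration sequence; not built as part
 * of this file, and EXAMPLE_TIMEOUT_MSEC plus the edev mirror are hypothetical
 * driver-side names:
 *
 *	ret = hmm_range_register(&range, &edev->mirror, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	hmm_range_wait_until_valid(&range, EXAMPLE_TIMEOUT_MSEC);
 *	// ... call hmm_range_snapshot()/hmm_range_fault() with mmap_sem held ...
 *	hmm_range_unregister(&range);
 *
 * The wait is only an optimization; validity must still be re-checked under
 * the driver's own lock before the result is used (see hmm_range_valid()).
 */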

/*
 * hmm_range_unregister() - stop tracking changes to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mm);
	hmm_put(hmm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer. Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	lockdep_assert_held(&hmm->mm->mmap_sem);
	do {
		/* If the range is no longer valid, force a retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
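
/*
 * Illustrative sketch of the retry pattern for hmm_range_snapshot(); not built
 * as part of this file. driver_lock stands in for whatever lock the driver
 * uses to serialize against its sync_cpu_device_pagetables() callback:
 *
 *	again:
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_snapshot(&range);
 *		if (ret < 0) {
 *			up_read(&mm->mmap_sem);
 *			if (ret == -EAGAIN)
 *				goto again;
 *			return ret;
 *		}
 *		mutex_lock(&driver_lock);
 *		if (!hmm_range_valid(&range)) {
 *			mutex_unlock(&driver_lock);
 *			up_read(&mm->mmap_sem);
 *			goto again;
 *		}
 *		// use range.pfns[] to program the device page table
 *		mutex_unlock(&driver_lock);
 *		up_read(&mm->mmap_sem);
 */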

/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL  invalid arguments or mm or virtual address are in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mm->mmap_sem);

	do {
		/* If the range is no longer valid, force a retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
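
/*
 * Illustrative sketch of calling hmm_range_fault() with block == true on a
 * registered range; not built as part of this file, and the retry policy is
 * an assumption about a possible caller. Note that when the function returns
 * -EAGAIN it has already dropped mmap_sem itself (see above):
 *
 *	again:
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(&range, true);
 *		if (ret == -EAGAIN)
 *			// mmap_sem was already dropped above, just retry
 *			goto again;
 *		if (ret < 0) {
 *			up_read(&mm->mmap_sem);
 *			return ret;
 *		}
 *		// take the driver lock, re-check hmm_range_valid(), then use
 *		// range.pfns[] before dropping the locks
 *		up_read(&mm->mmap_sem);
 */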

/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which pages are dma mapped
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *          dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of dumb code duplicated across many
		 * architectures.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map it bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
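
/*
 * Illustrative sketch of mapping a registered range for device DMA; not built
 * as part of this file. edev->dev and daddrs (one dma_addr_t per page of the
 * range) are hypothetical driver-side storage:
 *
 *	again:
 *		down_read(&mm->mmap_sem);
 *		mapped = hmm_range_dma_map(&range, edev->dev, daddrs, true);
 *		if (mapped == -EAGAIN)
 *			// mmap_sem was dropped by the faulting path, retry
 *			goto again;
 *		up_read(&mm->mmap_sem);
 *		if (mapped < 0)
 *			return mapped;
 *		// ... program the device with daddrs[], then later: ...
 *		hmm_range_dma_unmap(&range, NULL, edev->dev, daddrs, true);
 */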
1226
1227/**
1228 * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
1229 * @range: range being unmapped
1230 * @vma: the vma against which the range (optional)
1231 * @device: device against which dma map was done
1232 * @daddrs: dma address of mapped pages
1233 * @dirty: dirty page if it had the write flag set
Ralph Campbell085ea252019-05-06 16:29:39 -07001234 * Return: number of page unmapped on success, -EINVAL otherwise
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001235 *
1236 * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
1237 * to the sync_cpu_device_pagetables() callback so that it is safe here to
1238 * call set_page_dirty(). Caller must also take appropriate locks to avoid
1239 * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
1240 */
1241long hmm_range_dma_unmap(struct hmm_range *range,
1242 struct vm_area_struct *vma,
1243 struct device *device,
1244 dma_addr_t *daddrs,
1245 bool dirty)
1246{
1247 unsigned long i, npages;
1248 long cpages = 0;
1249
1250 /* Sanity check. */
1251 if (range->end <= range->start)
1252 return -EINVAL;
1253 if (!daddrs)
1254 return -EINVAL;
1255 if (!range->pfns)
1256 return -EINVAL;
1257
1258 npages = (range->end - range->start) >> PAGE_SHIFT;
1259 for (i = 0; i < npages; ++i) {
1260 enum dma_data_direction dir = DMA_TO_DEVICE;
1261 struct page *page;
1262
Jérôme Glisse391aab12019-05-13 17:20:31 -07001263 page = hmm_device_entry_to_page(range, range->pfns[i]);
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001264 if (page == NULL)
1265 continue;
1266
1267 /* If it is read and write then map bi-directional. */
1268 if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
1269 dir = DMA_BIDIRECTIONAL;
1270
1271 /*
1272 * See comments in function description on why it is
1273 * safe here to call set_page_dirty()
1274 */
1275 if (dirty)
1276 set_page_dirty(page);
1277 }
1278
1279 /* Unmap and clear pfns/dma address */
1280 dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
1281 range->pfns[i] = range->values[HMM_PFN_NONE];
1282 /* FIXME see comments in hmm_range_dma_map() */
1283 daddrs[i] = 0;
1284 cpages++;
1285 }
1286
1287 return cpages;
1288}
1289EXPORT_SYMBOL(hmm_range_dma_unmap);
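/*
 * Editorial sketch, not part of the upstream file: a minimal, hedged example
 * of tearing down a mapping created with hmm_range_dma_map().  Only the
 * hmm_range_dma_unmap() signature above is relied on; the range setup, the
 * earlier map call and the "my_" names are hypothetical placeholders.
 */
static void my_driver_dma_teardown(struct hmm_range *range,
				   struct device *dev,
				   dma_addr_t *daddrs,
				   bool device_wrote)
{
	long ret;

	/* vma is optional (NULL here); pass dirty only when the device wrote. */
	ret = hmm_range_dma_unmap(range, NULL, dev, daddrs, device_wrote);
	if (ret < 0)
		dev_dbg(dev, "hmm unmap failed: %ld\n", ret);
}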
Jérôme Glissec0b12402017-09-08 16:11:27 -07001290#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001291
1292
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001293#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001294struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
1295 unsigned long addr)
1296{
1297 struct page *page;
1298
1299 page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
1300 if (!page)
1301 return NULL;
1302 lock_page(page);
1303 return page;
1304}
1305EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
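/*
 * Editorial sketch, not part of the upstream file: hmm_vma_alloc_locked_page()
 * returns the page already locked, so a device-memory fault handler can use it
 * to allocate the destination system page and fill it before unlocking.  The
 * copy step and the "my_" name are hypothetical.
 */
static struct page *my_alloc_fault_dst(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *dst;

	dst = hmm_vma_alloc_locked_page(vma, addr);
	if (!dst)
		return NULL;	/* caller typically maps this to VM_FAULT_OOM */
	/* dst is locked; the caller copies device data into it, then unlocks. */
	return dst;
}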
1306
1307
1308static void hmm_devmem_ref_release(struct percpu_ref *ref)
1309{
1310 struct hmm_devmem *devmem;
1311
1312 devmem = container_of(ref, struct hmm_devmem, ref);
1313 complete(&devmem->completion);
1314}
1315
1316static void hmm_devmem_ref_exit(void *data)
1317{
1318 struct percpu_ref *ref = data;
1319 struct hmm_devmem *devmem;
1320
1321 devmem = container_of(ref, struct hmm_devmem, ref);
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001322 wait_for_completion(&devmem->completion);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001323 percpu_ref_exit(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001324}
1325
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001326static void hmm_devmem_ref_kill(struct percpu_ref *ref)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001327{
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001328 percpu_ref_kill(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001329}
1330
Souptick Joarderb57e622e62019-03-11 23:28:10 -07001331static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001332 unsigned long addr,
1333 const struct page *page,
1334 unsigned int flags,
1335 pmd_t *pmdp)
1336{
1337 struct hmm_devmem *devmem = page->pgmap->data;
1338
1339 return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
1340}
1341
1342static void hmm_devmem_free(struct page *page, void *data)
1343{
1344 struct hmm_devmem *devmem = data;
1345
Dan Williams2fa147b2018-07-13 21:50:01 -07001346 page->mapping = NULL;
1347
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001348 devmem->ops->free(devmem, page);
1349}
1350
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001351/*
1352 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
1353 *
1354 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
1355 * @device: device struct to bind the resource to
1356 * @size: size in bytes of the device memory to add
Ralph Campbell085ea252019-05-06 16:29:39 -07001357 * Return: pointer to the new hmm_devmem struct, ERR_PTR otherwise
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001358 *
1359 * This function first finds an empty range of physical addresses big enough to
1360 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
1361 * in turn allocates struct pages. It does not do anything beyond that; all
1362 * events affecting the memory will go through the various callbacks provided
1363 * by the hmm_devmem_ops struct.
1364 *
1365 * The device driver should call this function during device initialization and
1366 * is then responsible for memory management. HMM only provides helpers.
1367 */
1368struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1369 struct device *device,
1370 unsigned long size)
1371{
1372 struct hmm_devmem *devmem;
1373 resource_size_t addr;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001374 void *result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001375 int ret;
1376
Dan Williamse76384882018-05-16 11:46:08 -07001377 dev_pagemap_get_ops();
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001378
Dan Williams58ef15b2018-12-28 00:35:07 -08001379 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001380 if (!devmem)
1381 return ERR_PTR(-ENOMEM);
1382
1383 init_completion(&devmem->completion);
1384 devmem->pfn_first = -1UL;
1385 devmem->pfn_last = -1UL;
1386 devmem->resource = NULL;
1387 devmem->device = device;
1388 devmem->ops = ops;
1389
1390 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1391 0, GFP_KERNEL);
1392 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001393 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001394
Dan Williams58ef15b2018-12-28 00:35:07 -08001395 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001396 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001397 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001398
1399 size = ALIGN(size, PA_SECTION_SIZE);
1400 addr = min((unsigned long)iomem_resource.end,
1401 (1UL << MAX_PHYSMEM_BITS) - 1);
1402 addr = addr - size + 1UL;
1403
1404 /*
1405 * FIXME add a new helper to quickly walk resource tree and find free
1406 * range
1407 *
1408 * FIXME what about ioport_resource resource ?
1409 */
1410 for (; addr > size && addr >= iomem_resource.start; addr -= size) {
1411 ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
1412 if (ret != REGION_DISJOINT)
1413 continue;
1414
1415 devmem->resource = devm_request_mem_region(device, addr, size,
1416 dev_name(device));
Dan Williams58ef15b2018-12-28 00:35:07 -08001417 if (!devmem->resource)
1418 return ERR_PTR(-ENOMEM);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001419 break;
1420 }
Dan Williams58ef15b2018-12-28 00:35:07 -08001421 if (!devmem->resource)
1422 return ERR_PTR(-ERANGE);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001423
1424 devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
1425 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1426 devmem->pfn_last = devmem->pfn_first +
1427 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001428 devmem->page_fault = hmm_devmem_fault;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001429
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001430 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1431 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001432 devmem->pagemap.page_free = hmm_devmem_free;
1433 devmem->pagemap.altmap_valid = false;
1434 devmem->pagemap.ref = &devmem->ref;
1435 devmem->pagemap.data = devmem;
1436 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001437
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001438 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1439 if (IS_ERR(result))
1440 return result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001441 return devmem;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001442}
Dan Williams02917e92018-12-28 00:35:15 -08001443EXPORT_SYMBOL_GPL(hmm_devmem_add);
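/*
 * Editorial sketch, not part of the upstream file: a minimal example of
 * registering device memory with hmm_devmem_add().  The callback signatures
 * mirror the ops->fault/ops->free uses above; the callback bodies and the
 * "my_" names are hypothetical placeholders.
 */
static vm_fault_t my_devmem_fault(struct hmm_devmem *devmem,
				  struct vm_area_struct *vma,
				  unsigned long addr,
				  const struct page *page,
				  unsigned int flags,
				  pmd_t *pmdp)
{
	/* Migrate the device page back to system memory (device specific). */
	return VM_FAULT_SIGBUS;
}

static void my_devmem_free(struct hmm_devmem *devmem, struct page *page)
{
	/* Return the backing device memory to the driver's allocator. */
}

static const struct hmm_devmem_ops my_devmem_ops = {
	.fault	= my_devmem_fault,
	.free	= my_devmem_free,
};

static int my_driver_add_memory(struct device *dev, unsigned long size)
{
	struct hmm_devmem *devmem;

	devmem = hmm_devmem_add(&my_devmem_ops, dev, size);
	if (IS_ERR(devmem))
		return PTR_ERR(devmem);
	/* Device pages now span devmem->pfn_first .. devmem->pfn_last. */
	return 0;
}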
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001444
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001445struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1446 struct device *device,
1447 struct resource *res)
1448{
1449 struct hmm_devmem *devmem;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001450 void *result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001451 int ret;
1452
1453 if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
1454 return ERR_PTR(-EINVAL);
1455
Dan Williamse76384882018-05-16 11:46:08 -07001456 dev_pagemap_get_ops();
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001457
Dan Williams58ef15b2018-12-28 00:35:07 -08001458 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001459 if (!devmem)
1460 return ERR_PTR(-ENOMEM);
1461
1462 init_completion(&devmem->completion);
1463 devmem->pfn_first = -1UL;
1464 devmem->pfn_last = -1UL;
1465 devmem->resource = res;
1466 devmem->device = device;
1467 devmem->ops = ops;
1468
1469 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1470 0, GFP_KERNEL);
1471 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001472 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001473
Dan Williams58ef15b2018-12-28 00:35:07 -08001474 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
1475 &devmem->ref);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001476 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001477 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001478
1479 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1480 devmem->pfn_last = devmem->pfn_first +
1481 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001482 devmem->page_fault = hmm_devmem_fault;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001483
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001484 devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
1485 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001486 devmem->pagemap.page_free = hmm_devmem_free;
1487 devmem->pagemap.altmap_valid = false;
1488 devmem->pagemap.ref = &devmem->ref;
1489 devmem->pagemap.data = devmem;
1490 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001491
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001492 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1493 if (IS_ERR(result))
1494 return result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001495 return devmem;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001496}
Dan Williams02917e92018-12-28 00:35:15 -08001497EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001498
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001499/*
Jérôme Glisse858b54d2017-09-08 16:12:02 -07001500 * A device driver that wants to handle multiple devices' memory through a
1501 * single fake device can use hmm_device to do so. This is purely a helper
1502 * and it is not needed in order to use any HMM functionality.
1503 */
1504#define HMM_DEVICE_MAX 256
1505
1506static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
1507static DEFINE_SPINLOCK(hmm_device_lock);
1508static struct class *hmm_device_class;
1509static dev_t hmm_device_devt;
1510
1511static void hmm_device_release(struct device *device)
1512{
1513 struct hmm_device *hmm_device;
1514
1515 hmm_device = container_of(device, struct hmm_device, device);
1516 spin_lock(&hmm_device_lock);
1517 clear_bit(hmm_device->minor, hmm_device_mask);
1518 spin_unlock(&hmm_device_lock);
1519
1520 kfree(hmm_device);
1521}
1522
1523struct hmm_device *hmm_device_new(void *drvdata)
1524{
1525 struct hmm_device *hmm_device;
1526
1527 hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
1528 if (!hmm_device)
1529 return ERR_PTR(-ENOMEM);
1530
1531 spin_lock(&hmm_device_lock);
1532 hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
1533 if (hmm_device->minor >= HMM_DEVICE_MAX) {
1534 spin_unlock(&hmm_device_lock);
1535 kfree(hmm_device);
1536 return ERR_PTR(-EBUSY);
1537 }
1538 set_bit(hmm_device->minor, hmm_device_mask);
1539 spin_unlock(&hmm_device_lock);
1540
1541 dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
1542 hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
1543 hmm_device->minor);
1544 hmm_device->device.release = hmm_device_release;
1545 dev_set_drvdata(&hmm_device->device, drvdata);
1546 hmm_device->device.class = hmm_device_class;
1547 device_initialize(&hmm_device->device);
1548
1549 return hmm_device;
1550}
1551EXPORT_SYMBOL(hmm_device_new);
1552
1553void hmm_device_put(struct hmm_device *hmm_device)
1554{
1555 put_device(&hmm_device->device);
1556}
1557EXPORT_SYMBOL(hmm_device_put);
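/*
 * Editorial sketch, not part of the upstream file: using the fake-device
 * helper described above.  "my_drvdata" is a hypothetical driver cookie.
 */
static struct hmm_device *my_driver_get_fake_device(void *my_drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = hmm_device_new(my_drvdata);
	if (IS_ERR(hmm_device))
		return hmm_device;
	/*
	 * &hmm_device->device can now serve as the owner device, e.g. for
	 * hmm_devmem_add(); drop it with hmm_device_put() when done.
	 */
	return hmm_device;
}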
1558
1559static int __init hmm_init(void)
1560{
1561 int ret;
1562
1563 ret = alloc_chrdev_region(&hmm_device_devt, 0,
1564 HMM_DEVICE_MAX,
1565 "hmm_device");
1566 if (ret)
1567 return ret;
1568
1569 hmm_device_class = class_create(THIS_MODULE, "hmm_device");
1570 if (IS_ERR(hmm_device_class)) {
1571 unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
1572 return PTR_ERR(hmm_device_class);
1573 }
1574 return 0;
1575}
1576
1577device_initcall(hmm_init);
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001578#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */