Jérôme Glisse133ff0e2017-09-08 16:11:23 -07001/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
Jérôme Glissef813f212018-10-30 15:04:06 -070014 * Authors: Jérôme Glisse <jglisse@redhat.com>
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070015 */
16/*
17 * Refer to include/linux/hmm.h for information about heterogeneous memory
18 * management or HMM for short.
19 */
20#include <linux/mm.h>
21#include <linux/hmm.h>
Jérôme Glisse858b54d2017-09-08 16:12:02 -070022#include <linux/init.h>
Jérôme Glisseda4c3c72017-09-08 16:11:31 -070023#include <linux/rmap.h>
24#include <linux/swap.h>
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070025#include <linux/slab.h>
26#include <linux/sched.h>
Jérôme Glisse4ef589d2017-09-08 16:11:58 -070027#include <linux/mmzone.h>
28#include <linux/pagemap.h>
Jérôme Glisseda4c3c72017-09-08 16:11:31 -070029#include <linux/swapops.h>
30#include <linux/hugetlb.h>
Jérôme Glisse4ef589d2017-09-08 16:11:58 -070031#include <linux/memremap.h>
Jérôme Glisse7b2d55d22017-09-08 16:11:46 -070032#include <linux/jump_label.h>
Jérôme Glissec0b12402017-09-08 16:11:27 -070033#include <linux/mmu_notifier.h>
Jérôme Glisse4ef589d2017-09-08 16:11:58 -070034#include <linux/memory_hotplug.h>
35
36#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070037
Jérôme Glisse6b368cd2017-09-08 16:12:32 -070038#if IS_ENABLED(CONFIG_HMM_MIRROR)
Jérôme Glissec0b12402017-09-08 16:11:27 -070039static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
40
Jérôme Glisse704f3f22019-05-13 17:19:48 -070041static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070042{
Jérôme Glissec0b12402017-09-08 16:11:27 -070043 struct hmm *hmm = READ_ONCE(mm->hmm);
Jérôme Glisse704f3f22019-05-13 17:19:48 -070044
45 if (hmm && kref_get_unless_zero(&hmm->kref))
46 return hmm;
47
48 return NULL;
49}
50
51/**
52 * hmm_get_or_create - register HMM against an mm (HMM internal)
53 *
54 * @mm: mm struct to attach to
  55 * Returns: an HMM object, either a reference to the existing
  56 * (per-process) object or a newly created one.
57 *
58 * This is not intended to be used directly by device drivers. If mm already
  59 * has an HMM struct then it gets a reference on it and returns it. Otherwise
  60 * it allocates an HMM struct, initializes it, associates it with the mm and
61 * returns it.
62 */
63static struct hmm *hmm_get_or_create(struct mm_struct *mm)
64{
65 struct hmm *hmm = mm_get_hmm(mm);
Jérôme Glissec0b12402017-09-08 16:11:27 -070066 bool cleanup = false;
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070067
Jérôme Glissec0b12402017-09-08 16:11:27 -070068 if (hmm)
69 return hmm;
70
71 hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
72 if (!hmm)
73 return NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -070074 init_waitqueue_head(&hmm->wq);
Jérôme Glissec0b12402017-09-08 16:11:27 -070075 INIT_LIST_HEAD(&hmm->mirrors);
76 init_rwsem(&hmm->mirrors_sem);
Jérôme Glissec0b12402017-09-08 16:11:27 -070077 hmm->mmu_notifier.ops = NULL;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -070078 INIT_LIST_HEAD(&hmm->ranges);
Jérôme Glissea3e0d412019-05-13 17:20:01 -070079 mutex_init(&hmm->lock);
Jérôme Glisse704f3f22019-05-13 17:19:48 -070080 kref_init(&hmm->kref);
Jérôme Glissea3e0d412019-05-13 17:20:01 -070081 hmm->notifiers = 0;
82 hmm->dead = false;
Jérôme Glissec0b12402017-09-08 16:11:27 -070083 hmm->mm = mm;
84
Jérôme Glissec0b12402017-09-08 16:11:27 -070085 spin_lock(&mm->page_table_lock);
86 if (!mm->hmm)
87 mm->hmm = hmm;
88 else
89 cleanup = true;
90 spin_unlock(&mm->page_table_lock);
91
Ralph Campbell86a2d592018-10-30 15:04:14 -070092 if (cleanup)
93 goto error;
94
95 /*
  96 * We should only get here if we hold the mmap_sem in write mode, i.e. on
  97 * registration of the first mirror through hmm_mirror_register().
98 */
99 hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
100 if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
101 goto error_mm;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700102
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700103 return hmm;
Ralph Campbell86a2d592018-10-30 15:04:14 -0700104
105error_mm:
106 spin_lock(&mm->page_table_lock);
107 if (mm->hmm == hmm)
108 mm->hmm = NULL;
109 spin_unlock(&mm->page_table_lock);
110error:
111 kfree(hmm);
112 return NULL;
Jérôme Glisse133ff0e2017-09-08 16:11:23 -0700113}
114
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700115static void hmm_free(struct kref *kref)
116{
117 struct hmm *hmm = container_of(kref, struct hmm, kref);
118 struct mm_struct *mm = hmm->mm;
119
120 mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
121
122 spin_lock(&mm->page_table_lock);
123 if (mm->hmm == hmm)
124 mm->hmm = NULL;
125 spin_unlock(&mm->page_table_lock);
126
127 kfree(hmm);
128}
129
130static inline void hmm_put(struct hmm *hmm)
131{
132 kref_put(&hmm->kref, hmm_free);
133}
134
Jérôme Glisse133ff0e2017-09-08 16:11:23 -0700135void hmm_mm_destroy(struct mm_struct *mm)
136{
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700137 struct hmm *hmm;
138
139 spin_lock(&mm->page_table_lock);
140 hmm = mm_get_hmm(mm);
141 mm->hmm = NULL;
142 if (hmm) {
143 hmm->mm = NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700144 hmm->dead = true;
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700145 spin_unlock(&mm->page_table_lock);
146 hmm_put(hmm);
147 return;
148 }
149
150 spin_unlock(&mm->page_table_lock);
Jérôme Glisse133ff0e2017-09-08 16:11:23 -0700151}
Jérôme Glissec0b12402017-09-08 16:11:27 -0700152
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700153static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
Jérôme Glissec0b12402017-09-08 16:11:27 -0700154{
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700155 struct hmm *hmm = mm_get_hmm(mm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700156 struct hmm_mirror *mirror;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700157 struct hmm_range *range;
158
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700159 /* Report this HMM as dying. */
160 hmm->dead = true;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700161
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700162 /* Wake-up everyone waiting on any range. */
163 mutex_lock(&hmm->lock);
164 list_for_each_entry(range, &hmm->ranges, list) {
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700165 range->valid = false;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700166 }
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700167 wake_up_all(&hmm->wq);
168 mutex_unlock(&hmm->lock);
Ralph Campbelle1401512018-04-10 16:28:19 -0700169
170 down_write(&hmm->mirrors_sem);
171 mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
172 list);
173 while (mirror) {
174 list_del_init(&mirror->list);
175 if (mirror->ops->release) {
176 /*
 177 * Drop mirrors_sem so the callback can wait on any pending
 178 * work that might itself trigger a mmu_notifier callback
179 * and thus would deadlock with us.
180 */
181 up_write(&hmm->mirrors_sem);
182 mirror->ops->release(mirror);
183 down_write(&hmm->mirrors_sem);
184 }
185 mirror = list_first_entry_or_null(&hmm->mirrors,
186 struct hmm_mirror, list);
187 }
188 up_write(&hmm->mirrors_sem);
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700189
190 hmm_put(hmm);
Ralph Campbelle1401512018-04-10 16:28:19 -0700191}
192
Michal Hocko93065ac2018-08-21 21:52:33 -0700193static int hmm_invalidate_range_start(struct mmu_notifier *mn,
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700194 const struct mmu_notifier_range *nrange)
Jérôme Glissec0b12402017-09-08 16:11:27 -0700195{
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700196 struct hmm *hmm = mm_get_hmm(nrange->mm);
197 struct hmm_mirror *mirror;
Jérôme Glisseec131b22018-10-30 15:04:28 -0700198 struct hmm_update update;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700199 struct hmm_range *range;
200 int ret = 0;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700201
202 VM_BUG_ON(!hmm);
203
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700204 update.start = nrange->start;
205 update.end = nrange->end;
Jérôme Glisseec131b22018-10-30 15:04:28 -0700206 update.event = HMM_UPDATE_INVALIDATE;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700207 update.blockable = nrange->blockable;
208
209 if (nrange->blockable)
210 mutex_lock(&hmm->lock);
211 else if (!mutex_trylock(&hmm->lock)) {
212 ret = -EAGAIN;
213 goto out;
214 }
215 hmm->notifiers++;
216 list_for_each_entry(range, &hmm->ranges, list) {
217 if (update.end < range->start || update.start >= range->end)
218 continue;
219
220 range->valid = false;
221 }
222 mutex_unlock(&hmm->lock);
223
224 if (nrange->blockable)
225 down_read(&hmm->mirrors_sem);
226 else if (!down_read_trylock(&hmm->mirrors_sem)) {
227 ret = -EAGAIN;
228 goto out;
229 }
230 list_for_each_entry(mirror, &hmm->mirrors, list) {
231 int ret;
232
233 ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
234 if (!update.blockable && ret == -EAGAIN) {
235 up_read(&hmm->mirrors_sem);
236 ret = -EAGAIN;
237 goto out;
238 }
239 }
240 up_read(&hmm->mirrors_sem);
241
242out:
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700243 hmm_put(hmm);
244 return ret;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700245}
246
247static void hmm_invalidate_range_end(struct mmu_notifier *mn,
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700248 const struct mmu_notifier_range *nrange)
Jérôme Glissec0b12402017-09-08 16:11:27 -0700249{
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700250 struct hmm *hmm = mm_get_hmm(nrange->mm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700251
252 VM_BUG_ON(!hmm);
253
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700254 mutex_lock(&hmm->lock);
255 hmm->notifiers--;
256 if (!hmm->notifiers) {
257 struct hmm_range *range;
258
259 list_for_each_entry(range, &hmm->ranges, list) {
260 if (range->valid)
261 continue;
262 range->valid = true;
263 }
264 wake_up_all(&hmm->wq);
265 }
266 mutex_unlock(&hmm->lock);
267
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700268 hmm_put(hmm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700269}
270
271static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
Ralph Campbelle1401512018-04-10 16:28:19 -0700272 .release = hmm_release,
Jérôme Glissec0b12402017-09-08 16:11:27 -0700273 .invalidate_range_start = hmm_invalidate_range_start,
274 .invalidate_range_end = hmm_invalidate_range_end,
275};
276
277/*
278 * hmm_mirror_register() - register a mirror against an mm
279 *
280 * @mirror: new mirror struct to register
281 * @mm: mm to register against
282 *
283 * To start mirroring a process address space, the device driver must register
284 * an HMM mirror struct.
285 *
 286 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE!
287 */
288int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
289{
290 /* Sanity check */
291 if (!mm || !mirror || !mirror->ops)
292 return -EINVAL;
293
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700294 mirror->hmm = hmm_get_or_create(mm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700295 if (!mirror->hmm)
296 return -ENOMEM;
297
298 down_write(&mirror->hmm->mirrors_sem);
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700299 list_add(&mirror->list, &mirror->hmm->mirrors);
300 up_write(&mirror->hmm->mirrors_sem);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700301
302 return 0;
303}
304EXPORT_SYMBOL(hmm_mirror_register);
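/*
 * Editor's note: the following is an illustrative sketch, not part of the HMM
 * API. It shows roughly how a driver might wire up a mirror with
 * hmm_mirror_register()/hmm_mirror_unregister(). The example_* names are
 * hypothetical driver-side symbols; the authoritative callback prototypes are
 * in include/linux/hmm.h.
 *
 *	static int example_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *					const struct hmm_update *update)
 *	{
 *		// Invalidate device page table entries covering
 *		// [update->start, update->end) here.
 *		return 0;
 *	}
 *
 *	static void example_release(struct hmm_mirror *mirror)
 *	{
 *		// The mm is going away: tear down all device mappings.
 *	}
 *
 *	static const struct hmm_mirror_ops example_mirror_ops = {
 *		.sync_cpu_device_pagetables = example_sync_cpu_device_pagetables,
 *		.release = example_release,
 *	};
 *
 *	static int example_mirror_mm(struct hmm_mirror *mirror, struct mm_struct *mm)
 *	{
 *		int ret;
 *
 *		mirror->ops = &example_mirror_ops;
 *		down_write(&mm->mmap_sem);
 *		ret = hmm_mirror_register(mirror, mm);
 *		up_write(&mm->mmap_sem);
 *		return ret;
 *	}
 */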
305
306/*
307 * hmm_mirror_unregister() - unregister a mirror
308 *
 309 * @mirror: mirror struct to unregister
310 *
311 * Stop mirroring a process address space, and cleanup.
312 */
313void hmm_mirror_unregister(struct hmm_mirror *mirror)
314{
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700315 struct hmm *hmm = READ_ONCE(mirror->hmm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700316
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700317 if (hmm == NULL)
Jérôme Glissec01cbba2018-04-10 16:28:23 -0700318 return;
319
Jérôme Glissec0b12402017-09-08 16:11:27 -0700320 down_write(&hmm->mirrors_sem);
Ralph Campbelle1401512018-04-10 16:28:19 -0700321 list_del_init(&mirror->list);
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700322 /* To protect us against double unregister ... */
Jérôme Glissec01cbba2018-04-10 16:28:23 -0700323 mirror->hmm = NULL;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700324 up_write(&hmm->mirrors_sem);
Jérôme Glissec01cbba2018-04-10 16:28:23 -0700325
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700326 hmm_put(hmm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700327}
328EXPORT_SYMBOL(hmm_mirror_unregister);
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700329
Jérôme Glisse74eee182017-09-08 16:11:35 -0700330struct hmm_vma_walk {
331 struct hmm_range *range;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700332 struct dev_pagemap *pgmap;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700333 unsigned long last;
334 bool fault;
335 bool block;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700336};
337
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700338static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
339 bool write_fault, uint64_t *pfn)
Jérôme Glisse74eee182017-09-08 16:11:35 -0700340{
341 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
342 struct hmm_vma_walk *hmm_vma_walk = walk->private;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700343 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700344 struct vm_area_struct *vma = walk->vma;
Souptick Joarder50a7ca32018-08-17 15:44:47 -0700345 vm_fault_t ret;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700346
347 flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700348 flags |= write_fault ? FAULT_FLAG_WRITE : 0;
Souptick Joarder50a7ca32018-08-17 15:44:47 -0700349 ret = handle_mm_fault(vma, addr, flags);
350 if (ret & VM_FAULT_RETRY)
Jérôme Glisse73231612019-05-13 17:19:58 -0700351 return -EAGAIN;
Souptick Joarder50a7ca32018-08-17 15:44:47 -0700352 if (ret & VM_FAULT_ERROR) {
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700353 *pfn = range->values[HMM_PFN_ERROR];
Jérôme Glisse74eee182017-09-08 16:11:35 -0700354 return -EFAULT;
355 }
356
Jérôme Glisse73231612019-05-13 17:19:58 -0700357 return -EBUSY;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700358}
359
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700360static int hmm_pfns_bad(unsigned long addr,
361 unsigned long end,
362 struct mm_walk *walk)
363{
Jérôme Glissec7195472018-04-10 16:28:27 -0700364 struct hmm_vma_walk *hmm_vma_walk = walk->private;
365 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisseff05c0c2018-04-10 16:28:38 -0700366 uint64_t *pfns = range->pfns;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700367 unsigned long i;
368
369 i = (addr - range->start) >> PAGE_SHIFT;
370 for (; addr < end; addr += PAGE_SIZE, i++)
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700371 pfns[i] = range->values[HMM_PFN_ERROR];
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700372
373 return 0;
374}
375
Jérôme Glisse5504ed22018-04-10 16:28:46 -0700376/*
377 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
378 * @start: range virtual start address (inclusive)
379 * @end: range virtual end address (exclusive)
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700380 * @fault: should we fault or not ?
381 * @write_fault: write fault ?
Jérôme Glisse5504ed22018-04-10 16:28:46 -0700382 * @walk: mm_walk structure
Jérôme Glisse73231612019-05-13 17:19:58 -0700383 * Returns: 0 on success, -EBUSY after page fault, or page fault error
Jérôme Glisse5504ed22018-04-10 16:28:46 -0700384 *
385 * This function will be called whenever pmd_none() or pte_none() returns true,
386 * or whenever there is no page directory covering the virtual address range.
387 */
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700388static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
389 bool fault, bool write_fault,
390 struct mm_walk *walk)
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700391{
Jérôme Glisse74eee182017-09-08 16:11:35 -0700392 struct hmm_vma_walk *hmm_vma_walk = walk->private;
393 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisseff05c0c2018-04-10 16:28:38 -0700394 uint64_t *pfns = range->pfns;
Jérôme Glisse63d50662019-05-13 17:20:18 -0700395 unsigned long i, page_size;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700396
Jérôme Glisse74eee182017-09-08 16:11:35 -0700397 hmm_vma_walk->last = addr;
Jérôme Glisse63d50662019-05-13 17:20:18 -0700398 page_size = hmm_range_page_size(range);
399 i = (addr - range->start) >> range->page_shift;
400
401 for (; addr < end; addr += page_size, i++) {
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700402 pfns[i] = range->values[HMM_PFN_NONE];
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700403 if (fault || write_fault) {
Jérôme Glisse74eee182017-09-08 16:11:35 -0700404 int ret;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700405
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700406 ret = hmm_vma_do_fault(walk, addr, write_fault,
407 &pfns[i]);
Jérôme Glisse73231612019-05-13 17:19:58 -0700408 if (ret != -EBUSY)
Jérôme Glisse74eee182017-09-08 16:11:35 -0700409 return ret;
410 }
411 }
412
Jérôme Glisse73231612019-05-13 17:19:58 -0700413 return (fault || write_fault) ? -EBUSY : 0;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700414}
415
416static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
417 uint64_t pfns, uint64_t cpu_flags,
418 bool *fault, bool *write_fault)
419{
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700420 struct hmm_range *range = hmm_vma_walk->range;
421
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700422 if (!hmm_vma_walk->fault)
423 return;
424
Jérôme Glisse023a0192019-05-13 17:20:05 -0700425 /*
 426 * We not only consider the individual per-page request, we also
 427 * consider the default flags requested for the range. The API can
 428 * be used in two fashions. In the first one the HMM user coalesces
 429 * multiple page faults into one request and sets flags per pfn for
 430 * each of those faults. In the second one the HMM user wants to
 431 * pre-fault a range with specific flags. For the latter it is a
 432 * waste to have the user pre-fill the pfn array with a default
 433 * flags value. (See the worked example after this function.)
434 */
435 pfns = (pfns & range->pfn_flags_mask) | range->default_flags;
436
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700437 /* We aren't ask to do anything ... */
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700438 if (!(pfns & range->flags[HMM_PFN_VALID]))
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700439 return;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700440 /* If this is device memory than only fault if explicitly requested */
441 if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
442 /* Do we fault on device memory ? */
443 if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
444 *write_fault = pfns & range->flags[HMM_PFN_WRITE];
445 *fault = true;
446 }
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700447 return;
448 }
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700449
 450 /* If the CPU page table is not valid then we need to fault */
451 *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
452 /* Need to write fault ? */
453 if ((pfns & range->flags[HMM_PFN_WRITE]) &&
454 !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
455 *write_fault = true;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700456 *fault = true;
457 }
458}
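/*
 * Editor's note: a hedged illustration of the two fashions described above,
 * expressed in terms of the masking done in hmm_pte_need_fault()
 * (pfns = (pfns & range->pfn_flags_mask) | range->default_flags). Flag bit
 * values come from the driver-provided range->flags[] table; nothing below is
 * mandated by HMM itself.
 *
 *	// Fashion 1: per-pfn requests. Let the values the user pre-filled in
 *	// range->pfns[] pass through unchanged.
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = -1UL;
 *
 *	// Fashion 2: pre-fault a whole range, e.g. for write, without
 *	// pre-filling range->pfns[]: mask out the per-pfn values and apply
 *	// the same default flags to every entry.
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 */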
459
460static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
461 const uint64_t *pfns, unsigned long npages,
462 uint64_t cpu_flags, bool *fault,
463 bool *write_fault)
464{
465 unsigned long i;
466
467 if (!hmm_vma_walk->fault) {
468 *fault = *write_fault = false;
469 return;
470 }
471
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700472 *fault = *write_fault = false;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700473 for (i = 0; i < npages; ++i) {
474 hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
475 fault, write_fault);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700476 if ((*write_fault))
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700477 return;
478 }
479}
480
481static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
482 struct mm_walk *walk)
483{
484 struct hmm_vma_walk *hmm_vma_walk = walk->private;
485 struct hmm_range *range = hmm_vma_walk->range;
486 bool fault, write_fault;
487 unsigned long i, npages;
488 uint64_t *pfns;
489
490 i = (addr - range->start) >> PAGE_SHIFT;
491 npages = (end - addr) >> PAGE_SHIFT;
492 pfns = &range->pfns[i];
493 hmm_range_need_fault(hmm_vma_walk, pfns, npages,
494 0, &fault, &write_fault);
495 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
496}
497
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700498static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700499{
500 if (pmd_protnone(pmd))
501 return 0;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700502 return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
503 range->flags[HMM_PFN_WRITE] :
504 range->flags[HMM_PFN_VALID];
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700505}
506
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700507static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
508{
509 if (!pud_present(pud))
510 return 0;
511 return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
512 range->flags[HMM_PFN_WRITE] :
513 range->flags[HMM_PFN_VALID];
514}
515
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700516static int hmm_vma_handle_pmd(struct mm_walk *walk,
517 unsigned long addr,
518 unsigned long end,
519 uint64_t *pfns,
520 pmd_t pmd)
521{
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700522#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700523 struct hmm_vma_walk *hmm_vma_walk = walk->private;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700524 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700525 unsigned long pfn, npages, i;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700526 bool fault, write_fault;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700527 uint64_t cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700528
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700529 npages = (end - addr) >> PAGE_SHIFT;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700530 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700531 hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
532 &fault, &write_fault);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700533
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700534 if (pmd_protnone(pmd) || fault || write_fault)
535 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700536
537 pfn = pmd_pfn(pmd) + pte_index(addr);
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700538 for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
539 if (pmd_devmap(pmd)) {
540 hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
541 hmm_vma_walk->pgmap);
542 if (unlikely(!hmm_vma_walk->pgmap))
543 return -EBUSY;
544 }
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700545 pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700546 }
547 if (hmm_vma_walk->pgmap) {
548 put_dev_pagemap(hmm_vma_walk->pgmap);
549 hmm_vma_walk->pgmap = NULL;
550 }
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700551 hmm_vma_walk->last = end;
552 return 0;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700553#else
 554 /* If THP is not enabled then we should never reach this code! */
555 return -EINVAL;
556#endif
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700557}
558
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700559static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700560{
561 if (pte_none(pte) || !pte_present(pte))
562 return 0;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700563 return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
564 range->flags[HMM_PFN_WRITE] :
565 range->flags[HMM_PFN_VALID];
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700566}
567
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700568static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
569 unsigned long end, pmd_t *pmdp, pte_t *ptep,
570 uint64_t *pfn)
571{
572 struct hmm_vma_walk *hmm_vma_walk = walk->private;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700573 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700574 struct vm_area_struct *vma = walk->vma;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700575 bool fault, write_fault;
576 uint64_t cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700577 pte_t pte = *ptep;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700578 uint64_t orig_pfn = *pfn;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700579
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700580 *pfn = range->values[HMM_PFN_NONE];
Jérôme Glisse73231612019-05-13 17:19:58 -0700581 fault = write_fault = false;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700582
583 if (pte_none(pte)) {
Jérôme Glisse73231612019-05-13 17:19:58 -0700584 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
585 &fault, &write_fault);
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700586 if (fault || write_fault)
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700587 goto fault;
588 return 0;
589 }
590
591 if (!pte_present(pte)) {
592 swp_entry_t entry = pte_to_swp_entry(pte);
593
594 if (!non_swap_entry(entry)) {
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700595 if (fault || write_fault)
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700596 goto fault;
597 return 0;
598 }
599
600 /*
 601 * This is a special swap entry: ignore migration, handle
 602 * device private memory and report anything else as an error.
603 */
604 if (is_device_private_entry(entry)) {
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700605 cpu_flags = range->flags[HMM_PFN_VALID] |
606 range->flags[HMM_PFN_DEVICE_PRIVATE];
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700607 cpu_flags |= is_write_device_private_entry(entry) ?
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700608 range->flags[HMM_PFN_WRITE] : 0;
609 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
610 &fault, &write_fault);
611 if (fault || write_fault)
612 goto fault;
613 *pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
614 *pfn |= cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700615 return 0;
616 }
617
618 if (is_migration_entry(entry)) {
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700619 if (fault || write_fault) {
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700620 pte_unmap(ptep);
621 hmm_vma_walk->last = addr;
622 migration_entry_wait(vma->vm_mm,
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700623 pmdp, addr);
Jérôme Glisse73231612019-05-13 17:19:58 -0700624 return -EBUSY;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700625 }
626 return 0;
627 }
628
629 /* Report error for everything else */
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700630 *pfn = range->values[HMM_PFN_ERROR];
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700631 return -EFAULT;
Jérôme Glisse73231612019-05-13 17:19:58 -0700632 } else {
633 cpu_flags = pte_to_hmm_pfn_flags(range, pte);
634 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
635 &fault, &write_fault);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700636 }
637
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700638 if (fault || write_fault)
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700639 goto fault;
640
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700641 if (pte_devmap(pte)) {
642 hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
643 hmm_vma_walk->pgmap);
644 if (unlikely(!hmm_vma_walk->pgmap))
645 return -EBUSY;
646 } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
647 *pfn = range->values[HMM_PFN_SPECIAL];
648 return -EFAULT;
649 }
650
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700651 *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700652 return 0;
653
654fault:
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700655 if (hmm_vma_walk->pgmap) {
656 put_dev_pagemap(hmm_vma_walk->pgmap);
657 hmm_vma_walk->pgmap = NULL;
658 }
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700659 pte_unmap(ptep);
660 /* Fault any virtual address we were asked to fault */
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700661 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700662}
663
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700664static int hmm_vma_walk_pmd(pmd_t *pmdp,
665 unsigned long start,
666 unsigned long end,
667 struct mm_walk *walk)
668{
Jérôme Glisse74eee182017-09-08 16:11:35 -0700669 struct hmm_vma_walk *hmm_vma_walk = walk->private;
670 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glissed08faca2018-10-30 15:04:20 -0700671 struct vm_area_struct *vma = walk->vma;
Jérôme Glisseff05c0c2018-04-10 16:28:38 -0700672 uint64_t *pfns = range->pfns;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700673 unsigned long addr = start, i;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700674 pte_t *ptep;
Jérôme Glissed08faca2018-10-30 15:04:20 -0700675 pmd_t pmd;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700676
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700677
678again:
Jérôme Glissed08faca2018-10-30 15:04:20 -0700679 pmd = READ_ONCE(*pmdp);
680 if (pmd_none(pmd))
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700681 return hmm_vma_walk_hole(start, end, walk);
682
Jérôme Glissed08faca2018-10-30 15:04:20 -0700683 if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700684 return hmm_pfns_bad(start, end, walk);
685
Jérôme Glissed08faca2018-10-30 15:04:20 -0700686 if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
687 bool fault, write_fault;
688 unsigned long npages;
689 uint64_t *pfns;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700690
Jérôme Glissed08faca2018-10-30 15:04:20 -0700691 i = (addr - range->start) >> PAGE_SHIFT;
692 npages = (end - addr) >> PAGE_SHIFT;
693 pfns = &range->pfns[i];
694
695 hmm_range_need_fault(hmm_vma_walk, pfns, npages,
696 0, &fault, &write_fault);
697 if (fault || write_fault) {
698 hmm_vma_walk->last = addr;
699 pmd_migration_entry_wait(vma->vm_mm, pmdp);
Jérôme Glisse73231612019-05-13 17:19:58 -0700700 return -EBUSY;
Jérôme Glissed08faca2018-10-30 15:04:20 -0700701 }
702 return 0;
703 } else if (!pmd_present(pmd))
704 return hmm_pfns_bad(start, end, walk);
705
706 if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700707 /*
 708 * No need to take the pmd_lock here; even if some other thread
 709 * is splitting the huge pmd we will get that event through the
 710 * mmu_notifier callback.
 711 *
 712 * So just read the pmd value and check again that it is a transparent
 713 * huge or device mapping and compute the corresponding pfn
 714 * values.
715 */
716 pmd = pmd_read_atomic(pmdp);
717 barrier();
718 if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
719 goto again;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700720
Jérôme Glissed08faca2018-10-30 15:04:20 -0700721 i = (addr - range->start) >> PAGE_SHIFT;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700722 return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700723 }
724
Jérôme Glissed08faca2018-10-30 15:04:20 -0700725 /*
 726 * We have handled all the valid cases above, i.e. either none, migration,
 727 * huge or transparent huge. At this point either it is a valid pmd
 728 * entry pointing to a pte directory or it is a bad pmd that will not
 729 * recover.
730 */
731 if (pmd_bad(pmd))
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700732 return hmm_pfns_bad(start, end, walk);
733
734 ptep = pte_offset_map(pmdp, addr);
Jérôme Glissed08faca2018-10-30 15:04:20 -0700735 i = (addr - range->start) >> PAGE_SHIFT;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700736 for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700737 int r;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700738
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700739 r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
740 if (r) {
 741 /* hmm_vma_handle_pte() did unmap the pte directory */
742 hmm_vma_walk->last = addr;
743 return r;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700744 }
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700745 }
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700746 if (hmm_vma_walk->pgmap) {
747 /*
748 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
 749 * so that we can leverage the get_dev_pagemap() optimization which
750 * will not re-take a reference on a pgmap if we already have
751 * one.
752 */
753 put_dev_pagemap(hmm_vma_walk->pgmap);
754 hmm_vma_walk->pgmap = NULL;
755 }
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700756 pte_unmap(ptep - 1);
757
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700758 hmm_vma_walk->last = addr;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700759 return 0;
760}
761
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700762static int hmm_vma_walk_pud(pud_t *pudp,
763 unsigned long start,
764 unsigned long end,
765 struct mm_walk *walk)
766{
767 struct hmm_vma_walk *hmm_vma_walk = walk->private;
768 struct hmm_range *range = hmm_vma_walk->range;
769 unsigned long addr = start, next;
770 pmd_t *pmdp;
771 pud_t pud;
772 int ret;
773
774again:
775 pud = READ_ONCE(*pudp);
776 if (pud_none(pud))
777 return hmm_vma_walk_hole(start, end, walk);
778
779 if (pud_huge(pud) && pud_devmap(pud)) {
780 unsigned long i, npages, pfn;
781 uint64_t *pfns, cpu_flags;
782 bool fault, write_fault;
783
784 if (!pud_present(pud))
785 return hmm_vma_walk_hole(start, end, walk);
786
787 i = (addr - range->start) >> PAGE_SHIFT;
788 npages = (end - addr) >> PAGE_SHIFT;
789 pfns = &range->pfns[i];
790
791 cpu_flags = pud_to_hmm_pfn_flags(range, pud);
792 hmm_range_need_fault(hmm_vma_walk, pfns, npages,
793 cpu_flags, &fault, &write_fault);
794 if (fault || write_fault)
795 return hmm_vma_walk_hole_(addr, end, fault,
796 write_fault, walk);
797
798#ifdef CONFIG_HUGETLB_PAGE
799 pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
800 for (i = 0; i < npages; ++i, ++pfn) {
801 hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
802 hmm_vma_walk->pgmap);
803 if (unlikely(!hmm_vma_walk->pgmap))
804 return -EBUSY;
805 pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
806 }
807 if (hmm_vma_walk->pgmap) {
808 put_dev_pagemap(hmm_vma_walk->pgmap);
809 hmm_vma_walk->pgmap = NULL;
810 }
811 hmm_vma_walk->last = end;
812 return 0;
813#else
814 return -EINVAL;
815#endif
816 }
817
818 split_huge_pud(walk->vma, pudp, addr);
819 if (pud_none(*pudp))
820 goto again;
821
822 pmdp = pmd_offset(pudp, addr);
823 do {
824 next = pmd_addr_end(addr, end);
825 ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
826 if (ret)
827 return ret;
828 } while (pmdp++, addr = next, addr != end);
829
830 return 0;
831}
832
Jérôme Glisse63d50662019-05-13 17:20:18 -0700833static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
834 unsigned long start, unsigned long end,
835 struct mm_walk *walk)
836{
837#ifdef CONFIG_HUGETLB_PAGE
838 unsigned long addr = start, i, pfn, mask, size, pfn_inc;
839 struct hmm_vma_walk *hmm_vma_walk = walk->private;
840 struct hmm_range *range = hmm_vma_walk->range;
841 struct vm_area_struct *vma = walk->vma;
842 struct hstate *h = hstate_vma(vma);
843 uint64_t orig_pfn, cpu_flags;
844 bool fault, write_fault;
845 spinlock_t *ptl;
846 pte_t entry;
847 int ret = 0;
848
849 size = 1UL << huge_page_shift(h);
850 mask = size - 1;
851 if (range->page_shift != PAGE_SHIFT) {
 852 /* Make sure we are looking at a full page. */
853 if (start & mask)
854 return -EINVAL;
855 if (end < (start + size))
856 return -EINVAL;
857 pfn_inc = size >> PAGE_SHIFT;
858 } else {
859 pfn_inc = 1;
860 size = PAGE_SIZE;
861 }
862
863
864 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
865 entry = huge_ptep_get(pte);
866
867 i = (start - range->start) >> range->page_shift;
868 orig_pfn = range->pfns[i];
869 range->pfns[i] = range->values[HMM_PFN_NONE];
870 cpu_flags = pte_to_hmm_pfn_flags(range, entry);
871 fault = write_fault = false;
872 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
873 &fault, &write_fault);
874 if (fault || write_fault) {
875 ret = -ENOENT;
876 goto unlock;
877 }
878
879 pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
880 for (; addr < end; addr += size, i++, pfn += pfn_inc)
881 range->pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
882 hmm_vma_walk->last = end;
883
884unlock:
885 spin_unlock(ptl);
886
887 if (ret == -ENOENT)
888 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
889
890 return ret;
891#else /* CONFIG_HUGETLB_PAGE */
892 return -EINVAL;
893#endif
894}
895
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700896static void hmm_pfns_clear(struct hmm_range *range,
897 uint64_t *pfns,
Jérôme Glisse33cd47d2018-04-10 16:28:54 -0700898 unsigned long addr,
899 unsigned long end)
900{
901 for (; addr < end; addr += PAGE_SIZE, pfns++)
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700902 *pfns = range->values[HMM_PFN_NONE];
Jérôme Glisse33cd47d2018-04-10 16:28:54 -0700903}
904
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700905/*
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700906 * hmm_range_register() - start tracking change to CPU page table over a range
Jérôme Glisse25f23a02019-05-13 17:19:55 -0700907 * @range: range
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700908 * @mm: the mm struct for the range of virtual address
909 * @start: start virtual address (inclusive)
910 * @end: end virtual address (exclusive)
Jérôme Glisse63d50662019-05-13 17:20:18 -0700911 * @page_shift: expect page shift for the range
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700912 * Returns 0 on success, -EFAULT if the address space is no longer valid
Jérôme Glisse25f23a02019-05-13 17:19:55 -0700913 *
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700914 * Track updates to the CPU page table see include/linux/hmm.h
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700915 */
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700916int hmm_range_register(struct hmm_range *range,
917 struct mm_struct *mm,
918 unsigned long start,
Jérôme Glisse63d50662019-05-13 17:20:18 -0700919 unsigned long end,
920 unsigned page_shift)
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700921{
Jérôme Glisse63d50662019-05-13 17:20:18 -0700922 unsigned long mask = ((1UL << page_shift) - 1UL);
923
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700924 range->valid = false;
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700925 range->hmm = NULL;
926
Jérôme Glisse63d50662019-05-13 17:20:18 -0700927 if ((start & mask) || (end & mask))
928 return -EINVAL;
929 if (start >= end)
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700930 return -EINVAL;
931
Jérôme Glisse63d50662019-05-13 17:20:18 -0700932 range->page_shift = page_shift;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700933 range->start = start;
934 range->end = end;
935
936 range->hmm = hmm_get_or_create(mm);
937 if (!range->hmm)
938 return -EFAULT;
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700939
 940 /* Check if hmm_mm_destroy() was called. */
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700941 if (range->hmm->mm == NULL || range->hmm->dead) {
942 hmm_put(range->hmm);
943 return -EFAULT;
Jérôme Glisse86586a42018-04-10 16:28:34 -0700944 }
945
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700946 /* Initialize range to track CPU page table update */
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700947 mutex_lock(&range->hmm->lock);
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700948
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700949 list_add_rcu(&range->list, &range->hmm->ranges);
Jérôme Glisse74eee182017-09-08 16:11:35 -0700950
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700951 /*
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700952 * If there are any concurrent notifiers we have to wait for them for
953 * the range to be valid (see hmm_range_wait_until_valid()).
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700954 */
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700955 if (!range->hmm->notifiers)
956 range->valid = true;
957 mutex_unlock(&range->hmm->lock);
958
959 return 0;
960}
961EXPORT_SYMBOL(hmm_range_register);
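/*
 * Editor's note: illustrative sketch only. A driver typically points a struct
 * hmm_range at a caller-allocated pfns array plus its flags/values tables,
 * registers it, and later unregisters it. The example_* names are
 * hypothetical; see include/linux/hmm.h for the canonical usage pattern.
 *
 *	struct hmm_range range = {
 *		.pfns		= example_pfns,		// npages entries
 *		.flags		= example_hmm_flags,	// driver flag table
 *		.values		= example_hmm_values,	// driver value table
 *		.default_flags	= 0,
 *		.pfn_flags_mask	= -1UL,
 *	};
 *	int ret;
 *
 *	ret = hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	// ... hmm_range_snapshot()/hmm_range_fault() on the range ...
 *	hmm_range_unregister(&range);
 */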
962
963/*
964 * hmm_range_unregister() - stop tracking change to CPU page table over a range
965 * @range: range
966 *
967 * Range struct is used to track updates to the CPU page table after a call to
968 * hmm_range_register(). See include/linux/hmm.h for how to use it.
969 */
970void hmm_range_unregister(struct hmm_range *range)
971{
 972 /* Sanity check, this really should not happen. */
973 if (range->hmm == NULL || range->end <= range->start)
974 return;
975
976 mutex_lock(&range->hmm->lock);
977 list_del_rcu(&range->list);
978 mutex_unlock(&range->hmm->lock);
979
980 /* Drop reference taken by hmm_range_register() */
981 range->valid = false;
982 hmm_put(range->hmm);
983 range->hmm = NULL;
984}
985EXPORT_SYMBOL(hmm_range_unregister);
986
987/*
988 * hmm_range_snapshot() - snapshot CPU page table for a range
989 * @range: range
 990 * Returns: -EINVAL if invalid argument, -ENOMEM if out of memory, -EPERM if
 991 * invalid permission (for instance asking for write and the range is
 992 * read only), -EAGAIN if you need to retry, -EFAULT if invalid (i.e.
 993 * either no valid vma or it is illegal to access that range); otherwise the
 994 * number of valid pages in range->pfns[] (from the range start address).
995 *
996 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 997 * validity is tracked by the range struct. See include/linux/hmm.h for an
 998 * example of how to use it.
999 */
1000long hmm_range_snapshot(struct hmm_range *range)
1001{
Jérôme Glisse63d50662019-05-13 17:20:18 -07001002 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001003 unsigned long start = range->start, end;
1004 struct hmm_vma_walk hmm_vma_walk;
1005 struct hmm *hmm = range->hmm;
1006 struct vm_area_struct *vma;
1007 struct mm_walk mm_walk;
1008
 1009 /* Check if hmm_mm_destroy() was called. */
1010 if (hmm->mm == NULL || hmm->dead)
1011 return -EFAULT;
1012
1013 do {
1014 /* If range is no longer valid force retry. */
1015 if (!range->valid)
1016 return -EAGAIN;
1017
1018 vma = find_vma(hmm->mm, start);
Jérôme Glisse63d50662019-05-13 17:20:18 -07001019 if (vma == NULL || (vma->vm_flags & device_vma))
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001020 return -EFAULT;
1021
Jérôme Glisse63d50662019-05-13 17:20:18 -07001022 if (is_vm_hugetlb_page(vma)) {
1023 struct hstate *h = hstate_vma(vma);
1024
1025 if (huge_page_shift(h) != range->page_shift &&
1026 range->page_shift != PAGE_SHIFT)
1027 return -EINVAL;
1028 } else {
1029 if (range->page_shift != PAGE_SHIFT)
1030 return -EINVAL;
1031 }
1032
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001033 if (!(vma->vm_flags & VM_READ)) {
1034 /*
 1035 * If the vma does not allow read access, then assume that it
 1036 * does not allow write access either. HMM does not
 1037 * support architectures that allow write without read.
1038 */
1039 hmm_pfns_clear(range, range->pfns,
1040 range->start, range->end);
1041 return -EPERM;
1042 }
1043
1044 range->vma = vma;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001045 hmm_vma_walk.pgmap = NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001046 hmm_vma_walk.last = start;
1047 hmm_vma_walk.fault = false;
1048 hmm_vma_walk.range = range;
1049 mm_walk.private = &hmm_vma_walk;
1050 end = min(range->end, vma->vm_end);
1051
1052 mm_walk.vma = vma;
1053 mm_walk.mm = vma->vm_mm;
1054 mm_walk.pte_entry = NULL;
1055 mm_walk.test_walk = NULL;
1056 mm_walk.hugetlb_entry = NULL;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001057 mm_walk.pud_entry = hmm_vma_walk_pud;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001058 mm_walk.pmd_entry = hmm_vma_walk_pmd;
1059 mm_walk.pte_hole = hmm_vma_walk_hole;
Jérôme Glisse63d50662019-05-13 17:20:18 -07001060 mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001061
1062 walk_page_range(start, end, &mm_walk);
1063 start = end;
1064 } while (start < range->end);
1065
Jérôme Glisse25f23a02019-05-13 17:19:55 -07001066 return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -07001067}
Jérôme Glisse25f23a02019-05-13 17:19:55 -07001068EXPORT_SYMBOL(hmm_range_snapshot);
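/*
 * Editor's note: a minimal usage sketch, assuming the range has already been
 * set up with hmm_range_register() as above and that the caller holds the
 * mmap_sem in read mode, which this function relies on (it calls find_vma()
 * and walk_page_range() without taking the lock itself).
 *
 *	long npages;
 *
 *	down_read(&mm->mmap_sem);
 *	npages = hmm_range_snapshot(&range);
 *	up_read(&mm->mmap_sem);
 *	if (npages == -EAGAIN) {
 *		// The range was invalidated while it was being walked; wait
 *		// for range.valid to become true again and retry.
 *	}
 */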
Jérôme Glisseda4c3c72017-09-08 16:11:31 -07001069
1070/*
Jérôme Glisse73231612019-05-13 17:19:58 -07001071 * hmm_range_fault() - try to fault some address in a virtual address range
Jérôme Glisse08232a42018-04-10 16:28:30 -07001072 * @range: range being faulted
Jérôme Glisse74eee182017-09-08 16:11:35 -07001073 * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
Jérôme Glisse73231612019-05-13 17:19:58 -07001074 * Returns: number of valid pages in range->pfns[] (from range start
1075 * address). This may be zero. If the return value is negative,
1076 * then one of the following values may be returned:
1077 *
 1078 * -EINVAL: Invalid arguments, or the mm or virtual address is in an
 1079 * invalid vma (for instance a device file vma).
Jérôme Glisse73231612019-05-13 17:19:58 -07001080 * -ENOMEM: Out of memory.
1081 * -EPERM: Invalid permission (for instance asking for write and
1082 * range is read only).
 1083 * -EAGAIN: If you need to retry and mmap_sem was dropped. This can only
 1084 * happen if the block argument is false.
 1085 * -EBUSY: If the range is being invalidated and you should wait
1086 * for invalidation to finish.
 1087 * -EFAULT: Invalid (i.e. either no valid vma or it is illegal to access
1088 * that range), number of valid pages in range->pfns[] (from
1089 * range start address).
Jérôme Glisse74eee182017-09-08 16:11:35 -07001090 *
1091 * This is similar to a regular CPU page fault except that it will not trigger
Jérôme Glisse73231612019-05-13 17:19:58 -07001092 * any memory migration if the memory being faulted is not accessible by CPUs
 1093 * and the caller does not ask for migration.
Jérôme Glisse74eee182017-09-08 16:11:35 -07001094 *
Jérôme Glisseff05c0c2018-04-10 16:28:38 -07001095 * On error, for one virtual address in the range, the function will mark the
1096 * corresponding HMM pfn entry with an error flag.
Jérôme Glisse74eee182017-09-08 16:11:35 -07001097 */
Jérôme Glisse73231612019-05-13 17:19:58 -07001098long hmm_range_fault(struct hmm_range *range, bool block)
Jérôme Glisse74eee182017-09-08 16:11:35 -07001099{
Jérôme Glisse63d50662019-05-13 17:20:18 -07001100 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001101 unsigned long start = range->start, end;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001102 struct hmm_vma_walk hmm_vma_walk;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001103 struct hmm *hmm = range->hmm;
1104 struct vm_area_struct *vma;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001105 struct mm_walk mm_walk;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001106 int ret;
1107
Jérôme Glisse704f3f22019-05-13 17:19:48 -07001108 /* Check if hmm_mm_destroy() was called. */
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001109 if (hmm->mm == NULL || hmm->dead)
1110 return -EFAULT;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001111
1112 do {
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001113 /* If range is no longer valid force retry. */
1114 if (!range->valid) {
1115 up_read(&hmm->mm->mmap_sem);
1116 return -EAGAIN;
1117 }
Jérôme Glisse74eee182017-09-08 16:11:35 -07001118
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001119 vma = find_vma(hmm->mm, start);
Jérôme Glisse63d50662019-05-13 17:20:18 -07001120 if (vma == NULL || (vma->vm_flags & device_vma))
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001121 return -EFAULT;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001122
Jérôme Glisse63d50662019-05-13 17:20:18 -07001123 if (is_vm_hugetlb_page(vma)) {
1124 if (huge_page_shift(hstate_vma(vma)) !=
1125 range->page_shift &&
1126 range->page_shift != PAGE_SHIFT)
1127 return -EINVAL;
1128 } else {
1129 if (range->page_shift != PAGE_SHIFT)
1130 return -EINVAL;
1131 }
1132
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001133 if (!(vma->vm_flags & VM_READ)) {
1134 /*
 1135 * If the vma does not allow read access, then assume that it
 1136 * does not allow write access either. HMM does not
 1137 * support architectures that allow write without read.
1138 */
1139 hmm_pfns_clear(range, range->pfns,
1140 range->start, range->end);
1141 return -EPERM;
1142 }
1143
1144 range->vma = vma;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001145 hmm_vma_walk.pgmap = NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001146 hmm_vma_walk.last = start;
1147 hmm_vma_walk.fault = true;
1148 hmm_vma_walk.block = block;
1149 hmm_vma_walk.range = range;
1150 mm_walk.private = &hmm_vma_walk;
1151 end = min(range->end, vma->vm_end);
1152
1153 mm_walk.vma = vma;
1154 mm_walk.mm = vma->vm_mm;
1155 mm_walk.pte_entry = NULL;
1156 mm_walk.test_walk = NULL;
1157 mm_walk.hugetlb_entry = NULL;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001158 mm_walk.pud_entry = hmm_vma_walk_pud;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001159 mm_walk.pmd_entry = hmm_vma_walk_pmd;
1160 mm_walk.pte_hole = hmm_vma_walk_hole;
Jérôme Glisse63d50662019-05-13 17:20:18 -07001161 mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001162
1163 do {
1164 ret = walk_page_range(start, end, &mm_walk);
1165 start = hmm_vma_walk.last;
1166
1167 /* Keep trying while the range is valid. */
1168 } while (ret == -EBUSY && range->valid);
1169
1170 if (ret) {
1171 unsigned long i;
1172
1173 i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
1174 hmm_pfns_clear(range, &range->pfns[i],
1175 hmm_vma_walk.last, range->end);
1176 return ret;
1177 }
1178 start = end;
1179
1180 } while (start < range->end);
Jérôme Glisse704f3f22019-05-13 17:19:48 -07001181
Jérôme Glisse73231612019-05-13 17:19:58 -07001182 return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001183}
Jérôme Glisse73231612019-05-13 17:19:58 -07001184EXPORT_SYMBOL(hmm_range_fault);
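/*
 * Editor's note: a hedged sketch of the retry loop a driver might use around
 * hmm_range_fault() with block == true. On -EAGAIN the function has already
 * dropped the mmap_sem (see the !range->valid path above), so the caller must
 * re-take it before retrying; a real driver would typically also wait for the
 * range to become valid again (see hmm_range_wait_until_valid() in
 * include/linux/hmm.h) before doing so.
 *
 *	long ret;
 *
 *	again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		if (ret == -EAGAIN)
 *			goto again;	// mmap_sem was already dropped
 *		up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 *	// ret pages are now valid in range.pfns[]; use them, then:
 *	up_read(&mm->mmap_sem);
 */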
Jérôme Glissec0b12402017-09-08 16:11:27 -07001185#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001186
1187
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001188#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001189struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
1190 unsigned long addr)
1191{
1192 struct page *page;
1193
1194 page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
1195 if (!page)
1196 return NULL;
1197 lock_page(page);
1198 return page;
1199}
1200EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
1201
1202
1203static void hmm_devmem_ref_release(struct percpu_ref *ref)
1204{
1205 struct hmm_devmem *devmem;
1206
1207 devmem = container_of(ref, struct hmm_devmem, ref);
1208 complete(&devmem->completion);
1209}
1210
1211static void hmm_devmem_ref_exit(void *data)
1212{
1213 struct percpu_ref *ref = data;
1214 struct hmm_devmem *devmem;
1215
1216 devmem = container_of(ref, struct hmm_devmem, ref);
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001217 wait_for_completion(&devmem->completion);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001218 percpu_ref_exit(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001219}
1220
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001221static void hmm_devmem_ref_kill(struct percpu_ref *ref)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001222{
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001223 percpu_ref_kill(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001224}
1225
Souptick Joarderb57e622e62019-03-11 23:28:10 -07001226static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001227 unsigned long addr,
1228 const struct page *page,
1229 unsigned int flags,
1230 pmd_t *pmdp)
1231{
1232 struct hmm_devmem *devmem = page->pgmap->data;
1233
1234 return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
1235}
1236
1237static void hmm_devmem_free(struct page *page, void *data)
1238{
1239 struct hmm_devmem *devmem = data;
1240
Dan Williams2fa147b2018-07-13 21:50:01 -07001241 page->mapping = NULL;
1242
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001243 devmem->ops->free(devmem, page);
1244}
1245
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001246/*
1247 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
1248 *
1249 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
1250 * @device: device struct to bind the resource too
1251 * @size: size in bytes of the device memory to add
 1252 * Returns: pointer to the new hmm_devmem struct, ERR_PTR otherwise
1253 *
 1254 * This function first finds an empty range of physical addresses big enough to
1255 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
1256 * in turn allocates struct pages. It does not do anything beyond that; all
1257 * events affecting the memory will go through the various callbacks provided
1258 * by hmm_devmem_ops struct.
1259 *
 1260 * The device driver should call this function during device initialization and
 1261 * is then responsible for memory management. HMM only provides helpers.
1262 */
1263struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1264 struct device *device,
1265 unsigned long size)
1266{
1267 struct hmm_devmem *devmem;
1268 resource_size_t addr;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001269 void *result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001270 int ret;
1271
Dan Williamse76384882018-05-16 11:46:08 -07001272 dev_pagemap_get_ops();
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001273
Dan Williams58ef15b2018-12-28 00:35:07 -08001274 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001275 if (!devmem)
1276 return ERR_PTR(-ENOMEM);
1277
1278 init_completion(&devmem->completion);
1279 devmem->pfn_first = -1UL;
1280 devmem->pfn_last = -1UL;
1281 devmem->resource = NULL;
1282 devmem->device = device;
1283 devmem->ops = ops;
1284
1285 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1286 0, GFP_KERNEL);
1287 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001288 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001289
Dan Williams58ef15b2018-12-28 00:35:07 -08001290 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001291 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001292 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001293
1294 size = ALIGN(size, PA_SECTION_SIZE);
1295 addr = min((unsigned long)iomem_resource.end,
1296 (1UL << MAX_PHYSMEM_BITS) - 1);
1297 addr = addr - size + 1UL;
1298
1299 /*
1300 * FIXME add a new helper to quickly walk resource tree and find free
1301 * range
1302 *
1303 * FIXME what about ioport_resource resource ?
1304 */
1305 for (; addr > size && addr >= iomem_resource.start; addr -= size) {
1306 ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
1307 if (ret != REGION_DISJOINT)
1308 continue;
1309
1310 devmem->resource = devm_request_mem_region(device, addr, size,
1311 dev_name(device));
Dan Williams58ef15b2018-12-28 00:35:07 -08001312 if (!devmem->resource)
1313 return ERR_PTR(-ENOMEM);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001314 break;
1315 }
Dan Williams58ef15b2018-12-28 00:35:07 -08001316 if (!devmem->resource)
1317 return ERR_PTR(-ERANGE);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001318
1319 devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
1320 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1321 devmem->pfn_last = devmem->pfn_first +
1322 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001323 devmem->page_fault = hmm_devmem_fault;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001324
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001325 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1326 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001327 devmem->pagemap.page_free = hmm_devmem_free;
1328 devmem->pagemap.altmap_valid = false;
1329 devmem->pagemap.ref = &devmem->ref;
1330 devmem->pagemap.data = devmem;
1331 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001332
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001333 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1334 if (IS_ERR(result))
1335 return result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001336 return devmem;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001337}
Dan Williams02917e92018-12-28 00:35:15 -08001338EXPORT_SYMBOL_GPL(hmm_devmem_add);
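/*
 * Editor's note: illustrative sketch of a driver hot-plugging device private
 * memory. The example_* callbacks are hypothetical; their parameters mirror
 * the way devmem->ops->fault() and devmem->ops->free() are invoked above, and
 * the exact prototypes are defined in include/linux/hmm.h.
 *
 *	static vm_fault_t example_devmem_fault(struct hmm_devmem *devmem,
 *					       struct vm_area_struct *vma,
 *					       unsigned long addr,
 *					       const struct page *page,
 *					       unsigned int flags,
 *					       pmd_t *pmdp)
 *	{
 *		// Migrate the data for this device page back to system
 *		// memory so the CPU fault can make progress.
 *		return VM_FAULT_SIGBUS;	// placeholder
 *	}
 *
 *	static void example_devmem_free(struct hmm_devmem *devmem,
 *					struct page *page)
 *	{
 *		// Return the backing device memory to the driver allocator.
 *	}
 *
 *	static const struct hmm_devmem_ops example_devmem_ops = {
 *		.fault	= example_devmem_fault,
 *		.free	= example_devmem_free,
 *	};
 *
 *	devmem = hmm_devmem_add(&example_devmem_ops, &pdev->dev, SZ_64M);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 */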
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001339
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001340struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1341 struct device *device,
1342 struct resource *res)
1343{
1344 struct hmm_devmem *devmem;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001345 void *result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001346 int ret;
1347
1348 if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
1349 return ERR_PTR(-EINVAL);
1350
Dan Williamse76384882018-05-16 11:46:08 -07001351 dev_pagemap_get_ops();
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001352
Dan Williams58ef15b2018-12-28 00:35:07 -08001353 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001354 if (!devmem)
1355 return ERR_PTR(-ENOMEM);
1356
1357 init_completion(&devmem->completion);
1358 devmem->pfn_first = -1UL;
1359 devmem->pfn_last = -1UL;
1360 devmem->resource = res;
1361 devmem->device = device;
1362 devmem->ops = ops;
1363
1364 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1365 0, GFP_KERNEL);
1366 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001367 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001368
Dan Williams58ef15b2018-12-28 00:35:07 -08001369 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
1370 &devmem->ref);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001371 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001372 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001373
1374 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1375 devmem->pfn_last = devmem->pfn_first +
1376 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001377 devmem->page_fault = hmm_devmem_fault;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001378
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001379 devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
1380 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001381 devmem->pagemap.page_free = hmm_devmem_free;
1382 devmem->pagemap.altmap_valid = false;
1383 devmem->pagemap.ref = &devmem->ref;
1384 devmem->pagemap.data = devmem;
1385 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001386
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001387 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1388 if (IS_ERR(result))
1389 return result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001390 return devmem;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001391}
Dan Williams02917e92018-12-28 00:35:15 -08001392EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001393
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001394/*
Jérôme Glisse858b54d2017-09-08 16:12:02 -07001395 * A device driver that wants to handle multiple devices' memory through a
 1396 * single fake device can use hmm_device to do so. This is purely a helper
 1397 * and it is not needed in order to use any other HMM functionality.
1398 */
1399#define HMM_DEVICE_MAX 256
1400
1401static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
1402static DEFINE_SPINLOCK(hmm_device_lock);
1403static struct class *hmm_device_class;
1404static dev_t hmm_device_devt;
1405
1406static void hmm_device_release(struct device *device)
1407{
1408 struct hmm_device *hmm_device;
1409
1410 hmm_device = container_of(device, struct hmm_device, device);
1411 spin_lock(&hmm_device_lock);
1412 clear_bit(hmm_device->minor, hmm_device_mask);
1413 spin_unlock(&hmm_device_lock);
1414
1415 kfree(hmm_device);
1416}
1417
1418struct hmm_device *hmm_device_new(void *drvdata)
1419{
1420 struct hmm_device *hmm_device;
1421
1422 hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
1423 if (!hmm_device)
1424 return ERR_PTR(-ENOMEM);
1425
1426 spin_lock(&hmm_device_lock);
1427 hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
1428 if (hmm_device->minor >= HMM_DEVICE_MAX) {
1429 spin_unlock(&hmm_device_lock);
1430 kfree(hmm_device);
1431 return ERR_PTR(-EBUSY);
1432 }
1433 set_bit(hmm_device->minor, hmm_device_mask);
1434 spin_unlock(&hmm_device_lock);
1435
1436 dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
1437 hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
1438 hmm_device->minor);
1439 hmm_device->device.release = hmm_device_release;
1440 dev_set_drvdata(&hmm_device->device, drvdata);
1441 hmm_device->device.class = hmm_device_class;
1442 device_initialize(&hmm_device->device);
1443
1444 return hmm_device;
1445}
1446EXPORT_SYMBOL(hmm_device_new);
1447
1448void hmm_device_put(struct hmm_device *hmm_device)
1449{
1450 put_device(&hmm_device->device);
1451}
1452EXPORT_SYMBOL(hmm_device_put);
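/*
 * Editor's note: a minimal sketch of the fake-device helper above. drvdata is
 * whatever driver-private pointer the caller wants attached; the embedded
 * struct device can then be used wherever a struct device is needed, e.g. as
 * the device argument of hmm_devmem_add().
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(drvdata);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	// ... use &hdev->device ...
 *	hmm_device_put(hdev);
 */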
1453
1454static int __init hmm_init(void)
1455{
1456 int ret;
1457
1458 ret = alloc_chrdev_region(&hmm_device_devt, 0,
1459 HMM_DEVICE_MAX,
1460 "hmm_device");
1461 if (ret)
1462 return ret;
1463
1464 hmm_device_class = class_create(THIS_MODULE, "hmm_device");
1465 if (IS_ERR(hmm_device_class)) {
1466 unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
1467 return PTR_ERR(hmm_device_class);
1468 }
1469 return 0;
1470}
1471
1472device_initcall(hmm_init);
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001473#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */