/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	atomic_t		sequence;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct
	 * above.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return mm->hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}

static int hmm_invalidate_range(struct hmm *hmm,
				const struct hmm_update *update)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (update->end < range->start || update->start >= range->end)
			continue;

		range->valid = false;
		addr = max(update->start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
		if (!update->blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			return -EAGAIN;
		}
	}
	up_read(&hmm->mirrors_sem);

	return 0;
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger an
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end,
				      bool blockable)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	atomic_inc(&hmm->sequence);

	return 0;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct hmm_update update;
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	update.start = start;
	update.end = end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = true;
	hmm_invalidate_range(hmm, &update);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

again:
	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		/*
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		 */
		up_write(&mirror->hmm->mirrors_sem);
		mirror->hmm = NULL;
		goto again;
	} else {
		list_add(&mirror->list, &mirror->hmm->mirrors);
		up_write(&mirror->hmm->mirrors_sem);
	}

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
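
/*
 * Illustrative sketch (not part of this file's code): a minimal driver-side
 * mirror. The hmm_mirror_ops callbacks and their signatures follow how this
 * file invokes them above (sync_cpu_device_pagetables(mirror, update) and
 * release(mirror), see include/linux/hmm.h); the my_* names are hypothetical
 * placeholders.
 *
 *	struct my_mirror {
 *		struct hmm_mirror	mirror;
 *		struct my_device	*dev;
 *	};
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *					const struct hmm_update *update)
 *	{
 *		struct my_mirror *mine;
 *
 *		mine = container_of(mirror, struct my_mirror, mirror);
 *		// Invalidate the device page table for [start, end).
 *		my_device_invalidate(mine->dev, update->start, update->end);
 *		return 0;
 *	}
 *
 *	static void my_release(struct hmm_mirror *mirror)
 *	{
 *		// The address space is going away; tear down the device
 *		// mapping backed by this mirror.
 *	}
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *		.release = my_release,
 *	};
 *
 *	// mmap_sem must be held in write mode, as documented above.
 *	down_write(&mm->mmap_sem);
 *	mine->mirror.ops = &my_mirror_ops;
 *	ret = hmm_mirror_register(&mine->mirror, mm);
 *	up_write(&mm->mmap_sem);
 */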

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	bool should_unregister = false;
	struct mm_struct *mm;
	struct hmm *hmm;

	if (mirror->hmm == NULL)
		return;

	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	mirror->hmm = NULL;
	mm = hmm->mm;
	hmm->mm = NULL;
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)
		return;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EBUSY;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration entries,
		 * use the device memory entry, and report anything else as
		 * an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;


again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EAGAIN;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd
	 * that will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);

/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that device driver must still implement general CPU page table update
 * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using
 * the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this:
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(range->vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);
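
/*
 * Illustrative sketch (hypothetical my_* names): the first usage pattern
 * documented above, written out for a driver that mirrors the snapshot into
 * its own page table. It assumes the caller set up range->vma, range->start,
 * range->end and the range->pfns array, and already registered an hmm_mirror.
 *
 *	static int my_snapshot_and_commit(struct my_device *mydev,
 *					  struct hmm_range *range)
 *	{
 *		int ret;
 *
 *	again:
 *		ret = hmm_vma_get_pfns(range);
 *		if (ret)
 *			return ret;
 *
 *		my_device_build_update(mydev, range->pfns);
 *
 *		my_device_page_table_lock(mydev);
 *		if (!hmm_vma_range_done(range)) {
 *			// The CPU page table changed under us; the snapshot
 *			// is stale, so throw it away and start over.
 *			my_device_page_table_unlock(mydev);
 *			goto again;
 *		}
 *		my_device_commit_update(mydev);
 *		my_device_page_table_unlock(mydev);
 *		return 0;
 *	}
 */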

/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_monitor_end()
 *     goto retry;
 *   case 0:
 *     break;
 *   case -ENOMEM:
 *   case -EINVAL:
 *   case -EPERM:
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem)
 *     return;
 *   }
 *   // Take device driver lock that serialize device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURN SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
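
/*
 * Illustrative sketch (hypothetical my_* names): the "expected use pattern"
 * above as a small helper. Per the Returns: note, -EAGAIN means hmm_vma_fault()
 * already dropped mmap_sem, so the retry path only needs to stop range
 * tracking before taking the lock again.
 *
 *	static int my_fault_and_commit(struct my_device *mydev,
 *				       struct mm_struct *mm,
 *				       struct hmm_range *range, bool block)
 *	{
 *		int ret;
 *
 *	retry:
 *		down_read(&mm->mmap_sem);
 *		// range->vma and range->pfns are assumed to be (re)initialized
 *		// here for the addresses the device wants to fault.
 *		ret = hmm_vma_fault(range, block);
 *		if (ret == -EAGAIN) {
 *			hmm_vma_range_done(range);
 *			goto retry;
 *		}
 *		if (ret) {
 *			up_read(&mm->mmap_sem);
 *			return ret;
 *		}
 *
 *		my_device_page_table_lock(mydev);
 *		hmm_vma_range_done(range);
 *		my_device_commit_pfns(mydev, range->pfns);
 *		my_device_page_table_unlock(mydev);
 *		up_read(&mm->mmap_sem);
 *		return 0;
 *	}
 */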
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);


static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_exit(ref);
	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
}

static void hmm_devmem_ref_kill(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
{
	resource_size_t key;

	mutex_lock(&hmm_devmem_lock);
	for (key = resource->start;
	     key <= resource->end;
	     key += PA_SECTION_SIZE)
		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&hmm_devmem_lock);
}

static void hmm_devmem_release(struct device *dev, void *data)
{
	struct hmm_devmem *devmem = data;
	struct resource *resource = devmem->resource;
	unsigned long start_pfn, npages;
	struct zone *zone;
	struct page *page;

	if (percpu_ref_tryget_live(&devmem->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(&devmem->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

	page = pfn_to_page(start_pfn);
	zone = page_zone(page);

	mem_hotplug_begin();
	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
		__remove_pages(zone, start_pfn, npages, NULL);
	else
		arch_remove_memory(start_pfn << PAGE_SHIFT,
				   npages << PAGE_SHIFT, NULL);
	mem_hotplug_done();

	hmm_devmem_radix_release(resource);
}

static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
{
	resource_size_t key, align_start, align_size, align_end;
	struct device *device = devmem->device;
	int ret, nid, is_ram;

	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(devmem->resource->start +
			   resource_size(devmem->resource),
			   PA_SECTION_SIZE) - align_start;

	is_ram = region_intersects(align_start, align_size,
				   IORESOURCE_SYSTEM_RAM,
				   IORES_DESC_NONE);
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, devmem->resource);
		return -ENXIO;
	}
	if (is_ram == REGION_INTERSECTS)
		return -ENXIO;

	if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	else
		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;

	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.dev = devmem->device;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;

	mutex_lock(&hmm_devmem_lock);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
		struct hmm_devmem *dup;

		dup = radix_tree_lookup(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT);
		if (dup) {
			dev_err(device, "%s: collides with mapping for %s\n",
				__func__, dev_name(dup->device));
			mutex_unlock(&hmm_devmem_lock);
			ret = -EBUSY;
			goto error;
		}
		ret = radix_tree_insert(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT,
					devmem);
		if (ret) {
			dev_err(device, "%s: failed: %d\n", __func__, ret);
			mutex_unlock(&hmm_devmem_lock);
			goto error_radix;
		}
	}
	mutex_unlock(&hmm_devmem_lock);

	nid = dev_to_node(device);
	if (nid < 0)
		nid = numa_mem_id();

	mem_hotplug_begin();
	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover
	 * the device memory is inaccessible, thus we do not want to create a
	 * linear mapping for the memory like arch_add_memory() would do.
	 *
	 * For device public memory, which is accessible by the CPU, we do
	 * want the linear mapping and thus use arch_add_memory().
	 */
	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
		ret = arch_add_memory(nid, align_start, align_size, NULL,
				false);
	else
		ret = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	if (ret) {
		mem_hotplug_done();
		goto error_add_memory;
	}
	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL);
	mem_hotplug_done();

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, &devmem->pagemap);

	return 0;

error_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
error_radix:
	hmm_devmem_radix_release(devmem->resource);
error:
	return ret;
}

static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
{
	struct hmm_devmem *devmem = data;

	return devmem->resource == match_data;
}

static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
{
	devres_release(devmem->device, &hmm_devmem_release,
		       &hmm_devmem_match, devmem->resource);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by hmm_devmem_ops struct.
 *
 * Device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	int ret;

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource) {
			ret = -ENOMEM;
			goto error_no_resource;
		}
		break;
	}
	if (!devmem->resource) {
		ret = -ERANGE;
		goto error_no_resource;
	}

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_pages;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);
	}

	return devmem;

error_pages:
	devm_release_mem_region(device, devmem->resource->start,
				resource_size(devmem->resource));
error_no_resource:
error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add);
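
/*
 * Illustrative sketch (hypothetical my_* names and MY_DEVMEM_SIZE): minimal
 * hmm_devmem_ops and a call to hmm_devmem_add() at probe time. The fault/free
 * callback signatures mirror how this file invokes them through devmem->ops
 * above; what a real driver does inside them (migrating data back to system
 * memory on CPU fault, reclaiming its own allocation on free) is device
 * specific.
 *
 *	static int my_devmem_fault(struct hmm_devmem *devmem,
 *				   struct vm_area_struct *vma,
 *				   unsigned long addr,
 *				   const struct page *page,
 *				   unsigned int flags,
 *				   pmd_t *pmdp)
 *	{
 *		// Migrate the device page back to system memory and resolve
 *		// the CPU fault; return a VM_FAULT_* code on failure.
 *		return my_device_migrate_to_ram(vma, addr, page, flags, pmdp);
 *	}
 *
 *	static void my_devmem_free(struct hmm_devmem *devmem, struct page *page)
 *	{
 *		my_device_reclaim_page(page);
 *	}
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.fault = my_devmem_fault,
 *		.free = my_devmem_free,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, MY_DEVMEM_SIZE);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// Device pages now cover devmem->pfn_first up to devmem->pfn_last.
 */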

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;


	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_devm_add_action;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);
	}

	return devmem;

error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add_resource);

/*
 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
 * of the device driver. It will free struct page and remove the resource that
 * reserved the physical address range for this device memory.
 */
void hmm_devmem_remove(struct hmm_devmem *devmem)
{
	resource_size_t start, size;
	struct device *device;
	bool cdm = false;

	if (!devmem)
		return;

	device = devmem->device;
	start = devmem->resource->start;
	size = resource_size(devmem->resource);

	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
	hmm_devmem_pages_remove(devmem);

	if (!cdm)
		devm_release_mem_region(device, start, size);
}
EXPORT_SYMBOL(hmm_devmem_remove);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
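
/*
 * Illustrative sketch (hypothetical driver code): create one fake hmm_device
 * to stand in for several pieces of device memory, then drop the reference
 * when done. &hdev->device can serve as the struct device passed to
 * hmm_devmem_add().
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(my_driver_private);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	// ... hotplug device memory against &hdev->device ...
 *	hmm_device_put(hdev);
 */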

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */