/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @kref: reference count of this HMM struct
 * @lock: lock protecting ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	struct kref		kref;
	spinlock_t		lock;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}

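/*
 * hmm_invalidate_range() - invalidate snapshots and notify mirrors
 *
 * Marks every tracked range that overlaps the update as invalid and, when
 * @device is true, forwards the invalidation to each registered mirror via
 * its sync_cpu_device_pagetables() callback so the device page table can be
 * updated as well.
 */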
static int hmm_invalidate_range(struct hmm *hmm, bool device,
				const struct hmm_update *update)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update->end < range->start || update->start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock(&hmm->lock);

	if (!device)
		return 0;

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
		if (!update->blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			return -EAGAIN;
		}
	}
	up_read(&hmm->mirrors_sem);

	return 0;
}

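/*
 * hmm_release() - mmu_notifier release callback, called when the mm exits
 *
 * Walks the mirror list and hands each mirror back to its driver through the
 * optional release() callback. mirrors_sem is dropped around the callback so
 * a driver can wait on work that itself takes mmu_notifier paths without
 * deadlocking against us.
 */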
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm_get_hmm(mm);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so callback can wait on any pending
			 * work that might itself trigger mmu_notifier callback
			 * and thus would deadlock with us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *range)
{
	struct hmm *hmm = mm_get_hmm(range->mm);
	struct hmm_update update;
	int ret;

	VM_BUG_ON(!hmm);

	update.start = range->start;
	update.end = range->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = range->blockable;
	ret = hmm_invalidate_range(hmm, true, &update);
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *range)
{
	struct hmm *hmm = mm_get_hmm(range->mm);
	struct hmm_update update;

	VM_BUG_ON(!hmm);

	update.start = range->start;
	update.end = range->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = true;
	hmm_invalidate_range(hmm, false, &update);
	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
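/*
 * A minimal usage sketch for the mirror API; my_mirror_ops, my_sync() and
 * my_release() are hypothetical driver names, only the HMM calls and the two
 * hmm_mirror_ops callbacks used elsewhere in this file are real:
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_sync,    // invalidate device mappings
 *		.release                    = my_release, // mm is going away
 *	};
 *
 *	mirror->ops = &my_mirror_ops;
 *	down_write(&mm->mmap_sem);
 *	ret = hmm_mirror_register(mirror, mm);
 *	up_write(&mm->mmap_sem);
 *	...
 *	hmm_mirror_unregister(mirror);	// when the driver stops mirroring
 */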

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

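/*
 * struct hmm_vma_walk - private state shared by the page table walk callbacks
 *
 * @range: range being snapshotted or faulted
 * @last: last address handled, used to restart after -EAGAIN
 * @fault: true when called from hmm_vma_fault(), false for snapshot only
 * @block: whether faults are allowed to block (see hmm_vma_fault())
 */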
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EBUSY;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}

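/*
 * hmm_pte_need_fault() - decide whether a page fault is needed for one entry
 *
 * Compares what the caller asked for in the input pfns array (valid and/or
 * write) against what the CPU page table currently provides (cpu_flags) and
 * sets *fault / *write_fault accordingly. Device private entries only fault
 * when the caller explicitly requested device memory access.
 */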
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

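/*
 * pmd_to_hmm_pfn_flags() / pte_to_hmm_pfn_flags() - translate CPU page table
 * permissions into the caller supplied HMM pfn flag encoding: valid entries
 * always get HMM_PFN_VALID, writable entries additionally get HMM_PFN_WRITE,
 * everything else maps to 0.
 */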
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: report device private
		 * entries, wait on migration entries, and report anything
		 * else as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

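/*
 * hmm_vma_walk_pmd() - pmd_entry callback of the page table walk
 *
 * Dispatches on the pmd state: holes go to hmm_vma_walk_hole(), pmd migration
 * entries are waited on (when faulting), huge device or transparent huge pmds
 * are handled by hmm_vma_handle_pmd(), and regular pmds are walked one pte at
 * a time through hmm_vma_handle_pte().
 */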
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EAGAIN;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

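/*
 * hmm_pfns_clear() and hmm_pfns_special() fill (part of) the pfns array with
 * the caller supplied "none" and "special" values respectively, so the device
 * driver sees a well defined result even when the walk cannot provide one.
 */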
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for
 * further information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	range->hmm = NULL;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_get_or_create(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL) {
		hmm_put(hmm);
		return -EINVAL;
	}

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		hmm_put(hmm);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		hmm_put(hmm);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	/*
	 * Transfer the hmm reference to the range struct; it will be dropped
	 * inside the hmm_vma_range_done() function (which _must_ be called if
	 * this function returns 0).
	 */
	range->hmm = hmm;
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);

/*
 * hmm_vma_range_done() - stop tracking changes to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * The range struct is used to track updates to the CPU page table after a call
 * to either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is
 * done using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or by
 * using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this :
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	bool ret = false;

	/* Sanity check, this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start) {
		BUG();
		return false;
	}

	spin_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	ret = range->valid;
	spin_unlock(&range->hmm->lock);

	/* Is the mm still alive ? */
	if (range->hmm->mm == NULL)
		ret = false;

	/* Drop reference taken by hmm_vma_fault() or hmm_vma_get_pfns() */
	hmm_put(range->hmm);
	range->hmm = NULL;
	return ret;
}
EXPORT_SYMBOL(hmm_vma_range_done);

/*
 * hmm_vma_fault() - try to fault some addresses in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_done()
 *     goto retry;
 *   case 0:
 *     break;
 *   case -ENOMEM:
 *   case -EINVAL:
 *   case -EPERM:
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem)
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	range->hmm = NULL;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_get_or_create(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL) {
		hmm_put(hmm);
		return -EINVAL;
	}

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		hmm_put(hmm);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		hmm_put(hmm);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
		hmm_put(hmm);
	} else {
		/*
		 * Transfer the hmm reference to the range struct; it will be
		 * dropped inside the hmm_vma_range_done() function (which
		 * _must_ be called if this function returns 0).
		 */
		range->hmm = hmm;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);

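/*
 * The following helpers tie the lifetime of the ZONE_DEVICE pages to the
 * devmem->ref percpu refcount: release() completes the completion once the
 * last reference is gone, exit() waits for that and tears the refcount down,
 * and kill() starts the shutdown. They are wired into the dev_pagemap below.
 */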
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle the memory of multiple devices through
 * a single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed in order to use any other HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
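/*
 * Minimal usage sketch (error handling elided; my_drvdata is a hypothetical
 * pointer the calling driver wants to attach):
 *
 *	struct hmm_device *hdev = hmm_device_new(my_drvdata);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	...
 *	hmm_device_put(hdev);	// drops the reference; hmm_device_release()
 *				// frees it once the last reference is gone
 */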

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */