// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

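/*
 * Like ->clear_flush_young(), but ->clear_young() is not required to flush
 * the secondary TLB, making it a cheaper way to test and clear the accessed
 * state (used e.g. by idle page tracking).
 */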
int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

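/*
 * Check whether any notifier still considers the address young (accessed)
 * without clearing the flag; returns as soon as one notifier reports young.
 */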
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

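/*
 * The pte at @address was just replaced with one pointing at a new page
 * (typically via set_pte_at_notify()); let notifiers update their secondary
 * mappings in place rather than forcing a full invalidation.
 */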
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

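/*
 * Call every registered ->invalidate_range_start() callback. A callback is
 * only allowed to fail, and then only with -EAGAIN, when the range is not
 * blockable; any failure is propagated back to the caller.
 */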
int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret = mn->ops->invalidate_range_start(mn, range);
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range() here too, so that a subsystem that
		 * already provides invalidate_range() does not also have to
		 * register an invalidate_range_end() callback. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 *
		 * The call to invalidate_range() is skipped when it is known to
		 * be safe, i.e. when the call site uses
		 * mmu_notifier_invalidate_range_only_end() because a call to
		 * invalidate_range() already happened under the page table
		 * lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, range);
	}
	srcu_read_unlock(&srcu, id);
}

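/*
 * Notify all registered notifiers that the mappings for [start, end) have
 * been removed or changed, so their secondary TLBs can be flushed.
 */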
void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	mn->mm = mm;
	mn->users = 1;

	if (!mm->mmu_notifier_mm) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->mmu_notifier_mm can't change while we hold
		 * the write side of the mmap_sem.
		 */
		mmu_notifier_mm =
			kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
		if (!mmu_notifier_mm)
			return -ENOMEM;

		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/* Pairs with the mmdrop in mmu_notifier_unregister_* */
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	if (mmu_notifier_mm)
		mm->mmu_notifier_mm = mmu_notifier_mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(mmu_notifier_mm);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @mn: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a get on the mmu_notifier, the mn->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

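/*
 * Look up an already registered notifier with the same ops on this mm and,
 * if found, take an additional reference on it. Returns NULL when no such
 * notifier exists, or ERR_PTR(-EOVERFLOW) if the refcount would overflow.
 */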
static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops != ops)
			continue;

		if (likely(mn->users != UINT_MAX))
			mn->users++;
		else
			mn = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->mmu_notifier_mm->lock);
		return mn;
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_sem.
 *
 * While the caller holds a get on the mmu_notifier, the mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (mm->mmu_notifier_mm) {
		mn = find_get_mmu_notifier(mm, ops);
		if (mn)
			return mn;
	}

	mn = ops->alloc_notifier(mm);
	if (IS_ERR(mn))
		return mn;
	mn->ops = ops;
	ret = __mmu_notifier_register(mn, mm);
	if (ret)
		goto out_free;
	return mn;
out_free:
	mn->ops->free_notifier(mn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can no longer run.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Cannot use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

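/*
 * Deferred SRCU callback: by the time this runs no srcu_read_lock() section
 * can still reference the notifier, so it is safe to free it and drop the
 * mm_count reference taken at registration.
 */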
static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *mn = container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = mn->mm;

	mn->ops->free_notifier(mn);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @mn: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference, the work to
 * free the notifier is run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *mn)
{
	struct mm_struct *mm = mn->mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (WARN_ON(!mn->users) || --mn->users)
		goto out_unlock;
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	call_srcu(&srcu, &mn->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->mmu_notifier_mm->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before calling this, the caller must ensure that all of its mmu_notifiers
 * have been fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

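/*
 * Report whether an invalidation only downgrades the range to read-only:
 * true only for MMU_NOTIFY_PROTECTION_VMA events on a vma that still has
 * VM_READ set.
 */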
bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);