// SPDX-License-Identifier: GPL-2.0
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at REFCOUNT_SATURATED and will not
 * move once there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire, for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc, this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before, it also provides a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */

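/*
 * A minimal usage sketch of the intended pattern, relying on the ordering
 * rules described above (the 'struct foo', foo_get() and foo_put() names are
 * hypothetical, not part of this file): the increment is relaxed because the
 * means of finding the object already orders it, while the release + acquire
 * semantics of the final decrement make the free() safe.
 *
 *	struct foo {
 *		refcount_t ref;
 *		// ... payload ...
 *	};
 *
 *	void foo_get(struct foo *f)
 *	{
 *		refcount_inc(&f->ref);			// fully relaxed
 *	}
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->ref))	// release, acquire on 1->0
 *			kfree(f);
 *	}
 */
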
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

/**
 * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		new = val + i;
		if (new < val)
			new = REFCOUNT_SATURATED;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero_checked);

/**
 * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add_checked(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add_checked);

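/*
 * An illustrative (hypothetical) use of refcount_add(): taking several
 * references in one go, e.g. when a batch of @nr requests is queued against
 * the same object and each request later drops its reference individually.
 * The foo_queue_batch() and foo_put() names are made up for the sketch.
 *
 *	void foo_queue_batch(struct foo *f, unsigned int nr)
 *	{
 *		refcount_add(nr, &f->ref);
 *		// ... queue nr requests, each calling foo_put() when done ...
 *	}
 */
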
/**
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero_checked(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero_checked);

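/*
 * A common (sketched) pattern for refcount_inc_not_zero(): a lockless lookup
 * where the object memory is kept stable by RCU, so the only question is
 * whether the object is still live. The 'foo_lookup' / 'foo_table_find'
 * names below are hypothetical.
 *
 *	struct foo *foo_lookup(unsigned long key)
 *	{
 *		struct foo *f;
 *
 *		rcu_read_lock();
 *		f = foo_table_find(key);		// hypothetical RCU-protected lookup
 *		if (f && !refcount_inc_not_zero(&f->ref))
 *			f = NULL;			// found it mid-teardown; treat as absent
 *		rcu_read_unlock();
 *
 *		return f;				// caller now owns a reference, or NULL
 *	}
 */
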
/**
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc_checked(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc_checked);

/**
 * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	if (!new) {
		smp_acquire__after_ctrl_dep();
		return true;
	}
	return false;

}
EXPORT_SYMBOL(refcount_sub_and_test_checked);

/**
 * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test_checked(refcount_t *r)
{
	return refcount_sub_and_test_checked(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test_checked);

/**
 * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec_checked(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec_checked);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because that would allow implementing unsafe
 * operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);

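/*
 * Sketch of the try-delete pattern refcount_dec_if_one() is meant for (the
 * foo_try_teardown() and foo_free() names are hypothetical): only tear the
 * object down if ours was provably the last reference, otherwise back off
 * and leave it to the remaining users.
 *
 *	bool foo_try_teardown(struct foo *f)
 *	{
 *		if (!refcount_dec_if_one(&f->ref))
 *			return false;		// other references remain; do nothing
 *
 *		// we performed the 1 -> 0 transition: no other users exist
 *		foo_free(f);			// hypothetical destructor
 *		return true;
 *	}
 */
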
/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);

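/*
 * Typical (sketched) caller of refcount_dec_and_mutex_lock(): the final put
 * must unlink the object from a list protected by a mutex, and the mutex must
 * be held across both the unlink and the decision to free. The 'foo_list_mutex'
 * and the object's list field are hypothetical.
 *
 *	void foo_release(struct foo *f)
 *	{
 *		if (refcount_dec_and_mutex_lock(&f->ref, &foo_list_mutex)) {
 *			list_del(&f->list);		// unlink while still holding the mutex
 *			mutex_unlock(&foo_list_mutex);
 *			kfree(f);
 *		}
 *	}
 */
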
/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
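
/*
 * Sketch of refcount_dec_and_lock_irqsave() from a context where the
 * protecting spinlock may also be taken from an interrupt handler, so the
 * caller must restore the saved IRQ flags itself once the object has been
 * unlinked (the 'foo_lock' and foo_release_irq() names are hypothetical).
 *
 *	void foo_release_irq(struct foo *f)
 *	{
 *		unsigned long flags;
 *
 *		if (refcount_dec_and_lock_irqsave(&f->ref, &foo_lock, &flags)) {
 *			list_del(&f->list);
 *			spin_unlock_irqrestore(&foo_lock, flags);
 *			kfree(f);
 *		}
 *	}
 */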