blob: 3a534fbebdcc5b26c740c59ac2b6f48bf5f0d852 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Peter Zijlstra29dee3c2017-02-10 16:27:52 +01002/*
Will Deacon77e99712019-11-21 11:58:56 +00003 * Out-of-line refcount functions common to all refcount implementations.
Peter Zijlstra29dee3c2017-02-10 16:27:52 +01004 */
5
Alexey Dobriyan75a040f2018-04-01 01:00:36 +03006#include <linux/mutex.h>
Peter Zijlstra29dee3c2017-02-10 16:27:52 +01007#include <linux/refcount.h>
Alexey Dobriyan75a040f2018-04-01 01:00:36 +03008#include <linux/spinlock.h>
Peter Zijlstra29dee3c2017-02-10 16:27:52 +01009#include <linux/bug.h>
10
David Windsorbd174162017-03-10 10:34:12 -050011/**
12 * refcount_dec_if_one - decrement a refcount if it is 1
13 * @r: the refcount
14 *
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010015 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
16 * success thereof.
17 *
18 * Like all decrement operations, it provides release memory order and provides
19 * a control dependency.
20 *
21 * It can be used like a try-delete operator; this explicit case is provided
22 * and not cmpxchg in generic, because that would allow implementing unsafe
23 * operations.
David Windsorbd174162017-03-10 10:34:12 -050024 *
25 * Return: true if the resulting refcount is 0, false otherwise
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010026 */
27bool refcount_dec_if_one(refcount_t *r)
28{
Peter Zijlstrab78c0d42017-02-01 16:07:55 +010029 int val = 1;
30
31 return atomic_try_cmpxchg_release(&r->refs, &val, 0);
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010032}
Greg Kroah-Hartmand557d1b2017-05-04 15:51:03 -070033EXPORT_SYMBOL(refcount_dec_if_one);
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010034
/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		/* A saturated counter is left pinned; report "decremented". */
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		/* Exactly 1: refuse — caller takes the slow/delete path. */
		if (val == 1)
			return false;

		new = val - 1;
		/* Unsigned wrap-around means val was 0: underflow. */
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

		/*
		 * try_cmpxchg reloads @val with the current counter value on
		 * failure, so the loop re-evaluates against fresh state.
		 * Release ordering makes prior accesses visible before the
		 * decrement.
		 */
	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010068
David Windsorbd174162017-03-10 10:34:12 -050069/**
70 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
71 * refcount to 0
72 * @r: the refcount
73 * @lock: the mutex to be locked
74 *
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010075 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
Will Deacon23e6b162019-11-21 11:58:53 +000076 * to decrement when saturated at REFCOUNT_SATURATED.
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010077 *
78 * Provides release memory ordering, such that prior loads and stores are done
79 * before, and provides a control dependency such that free() must come after.
80 * See the comment on top.
David Windsorbd174162017-03-10 10:34:12 -050081 *
82 * Return: true and hold mutex if able to decrement refcount to 0, false
83 * otherwise
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010084 */
85bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
86{
87 if (refcount_dec_not_one(r))
88 return false;
89
90 mutex_lock(lock);
91 if (!refcount_dec_and_test(r)) {
92 mutex_unlock(lock);
93 return false;
94 }
95
96 return true;
97}
Greg Kroah-Hartmand557d1b2017-05-04 15:51:03 -070098EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
Peter Zijlstra29dee3c2017-02-10 16:27:52 +010099
David Windsorbd174162017-03-10 10:34:12 -0500100/**
101 * refcount_dec_and_lock - return holding spinlock if able to decrement
102 * refcount to 0
103 * @r: the refcount
104 * @lock: the spinlock to be locked
105 *
Peter Zijlstra29dee3c2017-02-10 16:27:52 +0100106 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
Will Deacon23e6b162019-11-21 11:58:53 +0000107 * decrement when saturated at REFCOUNT_SATURATED.
Peter Zijlstra29dee3c2017-02-10 16:27:52 +0100108 *
109 * Provides release memory ordering, such that prior loads and stores are done
110 * before, and provides a control dependency such that free() must come after.
111 * See the comment on top.
David Windsorbd174162017-03-10 10:34:12 -0500112 *
113 * Return: true and hold spinlock if able to decrement refcount to 0, false
114 * otherwise
Peter Zijlstra29dee3c2017-02-10 16:27:52 +0100115 */
116bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
117{
118 if (refcount_dec_not_one(r))
119 return false;
120
121 spin_lock(lock);
122 if (!refcount_dec_and_test(r)) {
123 spin_unlock(lock);
124 return false;
125 }
126
127 return true;
128}
Greg Kroah-Hartmand557d1b2017-05-04 15:51:03 -0700129EXPORT_SYMBOL(refcount_dec_and_lock);
Peter Zijlstra29dee3c2017-02-10 16:27:52 +0100130
Anna-Maria Gleixner7ea959c2018-06-12 18:16:21 +0200131/**
132 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
133 * interrupts if able to decrement refcount to 0
134 * @r: the refcount
135 * @lock: the spinlock to be locked
136 * @flags: saved IRQ-flags if the is acquired
137 *
138 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
139 * with disabled interupts.
140 *
141 * Return: true and hold spinlock if able to decrement refcount to 0, false
142 * otherwise
143 */
144bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
145 unsigned long *flags)
146{
147 if (refcount_dec_not_one(r))
148 return false;
149
150 spin_lock_irqsave(lock, *flags);
151 if (!refcount_dec_and_test(r)) {
152 spin_unlock_irqrestore(lock, *flags);
153 return false;
154 }
155
156 return true;
157}
158EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);