// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions common to all refcount implementations.
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg() helper, because the latter would allow
 * implementing unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
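
/*
 * Illustrative sketch (not part of this file): refcount_dec_if_one() used as
 * a try-delete operator. 'struct foo' and foo_release() are hypothetical
 * names introduced only for this example; the point is that a successful
 * 1 -> 0 transition means no other reference exists and no new one can be
 * taken, so tearing the object down is safe:
 *
 *	static bool foo_try_delete(struct foo *f)
 *	{
 *		if (!refcount_dec_if_one(&f->refs))
 *			return false;
 *
 *		foo_release(f);
 *		return true;
 *	}
 */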

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
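
/*
 * Illustrative sketch (not part of this file): refcount_dec_not_one() is the
 * lock-free fast path used by the dec-and-lock helpers below, and the same
 * pattern can be open-coded for lock types not covered here. 'struct foo' and
 * 'foo_rwsem' are hypothetical names introduced only for this example:
 *
 *	static bool foo_put_and_down_write(struct foo *f)
 *	{
 *		if (refcount_dec_not_one(&f->refs))
 *			return false;
 *
 *		down_write(&foo_rwsem);
 *		if (!refcount_dec_and_test(&f->refs)) {
 *			up_write(&foo_rwsem);
 *			return false;
 *		}
 *
 *		return true;
 *	}
 *
 * On a true return the caller still holds foo_rwsem, unlinks the object from
 * any shared structure, drops the lock and frees it.
 */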

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
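
/*
 * Illustrative sketch (not part of this file) of a typical caller: the mutex
 * is only taken when the count may drop to zero, so the common put path stays
 * lock-free. 'struct foo', 'foo_mutex' and foo_free() are hypothetical names
 * introduced only for this example:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_mutex_lock(&f->refs, &foo_mutex)) {
 *			list_del(&f->node);
 *			mutex_unlock(&foo_mutex);
 *			foo_free(f);
 *		}
 *	}
 */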

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
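
/*
 * Illustrative sketch (not part of this file): the same pattern as above with
 * a spinlock-protected lookup structure. 'struct foo', 'foo_lock' and the
 * hash-list membership are hypothetical, introduced only for this example:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_lock(&f->refs, &foo_lock)) {
 *			hash_del(&f->hnode);
 *			spin_unlock(&foo_lock);
 *			kfree(f);
 *		}
 *	}
 */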

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
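
/*
 * Illustrative sketch (not part of this file): the caller provides storage
 * for the saved IRQ flags and restores them when dropping the lock.
 * 'struct foo', 'foo_lock' and foo_free() are hypothetical names introduced
 * only for this example:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		unsigned long flags;
 *
 *		if (refcount_dec_and_lock_irqsave(&f->refs, &foo_lock, &flags)) {
 *			list_del(&f->node);
 *			spin_unlock_irqrestore(&foo_lock, flags);
 *			foo_free(f);
 *		}
 *	}
 */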