/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>

#include <uapi/linux/kernel.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif

#define BITS_PER_TYPE(type)     (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)       __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)         __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)         __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)       __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
        for ((bit) = find_next_bit((addr), (size), (bit));      \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
        for ((bit) = find_first_zero_bit((addr), (size));       \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
        for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

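/*
 * Example: walking the set bits of a bitmap.  The names 'mask' and
 * 'nbits' below are only illustrative; any unsigned long bitmap and
 * its size in bits will do:
 *
 *      unsigned int bit;
 *
 *      for_each_set_bit(bit, mask, nbits)
 *              pr_debug("bit %u is set\n", bit);
 */
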
/**
 * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
 * @start: bit offset to start search and to store the current iteration offset
 * @clump: location to store copy of current 8-bit clump
 * @bits: bitmap address to base the search on
 * @size: bitmap size in number of bits
 */
#define for_each_set_clump8(start, clump, bits, size) \
        for ((start) = find_first_clump8(&(clump), (bits), (size)); \
             (start) < (size); \
             (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))

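/*
 * Example: each iteration stores the 8-bit clump that starts at the
 * returned offset in 'clump'.  The names 'bitmap' and 'nbits' below
 * are only illustrative:
 *
 *      unsigned long clump;
 *      unsigned int start;
 *
 *      for_each_set_clump8(start, clump, bitmap, nbits)
 *              pr_debug("clump at bit %u: %#lx\n", start, clump);
 */
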
static inline int get_bitmask_order(unsigned int count)
{
        int order;

        order = fls(count);
        return order;   /* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
        return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}

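/*
 * Example: hweight_long() is a population count, e.g.
 * hweight_long(0xf0UL) == 4 and hweight_long(0UL) == 0.
 */
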
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
        return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
        return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
        return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
        return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
        return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
        return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
        return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
        return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

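/*
 * Example: a rotate feeds the bits shifted out of one end back in at
 * the other, e.g. rol32(0x80000001, 1) == 0x00000003 and
 * ror32(0x00000003, 1) == 0x80000001.
 */
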
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
        __u8 shift = 31 - index;
        return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
        __u8 shift = 63 - index;
        return (__s64)(value << shift) >> shift;
}

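/*
 * Example: sign-extending a 12-bit two's-complement field stored in
 * the low bits of a register value: sign_extend32(0xfff, 11) == -1
 * and sign_extend32(0x7ff, 11) == 2047.
 */
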
static inline unsigned fls_long(unsigned long l)
{
        if (sizeof(l) == 4)
                return fls(l);
        return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
        if (count == 0)
                return -1;

        return fls(--count);
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order() but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
        if (l == 0UL)
                return -1;
        return (int)fls_long(--l);
}

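/*
 * Example: the returned order is that of the next power of two at or
 * above the argument, so get_count_order(16) == 4 and
 * get_count_order(17) == 5; get_count_order_long() behaves the same
 * way for unsigned long values.
 */
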
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
        if (((u32)word) == 0UL)
                return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
        return __ffs((unsigned long)word);
}

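/*
 * Example: __ffs64() returns the index of the least significant set
 * bit, e.g. __ffs64(0x8ULL) == 3 and __ffs64(1ULL << 32) == 32, on
 * both 32-bit and 64-bit architectures.
 */
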
/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to assign
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
                                       bool value)
{
        if (value)
                set_bit(nr, addr);
        else
                clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
                                         bool value)
{
        if (value)
                __set_bit(nr, addr);
        else
                __clear_bit(nr, addr);
}

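/*
 * Example: assign_bit() folds the set/clear decision into one call,
 * so
 *
 *      assign_bit(nr, addr, enabled);
 *
 * is equivalent to "if (enabled) set_bit(nr, addr); else
 * clear_bit(nr, addr);".  __assign_bit() is the non-atomic variant.
 */
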
#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)  \
({                                                              \
        const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
        typeof(*(ptr)) old__, new__;                            \
                                                                \
        do {                                                    \
                old__ = READ_ONCE(*(ptr));                      \
                new__ = (old__ & ~mask__) | bits__;             \
        } while (cmpxchg(ptr, old__, new__) != old__);          \
                                                                \
        old__;                                                  \
})
#endif

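/*
 * Example: atomically replace a masked field within a word.  With a
 * hypothetical 'flags' word whose low nibble holds a mode:
 *
 *      old = set_mask_bits(&flags, 0xf, mode & 0xf);
 *
 * The update is done in a cmpxchg() loop and the previous value of
 * the word is returned.
 */
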
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)      \
({                                                              \
        const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
        typeof(*(ptr)) old__, new__;                            \
                                                                \
        do {                                                    \
                old__ = READ_ONCE(*(ptr));                      \
                new__ = old__ & ~clear__;                       \
        } while (!(old__ & test__) &&                           \
                 cmpxchg(ptr, old__, new__) != old__);          \
                                                                \
        !(old__ & test__);                                      \
})
#endif

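/*
 * Example: clear bits only while a guard bit is not set.  With
 * hypothetical FLAG_PENDING and FLAG_LOCKED bits in a 'flags' word:
 *
 *      cleared = bit_clear_unless(&flags, FLAG_PENDING, FLAG_LOCKED);
 *
 * 'cleared' is true when FLAG_LOCKED was not set (so FLAG_PENDING was
 * removed) and false when FLAG_LOCKED was set and the word was left
 * unchanged.
 */
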
#endif /* __KERNEL__ */
#endif