#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.  In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/bug.h>

/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/**
 * cpumask_bits - get the bits in a cpumask
 * @maskp: the struct cpumask *
 *
 * You should only assume nr_cpu_ids bits of this mask are valid.  This is
 * a macro so it's const-correct.
 */
#define cpumask_bits(maskp) ((maskp)->bits)

/**
 * cpumask_pr_args - printf args to output a cpumask
 * @maskp: cpumask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
 */
#define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)
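/*
 * Illustrative sketch (not part of the original header): cpumask_pr_args()
 * supplies the field width and bitmap pointer that the '%*pb' / '%*pbl'
 * printk formats expect, e.g.:
 *
 *	pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
 */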

#if NR_CPUS == 1
#define nr_cpu_ids		1U
#else
extern unsigned int nr_cpu_ids;
#endif

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits	nr_cpu_ids
#else
#define nr_cpumask_bits	((unsigned int)NR_CPUS)
#endif

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
 *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 * The cpu_possible_mask is fixed at boot time, as the set of CPU ids
 * that might ever be plugged in at any time during the
 * life of that system boot.  The cpu_present_mask is dynamic(*),
 * representing which CPUs are currently plugged in.  And
 * cpu_online_mask is the dynamic subset of cpu_present_mask,
 * indicating those CPUs available for scheduling.
 *
 * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
 * ACPI reports present at boot.
 *
 * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 * depending on what ACPI reports as currently plugged in, otherwise
 * cpu_present_mask is just a copy of cpu_possible_mask.
 *
 * (*) Well, cpu_present_mask is dynamic in the hotplug case.  If not
 *     hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 *  1) UP arches (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *     the assumption that their single CPU is online.  The UP
 *     cpu_{online,possible,present}_masks are placebos.  Changing them
 *     will have no useful effect on the following num_*_cpus()
 *     and cpu_*() macros in the UP case.  This ugliness is a UP
 *     optimization - don't waste any instructions or memory references
 *     asking if you're online or how many CPUs there are if there is
 *     only one CPU.
 */

extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)

#if NR_CPUS > 1
#define num_online_cpus()	cpumask_weight(cpu_online_mask)
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)
#define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
#else
#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U
#define cpu_online(cpu)		((cpu) == 0)
#define cpu_possible(cpu)	((cpu) == 0)
#define cpu_present(cpu)	((cpu) == 0)
#define cpu_active(cpu)		((cpu) == 0)
#endif
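/*
 * Illustrative sketch (not from the original header): a typical caller
 * gates per-CPU work on these helpers, e.g.
 *
 *	if (cpu_online(cpu) && num_online_cpus() > 1)
 *		queue_work_on(cpu, wq, work);
 *
 * (queue_work_on(), wq and work are assumed here purely for illustration.)
 */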

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= nr_cpumask_bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
	return cpu;
}

#if NR_CPUS == 1
/* Uniprocessor.  Assume all masks are "1". */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return 0;
}

static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return 0;
}

/* Valid inputs for n are -1 and 0. */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	return n+1;
}

static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	return n+1;
}

static inline unsigned int cpumask_next_and(int n,
					    const struct cpumask *srcp,
					    const struct cpumask *andp)
{
	return n+1;
}

/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
					   unsigned int cpu)
{
	return 1;
}

static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}

#define for_each_cpu(cpu, mask)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask)		\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_and(cpu, mask, and)	\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
#else
/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_last - get the last CPU in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpumask_bits if no CPUs set.
 */
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

unsigned int cpumask_next(int n, const struct cpumask *srcp);

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}

int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;				\
		(cpu) = cpumask_next((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)
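/*
 * Example usage (illustrative only, not from the original file): counting
 * the set bits by hand, equivalent to cpumask_weight(cpu_online_mask):
 *
 *	unsigned int cpu, count = 0;
 *
 *	for_each_cpu(cpu, cpu_online_mask)
 *		count++;
 */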

/**
 * for_each_cpu_not - iterate over every cpu in a complemented mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_not(cpu, mask)				\
	for ((cpu) = -1;					\
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)					\
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
	     (cpu) < nr_cpumask_bits;						\
	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
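/*
 * Illustrative sketch: spreading a search across CPUs starting near the
 * caller, so concurrent callers don't all start probing at CPU 0.
 * try_to_claim() is a hypothetical helper, shown only for illustration:
 *
 *	int cpu;
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, smp_processor_id())
 *		if (try_to_claim(cpu))
 *			break;
 */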

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the first cpumask pointer
 * @and: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask, &and);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask, and)				\
	for ((cpu) = -1;						\
		(cpu) = cpumask_next_and((cpu), (mask), (and)),		\
		(cpu) < nr_cpu_ids;)
#endif /* SMP */

#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] =  1UL						\
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}


/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in @cpumask, else returns 0
 */
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
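/*
 * Illustrative sketch putting the set/clear/test helpers together (assumes
 * an on-stack struct cpumask is acceptable, i.e. !CONFIG_CPUMASK_OFFSTACK):
 *
 *	struct cpumask mymask;
 *
 *	cpumask_clear(&mymask);
 *	cpumask_set_cpu(2, &mymask);
 *	if (cpumask_test_cpu(2, &mymask))
 *		cpumask_clear_cpu(2, &mymask);
 */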

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_set_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_clear_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_setall(struct cpumask *dstp)
{
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns 0, else returns 1
 */
static inline int cpumask_and(struct cpumask *dstp,
			      const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
			  cpumask_bits(src2p), nr_cpumask_bits);
}
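/*
 * Example (illustrative only): computing the online subset of an affinity
 * mask and checking that something is left.  'affinity' and 'tmp' are
 * assumed local variables, not part of this header:
 *
 *	if (!cpumask_and(&tmp, affinity, cpu_online_mask))
 *		return -EINVAL;
 */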

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
		  cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_xor(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
		   cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * If *@dstp is empty, returns 0, else returns 1
 */
static inline int cpumask_andnot(struct cpumask *dstp,
				 const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
			     cpumask_bits(src2p), nr_cpumask_bits);
}

/**
 * cpumask_complement - *dstp = ~*srcp
 * @dstp: the cpumask result
 * @srcp: the input to invert
 */
static inline void cpumask_complement(struct cpumask *dstp,
				      const struct cpumask *srcp)
{
	bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
			  nr_cpumask_bits);
}

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_equal(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			    nr_cpumask_bits);
}

/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 */
static inline bool cpumask_intersects(const struct cpumask *src1p,
				      const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
				 nr_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
 */
static inline int cpumask_subset(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
			     nr_cpumask_bits);
}

/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear.
 */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set.
 */
static inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_right(struct cpumask *dstp,
				       const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
			   nr_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_left(struct cpumask *dstp,
				      const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
			  nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(struct cpumask *dstp,
				const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
 */
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (< nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))
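/*
 * Illustrative use of cpumask_of() (the consumer shown is only an assumed
 * example, not something this header mandates):
 *
 *	set_cpus_allowed_ptr(task, cpumask_of(cpu));
 */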

/**
 * cpumask_parse_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_parselist_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parselist_user(const char __user *buf, int len,
					 struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
				     nr_cpumask_bits);
}

/**
 * cpumask_parse - extract a cpumask from a string
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	char *nl = strchr(buf, '\n');
	unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);

	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpulist_parse - extract a cpumask from a string of ranges
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Returns -errno, or 0 for success.
 */
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
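/*
 * Illustrative sketch of the two parsing flavours; the buffer contents are
 * made-up examples and 'mask' is an assumed local cpumask:
 *
 *	cpumask_parse("f", &mask);	- hex word format, sets CPUs 0-3
 *	cpulist_parse("0-2,4", &mask);	- list format, sets CPUs 0, 1, 2 and 4
 */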

/**
 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
 */
static inline size_t cpumask_size(void)
{
	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play!  In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a noop, so it still works.
 *
 * ie.
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	... use 'tmpmask' like a normal struct cpumask * ...
 *
 *	free_cpumask_var(tmpmask);
 *
 *
 * There is one notable exception, however: alloc_cpumask_var() allocates
 * only nr_cpumask_bits bits (on the other hand, a real cpumask_t always has
 * NR_CPUS bits).  Therefore you must not dereference and copy a
 * cpumask_var_t by value:
 *
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	var = *tmpmask;
 *
 * This code does an NR_CPUS-sized memcpy and can cause memory corruption;
 * cpumask_copy() provides safe copy functionality instead.
 *
 * Note that there is another evil here: if you define a cpumask_var_t
 * as a percpu variable, then the way to obtain the address of the cpumask
 * structure differs, and that influences which this_cpu_* operation needs
 * to be used.  Please use this_cpu_cpumask_var_t in those cases.  The direct
 * use of this_cpu_ptr() or this_cpu_read() will lead to failures when the
 * other type of cpumask_var_t implementation is configured.
 *
 * Please also note that __cpumask_var_read_mostly can be used to declare
 * a cpumask_var_t variable itself (not its content) as read mostly.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
#define __cpumask_var_read_mostly __read_mostly

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}

#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					   int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
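/*
 * Putting the cpumask_var_t API together, as an illustrative sketch only
 * (some_mask and do_something() are assumed placeholders):
 *
 *	cpumask_var_t tmp;
 *	unsigned int cpu;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(tmp, cpu_online_mask, some_mask);
 *	for_each_cpu(cpu, tmp)
 *		do_something(cpu);
 *	free_cpumask_var(tmp);
 */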

/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

static inline void reset_cpu_possible_mask(void)
{
	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
}

static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, &__cpu_possible_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}

static inline void
set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, &__cpu_present_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_present_mask);
}

static inline void
set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, &__cpu_online_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_online_mask);
}

static inline void
set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, &__cpu_active_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_active_mask);
}


/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}
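/*
 * Illustrative sketch of to_cpumask() on an early-boot static bitmap
 * (boot_cpus_bits and boot_cpus are assumed names, not part of this header):
 *
 *	static DECLARE_BITMAP(boot_cpus_bits, NR_CPUS);
 *	static struct cpumask *const boot_cpus = to_cpumask(boot_cpus_bits);
 */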

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
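/*
 * Illustrative walk-through of the offset trick above (assuming the usual
 * cpu_bit_bitmap layout where row x+1 has only bit x of its first word set):
 * for cpu == 67 on a 64-bit kernel, row 1 + (67 % 64) == 4 has bit 3 set in
 * its first word, and stepping the pointer back by 67 / 64 == 1 word makes
 * that word the second word of the returned mask, i.e. bit 67 overall.
 */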

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL						\
{								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif /* NR_CPUS > BITS_PER_LONG */

/**
 * cpumap_print_to_pagebuf - copies the cpumask into the buffer, either
 *	as a comma-separated list of cpus or as hex values of the cpumask
 * @list: indicates whether the cpumap must be a list
 * @mask: the cpumask to copy
 * @buf: the buffer to copy into
 *
 * Returns the length of the (null-terminated) @buf string, zero if
 * nothing is copied.
 */
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				       nr_cpu_ids);
}
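/*
 * Typical (illustrative) sysfs usage; the show() callback below is an
 * assumed example, not part of this header:
 *
 *	static ssize_t cpus_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		return cpumap_print_to_pagebuf(true, buf, cpu_online_mask);
 *	}
 */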

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif /* NR_CPUS > BITS_PER_LONG */

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] =  1UL							\
} }

#endif /* __LINUX_CPUMASK_H */