Skip to content

Commit 5fb4088

Browse files
committed
Merge tag 'bitmap-for-6.14' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov: "This includes the const_true() series from Vincent Mailhol, another __always_inline rework from Nathan Chancellor for RISC-V, and a couple of random fixes from Dr. David Alan Gilbert and I Hsin Cheng" * tag 'bitmap-for-6.14' of https://github.com/norov/linux: cpumask: Rephrase comments for cpumask_any*() APIs cpu: Remove unused init_cpu_online riscv: Always inline bitops linux/bits.h: simplify GENMASK_INPUT_CHECK() compiler.h: add const_true()
2 parents c2da8b3 + e876695 commit 5fb4088

File tree

5 files changed

+38
-23
lines changed

5 files changed

+38
-23
lines changed

arch/riscv/include/asm/bitops.h

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,7 @@ static __always_inline int variable_fls(unsigned int x)
228228
*
229229
* This operation may be reordered on other architectures than x86.
230230
*/
231-
static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
231+
static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
232232
{
233233
return __test_and_op_bit(or, __NOP, nr, addr);
234234
}
@@ -240,7 +240,7 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
240240
*
241241
* This operation can be reordered on architectures other than x86.
242242
*/
243-
static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
243+
static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
244244
{
245245
return __test_and_op_bit(and, __NOT, nr, addr);
246246
}
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
253253
* This operation is atomic and cannot be reordered.
254254
* It also implies a memory barrier.
255255
*/
256-
static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
256+
static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
257257
{
258258
return __test_and_op_bit(xor, __NOP, nr, addr);
259259
}
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
270270
* Note that @nr may be almost arbitrarily large; this function is not
271271
* restricted to acting on a single-word quantity.
272272
*/
273-
static inline void arch_set_bit(int nr, volatile unsigned long *addr)
273+
static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
274274
{
275275
__op_bit(or, __NOP, nr, addr);
276276
}
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
284284
* on non x86 architectures, so if you are writing portable code,
285285
* make sure not to rely on its reordering guarantees.
286286
*/
287-
static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
287+
static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
288288
{
289289
__op_bit(and, __NOT, nr, addr);
290290
}
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
298298
* Note that @nr may be almost arbitrarily large; this function is not
299299
* restricted to acting on a single-word quantity.
300300
*/
301-
static inline void arch_change_bit(int nr, volatile unsigned long *addr)
301+
static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
302302
{
303303
__op_bit(xor, __NOP, nr, addr);
304304
}
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
311311
* This operation is atomic and provides acquire barrier semantics.
312312
* It can be used to implement bit locks.
313313
*/
314-
static inline int arch_test_and_set_bit_lock(
314+
static __always_inline int arch_test_and_set_bit_lock(
315315
unsigned long nr, volatile unsigned long *addr)
316316
{
317317
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
324324
*
325325
* This operation is atomic and provides release barrier semantics.
326326
*/
327-
static inline void arch_clear_bit_unlock(
327+
static __always_inline void arch_clear_bit_unlock(
328328
unsigned long nr, volatile unsigned long *addr)
329329
{
330330
__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
345345
* non-atomic property here: it's a lot more instructions and we still have to
346346
* provide release semantics anyway.
347347
*/
348-
static inline void arch___clear_bit_unlock(
348+
static __always_inline void arch___clear_bit_unlock(
349349
unsigned long nr, volatile unsigned long *addr)
350350
{
351351
arch_clear_bit_unlock(nr, addr);
352352
}
353353

354-
static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
354+
static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
355355
volatile unsigned long *addr)
356356
{
357357
unsigned long res;

include/linux/bits.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,8 @@
2020
*/
2121
#if !defined(__ASSEMBLY__)
2222
#include <linux/build_bug.h>
23-
#define GENMASK_INPUT_CHECK(h, l) \
24-
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
25-
__is_constexpr((l) > (h)), (l) > (h), 0)))
23+
#include <linux/compiler.h>
24+
#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
2625
#else
2726
/*
2827
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,

include/linux/compiler.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,28 @@ static inline void *offset_to_ptr(const int *off)
307307
*/
308308
#define statically_true(x) (__builtin_constant_p(x) && (x))
309309

310+
/*
311+
* Similar to statically_true() but produces a constant expression
312+
*
313+
* To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
314+
* which require their input to be a constant expression and for which
315+
* statically_true() would otherwise fail.
316+
*
317+
* This is a trade-off: const_true() requires all its operands to be
318+
* compile time constants. Else, it would always return false even on
319+
* the most trivial cases like:
320+
*
321+
* true || non_const_var
322+
*
323+
* By contrast, statically_true() is able to fold more complex
324+
* tautologies and will return true on expressions such as:
325+
*
326+
* !(non_const_var * 8 % 4)
327+
*
328+
* For the general case, statically_true() is better.
329+
*/
330+
#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
331+
310332
/*
311333
* This is needed in functions which generate the stack canary, see
312334
* arch/x86/kernel/smpboot.c::start_secondary() for an example.

include/linux/cpumask.h

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -391,7 +391,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
391391
for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
392392

393393
/**
394-
* cpumask_any_but - return a "random" in a cpumask, but not this one.
394+
* cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one.
395395
* @mask: the cpumask to search
396396
* @cpu: the cpu to ignore.
397397
*
@@ -411,7 +411,7 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
411411
}
412412

413413
/**
414-
* cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
414+
* cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one.
415415
* @mask1: the first input cpumask
416416
* @mask2: the second input cpumask
417417
* @cpu: the cpu to ignore
@@ -840,15 +840,15 @@ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
840840
}
841841

842842
/**
843-
* cpumask_any - pick a "random" cpu from *srcp
843+
* cpumask_any - pick an arbitrary cpu from *srcp
844844
* @srcp: the input cpumask
845845
*
846846
* Return: >= nr_cpu_ids if no cpus set.
847847
*/
848848
#define cpumask_any(srcp) cpumask_first(srcp)
849849

850850
/**
851-
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
851+
* cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2
852852
* @mask1: the first input cpumask
853853
* @mask2: the second input cpumask
854854
*
@@ -1043,7 +1043,6 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
10431043
/* Wrappers for arch boot code to manipulate normally-constant masks */
10441044
void init_cpu_present(const struct cpumask *src);
10451045
void init_cpu_possible(const struct cpumask *src);
1046-
void init_cpu_online(const struct cpumask *src);
10471046

10481047
#define assign_cpu(cpu, mask, val) \
10491048
assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))

kernel/cpu.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3128,11 +3128,6 @@ void init_cpu_possible(const struct cpumask *src)
31283128
cpumask_copy(&__cpu_possible_mask, src);
31293129
}
31303130

3131-
void init_cpu_online(const struct cpumask *src)
3132-
{
3133-
cpumask_copy(&__cpu_online_mask, src);
3134-
}
3135-
31363131
void set_cpu_online(unsigned int cpu, bool online)
31373132
{
31383133
/*

0 commit comments

Comments
 (0)