
Commit 8cae8e0

hca authored and Alexander Gordeev committed
s390/bitops: Switch to generic bitops
The generic bitops implementation is nearly identical to the s390 implementation, therefore switch to the generic variant.

This results in a small kernel image size decrease. This is because for the generic variant the nr parameter for most bitops functions is of type unsigned int, while the s390 variant uses unsigned long.

bloat-o-meter:

add/remove: 670/670 grow/shrink: 167/209 up/down: 21440/-21792 (-352)

Acked-by: Alexander Gordeev <[email protected]>
Signed-off-by: Heiko Carstens <[email protected]>
Signed-off-by: Alexander Gordeev <[email protected]>
1 parent 061a5e4 commit 8cae8e0
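For context on the size argument above (an editorial example, not part of the commit): both the removed s390 helpers and the generic bitops reduce a bit number nr to a word within the bitmap plus a mask for the bit inside that word; the generic variant merely takes nr as unsigned int for most functions. A minimal standalone C sketch of that word/mask computation, using a made-up demo_set_bit() and a local BITS_PER_LONG definition rather than the kernel's, could look like this:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Hypothetical illustration only: the generic kernel variant passes nr as
 * unsigned int, the old s390 variant used unsigned long; the word/mask
 * arithmetic is the same either way. */
static void demo_set_bit(unsigned int nr, unsigned long *addr)
{
        unsigned long *word = addr + nr / BITS_PER_LONG;   /* which word holds the bit */
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);  /* which bit within that word */

        *word |= mask;
}

int main(void)
{
        unsigned long bitmap[4] = { 0 };

        demo_set_bit(70, bitmap);  /* with 64-bit longs: sets bit 6 of bitmap[1] */
        printf("%lx %lx %lx %lx\n", bitmap[0], bitmap[1], bitmap[2], bitmap[3]);
        return 0;
}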

File tree

1 file changed: 3 additions, 178 deletions


arch/s390/include/asm/bitops.h

Lines changed: 3 additions & 178 deletions
@@ -36,184 +36,9 @@
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/atomic_ops.h>
-#include <asm/barrier.h>
-
-#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-
-static inline unsigned long *
-__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
-{
-        unsigned long addr;
-
-        addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
-        return (unsigned long *)addr;
-}
-
-static inline unsigned long __bitops_mask(unsigned long nr)
-{
-        return 1UL << (nr & (BITS_PER_LONG - 1));
-}
-
-static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
-{
-        unsigned long *addr = __bitops_word(nr, ptr);
-        unsigned long mask = __bitops_mask(nr);
-
-        __atomic64_or(mask, (long *)addr);
-}
-
-static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
-{
-        unsigned long *addr = __bitops_word(nr, ptr);
-        unsigned long mask = __bitops_mask(nr);
-
-        __atomic64_and(~mask, (long *)addr);
-}
-
-static __always_inline void arch_change_bit(unsigned long nr,
-                                            volatile unsigned long *ptr)
-{
-        unsigned long *addr = __bitops_word(nr, ptr);
-        unsigned long mask = __bitops_mask(nr);
-
-        __atomic64_xor(mask, (long *)addr);
-}
-
-static inline bool arch_test_and_set_bit(unsigned long nr,
-                                         volatile unsigned long *ptr)
-{
-        unsigned long *addr = __bitops_word(nr, ptr);
-        unsigned long mask = __bitops_mask(nr);
-        unsigned long old;
-
-        old = __atomic64_or_barrier(mask, (long *)addr);
-        return old & mask;
-}
-
-static inline bool arch_test_and_clear_bit(unsigned long nr,
-                                           volatile unsigned long *ptr)
-{
-        unsigned long *addr = __bitops_word(nr, ptr);
-        unsigned long mask = __bitops_mask(nr);
-        unsigned long old;
-
-        old = __atomic64_and_barrier(~mask, (long *)addr);
-        return old & mask;
-}
-
-static inline bool arch_test_and_change_bit(unsigned long nr,
-                                            volatile unsigned long *ptr)
-{
-        unsigned long *addr = __bitops_word(nr, ptr);
-        unsigned long mask = __bitops_mask(nr);
-        unsigned long old;
-
-        old = __atomic64_xor_barrier(mask, (long *)addr);
-        return old & mask;
-}
-
-static __always_inline void
-arch___set_bit(unsigned long nr, volatile unsigned long *addr)
-{
-        unsigned long *p = __bitops_word(nr, addr);
-        unsigned long mask = __bitops_mask(nr);
-
-        *p |= mask;
-}
-
-static __always_inline void
-arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
-        unsigned long *p = __bitops_word(nr, addr);
-        unsigned long mask = __bitops_mask(nr);
-
-        *p &= ~mask;
-}
-
-static __always_inline void
-arch___change_bit(unsigned long nr, volatile unsigned long *addr)
-{
-        unsigned long *p = __bitops_word(nr, addr);
-        unsigned long mask = __bitops_mask(nr);
-
-        *p ^= mask;
-}
-
-static __always_inline bool
-arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
-{
-        unsigned long *p = __bitops_word(nr, addr);
-        unsigned long mask = __bitops_mask(nr);
-        unsigned long old;
-
-        old = *p;
-        *p |= mask;
-        return old & mask;
-}
-
-static __always_inline bool
-arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
-        unsigned long *p = __bitops_word(nr, addr);
-        unsigned long mask = __bitops_mask(nr);
-        unsigned long old;
-
-        old = *p;
-        *p &= ~mask;
-        return old & mask;
-}
-
-static __always_inline bool
-arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
-{
-        unsigned long *p = __bitops_word(nr, addr);
-        unsigned long mask = __bitops_mask(nr);
-        unsigned long old;
-
-        old = *p;
-        *p ^= mask;
-        return old & mask;
-}
-
-#define arch_test_bit generic_test_bit
-#define arch_test_bit_acquire generic_test_bit_acquire
-
-static inline bool arch_test_and_set_bit_lock(unsigned long nr,
-                                              volatile unsigned long *ptr)
-{
-        if (arch_test_bit(nr, ptr))
-                return true;
-        return arch_test_and_set_bit(nr, ptr);
-}
-
-static inline void arch_clear_bit_unlock(unsigned long nr,
-                                         volatile unsigned long *ptr)
-{
-        smp_mb__before_atomic();
-        arch_clear_bit(nr, ptr);
-}
-
-static inline void arch___clear_bit_unlock(unsigned long nr,
-                                           volatile unsigned long *ptr)
-{
-        smp_mb();
-        arch___clear_bit(nr, ptr);
-}
-
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
-                                                    volatile unsigned long *ptr)
-{
-        unsigned long old;
-
-        old = __atomic64_xor_barrier(mask, (long *)ptr);
-        return old & BIT(7);
-}
-#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
-
-#include <asm-generic/bitops/instrumented-atomic.h>
-#include <asm-generic/bitops/instrumented-non-atomic.h>
-#include <asm-generic/bitops/instrumented-lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/lock.h>
 
 /*
  * Functions which use MSB0 bit numbering.
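An editorial side note on the removed __bitops_word() helper (not part of the commit): the expression (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3 clears the low bit-index portion of nr and divides by 8, which is simply the byte offset of the unsigned long word containing bit nr, i.e. (nr / BITS_PER_LONG) * sizeof(unsigned long). A small standalone check of that equivalence, with BITS_PER_LONG defined locally for the sketch:

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
        /* Verify the removed helper's offset formula against the plainer form. */
        for (unsigned long nr = 0; nr < 1024; nr++) {
                unsigned long off_old = (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
                unsigned long off_new = (nr / BITS_PER_LONG) * sizeof(unsigned long);

                assert(off_old == off_new);
        }
        printf("offsets match\n");
        return 0;
}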
