Similarly to the bitmap functions, find_next_*_bit() users will benefit if
we handle the case of bitmaps that fit into a single word inline. In the
very best case, the compiler can replace a function call with a few
instructions.
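
For a single-word bitmap the whole search reduces to one mask and one
count-trailing-zeros; a minimal userspace sketch of the pattern
(find_next_bit_word() is a hypothetical name, and GENMASK()/__ffs() are
replaced by their obvious userspace equivalents):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/* Fast path for a bitmap that fits into one word. */
static unsigned long find_next_bit_word(unsigned long word,
					unsigned long size,
					unsigned long offset)
{
	unsigned long val;

	if (offset >= size)
		return size;

	/* Keep only the bits in [offset, size), then take the lowest one. */
	val = word & GENMASK(size - 1, offset);
	return val ? (unsigned long)__builtin_ctzl(val) : size;
}

int main(void)
{
	/* Bits 3 and 5 are set; the first set bit at or above 4 is 5. */
	printf("%lu\n", find_next_bit_word(0x28UL, 64, 4));
	return 0;
}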
Here is a quite typical find_next_bit() user:
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
Currently, on ARM64 the generated code looks like this:
0000000000000000 <cpumask_next>:
0: a9bf7bfd stp x29, x30, [sp, #-16]!
4: 11000402 add w2, w0, #0x1
8: aa0103e0 mov x0, x1
c: d2800401 mov x1, #0x40 // #64
10: 910003fd mov x29, sp
14: 93407c42 sxtw x2, w2
18: 94000000 bl 0 <find_next_bit>
1c: a8c17bfd ldp x29, x30, [sp], #16
20: d65f03c0 ret
24: d503201f nop
After applying this patch:
0000000000000140 <cpumask_next>:
140: 11000400 add w0, w0, #0x1
144: 93407c00 sxtw x0, w0
148: f100fc1f cmp x0, #0x3f
14c: 54000168 b.hi 178 <cpumask_next+0x38> // b.pmore
150: f9400023 ldr x3, [x1]
154: 92800001 mov x1, #0xffffffffffffffff // #-1
158: 9ac02020 lsl x0, x1, x0
15c: 52800802 mov w2, #0x40 // #64
160: 8a030001 and x1, x0, x3
164: dac00020 rbit x0, x1
168: f100003f cmp x1, #0x0
16c: dac01000 clz x0, x0
170: 1a800040 csel w0, w2, w0, eq // eq = none
174: d65f03c0 ret
178: 52800800 mov w0, #0x40 // #64
17c: d65f03c0 ret
The find_next_bit() call is replaced with 6 instructions, while
find_next_bit() itself is 41 instructions plus the function call overhead.
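
After inlining, the compiler effectively sees something like the sketch
below (a reconstruction, not the preprocessed source: nr_cpumask_bits is
the compile-time constant 64 here, so GENMASK(63, offset) collapses to
~0UL << offset, and __builtin_ctzl() stands in for __ffs()):

/* What cpumask_next() boils down to once the fast path is inlined
 * (offset == n + 1): */
static unsigned long next_bit_inlined(const unsigned long *bits,
				      unsigned long offset)
{
	unsigned long val;

	if (offset >= 64)			/* cmp; b.hi; mov #0x40 */
		return 64;

	val = *bits & (~0UL << offset);		/* ldr; mov #-1; lsl; and */
	return val ? (unsigned long)__builtin_ctzl(val) : 64;
						/* rbit; clz; csel */
}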
Despite the inlining, scripts/bloat-o-meter reports a smaller .text size
after applying the series:
add/remove: 11/9 grow/shrink: 233/176 up/down: 5780/-6768 (-988)
Link: https://lkml.kernel.org/r/20210401003153.97325-10-yury.norov@gmail.com
Signed-off-by: Yury Norov <yury.norov@gmail.com>
Acked-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Cc: Alexey Klimov <aklimov@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Jianpeng Ma <jianpeng.ma@intel.com>
Cc: Joe Perches <joe@perches.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Stefano Brivio <sbrivio@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Wolfram Sang <wsa+renesas@sang-engineering.com>
Cc: Yoshinori Sato <ysato@users.osdn.me>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

The resulting include/asm-generic/bitops/le.h:
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_

#include <asm-generic/bitops/find.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/swab.h>

#if defined(__LITTLE_ENDIAN)

#define BITOP_LE_SWIZZLE	0

static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	return find_next_zero_bit(addr, size, offset);
}

static inline unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	return find_next_bit(addr, size, offset);
}

static inline unsigned long find_first_zero_bit_le(const void *addr,
		unsigned long size)
{
	return find_first_zero_bit(addr, size);
}

#elif defined(__BIG_ENDIAN)

/*
 * XOR-ing a bit number with the swizzle keeps its position within the
 * byte and mirrors the byte position within the word, which converts
 * little-endian bit numbering into the native big-endian one.
 */
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)

#ifndef find_next_zero_bit_le
static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	if (small_const_nbits(size)) {
		unsigned long val = *(const unsigned long *)addr;

		if (unlikely(offset >= size))
			return size;

		/*
		 * swab() puts the word into little-endian bit order;
		 * OR-ing in ~GENMASK() sets every bit outside
		 * [offset, size) so that ffz() skips them.
		 */
		val = swab(val) | ~GENMASK(size - 1, offset);
		return val == ~0UL ? size : ffz(val);
	}

	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
}
#endif

#ifndef find_next_bit_le
static inline unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	if (small_const_nbits(size)) {
		unsigned long val = *(const unsigned long *)addr;

		if (unlikely(offset >= size))
			return size;

		/* Keep only the bits in [offset, size), then find the lowest set one. */
		val = swab(val) & GENMASK(size - 1, offset);
		return val ? __ffs(val) : size;
	}

	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
}
#endif

#ifndef find_first_zero_bit_le
#define find_first_zero_bit_le(addr, size) \
	find_next_zero_bit_le((addr), (size), 0)
#endif

#else
#error "Please fix <asm/byteorder.h>"
#endif

static inline int test_bit_le(int nr, const void *addr)
{
	return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void set_bit_le(int nr, void *addr)
{
	set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void clear_bit_le(int nr, void *addr)
{
	clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int test_and_set_bit_le(int nr, void *addr)
{
	return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int test_and_clear_bit_le(int nr, void *addr)
{
	return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_set_bit_le(int nr, void *addr)
{
	return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_clear_bit_le(int nr, void *addr)
{
	return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
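
The swab() trick in the big-endian fast paths can be sanity-checked on any
host by materializing the big-endian load explicitly; a standalone sketch
with hypothetical names, where __builtin_bswap64()/__builtin_ctzl() stand
in for the kernel's swab()/__ffs() and a 64-bit unsigned long is assumed:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

/* Reference: little-endian bit numbering applied directly to the bytes. */
static unsigned long ref_next_bit_le(const uint8_t *b, unsigned long size,
				     unsigned long offset)
{
	for (unsigned long i = offset; i < size; i++)
		if (b[i / 8] & (1u << (i % 8)))
			return i;
	return size;
}

/* The value a big-endian CPU would see when loading the same 8 bytes. */
static unsigned long load_be(const uint8_t *b)
{
	unsigned long v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	/* LE bit numbering: bits 8, 14 and 63 are set. */
	const uint8_t bytes[8] = { 0x00, 0x41, 0, 0, 0, 0, 0, 0x80 };
	unsigned long size = 64, offset = 9;
	unsigned long val, fast;

	/* The kernel fast path, as a big-endian machine would execute it. */
	val = __builtin_bswap64(load_be(bytes)) & GENMASK(size - 1, offset);
	fast = val ? (unsigned long)__builtin_ctzl(val) : size;

	/* Both print 14. */
	printf("ref=%lu fast=%lu\n",
	       ref_next_bit_le(bytes, size, offset), fast);
	return 0;
}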
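For intuition about the swizzle used by the *_bit_le() accessors: on a
64-bit big-endian machine BITOP_LE_SWIZZLE is 56, and XOR-ing it into a
bit number keeps the bit position within its byte (the low 3 bits) while
mirroring the byte position within the word, so little-endian bit 0 lands
in the lowest-addressed byte, which holds value bits 56..63. A standalone
sketch assuming that layout:

#include <stdio.h>

#define BITS_PER_LONG 64
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)	/* 56 on 64-bit */

int main(void)
{
	/* Map little-endian bit numbers to native big-endian ones. */
	for (int nr = 0; nr < BITS_PER_LONG; nr += 8)
		printf("le bit %2d -> native bit %2d\n",
		       nr, nr ^ BITOP_LE_SWIZZLE);
	return 0;
}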