s390/bitops: remove small optimization to fix clang build

clang does not know about the '%b1' operand modifier construct used in
the bitops inline assembly. Since the plan is to switch to compiler
atomic builtins anyway, there is no point in requesting clang support
for this, especially since the kernel seems to be the only user of this
construct.
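
For illustration, a minimal standalone sketch of the failing construct
(hypothetical reduced test case, not from the kernel; assumes an s390
target, where gcc's 'b' operand modifier prints an immediate operand as
an unsigned byte value):

/* gcc accepts the %b1 modifier and emits something like
 * "oi 0(%r2),8"; clang rejects it with the error quoted below.
 */
static inline void set_bit_3(unsigned char *byte)
{
	asm volatile(
		"oi	%0,%b1\n"	/* *byte |= 1 << 3 */
		: "+Q" (*byte)
		: "i" (1 << 3)
		: "cc", "memory");
}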

With this small optimization removed, it is possible to compile the
kernel with -march=zEC12 and higher using clang as well.

Build error:

In file included from ./include/linux/bitops.h:32:
./arch/s390/include/asm/bitops.h:69:4: error: invalid operand in inline asm: 'oi        $0,${1:b}'
                        "oi     %0,%b1\n"
                        ^

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Author: Heiko Carstens, 2021-01-15 20:01:36 +01:00
Committer: Vasily Gorbik
Parent: 6110ccecd3
Commit: efe5e0fea4

--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,18 +61,6 @@ static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned lon
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
 
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
-	if (__builtin_constant_p(nr)) {
-		unsigned char *caddr = __bitops_byte(nr, ptr);
-
-		asm volatile(
-			"oi	%0,%b1\n"
-			: "+Q" (*caddr)
-			: "i" (1 << (nr & 7))
-			: "cc", "memory");
-		return;
-	}
-#endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
 	__atomic64_or(mask, (long *)addr);
 }
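
After this hunk, arch_set_bit() unconditionally takes the generic path.
A sketch of the resulting function, reconstructed from the context lines
above (arch_clear_bit() and arch_change_bit() below reduce the same way,
with __atomic64_and() and __atomic64_xor()):

static __always_inline void arch_set_bit(unsigned long nr,
					 volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__atomic64_or(mask, (long *)addr);
}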
@@ -82,18 +70,6 @@ static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned l
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
 
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
-	if (__builtin_constant_p(nr)) {
-		unsigned char *caddr = __bitops_byte(nr, ptr);
-
-		asm volatile(
-			"ni	%0,%b1\n"
-			: "+Q" (*caddr)
-			: "i" (~(1 << (nr & 7)))
-			: "cc", "memory");
-		return;
-	}
-#endif
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
 	__atomic64_and(mask, (long *)addr);
 }
@@ -104,18 +80,6 @@ static __always_inline void arch_change_bit(unsigned long nr,
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
 
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
-	if (__builtin_constant_p(nr)) {
-		unsigned char *caddr = __bitops_byte(nr, ptr);
-
-		asm volatile(
-			"xi	%0,%b1\n"
-			: "+Q" (*caddr)
-			: "i" (1 << (nr & 7))
-			: "cc", "memory");
-		return;
-	}
-#endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
 	__atomic64_xor(mask, (long *)addr);
 }
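
As a sanity check on why dropping the optimization is safe: for a
constant nr, OR-ing the byte immediate 1 << (nr & 7) into the byte
containing bit nr sets exactly the same bit as OR-ing
1UL << (nr & (BITS_PER_LONG - 1)) into the whole word. A small
user-space model (hypothetical; assumes __bitops_byte() selects byte
(nr ^ 56) >> 3 of the big-endian 64-bit word, which is not shown in
this diff):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	for (unsigned int nr = 0; nr < 64; nr++) {
		/* retained path: 64-bit mask into the whole word */
		uint64_t word = 1ULL << (nr & 63);

		/* removed path, modeled: byte idx of a big-endian word
		 * (assumed (nr ^ 56) >> 3) covers bits 8*(7-idx)..8*(7-idx)+7
		 */
		unsigned int idx = (nr ^ 56) >> 3;
		uint64_t byte_or = (uint64_t)(1u << (nr & 7)) << (8 * (7 - idx));

		assert(word == byte_or);	/* both set bit nr */
	}
	return 0;
}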