Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
static inline int fls(int word)
{
#ifdef CONFIG_32BIT
#ifdef CONFIG_CPU_MIPS32
- __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
+ __asm__ (
+ " .set mips32 \n"
+ " clz %0, %1 \n"
+ " .set mips0 \n"
+ : "=r" (word)
+ : "r" (word));
return 32 - word;
#else
#ifdef CONFIG_64BIT
#ifdef CONFIG_CPU_MIPS64
- __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
+ __asm__ (
+ " .set mips64 \n"
+ " dclz %0, %1 \n"
+ " .set mips0 \n"
+ : "=r" (word)
+ : "r" (word));
return 64 - word;
#else
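For reference, the change can be read as the following standalone sketch (hypothetical helper name my_fls32; assumes a MIPS target and GNU as). The ".set mips32" directive lets the assembler accept clz even when the compiler passed an older -march on the command line, and ".set mips0" restores the command-line ISA afterwards:

	static inline int my_fls32(unsigned int word)	/* hypothetical */
	{
		__asm__(
		"	.set	mips32		\n"
		"	clz	%0, %1		\n"	/* count leading zeros */
		"	.set	mips0		\n"
		: "=r" (word)
		: "r" (word));

		return 32 - word;	/* e.g. word = 0x00800000: clz = 8, fls = 24 */
	}

The 64-bit dclz hunk has the same shape, with ".set mips64" and "return 64 - word".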
static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
{
__asm__(
+ " .set mips32r2 \n"
" wsbh %0, %1 \n"
+ " .set mips0 \n"
: "=r" (x)
: "r" (x));
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
__asm__(
+ " .set mips32r2 \n"
" wsbh %0, %1 \n"
" rotr %0, %0, 16 \n"
+ " .set mips0 \n"
: "=r" (x)
: "r" (x));
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_CPU_MIPSR2
+ " .set mips32r2 \n"
" ei \n"
+ " .set mips0 \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" .set push \n"
" .set noat \n"
#ifdef CONFIG_CPU_MIPSR2
+ " .set mips32r2 \n"
" di \n"
+ " .set mips0 \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_CPU_MIPSR2
+ " .set mips32r2 \n"
" di \\result \n"
" andi \\result, 1 \n"
+ " .set mips0 \n"
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 0x1f \n"
* Slow, but doesn't suffer from a relatively unlikely race
* condition we've had since day 1.
*/
+ " .set mips32r2 \n"
" beqz \\flags, 1f \n"
" di \n"
" ei \n"
+ " .set mips0 \n"
"1: \n"
#elif defined(CONFIG_CPU_MIPSR2)
/*