arm64: Remove useless UAO IPI and describe how this gets enabled
authorJames Morse <james.morse@arm.com>
Wed, 6 Apr 2022 16:45:05 +0000 (17:45 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Apr 2022 05:52:13 +0000 (07:52 +0200)
commit c8b06e3fddddaae1a87ed479edcb8b3d85caecc7 upstream.

Since its introduction, the UAO enable call was broken, and useless.
commit 2a6dcb2b5f3e ("arm64: cpufeature: Schedule enable() calls instead
of calling them via IPI"), fixed the framework so that these calls
are scheduled, so that they can modify PSTATE.

Now it is just useless. Remove it. UAO is enabled by the code patching
which causes get_user() and friends to use the 'ldtr' family of
instructions. This relies on the PSTATE.UAO bit being set to match
addr_limit, which we do in uao_thread_switch() called via __switch_to().

All that is needed to enable UAO is to patch the code and call schedule().
__apply_alternatives_multi_stop() calls stop_machine() when it modifies
the kernel text to enable the alternatives (including the UAO code in
uao_thread_switch()). Once stop_machine() has finished, __switch_to() is
called to reschedule the original task, which causes PSTATE.UAO to be set
appropriately. An explicit enable() call is not needed.

Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/processor.h
arch/arm64/kernel/cpufeature.c
arch/arm64/mm/fault.c

index 9ee660013e5cbfab0ab02d286a0a292b0ec0259e..d27e472bbbf1592e4d6b4c2c58b77738aaaac209 100644 (file)
@@ -220,7 +220,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 #endif
 
 int cpu_enable_pan(void *__unused);
-int cpu_enable_uao(void *__unused);
 int cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASSEMBLY__ */
index 4130a901ae0d10015f32afac4104b8d71c83da34..6601dd4005c3a6fe98c5f7c9c3af38adc5eca183 100644 (file)
@@ -905,7 +905,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .sys_reg = SYS_ID_AA64MMFR2_EL1,
                .field_pos = ID_AA64MMFR2_UAO_SHIFT,
                .min_field_value = 1,
-               .enable = cpu_enable_uao,
+               /*
+                * We rely on stop_machine() calling uao_thread_switch() to set
+                * UAO immediately after patching.
+                */
        },
 #endif /* CONFIG_ARM64_UAO */
 #ifdef CONFIG_ARM64_PAN
index f3d3f2e97adde498ea42d9fa5b595823d52ed431..e973002530dec63525ff6010a2f9b2de96b7f83e 100644 (file)
@@ -740,17 +740,3 @@ int cpu_enable_pan(void *__unused)
        return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
-
-#ifdef CONFIG_ARM64_UAO
-/*
- * Kernel threads have fs=KERNEL_DS by default, and don't need to call
- * set_fs(), devtmpfs in particular relies on this behaviour.
- * We need to enable the feature at runtime (instead of adding it to
- * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
- */
-int cpu_enable_uao(void *__unused)
-{
-       asm(SET_PSTATE_UAO(1));
-       return 0;
-}
-#endif /* CONFIG_ARM64_UAO */