mirror of https://github.com/OpenIPC/firmware.git
208 lines
5.4 KiB
diff -drupN a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
--- a/arch/x86/kernel/process.c	2018-08-06 17:23:04.000000000 +0300
+++ b/arch/x86/kernel/process.c	2022-06-12 05:28:14.000000000 +0300
@@ -33,7 +33,6 @@
 #include <asm/mce.h>
 #include <asm/vm86.h>
 #include <asm/switch_to.h>
-#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -68,19 +67,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 #endif
 
 /*
@@ -203,148 +189,6 @@ static inline void switch_to_bitmap(stru
 	}
 }
 
-#ifdef CONFIG_SMP
-
-struct ssb_state {
-	struct ssb_state	*shared_state;
-	raw_spinlock_t		lock;
-	unsigned int		disable_state;
-	unsigned long		local_state;
-};
-
-#define LSTATE_SSB	0
-
-static DEFINE_PER_CPU(struct ssb_state, ssb_state);
-
-void speculative_store_bypass_ht_init(void)
-{
-	struct ssb_state *st = this_cpu_ptr(&ssb_state);
-	unsigned int this_cpu = smp_processor_id();
-	unsigned int cpu;
-
-	st->local_state = 0;
-
-	/*
-	 * Shared state setup happens once on the first bringup
-	 * of the CPU. It's not destroyed on CPU hotunplug.
-	 */
-	if (st->shared_state)
-		return;
-
-	raw_spin_lock_init(&st->lock);
-
-	/*
-	 * Go over HT siblings and check whether one of them has set up the
-	 * shared state pointer already.
-	 */
-	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
-		if (cpu == this_cpu)
-			continue;
-
-		if (!per_cpu(ssb_state, cpu).shared_state)
-			continue;
-
-		/* Link it to the state of the sibling: */
-		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
-		return;
-	}
-
-	/*
-	 * First HT sibling to come up on the core. Link shared state of
-	 * the first HT sibling to itself. The siblings on the same core
-	 * which come up later will see the shared state pointer and link
-	 * themself to the state of this CPU.
-	 */
-	st->shared_state = st;
-}
-
-/*
- * Logic is: First HT sibling enables SSBD for both siblings in the core
- * and last sibling to disable it, disables it for the whole core. This how
- * MSR_SPEC_CTRL works in "hardware":
- *
- * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
- */
-static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
-{
-	struct ssb_state *st = this_cpu_ptr(&ssb_state);
-	u64 msr = x86_amd_ls_cfg_base;
-
-	if (!static_cpu_has(X86_FEATURE_ZEN)) {
-		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
-		wrmsrl(MSR_AMD64_LS_CFG, msr);
-		return;
-	}
-
-	if (tifn & _TIF_SSBD) {
-		/*
-		 * Since this can race with prctl(), block reentry on the
-		 * same CPU.
-		 */
-		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
-			return;
-
-		msr |= x86_amd_ls_cfg_ssbd_mask;
-
-		raw_spin_lock(&st->shared_state->lock);
-		/* First sibling enables SSBD: */
-		if (!st->shared_state->disable_state)
-			wrmsrl(MSR_AMD64_LS_CFG, msr);
-		st->shared_state->disable_state++;
-		raw_spin_unlock(&st->shared_state->lock);
-	} else {
-		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
-			return;
-
-		raw_spin_lock(&st->shared_state->lock);
-		st->shared_state->disable_state--;
-		if (!st->shared_state->disable_state)
-			wrmsrl(MSR_AMD64_LS_CFG, msr);
-		raw_spin_unlock(&st->shared_state->lock);
-	}
-}
-#else
-static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
-{
-	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
-
-	wrmsrl(MSR_AMD64_LS_CFG, msr);
-}
-#endif
-
-static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
-{
-	/*
-	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
-	 * so ssbd_tif_to_spec_ctrl() just works.
-	 */
-	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
-}
-
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
-{
-	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
-
-	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
-}
-
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
-{
-	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-		amd_set_ssb_virt_state(tifn);
-	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		amd_set_core_ssb_state(tifn);
-	else
-		intel_set_ssb_state(tifn);
-}
-
-void speculative_store_bypass_update(unsigned long tif)
-{
-	preempt_disable();
-	__speculative_store_bypass_update(tif);
-	preempt_enable();
-}
-
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		      struct tss_struct *tss)
 {
@@ -373,9 +217,6 @@ void __switch_to_xtra(struct task_struct
 
 	if ((tifp ^ tifn) & _TIF_NOTSC)
 		cr4_toggle_bits(X86_CR4_TSD);
-
-	if ((tifp ^ tifn) & _TIF_SSBD)
-		__speculative_store_bypass_update(tifn);
 }
 
 /*
@@ -397,14 +238,14 @@ static inline void play_dead(void)
 void enter_idle(void)
 {
 	this_cpu_write(is_idle, 1);
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+	idle_notifier_call_chain(IDLE_START);
 }
 
 static void __exit_idle(void)
 {
 	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
 		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+	idle_notifier_call_chain(IDLE_END);
 }
 
 /* Called from interrupts to signify idle end */