From d9f5995cc5ffbcb224cc47884aa7cd2a9946eea1 Mon Sep 17 00:00:00 2001
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Mon, 6 Nov 2017 18:19:14 -0800
Subject: [PATCH 06/23] x86/idle: Disable IBRS entering idle and enable it on
 wakeup

CVE-2017-5715 (Spectre v2 Intel)

Clear IBRS when entering an mwait-based idle state and set it again on
idle exit, before the CPU resumes executing kernel code. The mwaitx-based
delay path only toggles IBRS for delays longer than IBRS_DISABLE_THRESHOLD.
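
For illustration, the pattern applied to the mwait idle paths boils down to
the sketch below. It is not a copy of any single hunk; the helper name is
made up, and it assumes the MSR_IA32_SPEC_CTRL / FEATURE_ENABLE_IBRS
definitions added earlier in this series:

	/* Illustrative only: IBRS handling around an mwait-based idle. */
	static inline void mwait_idle_ibrs_sketch(unsigned long eax, unsigned long ecx)
	{
		/*
		 * Drop IBRS while this CPU sits in mwait: an idle CPU is not
		 * executing indirect branches that need protection, and
		 * leaving IBRS set can cost performance on the sibling
		 * hyperthread.
		 */
		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
			native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__mwait(eax, ecx);

		/* Re-enable IBRS before running any further kernel code. */
		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
	}

In the mwait_idle() path in process.c the re-enable is done on both the
__sti_mwait() and the need_resched() branches.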

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>
(cherry picked from commit c2a2a232b0553e32a7bfe198a40f377bd1ba016d)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
---
 arch/x86/include/asm/mwait.h |  8 ++++++++
 arch/x86/kernel/process.c    | 12 ++++++++++--
 arch/x86/lib/delay.c         | 10 ++++++++++
 3 files changed, 28 insertions(+), 2 deletions(-)
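
Note: the delay_mwaitx() hunks below gate the MSR writes on the requested
delay, presumably so that the two extra wrmsr operations do not dominate a
short wait. A minimal sketch of that gating (the helper name and the cached
bool are just for illustration; the __monitorx() setup on cpu_tss is elided):

	static void delay_mwaitx_ibrs_sketch(u64 delay)
	{
		/* Only drop IBRS for waits long enough to amortise the MSR writes. */
		bool toggle_ibrs = boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
				   delay > IBRS_DISABLE_THRESHOLD;

		if (toggle_ibrs)
			native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

		/* __monitorx() target setup elided; see the hunk below. */
		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);

		if (toggle_ibrs)
			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
	}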

diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index f37f2d8a2989..33db0de4054a 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,6 +4,8 @@
 #include <linux/sched.h>
 
 #include <asm/cpufeature.h>
+#include <asm/spec_ctrl.h>
+#include <asm/microcode.h>
 
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
@@ -104,9 +106,15 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 			mb();
 		}
 
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		if (!need_resched())
 			__mwait(eax, ecx);
+
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
 	}
 	current_clr_polling();
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cf3881663f3f..3f039e6169af 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -424,11 +424,19 @@ static void mwait_idle(void)
 			smp_mb(); /* quirk */
 		}
 
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		if (!need_resched())
+		if (!need_resched()) {
 			__sti_mwait(0, 0);
-		else
+			if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+				native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+		} else {
+			if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+				native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
 			local_irq_enable();
+		}
 		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index e912b2f6d36e..89060ce54c85 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -26,6 +26,8 @@
 # include <asm/smp.h>
 #endif
 
+#define IBRS_DISABLE_THRESHOLD	1000
+
 /* simple loop based delay: */
 static void delay_loop(unsigned long loops)
 {
@@ -98,6 +100,10 @@ static void delay_mwaitx(unsigned long __loops)
 	for (;;) {
 		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+			(delay > IBRS_DISABLE_THRESHOLD))
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
 		/*
 		 * Use cpu_tss as a cacheline-aligned, seldomly
 		 * accessed per-cpu variable as the monitor target.
@@ -111,6 +117,10 @@ static void delay_mwaitx(unsigned long __loops)
 		 */
 		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
 
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+			(delay > IBRS_DISABLE_THRESHOLD))
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+
 		end = rdtsc_ordered();
 
 		if (loops <= end - start)
-- 
2.15.1

