From 5443a1b463f452e439e0d33d5325d50aae60e9dc Mon Sep 17 00:00:00 2001
From: Michael Ellerman <mpe@ellerman.id.au>
Date: Wed, 10 Jan 2018 23:36:31 +0530
Subject: [PATCH 09/26] UBUNTU: SAUCE: rfi-flush: Factor out
 init_fallback_flush()

Factor the fallback flush area allocation and the per-cpu paca setup out
of setup_rfi_flush() into a new init_fallback_flush() helper.

CVE-2017-5754

[mauricfo: s/ppc64_caches.l1d.size/ppc64_caches.dsize/]
Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
---
 arch/powerpc/kernel/setup_64.c | 62 ++++++++++++++++++++++--------------------
 1 file changed, 33 insertions(+), 29 deletions(-)

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c909cc7..e46f7655 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -775,40 +775,44 @@ void rfi_flush_enable(bool enable)
 	rfi_flush = enable;
 }
 
-void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+static void init_fallback_flush(void)
 {
-	if (types & L1D_FLUSH_FALLBACK) {
-		int cpu;
-		u64 l1d_size = ppc64_caches.dsize;
-		u64 limit = min(safe_stack_limit(), ppc64_rma_size);
+	u64 l1d_size, limit;
+	int cpu;
 
-		pr_info("rfi-flush: Using fallback displacement flush\n");
+	l1d_size = ppc64_caches.dsize;
+	limit = min(safe_stack_limit(), ppc64_rma_size);
 
+	/*
+	 * Align to L1d size, and size it at 2x L1d size, to catch possible
+	 * hardware prefetch runoff. We don't have a recipe for load patterns to
+	 * reliably avoid the prefetcher.
+	 */
+	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
+	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+
+	for_each_possible_cpu(cpu) {
 		/*
-		 * Align to L1d size, and size it at 2x L1d size, to
-		 * catch possible hardware prefetch runoff. We don't
-		 * have a recipe for load patterns to reliably avoid
-		 * the prefetcher.
+		 * The fallback flush is currently coded for 8-way
+		 * associativity. Different associativity is possible, but it
+		 * will be treated as 8-way and may not evict the lines as
+		 * effectively.
+		 *
+		 * 128 byte lines are mandatory.
 		 */
-		l1d_flush_fallback_area =
-			__va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
-		memset(l1d_flush_fallback_area, 0, l1d_size * 2);
-
-		for_each_possible_cpu(cpu) {
-			/*
-			 * The fallback flush is currently coded for 8-way
-			 * associativity. Different associativity is possible,
-			 * but it will be treated as 8-way and may not evict
-			 * the lines as effectively.
-			 *
-			 * 128 byte lines are mandatory.
-			 */
-			u64 c = l1d_size / 8;
-
-			paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
-			paca[cpu].l1d_flush_congruence = c;
-			paca[cpu].l1d_flush_sets = c / 128;
-		}
+		u64 c = l1d_size / 8;
+
+		paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
+		paca[cpu].l1d_flush_congruence = c;
+		paca[cpu].l1d_flush_sets = c / 128;
+	}
+}
+
+void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+{
+	if (types & L1D_FLUSH_FALLBACK) {
+		pr_info("rfi-flush: Using fallback displacement flush\n");
+		init_fallback_flush();
 	}
 
 	if (types & L1D_FLUSH_ORI)
-- 
2.7.4

