From 30619c89b17d46808b4cdf5b3f81b6a01ade1473 Mon Sep 17 00:00:00 2001
From: Srikar Dronamraju
Date: Wed, 20 Jun 2018 22:32:55 +0530
Subject: [PATCH] sched/numa: Update the scan period without holding the
 numa_group lock

The metrics for updating scan periods are local or task specific.
Currently this update happens under the numa_group lock, which seems
unnecessary. Hence move this update outside the lock.

Running SPECjbb2005 on a 4 node machine and comparing bops/JVM

JVMS  LAST_PATCH  WITH_PATCH  %CHANGE
16    25355.9     25645.4      1.141
1     72812       72142       -0.92

Signed-off-by: Srikar Dronamraju
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Rik van Riel
Acked-by: Mel Gorman
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1529514181-9842-15-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3bcf0e864613..fc33a4b40a09 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2170,8 +2170,6 @@ static void task_numa_placement(struct task_struct *p)
 		}
 	}
 
-	update_task_scan_period(p, fault_types[0], fault_types[1]);
-
 	if (p->numa_group) {
 		numa_group_count_active_nodes(p->numa_group);
 		spin_unlock_irq(group_lock);
@@ -2186,6 +2184,8 @@ static void task_numa_placement(struct task_struct *p)
 		if (task_node(p) != p->numa_preferred_nid)
 			numa_migrate_preferred(p);
 	}
+
+	update_task_scan_period(p, fault_types[0], fault_types[1]);
 }
 
 static inline int get_numa_group(struct numa_group *grp)
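
For illustration, the pattern the patch applies is hoisting work that
touches only caller-local state out of a critical section, so the lock
is held only while shared group state is updated. Below is a minimal
user-space C sketch of that pattern; it uses pthreads rather than the
kernel's spin_lock_irq(), and all struct and function names in it are
hypothetical, not the scheduler's actual code.

	/*
	 * Minimal sketch: shared (group) state is touched under the
	 * lock; the scan-period update depends only on task-local
	 * fault counts, so it runs after the unlock. Hypothetical
	 * names throughout.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct group {
		pthread_mutex_t lock;
		int active_nodes;	/* shared: needs the lock */
	};

	struct task {
		struct group *grp;
		int local_faults;	/* task-local: no lock needed */
		int remote_faults;	/* task-local: no lock needed */
		int scan_period;	/* task-local: no lock needed */
	};

	/* Task-local computation; safe to call without grp->lock held. */
	static void update_scan_period(struct task *t)
	{
		t->scan_period = t->local_faults > t->remote_faults ? 2000 : 1000;
	}

	static void placement(struct task *t)
	{
		if (t->grp) {
			pthread_mutex_lock(&t->grp->lock);
			t->grp->active_nodes++;	/* shared state under the lock */
			pthread_mutex_unlock(&t->grp->lock);
		}

		/* Moved outside the critical section: touches only *t. */
		update_scan_period(t);
	}

	int main(void)
	{
		struct group g = { PTHREAD_MUTEX_INITIALIZER, 0 };
		struct task t = { &g, 10, 3, 0 };

		placement(&t);
		printf("scan_period=%d active_nodes=%d\n",
		       t.scan_period, g.active_nodes);
		return 0;
	}

Shortening the critical section this way reduces contention on the
numa_group lock without changing what gets computed, since the hoisted
update reads no state that the lock protects.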