sched/numa: Take false sharing into account when adapting scan rate
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 222c2d0..d26a16e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1381,7 +1381,8 @@ static void double_lock(spinlock_t *l1, spinlock_t *l2)
        spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
-static void task_numa_group(struct task_struct *p, int cpupid, int flags)
+static void task_numa_group(struct task_struct *p, int cpupid, int flags,
+                       int *priv)
 {
        struct numa_group *grp, *my_grp;
        struct task_struct *tsk;
@@ -1447,6 +1448,9 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags)
        if (flags & TNF_SHARED)
                join = true;
 
+       /* Update priv based on whether false sharing was detected */
+       *priv = !join;
+
        if (join && !get_numa_group(grp))
                join = false;
 
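
The added `*priv = !join;` is the heart of the change: scan rate is adapted based on whether shared or private faults dominate, and a cpupid hint can flag a fault as shared even though the grouping heuristics then decline to join the two tasks' numa groups (false sharing). Reporting the decision back through *priv lets the caller account such a fault as private instead. Below is a condensed, hypothetical model of that classification, not the kernel function itself; the TNF_SHARED value and the classify_fault()/heuristics_allow_join names are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define TNF_SHARED	0x04	/* illustrative value; the flag name matches the kernel's */

/*
 * Condensed model of the classification this patch adds: a fault hinted
 * as shared is only accounted as shared if the grouping heuristics
 * actually decide to join the two tasks' numa groups.
 */
static void classify_fault(int flags, bool heuristics_allow_join, int *priv)
{
	bool join = false;

	if (flags & TNF_SHARED)
		join = true;

	/* stand-in for the checks task_numa_group() applies before joining */
	if (!heuristics_allow_join)
		join = false;

	/* false sharing: hinted shared, but no group formed => private */
	*priv = !join;
}

int main(void)
{
	int priv;

	classify_fault(TNF_SHARED, false, &priv);
	printf("false sharing -> priv = %d\n", priv);	/* 1: private */

	classify_fault(TNF_SHARED, true, &priv);
	printf("true sharing  -> priv = %d\n", priv);	/* 0: shared */
	return 0;
}
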
@@ -1545,7 +1549,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
        } else {
                priv = cpupid_match_pid(p, last_cpupid);
                if (!priv && !(flags & TNF_NO_GROUP))
-                       task_numa_group(p, last_cpupid, flags);
+                       task_numa_group(p, last_cpupid, flags, &priv);
        }
 
        /*
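
Not visible in the truncated hunk above: the (possibly re-classified) priv value is later used in task_numa_fault() to pick the per-node fault bucket, and the resulting shared-vs-private totals drive the scan-rate adaptation the subject line refers to. A minimal sketch of that indexing, assuming the two-counters-per-node layout of kernels from this era (task_faults_idx() is the upstream helper name; the layout details are an assumption):

#include <stdio.h>

/*
 * Assumed v3.13-era layout: two fault counters per node, at index
 * 2*nid + priv, with priv == 1 meaning private and priv == 0 shared.
 */
static inline int task_faults_idx(int nid, int priv)
{
	return 2 * nid + priv;
}

int main(void)
{
	/* node 1: shared bucket at index 2, private bucket at index 3 */
	printf("shared idx  = %d\n", task_faults_idx(1, 0));
	printf("private idx = %d\n", task_faults_idx(1, 1));
	return 0;
}

With this patch, a fault that task_numa_group() judges to be false sharing lands in the private bucket rather than the shared one, so it no longer inflates the shared total that the scan-rate logic sees.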