stop_machine: Introduce __cpu_stop_queue_work() and cpu_stop_queue_two_works()
author		Oleg Nesterov <oleg@redhat.com>
		Thu, 8 Oct 2015 14:51:34 +0000 (16:51 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Tue, 20 Oct 2015 08:23:54 +0000 (10:23 +0200)
Preparation to simplify the review of the next change. Add two simple
helpers, __cpu_stop_queue_work() and cpu_stop_queue_two_works(), which
take a bit of code from their callers.
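
Note that __cpu_stop_queue_work() is only ever called with stopper->lock
held (cpu_stop_queue_work() takes the lock around it); that is the
invariant the helper depends on. A minimal sketch of the helper with that
assumption spelled out, where the lockdep annotation is illustrative and
not part of this patch:

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
                                  struct cpu_stop_work *work)
{
        /* Caller must hold stopper->lock; annotation not in the patch. */
        lockdep_assert_held(&stopper->lock);
        list_add_tail(&work->list, &stopper->works);
        wake_up_process(stopper->thread);
}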

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: heiko.carstens@de.ibm.com
Link: http://lkml.kernel.org/r/20151008145134.GA18146@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/stop_machine.c

index 6a40209..688d6b3 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -73,21 +73,24 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
        }
 }
 
+static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
+                                       struct cpu_stop_work *work)
+{
+       list_add_tail(&work->list, &stopper->works);
+       wake_up_process(stopper->thread);
+}
+
 /* queue @work to @stopper.  if offline, @work is completed immediately */
 static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-
        unsigned long flags;
 
        spin_lock_irqsave(&stopper->lock, flags);
-
-       if (stopper->enabled) {
-               list_add_tail(&work->list, &stopper->works);
-               wake_up_process(stopper->thread);
-       } else
+       if (stopper->enabled)
+               __cpu_stop_queue_work(stopper, work);
+       else
                cpu_stop_signal_done(work->done, false);
-
        spin_unlock_irqrestore(&stopper->lock, flags);
 }
 
@@ -213,6 +216,16 @@ static int multi_cpu_stop(void *data)
        return err;
 }
 
+static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+                                   int cpu2, struct cpu_stop_work *work2)
+{
+       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+       cpu_stop_queue_work(cpu1, work1);
+       cpu_stop_queue_work(cpu2, work2);
+       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
+       return 0;
+}
 /**
  * stop_two_cpus - stops two cpus
  * @cpu1: the cpu to stop
@@ -260,10 +273,12 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
                return -ENOENT;
        }
 
-       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
-       cpu_stop_queue_work(cpu1, &work1);
-       cpu_stop_queue_work(cpu2, &work2);
-       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+       if (cpu1 > cpu2)
+               swap(cpu1, cpu2);
+       if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
+               preempt_enable();
+               return -ENOENT;
+       }
 
        preempt_enable();
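
Why the new "if (cpu1 > cpu2) swap(cpu1, cpu2)"? Ordering the pair by CPU
number gives every caller of cpu_stop_queue_two_works() a single global
acquisition order for the two per-CPU resources, which is the standard way
to rule out ABBA deadlocks between concurrent stop_two_cpus() calls.
lg_double_lock() already orders its two locks internally, so the
caller-side swap is preparation for the next change rather than a fix
here; likewise, cpu_stop_queue_two_works() can only return 0 at this
point, so the new error check in stop_two_cpus() is dead code until that
change lands. The ordering discipline itself can be shown with a
self-contained user-space sketch; POSIX threads stand in for the kernel's
locks, and double_lock(), double_unlock() and worker() are illustrative
names, not from this patch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t locks[2] = {
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
};

/* Take two slots in ascending index order, like the cpu1/cpu2 swap. */
static void double_lock(int a, int b)
{
        if (a > b) {
                int tmp = a;
                a = b;
                b = tmp;
        }
        pthread_mutex_lock(&locks[a]);
        pthread_mutex_lock(&locks[b]);
}

static void double_unlock(int a, int b)
{
        pthread_mutex_unlock(&locks[a]);
        pthread_mutex_unlock(&locks[b]);
}

static void *worker(void *arg)
{
        int first = *(int *)arg;

        /*
         * One thread asks for (0, 1), the other for (1, 0).  Without
         * the reordering in double_lock() this interleaving can
         * deadlock: each thread takes its "first" mutex and then
         * waits forever for the other's.
         */
        double_lock(first, 1 - first);
        printf("worker starting at lock %d holds both locks\n", first);
        double_unlock(first, 1 - first);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;
        int zero = 0, one = 1;

        pthread_create(&t1, NULL, worker, &zero);
        pthread_create(&t2, NULL, worker, &one);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

Without the reordering inside double_lock(), the two workers can each take
their "first" mutex and then block forever on the other's; with it,
whichever thread wins locks[0] also gets locks[1], and the loser simply
queues behind it.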