locking/mcs: Allow architectures to hook in to contended paths
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Tue, 21 Jan 2014 23:36:10 +0000 (15:36 -0800)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 Jan 2014 12:13:28 +0000 (13:13 +0100)
When a lock is contended, architectures may be able to reduce the
polling overhead in ways that aren't expressible using a simple relax()
primitive.

This patch allows architectures to hook into the mcs_{lock,unlock}
functions for the contended cases only.
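
For example, an architecture with wait/wake-event instructions could
define the hooks in its asm headers before including mcs_spinlock.h.
A minimal sketch, assuming ARM-style wfe()/dsb_sev() event primitives
(illustrative only, not part of this patch):

  #define arch_mcs_spin_lock_contended(l)				\
  do {									\
	while (!(smp_load_acquire(l)))					\
		wfe();	/* sleep until another CPU signals an event */	\
  } while (0)

  #define arch_mcs_spin_unlock_contended(l)				\
  do {									\
	smp_store_release((l), 1);					\
	dsb_sev();	/* wake any CPUs waiting in wfe() */		\
  } while (0)

On such hardware the event register makes wfe() return immediately if
an event is already pending, so a wakeup sent between the load and the
wait is not lost.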

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1390347370.3138.65.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
index 143fa42..e9a4d74 100644
--- a/include/linux/mcs_spinlock.h
+++ b/include/linux/mcs_spinlock.h
@@ -17,6 +17,28 @@ struct mcs_spinlock {
        int locked; /* 1 if lock acquired */
 };
 
+#ifndef arch_mcs_spin_lock_contended
+/*
+ * Using smp_load_acquire() provides a memory barrier that ensures
+ * subsequent operations happen after the lock is acquired.
+ */
+#define arch_mcs_spin_lock_contended(l)                                        \
+do {                                                                   \
+       while (!(smp_load_acquire(l)))                                  \
+               arch_mutex_cpu_relax();                                 \
+} while (0)
+#endif
+
+#ifndef arch_mcs_spin_unlock_contended
+/*
+ * smp_store_release() provides a memory barrier to ensure that all
+ * operations in the critical section have completed before
+ * unlocking.
+ */
+#define arch_mcs_spin_unlock_contended(l)                              \
+       smp_store_release((l), 1)
+#endif
+
 /*
  * Note: the smp_load_acquire/smp_store_release pair is not
  * sufficient to form a full memory barrier across
@@ -58,13 +80,9 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                return;
        }
        ACCESS_ONCE(prev->next) = node;
-       /*
-        * Wait until the lock holder passes the lock down.
-        * Using smp_load_acquire() provides a memory barrier that
-        * ensures subsequent operations happen after the lock is acquired.
-        */
-       while (!(smp_load_acquire(&node->locked)))
-               arch_mutex_cpu_relax();
+
+       /* Wait until the lock holder passes the lock down. */
+       arch_mcs_spin_lock_contended(&node->locked);
 }
 
 /*
@@ -86,13 +104,9 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                while (!(next = ACCESS_ONCE(node->next)))
                        arch_mutex_cpu_relax();
        }
-       /*
-        * Pass lock to next waiter.
-        * smp_store_release() provides a memory barrier to ensure
-        * all operations in the critical section has been completed
-        * before unlocking.
-        */
-       smp_store_release(&next->locked, 1);
+
+       /* Pass lock to next waiter. */
+       arch_mcs_spin_unlock_contended(&next->locked);
 }
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
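
For context, a minimal caller sketch (illustrative, not part of this
patch): each contender passes its own queue node, typically on its
stack, so every waiter polls only its own node->locked via the
contended hooks above.

  static struct mcs_spinlock *example_lock;  /* queue tail, NULL when free */

  static void example_critical_section(void)
  {
	struct mcs_spinlock node;

	mcs_spin_lock(&example_lock, &node);
	/* ... critical section: only one CPU at a time runs this ... */
	mcs_spin_unlock(&example_lock, &node);
  }

The node can safely live on the stack because mcs_spin_unlock() runs
before the frame is torn down; the lock word itself is just a tail
pointer, which is what keeps each waiter's spinning local to its own
cache line.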