Merge tag 'v4.0-rc1' into locking/core, to refresh the tree before merging new changes
author:    Ingo Molnar <mingo@kernel.org>
           Tue, 24 Feb 2015 07:41:07 +0000 (08:41 +0100)
committer: Ingo Molnar <mingo@kernel.org>
           Tue, 24 Feb 2015 07:41:07 +0000 (08:41 +0100)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/futex.c
kernel/locking/mutex.c
kernel/locking/rwsem-spinlock.c
kernel/locking/rwsem-xadd.c
kernel/locking/rwsem.c
kernel/locking/rwsem.h [new file with mode: 0644]

diff --git a/kernel/futex.c b/kernel/futex.c
index 2a5e383..2579e40 100644
@@ -900,7 +900,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
        if (!p)
                return -ESRCH;
 
-       if (!p->mm) {
+       if (unlikely(p->flags & PF_KTHREAD)) {
                put_task_struct(p);
                return -EPERM;
        }
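
The old !p->mm test is not a reliable kthread filter: a kernel thread that has
temporarily adopted a user mm (e.g. via use_mm()) has a non-NULL ->mm, whereas
PF_KTHREAD is set at creation and never cleared. A minimal sketch of the new
predicate (the helper name is hypothetical, not part of the patch):

/*
 * Hypothetical helper illustrating the check above: the PF_KTHREAD
 * flag is stable for the task's whole lifetime, unlike p->mm, which
 * a kthread can populate via use_mm().
 */
static inline bool task_is_kthread(const struct task_struct *p)
{
	return p->flags & PF_KTHREAD;
}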
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 94674e5..43bf25e 100644
@@ -25,7 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
-#include "mcs_spinlock.h"
+#include <linux/osq_lock.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -217,44 +217,41 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
 }
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
-       if (lock->owner != owner)
-               return false;
-
-       /*
-        * Ensure we emit the owner->on_cpu, dereference _after_ checking
-        * lock->owner still matches owner, if that fails, owner might
-        * point to free()d memory, if it still matches, the rcu_read_lock()
-        * ensures the memory stays valid.
-        */
-       barrier();
-
-       return owner->on_cpu;
-}
-
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
  */
 static noinline
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 {
+       bool ret;
+
        rcu_read_lock();
-       while (owner_running(lock, owner)) {
-               if (need_resched())
+       while (true) {
+               /* Return success when the lock owner changed */
+               if (lock->owner != owner) {
+                       ret = true;
                        break;
+               }
+
+               /*
+                * Ensure we emit the owner->on_cpu dereference _after_
+                * checking that lock->owner still matches owner: if it
+                * fails, owner might point to free()d memory; if it still
+                * matches, the rcu_read_lock() keeps that memory valid.
+                */
+               barrier();
+
+               if (!owner->on_cpu || need_resched()) {
+                       ret = false;
+                       break;
+               }
 
                cpu_relax_lowlatency();
        }
        rcu_read_unlock();
 
-       /*
-        * We break out the loop above on need_resched() and when the
-        * owner changed, which is a sign for heavy contention. Return
-        * success only when lock->owner is NULL.
-        */
-       return lock->owner == NULL;
+       return ret;
 }
 
 /*
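
Folding owner_running() into the loop lets the exit reason be decided at the
break site instead of being re-derived from lock->owner afterwards. The
pattern, reduced to a sketch (the spinlock_like type is hypothetical; the
barrier()/->on_cpu ordering and cpu_relax_lowlatency() are as in the patch):

struct spinlock_like {			/* hypothetical, for illustration */
	struct task_struct *owner;
};

/*
 * Sketch: return true when the owner changed (worth retrying the
 * lock), false when the owner went off-CPU or we must reschedule.
 * barrier() orders the ->owner re-check before the ->on_cpu load;
 * while lock->owner == owner, the RCU read side keeps the owner's
 * task_struct from being freed under us.
 */
static bool spin_on_owner(struct spinlock_like *lock,
			  struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (lock->owner == owner) {
		barrier();	/* re-check ->owner before ->on_cpu */
		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}
		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}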
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2555ae1..3a50485 100644
@@ -85,6 +85,13 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 
                list_del(&waiter->list);
                tsk = waiter->task;
+               /*
+                * Make sure we do not wake up the next reader before
+                * setting waiter->task to NULL (the condition that
+                * grants the lock); otherwise we could miss the
+                * wakeup on the other side and end up sleeping
+                * again. See the pairing in rwsem_down_read_failed().
+                */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
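
The comment's pairing, shown side by side as a sketch (function names are
hypothetical; simplified from __rwsem_do_wake() and rwsem_down_read_failed()).
The waiter lives on the sleeper's stack, so the NULL store is both the grant
and the last access the waker may make:

static void waker_side(struct rwsem_waiter *waiter)
{
	struct task_struct *tsk = waiter->task;

	smp_mb();		/* order prior grant state before the NULL store */
	waiter->task = NULL;	/* grant; waiter may vanish after this */
	wake_up_process(tsk);
}

static void sleeper_side(struct rwsem_waiter *waiter)
{
	while (true) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!waiter->task)	/* granted? */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}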
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 2f7cc40..e4ad019 100644
@@ -14,8 +14,9 @@
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/sched/rt.h>
+#include <linux/osq_lock.h>
 
-#include "mcs_spinlock.h"
+#include "rwsem.h"
 
 /*
  * Guide to the rw_semaphore's count field for common values.
@@ -186,6 +187,13 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
+               /*
+                * Make sure we do not wake up the next reader before
+                * setting waiter->task to NULL (the condition that
+                * grants the lock); otherwise we could miss the
+                * wakeup on the other side and end up sleeping
+                * again. See the pairing in rwsem_down_read_failed().
+                */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
@@ -258,6 +266,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
                    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
                if (!list_is_singular(&sem->wait_list))
                        rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+               rwsem_set_owner(sem);
                return true;
        }
 
@@ -277,8 +286,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
                        return false;
 
                old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
-               if (old == count)
+               if (old == count) {
+                       rwsem_set_owner(sem);
                        return true;
+               }
 
                count = old;
        }
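
With rwsem_set_owner() placed directly after the winning cmpxchg(), the window
in which an active writer has a NULL ->owner shrinks to a couple of
instructions. A reconstructed sketch of the whole trylock after this change
(the guard condition is from the unchanged context above the hunk):

static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = READ_ONCE(sem->count);

	while (true) {
		/* give up if readers or a writer are already active */
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg(&sem->count, count,
			      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);	/* publish to spinners */
			return true;
		}

		count = old;	/* lost the race; re-evaluate */
	}
}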
@@ -287,23 +298,30 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
        struct task_struct *owner;
-       bool on_cpu = false;
+       bool ret = true;
 
        if (need_resched())
                return false;
 
        rcu_read_lock();
        owner = ACCESS_ONCE(sem->owner);
-       if (owner)
-               on_cpu = owner->on_cpu;
-       rcu_read_unlock();
+       if (!owner) {
+               long count = ACCESS_ONCE(sem->count);
+               /*
+                * If sem->owner is not set, yet we have just recently
+                * entered the slowpath with the lock being active, then
+                * readers may hold the lock. To be safe, avoid spinning
+                * in these situations.
+                */
+               if (count & RWSEM_ACTIVE_MASK)
+                       ret = false;
+               goto done;
+       }
 
-       /*
-        * If sem->owner is not set, yet we have just recently entered the
-        * slowpath, then there is a possibility reader(s) may have the lock.
-        * To be safe, avoid spinning in these situations.
-        */
-       return on_cpu;
+       ret = owner->on_cpu;
+done:
+       rcu_read_unlock();
+       return ret;
 }
 
 static inline bool owner_running(struct rw_semaphore *sem,
@@ -326,21 +344,30 @@ static inline bool owner_running(struct rw_semaphore *sem,
 static noinline
 bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
 {
+       long count;
+
        rcu_read_lock();
        while (owner_running(sem, owner)) {
-               if (need_resched())
-                       break;
+               /* abort spinning when need_resched() is set */
+               if (need_resched()) {
+                       rcu_read_unlock();
+                       return false;
+               }
 
                cpu_relax_lowlatency();
        }
        rcu_read_unlock();
 
+       if (READ_ONCE(sem->owner))
+               return true; /* new owner, continue spinning */
+
        /*
-        * We break out the loop above on need_resched() or when the
-        * owner changed, which is a sign for heavy contention. Return
-        * success only when sem->owner is NULL.
+        * When the owner is not set, the lock could be free or
+        * held by readers. Check the counter to verify the
+        * state.
         */
-       return sem->owner == NULL;
+       count = READ_ONCE(sem->count);
+       return (count == 0 || count == RWSEM_WAITING_BIAS);
 }
 
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
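
For context, a sketch of how these helpers compose inside
rwsem_optimistic_spin() (simplified: the real function also disables
preemption and serializes spinners through the OSQ via
osq_lock()/osq_unlock()):

static bool optimistic_spin_sketch(struct rw_semaphore *sem)
{
	struct task_struct *owner;

	if (!rwsem_can_spin_on_owner(sem))
		return false;	/* owner off-CPU, or readers likely hold it */

	while (true) {
		owner = READ_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;	/* owner preempted, or we need to resched */

		/* owner gone or lock possibly free: try to steal it */
		if (rwsem_try_write_lock_unqueued(sem))
			return true;

		if (!owner && (need_resched() || rt_task(current)))
			break;

		cpu_relax_lowlatency();
	}

	return false;
}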
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index e2d3bc7..205be0c 100644
@@ -9,29 +9,9 @@
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/rwsem.h>
-
 #include <linux/atomic.h>
 
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-static inline void rwsem_set_owner(struct rw_semaphore *sem)
-{
-       sem->owner = current;
-}
-
-static inline void rwsem_clear_owner(struct rw_semaphore *sem)
-{
-       sem->owner = NULL;
-}
-
-#else
-static inline void rwsem_set_owner(struct rw_semaphore *sem)
-{
-}
-
-static inline void rwsem_clear_owner(struct rw_semaphore *sem)
-{
-}
-#endif
+#include "rwsem.h"
 
 /*
  * lock for reading
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
new file mode 100644
index 0000000..870ed9a
--- /dev/null
@@ -0,0 +1,20 @@
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+static inline void rwsem_set_owner(struct rw_semaphore *sem)
+{
+       sem->owner = current;
+}
+
+static inline void rwsem_clear_owner(struct rw_semaphore *sem)
+{
+       sem->owner = NULL;
+}
+
+#else
+static inline void rwsem_set_owner(struct rw_semaphore *sem)
+{
+}
+
+static inline void rwsem_clear_owner(struct rw_semaphore *sem)
+{
+}
+#endif
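
The callers in rwsem.c bracket the writer entry points with these helpers; a
sketch of the pattern (the real down_write()/up_write() also handle lockdep
annotations and the contended slow path):

void down_write(struct rw_semaphore *sem)
{
	__down_write(sem);	/* arch fast path / generic slow path */
	rwsem_set_owner(sem);	/* now visible to optimistic spinners */
}

void up_write(struct rw_semaphore *sem)
{
	rwsem_clear_owner(sem);	/* stop spinners before the release */
	__up_write(sem);
}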