X-Git-Url: http://git.cascardo.eti.br/?a=blobdiff_plain;f=lib%2Flockref.c;h=6f9d434c1521eab9ca0b2821d10936af55f2b703;hb=981d901095a6f40577c16156006ac4f4c167f85e;hp=e2cd2c0a882126c58e04e47102fb4975c5247d3c;hpb=5a7d8a28080caed7fd4cb1b81d092adac4445e8e;p=cascardo%2Flinux.git

diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -3,6 +3,22 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
@@ -14,12 +30,13 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg(&lockref->lock_count,			\
-					 old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax();							\
+		arch_mutex_cpu_relax();						\
 	}									\
 } while (0)
 
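
Note (not part of the patch): the hunks above switch the lockless fast path to a relaxed 64-bit cmpxchg and a per-architecture relax hook. A rough userspace analogue of the same retry-loop pattern is sketched below, using C11 atomics in place of the kernel's cmpxchg64_relaxed() and arch_mutex_cpu_relax(); the packed lock/count layout and the function names are illustrative assumptions, not the kernel's actual struct lockref.

/*
 * Illustrative userspace analogue of the CMPXCHG_LOOP pattern above.
 * Not kernel code: C11 atomics stand in for cmpxchg64_relaxed(), and the
 * retry point is where the kernel would call arch_mutex_cpu_relax().
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical packed word: low 32 bits "lock", high 32 bits "count". */
static _Atomic uint64_t lock_count;

static int lock_is_unlocked(uint64_t v)
{
	return (uint32_t)v == 0;	/* 0 in the lock half means "unlocked" */
}

/*
 * Bump the reference count without taking the lock; bail out if the lock
 * is ever observed held, mirroring the kernel's fallback to spin_lock().
 */
static int lockref_get_fast(void)
{
	uint64_t old = atomic_load_explicit(&lock_count, memory_order_relaxed);

	while (lock_is_unlocked(old)) {
		uint64_t new = old + (1ULL << 32);	/* count is the high half */

		/*
		 * Relaxed compare-and-swap: ordering comes from the
		 * surrounding locking protocol, not from this update, which
		 * is what lets the patch drop the full barriers of cmpxchg().
		 * On failure, 'old' is reloaded with the current value, just
		 * as the comment in lockref.c describes for cmpxchg().
		 */
		if (atomic_compare_exchange_weak_explicit(&lock_count, &old, new,
							  memory_order_relaxed,
							  memory_order_relaxed))
			return 1;	/* count incremented locklessly */
		/* Retry; the kernel inserts arch_mutex_cpu_relax() here. */
	}
	return 0;			/* lock held: caller must take it */
}

int main(void)
{
	if (lockref_get_fast())
		printf("count = %llu\n",
		       (unsigned long long)(atomic_load(&lock_count) >> 32));
	return 0;
}

The relaxed ordering mainly pays off on weakly-ordered architectures, as the patch's first comment suggests; on a strongly-ordered machine such as x86, where the locked cmpxchg already acts as a full barrier, the cmpxchg64_relaxed fallback to cmpxchg64 changes nothing.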