*
* fat_rwlock_destroy() sets 'rwlock' to NULL to indicate that this
* slot may be destroyed. */
- struct list list_node; /* In struct rwlock's 'threads' list. */
+ struct ovs_list list_node; /* In struct rwlock's 'threads' list. */
struct fat_rwlock *rwlock; /* Owner. */
/* Mutex.
}
}
+/* Returns this thread's slot for 'rwlock', creating it if it does not yet
+ * exist.  Unlike a blocking slot lookup, this never waits: if the slot must
+ * be created but 'rwlock->mutex' cannot be acquired immediately, returns
+ * NULL instead (the caller treats this as EBUSY). */
+static struct fat_rwlock_slot *
+fat_rwlock_try_get_slot__(struct fat_rwlock *rwlock)
+{
+    struct fat_rwlock_slot *slot;
+
+    /* Fast path. */
+    slot = ovsthread_getspecific(rwlock->key);
+    if (slot) {
+        return slot;
+    }
+
+    /* Slow path: create a new slot for 'rwlock' in this thread. */
+
+    /* pthread-style trylock: returns 0 on success, nonzero (EBUSY) without
+     * blocking when the mutex is held.  If it fails, 'slot' is still NULL
+     * from the fast-path lookup above, so we fall through and return NULL. */
+    if (!ovs_mutex_trylock(&rwlock->mutex)) {
+        slot = xmalloc_cacheline(sizeof *slot);
+        slot->rwlock = rwlock;
+        ovs_mutex_init(&slot->mutex);
+        slot->depth = 0;
+
+        /* Register the slot in the rwlock's per-thread list while holding
+         * 'rwlock->mutex', then publish it to this thread's fast path only
+         * after the slot is fully initialized. */
+        list_push_back(&rwlock->threads, &slot->list_node);
+        ovs_mutex_unlock(&rwlock->mutex);
+        ovsthread_setspecific(rwlock->key, slot);
+    }
+
+    return slot;
+}
+
/* Tries to lock 'rwlock' for reading. If successful, returns 0. If taking
* the lock would require blocking, returns EBUSY (without blocking). */
int
OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
- struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
+ struct fat_rwlock_slot *this = fat_rwlock_try_get_slot__(rwlock);
int error;
+ if (!this) {
+ return EBUSY;
+ }
+
switch (this->depth) {
case UINT_MAX:
return EBUSY;