ipc_rcu_free(head);
}
-/*
- * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
- * are only control barriers.
- * The code must pair with spin_unlock(&sem->lock) or
- * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
- *
- * smp_rmb() is sufficient, as writes cannot pass the control barrier.
- */
-#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
-
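The helper removed above only packaged a barrier; the pairing its comment describes is, roughly, the following. This is an illustrative sketch with invented names (struct foo, writer(), reader()), not code from the patch: one CPU updates state under a per-semaphore style lock, and another CPU that merely waits for that lock to become free still needs the read barrier before it can safely look at the updated state, because spin_unlock_wait() only provides a control dependency.

        #include <linux/spinlock.h>

        struct foo {
                spinlock_t lock;
                int data;
        };

        static void writer(struct foo *f)
        {
                spin_lock(&f->lock);
                f->data = 1;                    /* publish under the lock */
                spin_unlock(&f->lock);
        }

        static int reader(struct foo *f)
        {
                /* Wait until the writer has dropped f->lock. */
                spin_unlock_wait(&f->lock);
                /*
                 * Control dependency only: later stores are ordered, later
                 * loads are not.  Without the smp_rmb() behind this macro,
                 * the load of f->data could still return the value from
                 * before the writer's critical section.
                 */
                ipc_smp_acquire__after_spin_is_unlocked();
                return f->data;
        }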
/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
- ipc_smp_acquire__after_spin_is_unlocked();
}
/*
* complex_count++;
* spin_unlock(sem_perm.lock);
*/
- ipc_smp_acquire__after_spin_is_unlocked();
+ smp_acquire__after_ctrl_dep();
/*
* Now repeat the test of complex_count:
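smp_acquire__after_ctrl_dep() is the kernel's generic primitive for exactly the situation the deleted file-local wrapper handled: placed after a branch that depends on an earlier load, it upgrades that control dependency to ACQUIRE ordering, so the re-read of complex_count that follows cannot be satisfied early. A minimal sketch of the idiom with invented names (struct widget, consume(), published, payload); only READ_ONCE() and the barrier itself are real kernel interfaces, assumed here to come via <linux/compiler.h> in this series:

        #include <linux/compiler.h>

        struct widget {
                int published;
                int payload;
        };

        static int consume(struct widget *w)
        {
                /* The branch below forms a control dependency on this load. */
                if (!READ_ONCE(w->published))
                        return -1;

                /*
                 * A control dependency orders only later stores.  Upgrade it
                 * to ACQUIRE so the load of w->payload cannot be speculated
                 * ahead of the w->published test.
                 */
                smp_acquire__after_ctrl_dep();

                return READ_ONCE(w->payload);
        }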
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
}
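The remaining hunks all make the same substitution: error paths that drop the reference taken earlier must pass sem_rcu_free, not the bare ipc_rcu_free, because the sem-specific callback also frees the LSM security blob attached to the semaphore set. If the set was removed concurrently, the putref on these paths is the last one, and going through ipc_rcu_free directly would leak that security data. For reference, the full callback in this kernel series reads roughly as follows (a reconstruction; only its closing lines are quoted at the top of this excerpt):

        static void sem_rcu_free(struct rcu_head *head)
        {
                struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
                struct sem_array *sma = ipc_rcu_to_struct(p);

                /* Free the security blob first... */
                security_sem_free(sma);
                /* ...then hand the object back to the generic IPC RCU free. */
                ipc_rcu_free(head);
        }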
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
rcu_read_unlock();
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}
if (nsems > SEMMSL_FAST) {
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}
}
if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -EFAULT;
goto out_free;
}
for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -ERANGE;
goto out_free;
}
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return ERR_PTR(-ENOMEM);
}