rcu_sync: Introduce rcu_sync_dtor()
author	Oleg Nesterov <oleg@redhat.com>
Fri, 21 Aug 2015 17:42:52 +0000 (19:42 +0200)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 6 Oct 2015 18:25:21 +0000 (11:25 -0700)
This commit allows rcu_sync structures to be safely deallocated.
The trick is to add a new ->wait field to the gp_ops array.
This field is a pointer to the rcu_barrier() function corresponding
to the flavor of RCU in question.  This allows a new rcu_sync_dtor()
to wait for any outstanding callbacks before freeing the rcu_sync
structure.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
include/linux/rcu_sync.h
kernel/rcu/sync.c

index 1f2d4fc..8069d64 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -62,6 +62,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
+extern void rcu_sync_dtor(struct rcu_sync *);
 
 #define __RCU_SYNC_INITIALIZER(name, type) {                           \
                .gp_state = 0,                                          \
index 01c9807..1e353f0 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -32,6 +32,7 @@
 static const struct {
        void (*sync)(void);
        void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+       void (*wait)(void);
 #ifdef CONFIG_PROVE_RCU
        int  (*held)(void);
 #endif
@@ -39,16 +40,19 @@ static const struct {
        [RCU_SYNC] = {
                .sync = synchronize_rcu,
                .call = call_rcu,
+               .wait = rcu_barrier,
                __INIT_HELD(rcu_read_lock_held)
        },
        [RCU_SCHED_SYNC] = {
                .sync = synchronize_sched,
                .call = call_rcu_sched,
+               .wait = rcu_barrier_sched,
                __INIT_HELD(rcu_read_lock_sched_held)
        },
        [RCU_BH_SYNC] = {
                .sync = synchronize_rcu_bh,
                .call = call_rcu_bh,
+               .wait = rcu_barrier_bh,
                __INIT_HELD(rcu_read_lock_bh_held)
        },
 };
@@ -195,3 +199,25 @@ void rcu_sync_exit(struct rcu_sync *rsp)
        }
        spin_unlock_irq(&rsp->rss_lock);
 }
+
+/**
+ * rcu_sync_dtor() - Clean up an rcu_sync structure
+ * @rsp: Pointer to rcu_sync structure to be cleaned up
+ */
+void rcu_sync_dtor(struct rcu_sync *rsp)
+{
+       int cb_state;
+
+       BUG_ON(rsp->gp_count);
+
+       spin_lock_irq(&rsp->rss_lock);
+       if (rsp->cb_state == CB_REPLAY)
+               rsp->cb_state = CB_PENDING;
+       cb_state = rsp->cb_state;
+       spin_unlock_irq(&rsp->rss_lock);
+
+       if (cb_state != CB_IDLE) {
+               gp_ops[rsp->gp_type].wait();
+               BUG_ON(rsp->cb_state != CB_IDLE);
+       }
+}
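
What follows is a minimal usage sketch, not part of the patch above, showing the intended lifecycle: a dynamically allocated object embeds a struct rcu_sync, and rcu_sync_dtor() is called before the memory is freed.  The struct my_obj, my_obj_alloc() and my_obj_free() names are illustrative assumptions; only rcu_sync_init(), rcu_sync_enter()/rcu_sync_exit() and the new rcu_sync_dtor() come from the rcu_sync API.

	#include <linux/rcu_sync.h>
	#include <linux/slab.h>

	/* Hypothetical object embedding an rcu_sync structure. */
	struct my_obj {
		struct rcu_sync rss;
		/* ... other fields ... */
	};

	static struct my_obj *my_obj_alloc(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			rcu_sync_init(&obj->rss, RCU_SCHED_SYNC);
		return obj;
	}

	/*
	 * Writers bracket their updates with rcu_sync_enter() and
	 * rcu_sync_exit(); rcu_sync_exit() may queue an asynchronous
	 * callback to flip the structure back to the reader fast path.
	 */

	static void my_obj_free(struct my_obj *obj)
	{
		/*
		 * rcu_sync_dtor() waits, via the flavor's rcu_barrier()
		 * (rcu_barrier_sched() for RCU_SCHED_SYNC), for any
		 * callback queued by a previous rcu_sync_exit(), so the
		 * rcu_sync structure is quiescent before its memory is
		 * returned to the allocator.
		 */
		rcu_sync_dtor(&obj->rss);
		kfree(obj);
	}

Without that wait, a callback queued by rcu_sync_exit() could still reference obj->rss after kfree(), which is exactly the use-after-free that rcu_sync_dtor() prevents.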