cmap: Merge CMAP_FOR_EACH_SAFE into CMAP_FOR_EACH.
author     Ben Pfaff <blp@nicira.com>
           Tue, 29 Jul 2014 16:02:23 +0000 (09:02 -0700)
committer  Ben Pfaff <blp@nicira.com>
           Tue, 29 Jul 2014 16:02:23 +0000 (09:02 -0700)
There isn't any significant downside to making cmap iteration "safe" all
the time, so this drops the _SAFE variant.

This commit makes similar changes to CMAP_CURSOR_FOR_EACH and
CMAP_CURSOR_FOR_EACH_CONTINUE.

Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
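
For illustration, a minimal sketch of the pattern this guarantee enables, in
deallocation code that already runs at postponed time; struct my_node,
my_field, and my_map_destroy are hypothetical names, not part of this patch:

    #include <stdlib.h>
    #include "cmap.h"

    struct my_node {
        struct cmap_node my_field;  /* Node inside some 'struct cmap'. */
        /* ...user data... */
    };

    /* Deallocation code that already executes at postponed time (for example,
     * scheduled with ovsrcu_postpone()), after the RCU grace period has
     * expired, so each node may be freed as soon as the loop has moved past
     * it. */
    static void
    my_map_destroy(struct cmap *map)
    {
        struct my_node *node;

        CMAP_FOR_EACH (node, my_field, map) {
            free(node);   /* Safe: the iterator already advanced past 'node'. */
        }
        cmap_destroy(map);
    }
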
lib/classifier.c
lib/cmap.h
lib/dpif-netdev.c

diff --git a/lib/classifier.c b/lib/classifier.c
index 334d0da..a6a582c 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -485,12 +485,12 @@ classifier_destroy(struct classifier *cls)
             trie_destroy(&cls->tries[i].root);
         }
 
-        CMAP_FOR_EACH_SAFE (subtable, cmap_node, &cls->subtables_map) {
+        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
             destroy_subtable(cls, subtable);
         }
         cmap_destroy(&cls->subtables_map);
 
-        CMAP_FOR_EACH_SAFE (partition, cmap_node, &cls->partitions) {
+        CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
             ovsrcu_postpone(free, partition);
         }
         cmap_destroy(&cls->partitions);
diff --git a/lib/cmap.h b/lib/cmap.h
index 87a1d53..038db6c 100644
--- a/lib/cmap.h
+++ b/lib/cmap.h
@@ -163,33 +163,29 @@ struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
  *         ...operate on my_node...
  *     }
  *
- * CMAP_FOR_EACH_SAFE variant is useful only in deallocation code already
- * executing at postponed time, when it is known that the RCU grace period
- * has already expired.
+ * CMAP_FOR_EACH is "safe" in the sense of HMAP_FOR_EACH_SAFE.  That is, it is
+ * safe to free the current node before going on to the next iteration.  Most
+ * of the time, though, this doesn't matter for a cmap because node
+ * deallocation has to be postponed until the next grace period.  This means
+ * that this guarantee is useful only in deallocation code already executing at
+ * postponed time, when it is known that the RCU grace period has already
+ * expired.
  */
 
-#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)            \
-    for (*(CURSOR) = cmap_cursor_start(CMAP);                       \
-         ((CURSOR)->node                                            \
-          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true)  \
-          : false);                                                 \
-         cmap_cursor_advance(CURSOR))
-
-#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, MEMBER, CURSOR, CMAP)   \
-    for (*(CURSOR) = cmap_cursor_start(CMAP);                   \
-         ((CURSOR)->node                                        \
-          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER),    \
-             cmap_cursor_advance(CURSOR),                       \
-             true)                                              \
-          : false);                                             \
+#define CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER)    \
+    ((CURSOR)->node                                     \
+     ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
+        cmap_cursor_advance(CURSOR),                    \
+        true)                                           \
+     : false)
+
+#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
+    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
+         CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER);      \
         )
 
-#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR)         \
-    for (cmap_cursor_advance(CURSOR);                               \
-         ((CURSOR)->node                                            \
-          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true)  \
-          : false);                                                 \
-         cmap_cursor_advance(CURSOR))
+#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR)   \
+    while (CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER))
 
 struct cmap_cursor {
     const struct cmap_impl *impl;
@@ -201,20 +197,9 @@ struct cmap_cursor {
 struct cmap_cursor cmap_cursor_start(const struct cmap *);
 void cmap_cursor_advance(struct cmap_cursor *);
 
-#define CMAP_FOR_EACH(NODE, MEMBER, CMAP)                               \
-    for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
-         (cursor__.node                                                 \
-          ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true)       \
-          : false);                                                     \
-         cmap_cursor_advance(&cursor__))
-
-#define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP)                          \
+#define CMAP_FOR_EACH(NODE, MEMBER, CMAP)                       \
     for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
-         (cursor__.node                                                 \
-          ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER),             \
-             cmap_cursor_advance(&cursor__),                            \
-             true)                                                      \
-          : false);                                                     \
+         CMAP_CURSOR_FOR_EACH__(NODE, &cursor__, MEMBER);       \
         )
 
 static inline struct cmap_node *cmap_first(const struct cmap *);
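
For illustration, a minimal sketch of the explicit-cursor form that now shares
the same CMAP_CURSOR_FOR_EACH__ helper, reusing the hypothetical struct my_node
from the sketch above; 'map' is assumed to be a 'struct cmap' populated
elsewhere, and process() and out_of_budget() are likewise made-up helpers:

    struct cmap_cursor cursor;
    struct my_node *node;

    /* Visit some nodes now... */
    CMAP_CURSOR_FOR_EACH (node, my_field, &cursor, &map) {
        process(node);
        if (out_of_budget()) {      /* Hypothetical reason to stop early. */
            break;
        }
    }

    /* ...and later resume with the nodes not yet visited.  The cursor is
     * advanced before each loop body runs, so freeing a node inside either
     * loop would also be safe. */
    CMAP_CURSOR_FOR_EACH_CONTINUE (node, my_field, &cursor) {
        process(node);
    }
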
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 20813a8..bbc4eea 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -925,7 +925,7 @@ dp_netdev_flow_flush(struct dp_netdev *dp)
     struct dp_netdev_flow *netdev_flow;
 
     ovs_mutex_lock(&dp->flow_mutex);
-    CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
+    CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) {
         dp_netdev_remove_flow(dp, netdev_flow);
     }
     ovs_mutex_unlock(&dp->flow_mutex);