+/* Returns true if recirculation states 'a' and 'b' carry equivalent
+ * metadata: same table, same bridge, equal tunnel and flow metadata,
+ * equivalent stacks, and identical mirrors, conntrack flag, and actions.
+ * A NULL stack and a non-NULL but empty stack compare as equal. */
+static bool
+recirc_metadata_equal(const struct recirc_state *a,
+                      const struct recirc_state *b)
+{
+    if (a->table_id != b->table_id || a->ofproto != b->ofproto) {
+        return false;
+    }
+    if (!flow_tnl_equal(a->metadata.tunnel, b->metadata.tunnel)) {
+        return false;
+    }
+    /* Compare the rest of 'metadata' in one shot.  The size arithmetic
+     * presumes 'tunnel' is the leading member of the metadata struct and
+     * was fully compared above. */
+    if (memcmp(&a->metadata.metadata, &b->metadata.metadata,
+               sizeof a->metadata - sizeof a->metadata.tunnel)) {
+        return false;
+    }
+    /* Stacks match when both are empty (NULL or zero-size), or when both
+     * are non-NULL with identical contents. */
+    bool a_stack_empty = !a->stack || !a->stack->size;
+    bool b_stack_empty = !b->stack || !b->stack->size;
+    if (a_stack_empty || b_stack_empty) {
+        if (!(a_stack_empty && b_stack_empty)) {
+            return false;
+        }
+    } else if (!ofpbuf_equal(a->stack, b->stack)) {
+        return false;
+    }
+    return (a->mirrors == b->mirrors
+            && a->conntracked == b->conntracked
+            && a->action_set_len == b->action_set_len
+            && ofpacts_equal(a->ofpacts, a->ofpacts_len,
+                             b->ofpacts, b->ofpacts_len));
+}
+
+/* Lockless RCU protected lookup.  If node is needed across RCU quiescent
+ * state, caller should take a reference (see recirc_ref_equal()).
+ *
+ * 'hash' must be the metadata hash of 'target'.  Returns the first node
+ * in 'metadata_map' whose state matches 'target', or NULL if none. */
+static struct recirc_id_node *
+recirc_find_equal(const struct recirc_state *target, uint32_t hash)
+{
+    struct recirc_id_node *node;
+
+    /* Walk only the cmap bucket chain for 'hash'; full metadata equality
+     * weeds out hash collisions. */
+    CMAP_FOR_EACH_WITH_HASH (node, metadata_node, hash, &metadata_map) {
+        if (recirc_metadata_equal(&node->state, target)) {
+            return node;
+        }
+    }
+    return NULL;
+}
+
+/* Like recirc_find_equal(), but also takes a reference on the matching
+ * node so it stays valid across an RCU quiescent state.  Returns NULL if
+ * no node matches 'target'. */
+static struct recirc_id_node *
+recirc_ref_equal(const struct recirc_state *target, uint32_t hash)
+{
+    for (;;) {
+        struct recirc_id_node *node = recirc_find_equal(target, hash);
+
+        if (!node || ovs_refcount_try_ref_rcu(&node->refcount)) {
+            return node;
+        }
+        /* The node was released before we got the reference; look again. */
+    }
+}
+
+/* Copies 'old' into 'new', deep-copying the members 'old' points at:
+ * the tunnel metadata is copied into the caller-provided 'tunnel'
+ * storage, and the stack and ofpacts, when present, are cloned onto the
+ * heap.  A non-NULL but empty stack or ofpacts is normalized to NULL. */
+static void
+recirc_state_clone(struct recirc_state *new, const struct recirc_state *old,
+                   struct flow_tnl *tunnel)
+{
+    *new = *old;
+
+    /* Give the clone its own copy of the tunnel metadata. */
+    flow_tnl_copy__(tunnel, old->metadata.tunnel);
+    new->metadata.tunnel = tunnel;
+
+    if (new->stack) {
+        if (new->stack->size) {
+            new->stack = ofpbuf_clone(new->stack);
+        } else {
+            new->stack = NULL;
+        }
+    }
+    if (new->ofpacts) {
+        if (new->ofpacts_len) {
+            new->ofpacts = xmemdup(new->ofpacts, new->ofpacts_len);
+        } else {
+            new->ofpacts = NULL;
+        }
+    }
+}
+
+/* Allocate a unique recirculation id for the given set of flow metadata.
+ * The ID space is 2^^32, so there should never be a situation in which all
+ * the IDs are used up.  We loop until we find a free one.
+ * hash is recomputed if it is passed in as 0.
+ * (NOTE(review): no recomputation of a zero 'hash' is visible in this
+ * function — presumably the caller handles that; verify against callers.)
+ *
+ * The returned node is inserted into both 'id_map' and 'metadata_map'
+ * and carries one reference. */
+static struct recirc_id_node *
+recirc_alloc_id__(const struct recirc_state *state, uint32_t hash)
+{
+    ovs_assert(state->action_set_len <= state->ofpacts_len);
+
+    struct recirc_id_node *node = xzalloc(sizeof *node);
+
+    node->hash = hash;
+    ovs_refcount_init(&node->refcount);
+    /* Deep-copy 'state'; the tunnel metadata is stored inside the node. */
+    recirc_state_clone(CONST_CAST(struct recirc_state *, &node->state), state,
+                       &node->state_metadata_tunnel);
+
+    /* The mutex serializes ID claiming and map insertion, so two
+     * concurrent allocators cannot claim the same ID.  Readers use the
+     * cmaps locklessly. */
+    ovs_mutex_lock(&mutex);
+    for (;;) {
+        /* Claim the next ID.  The ID space should be sparse enough for the
+           allocation to succeed at the first try.  We do skip the first
+           RECIRC_POOL_STATIC_IDS IDs on the later rounds, though, as some of
+           the initial allocations may be for long term uses (like bonds). */
+        node->id = next_id++;
+        if (OVS_UNLIKELY(!node->id)) {
+            /* 'next_id' wrapped around to 0; restart past the static IDs. */
+            next_id = RECIRC_POOL_STATIC_IDS + 1;
+            node->id = next_id++;
+        }
+        /* Find if the id is free. */
+        if (OVS_LIKELY(!recirc_find__(node->id))) {
+            break;
+        }
+    }
+    cmap_insert(&id_map, &node->id_node, node->id);
+    cmap_insert(&metadata_map, &node->metadata_node, node->hash);
+    ovs_mutex_unlock(&mutex);
+    return node;
+}
+
+/* Look up an existing ID for the given flow's metadata and optional actions.
+ */