if (old) {
int i;
- for (i = 0; i < old->max; i++) {
- if (old->masks[i])
- new->masks[new->count++] = old->masks[i];
- }
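+ /* The valid masks now occupy [0, old->count) with no holes,
+ * so every slot can be copied over as-is and the count
+ * carried across. */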
+ for (i = 0; i < old->max; i++)
+ new->masks[i] = old->masks[i];
+
+ new->count = old->count;
}
rcu_assign_pointer(tbl->mask_array, new);
return 0;
}
+static void tbl_mask_array_delete_mask(struct mask_array *ma,
+ const struct sw_flow_mask *mask)
+{
+ int i = 0;
+
+ /* Delete a mask pointer from the valid section.
+ *
+ * Also move the last entry into its place, so there is no
+ * hole in the valid section.
+ *
+ * Notice the last entry still points to the original mask.
+ *
+ * <Note>: there is a small race window that may cause a mask
+ * to be missed in a search. Imagine a core walking through
+ * the array, passing the index of the mask being deleted. But
+ * before it reaches the last entry, that entry is overwritten
+ * by another core that is adding a new mask, so the last entry
+ * now points to the new mask. In this case, the moved-up last
+ * entry can be missed by the core walking the mask array.
+ *
+ * If the missed mask would have led to a successful lookup,
+ * hitting the race window can cause a packet to miss the
+ * kernel flow cache and be sent to user space.
+ * </Note>
+ */
+ for (i = 0; i < ma->count; i++)
+ if (mask == ovsl_dereference(ma->masks[i])) {
+ struct sw_flow_mask *last;
+
+ last = ovsl_dereference(ma->masks[ma->count - 1]);
+ rcu_assign_pointer(ma->masks[i], last);
+ ma->count--;
+ break;
+ }
+
+ /* Remove the deleted mask pointers from the invalid section. */
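+ /* Example (sketch): deleting B from [A, B, C] above yields
+ * [A, C | C] with count == 2; the stale trailing C is left in
+ * place, which lets a racing reader that cached index 2 be
+ * repaired by fixup_cache_entry_index(). Deleting C afterwards
+ * yields [A | NULL, NULL], since the loop below clears every
+ * stale pointer to the deleted mask in the invalid section. */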
+ for (i = ma->count; i < ma->max; i++)
+ if (mask == ovsl_dereference(ma->masks[i]))
+ RCU_INIT_POINTER(ma->masks[i], NULL);
+}
+
int ovs_flow_tbl_init(struct flow_table *table)
{
struct table_instance *ti;
return NULL;
}
-
static struct sw_flow *flow_lookup(struct flow_table *tbl,
struct table_instance *ti,
struct mask_array *ma,
struct sw_flow *flow;
int i;
- for (i = 0; i < ma->max; i++) {
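+ /* Only entries in [0, ma->count) are valid; deletion keeps the
+ * valid section compact (see tbl_mask_array_delete_mask()). */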
+ for (i = 0; i < ma->count; i++) {
struct sw_flow_mask *mask;
mask = rcu_dereference_ovsl(ma->masks[i]);
return NULL;
}
+/* If the cache index is outside of the valid region, update the index
+ * in case the cache entry was moved up. */
+static void fixup_cache_entry_index(struct mask_cache_entry *e,
+ const struct mask_array *ma,
+ const struct sw_flow_mask *cache)
+{
+ int i;
+
+ for (i = 0; i < ma->count; i++)
+ if (cache == ovsl_dereference(ma->masks[i])) {
+ e->mask_index = i;
+ return;
+ }
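+
+ /* No match: leave the stale index in place; the caller still
+ * performs the lookup with the mask pointer it already read. */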
+}
+
/*
* mask_cache maps a flow to a probable mask. This cache is not a tightly
* coupled cache; updates to the mask list can result in inconsistent
e = &entries[index];
if (e->skb_hash == skb_hash) {
struct sw_flow_mask *cache;
+ int i = e->mask_index;
- cache = rcu_dereference_ovsl(ma->masks[e->mask_index]);
- if (cache) {
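+ /* Fast path: the cached index still points into the valid
+ * section and can be used directly. */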
+ if (likely(i < ma->count)) {
+ cache = rcu_dereference_ovsl(ma->masks[i]);
flow = masked_flow_lookup(ti, key, cache,
n_mask_hit);
- if (flow) /* Cache hit. */
- return flow;
+ } else if (i < ma->max) {
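+ /* The mask may have been moved up by a deletion; repair
+ * the cached index before doing the lookup. */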
+ flow = NULL;
+ cache = rcu_dereference_ovsl(ma->masks[i]);
+ if (cache) {
+ fixup_cache_entry_index(e, ma, cache);
+ flow = masked_flow_lookup(ti, key,
+ cache, n_mask_hit);
+ }
+ } else {
+ flow = NULL;
}
+
+ if (flow) /* Cache hit. */
+ return flow;
+
+ /* Cache miss. This is the best cache
+ * replacement candidate. */
e->skb_hash = 0;
- ce = e; /* The best cache replacement candidate. */
+ ce = e;
break;
}
if (!mask->ref_count) {
struct mask_array *ma;
- int i;
ma = ovsl_dereference(tbl->mask_array);
- for (i = 0; i < ma->max; i++) {
- if (mask == ovsl_dereference(ma->masks[i])) {
- RCU_INIT_POINTER(ma->masks[i], NULL);
- ma->count--;
- goto free;
- }
+ /* Shrink the mask array if necessary. */
+ if (ma->max > MASK_ARRAY_SIZE_MIN * 2 &&
+     ma->count <= ma->max / 4) {
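+ /* For example, assuming MASK_ARRAY_SIZE_MIN is 16: with
+ * max == 64 and count <= 16, the array is halved to 32
+ * slots. */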
+ tbl_mask_array_realloc(tbl, ma->max / 2);
+ ma = ovsl_dereference(tbl->mask_array);
}
- BUG();
-free:
+
+ tbl_mask_array_delete_mask(ma, mask);
call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
}
}
int i;
ma = ovsl_dereference(tbl->mask_array);
- for (i = 0; i < ma->max; i++) {
+ for (i = 0; i < ma->count; i++) {
struct sw_flow_mask *t;
t = ovsl_dereference(ma->masks[i]);
mask = flow_mask_find(tbl, new);
if (!mask) {
struct mask_array *ma;
- int i;
/* Allocate a new mask if none exists. */
mask = mask_alloc();
}
ma = ovsl_dereference(tbl->mask_array);
}
- for (i = 0; i < ma->max; i++) {
- const struct sw_flow_mask *t;
-
- t = ovsl_dereference(ma->masks[i]);
- if (!t) {
- rcu_assign_pointer(ma->masks[i], mask);
- ma->count++;
- break;
- }
- }
+
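+ /* The valid section is compact, so the new mask is simply
+ * appended at index ma->count. */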
+ rcu_assign_pointer(ma->masks[ma->count], mask);
+ ma->count++;
} else {
BUG_ON(!mask->ref_count);
mask->ref_count++;