From 0e6efbe2712da03522532dc5e84806a96f6a0dd1 Mon Sep 17 00:00:00 2001
From: Pravin B Shelar
Date: Tue, 5 Aug 2014 10:47:23 -0700
Subject: [PATCH] datapath: Optimize Flow mask cache hash collision case.

In case of a hash collision on the mask cache, OVS does an extra
flow lookup. The following patch avoids it.

Signed-off-by: Pravin B Shelar
Acked-by: Jarno Rajahalme
---
 datapath/flow_table.c | 48 ++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 23 deletions(-)

diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index cfd5a84af..704662389 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -555,6 +555,9 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	return NULL;
 }
 
+/* Flow lookup does a full lookup on the flow table. It starts with
+ * the mask from the index passed in *index.
+ */
 static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct table_instance *ti,
 				   struct mask_array *ma,
@@ -562,15 +565,27 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   u32 *n_mask_hit,
 				   u32 *index)
 {
+	struct sw_flow_mask *mask;
 	struct sw_flow *flow;
 	int i;
 
-	for (i = 0; i < ma->max; i++) {
-		struct sw_flow_mask *mask;
+	if (*index < ma->max) {
+		mask = rcu_dereference_ovsl(ma->masks[*index]);
+		if (mask) {
+			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+			if (flow)
+				return flow;
+		}
+	}
+
+	for (i = 0; i < ma->max; i++) {
+
+		if (i == *index)
+			continue;
 
 		mask = rcu_dereference_ovsl(ma->masks[i]);
 		if (!mask)
-			break;
+			return NULL;
 		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
 		if (flow) { /* Found */
 			*index = i;
@@ -603,7 +618,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	*n_mask_hit = 0;
 	if (unlikely(!skb_hash)) {
-		u32 __always_unused mask_index;
+		u32 mask_index = 0;
 
 		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
 	}
@@ -618,24 +633,11 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 		e = &entries[index];
 		if (e->skb_hash == skb_hash) {
-			struct sw_flow_mask *cache;
-			int i = e->mask_index;
-
-			if (likely(i < ma->max)) {
-				cache = rcu_dereference(ma->masks[i]);
-				if (cache) {
-					flow = masked_flow_lookup(ti, key,
-							cache, n_mask_hit);
-					if (flow)
-						return flow;
-				}
-			}
-
-			/* Cache miss. This is the best cache
-			 * replacement candidate. */
-			e->skb_hash = 0;
-			ce = e;
-			break;
+			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
+					   &e->mask_index);
+			if (!flow)
+				e->skb_hash = 0;
+			return flow;
 		}
 
 		if (!ce || e->skb_hash < ce->skb_hash)
@@ -658,7 +660,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
-	u32 __always_unused index;
+	u32 index = 0;
 
 	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
 }
-- 
2.20.1
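
Annotation (not part of the patch): the core of the change is that a mask
cache collision no longer invalidates the entry and falls back to a second
full scan; instead, the cached entry's mask_index is passed by reference
into flow_lookup(), which probes that slot first and refreshes it in place
when some other mask matches. The standalone C sketch below models this
"scan with a cached hint" pattern under those assumptions. It is
illustrative only: every name in it (toy_mask, toy_masked_lookup,
toy_lookup_hint, MAX_MASKS) is hypothetical and does not exist in the OVS
tree.

#include <stdio.h>
#include <stdbool.h>

#define MAX_MASKS 4

struct toy_mask {
	unsigned int match_key;		/* stand-in for a real sw_flow_mask */
	bool valid;
};

static struct toy_mask masks[MAX_MASKS];

/* Stand-in for masked_flow_lookup(): try exactly one mask. */
static bool toy_masked_lookup(unsigned int key, const struct toy_mask *m)
{
	return m->valid && m->match_key == key;
}

/* Full lookup that starts at the hinted slot *index, then scans the
 * rest, skipping the slot already tried.  On a hit the hint is
 * refreshed in place, mirroring how the patch threads &e->mask_index
 * through flow_lookup() instead of zeroing the cache entry and
 * repeating the whole scan.
 */
static int toy_lookup_hint(unsigned int key, unsigned int *index)
{
	unsigned int i;

	if (*index < MAX_MASKS && toy_masked_lookup(key, &masks[*index]))
		return (int)*index;	/* hit on the hinted mask */

	for (i = 0; i < MAX_MASKS; i++) {
		if (i == *index)
			continue;	/* already probed above */
		if (toy_masked_lookup(key, &masks[i])) {
			*index = i;	/* refresh the hint */
			return (int)i;
		}
	}
	return -1;	/* miss: the caller invalidates its cache entry */
}

int main(void)
{
	unsigned int hint = 0;	/* stale hint: slot 0 does not match */

	masks[2].match_key = 42;
	masks[2].valid = true;

	printf("found at slot %d\n", toy_lookup_hint(42, &hint));	/* 2 */
	printf("hint is now %u\n", hint);				/* 2 */
	return 0;
}

After the first collision the hint points at the matching mask, so the next
packet with the same skb_hash succeeds on the initial probe; the pre-patch
code instead cleared e->skb_hash and performed a second full pass over the
mask array, which is exactly the extra lookup the commit message cites.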