datapath: Add NULL check for mask pointer.
Author: Pravin B Shelar <pshelar@nicira.com>
Thu, 24 Jul 2014 20:32:35 +0000 (13:32 -0700)
Committer: Pravin B Shelar <pshelar@nicira.com>
Fri, 25 Jul 2014 21:22:31 +0000 (14:22 -0700)
There is a race in the datapath: deleting the last mask in the mask array can
result in a NULL pointer dereference during lookup.
The datapath lookup does not check the mask pointer if its index is less than
the mask-array count. That is safe because the delete operation moves the last
valid pointer into the deleted element's slot. But this does not work when we
are deleting the last element in the array. The following patch adds a NULL
check for the mask pointer.
This patch also avoids accessing ma->count without any locks.

Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Andy Zhou <azhou@nicira.com>
datapath/flow_table.c

index 9ab1020..765930e 100644 (file)
@@ -564,37 +564,23 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
        struct sw_flow *flow;
        int i;
 
-       for (i = 0; i < ma->count; i++) {
+       for (i = 0; i < ma->max; i++) {
                struct sw_flow_mask *mask;
 
                mask = rcu_dereference_ovsl(ma->masks[i]);
-               if (mask) {
-                       flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
-                       if (flow) { /* Found */
-                               *index = i;
-                               return flow;
-                       }
+               if (!mask)
+                       break;
+
+               flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+               if (flow) { /* Found */
+                       *index = i;
+                       return flow;
                }
        }
 
        return NULL;
 }
 
-/* If the the cache index is outside of the valid region, update the index
- * in case cache entry was moved up. */
-static void fixup_cache_entry_index(struct mask_cache_entry *e,
-                                   const struct mask_array *ma,
-                                   const struct sw_flow_mask *cache)
-{
-       int i;
-
-       for (i = 0; i < ma->count; i++)
-               if (cache == ovsl_dereference(ma->masks[i])) {
-                       e->mask_index = i;
-                       return;
-               }
-}
-
 /*
  * mask_cache maps flow to probable mask. This cache is not tightly
  * coupled cache, It means updates to  mask list can result in inconsistent
@@ -607,8 +593,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          u32 skb_hash,
                                          u32 *n_mask_hit)
 {
-       struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
-       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+       struct mask_array *ma = rcu_dereference(tbl->mask_array);
+       struct table_instance *ti = rcu_dereference(tbl->ti);
        struct mask_cache_entry *entries, *ce;
        struct sw_flow *flow;
        u32 hash = skb_hash;
@@ -634,25 +620,16 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                        struct sw_flow_mask *cache;
                        int i = e->mask_index;
 
-                       if (likely(i < ma->count)) {
-                               cache = rcu_dereference_ovsl(ma->masks[i]);
-                               flow = masked_flow_lookup(ti, key, cache,
-                                                         n_mask_hit);
-                       } else if (i < ma->max) {
-                               flow = NULL;
-                               cache = rcu_dereference_ovsl(ma->masks[i]);
+                       if (likely(i < ma->max)) {
+                               cache = rcu_dereference(ma->masks[i]);
                                if (cache) {
-                                       fixup_cache_entry_index(e, ma, cache);
                                        flow = masked_flow_lookup(ti, key,
                                                        cache, n_mask_hit);
+                                       if (flow)
+                                               return flow;
                                }
-                       } else {
-                               flow = NULL;
                        }
 
-                       if (flow)  /* Cache hit. */
-                               return flow;
-
                        /* Cache miss. This is the best cache
                         * replacement candidate.  */
                        e->skb_hash = 0;