net/mlx4_core: Fix access to uninitialized index
[cascardo/linux.git] / mm / mempool.c
index 7924f4f..9b7a14a 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -112,12 +112,12 @@ static void kasan_poison_element(mempool_t *pool, void *element)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
-static void kasan_unpoison_element(mempool_t *pool, void *element)
+static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
        if (pool->alloc == mempool_alloc_slab)
-               kasan_slab_alloc(pool->pool_data, element);
+               kasan_slab_alloc(pool->pool_data, element, flags);
        if (pool->alloc == mempool_kmalloc)
-               kasan_krealloc(element, (size_t)pool->pool_data);
+               kasan_krealloc(element, (size_t)pool->pool_data, flags);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
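
Which KASAN hook receives the new gfp argument depends on how the pool was created, since kasan_unpoison_element() dispatches on pool->alloc. A minimal sketch of the slab-backed case handled by the first branch above (the cache name, object size, and pool depth are hypothetical; the mempool and slab calls are the real API):

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *io_cache;
static mempool_t *io_pool;

static int __init io_pool_init(void)
{
        /* Slab-backed pool: pool->alloc is mempool_alloc_slab and
         * pool->pool_data is the cache, so elements popped from the
         * reserve are unpoisoned via kasan_slab_alloc() with the
         * caller's gfp flags.
         */
        io_cache = kmem_cache_create("io_unit", 256, 0, 0, NULL);
        if (!io_cache)
                return -ENOMEM;

        io_pool = mempool_create_slab_pool(16, io_cache);
        if (!io_pool) {
                kmem_cache_destroy(io_cache);
                return -ENOMEM;
        }
        return 0;
}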
@@ -130,12 +130,12 @@ static void add_element(mempool_t *pool, void *element)
        pool->elements[pool->curr_nr++] = element;
 }
 
-static void *remove_element(mempool_t *pool)
+static void *remove_element(mempool_t *pool, gfp_t flags)
 {
        void *element = pool->elements[--pool->curr_nr];
 
        BUG_ON(pool->curr_nr < 0);
-       kasan_unpoison_element(pool, element);
+       kasan_unpoison_element(pool, element, flags);
        check_element(pool, element);
        return element;
 }
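
The reserve itself is a plain LIFO array: add_element() pushes and remove_element() pops the most recently parked element; the new flags argument only threads the caller's allocation context through to the KASAN hooks. A user-space analogue of the stack discipline, stripped of locking and KASAN (hypothetical, illustration only):

#include <assert.h>
#include <stddef.h>

#define POOL_MAX 16

struct pool {
        void *elements[POOL_MAX];
        int curr_nr;
};

static void add_element(struct pool *pool, void *element)
{
        assert(pool->curr_nr < POOL_MAX);
        pool->elements[pool->curr_nr++] = element;      /* push */
}

static void *remove_element(struct pool *pool)
{
        assert(pool->curr_nr > 0);
        return pool->elements[--pool->curr_nr];         /* pop */
}

The kernel version instead checks BUG_ON(pool->curr_nr < 0) after the pop, which catches the same underflow since curr_nr is a signed int.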
@@ -154,7 +154,7 @@ void mempool_destroy(mempool_t *pool)
                return;
 
        while (pool->curr_nr) {
-               void *element = remove_element(pool);
+               void *element = remove_element(pool, GFP_KERNEL);
                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
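
mempool_destroy() must run in process context, which is why the drain loop can hard-code GFP_KERNEL for the KASAN unpoisoning. Continuing the hypothetical module sketch above, the matching teardown might look like:

static void __exit io_pool_exit(void)
{
        mempool_destroy(io_pool);       /* drains and frees every parked element */
        kmem_cache_destroy(io_cache);
}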
@@ -250,7 +250,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr)
        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
-                       element = remove_element(pool);
+                       element = remove_element(pool, GFP_KERNEL);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
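
mempool_resize() likewise assumes process context, hence GFP_KERNEL here too; note that the pool lock is dropped around each pool->free() call so the free callback never runs under the spinlock. A hypothetical caller shrinking its reserve once a heavy I/O phase is over:

static void io_pool_quiesce(void)
{
        int err = mempool_resize(io_pool, 4);

        if (err)
                pr_warn("io_pool: resize to 4 elements failed: %d\n", err);
}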
@@ -310,25 +310,36 @@ EXPORT_SYMBOL(mempool_resize);
  * returns NULL. Note that due to preallocation, this function
  * *never* fails when called from process contexts. (it might
  * fail if called from an IRQ context.)
- * Note: using __GFP_ZERO is not supported.
+ * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
  */
-void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
+void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
        void *element;
        unsigned long flags;
        wait_queue_t wait;
        gfp_t gfp_temp;
 
+       /* If oom killed, memory reserves are essential to prevent livelock */
+       VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
+       /* No element size to zero on allocation */
        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
+
        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
 
-       gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */
 
        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
 
 repeat_alloc:
+       if (likely(pool->curr_nr)) {
+               /*
+                * Don't allocate from emergency reserves if there are
+                * elements available.  This check is racy, but it will
+                * be rechecked each loop.
+                */
+               gfp_temp |= __GFP_NOMEMALLOC;
+       }
 
        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
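
This is the heart of the change: __GFP_NOMEMALLOC is no longer forced on for the whole call. It is ORed into gfp_temp per pass, and only while the pool still holds parked elements, so an OOM-killed task that finds its pool empty can dip into the memory reserves instead of livelocking. A stand-alone user-space sketch of the mask arithmetic (the bit values below are stand-ins, not the real GFP encoding):

#include <stdio.h>

typedef unsigned int gfp_t;

/* Stand-in bits; the real values live in include/linux/gfp.h. */
#define __GFP_DIRECT_RECLAIM    0x1u
#define __GFP_IO                0x2u
#define __GFP_NOMEMALLOC        0x4u

int main(void)
{
        gfp_t gfp_mask = __GFP_DIRECT_RECLAIM | __GFP_IO;  /* sleeping caller */
        gfp_t gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
        int curr_nr = 1;                /* pretend one element is parked */

        /* Pass 1: relaxed mask; reserves stay off while curr_nr > 0. */
        if (curr_nr)
                gfp_temp |= __GFP_NOMEMALLOC;
        printf("pass 1: gfp_temp = %#x\n", gfp_temp);

        /* The retry test masks NOMEMALLOC back out before comparing, so
         * a second pass with the caller's full mask is still taken.
         */
        if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
                gfp_temp = gfp_mask;
                printf("pass 2: gfp_temp = %#x\n", gfp_temp);
        }
        return 0;
}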
@@ -336,7 +347,7 @@ repeat_alloc:
 
        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
-               element = remove_element(pool);
+               element = remove_element(pool, gfp_temp);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
@@ -352,11 +363,12 @@ repeat_alloc:
         * We use gfp mask w/o direct reclaim or IO for the first round.  If
         * alloc failed with that and @pool was empty, retry immediately.
         */
-       if (gfp_temp != gfp_mask) {
+       if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }
+       gfp_temp = gfp_mask;
 
        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
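
The caller-visible contract is unchanged: with __GFP_DIRECT_RECLAIM set, mempool_alloc() never returns NULL, sleeping on pool->wait until mempool_free() refills the reserve; without it, NULL must be handled. Two hypothetical callers against the sketch pool above:

static void *get_io_unit_sleeping(void)
{
        /* GFP_NOIO includes __GFP_DIRECT_RECLAIM: never NULL, may sleep. */
        return mempool_alloc(io_pool, GFP_NOIO);
}

static void *get_io_unit_atomic(void)
{
        /* No __GFP_DIRECT_RECLAIM: a single non-sleeping pass over the
         * allocator and the reserve, so NULL is possible (IRQ context).
         */
        return mempool_alloc(io_pool, GFP_ATOMIC);
}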