mm: slab/slub: coding style: whitespaces and tabs mixture
authorLQYMGT <lqymgt@gmail.com>
Wed, 10 Dec 2014 23:42:13 +0000 (15:42 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Dec 2014 01:41:04 +0000 (17:41 -0800)
Some code in mm/slab.c and mm/slub.c uses spaces instead of tabs for indentation.
Clean it up.

Signed-off-by: LQYMGT <lqymgt@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slab.c
mm/slub.c

index f34e053..eae2d21 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3580,11 +3580,11 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 
        for_each_online_node(node) {
 
-                if (use_alien_caches) {
-                        new_alien = alloc_alien_cache(node, cachep->limit, gfp);
-                        if (!new_alien)
-                                goto fail;
-                }
+               if (use_alien_caches) {
+                       new_alien = alloc_alien_cache(node, cachep->limit, gfp);
+                       if (!new_alien)
+                               goto fail;
+               }
 
                new_shared = NULL;
                if (cachep->shared) {
index ae7b9f1..761789e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2554,7 +2554,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
                        } else { /* Needs to be taken off a list */
 
-                               n = get_node(s, page_to_nid(page));
+                               n = get_node(s, page_to_nid(page));
                                /*
                                 * Speculatively acquire the list_lock.
                                 * If the cmpxchg does not succeed then we may
@@ -2587,10 +2587,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 * The list lock was not taken therefore no list
                 * activity can be necessary.
                 */
-                if (was_frozen)
-                        stat(s, FREE_FROZEN);
-                return;
-        }
+               if (was_frozen)
+                       stat(s, FREE_FROZEN);
+               return;
+       }
 
        if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
                goto slab_empty;