From 55178acc967196b8035508958c3c93662562f0eb Mon Sep 17 00:00:00 2001
From: Mandeep Singh Baines
Date: Fri, 22 Jul 2011 08:48:48 -0700
Subject: [PATCH] CHROMIUM: verity: use alloc_page instead of mempool_alloc

I ran a quick test and verified that we never hit the remove_element
path of mempool_alloc, so the 8 MB of mempool memory is never actually
used.

Since dm-verity is read-only, it is not part of memory reclaim, so a
memory pool is not necessary. Since we allocate with GFP_KERNEL, an
allocation failure is highly unlikely, and if an allocation does fail,
we already have code to handle the failure.

By removing the memory pool, we save 8 MB of RAM and 1 ms of boot time:

[    0.974280] before mempool_create_page_pool
[    0.975345] after mempool_create_page_pool

BUG=chromium-os:9752
TEST=Ran dm-verity.git unit tests. Ran platform_DMVerityCorruption on
H/W. Also ran platform_BootPerfServer:

Before:
seconds_power_on_to_login      8.81
seconds_power_on_to_login{1}   8.76
seconds_power_on_to_login{2}   9.24
seconds_power_on_to_login{3}   8.83
seconds_power_on_to_login{4}   8.76
seconds_power_on_to_login{5}   8.84
seconds_power_on_to_login{6}   8.86
seconds_power_on_to_login{7}   8.86
seconds_power_on_to_login{8}   8.86
seconds_power_on_to_login{9}   8.97
Mean: 8.87 Stdev: 0.14

After:
seconds_power_on_to_login      8.92
seconds_power_on_to_login{1}   9.06
seconds_power_on_to_login{2}   8.96
seconds_power_on_to_login{3}   8.71
seconds_power_on_to_login{4}   8.99
seconds_power_on_to_login{5}   8.89
seconds_power_on_to_login{6}   8.77
seconds_power_on_to_login{7}   8.96
seconds_power_on_to_login{8}   8.95
seconds_power_on_to_login{9}   8.95
Mean: 8.91 Stdev: 0.10

The difference between the two runs is within one stdev.

Change-Id: I9eddf2f01e6d3f09a010622d09485fac0924a8db
Signed-off-by: Mandeep Singh Baines
Reviewed-on: http://gerrit.chromium.org/gerrit/4584
---
 drivers/md/dm-bht.c    | 24 +++++-------------------
 include/linux/dm-bht.h |  2 --
 2 files changed, 5 insertions(+), 21 deletions(-)

diff --git a/drivers/md/dm-bht.c b/drivers/md/dm-bht.c
index 45ac407156eb..a66b97cb8cb8 100644
--- a/drivers/md/dm-bht.c
+++ b/drivers/md/dm-bht.c
@@ -382,16 +382,6 @@ static int dm_bht_initialize_entries(struct dm_bht *bht)
 		}
 	}
 
-	/* Go ahead and reserve enough space for everything. We really don't
-	 * want memory allocation failures. Once we start freeing verified
-	 * entries, then we can reduce this reservation.
-	 */
-	bht->entry_pool = mempool_create_page_pool(total_entries, 0);
-	if (!bht->entry_pool) {
-		DMERR("failed to allocate mempool");
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -564,8 +554,7 @@ int dm_bht_store_block(struct dm_bht *bht, unsigned int block,
 	 * The number of updated entries is NOT tracked.
 	 */
 	if (state == DM_BHT_ENTRY_UNALLOCATED) {
-		node_page = (struct page *) mempool_alloc(bht->entry_pool,
-							  GFP_KERNEL);
+		node_page = alloc_page(GFP_KERNEL);
 		if (!node_page) {
 			atomic_set(&entry->state, DM_BHT_ENTRY_ERROR);
 			return -ENOMEM;
@@ -645,8 +634,7 @@ int dm_bht_compute(struct dm_bht *bht, void *read_cb_ctx)
 			unsigned int count = bht->node_count;
 			struct page *pg;
 
-			pg = (struct page *) mempool_alloc(bht->entry_pool,
-							   GFP_NOIO);
+			pg = alloc_page(GFP_NOIO);
 			if (!pg) {
 				DMCRIT("an error occurred while reading entry");
 				goto out;
@@ -793,7 +781,7 @@ int dm_bht_populate(struct dm_bht *bht, void *ctx,
 			continue;
 
 		/* Current entry is claimed for allocation and loading */
-		pg = (struct page *) mempool_alloc(bht->entry_pool, GFP_NOIO);
+		pg = alloc_page(GFP_NOIO);
 		if (!pg)
 			goto nomem;
 
@@ -815,7 +803,7 @@ error_state:
 	return state;
 
 nomem:
-	DMCRIT("failed to allocate memory for entry->nodes from pool");
+	DMCRIT("failed to allocate memory for entry->nodes");
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(dm_bht_populate);
@@ -870,15 +858,13 @@ int dm_bht_destroy(struct dm_bht *bht)
 				continue;
 			default:
 				BUG_ON(!entry->nodes);
-				mempool_free(virt_to_page(entry->nodes),
-					     bht->entry_pool);
+				__free_page(virt_to_page(entry->nodes));
 				break;
 			}
 		}
 		kfree(bht->levels[depth].entries);
 		bht->levels[depth].entries = NULL;
 	}
-	mempool_destroy(bht->entry_pool);
 	kfree(bht->levels);
 	for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
 		if (bht->hash_desc[cpu].tfm)
diff --git a/include/linux/dm-bht.h b/include/linux/dm-bht.h
index 69b010083c72..929a68456e43 100644
--- a/include/linux/dm-bht.h
+++ b/include/linux/dm-bht.h
@@ -11,7 +11,6 @@
 
 #include <linux/compiler.h>
 #include <linux/crypto.h>
-#include <linux/mempool.h>
 #include <linux/types.h>
 
 /* To avoid allocating memory for digest tests, we just setup a
@@ -97,7 +96,6 @@ struct dm_bht {
 	/* bool verified;  Full tree is verified */
 	u8 root_digest[DM_BHT_MAX_DIGEST_SIZE];
 	struct dm_bht_level *levels;  /* in reverse order */
-	mempool_t *entry_pool;
 	/* Callbacks for reading and/or writing to the hash device */
 	dm_bht_callback read_cb;
 	dm_bht_callback write_cb;
-- 
2.20.1
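
For readers outside the dm-bht tree, the pattern this patch converges on is a
plain single-page allocation with explicit failure handling. The sketch below
is illustrative only and not part of the patch; the example_* helper names are
hypothetical, while alloc_page(), page_address(), virt_to_page(), and
__free_page() are the standard kernel APIs that appear in the diff:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical helper mirroring dm_bht_store_block(): allocate one
 * page directly instead of drawing from a pre-reserved mempool. With
 * GFP_KERNEL a failure is highly unlikely, and the callers already
 * map a NULL return to -ENOMEM.
 */
static u8 *example_alloc_nodes(void)
{
	struct page *pg = alloc_page(GFP_KERNEL);

	return pg ? page_address(pg) : NULL;
}

/* Counterpart to the dm_bht_destroy() hunk: pages obtained with
 * alloc_page() are returned with __free_page() rather than
 * mempool_free().
 */
static void example_free_nodes(u8 *nodes)
{
	if (nodes)
		__free_page(virt_to_page(nodes));
}

Note that the diff keeps GFP_NOIO for the allocations made on the I/O path
(dm_bht_compute() and dm_bht_populate()), where allocating with GFP_KERNEL
could recurse into I/O for reclaim; only dm_bht_store_block() uses GFP_KERNEL,
where blocking on reclaim I/O is safe.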