author     Elijah Newren <newren@gmail.com>   2020-08-15 17:37:56 +0000
committer  Junio C Hamano <gitster@pobox.com> 2020-08-18 12:16:06 -0700
commit    44c7e1a7e08c0863c4156869364cb5751a23784e (patch)
tree      a0e8f23bd18a49bfe9a92421dc5de8bb6df20afd
parent    mem-pool: add convenience functions for strdup and strndup (diff)
download  tgif-44c7e1a7e08c0863c4156869364cb5751a23784e.tar.xz
mem-pool: use more standard initialization and finalization
A typical memory type, such as strbuf, hashmap, or string_list can be
stored on the stack or embedded within another structure.  mem_pool
cannot be, because of how mem_pool_init() and mem_pool_discard() are
written.  mem_pool_init() does essentially the following (simplified
for purposes of explanation here):

    void mem_pool_init(struct mem_pool **pool...)
    {
        *pool = xcalloc(1, sizeof(*pool));

It seems weird to require that mem_pools can only be accessed through
a pointer.  It also seems slightly dangerous: unlike strbuf_release()
or strbuf_reset() or string_list_clear(), all of which put the data
structure into a state where it can be re-used after the call,
mem_pool_discard(pool) will leave pool pointing at free'd memory.
read-cache (and split-index) are the only current users of mem_pools,
and they haven't fallen into a use-after-free mistake here, but it
seems likely to be problematic for future users especially since
several of the current callers of mem_pool_init() will only call it
when the mem_pool* is not already allocated (i.e. is NULL).

This type of mechanism also prevents finding synchronization points
where one can free existing memory and then resume more operations.
It would be natural at such points to run something like

    mem_pool_discard(pool...);

and, if necessary,

    mem_pool_init(&pool...);

and then carry on continuing to use the pool.  However, this fails
badly if several objects had a copy of the value of pool from before
these commands; in such a case, those objects won't get the updated
value of pool that mem_pool_init() overwrites pool with and they'll
all instead be reading and writing from free'd memory.

Modify mem_pool_init()/mem_pool_discard() to behave more like
strbuf_init()/strbuf_release() or
string_list_init()/string_list_clear().  In particular: (1) make
mem_pool_init() just take a mem_pool* and have it only worry about
allocating struct mp_blocks, not the struct mem_pool itself, (2) make
mem_pool_discard() free the memory that the pool was responsible for,
but leave it in a state where it can be used to allocate more memory
afterward (without the need to call mem_pool_init() again).

Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
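To make the behavioral difference concrete, here is a minimal sketch of
caller code under the old and new APIs.  The two functions below are
hypothetical callers, not part of this patch, and they target the API
before and after the change respectively (so they would not compile in
the same tree); mem_pool_strdup() is the convenience helper added by
the parent commit.

    #include "git-compat-util.h"
    #include "mem-pool.h"

    /* Old API (before this patch): the pool can only live behind a
     * pointer; mem_pool_init() allocates the struct itself, and
     * mem_pool_discard() frees it, leaving the pointer dangling. */
    static void old_style(void)
    {
            struct mem_pool *pool = NULL;
            mem_pool_init(&pool, 0);          /* allocates and initializes */
            mem_pool_strdup(pool, "example");
            mem_pool_discard(pool, 0);        /* pool now points at freed memory */
    }

    /* New API (after this patch): the caller owns the storage, stack or
     * embedded; discard releases only the blocks, so the same pool can
     * be reused without calling mem_pool_init() again. */
    static void new_style(void)
    {
            struct mem_pool pool;
            mem_pool_init(&pool, 0);           /* initializes caller-owned struct */
            mem_pool_strdup(&pool, "first");
            mem_pool_discard(&pool, 0);        /* blocks freed; pool still usable */
            mem_pool_strdup(&pool, "second");  /* no re-init needed */
            mem_pool_discard(&pool, 0);
    }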
-rw-r--r--  mem-pool.c    | 15
-rw-r--r--  mem-pool.h    |  4
-rw-r--r--  read-cache.c  | 21
-rw-r--r--  split-index.c |  6
4 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/mem-pool.c b/mem-pool.c
index 020b51e0c5..7659919ab2 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -33,21 +33,13 @@ static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t b
return p;
}
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
+void mem_pool_init(struct mem_pool *pool, size_t initial_size)
{
- struct mem_pool *pool;
-
- if (*mem_pool)
- return;
-
- pool = xcalloc(1, sizeof(*pool));
-
+ memset(pool, 0, sizeof(*pool));
pool->block_alloc = BLOCK_GROWTH_SIZE;
if (initial_size > 0)
mem_pool_alloc_block(pool, initial_size, NULL);
-
- *mem_pool = pool;
}
void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
@@ -66,7 +58,8 @@ void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
free(block_to_free);
}
- free(mem_pool);
+ mem_pool->mp_block = NULL;
+ mem_pool->pool_alloc = 0;
}
void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
diff --git a/mem-pool.h b/mem-pool.h
index ca062c9070..870161ab44 100644
--- a/mem-pool.h
+++ b/mem-pool.h
@@ -24,10 +24,10 @@ struct mem_pool {
/*
* Initialize mem_pool with specified initial size.
*/
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
+void mem_pool_init(struct mem_pool *pool, size_t initial_size);
/*
- * Discard a memory pool and free all the memory it is responsible for.
+ * Discard all the memory the memory pool is responsible for.
*/
void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
diff --git a/read-cache.c b/read-cache.c
index 8ed1c29b54..fa291cdbee 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -89,8 +89,10 @@ static struct mem_pool *find_mem_pool(struct index_state *istate)
else
pool_ptr = &istate->ce_mem_pool;
- if (!*pool_ptr)
- mem_pool_init(pool_ptr, 0);
+ if (!*pool_ptr) {
+ *pool_ptr = xmalloc(sizeof(**pool_ptr));
+ mem_pool_init(*pool_ptr, 0);
+ }
return *pool_ptr;
}
@@ -2006,11 +2008,12 @@ static unsigned long load_all_cache_entries(struct index_state *istate,
{
unsigned long consumed;
+ istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
if (istate->version == 4) {
- mem_pool_init(&istate->ce_mem_pool,
+ mem_pool_init(istate->ce_mem_pool,
estimate_cache_size_from_compressed(istate->cache_nr));
} else {
- mem_pool_init(&istate->ce_mem_pool,
+ mem_pool_init(istate->ce_mem_pool,
estimate_cache_size(mmap_size, istate->cache_nr));
}
@@ -2070,7 +2073,8 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
if (istate->name_hash_initialized)
BUG("the name hash isn't thread safe");
- mem_pool_init(&istate->ce_mem_pool, 0);
+ istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
+ mem_pool_init(istate->ce_mem_pool, 0);
/* ensure we have no more threads than we have blocks to process */
if (nr_threads > ieot->nr)
@@ -2097,11 +2101,12 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
nr = 0;
for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
nr += p->ieot->entries[j].nr;
+ p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
if (istate->version == 4) {
- mem_pool_init(&p->ce_mem_pool,
+ mem_pool_init(p->ce_mem_pool,
estimate_cache_size_from_compressed(nr));
} else {
- mem_pool_init(&p->ce_mem_pool,
+ mem_pool_init(p->ce_mem_pool,
estimate_cache_size(mmap_size, nr));
}
@@ -2358,7 +2363,7 @@ int discard_index(struct index_state *istate)
if (istate->ce_mem_pool) {
mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
- istate->ce_mem_pool = NULL;
+ FREE_AND_NULL(istate->ce_mem_pool);
}
return 0;
diff --git a/split-index.c b/split-index.c
index e6154e4ea9..c0e8ad670d 100644
--- a/split-index.c
+++ b/split-index.c
@@ -79,8 +79,10 @@ void move_cache_to_base_index(struct index_state *istate)
if (si->base &&
si->base->ce_mem_pool) {
- if (!istate->ce_mem_pool)
- mem_pool_init(&istate->ce_mem_pool, 0);
+ if (!istate->ce_mem_pool) {
+ istate->ce_mem_pool = xmalloc(sizeof(struct mem_pool));
+ mem_pool_init(istate->ce_mem_pool, 0);
+ }
mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
}
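For callers that keep the pool behind a pointer, as read-cache and
split-index do above, the new convention is simply to pair the
allocation with initialization and to mirror that split at teardown.
A minimal sketch, using a hypothetical holder struct that stands in
for index_state:

    #include "git-compat-util.h"
    #include "mem-pool.h"

    /* Hypothetical holder, standing in for index_state above. */
    struct holder {
            struct mem_pool *ce_mem_pool;
    };

    static void holder_setup(struct holder *h)
    {
            /* mem_pool_init() no longer allocates the struct, so the
             * caller allocates it first and then initializes it. */
            h->ce_mem_pool = xmalloc(sizeof(*h->ce_mem_pool));
            mem_pool_init(h->ce_mem_pool, 0);
    }

    static void holder_teardown(struct holder *h)
    {
            /* mem_pool_discard() frees only the pool's blocks; the
             * struct itself is the caller's to free, as in
             * discard_index() above. */
            mem_pool_discard(h->ce_mem_pool, 0);
            FREE_AND_NULL(h->ce_mem_pool);
    }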