Diffstat (limited to 'read-cache.c')
-rw-r--r--	read-cache.c	35
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/read-cache.c b/read-cache.c
index aa427c5c17..ecf6f68994 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -89,8 +89,10 @@ static struct mem_pool *find_mem_pool(struct index_state *istate)
 	else
 		pool_ptr = &istate->ce_mem_pool;
 
-	if (!*pool_ptr)
-		mem_pool_init(pool_ptr, 0);
+	if (!*pool_ptr) {
+		*pool_ptr = xmalloc(sizeof(**pool_ptr));
+		mem_pool_init(*pool_ptr, 0);
+	}
 
 	return *pool_ptr;
 }
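
The change above switches find_mem_pool() to a calling convention where the caller allocates the struct mem_pool and mem_pool_init() only initializes it; the same pattern recurs in the hunks below. A minimal sketch of that convention, assuming mem_pool_init(struct mem_pool *, size_t) as declared in mem-pool.h and git's xmalloc() wrapper; the helper name new_pool() is invented for illustration:

#include "git-compat-util.h"
#include "mem-pool.h"

/* Hypothetical helper: allocate the pool, then let mem_pool_init() set it up. */
static struct mem_pool *new_pool(size_t initial_size)
{
	struct mem_pool *pool = xmalloc(sizeof(*pool));

	mem_pool_init(pool, initial_size);	/* initializes only; allocation is the caller's job */
	return pool;
}

mem_pool_discard() remains the matching teardown call, as the last hunk of this diff shows.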
@@ -1171,20 +1173,6 @@ static int has_dir_name(struct index_state *istate,
 			return retval;
 		}
 
-		if (istate->cache_nr > 0 &&
-		    ce_namelen(istate->cache[istate->cache_nr - 1]) > len) {
-			/*
-			 * The directory prefix lines up with part of
-			 * a longer file or directory name, but sorts
-			 * after it, so this sub-directory cannot
-			 * collide with a file.
-			 *
-			 * last: xxx/yy-file (because '-' sorts before '/')
-			 * this: xxx/yy/abc
-			 */
-			return retval;
-		}
-
 		/*
 		 * This is a possible collision. Fall through and
 		 * let the regular search code handle it.
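
The block removed above justified an early return by name ordering: '-' (0x2d) sorts before '/' (0x2f), so a path like xxx/yy-file comes before xxx/yy/abc. A tiny standalone check of that ordering claim, using plain strcmp() as a stand-in for the index's own name comparison:

#include <assert.h>
#include <string.h>

int main(void)
{
	/* '-' (0x2d) < '/' (0x2f), so the file name sorts before the directory path */
	assert(strcmp("xxx/yy-file", "xxx/yy/abc") < 0);
	return 0;
}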
@@ -2020,11 +2008,12 @@ static unsigned long load_all_cache_entries(struct index_state *istate,
 {
 	unsigned long consumed;
 
+	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
 	if (istate->version == 4) {
-		mem_pool_init(&istate->ce_mem_pool,
+		mem_pool_init(istate->ce_mem_pool,
 			      estimate_cache_size_from_compressed(istate->cache_nr));
 	} else {
-		mem_pool_init(&istate->ce_mem_pool,
+		mem_pool_init(istate->ce_mem_pool,
 			      estimate_cache_size(mmap_size, istate->cache_nr));
 	}
 
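
load_all_cache_entries() sizes the pool differently per index version because a version-4 index prefix-compresses pathnames, so its on-disk size understates the memory the expanded cache entries will need. The sketch below only illustrates that idea; it is not the actual estimate_cache_size_from_compressed()/estimate_cache_size() code, and the average-path constant is invented:

#include "git-compat-util.h"
#include "cache.h"

#define GUESSED_AVG_PATH_LEN 80	/* made-up average pathname length */

/* v4: only an entry count is reliable, so guess an average expanded path length */
static size_t sketch_pool_size_v4(unsigned int entries)
{
	return st_mult(entries, sizeof(struct cache_entry) + GUESSED_AVG_PATH_LEN);
}

/* v2/v3: the mmap'd file already holds full pathnames, so start from its size */
static size_t sketch_pool_size_v2(size_t mmap_size, unsigned int entries)
{
	return mmap_size + st_mult(entries, sizeof(struct cache_entry));
}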
@@ -2084,7 +2073,8 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
 	if (istate->name_hash_initialized)
 		BUG("the name hash isn't thread safe");
 
-	mem_pool_init(&istate->ce_mem_pool, 0);
+	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
+	mem_pool_init(istate->ce_mem_pool, 0);
 
 	/* ensure we have no more threads than we have blocks to process */
 	if (nr_threads > ieot->nr)
@@ -2111,11 +2101,12 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
 		nr = 0;
 		for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
 			nr += p->ieot->entries[j].nr;
+		p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
 		if (istate->version == 4) {
-			mem_pool_init(&p->ce_mem_pool,
+			mem_pool_init(p->ce_mem_pool,
 				      estimate_cache_size_from_compressed(nr));
 		} else {
-			mem_pool_init(&p->ce_mem_pool,
+			mem_pool_init(p->ce_mem_pool,
 				      estimate_cache_size(mmap_size, nr));
 		}
 
@@ -2372,7 +2363,7 @@ int discard_index(struct index_state *istate)
 
 	if (istate->ce_mem_pool) {
 		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
-		istate->ce_mem_pool = NULL;
+		FREE_AND_NULL(istate->ce_mem_pool);
 	}
 
 	return 0;
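
discard_index() now pairs mem_pool_discard(), which releases the pool's blocks, with FREE_AND_NULL() to free the heap-allocated struct itself and clear the pointer. A small sketch of that teardown pairing under the same assumptions as the earlier sketches; drop_pool() is a hypothetical helper:

#include "git-compat-util.h"
#include "mem-pool.h"

/* Hypothetical helper: discard a caller-allocated pool and clear the pointer. */
static void drop_pool(struct mem_pool **pool_ptr)
{
	if (!*pool_ptr)
		return;
	mem_pool_discard(*pool_ptr, 0);	/* 0: don't poison the freed blocks */
	FREE_AND_NULL(*pool_ptr);	/* free the struct and avoid a dangling pointer */
}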