path: root/split-index.c
Diffstat (limited to 'split-index.c')
-rw-r--r--  split-index.c | 120
1 file changed, 84 insertions, 36 deletions
diff --git a/split-index.c b/split-index.c
index 21485e2066..84f067e10d 100644
--- a/split-index.c
+++ b/split-index.c
@@ -18,12 +18,12 @@ int read_link_extension(struct index_state *istate,
struct split_index *si;
int ret;
- if (sz < 20)
+ if (sz < the_hash_algo->rawsz)
return error("corrupt link extension (too short)");
si = init_split_index(istate);
- hashcpy(si->base_sha1, data);
- data += 20;
- sz -= 20;
+ hashcpy(si->base_oid.hash, data);
+ data += the_hash_algo->rawsz;
+ sz -= the_hash_algo->rawsz;
if (!sz)
return 0;
si->delete_bitmap = ewah_new();
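The hunk above replaces the hard-coded SHA-1 length of 20 with the_hash_algo->rawsz, so the link extension parses correctly whatever the active hash size is. A minimal self-contained sketch of the same pattern follows; the names (link_ext_sketch, parse_link_ext_sketch, hash_len) are illustrative assumptions, not Git's API.

    #include <stddef.h>
    #include <string.h>

    /* Hypothetical on-disk layout: <base hash><delete bitmap><replace bitmap>. */
    struct link_ext_sketch {
        unsigned char base_hash[32];   /* big enough for SHA-1 or SHA-256 */
        const unsigned char *bitmaps;  /* remainder of the payload */
        size_t bitmaps_len;
    };

    /* hash_len is 20 for SHA-1 and 32 for SHA-256; passing it in instead of
     * hard-coding 20 is what the hunk above does via the_hash_algo->rawsz. */
    static int parse_link_ext_sketch(struct link_ext_sketch *out,
                                     const unsigned char *data, size_t sz,
                                     size_t hash_len)
    {
        if (sz < hash_len)
            return -1; /* corrupt link extension: too short */
        memcpy(out->base_hash, data, hash_len);
        out->bitmaps = data + hash_len;
        out->bitmaps_len = sz - hash_len;
        return 0;
    }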
@@ -41,22 +41,15 @@ int read_link_extension(struct index_state *istate,
return 0;
}
-static int write_strbuf(void *user_data, const void *data, size_t len)
-{
- struct strbuf *sb = user_data;
- strbuf_add(sb, data, len);
- return len;
-}
-
int write_link_extension(struct strbuf *sb,
struct index_state *istate)
{
struct split_index *si = istate->split_index;
- strbuf_add(sb, si->base_sha1, 20);
+ strbuf_add(sb, si->base_oid.hash, the_hash_algo->rawsz);
if (!si->delete_bitmap && !si->replace_bitmap)
return 0;
- ewah_serialize_to(si->delete_bitmap, write_strbuf, sb);
- ewah_serialize_to(si->replace_bitmap, write_strbuf, sb);
+ ewah_serialize_strbuf(si->delete_bitmap, sb);
+ ewah_serialize_strbuf(si->replace_bitmap, sb);
return 0;
}
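The deleted write_strbuf() adapter existed only to funnel ewah output into a strbuf through a generic write callback; once a strbuf-aware serializer (ewah_serialize_strbuf) is available, the adapter is dead weight. A rough standalone sketch of that adapter pattern, using stdio instead of strbuf and hypothetical names (sink_fn, serialize_generic, write_to_file):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical generic serializer: hands chunks to a caller-supplied sink. */
    typedef int (*sink_fn)(void *user_data, const void *data, size_t len);

    static int serialize_generic(sink_fn sink, void *user_data)
    {
        static const char payload[] = "bitmap bytes";
        return sink(user_data, payload, sizeof(payload) - 1);
    }

    /* The kind of adapter the deleted write_strbuf() was: it only bridges the
     * generic sink signature to a concrete destination (here a FILE *). */
    static int write_to_file(void *user_data, const void *data, size_t len)
    {
        return (int)fwrite(data, 1, len, (FILE *)user_data);
    }

    int main(void)
    {
        /* With a destination-aware serializer (the role ewah_serialize_strbuf
         * plays above), the adapter becomes unnecessary. */
        return serialize_generic(write_to_file, stdout) <= 0;
    }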
@@ -67,7 +60,7 @@ static void mark_base_index_entries(struct index_state *base)
* To keep track of the shared entries between
* istate->base->cache[] and istate->cache[], base entry
* position is stored in each base entry. All positions start
- * from 1 instead of 0, which is resrved to say "this is a new
+ * from 1 instead of 0, which is reserved to say "this is a new
* entry".
*/
for (i = 0; i < base->cache_nr; i++)
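The comment above documents the 1-based position convention: each base entry records its own slot plus one, leaving 0 free to mean "this is a new entry". A toy sketch of that convention, with hypothetical entry_sketch/mark_base_entries_sketch names:

    /* Illustrative only: the 1-based "position in the base index" convention
     * described in the comment above; 0 is reserved for "not in the base". */
    struct entry_sketch {
        unsigned int index; /* 0 = new entry, i + 1 = lives at base->cache[i] */
    };

    static void mark_base_entries_sketch(struct entry_sketch **cache,
                                         unsigned int nr)
    {
        unsigned int i;
        for (i = 0; i < nr; i++)
            cache[i]->index = i + 1;
    }
    /* A reader maps back with cache[e->index - 1], after checking e->index != 0. */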
@@ -80,18 +73,32 @@ void move_cache_to_base_index(struct index_state *istate)
int i;
/*
- * do not delete old si->base, its index entries may be shared
- * with istate->cache[]. Accept a bit of leaking here because
- * this code is only used by short-lived update-index.
+ * If there was a previous base index, then transfer ownership of allocated
+ * entries to the parent index.
*/
+ if (si->base &&
+ si->base->ce_mem_pool) {
+
+ if (!istate->ce_mem_pool)
+ mem_pool_init(&istate->ce_mem_pool, 0);
+
+ mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+ }
+
si->base = xcalloc(1, sizeof(*si->base));
si->base->version = istate->version;
/* zero timestamp disables racy test in ce_write_index() */
si->base->timestamp = istate->timestamp;
ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
si->base->cache_nr = istate->cache_nr;
- memcpy(si->base->cache, istate->cache,
- sizeof(*istate->cache) * istate->cache_nr);
+
+ /*
+ * The mem_pool needs to move with the allocated entries.
+ */
+ si->base->ce_mem_pool = istate->ce_mem_pool;
+ istate->ce_mem_pool = NULL;
+
+ COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
mark_base_index_entries(si->base);
for (i = 0; i < si->base->cache_nr; i++)
si->base->cache[i]->ce_flags &= ~CE_UPDATE_IN_BASE;
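Instead of accepting the old leak, the new code folds the previous base's cache-entry pool into the parent index and then moves the parent's pool into the new base, so the pool always travels with the entries allocated from it. A minimal sketch of such a pool handoff, assuming a hypothetical linked-list pool (pool_sketch, pool_combine_sketch); Git's mem_pool_combine() may be implemented differently:

    /* Hypothetical pool: a singly linked list of allocated blocks. */
    struct pool_block { struct pool_block *next; };
    struct pool_sketch { struct pool_block *blocks; };

    /* Move every block from 'victim' into 'dst'. Afterwards 'victim' owns
     * nothing, so discarding it cannot invalidate entries that the
     * destination index still points at. */
    static void pool_combine_sketch(struct pool_sketch *dst,
                                    struct pool_sketch *victim)
    {
        while (victim->blocks) {
            struct pool_block *b = victim->blocks;
            victim->blocks = b->next;
            b->next = dst->blocks;
            dst->blocks = b;
        }
    }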
@@ -131,7 +138,7 @@ static void replace_entry(size_t pos, void *data)
src->ce_flags |= CE_UPDATE_IN_BASE;
src->ce_namelen = dst->ce_namelen;
copy_cache_entry(dst, src);
- free(src);
+ discard_cache_entry(src);
si->nr_replacements++;
}
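free(src) becomes discard_cache_entry(src) because entries may now be carved out of a memory pool rather than malloc'd individually, and pool-backed entries must not be handed to free(). One plausible shape for such a helper, assuming a per-entry mem_pool_allocated-style flag (the field name here is a guess, not Git's definition):

    #include <stdlib.h>

    struct cache_entry_sketch {
        unsigned int mem_pool_allocated : 1; /* hypothetical flag */
        /* ... name, stat data, object id ... */
    };

    static void discard_entry_sketch(struct cache_entry_sketch *ce)
    {
        if (!ce)
            return;
        /* Pool-backed entries are reclaimed when their pool is discarded,
         * never one by one. */
        if (ce->mem_pool_allocated)
            return;
        free(ce);
    }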
@@ -148,8 +155,7 @@ void merge_base_index(struct index_state *istate)
istate->cache = NULL;
istate->cache_alloc = 0;
ALLOC_GROW(istate->cache, istate->cache_nr, istate->cache_alloc);
- memcpy(istate->cache, si->base->cache,
- sizeof(*istate->cache) * istate->cache_nr);
+ COPY_ARRAY(istate->cache, si->base->cache, istate->cache_nr);
si->nr_deletions = 0;
si->nr_replacements = 0;
@@ -176,10 +182,9 @@ void merge_base_index(struct index_state *istate)
ewah_free(si->delete_bitmap);
ewah_free(si->replace_bitmap);
- free(si->saved_cache);
+ FREE_AND_NULL(si->saved_cache);
si->delete_bitmap = NULL;
si->replace_bitmap = NULL;
- si->saved_cache = NULL;
si->saved_cache_nr = 0;
}
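FREE_AND_NULL() collapses the free-then-clear pair into a single step so the pointer never dangles between the two statements. A sketch of such a macro (the definition in git-compat-util.h may differ):

    #include <stdlib.h>

    /* Sketch of a free-and-clear macro. Note it evaluates its argument twice,
     * so it should only be given a plain lvalue, not an expression with side
     * effects. */
    #define FREE_AND_NULL_SKETCH(p) do { free(p); (p) = NULL; } while (0)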
@@ -196,7 +201,7 @@ void prepare_to_write_split_index(struct index_state *istate)
/* Go through istate->cache[] and mark CE_MATCHED to
* entry with positive index. We'll go through
* base->cache[] later to delete all entries in base
- * that are not marked eith either CE_MATCHED or
+ * that are not marked with either CE_MATCHED or
* CE_UPDATE_IN_BASE. If istate->cache[i] is a
* duplicate, deduplicate it.
*/
@@ -234,7 +239,7 @@ void prepare_to_write_split_index(struct index_state *istate)
base->ce_flags = base_flags;
if (ret)
ce->ce_flags |= CE_UPDATE_IN_BASE;
- free(base);
+ discard_cache_entry(base);
si->base->cache[ce->index - 1] = ce;
}
for (i = 0; i < si->base->cache_nr; i++) {
@@ -248,6 +253,8 @@ void prepare_to_write_split_index(struct index_state *istate)
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
entries[nr_entries++] = ce;
}
+ if (is_null_oid(&ce->oid))
+ istate->drop_cache_tree = 1;
}
}
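A null (all-zero) object ID on an entry means no blob has been recorded for it, so any cache tree covering that entry would be unreliable; the new flag asks the index writer to drop the cache tree. A tiny sketch of the null-OID test, assuming a fixed 32-byte hash field (is_null_oid_sketch is illustrative, not Git's helper):

    #include <string.h>

    struct oid_sketch { unsigned char hash[32]; };

    /* An object ID is "null" when every byte of the hash is zero. */
    static int is_null_oid_sketch(const struct oid_sketch *oid)
    {
        static const struct oid_sketch null_oid; /* zero-initialized */
        return !memcmp(oid, &null_oid, sizeof(null_oid));
    }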
@@ -309,20 +316,61 @@ void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce
ce == istate->split_index->base->cache[ce->index - 1])
ce->ce_flags |= CE_REMOVE;
else
- free(ce);
+ discard_cache_entry(ce);
}
void replace_index_entry_in_base(struct index_state *istate,
- struct cache_entry *old,
- struct cache_entry *new)
+ struct cache_entry *old_entry,
+ struct cache_entry *new_entry)
{
- if (old->index &&
+ if (old_entry->index &&
istate->split_index &&
istate->split_index->base &&
- old->index <= istate->split_index->base->cache_nr) {
- new->index = old->index;
- if (old != istate->split_index->base->cache[new->index - 1])
- free(istate->split_index->base->cache[new->index - 1]);
- istate->split_index->base->cache[new->index - 1] = new;
+ old_entry->index <= istate->split_index->base->cache_nr) {
+ new_entry->index = old_entry->index;
+ if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
+ discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
+ istate->split_index->base->cache[new_entry->index - 1] = new_entry;
+ }
+}
+
+void add_split_index(struct index_state *istate)
+{
+ if (!istate->split_index) {
+ init_split_index(istate);
+ istate->cache_changed |= SPLIT_INDEX_ORDERED;
+ }
+}
+
+void remove_split_index(struct index_state *istate)
+{
+ if (istate->split_index) {
+ /*
+ * When removing the split index, we need to move
+ * ownership of the mem_pool associated with the
+ * base index to the main index. There may be cache entries
+ * allocated from the base's memory pool that are shared with
+ * the_index.cache[].
+ */
+ mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+
+ /*
+ * The split index no longer owns the mem_pool backing
+ * its cache array. As we are discarding this index,
+ * mark the index as having no cache entries, so it
+ * will not attempt to clean up the cache entries or
+ * validate them.
+ */
+ if (istate->split_index->base)
+ istate->split_index->base->cache_nr = 0;
+
+ /*
+ * We can discard the split index because its
+ * memory pool has been incorporated into the
+	 * memory pool associated with the_index.
+ */
+ discard_split_index(istate);
+
+ istate->cache_changed |= SOMETHING_CHANGED;
}
}
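remove_split_index() tears things down in a careful order: merge the base's pool into the main index so shared entries stay valid, make the base forget its entries so discarding it cannot free or validate them, then discard the split index itself. A self-contained sketch of that order, with hypothetical shapes (pool_node, index_sketch, remove_split_sketch):

    #include <stdlib.h>

    /* Hypothetical minimal shapes, for illustration only. */
    struct pool_node { struct pool_node *next; };
    struct index_sketch {
        struct pool_node *ce_mem_pool; /* blocks backing the cache entries */
        void **cache;                  /* pointers into those blocks */
        unsigned int cache_nr;
    };

    /* Mirrors the teardown order of remove_split_index() above. */
    static void remove_split_sketch(struct index_sketch *main_index,
                                    struct index_sketch *base)
    {
        /* 1. Move every pool block from the base into the main index (the
         *    role mem_pool_combine() plays); shared entries stay valid. */
        while (base->ce_mem_pool) {
            struct pool_node *b = base->ce_mem_pool;
            base->ce_mem_pool = b->next;
            b->next = main_index->ce_mem_pool;
            main_index->ce_mem_pool = b;
        }

        /* 2. Make the base forget its entries, so discarding it cannot free
         *    or validate entries still referenced by main_index->cache[]. */
        base->cache_nr = 0;

        /* 3. Discard the base's own bookkeeping. */
        free(base->cache);
        base->cache = NULL;
    }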