Diffstat (limited to 'cache-tree.c')
-rw-r--r--   cache-tree.c   41
1 file changed, 22 insertions, 19 deletions
diff --git a/cache-tree.c b/cache-tree.c
index 32772b9564..ddf0cc9f9a 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -79,11 +79,9 @@ static struct cache_tree_sub *find_subtree(struct cache_tree *it,
         ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
         it->subtree_nr++;
 
-        down = xmalloc(sizeof(*down) + pathlen + 1);
+        FLEX_ALLOC_MEM(down, name, path, pathlen);
         down->cache_tree = NULL;
         down->namelen = pathlen;
-        memcpy(down->name, path, pathlen);
-        down->name[pathlen] = 0;
 
         if (pos < it->subtree_nr)
                 memmove(it->down + pos + 1,
@@ -377,7 +375,7 @@ static int update_one(struct cache_tree *it,
                  * they are not part of generated trees. Invalidate up
                  * to root to force cache-tree users to read elsewhere.
                  */
-                if (ce->ce_flags & CE_INTENT_TO_ADD) {
+                if (ce_intent_to_add(ce)) {
                         to_invalidate = 1;
                         continue;
                 }
@@ -592,7 +590,7 @@ static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *pat
         return it;
 }
 
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
 {
         int entries, was_valid, newfd;
         struct lock_file *lock_file;
@@ -603,23 +601,23 @@ int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
          */
         lock_file = xcalloc(1, sizeof(struct lock_file));
 
-        newfd = hold_locked_index(lock_file, 1);
+        newfd = hold_lock_file_for_update(lock_file, index_path, LOCK_DIE_ON_ERROR);
 
-        entries = read_cache();
+        entries = read_index_from(index_state, index_path);
         if (entries < 0)
                 return WRITE_TREE_UNREADABLE_INDEX;
         if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
-                cache_tree_free(&(active_cache_tree));
+                cache_tree_free(&index_state->cache_tree);
 
-        if (!active_cache_tree)
-                active_cache_tree = cache_tree();
+        if (!index_state->cache_tree)
+                index_state->cache_tree = cache_tree();
 
-        was_valid = cache_tree_fully_valid(active_cache_tree);
+        was_valid = cache_tree_fully_valid(index_state->cache_tree);
         if (!was_valid) {
-                if (cache_tree_update(&the_index, flags) < 0)
+                if (cache_tree_update(index_state, flags) < 0)
                         return WRITE_TREE_UNMERGED_INDEX;
                 if (0 <= newfd) {
-                        if (!write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+                        if (!write_locked_index(index_state, lock_file, COMMIT_LOCK))
                                 newfd = -1;
                 }
                 /* Not being able to write is fine -- we are only interested
@@ -631,14 +629,14 @@ int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
         }
 
         if (prefix) {
-                struct cache_tree *subtree =
-                        cache_tree_find(active_cache_tree, prefix);
+                struct cache_tree *subtree;
+                subtree = cache_tree_find(index_state->cache_tree, prefix);
                 if (!subtree)
                         return WRITE_TREE_PREFIX_ERROR;
                 hashcpy(sha1, subtree->sha1);
         }
         else
-                hashcpy(sha1, active_cache_tree->sha1);
+                hashcpy(sha1, index_state->cache_tree->sha1);
 
         if (0 <= newfd)
                 rollback_lock_file(lock_file);
@@ -646,13 +644,18 @@ int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
         return 0;
 }
 
+int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+{
+        return write_index_as_tree(sha1, &the_index, get_index_file(), flags, prefix);
+}
+
 static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
 {
         struct tree_desc desc;
         struct name_entry entry;
         int cnt;
 
-        hashcpy(it->sha1, tree->object.sha1);
+        hashcpy(it->sha1, tree->object.oid.hash);
         init_tree_desc(&desc, tree->buffer, tree->size);
         cnt = 0;
         while (tree_entry(&desc, &entry)) {
@@ -660,7 +663,7 @@ static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
                         cnt++;
                 else {
                         struct cache_tree_sub *sub;
-                        struct tree *subtree = lookup_tree(entry.sha1);
+                        struct tree *subtree = lookup_tree(entry.oid->hash);
                         if (!subtree->object.parsed)
                                 parse_tree(subtree);
                         sub = cache_tree_sub(it, entry.path);
@@ -707,7 +710,7 @@ int cache_tree_matches_traversal(struct cache_tree *root,
 
         it = find_cache_tree_from_traversal(root, info);
         it = cache_tree_find(it, ent->path);
-        if (it && it->entry_count > 0 && !hashcmp(ent->sha1, it->sha1))
+        if (it && it->entry_count > 0 && !hashcmp(ent->oid->hash, it->sha1))
                 return it->entry_count;
         return 0;
 }
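The first hunk replaces the open-coded allocate/memcpy/NUL-terminate sequence in find_subtree() with git's FLEX_ALLOC_MEM() helper, which allocates a struct together with its trailing flexible array member and copies the payload in one step. As a rough, standalone illustration of the pattern (the struct and function names below are invented for the example; this is not git's actual macro definition):

#include <stdlib.h>
#include <string.h>

struct sub_example {
        void *cache_tree;       /* placeholder for struct cache_tree * */
        int namelen;
        char name[];            /* flexible array member; must be last */
};

/*
 * Roughly what FLEX_ALLOC_MEM(down, name, path, pathlen) accomplishes:
 * one zero-initialized allocation sized for the struct, the payload and
 * a terminating NUL, followed by a single memcpy into the flex member.
 */
static struct sub_example *sub_example_new(const char *path, size_t pathlen)
{
        struct sub_example *down = calloc(1, sizeof(*down) + pathlen + 1);

        if (!down)
                return NULL;
        memcpy(down->name, path, pathlen);
        down->namelen = (int)pathlen;
        return down;
}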
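The larger change splits the old write_cache_as_tree() into a new write_index_as_tree() that takes an explicit index_state and index file path, keeping write_cache_as_tree() as a thin wrapper around the_index. A caller can then build a tree object from an index file other than the default one. A minimal sketch, assuming only the declarations visible in this diff (the helper name and the idea of an "alternate" index path are invented for the example):

#include "cache.h"
#include "cache-tree.h"

/*
 * Hypothetical example: write a tree object from a separate index file
 * rather than the_index. Error handling is abbreviated; the negative
 * WRITE_TREE_* results from write_index_as_tree() are collapsed to -1.
 */
static int tree_from_alternate_index(unsigned char *sha1, const char *index_path)
{
        struct index_state istate;

        memset(&istate, 0, sizeof(istate));
        if (write_index_as_tree(sha1, &istate, index_path, 0, NULL) < 0)
                return -1;
        discard_index(&istate);
        return 0;
}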