Diffstat (limited to 'cache-tree.c')
-rw-r--r--  cache-tree.c  31
1 file changed, 18 insertions, 13 deletions
diff --git a/cache-tree.c b/cache-tree.c
index 32772b9564..a59e6f1e1f 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -592,7 +592,7 @@ static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *pat
return it;
}
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
int entries, was_valid, newfd;
struct lock_file *lock_file;
@@ -603,23 +603,23 @@ int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
*/
lock_file = xcalloc(1, sizeof(struct lock_file));
- newfd = hold_locked_index(lock_file, 1);
+ newfd = hold_lock_file_for_update(lock_file, index_path, LOCK_DIE_ON_ERROR);
- entries = read_cache();
+ entries = read_index_from(index_state, index_path);
if (entries < 0)
return WRITE_TREE_UNREADABLE_INDEX;
if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
- cache_tree_free(&(active_cache_tree));
+ cache_tree_free(&index_state->cache_tree);
- if (!active_cache_tree)
- active_cache_tree = cache_tree();
+ if (!index_state->cache_tree)
+ index_state->cache_tree = cache_tree();
- was_valid = cache_tree_fully_valid(active_cache_tree);
+ was_valid = cache_tree_fully_valid(index_state->cache_tree);
if (!was_valid) {
- if (cache_tree_update(&the_index, flags) < 0)
+ if (cache_tree_update(index_state, flags) < 0)
return WRITE_TREE_UNMERGED_INDEX;
if (0 <= newfd) {
- if (!write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (!write_locked_index(index_state, lock_file, COMMIT_LOCK))
newfd = -1;
}
/* Not being able to write is fine -- we are only interested
@@ -631,14 +631,14 @@ int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
}
if (prefix) {
- struct cache_tree *subtree =
- cache_tree_find(active_cache_tree, prefix);
+ struct cache_tree *subtree;
+ subtree = cache_tree_find(index_state->cache_tree, prefix);
if (!subtree)
return WRITE_TREE_PREFIX_ERROR;
hashcpy(sha1, subtree->sha1);
}
else
- hashcpy(sha1, active_cache_tree->sha1);
+ hashcpy(sha1, index_state->cache_tree->sha1);
if (0 <= newfd)
rollback_lock_file(lock_file);
@@ -646,13 +646,18 @@ int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
return 0;
}
+int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+{
+ return write_index_as_tree(sha1, &the_index, get_index_file(), flags, prefix);
+}
+
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
struct tree_desc desc;
struct name_entry entry;
int cnt;
- hashcpy(it->sha1, tree->object.sha1);
+ hashcpy(it->sha1, tree->object.oid.hash);
init_tree_desc(&desc, tree->buffer, tree->size);
cnt = 0;
while (tree_entry(&desc, &entry)) {
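
Note: the change above makes write_index_as_tree() operate on an explicit index_state and index file path, while write_cache_as_tree() becomes a thin wrapper over the_index and get_index_file(). A minimal caller sketch follows, assuming git's internal headers; the helper name and the use of 0 flags and a NULL prefix are illustrative assumptions, not part of this commit.

#include "cache.h"
#include "cache-tree.h"

/*
 * Hypothetical helper (illustrative only): write a tree object from an
 * arbitrary index file rather than the repository's default index.
 * write_index_as_tree() itself locks the file at index_path, reads it
 * into istate, updates the cache-tree as needed, and stores the
 * resulting tree's SHA-1 into sha1.
 */
static int write_tree_from_index_file(unsigned char *sha1,
				      struct index_state *istate,
				      const char *index_path)
{
	return write_index_as_tree(sha1, istate, index_path, 0, NULL);
}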