Diffstat (limited to 'cache-tree.c')
-rw-r--r--	cache-tree.c	66
1 file changed, 31 insertions(+), 35 deletions(-)
diff --git a/cache-tree.c b/cache-tree.c
index 2440d1dc89..c52e4303df 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -49,7 +49,7 @@ static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
 	lo = 0;
 	hi = it->subtree_nr;
 	while (lo < hi) {
-		int mi = (lo + hi) / 2;
+		int mi = lo + (hi - lo) / 2;
 		struct cache_tree_sub *mdl = down[mi];
 		int cmp = subtree_name_cmp(path, pathlen,
 					   mdl->name, mdl->namelen);
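
The midpoint rewrite is the classic overflow fix: hi - lo always fits in an
int, while lo + hi can exceed INT_MAX once both indices are large, making
(lo + hi) / 2 hit signed overflow. A minimal standalone sketch of the same
search shape (find() and names[] are hypothetical, not git code; the return
convention mirrors subtree_pos() above):

    #include <string.h>

    /* Return the index of key in sorted names[0..nr-1], or
     * -insert_pos - 1 when absent, like subtree_pos(). */
    static int find(const char **names, int nr, const char *key)
    {
            int lo = 0, hi = nr;

            while (lo < hi) {
                    int mi = lo + (hi - lo) / 2;  /* overflow-safe midpoint */
                    int cmp = strcmp(key, names[mi]);

                    if (!cmp)
                            return mi;            /* exact match */
                    if (cmp < 0)
                            hi = mi;              /* key sorts before names[mi] */
                    else
                            lo = mi + 1;          /* key sorts after names[mi] */
            }
            return -lo - 1;                       /* lo is the insertion point */
    }
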
@@ -84,9 +84,8 @@ static struct cache_tree_sub *find_subtree(struct cache_tree *it,
 	down->namelen = pathlen;
 
 	if (pos < it->subtree_nr)
-		memmove(it->down + pos + 1,
-			it->down + pos,
-			sizeof(down) * (it->subtree_nr - pos - 1));
+		MOVE_ARRAY(it->down + pos + 1, it->down + pos,
+			   it->subtree_nr - pos - 1);
 	it->down[pos] = down;
 	return down;
 }
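
MOVE_ARRAY (git-compat-util.h) takes an element count instead of a byte
count. The memmove it replaces was correct only because it->down stores
pointers, so sizeof(down) happened to equal the element size; the macro
removes that trap and also compile-time-asserts that source and destination
elements have the same size. A simplified sketch of the idea (the real
macro additionally tolerates NULL pointers when the count is zero):

    #include <string.h>

    /* The byte count is derived from the destination's element size,
     * so callers cannot botch the sizeof() arithmetic. */
    #define MOVE_ARRAY(dst, src, n) \
            memmove((dst), (src), (n) * sizeof(*(dst)))

    static void demo(void)
    {
            int buf[8] = { 1, 2, 3, 4, 5 };

            /* shift elements 2..4 up by one, opening a gap at index 2,
             * just as find_subtree() opens a slot for the new subtree */
            MOVE_ARRAY(buf + 3, buf + 2, 3);
            buf[2] = 0;
    }
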
@@ -400,16 +399,16 @@ static int update_one(struct cache_tree *it,
 	}
 
 	if (repair) {
-		unsigned char sha1[20];
-		hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1);
-		if (has_sha1_file(sha1))
-			hashcpy(it->oid.hash, sha1);
+		struct object_id oid;
+		hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
+		if (has_sha1_file(oid.hash))
+			oidcpy(&it->oid, &oid);
 		else
 			to_invalidate = 1;
-	} else if (dryrun)
-		hash_sha1_file(buffer.buf, buffer.len, tree_type,
-			       it->oid.hash);
-	else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) {
+	} else if (dryrun) {
+		hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
+	} else if (write_object_file(buffer.buf, buffer.len, tree_type,
+				     &it->oid)) {
 		strbuf_release(&buffer);
 		return -1;
 	}
@@ -602,20 +601,17 @@ static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *pat
 
 int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
 {
-	int entries, was_valid, newfd;
-	struct lock_file *lock_file;
+	int entries, was_valid;
+	struct lock_file lock_file = LOCK_INIT;
+	int ret = 0;
 
-	/*
-	 * We can't free this memory, it becomes part of a linked list
-	 * parsed atexit()
-	 */
-	lock_file = xcalloc(1, sizeof(struct lock_file));
+	hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);
 
-	newfd = hold_lock_file_for_update(lock_file, index_path, LOCK_DIE_ON_ERROR);
-
-	entries = read_index_from(index_state, index_path);
-	if (entries < 0)
-		return WRITE_TREE_UNREADABLE_INDEX;
+	entries = read_index_from(index_state, index_path, get_git_dir());
+	if (entries < 0) {
+		ret = WRITE_TREE_UNREADABLE_INDEX;
+		goto out;
+	}
 
 	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
 		cache_tree_free(&index_state->cache_tree);
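
Three changes land together here. The lock_file moves from a leaked heap
allocation to the stack: the deleted comment documented the old constraint
that lock_file structs were registered in a list walked at exit and so
could never be freed, a restriction lifted shortly before this change,
which is what makes LOCK_INIT on the stack safe. The newfd return value is
dropped, since LOCK_DIE_ON_ERROR means the call either succeeds or dies.
And read_index_from() picks up the gitdir argument its signature had grown
elsewhere. With a real cleanup label available, the early returns become
goto out so the lock is always released. A condensed sketch of the
resulting shape, using the same lockfile calls as the diff (step_one() is a
stand-in for the real work):

    static int example(const char *path)
    {
            struct lock_file lock = LOCK_INIT;  /* on the stack, no xcalloc() */
            int ret = 0;

            hold_lock_file_for_update(&lock, path, LOCK_DIE_ON_ERROR);

            if (step_one() < 0) {
                    ret = -1;
                    goto out;           /* every error path funnels here */
            }
    out:
            rollback_lock_file(&lock);  /* single point of release */
            return ret;
    }
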
@@ -624,12 +620,11 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
 
 	was_valid = cache_tree_fully_valid(index_state->cache_tree);
 	if (!was_valid) {
-		if (cache_tree_update(index_state, flags) < 0)
-			return WRITE_TREE_UNMERGED_INDEX;
-		if (0 <= newfd) {
-			if (!write_locked_index(index_state, lock_file, COMMIT_LOCK))
-				newfd = -1;
+		if (cache_tree_update(index_state, flags) < 0) {
+			ret = WRITE_TREE_UNMERGED_INDEX;
+			goto out;
 		}
+		write_locked_index(index_state, &lock_file, COMMIT_LOCK);
 		/* Not being able to write is fine -- we are only interested
 		 * in updating the cache-tree part, and if the next caller
 		 * ends up using the old index with unupdated cache-tree part
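
The newfd dance disappears along with the variable, and the return value of
write_locked_index() is now deliberately ignored, exactly as the surviving
comment argues: persisting the freshly computed cache-tree is an
optimization, and a failed write merely makes the next reader redo the
work. COMMIT_LOCK means a successful write renames the lockfile into place;
in the lockfile API, rolling back a lock that was already committed is a
no-op, which is what lets the unconditional rollback in the next hunk be
safe. A sketch of that life cycle (lockfile API semantics as I read them,
not code from this diff; all_went_well is a hypothetical condition):

    struct lock_file lk = LOCK_INIT;

    hold_lock_file_for_update(&lk, "somefile", LOCK_DIE_ON_ERROR);
    /* ... write new content into the lockfile's temporary file ... */
    if (all_went_well)
            commit_lock_file(&lk);      /* somefile.lock -> somefile */
    rollback_lock_file(&lk);            /* no-op once committed */
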
@@ -641,17 +636,18 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
 	if (prefix) {
 		struct cache_tree *subtree;
 		subtree = cache_tree_find(index_state->cache_tree, prefix);
-		if (!subtree)
-			return WRITE_TREE_PREFIX_ERROR;
+		if (!subtree) {
+			ret = WRITE_TREE_PREFIX_ERROR;
+			goto out;
+		}
 		hashcpy(sha1, subtree->oid.hash);
 	}
 	else
 		hashcpy(sha1, index_state->cache_tree->oid.hash);
 
-	if (0 <= newfd)
-		rollback_lock_file(lock_file);
-
-	return 0;
+out:
+	rollback_lock_file(&lock_file);
+	return ret;
 }
 
 int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
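
Success and failure now share one exit: ret stays 0 when a tree hash was
copied into sha1, and rollback_lock_file() runs either way, doing nothing
if the lock was already committed. For context, a rough sketch of how a
caller consumes this API, loosely following builtin/write-tree.c (error
messages abbreviated):

    unsigned char sha1[20];

    switch (write_cache_as_tree(sha1, 0, NULL)) {
    case 0:
            printf("%s\n", sha1_to_hex(sha1));  /* the resulting tree */
            break;
    case WRITE_TREE_UNREADABLE_INDEX:
            die("error reading the index");
    case WRITE_TREE_UNMERGED_INDEX:
            die("error building trees");
    case WRITE_TREE_PREFIX_ERROR:
            die("prefix not found");
    }
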