path: root/cache-tree.c
author    Junio C Hamano <gitster@pobox.com>  2010-01-30 16:03:10 -0800
committer Junio C Hamano <gitster@pobox.com>  2010-01-30 16:03:10 -0800
commit    00d3278c8534a8244ae3447189401111e017fd5d (patch)
tree      f1c19903bc10ffe4816642040080fb6cfd5da376 /cache-tree.c
parent    t6000lib: Fix permission (diff)
parent    Add a small patch-mode testing library (diff)
download  tgif-00d3278c8534a8244ae3447189401111e017fd5d.tar.xz
Merge commit 'b319ef7' into jc/maint-fix-test-perm
* commit 'b319ef7': (8132 commits)
  Add a small patch-mode testing library
  git-apply--interactive: Refactor patch mode code
  t8005: Nobody writes Russian in shift_jis
  Fix severe breakage in "git-apply --whitespace=fix"
  Update release notes for 1.6.4
  After renaming a section, print any trailing variable definitions
  Make section_name_match start on '[', and return the length on success
  send-email: detect cycles in alias expansion
  Show the presence of untracked files in the bash prompt.
  SunOS grep does not understand -C<n> nor -e
  Fix export_marks() error handling.
  git repack: keep commits hidden by a graft
  Add a test showing that 'git repack' throws away grafted-away parents
  git branch: clean up detached branch handling
  git branch: avoid unnecessary object lookups
  git branch: fix performance problem
  git svn: fix shallow clone when upstream revision is too new
  do_one_ref(): null_sha1 check is not about broken ref
  configure.ac: properly unset NEEDS_SSL_WITH_CRYPTO when sha1 func is missing
  janitor: useless checks before free
  ...
Diffstat (limited to 'cache-tree.c')
-rw-r--r--  cache-tree.c  215
1 file changed, 162 insertions(+), 53 deletions(-)
diff --git a/cache-tree.c b/cache-tree.c
index 9b73c8669a..d91743775d 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -1,5 +1,6 @@
#include "cache.h"
#include "tree.h"
+#include "tree-walk.h"
#include "cache-tree.h"
#ifndef DEBUG
@@ -155,13 +156,17 @@ static int verify_cache(struct cache_entry **cache,
funny = 0;
for (i = 0; i < entries; i++) {
struct cache_entry *ce = cache[i];
- if (ce_stage(ce)) {
+ if (ce_stage(ce) || (ce->ce_flags & CE_INTENT_TO_ADD)) {
if (10 < ++funny) {
fprintf(stderr, "...\n");
break;
}
- fprintf(stderr, "%s: unmerged (%s)\n",
- ce->name, sha1_to_hex(ce->sha1));
+ if (ce_stage(ce))
+ fprintf(stderr, "%s: unmerged (%s)\n",
+ ce->name, sha1_to_hex(ce->sha1));
+ else
+ fprintf(stderr, "%s: not added yet\n",
+ ce->name);
}
}
if (funny)
@@ -235,8 +240,7 @@ static int update_one(struct cache_tree *it,
int missing_ok,
int dryrun)
{
- unsigned long size, offset;
- char *buffer;
+ struct strbuf buffer;
int i;
if (0 <= it->entry_count && has_sha1_file(it->sha1))
@@ -293,9 +297,7 @@ static int update_one(struct cache_tree *it,
/*
* Then write out the tree object for this level.
*/
- size = 8192;
- buffer = xmalloc(size);
- offset = 0;
+ strbuf_init(&buffer, 8192);
for (i = 0; i < entries; i++) {
struct cache_entry *ce = cache[i];
@@ -323,24 +325,19 @@ static int update_one(struct cache_tree *it,
}
else {
sha1 = ce->sha1;
- mode = ntohl(ce->ce_mode);
+ mode = ce->ce_mode;
entlen = pathlen - baselen;
}
- if (!missing_ok && !has_sha1_file(sha1))
- return error("invalid object %s", sha1_to_hex(sha1));
+ if (mode != S_IFGITLINK && !missing_ok && !has_sha1_file(sha1))
+ return error("invalid object %06o %s for '%.*s'",
+ mode, sha1_to_hex(sha1), entlen+baselen, path);
- if (!ce->ce_mode)
+ if (ce->ce_flags & CE_REMOVE)
continue; /* entry being removed */
- if (size < offset + entlen + 100) {
- size = alloc_nr(offset + entlen + 100);
- buffer = xrealloc(buffer, size);
- }
- offset += sprintf(buffer + offset,
- "%o %.*s", mode, entlen, path + baselen);
- buffer[offset++] = 0;
- hashcpy((unsigned char*)buffer + offset, sha1);
- offset += 20;
+ strbuf_grow(&buffer, entlen + 100);
+ strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
+ strbuf_add(&buffer, sha1, 20);
#if DEBUG
fprintf(stderr, "cache-tree update-one %o %.*s\n",
@@ -349,10 +346,13 @@ static int update_one(struct cache_tree *it,
}
if (dryrun)
- hash_sha1_file(buffer, offset, tree_type, it->sha1);
- else
- write_sha1_file(buffer, offset, tree_type, it->sha1);
- free(buffer);
+ hash_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
+ else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1)) {
+ strbuf_release(&buffer);
+ return -1;
+ }
+
+ strbuf_release(&buffer);
it->entry_count = i;
#if DEBUG
fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
@@ -378,12 +378,8 @@ int cache_tree_update(struct cache_tree *it,
return 0;
}
-static void *write_one(struct cache_tree *it,
- char *path,
- int pathlen,
- char *buffer,
- unsigned long *size,
- unsigned long *offset)
+static void write_one(struct strbuf *buffer, struct cache_tree *it,
+ const char *path, int pathlen)
{
int i;
@@ -393,13 +389,9 @@ static void *write_one(struct cache_tree *it,
* tree-sha1 (missing if invalid)
* subtree_nr "cache-tree" entries for subtrees.
*/
- if (*size < *offset + pathlen + 100) {
- *size = alloc_nr(*offset + pathlen + 100);
- buffer = xrealloc(buffer, *size);
- }
- *offset += sprintf(buffer + *offset, "%.*s%c%d %d\n",
- pathlen, path, 0,
- it->entry_count, it->subtree_nr);
+ strbuf_grow(buffer, pathlen + 100);
+ strbuf_add(buffer, path, pathlen);
+ strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);
#if DEBUG
if (0 <= it->entry_count)
@@ -412,8 +404,7 @@ static void *write_one(struct cache_tree *it,
#endif
if (0 <= it->entry_count) {
- hashcpy((unsigned char*)buffer + *offset, it->sha1);
- *offset += 20;
+ strbuf_add(buffer, it->sha1, 20);
}
for (i = 0; i < it->subtree_nr; i++) {
struct cache_tree_sub *down = it->down[i];
@@ -423,21 +414,13 @@ static void *write_one(struct cache_tree *it,
prev->name, prev->namelen) <= 0)
die("fatal - unsorted cache subtree");
}
- buffer = write_one(down->cache_tree, down->name, down->namelen,
- buffer, size, offset);
+ write_one(buffer, down->cache_tree, down->name, down->namelen);
}
- return buffer;
}
-void *cache_tree_write(struct cache_tree *root, unsigned long *size_p)
+void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
- char path[PATH_MAX];
- unsigned long size = 8192;
- char *buffer = xmalloc(size);
-
- *size_p = 0;
- path[0] = 0;
- return write_one(root, path, 0, buffer, &size, size_p);
+ write_one(sb, root, "", 0);
}
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
@@ -478,7 +461,7 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
if (0 <= it->entry_count) {
if (size < 20)
goto free_return;
- hashcpy(it->sha1, (unsigned char*)buf);
+ hashcpy(it->sha1, (const unsigned char*)buf);
buf += 20;
size -= 20;
}
@@ -530,8 +513,10 @@ struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
return read_one(&buffer, &size);
}
-struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
+static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
+ if (!it)
+ return NULL;
while (*path) {
const char *slash;
struct cache_tree_sub *sub;
@@ -555,3 +540,127 @@ struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
}
return it;
}
+
+int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+{
+ int entries, was_valid, newfd;
+ struct lock_file *lock_file;
+
+ /*
+ * We can't free this memory, it becomes part of a linked list
+ * parsed atexit()
+ */
+ lock_file = xcalloc(1, sizeof(struct lock_file));
+
+ newfd = hold_locked_index(lock_file, 1);
+
+ entries = read_cache();
+ if (entries < 0)
+ return WRITE_TREE_UNREADABLE_INDEX;
+ if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
+ cache_tree_free(&(active_cache_tree));
+
+ if (!active_cache_tree)
+ active_cache_tree = cache_tree();
+
+ was_valid = cache_tree_fully_valid(active_cache_tree);
+ if (!was_valid) {
+ int missing_ok = flags & WRITE_TREE_MISSING_OK;
+
+ if (cache_tree_update(active_cache_tree,
+ active_cache, active_nr,
+ missing_ok, 0) < 0)
+ return WRITE_TREE_UNMERGED_INDEX;
+ if (0 <= newfd) {
+ if (!write_cache(newfd, active_cache, active_nr) &&
+ !commit_lock_file(lock_file))
+ newfd = -1;
+ }
+ /* Not being able to write is fine -- we are only interested
+ * in updating the cache-tree part, and if the next caller
+ * ends up using the old index with unupdated cache-tree part
+ * it misses the work we did here, but that is just a
+ * performance penalty and not a big deal.
+ */
+ }
+
+ if (prefix) {
+ struct cache_tree *subtree =
+ cache_tree_find(active_cache_tree, prefix);
+ if (!subtree)
+ return WRITE_TREE_PREFIX_ERROR;
+ hashcpy(sha1, subtree->sha1);
+ }
+ else
+ hashcpy(sha1, active_cache_tree->sha1);
+
+ if (0 <= newfd)
+ rollback_lock_file(lock_file);
+
+ return 0;
+}
+
+static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
+{
+ struct tree_desc desc;
+ struct name_entry entry;
+ int cnt;
+
+ hashcpy(it->sha1, tree->object.sha1);
+ init_tree_desc(&desc, tree->buffer, tree->size);
+ cnt = 0;
+ while (tree_entry(&desc, &entry)) {
+ if (!S_ISDIR(entry.mode))
+ cnt++;
+ else {
+ struct cache_tree_sub *sub;
+ struct tree *subtree = lookup_tree(entry.sha1);
+ if (!subtree->object.parsed)
+ parse_tree(subtree);
+ sub = cache_tree_sub(it, entry.path);
+ sub->cache_tree = cache_tree();
+ prime_cache_tree_rec(sub->cache_tree, subtree);
+ cnt += sub->cache_tree->entry_count;
+ }
+ }
+ it->entry_count = cnt;
+}
+
+void prime_cache_tree(struct cache_tree **it, struct tree *tree)
+{
+ cache_tree_free(it);
+ *it = cache_tree();
+ prime_cache_tree_rec(*it, tree);
+}
+
+/*
+ * find the cache_tree that corresponds to the current level without
+ * exploding the full path into textual form. The root of the
+ * cache tree is given as "root", and our current level is "info".
+ * (1) When at root level, info->prev is NULL, so it is "root" itself.
+ * (2) Otherwise, find the cache_tree that corresponds to one level
+ * above us, and find ourselves in there.
+ */
+static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
+ struct traverse_info *info)
+{
+ struct cache_tree *our_parent;
+
+ if (!info->prev)
+ return root;
+ our_parent = find_cache_tree_from_traversal(root, info->prev);
+ return cache_tree_find(our_parent, info->name.path);
+}
+
+int cache_tree_matches_traversal(struct cache_tree *root,
+ struct name_entry *ent,
+ struct traverse_info *info)
+{
+ struct cache_tree *it;
+
+ it = find_cache_tree_from_traversal(root, info);
+ it = cache_tree_find(it, ent->path);
+ if (it && it->entry_count > 0 && !hashcmp(ent->sha1, it->sha1))
+ return it->entry_count;
+ return 0;
+}
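
The merged changes above turn the old write-tree logic into a reusable write_cache_as_tree() entry point and add prime_cache_tree() for seeding the in-core cache-tree from an existing tree object. Below is a minimal sketch of how a caller might drive these helpers; it is illustrative only and not part of the patch, and it assumes the git-internal headers and helpers that already exist in this tree (cache.h, tree.h, cache-tree.h, parse_tree_indirect(), die(), sha1_to_hex(), the active_cache_tree macro).

/*
 * Illustrative sketch only -- not part of the patch above.
 * Assumes the usual git-internal headers and helpers.
 */
#include "cache.h"
#include "tree.h"
#include "cache-tree.h"

static void demo_write_and_prime(const unsigned char *tree_sha1)
{
	unsigned char sha1[20];

	/*
	 * Recompute any invalidated cache-tree entries and write out
	 * the tree object for the whole index.
	 */
	switch (write_cache_as_tree(sha1, 0, NULL)) {
	case 0:
		printf("%s\n", sha1_to_hex(sha1));
		break;
	case WRITE_TREE_UNREADABLE_INDEX:
		die("unable to read the index file");
	case WRITE_TREE_UNMERGED_INDEX:
		die("index has unmerged or not-yet-added entries");
	case WRITE_TREE_PREFIX_ERROR:
		die("prefix does not name an existing subtree");
	}

	/*
	 * Seed the in-core cache-tree from a known tree (e.g. after a
	 * clean checkout), so the next write-tree can reuse its subtrees
	 * instead of recomputing them.
	 */
	if (tree_sha1) {
		struct tree *tree = parse_tree_indirect(tree_sha1);
		if (!tree)
			die("not a tree: %s", sha1_to_hex(tree_sha1));
		prime_cache_tree(&active_cache_tree, tree);
	}
}

Note that, as the comment inside write_cache_as_tree() explains, failing to write the refreshed cache-tree back to the on-disk index is not an error; the next caller merely redoes the work, so the lock is simply rolled back at the end.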