53 files changed, 1992 insertions, 711 deletions
diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt index 4cd0cb90ad..71ecd858aa 100644 --- a/Documentation/git-branch.txt +++ b/Documentation/git-branch.txt @@ -7,16 +7,20 @@ git-branch - Create a new branch, or remove an old one SYNOPSIS -------- -'git-branch' [(-d | -D) <branchname>] | [[-f] <branchname> [<start-point>]] +[verse] +'git-branch' [[-f] <branchname> [<start-point>]] +'git-branch' (-d | -D) <branchname> DESCRIPTION ----------- If no argument is provided, show available branches and mark current branch with star. Otherwise, create a new branch of name <branchname>. - If a starting point is also specified, that will be where the branch is created, otherwise it will be created at the current HEAD. +With a `-d` or `-D` option, `<branchname>` will be deleted. + + OPTIONS ------- -d:: @@ -39,7 +43,7 @@ OPTIONS Examples ~~~~~~~~ -Start development off of a know tag:: +Start development off of a known tag:: + ------------ $ git clone git://git.kernel.org/pub/scm/.../linux-2.6 my2.6 diff --git a/Documentation/git-checkout.txt b/Documentation/git-checkout.txt index 556e733c9b..985bb2f827 100644 --- a/Documentation/git-checkout.txt +++ b/Documentation/git-checkout.txt @@ -7,15 +7,18 @@ git-checkout - Checkout and switch to a branch SYNOPSIS -------- -'git-checkout' [-f] [-b <new_branch>] [-m] [<branch>] [<paths>...] +[verse] +'git-checkout' [-f] [-b <new_branch>] [-m] [<branch>] +'git-checkout' [-m] [<branch>] <paths>... DESCRIPTION ----------- -When <paths> are not given, this command switches branches, by +When <paths> are not given, this command switches branches by updating the index and working tree to reflect the specified branch, <branch>, and updating HEAD to be <branch> or, if -specified, <new_branch>. +specified, <new_branch>. Using -b will cause <new_branch> to +be created. When <paths> are given, this command does *not* switch branches. It updates the named paths in the working tree from @@ -29,17 +32,17 @@ given paths before updating the working tree. OPTIONS ------- -f:: - Force an re-read of everything. + Force a re-read of everything. -b:: Create a new branch and start it at <branch>. -m:: - If you have local modifications to a file that is - different between the current branch and the branch you - are switching to, the command refuses to switch - branches, to preserve your modifications in context. - With this option, a three-way merge between the current + If you have local modifications to one or more files that + are different between the current branch and the branch to + which you are switching, the command refuses to switch + branches in order to preserve your modifications in context. + However, with this option, a three-way merge between the current branch, your working tree contents, and the new branch is done, and you will be on the new branch. + @@ -82,7 +85,7 @@ $ git checkout -- hello.c ------------ . After working in a wrong branch, switching to the correct -branch you would want to is done with: +branch would be done using: + ------------ $ git checkout mytopic diff --git a/Documentation/git-commit.txt b/Documentation/git-commit.txt index 214ed235c5..d04b342a95 100644 --- a/Documentation/git-commit.txt +++ b/Documentation/git-commit.txt @@ -18,6 +18,10 @@ Updates the index file for given paths, or all modified files if VISUAL and EDITOR environment variables to edit the commit log message. +Several environment variable are used during commits. They are +documented in gitlink:git-commit-tree[1]. 
+ + This command can run `commit-msg`, `pre-commit`, and `post-commit` hooks. See link:hooks.html[hooks] for more information. diff --git a/Documentation/git-cvsimport.txt b/Documentation/git-cvsimport.txt index 57027b448f..b0c6d7c303 100644 --- a/Documentation/git-cvsimport.txt +++ b/Documentation/git-cvsimport.txt @@ -99,21 +99,24 @@ If you need to pass multiple options, separate them with a comma. CVS by default uses the unix username when writing its commit logs. Using this option and an author-conv-file in this format - ++ +--------- exon=Andreas Ericsson <ae@op5.se> spawn=Simon Pawn <spawn@frog-pond.org> - git-cvsimport will make it appear as those authors had - their GIT_AUTHOR_NAME and GIT_AUTHOR_EMAIL set properly - all along. - - For convenience, this data is saved to $GIT_DIR/cvs-authors - each time the -A option is provided and read from that same - file each time git-cvsimport is run. - - It is not recommended to use this feature if you intend to - export changes back to CVS again later with - git-link[1]::git-cvsexportcommit. +--------- ++ +git-cvsimport will make it appear as those authors had +their GIT_AUTHOR_NAME and GIT_AUTHOR_EMAIL set properly +all along. ++ +For convenience, this data is saved to $GIT_DIR/cvs-authors +each time the -A option is provided and read from that same +file each time git-cvsimport is run. ++ +It is not recommended to use this feature if you intend to +export changes back to CVS again later with +git-link[1]::git-cvsexportcommit. OUTPUT ------ diff --git a/Documentation/git-fsck-objects.txt b/Documentation/git-fsck-objects.txt index 387b435484..93ce9dcc92 100644 --- a/Documentation/git-fsck-objects.txt +++ b/Documentation/git-fsck-objects.txt @@ -10,7 +10,7 @@ SYNOPSIS -------- [verse] 'git-fsck-objects' [--tags] [--root] [--unreachable] [--cache] - [--standalone | --full] [--strict] [<object>*] + [--full] [--strict] [<object>*] DESCRIPTION ----------- @@ -38,21 +38,14 @@ index file and all SHA1 references in .git/refs/* as heads. Consider any object recorded in the index also as a head node for an unreachability trace. ---standalone:: - Limit checks to the contents of GIT_OBJECT_DIRECTORY - ($GIT_DIR/objects), making sure that it is consistent and - complete without referring to objects found in alternate - object pools listed in GIT_ALTERNATE_OBJECT_DIRECTORIES, - nor packed git archives found in $GIT_DIR/objects/pack; - cannot be used with --full. - --full:: Check not just objects in GIT_OBJECT_DIRECTORY ($GIT_DIR/objects), but also the ones found in alternate - object pools listed in GIT_ALTERNATE_OBJECT_DIRECTORIES, + object pools listed in GIT_ALTERNATE_OBJECT_DIRECTORIES + or $GIT_DIR/objects/info/alternates, and in packed git archives found in $GIT_DIR/objects/pack and corresponding pack subdirectories in alternate - object pools; cannot be used with --standalone. + object pools. --strict:: Enable more strict checking, namely to catch a file mode diff --git a/Documentation/git-init-db.txt b/Documentation/git-init-db.txt index ea4d849aa3..aeb1115af9 100644 --- a/Documentation/git-init-db.txt +++ b/Documentation/git-init-db.txt @@ -14,7 +14,8 @@ SYNOPSIS OPTIONS ------- --template=<template_directory>:: - Provide the directory in from which templates will be used. + Provide the directory from which templates will be used. + The default template directory is `/usr/share/git-core/templates`. --shared:: Specify that the git repository is to be shared amongst several users. 
@@ -22,9 +23,17 @@ OPTIONS DESCRIPTION ----------- -This simply creates an empty git repository - basically a `.git` directory -and `.git/object/??/`, `.git/refs/heads` and `.git/refs/tags` directories, -and links `.git/HEAD` symbolically to `.git/refs/heads/master`. +This command creates an empty git repository - basically a `.git` directory +with subdirectories for `objects`, `refs/heads`, `refs/tags`, and +templated files. +An initial `HEAD` file that references the HEAD of the master branch +is also created. + +If `--template=<template_directory>` is specified, `<template_directory>` +is used as the source of the template files rather than the default. +The template files include some directory structure, some suggested +"exclude patterns", and copies of non-executing "hook" files. The +suggested patterns and hook files are all modifiable and extensible. If the `$GIT_DIR` environment variable is set then it specifies a path to use instead of `./.git` for the base of the repository. @@ -38,7 +47,6 @@ repository. When specifying `--shared` the config variable "core.sharedRepositor is set to 'true' so that directories under `$GIT_DIR` are made group writable (and g+sx, since the git group may be not the primary group of all users). - Running `git-init-db` in an existing repository is safe. It will not overwrite things that are already there. The primary reason for rerunning `git-init-db` is to pick up newly added templates. diff --git a/Documentation/git-ls-files.txt b/Documentation/git-ls-files.txt index e813f84202..59f6adc494 100644 --- a/Documentation/git-ls-files.txt +++ b/Documentation/git-ls-files.txt @@ -14,9 +14,9 @@ SYNOPSIS (-[c|d|o|i|s|u|k|m])\* [-x <pattern>|--exclude=<pattern>] [-X <file>|--exclude-from=<file>] - [--exclude-per-directory=<file>] + [--exclude-per-directory=<file>] [--error-unmatch] - [--full-name] [--] [<file>]\* + [--full-name] [--abbrev] [--] [<file>]\* DESCRIPTION ----------- @@ -98,6 +98,11 @@ OPTIONS option forces paths to be output relative to the project top directory. +--abbrev[=<n>]:: + Instead of showing the full 40-byte hexadecimal object + lines, show only handful hexdigits prefix. + Non default number of digits can be specified with --abbrev=<n>. + --:: Do not interpret any more arguments as options. diff --git a/Documentation/git-ls-tree.txt b/Documentation/git-ls-tree.txt index 5bf6d8b613..018c401953 100644 --- a/Documentation/git-ls-tree.txt +++ b/Documentation/git-ls-tree.txt @@ -8,7 +8,9 @@ git-ls-tree - Lists the contents of a tree object SYNOPSIS -------- -'git-ls-tree' [-d] [-r] [-t] [-z] [--name-only] [--name-status] <tree-ish> [paths...] +'git-ls-tree' [-d] [-r] [-t] [-z] + [--name-only] [--name-status] [--full-name] [--abbrev=[<n>]] + <tree-ish> [paths...] DESCRIPTION ----------- @@ -40,6 +42,11 @@ OPTIONS --name-status:: List only filenames (instead of the "long" output), one per line. +--abbrev[=<n>]:: + Instead of showing the full 40-byte hexadecimal object + lines, show only handful hexdigits prefix. + Non default number of digits can be specified with --abbrev=<n>. + paths:: When paths are given, show them (note that this isn't really raw pathnames, but rather a list of patterns to match). 
Otherwise diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index 4d5b546db1..b36276c7ed 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -25,7 +25,7 @@ Assume the following history exists and the current branch is "topic": / D---E---F---G master -From this point, the result of the following commands: +From this point, the result of either of the following commands: git-rebase master git-rebase master topic @@ -36,7 +36,7 @@ would be: / D---E---F---G master -While, starting from the same point, the result of the following +While, starting from the same point, the result of either of the following commands: git-rebase --onto master~1 master @@ -58,7 +58,7 @@ OPTIONS <upstream>:: Upstream branch to compare against. -<head>:: +<branch>:: Working branch; defaults to HEAD. Author diff --git a/Documentation/git-show-branch.txt b/Documentation/git-show-branch.txt index d3b6e620a8..f115b45ef6 100644 --- a/Documentation/git-show-branch.txt +++ b/Documentation/git-show-branch.txt @@ -141,7 +141,7 @@ it, having the following in the configuration file may help: ------------ -With this,`git show-branch` without extra parameters would show +With this, `git show-branch` without extra parameters would show only the primary branches. In addition, if you happen to be on your topic branch, it is shown as well. diff --git a/Documentation/git-svnimport.txt b/Documentation/git-svnimport.txt index 9d3865719c..b1b87c2fcd 100644 --- a/Documentation/git-svnimport.txt +++ b/Documentation/git-svnimport.txt @@ -75,18 +75,21 @@ When importing incrementally, you might need to edit the .git/svn2git file. -A <author_file>:: Read a file with lines on the form ++ +------ + username = User's Full Name <email@addr.es> - username = User's Full Name <email@addr.es> - - and use "User's Full Name <email@addr.es>" as the GIT - author and committer for Subversion commits made by - "username". If encountering a commit made by a user not in the - list, abort. - - For convenience, this data is saved to $GIT_DIR/svn-authors - each time the -A option is provided, and read from that same - file each time git-svnimport is run with an existing GIT - repository without -A. +------ ++ +and use "User's Full Name <email@addr.es>" as the GIT +author and committer for Subversion commits made by +"username". If encountering a commit made by a user not in the +list, abort. ++ +For convenience, this data is saved to $GIT_DIR/svn-authors +each time the -A option is provided, and read from that same +file each time git-svnimport is run with an existing GIT +repository without -A. -m:: Attempt to detect merges based on the commit message. This option diff --git a/Documentation/git.txt b/Documentation/git.txt index 8610d36c49..de3934d098 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -329,6 +329,9 @@ gitlink:git-revert[1]:: gitlink:git-shortlog[1]:: Summarizes 'git log' output. +gitlink:git-show[1]:: + Show one commit log and its diff. + gitlink:git-show-branch[1]:: Show branches and their commits. 
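The --abbrev option documented above for git-ls-files and git-ls-tree takes an optional digit count. A usage sketch limited to the forms listed in those synopses (the path and the 8-digit width are arbitrary examples, not part of the patch):

    $ git-ls-files --stage --abbrev
    $ git-ls-tree -r --abbrev=8 HEAD Documentation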
@@ -190,7 +190,7 @@ PYMODULES = \ LIB_FILE=libgit.a LIB_H = \ - blob.h cache.h commit.h count-delta.h csum-file.h delta.h \ + blob.h cache.h commit.h csum-file.h delta.h \ diff.h object.h pack.h pkt-line.h quote.h refs.h \ run-command.h strbuf.h tag.h tree.h git-compat-util.h revision.h @@ -200,7 +200,7 @@ DIFF_OBJS = \ diffcore-delta.o LIB_OBJS = \ - blob.o commit.o connect.o count-delta.o csum-file.o \ + blob.o commit.o connect.o csum-file.o \ date.o diff-delta.o entry.o exec_cmd.o ident.o index.o \ object.o pack-check.o patch-delta.o path.o pkt-line.o \ quote.o read-cache.o refs.o run-command.o \ @@ -553,6 +553,13 @@ $(LIB_FILE): $(LIB_OBJS) doc: $(MAKE) -C Documentation all +TAGS: + rm -f TAGS + find . -name '*.[hcS]' -print | xargs etags -a + +tags: + rm -f tags + find . -name '*.[hcS]' -print | xargs ctags -a ### Testing rules @@ -617,7 +624,7 @@ rpm: dist clean: rm -f *.o mozilla-sha1/*.o arm/*.o ppc/*.o compat/*.o $(LIB_FILE) rm -f $(ALL_PROGRAMS) git$X - rm -f *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h + rm -f *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags rm -rf $(GIT_TARNAME) rm -f $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz $(MAKE) -C Documentation/ clean @@ -626,5 +633,5 @@ clean: rm -f GIT-VERSION-FILE .PHONY: all install clean strip -.PHONY: .FORCE-GIT-VERSION-FILE +.PHONY: .FORCE-GIT-VERSION-FILE TAGS tags @@ -14,6 +14,7 @@ #include "tree.h" #include "blob.h" #include "diff.h" +#include "diffcore.h" #include "revision.h" #define DEBUG 0 @@ -34,7 +35,9 @@ struct util_info { char *buf; unsigned long size; int num_lines; -// const char* path; + const char* pathname; + + void* topo_data; }; struct chunk { @@ -177,11 +180,13 @@ static int get_blob_sha1_internal(unsigned char *sha1, const char *base, unsigned mode, int stage); static unsigned char blob_sha1[20]; +static const char* blame_file; static int get_blob_sha1(struct tree *t, const char *pathname, unsigned char *sha1) { int i; const char *pathspec[2]; + blame_file = pathname; pathspec[0] = pathname; pathspec[1] = NULL; memset(blob_sha1, 0, sizeof(blob_sha1)); @@ -206,6 +211,10 @@ static int get_blob_sha1_internal(unsigned char *sha1, const char *base, if (S_ISDIR(mode)) return READ_TREE_RECURSIVE; + if (strncmp(blame_file, base, baselen) || + strcmp(blame_file + baselen, pathname)) + return -1; + memcpy(blob_sha1, sha1, 20); return -1; } @@ -342,25 +351,34 @@ static int map_line(struct commit *commit, int line) return info->line_map[line]; } -static int fill_util_info(struct commit *commit, const char *path) +static struct util_info* get_util(struct commit *commit) { - struct util_info *util; - if (commit->object.util) - return 0; + struct util_info *util = commit->object.util; + + if (util) + return util; util = xmalloc(sizeof(struct util_info)); + util->buf = NULL; + util->size = 0; + util->line_map = NULL; + util->num_lines = -1; + util->pathname = NULL; + commit->object.util = util; + return util; +} + +static int fill_util_info(struct commit *commit) +{ + struct util_info *util = commit->object.util; + + assert(util); + assert(util->pathname); - if (get_blob_sha1(commit->tree, path, util->sha1)) { - free(util); + if (get_blob_sha1(commit->tree, util->pathname, util->sha1)) return 1; - } else { - util->buf = NULL; - util->size = 0; - util->line_map = NULL; - util->num_lines = -1; - commit->object.util = util; + else return 0; - } } static void alloc_line_map(struct commit *commit) @@ -389,10 +407,11 @@ static void alloc_line_map(struct commit *commit) static void init_first_commit(struct 
commit* commit, const char* filename) { - struct util_info* util; + struct util_info* util = commit->object.util; int i; - if (fill_util_info(commit, filename)) + util->pathname = filename; + if (fill_util_info(commit)) die("fill_util_info failed"); alloc_line_map(commit); @@ -453,7 +472,7 @@ static void process_commits(struct rev_info *rev, const char *path, if(num_parents == 0) *initial = commit; - if(fill_util_info(commit, path)) + if (fill_util_info(commit)) continue; alloc_line_map(commit); @@ -471,7 +490,7 @@ static void process_commits(struct rev_info *rev, const char *path, printf("parent: %s\n", sha1_to_hex(parent->object.sha1)); - if(fill_util_info(parent, path)) { + if (fill_util_info(parent)) { num_parents--; continue; } @@ -511,6 +530,135 @@ static void process_commits(struct rev_info *rev, const char *path, } while ((commit = get_revision(rev)) != NULL); } + +static int compare_tree_path(struct rev_info* revs, + struct commit* c1, struct commit* c2) +{ + const char* paths[2]; + struct util_info* util = c2->object.util; + paths[0] = util->pathname; + paths[1] = NULL; + + diff_tree_setup_paths(get_pathspec(revs->prefix, paths)); + return rev_compare_tree(c1->tree, c2->tree); +} + + +static int same_tree_as_empty_path(struct rev_info *revs, struct tree* t1, + const char* path) +{ + const char* paths[2]; + paths[0] = path; + paths[1] = NULL; + + diff_tree_setup_paths(get_pathspec(revs->prefix, paths)); + return rev_same_tree_as_empty(t1); +} + +static const char* find_rename(struct commit* commit, struct commit* parent) +{ + struct util_info* cutil = commit->object.util; + struct diff_options diff_opts; + const char *paths[1]; + int i; + + if (DEBUG) { + printf("find_rename commit: %s ", + sha1_to_hex(commit->object.sha1)); + puts(sha1_to_hex(parent->object.sha1)); + } + + diff_setup(&diff_opts); + diff_opts.recursive = 1; + diff_opts.detect_rename = DIFF_DETECT_RENAME; + paths[0] = NULL; + diff_tree_setup_paths(paths); + if (diff_setup_done(&diff_opts) < 0) + die("diff_setup_done failed"); + + diff_tree_sha1(commit->tree->object.sha1, parent->tree->object.sha1, + "", &diff_opts); + diffcore_std(&diff_opts); + + for (i = 0; i < diff_queued_diff.nr; i++) { + struct diff_filepair *p = diff_queued_diff.queue[i]; + + if (p->status == 'R' && !strcmp(p->one->path, cutil->pathname)) { + if (DEBUG) + printf("rename %s -> %s\n", p->one->path, p->two->path); + return p->two->path; + } + } + + return 0; +} + +static void simplify_commit(struct rev_info *revs, struct commit *commit) +{ + struct commit_list **pp, *parent; + + if (!commit->tree) + return; + + if (!commit->parents) { + struct util_info* util = commit->object.util; + if (!same_tree_as_empty_path(revs, commit->tree, + util->pathname)) + commit->object.flags |= TREECHANGE; + return; + } + + pp = &commit->parents; + while ((parent = *pp) != NULL) { + struct commit *p = parent->item; + + if (p->object.flags & UNINTERESTING) { + pp = &parent->next; + continue; + } + + parse_commit(p); + switch (compare_tree_path(revs, p, commit)) { + case REV_TREE_SAME: + parent->next = NULL; + commit->parents = parent; + get_util(p)->pathname = get_util(commit)->pathname; + return; + + case REV_TREE_NEW: + { + + struct util_info* util = commit->object.util; + if (revs->remove_empty_trees && + same_tree_as_empty_path(revs, p->tree, + util->pathname)) { + const char* new_name = find_rename(commit, p); + if (new_name) { + struct util_info* putil = get_util(p); + if (!putil->pathname) + putil->pathname = strdup(new_name); + } else { + *pp = 
parent->next; + continue; + } + } + } + + /* fallthrough */ + case REV_TREE_DIFFERENT: + pp = &parent->next; + if (!get_util(p)->pathname) + get_util(p)->pathname = + get_util(commit)->pathname; + continue; + } + die("bad tree compare for commit %s", + sha1_to_hex(commit->object.sha1)); + } + commit->object.flags |= TREECHANGE; +} + + struct commit_info { char* author; @@ -569,6 +717,18 @@ static const char* format_time(unsigned long time, const char* tz_str) return time_buf; } +static void topo_setter(struct commit* c, void* data) +{ + struct util_info* util = c->object.util; + util->topo_data = data; +} + +static void* topo_getter(struct commit* c) +{ + struct util_info* util = c->object.util; + return util->topo_data; +} + int main(int argc, const char **argv) { int i; @@ -580,14 +740,16 @@ int main(int argc, const char **argv) int sha1_len = 8; int compability = 0; int options = 1; + struct commit* start_commit; - int num_args; const char* args[10]; struct rev_info rev; struct commit_info ci; const char *buf; int max_digits; + size_t longest_file, longest_author; + int found_rename; const char* prefix = setup_git_directory(); @@ -634,28 +796,29 @@ int main(int argc, const char **argv) strcpy(filename_buf, filename); filename = filename_buf; - { - struct commit* c; - if (get_sha1(commit, sha1)) - die("get_sha1 failed, commit '%s' not found", commit); - c = lookup_commit_reference(sha1); - - if (fill_util_info(c, filename)) { - printf("%s not found in %s\n", filename, commit); - return 1; - } + if (get_sha1(commit, sha1)) + die("get_sha1 failed, commit '%s' not found", commit); + start_commit = lookup_commit_reference(sha1); + get_util(start_commit)->pathname = filename; + if (fill_util_info(start_commit)) { + printf("%s not found in %s\n", filename, commit); + return 1; } - num_args = 0; - args[num_args++] = NULL; - args[num_args++] = "--topo-order"; - args[num_args++] = "--remove-empty"; - args[num_args++] = commit; - args[num_args++] = "--"; - args[num_args++] = filename; - args[num_args] = NULL; - setup_revisions(num_args, args, &rev, "HEAD"); + init_revisions(&rev); + rev.remove_empty_trees = 1; + rev.topo_order = 1; + rev.prune_fn = simplify_commit; + rev.topo_setter = topo_setter; + rev.topo_getter = topo_getter; + rev.limited = 1; + + commit_list_insert(start_commit, &rev.commits); + + args[0] = filename; + args[1] = NULL; + diff_tree_setup_paths(args); prepare_revision_walk(&rev); process_commits(&rev, filename, &initial); @@ -663,20 +826,47 @@ int main(int argc, const char **argv) for (max_digits = 1, i = 10; i <= num_blame_lines + 1; max_digits++) i *= 10; + longest_file = 0; + longest_author = 0; + found_rename = 0; for (i = 0; i < num_blame_lines; i++) { struct commit *c = blame_lines[i]; + struct util_info* u; if (!c) c = initial; + u = c->object.util; + if (!found_rename && strcmp(filename, u->pathname)) + found_rename = 1; + if (longest_file < strlen(u->pathname)) + longest_file = strlen(u->pathname); + get_commit_info(c, &ci); + if (longest_author < strlen(ci.author)) + longest_author = strlen(ci.author); + } + + for (i = 0; i < num_blame_lines; i++) { + struct commit *c = blame_lines[i]; + struct util_info* u; + + if (!c) + c = initial; + + u = c->object.util; get_commit_info(c, &ci); fwrite(sha1_to_hex(c->object.sha1), sha1_len, 1, stdout); - if(compability) + if(compability) { printf("\t(%10s\t%10s\t%d)", ci.author, format_time(ci.author_time, ci.author_tz), i+1); - else - printf(" (%-15.15s %10s %*d) ", ci.author, + } else { + if (found_rename) + printf(" %-*.*s", 
longest_file, longest_file, + u->pathname); + printf(" (%-*.*s %10s %*d) ", + longest_author, longest_author, ci.author, format_time(ci.author_time, ci.author_tz), max_digits, i+1); + } if(i == num_blame_lines - 1) { fwrite(buf, blame_len - (buf - blame_contents), @@ -165,6 +165,7 @@ extern void rollback_index_file(struct cache_file *); extern int trust_executable_bit; extern int assume_unchanged; extern int only_use_symrefs; +extern int warn_ambiguous_refs; extern int diff_rename_limit_default; extern int shared_repository; extern const char *apply_default_whitespace; @@ -569,11 +569,29 @@ int count_parents(struct commit * commit) return count; } +void topo_sort_default_setter(struct commit *c, void *data) +{ + c->object.util = data; +} + +void *topo_sort_default_getter(struct commit *c) +{ + return c->object.util; +} + /* * Performs an in-place topological sort on the list supplied. */ void sort_in_topological_order(struct commit_list ** list, int lifo) { + sort_in_topological_order_fn(list, lifo, topo_sort_default_setter, + topo_sort_default_getter); +} + +void sort_in_topological_order_fn(struct commit_list ** list, int lifo, + topo_sort_set_fn_t setter, + topo_sort_get_fn_t getter) +{ struct commit_list * next = *list; struct commit_list * work = NULL, **insert; struct commit_list ** pptr = list; @@ -596,7 +614,7 @@ void sort_in_topological_order(struct commit_list ** list, int lifo) next=*list; while (next) { next_nodes->list_item = next; - next->item->object.util = next_nodes; + setter(next->item, next_nodes); next_nodes++; next = next->next; } @@ -606,8 +624,8 @@ void sort_in_topological_order(struct commit_list ** list, int lifo) struct commit_list * parents = next->item->parents; while (parents) { struct commit * parent=parents->item; - struct sort_node * pn = (struct sort_node *)parent->object.util; - + struct sort_node * pn = (struct sort_node *) getter(parent); + if (pn) pn->indegree++; parents=parents->next; @@ -624,7 +642,7 @@ void sort_in_topological_order(struct commit_list ** list, int lifo) next=*list; insert = &work; while (next) { - struct sort_node * node = (struct sort_node *)next->item->object.util; + struct sort_node * node = (struct sort_node *) getter(next->item); if (node->indegree == 0) { insert = &commit_list_insert(next->item, insert)->next; @@ -637,15 +655,15 @@ void sort_in_topological_order(struct commit_list ** list, int lifo) sort_by_date(&work); while (work) { struct commit * work_item = pop_commit(&work); - struct sort_node * work_node = (struct sort_node *)work_item->object.util; + struct sort_node * work_node = (struct sort_node *) getter(work_item); struct commit_list * parents = work_item->parents; while (parents) { struct commit * parent=parents->item; - struct sort_node * pn = (struct sort_node *)parent->object.util; - + struct sort_node * pn = (struct sort_node *) getter(parent); + if (pn) { - /* + /* * parents are only enqueued for emission * when all their children have been emitted thereby * guaranteeing topological order. @@ -667,7 +685,7 @@ void sort_in_topological_order(struct commit_list ** list, int lifo) *pptr = work_node->list_item; pptr = &(*pptr)->next; *pptr = NULL; - work_item->object.util = NULL; + setter(work_item, NULL); } free(nodes); } @@ -65,15 +65,29 @@ int count_parents(struct commit * commit); /* * Performs an in-place topological sort of list supplied. 
* - * Pre-conditions: + * Pre-conditions for sort_in_topological_order: * all commits in input list and all parents of those * commits must have object.util == NULL - * - * Post-conditions: + * + * Pre-conditions for sort_in_topological_order_fn: + * all commits in input list and all parents of those + * commits must have getter(commit) == NULL + * + * Post-conditions: * invariant of resulting list is: * a reachable from b => ord(b) < ord(a) * in addition, when lifo == 0, commits on parallel tracks are * sorted in the dates order. */ + +typedef void (*topo_sort_set_fn_t)(struct commit*, void *data); +typedef void* (*topo_sort_get_fn_t)(struct commit*); + +void topo_sort_default_setter(struct commit *c, void *data); +void *topo_sort_default_getter(struct commit *c); + void sort_in_topological_order(struct commit_list ** list, int lifo); +void sort_in_topological_order_fn(struct commit_list ** list, int lifo, + topo_sort_set_fn_t setter, + topo_sort_get_fn_t getter); #endif /* COMMIT_H */ @@ -232,6 +232,11 @@ int git_default_config(const char *var, const char *value) return 0; } + if (!strcmp(var, "core.warnambiguousrefs")) { + warn_ambiguous_refs = git_config_bool(var, value); + return 0; + } + if (!strcmp(var, "user.name")) { strncpy(git_default_name, value, sizeof(git_default_name)); return 0; diff --git a/contrib/emacs/git.el b/contrib/emacs/git.el index 5135e361be..ebd00ef9c4 100644 --- a/contrib/emacs/git.el +++ b/contrib/emacs/git.el @@ -59,14 +59,14 @@ (defcustom git-committer-name nil "User name to use for commits. -The default is to fall back to `add-log-full-name' and then `user-full-name'." +The default is to fall back to the repository config, then to `add-log-full-name' and then to `user-full-name'." :group 'git :type '(choice (const :tag "Default" nil) (string :tag "Name"))) (defcustom git-committer-email nil "Email address to use for commits. -The default is to fall back to `add-log-mailing-address' and then `user-mail-address'." +The default is to fall back to the git repository config, then to `add-log-mailing-address' and then to `user-mail-address'." :group 'git :type '(choice (const :tag "Default" nil) (string :tag "Email"))) @@ -148,6 +148,12 @@ The default is to fall back to `add-log-mailing-address' and then `user-mail-add (append (git-get-env-strings env) (list "git") args)) (apply #'call-process "git" nil buffer nil args))) +(defun git-call-process-env-string (env &rest args) + "Wrapper for call-process that sets environment strings, and returns the process output as a string." + (with-temp-buffer + (and (eq 0 (apply #' git-call-process-env t env args)) + (buffer-string)))) + (defun git-run-process-region (buffer start end program args) "Run a git process with a buffer region as input." (let ((output-buffer (current-buffer)) @@ -189,13 +195,15 @@ The default is to fall back to `add-log-mailing-address' and then `user-mail-add (defun git-get-string-sha1 (string) "Read a SHA1 from the specified string." - (let ((pos (string-match "[0-9a-f]\\{40\\}" string))) - (and pos (substring string pos (match-end 0))))) + (and string + (string-match "[0-9a-f]\\{40\\}" string) + (match-string 0 string))) (defun git-get-committer-name () "Return the name to use as GIT_COMMITTER_NAME." 
; copied from log-edit (or git-committer-name + (git-repo-config "user.name") (and (boundp 'add-log-full-name) add-log-full-name) (and (fboundp 'user-full-name) (user-full-name)) (and (boundp 'user-full-name) user-full-name))) @@ -204,6 +212,7 @@ The default is to fall back to `add-log-mailing-address' and then `user-mail-add "Return the email address to use as GIT_COMMITTER_EMAIL." ; copied from log-edit (or git-committer-email + (git-repo-config "user.email") (and (boundp 'add-log-mailing-address) add-log-mailing-address) (and (fboundp 'user-mail-address) (user-mail-address)) (and (boundp 'user-mail-address) user-mail-address))) @@ -259,18 +268,17 @@ The default is to fall back to `add-log-mailing-address' and then `user-mail-add (defun git-rev-parse (rev) "Parse a revision name and return its SHA1." (git-get-string-sha1 - (with-output-to-string - (with-current-buffer standard-output - (git-call-process-env t nil "rev-parse" rev))))) + (git-call-process-env-string nil "rev-parse" rev))) + +(defun git-repo-config (key) + "Retrieve the value associated to KEY in the git repository config file." + (let ((str (git-call-process-env-string nil "repo-config" key))) + (and str (car (split-string str "\n"))))) (defun git-symbolic-ref (ref) "Wrapper for the git-symbolic-ref command." - (car - (split-string - (with-output-to-string - (with-current-buffer standard-output - (git-call-process-env t nil "symbolic-ref" ref))) - "\n"))) + (let ((str (git-call-process-env-string nil "symbolic-ref" ref))) + (and str (car (split-string str "\n"))))) (defun git-update-ref (ref val &optional oldval) "Update a reference by calling git-update-ref." @@ -285,11 +293,7 @@ The default is to fall back to `add-log-mailing-address' and then `user-mail-add (defun git-write-tree (&optional index-file) "Call git-write-tree and return the resulting tree SHA1 as a string." (git-get-string-sha1 - (with-output-to-string - (with-current-buffer standard-output - (git-call-process-env t - (if index-file `(("GIT_INDEX_FILE" . ,index-file)) nil) - "write-tree"))))) + (git-call-process-env-string (and index-file `(("GIT_INDEX_FILE" . ,index-file))) "write-tree"))) (defun git-commit-tree (buffer tree head) "Call git-commit-tree with buffer as input and return the resulting commit SHA1." @@ -763,6 +767,16 @@ The default is to fall back to `add-log-mailing-address' and then `user-mail-add (git-setup-diff-buffer (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M" "HEAD" "--" (git-get-filenames files))))) +(defun git-diff-file-merge-head (arg) + "Diff the marked file(s) against the first merge head (or the nth one with a numeric prefix)." + (interactive "p") + (let ((files (git-marked-files)) + (merge-heads (git-get-merge-heads))) + (unless merge-heads (error "No merge in progress")) + (git-setup-diff-buffer + (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M" + (or (nth (1- arg) merge-heads) "HEAD") "--" (git-get-filenames files))))) + (defun git-diff-unmerged-file (stage) "Diff the marked unmerged file(s) against the specified stage." 
(let ((files (git-marked-files))) @@ -955,6 +969,7 @@ The default is to fall back to `add-log-mailing-address' and then `user-mail-add (define-key diff-map "=" 'git-diff-file) (define-key diff-map "e" 'git-diff-file-idiff) (define-key diff-map "E" 'git-find-file-imerge) + (define-key diff-map "h" 'git-diff-file-merge-head) (define-key diff-map "m" 'git-diff-file-mine) (define-key diff-map "o" 'git-diff-file-other) (setq git-status-mode-map map))) diff --git a/contrib/git-svn/git-svn.perl b/contrib/git-svn/git-svn.perl index cf233ef6ed..f3fc3ec1a9 100755 --- a/contrib/git-svn/git-svn.perl +++ b/contrib/git-svn/git-svn.perl @@ -850,11 +850,23 @@ sub assert_revision_unknown { } } +sub trees_eq { + my ($x, $y) = @_; + my @x = safe_qx('git-cat-file','commit',$x); + my @y = safe_qx('git-cat-file','commit',$y); + if (($y[0] ne $x[0]) || $x[0] !~ /^tree $sha1\n$/ + || $y[0] !~ /^tree $sha1\n$/) { + print STDERR "Trees not equal: $y[0] != $x[0]\n"; + return 0 + } + return 1; +} + sub assert_revision_eq_or_unknown { my ($revno, $commit) = @_; if (-f "$REV_DIR/$revno") { my $current = file_to_s("$REV_DIR/$revno"); - if ($commit ne $current) { + if (($commit ne $current) && !trees_eq($commit, $current)) { croak "$REV_DIR/$revno already exists!\n", "current: $current\nexpected: $commit\n"; } diff --git a/count-delta.c b/count-delta.c deleted file mode 100644 index 058a2aadb1..0000000000 --- a/count-delta.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2005 Junio C Hamano - * The delta-parsing part is almost straight copy of patch-delta.c - * which is (C) 2005 Nicolas Pitre <nico@cam.org>. - */ -#include <stdlib.h> -#include <string.h> -#include <limits.h> -#include "delta.h" -#include "count-delta.h" - -/* - * NOTE. We do not _interpret_ delta fully. As an approximation, we - * just count the number of bytes that are copied from the source, and - * the number of literal data bytes that are inserted. - * - * Number of bytes that are _not_ copied from the source is deletion, - * and number of inserted literal bytes are addition, so sum of them - * is the extent of damage. - */ -int count_delta(void *delta_buf, unsigned long delta_size, - unsigned long *src_copied, unsigned long *literal_added) -{ - unsigned long copied_from_source, added_literal; - const unsigned char *data, *top; - unsigned char cmd; - unsigned long src_size, dst_size, out; - - if (delta_size < DELTA_SIZE_MIN) - return -1; - - data = delta_buf; - top = delta_buf + delta_size; - - src_size = get_delta_hdr_size(&data); - dst_size = get_delta_hdr_size(&data); - - added_literal = copied_from_source = out = 0; - while (data < top) { - cmd = *data++; - if (cmd & 0x80) { - unsigned long cp_off = 0, cp_size = 0; - if (cmd & 0x01) cp_off = *data++; - if (cmd & 0x02) cp_off |= (*data++ << 8); - if (cmd & 0x04) cp_off |= (*data++ << 16); - if (cmd & 0x08) cp_off |= (*data++ << 24); - if (cmd & 0x10) cp_size = *data++; - if (cmd & 0x20) cp_size |= (*data++ << 8); - if (cmd & 0x40) cp_size |= (*data++ << 16); - if (cp_size == 0) cp_size = 0x10000; - - copied_from_source += cp_size; - out += cp_size; - } else { - /* write literal into dst */ - added_literal += cmd; - out += cmd; - data += cmd; - } - } - - /* sanity check */ - if (data != top || out != dst_size) - return -1; - - /* delete size is what was _not_ copied from source. - * edit size is that and literal additions. 
- */ - *src_copied = copied_from_source; - *literal_added = added_literal; - return 0; -} diff --git a/count-delta.h b/count-delta.h deleted file mode 100644 index 7359629827..0000000000 --- a/count-delta.h +++ /dev/null @@ -1,10 +0,0 @@ -/* - * Copyright (C) 2005 Junio C Hamano - */ -#ifndef COUNT_DELTA_H -#define COUNT_DELTA_H - -int count_delta(void *, unsigned long, - unsigned long *src_copied, unsigned long *literal_added); - -#endif diff --git a/diff-delta.c b/diff-delta.c index aaee7be4d2..1188b31cd0 100644 --- a/diff-delta.c +++ b/diff-delta.c @@ -136,7 +136,8 @@ void *diff_delta(void *from_buf, unsigned long from_size, unsigned long *delta_size, unsigned long max_size) { - unsigned int i, outpos, outsize, inscnt, hash_shift; + unsigned int i, outpos, outsize, hash_shift; + int inscnt; const unsigned char *ref_data, *ref_top, *data, *top; unsigned char *out; struct index *entry, **hash; @@ -222,6 +223,20 @@ void *diff_delta(void *from_buf, unsigned long from_size, unsigned char *op; if (inscnt) { + while (moff && ref_data[moff-1] == data[-1]) { + if (msize == 0x10000) + break; + /* we can match one byte back */ + msize++; + moff--; + data--; + outpos--; + if (--inscnt) + continue; + outpos--; /* remove count slot */ + inscnt--; /* make it -1 */ + break; + } out[outpos - inscnt - 1] = inscnt; inscnt = 0; } @@ -463,6 +463,8 @@ void diff_free_filespec_data(struct diff_filespec *s) munmap(s->data, s->size); s->should_free = s->should_munmap = 0; s->data = NULL; + free(s->cnt_data); + s->cnt_data = NULL; } static void prep_temp_blob(struct diff_tempfile *temp, diff --git a/diffcore-break.c b/diffcore-break.c index 0fc2b860be..ed0e14c6d8 100644 --- a/diffcore-break.c +++ b/diffcore-break.c @@ -45,8 +45,8 @@ static int should_break(struct diff_filespec *src, * The value we return is 1 if we want the pair to be broken, * or 0 if we do not. */ - unsigned long delta_size, base_size, src_copied, literal_added; - int to_break = 0; + unsigned long delta_size, base_size, src_copied, literal_added, + src_removed; *merge_score_p = 0; /* assume no deletion --- "do not break" * is the default. @@ -68,37 +68,45 @@ static int should_break(struct diff_filespec *src, if (diffcore_count_changes(src->data, src->size, dst->data, dst->size, + NULL, NULL, 0, &src_copied, &literal_added)) return 0; + /* sanity */ + if (src->size < src_copied) + src_copied = src->size; + if (dst->size < literal_added + src_copied) { + if (src_copied < dst->size) + literal_added = dst->size - src_copied; + else + literal_added = 0; + } + src_removed = src->size - src_copied; + /* Compute merge-score, which is "how much is removed * from the source material". The clean-up stage will * merge the surviving pair together if the score is * less than the minimum, after rename/copy runs. */ - if (src->size <= src_copied) - ; /* all copied, nothing removed */ - else { - delta_size = src->size - src_copied; - *merge_score_p = delta_size * MAX_SCORE / src->size; - } - + *merge_score_p = src_removed * MAX_SCORE / src->size; + /* Extent of damage, which counts both inserts and * deletes. */ - if (src->size + literal_added <= src_copied) - delta_size = 0; /* avoid wrapping around */ - else - delta_size = (src->size - src_copied) + literal_added; - - /* We break if the edit exceeds the minimum. - * i.e. 
(break_score / MAX_SCORE < delta_size / base_size) + delta_size = src_removed + literal_added; + if (delta_size * MAX_SCORE / base_size < break_score) + return 0; + + /* If you removed a lot without adding new material, that is + * not really a rewrite. */ - if (break_score * base_size < delta_size * MAX_SCORE) - to_break = 1; + if ((src->size * break_score < src_removed * MAX_SCORE) && + (literal_added * 20 < src_removed) && + (literal_added * 20 < src_copied)) + return 0; - return to_break; + return 1; } void diffcore_break(int break_score) diff --git a/diffcore-delta.c b/diffcore-delta.c index 1e6a6911ec..7338a40c59 100644 --- a/diffcore-delta.c +++ b/diffcore-delta.c @@ -1,43 +1,213 @@ #include "cache.h" #include "diff.h" #include "diffcore.h" -#include "delta.h" -#include "count-delta.h" - -static int diffcore_count_changes_1(void *src, unsigned long src_size, - void *dst, unsigned long dst_size, - unsigned long delta_limit, - unsigned long *src_copied, - unsigned long *literal_added) + +/* + * Idea here is very simple. + * + * We have total of (sz-N+1) N-byte overlapping sequences in buf whose + * size is sz. If the same N-byte sequence appears in both source and + * destination, we say the byte that starts that sequence is shared + * between them (i.e. copied from source to destination). + * + * For each possible N-byte sequence, if the source buffer has more + * instances of it than the destination buffer, that means the + * difference are the number of bytes not copied from source to + * destination. If the counts are the same, everything was copied + * from source to destination. If the destination has more, + * everything was copied, and destination added more. + * + * We are doing an approximation so we do not really have to waste + * memory by actually storing the sequence. We just hash them into + * somewhere around 2^16 hashbuckets and count the occurrences. + * + * The length of the sequence is arbitrarily set to 8 for now. + */ + +/* Wild guess at the initial hash size */ +#define INITIAL_HASH_SIZE 9 + +/* We leave more room in smaller hash but do not let it + * grow to have unused hole too much. + */ +#define INITIAL_FREE(sz_log2) ((1<<(sz_log2))*(sz_log2-3)/(sz_log2)) + +/* A prime rather carefully chosen between 2^16..2^17, so that + * HASHBASE < INITIAL_FREE(17). We want to keep the maximum hashtable + * size under the current 2<<17 maximum, which can hold this many + * different values before overflowing to hashtable of size 2<<18. + */ +#define HASHBASE 107927 + +struct spanhash { + unsigned int hashval; + unsigned int cnt; +}; +struct spanhash_top { + int alloc_log2; + int free; + struct spanhash data[FLEX_ARRAY]; +}; + +static struct spanhash *spanhash_find(struct spanhash_top *top, + unsigned int hashval) { - void *delta; - unsigned long delta_size; - - delta = diff_delta(src, src_size, - dst, dst_size, - &delta_size, delta_limit); - if (!delta) - /* If delta_limit is exceeded, we have too much differences */ - return -1; - - /* Estimate the edit size by interpreting delta. 
*/ - if (count_delta(delta, delta_size, src_copied, literal_added)) { - free(delta); - return -1; + int sz = 1 << top->alloc_log2; + int bucket = hashval & (sz - 1); + while (1) { + struct spanhash *h = &(top->data[bucket++]); + if (!h->cnt) + return NULL; + if (h->hashval == hashval) + return h; + if (sz <= bucket) + bucket = 0; } - free(delta); - return 0; +} + +static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig) +{ + struct spanhash_top *new; + int i; + int osz = 1 << orig->alloc_log2; + int sz = osz << 1; + + new = xmalloc(sizeof(*orig) + sizeof(struct spanhash) * sz); + new->alloc_log2 = orig->alloc_log2 + 1; + new->free = INITIAL_FREE(new->alloc_log2); + memset(new->data, 0, sizeof(struct spanhash) * sz); + for (i = 0; i < osz; i++) { + struct spanhash *o = &(orig->data[i]); + int bucket; + if (!o->cnt) + continue; + bucket = o->hashval & (sz - 1); + while (1) { + struct spanhash *h = &(new->data[bucket++]); + if (!h->cnt) { + h->hashval = o->hashval; + h->cnt = o->cnt; + new->free--; + break; + } + if (sz <= bucket) + bucket = 0; + } + } + free(orig); + return new; +} + +static struct spanhash_top *add_spanhash(struct spanhash_top *top, + unsigned int hashval, int cnt) +{ + int bucket, lim; + struct spanhash *h; + + lim = (1 << top->alloc_log2); + bucket = hashval & (lim - 1); + while (1) { + h = &(top->data[bucket++]); + if (!h->cnt) { + h->hashval = hashval; + h->cnt = cnt; + top->free--; + if (top->free < 0) + return spanhash_rehash(top); + return top; + } + if (h->hashval == hashval) { + h->cnt += cnt; + return top; + } + if (lim <= bucket) + bucket = 0; + } +} + +static struct spanhash_top *hash_chars(unsigned char *buf, unsigned int sz) +{ + int i, n; + unsigned int accum1, accum2, hashval; + struct spanhash_top *hash; + + i = INITIAL_HASH_SIZE; + hash = xmalloc(sizeof(*hash) + sizeof(struct spanhash) * (1<<i)); + hash->alloc_log2 = i; + hash->free = INITIAL_FREE(i); + memset(hash->data, 0, sizeof(struct spanhash) * (1<<i)); + + n = 0; + accum1 = accum2 = 0; + while (sz) { + unsigned int c = *buf++; + unsigned int old_1 = accum1; + sz--; + accum1 = (accum1 << 7) ^ (accum2 >> 25); + accum2 = (accum2 << 7) ^ (old_1 >> 25); + accum1 += c; + if (++n < 64 && c != '\n') + continue; + hashval = (accum1 + accum2 * 0x61) % HASHBASE; + hash = add_spanhash(hash, hashval, n); + n = 0; + accum1 = accum2 = 0; + } + return hash; } int diffcore_count_changes(void *src, unsigned long src_size, void *dst, unsigned long dst_size, + void **src_count_p, + void **dst_count_p, unsigned long delta_limit, unsigned long *src_copied, unsigned long *literal_added) { - return diffcore_count_changes_1(src, src_size, - dst, dst_size, - delta_limit, - src_copied, - literal_added); + int i, ssz; + struct spanhash_top *src_count, *dst_count; + unsigned long sc, la; + + src_count = dst_count = NULL; + if (src_count_p) + src_count = *src_count_p; + if (!src_count) { + src_count = hash_chars(src, src_size); + if (src_count_p) + *src_count_p = src_count; + } + if (dst_count_p) + dst_count = *dst_count_p; + if (!dst_count) { + dst_count = hash_chars(dst, dst_size); + if (dst_count_p) + *dst_count_p = dst_count; + } + sc = la = 0; + + ssz = 1 << src_count->alloc_log2; + for (i = 0; i < ssz; i++) { + struct spanhash *s = &(src_count->data[i]); + struct spanhash *d; + unsigned dst_cnt, src_cnt; + if (!s->cnt) + continue; + src_cnt = s->cnt; + d = spanhash_find(dst_count, s->hashval); + dst_cnt = d ? 
d->cnt : 0; + if (src_cnt < dst_cnt) { + la += dst_cnt - src_cnt; + sc += src_cnt; + } + else + sc += dst_cnt; + } + + if (!src_count_p) + free(src_count); + if (!dst_count_p) + free(dst_count); + *src_copied = sc; + *literal_added = la; + return 0; } diff --git a/diffcore-rename.c b/diffcore-rename.c index 55cf1c37f3..e992698720 100644 --- a/diffcore-rename.c +++ b/diffcore-rename.c @@ -133,7 +133,7 @@ static int estimate_similarity(struct diff_filespec *src, * match than anything else; the destination does not even * call into this function in that case. */ - unsigned long delta_size, base_size, src_copied, literal_added; + unsigned long max_size, delta_size, base_size, src_copied, literal_added; unsigned long delta_limit; int score; @@ -144,9 +144,9 @@ static int estimate_similarity(struct diff_filespec *src, if (!S_ISREG(src->mode) || !S_ISREG(dst->mode)) return 0; - delta_size = ((src->size < dst->size) ? - (dst->size - src->size) : (src->size - dst->size)); + max_size = ((src->size > dst->size) ? src->size : dst->size); base_size = ((src->size < dst->size) ? src->size : dst->size); + delta_size = max_size - base_size; /* We would not consider edits that change the file size so * drastically. delta_size must be smaller than @@ -166,23 +166,18 @@ static int estimate_similarity(struct diff_filespec *src, delta_limit = base_size * (MAX_SCORE-minimum_score) / MAX_SCORE; if (diffcore_count_changes(src->data, src->size, dst->data, dst->size, + &src->cnt_data, &dst->cnt_data, delta_limit, &src_copied, &literal_added)) return 0; - /* Extent of damage */ - if (src->size + literal_added < src_copied) - delta_size = 0; - else - delta_size = (src->size - src_copied) + literal_added; - - /* - * Now we will give some score to it. 100% edit gets 0 points - * and 0% edit gets MAX_SCORE points. + /* How similar are they? + * what percentage of material in dst are from source? 
*/ - score = MAX_SCORE - (MAX_SCORE * delta_size / base_size); - if (score < 0) return 0; - if (MAX_SCORE < score) return MAX_SCORE; + if (!dst->size) + score = 0; /* should not happen */ + else + score = src_copied * MAX_SCORE / max_size; return score; } @@ -310,6 +305,8 @@ void diffcore_rename(struct diff_options *options) m->score = estimate_similarity(one, two, minimum_score); } + /* We do not need the text anymore */ + diff_free_filespec_data(two); dst_cnt++; } /* cost matrix sorted by most to least similar pair */ diff --git a/diffcore.h b/diffcore.h index dba4f17658..73c7842cc7 100644 --- a/diffcore.h +++ b/diffcore.h @@ -17,8 +17,8 @@ */ #define MAX_SCORE 60000.0 #define DEFAULT_RENAME_SCORE 30000 /* rename/copy similarity minimum (50%) */ -#define DEFAULT_BREAK_SCORE 30000 /* minimum for break to happen (50%)*/ -#define DEFAULT_MERGE_SCORE 48000 /* maximum for break-merge to happen (80%)*/ +#define DEFAULT_BREAK_SCORE 30000 /* minimum for break to happen (50%) */ +#define DEFAULT_MERGE_SCORE 36000 /* maximum for break-merge to happen 60%) */ #define MINIMUM_BREAK_SIZE 400 /* do not break a file smaller than this */ @@ -26,6 +26,7 @@ struct diff_filespec { unsigned char sha1[20]; char *path; void *data; + void *cnt_data; unsigned long size; int xfrm_flags; /* for use by the xfrm */ unsigned short mode; /* file mode */ @@ -103,6 +104,8 @@ void diff_debug_queue(const char *, struct diff_queue_struct *); extern int diffcore_count_changes(void *src, unsigned long src_size, void *dst, unsigned long dst_size, + void **src_count_p, + void **dst_count_p, unsigned long delta_limit, unsigned long *src_copied, unsigned long *literal_added); diff --git a/environment.c b/environment.c index 16c08f0697..5d29b92bd5 100644 --- a/environment.c +++ b/environment.c @@ -14,6 +14,7 @@ char git_default_name[MAX_GITNAME]; int trust_executable_bit = 1; int assume_unchanged = 0; int only_use_symrefs = 0; +int warn_ambiguous_refs = 0; int repository_format_version = 0; char git_commit_encoding[MAX_ENCODING_LENGTH] = "utf-8"; int shared_repository = 0; diff --git a/fsck-objects.c b/fsck-objects.c index 4ddd67699c..59b25904cb 100644 --- a/fsck-objects.c +++ b/fsck-objects.c @@ -14,10 +14,9 @@ static int show_root = 0; static int show_tags = 0; static int show_unreachable = 0; -static int standalone = 0; static int check_full = 0; static int check_strict = 0; -static int keep_cache_objects = 0; +static int keep_cache_objects = 0; static unsigned char head_sha1[20]; #ifdef NO_D_INO_IN_DIRENT @@ -68,7 +67,7 @@ static void check_connectivity(void) continue; if (!obj->parsed) { - if (!standalone && has_sha1_file(obj->sha1)) + if (has_sha1_file(obj->sha1)) ; /* it is in pack */ else printf("missing %s %s\n", @@ -82,7 +81,7 @@ static void check_connectivity(void) for (j = 0; j < refs->count; j++) { struct object *ref = refs->ref[j]; if (ref->parsed || - (!standalone && has_sha1_file(ref->sha1))) + (has_sha1_file(ref->sha1))) continue; printf("broken link from %7s %s\n", obj->type, sha1_to_hex(obj->sha1)); @@ -390,7 +389,7 @@ static int fsck_handle_ref(const char *refname, const unsigned char *sha1) obj = lookup_object(sha1); if (!obj) { - if (!standalone && has_sha1_file(sha1)) { + if (has_sha1_file(sha1)) { default_refs++; return 0; /* it is in a pack */ } @@ -464,10 +463,6 @@ int main(int argc, char **argv) keep_cache_objects = 1; continue; } - if (!strcmp(arg, "--standalone")) { - standalone = 1; - continue; - } if (!strcmp(arg, "--full")) { check_full = 1; continue; @@ -477,14 +472,9 @@ int main(int argc, char 
**argv) continue; } if (*arg == '-') - usage("git-fsck-objects [--tags] [--root] [[--unreachable] [--cache] [--standalone | --full] [--strict] <head-sha1>*]"); + usage("git-fsck-objects [--tags] [--root] [[--unreachable] [--cache] [--full] [--strict] <head-sha1>*]"); } - if (standalone && check_full) - die("Only one of --standalone or --full can be used."); - if (standalone) - putenv("GIT_ALTERNATE_OBJECT_DIRECTORIES="); - fsck_head_link(); fsck_object_dir(get_object_directory()); if (check_full) { diff --git a/generate-cmdlist.sh b/generate-cmdlist.sh index 6ee85d5a53..6c59dbd68f 100755 --- a/generate-cmdlist.sh +++ b/generate-cmdlist.sh @@ -41,8 +41,12 @@ whatchanged EOF while read cmd do - sed -n "/NAME/,/git-$cmd/H; - \$ {x; s/.*git-$cmd - \\(.*\\)/ {\"$cmd\", \"\1\"},/; p}" \ - "Documentation/git-$cmd.txt" + sed -n ' + /NAME/,/git-'"$cmd"'/H + ${ + x + s/.*git-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ + p + }' "Documentation/git-$cmd.txt" done echo "};" diff --git a/git-annotate.perl b/git-annotate.perl index feea0a2d81..9df72a1662 100755 --- a/git-annotate.perl +++ b/git-annotate.perl @@ -20,7 +20,7 @@ sub usage() { -r, --rename Follow renames (Defaults on). -S, --rev-file revs-file - use revs from revs-file instead of calling git-rev-list + Use revs from revs-file instead of calling git-rev-list -h, --help This message. '; diff --git a/git-commit.sh b/git-commit.sh index 330a434b18..1e7c09e1f2 100755 --- a/git-commit.sh +++ b/git-commit.sh @@ -161,7 +161,7 @@ run_status () { } ' - if test -n "$verbose" + if test -n "$verbose" -a -z "$IS_INITIAL" then git-diff-index --cached -M -p --diff-filter=MDTCRA $REFERENCE fi diff --git a/git-cvsimport.perl b/git-cvsimport.perl index 02d1928ada..3728294e74 100755 --- a/git-cvsimport.perl +++ b/git-cvsimport.perl @@ -453,6 +453,7 @@ chdir($git_tree); my $last_branch = ""; my $orig_branch = ""; my %branch_date; +my $tip_at_start = undef; my $git_dir = $ENV{"GIT_DIR"} || ".git"; $git_dir = getwd()."/".$git_dir unless $git_dir =~ m#^/#; @@ -487,6 +488,7 @@ unless(-d $git_dir) { $last_branch = "master"; } $orig_branch = $last_branch; + $tip_at_start = `git-rev-parse --verify HEAD`; # populate index system('git-read-tree', $last_branch); @@ -873,7 +875,22 @@ if (defined $orig_git_index) { # Now switch back to the branch we were in before all of this happened if($orig_branch) { - print "DONE; you may need to merge manually.\n" if $opt_v; + print "DONE.\n" if $opt_v; + if ($opt_i) { + exit 0; + } + my $tip_at_end = `git-rev-parse --verify HEAD`; + if ($tip_at_start ne $tip_at_end) { + for ($tip_at_start, $tip_at_end) { chomp; } + print "Fetched into the current branch.\n" if $opt_v; + system(qw(git-read-tree -u -m), + $tip_at_start, $tip_at_end); + die "Fast-forward update failed: $?\n" if $?; + } + else { + system(qw(git-merge cvsimport HEAD), "refs/heads/$opt_o"); + die "Could not merge $opt_o into the current branch.\n" if $?; + } } else { $orig_branch = "master"; print "DONE; creating $orig_branch branch\n" if $opt_v; diff --git a/git-diff.sh b/git-diff.sh index dc4d1b3cfd..dc0dd312bf 100755 --- a/git-diff.sh +++ b/git-diff.sh @@ -38,9 +38,9 @@ case " $flags " in flags="$flags'$cc_or_p' " ;; esac -# If we do not have -B nor -C, default to -M. +# If we do not have -B, -C, -r, nor -p, default to -M. case " $flags " in -*" '-"[BCM]* | *" '--find-copies-harder' "*) +*" '-"[BCMrp]* | *" '--find-copies-harder' "*) ;; # something like -M50. 
*) flags="$flags'-M' " ;; diff --git a/git-fetch.sh b/git-fetch.sh index c0eb96752e..954901ddce 100755 --- a/git-fetch.sh +++ b/git-fetch.sh @@ -185,6 +185,7 @@ fast_forward_local () { ;; *) echo >&2 " not updating." + exit 1 ;; esac } diff --git a/git-merge.sh b/git-merge.sh index cc0952a97d..78ab422e4e 100755 --- a/git-merge.sh +++ b/git-merge.sh @@ -11,11 +11,15 @@ LF=' ' all_strategies='recursive octopus resolve stupid ours' -default_strategies='recursive' +default_twohead_strategies='recursive' +default_octopus_strategies='octopus' +no_trivial_merge_strategies='ours' use_strategies= + +index_merge=t if test "@@NO_PYTHON@@"; then all_strategies='resolve octopus stupid ours' - default_strategies='resolve' + default_twohead_strategies='resolve' fi dropsave() { @@ -90,8 +94,6 @@ do shift done -test "$#" -le 2 && usage ;# we need at least two heads. - merge_msg="$1" shift head_arg="$1" @@ -99,6 +101,8 @@ head=$(git-rev-parse --verify "$1"^0) || usage shift # All the rest are remote heads +test "$#" = 0 && usage ;# we need at least one remote head. + remoteheads= for remote do @@ -108,6 +112,27 @@ do done set x $remoteheads ; shift +case "$use_strategies" in +'') + case "$#" in + 1) + use_strategies="$default_twohead_strategies" ;; + *) + use_strategies="$default_octopus_strategies" ;; + esac + ;; +esac + +for s in $use_strategies +do + case " $s " in + *" $no_trivial_merge_strategies "*) + index_merge=f + break + ;; + esac +done + case "$#" in 1) common=$(git-merge-base --all $head "$@") @@ -118,18 +143,21 @@ case "$#" in esac echo "$head" >"$GIT_DIR/ORIG_HEAD" -case "$#,$common,$no_commit" in -*,'',*) +case "$index_merge,$#,$common,$no_commit" in +f,*) + # We've been told not to try anything clever. Skip to real merge. + ;; +?,*,'',*) # No common ancestors found. We need a real merge. ;; -1,"$1",*) +?,1,"$1",*) # If head can reach all the merge then we are up to date. - # but first the most common case of merging one remote + # but first the most common case of merging one remote. echo "Already up-to-date." dropsave exit 0 ;; -1,"$head",*) +?,1,"$head",*) # Again the most common case of merging one remote. echo "Updating from $head to $1" git-update-index --refresh 2>/dev/null @@ -139,11 +167,11 @@ case "$#,$common,$no_commit" in dropsave exit 0 ;; -1,?*"$LF"?*,*) +?,1,?*"$LF"?*,*) # We are not doing octopus and not fast forward. Need a # real merge. ;; -1,*,) +?,1,*,) # We are not doing octopus, not fast forward, and have only # one common. See if it is really trivial. git var GIT_COMMITTER_IDENT >/dev/null || exit @@ -188,17 +216,6 @@ esac # We are going to make a new commit. git var GIT_COMMITTER_IDENT >/dev/null || exit -case "$use_strategies" in -'') - case "$#" in - 1) - use_strategies="$default_strategies" ;; - *) - use_strategies=octopus ;; - esac - ;; -esac - # At this point, we need a real merge. No matter what strategy # we use, it would operate on the index, possibly affecting the # working tree, and when resolved cleanly, have the desired tree @@ -270,11 +287,7 @@ done # auto resolved the merge cleanly. if test '' != "$result_tree" then - parents="-p $head" - for remote - do - parents="$parents -p $remote" - done + parents=$(git-show-branch --independent "$head" "$@" | sed -e 's/^/-p /') result_commit=$(echo "$merge_msg" | git-commit-tree $result_tree $parents) || exit finish "$result_commit" "Merge $result_commit, made by $wt_strategy." 
dropsave diff --git a/git-pull.sh b/git-pull.sh index 6caf1aad47..29c14e148e 100755 --- a/git-pull.sh +++ b/git-pull.sh @@ -70,20 +70,16 @@ case "$merge_head" in exit 0 ;; ?*' '?*) - var=`git repo-config --get pull.octopus` - if test '' = "$var" + var=`git-repo-config --get pull.octopus` + if test -n "$var" then - strategy_default_args='-s octopus' - else strategy_default_args="-s $var" fi ;; *) - var=`git repo-config --get pull.twohead` - if test '' = "$var" - then - strategy_default_args='-s recursive' - else + var=`git-repo-config --get pull.twohead` + if test -n "$var" + then strategy_default_args="-s $var" fi ;; diff --git a/http-fetch.c b/http-fetch.c index 8fd9de081f..dc67218ae7 100644 --- a/http-fetch.c +++ b/http-fetch.c @@ -8,6 +8,7 @@ #define RANGE_HEADER_SIZE 30 static int got_alternates = -1; +static int corrupt_object_found = 0; static struct curl_slist *no_pragma_header; @@ -468,9 +469,13 @@ static void process_alternates_response(void *callback_data) alt_req->url); active_requests++; slot->in_use = 1; + if (slot->finished != NULL) + (*slot->finished) = 0; if (!start_active_slot(slot)) { got_alternates = -1; slot->in_use = 0; + if (slot->finished != NULL) + (*slot->finished) = 1; } return; } @@ -830,6 +835,7 @@ static int fetch_object(struct alt_base *repo, unsigned char *sha1) obj_req->errorstr, obj_req->curl_result, obj_req->http_code, hex); } else if (obj_req->zret != Z_STREAM_END) { + corrupt_object_found++; ret = error("File %s (%s) corrupt", hex, obj_req->url); } else if (memcmp(obj_req->sha1, obj_req->real_sha1, 20)) { ret = error("File %s has bad hash", hex); @@ -989,5 +995,11 @@ int main(int argc, char **argv) http_cleanup(); + if (corrupt_object_found) { + fprintf(stderr, +"Some loose object were found to be corrupt, but they might be just\n" +"a false '404 Not Found' error message sent with incorrect HTTP\n" +"status code. Suggest running git fsck-objects.\n"); + } return rc; } diff --git a/http-push.c b/http-push.c index 226d71966d..b35d400ee1 100644 --- a/http-push.c +++ b/http-push.c @@ -7,11 +7,12 @@ #include "http.h" #include "refs.h" #include "revision.h" +#include "exec_cmd.h" #include <expat.h> static const char http_push_usage[] = -"git-http-push [--complete] [--force] [--verbose] <url> <ref> [<ref>...]\n"; +"git-http-push [--all] [--force] [--verbose] <remote> [<head>...]\n"; #ifndef XML_STATUS_OK enum XML_Status { @@ -22,6 +23,7 @@ enum XML_Status { #define XML_STATUS_ERROR 0 #endif +#define PREV_BUF_SIZE 4096 #define RANGE_HEADER_SIZE 30 /* DAV methods */ @@ -31,6 +33,7 @@ enum XML_Status { #define DAV_PROPFIND "PROPFIND" #define DAV_PUT "PUT" #define DAV_UNLOCK "UNLOCK" +#define DAV_DELETE "DELETE" /* DAV lock flags */ #define DAV_PROP_LOCKWR (1u << 0) @@ -58,13 +61,17 @@ enum XML_Status { /* bits #0-4 in revision.h */ -#define LOCAL (1u << 5) -#define REMOTE (1u << 6) -#define PUSHING (1u << 7) +#define LOCAL (1u << 5) +#define REMOTE (1u << 6) +#define FETCHING (1u << 7) +#define PUSHING (1u << 8) + +/* We allow "recursive" symbolic refs. 
Only within reason, though */ +#define MAXDEPTH 5 static int pushing = 0; static int aborted = 0; -static char remote_dir_exists[256]; +static signed char remote_dir_exists[256]; static struct curl_slist *no_pragma_header; static struct curl_slist *default_headers; @@ -79,13 +86,19 @@ struct repo { char *url; int path_len; + int has_info_refs; + int can_update_info_refs; + int has_info_packs; struct packed_git *packs; + struct remote_lock *locks; }; static struct repo *remote = NULL; -static struct remote_lock *remote_locks = NULL; enum transfer_state { + NEED_FETCH, + RUN_FETCH_LOOSE, + RUN_FETCH_PACKED, NEED_PUSH, RUN_MKCOL, RUN_PUT, @@ -104,6 +117,8 @@ struct transfer_request struct buffer buffer; char filename[PATH_MAX]; char tmpfile[PATH_MAX]; + int local_fileno; + FILE *local_stream; enum transfer_state state; CURLcode curl_result; char errorstr[CURL_ERROR_SIZE]; @@ -113,6 +128,7 @@ struct transfer_request z_stream stream; int zret; int rename; + void *userData; struct active_request_slot *slot; struct transfer_request *next; }; @@ -135,19 +151,31 @@ struct remote_lock char *token; time_t start_time; long timeout; - int active; int refreshing; struct remote_lock *next; }; -struct remote_dentry +/* Flags that control remote_ls processing */ +#define PROCESS_FILES (1u << 0) +#define PROCESS_DIRS (1u << 1) +#define RECURSIVE (1u << 2) + +/* Flags that remote_ls passes to callback functions */ +#define IS_DIR (1u << 0) + +struct remote_ls_ctx { - char *base; - char *name; - int is_dir; + char *path; + void (*userFunc)(struct remote_ls_ctx *ls); + void *userData; + int flags; + char *dentry_name; + int dentry_flags; + struct remote_ls_ctx *parent; }; static void finish_request(struct transfer_request *request); +static void release_request(struct transfer_request *request); static void process_response(void *callback_data) { @@ -157,6 +185,258 @@ static void process_response(void *callback_data) finish_request(request); } +static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb, + void *data) +{ + unsigned char expn[4096]; + size_t size = eltsize * nmemb; + int posn = 0; + struct transfer_request *request = (struct transfer_request *)data; + do { + ssize_t retval = write(request->local_fileno, + ptr + posn, size - posn); + if (retval < 0) + return posn; + posn += retval; + } while (posn < size); + + request->stream.avail_in = size; + request->stream.next_in = ptr; + do { + request->stream.next_out = expn; + request->stream.avail_out = sizeof(expn); + request->zret = inflate(&request->stream, Z_SYNC_FLUSH); + SHA1_Update(&request->c, expn, + sizeof(expn) - request->stream.avail_out); + } while (request->stream.avail_in && request->zret == Z_OK); + data_received++; + return size; +} + +static void start_fetch_loose(struct transfer_request *request) +{ + char *hex = sha1_to_hex(request->obj->sha1); + char *filename; + char prevfile[PATH_MAX]; + char *url; + char *posn; + int prevlocal; + unsigned char prev_buf[PREV_BUF_SIZE]; + ssize_t prev_read = 0; + long prev_posn = 0; + char range[RANGE_HEADER_SIZE]; + struct curl_slist *range_header = NULL; + struct active_request_slot *slot; + + filename = sha1_file_name(request->obj->sha1); + snprintf(request->filename, sizeof(request->filename), "%s", filename); + snprintf(request->tmpfile, sizeof(request->tmpfile), + "%s.temp", filename); + + snprintf(prevfile, sizeof(prevfile), "%s.prev", request->filename); + unlink(prevfile); + rename(request->tmpfile, prevfile); + unlink(request->tmpfile); + + if (request->local_fileno != -1) + 
error("fd leakage in start: %d", request->local_fileno); + request->local_fileno = open(request->tmpfile, + O_WRONLY | O_CREAT | O_EXCL, 0666); + /* This could have failed due to the "lazy directory creation"; + * try to mkdir the last path component. + */ + if (request->local_fileno < 0 && errno == ENOENT) { + char *dir = strrchr(request->tmpfile, '/'); + if (dir) { + *dir = 0; + mkdir(request->tmpfile, 0777); + *dir = '/'; + } + request->local_fileno = open(request->tmpfile, + O_WRONLY | O_CREAT | O_EXCL, 0666); + } + + if (request->local_fileno < 0) { + request->state = ABORTED; + error("Couldn't create temporary file %s for %s: %s", + request->tmpfile, request->filename, strerror(errno)); + return; + } + + memset(&request->stream, 0, sizeof(request->stream)); + + inflateInit(&request->stream); + + SHA1_Init(&request->c); + + url = xmalloc(strlen(remote->url) + 50); + request->url = xmalloc(strlen(remote->url) + 50); + strcpy(url, remote->url); + posn = url + strlen(remote->url); + strcpy(posn, "objects/"); + posn += 8; + memcpy(posn, hex, 2); + posn += 2; + *(posn++) = '/'; + strcpy(posn, hex + 2); + strcpy(request->url, url); + + /* If a previous temp file is present, process what was already + fetched. */ + prevlocal = open(prevfile, O_RDONLY); + if (prevlocal != -1) { + do { + prev_read = read(prevlocal, prev_buf, PREV_BUF_SIZE); + if (prev_read>0) { + if (fwrite_sha1_file(prev_buf, + 1, + prev_read, + request) == prev_read) { + prev_posn += prev_read; + } else { + prev_read = -1; + } + } + } while (prev_read > 0); + close(prevlocal); + } + unlink(prevfile); + + /* Reset inflate/SHA1 if there was an error reading the previous temp + file; also rewind to the beginning of the local file. */ + if (prev_read == -1) { + memset(&request->stream, 0, sizeof(request->stream)); + inflateInit(&request->stream); + SHA1_Init(&request->c); + if (prev_posn>0) { + prev_posn = 0; + lseek(request->local_fileno, SEEK_SET, 0); + ftruncate(request->local_fileno, 0); + } + } + + slot = get_active_slot(); + slot->callback_func = process_response; + slot->callback_data = request; + request->slot = slot; + + curl_easy_setopt(slot->curl, CURLOPT_FILE, request); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file); + curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, request->errorstr); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + + /* If we have successfully processed data from a previous fetch + attempt, only fetch the data we don't already have. 
*/ + if (prev_posn>0) { + if (push_verbosely) + fprintf(stderr, + "Resuming fetch of object %s at byte %ld\n", + hex, prev_posn); + sprintf(range, "Range: bytes=%ld-", prev_posn); + range_header = curl_slist_append(range_header, range); + curl_easy_setopt(slot->curl, + CURLOPT_HTTPHEADER, range_header); + } + + /* Try to get the request started, abort the request on error */ + request->state = RUN_FETCH_LOOSE; + if (!start_active_slot(slot)) { + fprintf(stderr, "Unable to start GET request\n"); + remote->can_update_info_refs = 0; + release_request(request); + } +} + +static void start_fetch_packed(struct transfer_request *request) +{ + char *url; + struct packed_git *target; + FILE *packfile; + char *filename; + long prev_posn = 0; + char range[RANGE_HEADER_SIZE]; + struct curl_slist *range_header = NULL; + + struct transfer_request *check_request = request_queue_head; + struct active_request_slot *slot; + + target = find_sha1_pack(request->obj->sha1, remote->packs); + if (!target) { + fprintf(stderr, "Unable to fetch %s, will not be able to update server info refs\n", sha1_to_hex(request->obj->sha1)); + remote->can_update_info_refs = 0; + release_request(request); + return; + } + + fprintf(stderr, "Fetching pack %s\n", sha1_to_hex(target->sha1)); + fprintf(stderr, " which contains %s\n", sha1_to_hex(request->obj->sha1)); + + filename = sha1_pack_name(target->sha1); + snprintf(request->filename, sizeof(request->filename), "%s", filename); + snprintf(request->tmpfile, sizeof(request->tmpfile), + "%s.temp", filename); + + url = xmalloc(strlen(remote->url) + 64); + sprintf(url, "%sobjects/pack/pack-%s.pack", + remote->url, sha1_to_hex(target->sha1)); + + /* Make sure there isn't another open request for this pack */ + while (check_request) { + if (check_request->state == RUN_FETCH_PACKED && + !strcmp(check_request->url, url)) { + free(url); + release_request(request); + return; + } + check_request = check_request->next; + } + + packfile = fopen(request->tmpfile, "a"); + if (!packfile) { + fprintf(stderr, "Unable to open local file %s for pack", + filename); + remote->can_update_info_refs = 0; + free(url); + return; + } + + slot = get_active_slot(); + slot->callback_func = process_response; + slot->callback_data = request; + request->slot = slot; + request->local_stream = packfile; + request->userData = target; + + request->url = url; + curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + slot->local = packfile; + + /* If there is data present from a previous transfer attempt, + resume where it left off */ + prev_posn = ftell(packfile); + if (prev_posn>0) { + if (push_verbosely) + fprintf(stderr, + "Resuming fetch of pack %s at byte %ld\n", + sha1_to_hex(target->sha1), prev_posn); + sprintf(range, "Range: bytes=%ld-", prev_posn); + range_header = curl_slist_append(range_header, range); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header); + } + + /* Try to get the request started, abort the request on error */ + request->state = RUN_FETCH_PACKED; + if (!start_active_slot(slot)) { + fprintf(stderr, "Unable to start GET request\n"); + remote->can_update_info_refs = 0; + release_request(request); + } +} + static void start_mkcol(struct transfer_request *request) { char *hex = sha1_to_hex(request->obj->sha1); @@ -299,62 +579,69 @@ static void start_move(struct transfer_request *request) } } -static int 
refresh_lock(struct remote_lock *check_lock) +static int refresh_lock(struct remote_lock *lock) { struct active_request_slot *slot; + struct slot_results results; char *if_header; char timeout_header[25]; struct curl_slist *dav_headers = NULL; - struct remote_lock *lock; - int time_remaining; - time_t current_time; + int rc = 0; - /* Refresh all active locks if they're close to expiring */ - for (lock = remote_locks; lock; lock = lock->next) { - if (!lock->active) - continue; + lock->refreshing = 1; - current_time = time(NULL); - time_remaining = lock->start_time + lock->timeout - - current_time; - if (time_remaining > LOCK_REFRESH) - continue; + if_header = xmalloc(strlen(lock->token) + 25); + sprintf(if_header, "If: (<opaquelocktoken:%s>)", lock->token); + sprintf(timeout_header, "Timeout: Second-%ld", lock->timeout); + dav_headers = curl_slist_append(dav_headers, if_header); + dav_headers = curl_slist_append(dav_headers, timeout_header); - lock->refreshing = 1; + slot = get_active_slot(); + slot->results = &results; + curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_URL, lock->url); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_LOCK); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); - if_header = xmalloc(strlen(lock->token) + 25); - sprintf(if_header, "If: (<opaquelocktoken:%s>)", lock->token); - sprintf(timeout_header, "Timeout: Second-%ld", lock->timeout); - dav_headers = curl_slist_append(dav_headers, if_header); - dav_headers = curl_slist_append(dav_headers, timeout_header); + if (start_active_slot(slot)) { + run_active_slot(slot); + if (results.curl_result != CURLE_OK) { + fprintf(stderr, "LOCK HTTP error %ld\n", + results.http_code); + } else { + lock->start_time = time(NULL); + rc = 1; + } + } - slot = get_active_slot(); - curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); - curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); - curl_easy_setopt(slot->curl, CURLOPT_URL, lock->url); - curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_LOCK); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + lock->refreshing = 0; + curl_slist_free_all(dav_headers); + free(if_header); - if (start_active_slot(slot)) { - run_active_slot(slot); - if (slot->curl_result != CURLE_OK) { - fprintf(stderr, "Got HTTP error %ld\n", slot->http_code); - lock->active = 0; - } else { - lock->active = 1; - lock->start_time = time(NULL); + return rc; +} + +static void check_locks() +{ + struct remote_lock *lock = remote->locks; + time_t current_time = time(NULL); + int time_remaining; + + while (lock) { + time_remaining = lock->start_time + lock->timeout - + current_time; + if (!lock->refreshing && time_remaining < LOCK_REFRESH) { + if (!refresh_lock(lock)) { + fprintf(stderr, + "Unable to refresh lock for %s\n", + lock->url); + aborted = 1; + return; } } - - lock->refreshing = 0; - curl_slist_free_all(dav_headers); - free(if_header); + lock = lock->next; } - - if (check_lock) - return check_lock->active; - else - return 0; } static void release_request(struct transfer_request *request) @@ -370,6 +657,10 @@ static void release_request(struct transfer_request *request) entry->next = entry->next->next; } + if (request->local_fileno != -1) + close(request->local_fileno); + if (request->local_stream) + fclose(request->local_stream); if (request->url != NULL) free(request->url); free(request); @@ -377,12 +668,16 @@ static void release_request(struct 
transfer_request *request) static void finish_request(struct transfer_request *request) { - request->curl_result = request->slot->curl_result; + struct stat st; + struct packed_git *target; + struct packed_git **lst; + + request->curl_result = request->slot->curl_result; request->http_code = request->slot->http_code; request->slot = NULL; /* Keep locks active */ - refresh_lock(request->lock); + check_locks(); if (request->headers != NULL) curl_slist_free_all(request->headers); @@ -417,9 +712,9 @@ static void finish_request(struct transfer_request *request) } } else if (request->state == RUN_MOVE) { if (request->curl_result == CURLE_OK) { - fprintf(stderr, " sent %s\n", - sha1_to_hex(request->obj->sha1)); - request->state = COMPLETE; + if (push_verbosely) + fprintf(stderr, " sent %s\n", + sha1_to_hex(request->obj->sha1)); request->obj->flags |= REMOTE; release_request(request); } else { @@ -429,12 +724,73 @@ static void finish_request(struct transfer_request *request) request->state = ABORTED; aborted = 1; } + } else if (request->state == RUN_FETCH_LOOSE) { + fchmod(request->local_fileno, 0444); + close(request->local_fileno); request->local_fileno = -1; + + if (request->curl_result != CURLE_OK && + request->http_code != 416) { + if (stat(request->tmpfile, &st) == 0) { + if (st.st_size == 0) + unlink(request->tmpfile); + } + } else { + if (request->http_code == 416) + fprintf(stderr, "Warning: requested range invalid; we may already have all the data.\n"); + + inflateEnd(&request->stream); + SHA1_Final(request->real_sha1, &request->c); + if (request->zret != Z_STREAM_END) { + unlink(request->tmpfile); + } else if (memcmp(request->obj->sha1, request->real_sha1, 20)) { + unlink(request->tmpfile); + } else { + request->rename = + move_temp_to_file( + request->tmpfile, + request->filename); + if (request->rename == 0) { + request->obj->flags |= (LOCAL | REMOTE); + } + } + } + + /* Try fetching packed if necessary */ + if (request->obj->flags & LOCAL) + release_request(request); + else + start_fetch_packed(request); + + } else if (request->state == RUN_FETCH_PACKED) { + if (request->curl_result != CURLE_OK) { + fprintf(stderr, "Unable to get pack file %s\n%s", + request->url, curl_errorstr); + remote->can_update_info_refs = 0; + } else { + fclose(request->local_stream); + request->local_stream = NULL; + if (!move_temp_to_file(request->tmpfile, + request->filename)) { + target = (struct packed_git *)request->userData; + lst = &remote->packs; + while (*lst != target) + lst = &((*lst)->next); + *lst = (*lst)->next; + + if (!verify_pack(target, 0)) + install_packed_git(target); + else + remote->can_update_info_refs = 0; + } + } + release_request(request); } } void fill_active_slots(void) { struct transfer_request *request = request_queue_head; + struct transfer_request *next; struct active_request_slot *slot = active_queue_head; int num_transfers; @@ -442,7 +798,10 @@ void fill_active_slots(void) return; while (active_requests < max_requests && request != NULL) { - if (pushing && request->state == NEED_PUSH) { + next = request->next; + if (request->state == NEED_FETCH) { + start_fetch_loose(request); + } else if (pushing && request->state == NEED_PUSH) { if (remote_dir_exists[request->obj->sha1[0]] == 1) { start_put(request); } else { @@ -450,7 +809,7 @@ void fill_active_slots(void) } curl_multi_perform(curlm, &num_transfers); } - request = request->next; + request = next; } while (slot != NULL) { @@ -464,11 +823,45 @@ void fill_active_slots(void) static void get_remote_object_list(unsigned char 
parent); -static void add_request(struct object *obj, struct remote_lock *lock) +static void add_fetch_request(struct object *obj) +{ + struct transfer_request *request; + + check_locks(); + + /* + * Don't fetch the object if it's known to exist locally + * or is already in the request queue + */ + if (remote_dir_exists[obj->sha1[0]] == -1) + get_remote_object_list(obj->sha1[0]); + if (obj->flags & (LOCAL | FETCHING)) + return; + + obj->flags |= FETCHING; + request = xmalloc(sizeof(*request)); + request->obj = obj; + request->url = NULL; + request->lock = NULL; + request->headers = NULL; + request->local_fileno = -1; + request->local_stream = NULL; + request->state = NEED_FETCH; + request->next = request_queue_head; + request_queue_head = request; + + fill_active_slots(); + step_active_slots(); +} + +static int add_send_request(struct object *obj, struct remote_lock *lock) { struct transfer_request *request = request_queue_head; struct packed_git *target; + /* Keep locks active */ + check_locks(); + /* * Don't push the object if it's known to exist on the remote * or is already in the request queue @@ -476,11 +869,11 @@ static void add_request(struct object *obj, struct remote_lock *lock) if (remote_dir_exists[obj->sha1[0]] == -1) get_remote_object_list(obj->sha1[0]); if (obj->flags & (REMOTE | PUSHING)) - return; + return 0; target = find_sha1_pack(obj->sha1, remote->packs); if (target) { obj->flags |= REMOTE; - return; + return 0; } obj->flags |= PUSHING; @@ -489,12 +882,16 @@ static void add_request(struct object *obj, struct remote_lock *lock) request->url = NULL; request->lock = lock; request->headers = NULL; + request->local_fileno = -1; + request->local_stream = NULL; request->state = NEED_PUSH; request->next = request_queue_head; request_queue_head = request; fill_active_slots(); step_active_slots(); + + return 1; } static int fetch_index(unsigned char *sha1) @@ -509,16 +906,18 @@ static int fetch_index(unsigned char *sha1) FILE *indexfile; struct active_request_slot *slot; + struct slot_results results; /* Don't use the index if the pack isn't there */ - url = xmalloc(strlen(remote->url) + 65); - sprintf(url, "%s/objects/pack/pack-%s.pack", remote->url, hex); + url = xmalloc(strlen(remote->url) + 64); + sprintf(url, "%sobjects/pack/pack-%s.pack", remote->url, hex); slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_URL, url); curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 1); if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result != CURLE_OK) { + if (results.curl_result != CURLE_OK) { free(url); return error("Unable to verify pack %s is available", hex); @@ -532,9 +931,9 @@ static int fetch_index(unsigned char *sha1) if (push_verbosely) fprintf(stderr, "Getting index for pack %s\n", hex); - - sprintf(url, "%s/objects/pack/pack-%s.idx", remote->url, hex); - + + sprintf(url, "%sobjects/pack/pack-%s.idx", remote->url, hex); + filename = sha1_pack_index_name(sha1); snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename); indexfile = fopen(tmpfile, "a"); @@ -543,6 +942,7 @@ static int fetch_index(unsigned char *sha1) filename); slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0); curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile); @@ -566,7 +966,7 @@ static int fetch_index(unsigned char *sha1) if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result != CURLE_OK) { + if (results.curl_result != CURLE_OK) { 
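Both the loose-object and pack downloads above append to a `.temp` file and then ask the server only for the missing suffix; expressed with the curl command-line client against a made-up URL, and assuming the server honours Range requests (the patch separately copes with a 416 reply when the range is no longer valid), the resumption step is roughly:

#!/bin/sh
# Sketch: resume a partial download the way the C code does, by requesting
# only the missing byte range.  URL and file names are placeholders.
url=http://example.com/repo.git/objects/pack/pack-1234.idx
tmp=pack-1234.idx.temp
if test -s "$tmp"
then
	offset=$(wc -c <"$tmp" | tr -d ' ')
	# same effect as CURLOPT_HTTPHEADER with "Range: bytes=<offset>-"
	curl -s -H "Range: bytes=$offset-" "$url" >>"$tmp"
else
	curl -s -o "$tmp" "$url"
fi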
free(url); fclose(indexfile); return error("Unable to get pack index %s\n%s", url, @@ -606,6 +1006,7 @@ static int fetch_indices(void) int i = 0; struct active_request_slot *slot; + struct slot_results results; data = xmalloc(4096); memset(data, 0, 4096); @@ -615,21 +1016,22 @@ static int fetch_indices(void) if (push_verbosely) fprintf(stderr, "Getting pack list\n"); - - url = xmalloc(strlen(remote->url) + 21); - sprintf(url, "%s/objects/info/packs", remote->url); + + url = xmalloc(strlen(remote->url) + 20); + sprintf(url, "%sobjects/info/packs", remote->url); slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_URL, url); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL); if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result != CURLE_OK) { + if (results.curl_result != CURLE_OK) { free(buffer.buffer); free(url); - if (slot->http_code == 404) + if (results.http_code == 404) return 0; else return error("%s", curl_errorstr); @@ -716,20 +1118,22 @@ int fetch_ref(char *ref, unsigned char *sha1) struct buffer buffer; char *base = remote->url; struct active_request_slot *slot; + struct slot_results results; buffer.size = 41; buffer.posn = 0; buffer.buffer = hex; hex[41] = '\0'; - + url = quote_ref_url(base, ref); slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL); curl_easy_setopt(slot->curl, CURLOPT_URL, url); if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result != CURLE_OK) + if (results.curl_result != CURLE_OK) return error("Couldn't get %s for %s\n%s", url, ref, curl_errorstr); } else { @@ -803,55 +1207,6 @@ static void handle_new_lock_ctx(struct xml_ctx *ctx, int tag_closed) } static void one_remote_ref(char *refname); -static void crawl_remote_refs(char *path); - -static void handle_crawl_ref_ctx(struct xml_ctx *ctx, int tag_closed) -{ - struct remote_dentry *dentry = (struct remote_dentry *)ctx->userData; - - - if (tag_closed) { - if (!strcmp(ctx->name, DAV_PROPFIND_RESP) && dentry->name) { - if (dentry->is_dir) { - if (strcmp(dentry->name, dentry->base)) { - crawl_remote_refs(dentry->name); - } - } else { - one_remote_ref(dentry->name); - } - } else if (!strcmp(ctx->name, DAV_PROPFIND_NAME) && ctx->cdata) { - dentry->name = xmalloc(strlen(ctx->cdata) - - remote->path_len + 1); - strcpy(dentry->name, - ctx->cdata + remote->path_len); - } else if (!strcmp(ctx->name, DAV_PROPFIND_COLLECTION)) { - dentry->is_dir = 1; - } - } else if (!strcmp(ctx->name, DAV_PROPFIND_RESP)) { - dentry->name = NULL; - dentry->is_dir = 0; - } -} - -static void handle_remote_object_list_ctx(struct xml_ctx *ctx, int tag_closed) -{ - char *path; - char *obj_hex; - - if (tag_closed) { - if (!strcmp(ctx->name, DAV_PROPFIND_NAME) && ctx->cdata) { - path = ctx->cdata + remote->path_len; - if (strlen(path) != 50) - return; - path += 9; - obj_hex = xmalloc(strlen(path)); - strncpy(obj_hex, path, 2); - strcpy(obj_hex + 2, path + 3); - one_remote_object(obj_hex); - free(obj_hex); - } - } -} static void xml_start_tag(void *userData, const char *name, const char **atts) @@ -913,6 +1268,7 @@ xml_cdata(void *userData, const XML_Char *s, int len) static struct remote_lock *lock_remote(char *path, long timeout) { struct active_request_slot 
*slot; + struct slot_results results; struct buffer out_buffer; struct buffer in_buffer; char *out_data; @@ -920,7 +1276,7 @@ static struct remote_lock *lock_remote(char *path, long timeout) char *url; char *ep; char timeout_header[25]; - struct remote_lock *lock = remote_locks; + struct remote_lock *lock = NULL; XML_Parser parser = XML_ParserCreate(NULL); enum XML_Status result; struct curl_slist *dav_headers = NULL; @@ -929,31 +1285,20 @@ static struct remote_lock *lock_remote(char *path, long timeout) url = xmalloc(strlen(remote->url) + strlen(path) + 1); sprintf(url, "%s%s", remote->url, path); - /* Make sure the url is not already locked */ - while (lock && strcmp(lock->url, url)) { - lock = lock->next; - } - if (lock) { - free(url); - if (refresh_lock(lock)) - return lock; - else - return NULL; - } - /* Make sure leading directories exist for the remote ref */ ep = strchr(url + strlen(remote->url) + 11, '/'); while (ep) { *ep = 0; slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); curl_easy_setopt(slot->curl, CURLOPT_URL, url); curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_MKCOL); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result != CURLE_OK && - slot->http_code != 405) { + if (results.curl_result != CURLE_OK && + results.http_code != 405) { fprintf(stderr, "Unable to create branch path %s\n", url); @@ -961,7 +1306,7 @@ static struct remote_lock *lock_remote(char *path, long timeout) return NULL; } } else { - fprintf(stderr, "Unable to start request\n"); + fprintf(stderr, "Unable to start MKCOL request\n"); free(url); return NULL; } @@ -985,6 +1330,7 @@ static struct remote_lock *lock_remote(char *path, long timeout) dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); @@ -996,14 +1342,11 @@ static struct remote_lock *lock_remote(char *path, long timeout) curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); lock = xcalloc(1, sizeof(*lock)); - lock->owner = NULL; - lock->token = NULL; lock->timeout = -1; - lock->refreshing = 0; if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result == CURLE_OK) { + if (results.curl_result == CURLE_OK) { ctx.name = xcalloc(10, 1); ctx.len = 0; ctx.cdata = NULL; @@ -1024,7 +1367,7 @@ static struct remote_lock *lock_remote(char *path, long timeout) } } } else { - fprintf(stderr, "Unable to start request\n"); + fprintf(stderr, "Unable to start LOCK request\n"); } curl_slist_free_all(dav_headers); @@ -1041,10 +1384,9 @@ static struct remote_lock *lock_remote(char *path, long timeout) lock = NULL; } else { lock->url = url; - lock->active = 1; lock->start_time = time(NULL); - lock->next = remote_locks; - remote_locks = lock; + lock->next = remote->locks; + remote->locks = lock; } return lock; @@ -1053,6 +1395,8 @@ static struct remote_lock *lock_remote(char *path, long timeout) static int unlock_remote(struct remote_lock *lock) { struct active_request_slot *slot; + struct slot_results results; + struct remote_lock *prev = remote->locks; char *lock_token_header; struct curl_slist *dav_headers = NULL; int rc = 0; @@ -1063,6 +1407,7 @@ static int unlock_remote(struct remote_lock *lock) dav_headers = 
curl_slist_append(dav_headers, lock_token_header); slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); curl_easy_setopt(slot->curl, CURLOPT_URL, lock->url); curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_UNLOCK); @@ -1070,107 +1415,115 @@ static int unlock_remote(struct remote_lock *lock) if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result == CURLE_OK) + if (results.curl_result == CURLE_OK) rc = 1; else - fprintf(stderr, "Got HTTP error %ld\n", - slot->http_code); + fprintf(stderr, "UNLOCK HTTP error %ld\n", + results.http_code); } else { - fprintf(stderr, "Unable to start request\n"); + fprintf(stderr, "Unable to start UNLOCK request\n"); } curl_slist_free_all(dav_headers); free(lock_token_header); - lock->active = 0; + if (remote->locks == lock) { + remote->locks = lock->next; + } else { + while (prev && prev->next != lock) + prev = prev->next; + if (prev) + prev->next = prev->next->next; + } + + if (lock->owner != NULL) + free(lock->owner); + free(lock->url); + free(lock->token); + free(lock); return rc; } -static void crawl_remote_refs(char *path) -{ - char *url; - struct active_request_slot *slot; - struct buffer in_buffer; - struct buffer out_buffer; - char *in_data; - char *out_data; - XML_Parser parser = XML_ParserCreate(NULL); - enum XML_Status result; - struct curl_slist *dav_headers = NULL; - struct xml_ctx ctx; - struct remote_dentry dentry; - - fprintf(stderr, " %s\n", path); +static void remote_ls(const char *path, int flags, + void (*userFunc)(struct remote_ls_ctx *ls), + void *userData); - dentry.base = path; - dentry.name = NULL; - dentry.is_dir = 0; - - url = xmalloc(strlen(remote->url) + strlen(path) + 1); - sprintf(url, "%s%s", remote->url, path); +static void process_ls_object(struct remote_ls_ctx *ls) +{ + unsigned int *parent = (unsigned int *)ls->userData; + char *path = ls->dentry_name; + char *obj_hex; - out_buffer.size = strlen(PROPFIND_ALL_REQUEST); - out_data = xmalloc(out_buffer.size + 1); - snprintf(out_data, out_buffer.size + 1, PROPFIND_ALL_REQUEST); - out_buffer.posn = 0; - out_buffer.buffer = out_data; + if (!strcmp(ls->path, ls->dentry_name) && (ls->flags & IS_DIR)) { + remote_dir_exists[*parent] = 1; + return; + } - in_buffer.size = 4096; - in_data = xmalloc(in_buffer.size); - in_buffer.posn = 0; - in_buffer.buffer = in_data; + if (strlen(path) != 49) + return; + path += 8; + obj_hex = xmalloc(strlen(path)); + strncpy(obj_hex, path, 2); + strcpy(obj_hex + 2, path + 3); + one_remote_object(obj_hex); + free(obj_hex); +} - dav_headers = curl_slist_append(dav_headers, "Depth: 1"); - dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); +static void process_ls_ref(struct remote_ls_ctx *ls) +{ + if (!strcmp(ls->path, ls->dentry_name) && (ls->dentry_flags & IS_DIR)) { + fprintf(stderr, " %s\n", ls->dentry_name); + return; + } - slot = get_active_slot(); - curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); - curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); - curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); - curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer); - curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); - curl_easy_setopt(slot->curl, CURLOPT_URL, url); - curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); - curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PROPFIND); - curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + if (!(ls->dentry_flags & IS_DIR)) + 
one_remote_ref(ls->dentry_name); +} - if (start_active_slot(slot)) { - run_active_slot(slot); - if (slot->curl_result == CURLE_OK) { - ctx.name = xcalloc(10, 1); - ctx.len = 0; - ctx.cdata = NULL; - ctx.userFunc = handle_crawl_ref_ctx; - ctx.userData = &dentry; - XML_SetUserData(parser, &ctx); - XML_SetElementHandler(parser, xml_start_tag, - xml_end_tag); - XML_SetCharacterDataHandler(parser, xml_cdata); - result = XML_Parse(parser, in_buffer.buffer, - in_buffer.posn, 1); - free(ctx.name); +static void handle_remote_ls_ctx(struct xml_ctx *ctx, int tag_closed) +{ + struct remote_ls_ctx *ls = (struct remote_ls_ctx *)ctx->userData; - if (result != XML_STATUS_OK) { - fprintf(stderr, "XML error: %s\n", - XML_ErrorString( - XML_GetErrorCode(parser))); + if (tag_closed) { + if (!strcmp(ctx->name, DAV_PROPFIND_RESP) && ls->dentry_name) { + if (ls->dentry_flags & IS_DIR) { + if (ls->flags & PROCESS_DIRS) { + ls->userFunc(ls); + } + if (strcmp(ls->dentry_name, ls->path) && + ls->flags & RECURSIVE) { + remote_ls(ls->dentry_name, + ls->flags, + ls->userFunc, + ls->userData); + } + } else if (ls->flags & PROCESS_FILES) { + ls->userFunc(ls); } + } else if (!strcmp(ctx->name, DAV_PROPFIND_NAME) && ctx->cdata) { + ls->dentry_name = xmalloc(strlen(ctx->cdata) - + remote->path_len + 1); + strcpy(ls->dentry_name, ctx->cdata + remote->path_len); + } else if (!strcmp(ctx->name, DAV_PROPFIND_COLLECTION)) { + ls->dentry_flags |= IS_DIR; } - } else { - fprintf(stderr, "Unable to start request\n"); + } else if (!strcmp(ctx->name, DAV_PROPFIND_RESP)) { + if (ls->dentry_name) { + free(ls->dentry_name); + } + ls->dentry_name = NULL; + ls->dentry_flags = 0; } - - free(url); - free(out_data); - free(in_buffer.buffer); - curl_slist_free_all(dav_headers); } -static void get_remote_object_list(unsigned char parent) +static void remote_ls(const char *path, int flags, + void (*userFunc)(struct remote_ls_ctx *ls), + void *userData) { - char *url; + char *url = xmalloc(strlen(remote->url) + strlen(path) + 1); struct active_request_slot *slot; + struct slot_results results; struct buffer in_buffer; struct buffer out_buffer; char *in_data; @@ -1179,13 +1532,15 @@ static void get_remote_object_list(unsigned char parent) enum XML_Status result; struct curl_slist *dav_headers = NULL; struct xml_ctx ctx; - char path[] = "/objects/XX/"; - static const char hex[] = "0123456789abcdef"; - unsigned int val = parent; + struct remote_ls_ctx ls; + + ls.flags = flags; + ls.path = strdup(path); + ls.dentry_name = NULL; + ls.dentry_flags = 0; + ls.userData = userData; + ls.userFunc = userFunc; - path[9] = hex[val >> 4]; - path[10] = hex[val & 0xf]; - url = xmalloc(strlen(remote->url) + strlen(path) + 1); sprintf(url, "%s%s", remote->url, path); out_buffer.size = strlen(PROPFIND_ALL_REQUEST); @@ -1203,6 +1558,7 @@ static void get_remote_object_list(unsigned char parent) dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); @@ -1215,12 +1571,12 @@ static void get_remote_object_list(unsigned char parent) if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result == CURLE_OK) { - remote_dir_exists[parent] = 1; + if (results.curl_result == CURLE_OK) { ctx.name = xcalloc(10, 1); ctx.len = 0; ctx.cdata = NULL; - ctx.userFunc = handle_remote_object_list_ctx; + ctx.userFunc 
= handle_remote_ls_ctx; + ctx.userData = &ls; XML_SetUserData(parser, &ctx); XML_SetElementHandler(parser, xml_start_tag, xml_end_tag); @@ -1234,22 +1590,35 @@ static void get_remote_object_list(unsigned char parent) XML_ErrorString( XML_GetErrorCode(parser))); } - } else { - remote_dir_exists[parent] = 0; } } else { - fprintf(stderr, "Unable to start request\n"); + fprintf(stderr, "Unable to start PROPFIND request\n"); } + free(ls.path); free(url); free(out_data); free(in_buffer.buffer); curl_slist_free_all(dav_headers); } +static void get_remote_object_list(unsigned char parent) +{ + char path[] = "objects/XX/"; + static const char hex[] = "0123456789abcdef"; + unsigned int val = parent; + + path[8] = hex[val >> 4]; + path[9] = hex[val & 0xf]; + remote_dir_exists[val] = 0; + remote_ls(path, (PROCESS_FILES | PROCESS_DIRS), + process_ls_object, &val); +} + static int locking_available(void) { struct active_request_slot *slot; + struct slot_results results; struct buffer in_buffer; struct buffer out_buffer; char *in_data; @@ -1276,8 +1645,9 @@ static int locking_available(void) dav_headers = curl_slist_append(dav_headers, "Depth: 0"); dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); - + slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); @@ -1290,7 +1660,7 @@ static int locking_available(void) if (start_active_slot(slot)) { run_active_slot(slot); - if (slot->curl_result == CURLE_OK) { + if (results.curl_result == CURLE_OK) { ctx.name = xcalloc(10, 1); ctx.len = 0; ctx.cdata = NULL; @@ -1311,7 +1681,7 @@ static int locking_available(void) } } } else { - fprintf(stderr, "Unable to start request\n"); + fprintf(stderr, "Unable to start PROPFIND request\n"); } free(out_data); @@ -1372,16 +1742,17 @@ static struct object_list **process_tree(struct tree *tree, return p; } -static void get_delta(struct rev_info *revs, struct remote_lock *lock) +static int get_delta(struct rev_info *revs, struct remote_lock *lock) { struct commit *commit; struct object_list **p = &objects, *pending; + int count = 0; while ((commit = get_revision(revs)) != NULL) { p = process_tree(commit->tree, p, NULL, ""); commit->object.flags |= LOCAL; if (!(commit->object.flags & UNINTERESTING)) - add_request(&commit->object, lock); + count += add_send_request(&commit->object, lock); } for (pending = revs->pending_objects; pending; pending = pending->next) { @@ -1408,14 +1779,17 @@ static void get_delta(struct rev_info *revs, struct remote_lock *lock) while (objects) { if (!(objects->item->flags & UNINTERESTING)) - add_request(objects->item, lock); + count += add_send_request(objects->item, lock); objects = objects->next; } + + return count; } static int update_remote(unsigned char *sha1, struct remote_lock *lock) { struct active_request_slot *slot; + struct slot_results results; char *out_data; char *if_header; struct buffer out_buffer; @@ -1437,6 +1811,7 @@ static int update_remote(unsigned char *sha1, struct remote_lock *lock) out_buffer.buffer = out_data; slot = get_active_slot(); + slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); @@ -1451,10 +1826,10 @@ static int update_remote(unsigned char *sha1, struct remote_lock *lock) run_active_slot(slot); 
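The PUT issued by update_remote is guarded by the lock obtained earlier, passed back as an `If:` header; an equivalent request written with the curl command-line client, using a placeholder token, object name and URL rather than anything from the patch, would be approximately:

#!/bin/sh
# Sketch: rewrite a ref on the WebDAV remote while holding the lock, as
# update_remote does with DAV_PUT and an "If:" header.
token=12345678-feed-beef-cafe-0123456789ab
sha1=0123456789abcdef0123456789abcdef01234567
echo "$sha1" |
curl -s -T - \
	-H "If: (<opaquelocktoken:$token>)" \
	http://example.com/repo.git/refs/heads/master

Without the matching token the server is expected to refuse the write, which is what keeps two pushers from clobbering the same ref.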
free(out_data); free(if_header); - if (slot->curl_result != CURLE_OK) { + if (results.curl_result != CURLE_OK) { fprintf(stderr, "PUT error: curl result=%d, HTTP code=%ld\n", - slot->curl_result, slot->http_code); + results.curl_result, results.http_code); /* We should attempt recovery? */ return 0; } @@ -1487,6 +1862,7 @@ static void one_remote_ref(char *refname) { struct ref *ref; unsigned char remote_sha1[20]; + struct object *obj; if (fetch_ref(refname, remote_sha1) != 0) { fprintf(stderr, @@ -1495,6 +1871,19 @@ static void one_remote_ref(char *refname) return; } + /* + * Fetch a copy of the object if it doesn't exist locally - it + * may be required for updating server info later. + */ + if (remote->can_update_info_refs && !has_sha1_file(remote_sha1)) { + obj = lookup_unknown_object(remote_sha1); + if (obj) { + fprintf(stderr, " fetch %s for %s\n", + sha1_to_hex(remote_sha1), refname); + add_fetch_request(obj); + } + } + int len = strlen(refname) + 1; ref = xcalloc(1, sizeof(*ref) + len); memcpy(ref->old_sha1, remote_sha1, 20); @@ -1512,7 +1901,7 @@ static void get_local_heads(void) static void get_dav_remote_heads(void) { remote_tail = &remote_refs; - crawl_remote_refs("refs/"); + remote_ls("refs/", (PROCESS_FILES | PROCESS_DIRS | RECURSIVE), process_ls_ref, NULL); } static int is_zero_sha1(const unsigned char *sha1) @@ -1600,24 +1989,335 @@ static void mark_edges_uninteresting(struct commit_list *list) } } +static void add_remote_info_ref(struct remote_ls_ctx *ls) +{ + struct buffer *buf = (struct buffer *)ls->userData; + unsigned char remote_sha1[20]; + struct object *o; + int len; + char *ref_info; + + if (fetch_ref(ls->dentry_name, remote_sha1) != 0) { + fprintf(stderr, + "Unable to fetch ref %s from %s\n", + ls->dentry_name, remote->url); + aborted = 1; + return; + } + + o = parse_object(remote_sha1); + if (!o) { + fprintf(stderr, + "Unable to parse object %s for remote ref %s\n", + sha1_to_hex(remote_sha1), ls->dentry_name); + aborted = 1; + return; + } + + len = strlen(ls->dentry_name) + 42; + ref_info = xcalloc(len + 1, 1); + sprintf(ref_info, "%s %s\n", + sha1_to_hex(remote_sha1), ls->dentry_name); + fwrite_buffer(ref_info, 1, len, buf); + free(ref_info); + + if (o->type == tag_type) { + o = deref_tag(o, ls->dentry_name, 0); + if (o) { + len = strlen(ls->dentry_name) + 45; + ref_info = xcalloc(len + 1, 1); + sprintf(ref_info, "%s %s^{}\n", + sha1_to_hex(o->sha1), ls->dentry_name); + fwrite_buffer(ref_info, 1, len, buf); + free(ref_info); + } + } +} + +static void update_remote_info_refs(struct remote_lock *lock) +{ + struct buffer buffer; + struct active_request_slot *slot; + struct slot_results results; + char *if_header; + struct curl_slist *dav_headers = NULL; + + buffer.buffer = xmalloc(4096); + memset(buffer.buffer, 0, 4096); + buffer.size = 4096; + buffer.posn = 0; + remote_ls("refs/", (PROCESS_FILES | RECURSIVE), + add_remote_info_ref, &buffer); + if (!aborted) { + if_header = xmalloc(strlen(lock->token) + 25); + sprintf(if_header, "If: (<opaquelocktoken:%s>)", lock->token); + dav_headers = curl_slist_append(dav_headers, if_header); + + slot = get_active_slot(); + slot->results = &results; + curl_easy_setopt(slot->curl, CURLOPT_INFILE, &buffer); + curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, buffer.posn); + curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PUT); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, 
dav_headers); + curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); + curl_easy_setopt(slot->curl, CURLOPT_PUT, 1); + curl_easy_setopt(slot->curl, CURLOPT_URL, lock->url); + + buffer.posn = 0; + + if (start_active_slot(slot)) { + run_active_slot(slot); + if (results.curl_result != CURLE_OK) { + fprintf(stderr, + "PUT error: curl result=%d, HTTP code=%ld\n", + results.curl_result, results.http_code); + } + } + free(if_header); + } + free(buffer.buffer); +} + +static int remote_exists(const char *path) +{ + char *url = xmalloc(strlen(remote->url) + strlen(path) + 1); + struct active_request_slot *slot; + struct slot_results results; + + sprintf(url, "%s%s", remote->url, path); + + slot = get_active_slot(); + slot->results = &results; + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 1); + + if (start_active_slot(slot)) { + run_active_slot(slot); + if (results.http_code == 404) + return 0; + else if (results.curl_result == CURLE_OK) + return 1; + else + fprintf(stderr, "HEAD HTTP error %ld\n", results.http_code); + } else { + fprintf(stderr, "Unable to start HEAD request\n"); + } + + return -1; +} + +static void fetch_symref(char *path, char **symref, unsigned char *sha1) +{ + char *url; + struct buffer buffer; + struct active_request_slot *slot; + struct slot_results results; + + url = xmalloc(strlen(remote->url) + strlen(path) + 1); + sprintf(url, "%s%s", remote->url, path); + + buffer.size = 4096; + buffer.posn = 0; + buffer.buffer = xmalloc(buffer.size); + + slot = get_active_slot(); + slot->results = &results; + curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + if (start_active_slot(slot)) { + run_active_slot(slot); + if (results.curl_result != CURLE_OK) { + die("Couldn't get %s for remote symref\n%s", + url, curl_errorstr); + } + } else { + die("Unable to start remote symref request"); + } + free(url); + + if (*symref != NULL) + free(*symref); + *symref = NULL; + memset(sha1, 0, 20); + + if (buffer.posn == 0) + return; + + /* If it's a symref, set the refname; otherwise try for a sha1 */ + if (!strncmp((char *)buffer.buffer, "ref: ", 5)) { + *symref = xcalloc(buffer.posn - 5, 1); + strncpy(*symref, (char *)buffer.buffer + 5, buffer.posn - 6); + } else { + get_sha1_hex(buffer.buffer, sha1); + } + + free(buffer.buffer); +} + +static int verify_merge_base(unsigned char *head_sha1, unsigned char *branch_sha1) +{ + int pipe_fd[2]; + pid_t merge_base_pid; + char line[PATH_MAX + 20]; + unsigned char merge_sha1[20]; + int verified = 0; + + if (pipe(pipe_fd) < 0) + die("Verify merge base: pipe failed"); + + merge_base_pid = fork(); + if (!merge_base_pid) { + static const char *args[] = { + "merge-base", + "-a", + NULL, + NULL, + NULL + }; + args[2] = strdup(sha1_to_hex(head_sha1)); + args[3] = sha1_to_hex(branch_sha1); + + dup2(pipe_fd[1], 1); + close(pipe_fd[0]); + close(pipe_fd[1]); + execv_git_cmd(args); + die("merge-base setup failed"); + } + if (merge_base_pid < 0) + die("merge-base fork failed"); + + dup2(pipe_fd[0], 0); + close(pipe_fd[0]); + close(pipe_fd[1]); + while (fgets(line, sizeof(line), stdin) != NULL) { + if (get_sha1_hex(line, merge_sha1)) + die("expected sha1, got garbage:\n %s", line); + if (!memcmp(branch_sha1, merge_sha1, 20)) { + verified = 1; + break; + } + } + + return verified; +} + +static int delete_remote_branch(char *pattern, int force) +{ + 
struct ref *refs = remote_refs; + struct ref *remote_ref = NULL; + unsigned char head_sha1[20]; + char *symref = NULL; + int match; + int patlen = strlen(pattern); + int i; + struct active_request_slot *slot; + struct slot_results results; + char *url; + + /* Find the remote branch(es) matching the specified branch name */ + for (match = 0; refs; refs = refs->next) { + char *name = refs->name; + int namelen = strlen(name); + if (namelen < patlen || + memcmp(name + namelen - patlen, pattern, patlen)) + continue; + if (namelen != patlen && name[namelen - patlen - 1] != '/') + continue; + match++; + remote_ref = refs; + } + if (match == 0) + return error("No remote branch matches %s", pattern); + if (match != 1) + return error("More than one remote branch matches %s", + pattern); + + /* + * Remote HEAD must be a symref (not exactly foolproof; a remote + * symlink to a symref will look like a symref) + */ + fetch_symref("HEAD", &symref, head_sha1); + if (!symref) + return error("Remote HEAD is not a symref"); + + /* Remote branch must not be the remote HEAD */ + for (i=0; symref && i<MAXDEPTH; i++) { + if (!strcmp(remote_ref->name, symref)) + return error("Remote branch %s is the current HEAD", + remote_ref->name); + fetch_symref(symref, &symref, head_sha1); + } + + /* Run extra sanity checks if delete is not forced */ + if (!force) { + /* Remote HEAD must resolve to a known object */ + if (symref) + return error("Remote HEAD symrefs too deep"); + if (is_zero_sha1(head_sha1)) + return error("Unable to resolve remote HEAD"); + if (!has_sha1_file(head_sha1)) + return error("Remote HEAD resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", sha1_to_hex(head_sha1)); + + /* Remote branch must resolve to a known object */ + if (is_zero_sha1(remote_ref->old_sha1)) + return error("Unable to resolve remote branch %s", + remote_ref->name); + if (!has_sha1_file(remote_ref->old_sha1)) + return error("Remote branch %s resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", remote_ref->name, sha1_to_hex(remote_ref->old_sha1)); + + /* Remote branch must be an ancestor of remote HEAD */ + if (!verify_merge_base(head_sha1, remote_ref->old_sha1)) { + return error("The branch '%s' is not a strict subset of your current HEAD.\nIf you are sure you want to delete it, run:\n\t'git http-push -D %s %s'", remote_ref->name, remote->url, pattern); + } + } + + /* Send delete request */ + fprintf(stderr, "Removing remote branch '%s'\n", remote_ref->name); + url = xmalloc(strlen(remote->url) + strlen(remote_ref->name) + 1); + sprintf(url, "%s%s", remote->url, remote_ref->name); + slot = get_active_slot(); + slot->results = &results; + curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_DELETE); + if (start_active_slot(slot)) { + run_active_slot(slot); + free(url); + if (results.curl_result != CURLE_OK) + return error("DELETE request failed (%d/%ld)\n", + results.curl_result, results.http_code); + } else { + free(url); + return error("Unable to start DELETE request"); + } + + return 0; +} + int main(int argc, char **argv) { struct transfer_request *request; struct transfer_request *next_request; int nr_refspec = 0; char **refspec = NULL; - struct remote_lock *ref_lock; + struct remote_lock *ref_lock = NULL; + struct remote_lock *info_ref_lock = NULL; struct rev_info revs; + int delete_branch = 0; + int 
force_delete = 0; + int objects_to_send; int rc = 0; int i; setup_git_directory(); setup_ident(); - remote = xmalloc(sizeof(*remote)); - remote->url = NULL; - remote->path_len = 0; - remote->packs = NULL; + remote = xcalloc(sizeof(*remote), 1); argv++; for (i = 1; i < argc; i++, argv++) { @@ -1636,7 +2336,15 @@ int main(int argc, char **argv) push_verbosely = 1; continue; } - usage(http_push_usage); + if (!strcmp(arg, "-d")) { + delete_branch = 1; + continue; + } + if (!strcmp(arg, "-D")) { + delete_branch = 1; + force_delete = 1; + continue; + } } if (!remote->url) { remote->url = arg; @@ -1656,6 +2364,9 @@ int main(int argc, char **argv) if (!remote->url) usage(http_push_usage); + if (delete_branch && nr_refspec != 1) + die("You must specify only one branch name when deleting a remote branch"); + memset(remote_dir_exists, -1, 256); http_init(); @@ -1674,11 +2385,31 @@ int main(int argc, char **argv) goto cleanup; } + /* Check whether the remote has server info files */ + remote->can_update_info_refs = 0; + remote->has_info_refs = remote_exists("info/refs"); + remote->has_info_packs = remote_exists("objects/info/packs"); + if (remote->has_info_refs) { + info_ref_lock = lock_remote("info/refs", LOCK_TIME); + if (info_ref_lock) + remote->can_update_info_refs = 1; + } + if (remote->has_info_packs) + fetch_indices(); + /* Get a list of all local and remote heads to validate refspecs */ get_local_heads(); fprintf(stderr, "Fetching remote heads...\n"); get_dav_remote_heads(); + /* Remove a remote branch if -d or -D was specified */ + if (delete_branch) { + if (delete_remote_branch(refspec[0], force_delete) == -1) + fprintf(stderr, "Unable to delete remote branch %s\n", + refspec[0]); + goto cleanup; + } + /* match them up */ if (!remote_tail) remote_tail = &remote_refs; @@ -1690,7 +2421,6 @@ int main(int argc, char **argv) return 0; } - int ret = 0; int new_refs = 0; struct ref *ref; for (ref = remote_refs; ref; ref = ref->next) { @@ -1722,14 +2452,14 @@ int main(int argc, char **argv) "need to pull first?", ref->name, ref->peer_ref->name); - ret = -2; + rc = -2; continue; } } memcpy(ref->new_sha1, ref->peer_ref->new_sha1, 20); if (is_zero_sha1(ref->new_sha1)) { error("cannot happen anymore"); - ret = -3; + rc = -3; continue; } new_refs++; @@ -1752,23 +2482,20 @@ int main(int argc, char **argv) } /* Set up revision info for this refspec */ - const char *commit_argv[3]; - int commit_argc = 2; + const char *commit_argv[4]; + int commit_argc = 3; char *new_sha1_hex = strdup(sha1_to_hex(ref->new_sha1)); char *old_sha1_hex = NULL; - commit_argv[1] = new_sha1_hex; + commit_argv[1] = "--objects"; + commit_argv[2] = new_sha1_hex; if (!push_all && !is_zero_sha1(ref->old_sha1)) { old_sha1_hex = xmalloc(42); sprintf(old_sha1_hex, "^%s", sha1_to_hex(ref->old_sha1)); - commit_argv[2] = old_sha1_hex; + commit_argv[3] = old_sha1_hex; commit_argc++; } - revs.commits = NULL; setup_revisions(commit_argc, commit_argv, &revs, NULL); - revs.tag_objects = 1; - revs.tree_objects = 1; - revs.blob_objects = 1; free(new_sha1_hex); if (old_sha1_hex) { free(old_sha1_hex); @@ -1779,13 +2506,15 @@ int main(int argc, char **argv) pushing = 0; prepare_revision_walk(&revs); mark_edges_uninteresting(revs.commits); - fetch_indices(); - get_delta(&revs, ref_lock); + objects_to_send = get_delta(&revs, ref_lock); finish_all_active_slots(); /* Push missing objects to remote, this would be a convenient time to pack them first if appropriate. 
*/ pushing = 1; + if (objects_to_send) + fprintf(stderr, " sending %d objects\n", + objects_to_send); fill_active_slots(); finish_all_active_slots(); @@ -1799,7 +2528,20 @@ int main(int argc, char **argv) if (!rc) fprintf(stderr, " done\n"); unlock_remote(ref_lock); + check_locks(); + } + + /* Update remote server info if appropriate */ + if (remote->has_info_refs && new_refs) { + if (info_ref_lock && remote->can_update_info_refs) { + fprintf(stderr, "Updating remote server info\n"); + update_remote_info_refs(info_ref_lock); + } else { + fprintf(stderr, "Unable to update server info\n"); + } } + if (info_ref_lock) + unlock_remote(info_ref_lock); cleanup: free(remote); @@ -339,6 +339,7 @@ struct active_request_slot *get_active_slot(void) slot->in_use = 1; slot->local = NULL; slot->results = NULL; + slot->finished = NULL; slot->callback_data = NULL; slot->callback_func = NULL; curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, pragma_header); @@ -389,8 +390,10 @@ void run_active_slot(struct active_request_slot *slot) fd_set excfds; int max_fd; struct timeval select_timeout; + int finished = 0; - while (slot->in_use) { + slot->finished = &finished; + while (!finished) { data_received = 0; step_active_slots(); @@ -442,6 +445,9 @@ static void finish_active_slot(struct active_request_slot *slot) closedown_active_slot(slot); curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CODE, &slot->http_code); + if (slot->finished != NULL) + (*slot->finished) = 1; + /* Store slot results so they can be read after the slot is reused */ if (slot->results != NULL) { slot->results->curl_result = slot->curl_result; @@ -35,6 +35,7 @@ struct active_request_slot int in_use; CURLcode curl_result; long http_code; + int *finished; struct slot_results *results; void *callback_data; void (*callback_func)(void *data); diff --git a/imap-send.c b/imap-send.c index fddaac00c0..e33c78bff2 100644 --- a/imap-send.c +++ b/imap-send.c @@ -28,6 +28,7 @@ #include <netinet/in.h> #include <netinet/tcp.h> #include <arpa/inet.h> +#include <sys/socket.h> #include <netdb.h> typedef struct store_conf { @@ -371,7 +372,7 @@ free_generic_messages( message_t *msgs ) } static int -vasprintf( char **strp, const char *fmt, va_list ap ) +git_vasprintf( char **strp, const char *fmt, va_list ap ) { int len; char tmp[1024]; @@ -401,7 +402,7 @@ nfsnprintf( char *buf, int blen, const char *fmt, ... ) static int nfvasprintf( char **str, const char *fmt, va_list va ) { - int ret = vasprintf( str, fmt, va ); + int ret = git_vasprintf( str, fmt, va ); if (ret < 0) die( "Fatal: Out of memory\n"); return ret; @@ -945,7 +946,7 @@ imap_open_store( imap_server_conf_t *srvc ) _exit( 127 ); close( a[0] ); close( a[1] ); - execl( "/bin/sh", "sh", "-c", srvc->tunnel, 0 ); + execl( "/bin/sh", "sh", "-c", srvc->tunnel, NULL ); _exit( 127 ); } diff --git a/ls-files.c b/ls-files.c index df25c8c012..3a17e5d8e0 100644 --- a/ls-files.c +++ b/ls-files.c @@ -11,6 +11,7 @@ #include "cache.h" #include "quote.h" +static int abbrev = 0; static int show_deleted = 0; static int show_cached = 0; static int show_others = 0; @@ -92,11 +93,12 @@ static int add_excludes_from_file_1(const char *fname, close(fd); return 0; } - buf = xmalloc(size); + buf = xmalloc(size+1); if (read(fd, buf, size) != size) goto err; close(fd); + buf[size++] = '\n'; entry = buf; for (i = 0; i < size; i++) { if (buf[i] == '\n') { @@ -488,7 +490,8 @@ static void show_ce_entry(const char *tag, struct cache_entry *ce) printf("%s%06o %s %d\t", tag, ntohl(ce->ce_mode), - sha1_to_hex(ce->sha1), + abbrev ? 
find_unique_abbrev(ce->sha1,abbrev) + : sha1_to_hex(ce->sha1), ce_stage(ce)); write_name_quoted("", 0, ce->name + offset, line_terminator, stdout); @@ -629,7 +632,8 @@ static void verify_pathspec(void) static const char ls_files_usage[] = "git-ls-files [-z] [-t] [-v] (--[cached|deleted|others|stage|unmerged|killed|modified])* " "[ --ignored ] [--exclude=<pattern>] [--exclude-from=<file>] " - "[ --exclude-per-directory=<filename> ] [--full-name] [--] [<file>]*"; + "[ --exclude-per-directory=<filename> ] [--full-name] [--abbrev] " + "[--] [<file>]*"; int main(int argc, const char **argv) { @@ -736,6 +740,18 @@ int main(int argc, const char **argv) error_unmatch = 1; continue; } + if (!strncmp(arg, "--abbrev=", 9)) { + abbrev = strtoul(arg+9, NULL, 10); + if (abbrev && abbrev < MINIMUM_ABBREV) + abbrev = MINIMUM_ABBREV; + else if (abbrev > 40) + abbrev = 40; + continue; + } + if (!strcmp(arg, "--abbrev")) { + abbrev = DEFAULT_ABBREV; + continue; + } if (*arg == '-') usage(ls_files_usage); break; @@ -13,13 +13,14 @@ static int line_termination = '\n'; #define LS_TREE_ONLY 2 #define LS_SHOW_TREES 4 #define LS_NAME_ONLY 8 +static int abbrev = 0; static int ls_options = 0; const char **pathspec; static int chomp_prefix = 0; static const char *prefix; static const char ls_tree_usage[] = - "git-ls-tree [-d] [-r] [-t] [-z] [--name-only] [--name-status] [--full-name] <tree-ish> [path...]"; + "git-ls-tree [-d] [-r] [-t] [-z] [--name-only] [--name-status] [--full-name] [--abbrev[=<n>]] <tree-ish> [path...]"; static int show_recursive(const char *base, int baselen, const char *pathname) { @@ -73,7 +74,9 @@ static int show_tree(unsigned char *sha1, const char *base, int baselen, return 0; if (!(ls_options & LS_NAME_ONLY)) - printf("%06o %s %s\t", mode, type, sha1_to_hex(sha1)); + printf("%06o %s %s\t", mode, type, + abbrev ? find_unique_abbrev(sha1,abbrev) + : sha1_to_hex(sha1)); write_name_quoted(base + chomp_prefix, baselen - chomp_prefix, pathname, line_termination, stdout); @@ -113,6 +116,18 @@ int main(int argc, const char **argv) chomp_prefix = 0; break; } + if (!strncmp(argv[1]+2, "abbrev=",7)) { + abbrev = strtoul(argv[1]+9, NULL, 10); + if (abbrev && abbrev < MINIMUM_ABBREV) + abbrev = MINIMUM_ABBREV; + else if (abbrev > 40) + abbrev = 40; + break; + } + if (!strcmp(argv[1]+2, "abbrev")) { + abbrev = DEFAULT_ABBREV; + break; + } /* otherwise fallthru */ default: usage(ls_tree_usage); diff --git a/pack-check.c b/pack-check.c index eca32b6cab..84ed90d369 100644 --- a/pack-check.c +++ b/pack-check.c @@ -70,13 +70,17 @@ static int verify_packfile(struct packed_git *p) } +#define MAX_CHAIN 40 + static void show_pack_info(struct packed_git *p) { struct pack_header *hdr; int nr_objects, i; + unsigned int chain_histogram[MAX_CHAIN]; hdr = p->pack_base; nr_objects = ntohl(hdr->hdr_entries); + memset(chain_histogram, 0, sizeof(chain_histogram)); for (i = 0; i < nr_objects; i++) { unsigned char sha1[20], base_sha1[20]; @@ -97,11 +101,25 @@ static void show_pack_info(struct packed_git *p) printf("%s ", sha1_to_hex(sha1)); if (!delta_chain_length) printf("%-6s %lu %u\n", type, size, e.offset); - else + else { printf("%-6s %lu %u %u %s\n", type, size, e.offset, delta_chain_length, sha1_to_hex(base_sha1)); + if (delta_chain_length < MAX_CHAIN) + chain_histogram[delta_chain_length]++; + else + chain_histogram[0]++; + } } + for (i = 0; i < MAX_CHAIN; i++) { + if (!chain_histogram[i]) + continue; + printf("chain length %s %d: %d object%s\n", + i ? "=" : ">=", + i ? 
i : MAX_CHAIN, + chain_histogram[i], + 1 < chain_histogram[i] ? "s" : ""); + } } int verify_pack(struct packed_git *p, int verbose) diff --git a/pack-objects.c b/pack-objects.c index 136a7f5aad..49357c6735 100644 --- a/pack-objects.c +++ b/pack-objects.c @@ -32,9 +32,6 @@ struct object_entry { * be used as the base objectto delta huge * objects against. */ - int based_on_preferred; /* current delta candidate is a preferred - * one, or delta against a preferred one. - */ }; /* @@ -824,8 +821,6 @@ static int try_delta(struct unpacked *cur, struct unpacked *old, unsigned max_de { struct object_entry *cur_entry = cur->entry; struct object_entry *old_entry = old->entry; - int old_preferred = (old_entry->preferred_base || - old_entry->based_on_preferred); unsigned long size, oldsize, delta_size, sizediff; long max_size; void *delta_buf; @@ -867,27 +862,8 @@ static int try_delta(struct unpacked *cur, struct unpacked *old, unsigned max_de * delete). */ max_size = size / 2 - 20; - if (cur_entry->delta) { - if (cur_entry->based_on_preferred) { - if (old_preferred) - max_size = cur_entry->delta_size-1; - else - /* trying with non-preferred one when we - * already have a delta based on preferred - * one is pointless. - */ - return -1; - } - else if (!old_preferred) - max_size = cur_entry->delta_size-1; - else - /* otherwise... even if delta with a - * preferred one produces a bigger result than - * what we currently have, which is based on a - * non-preferred one, it is OK. - */ - ; - } + if (cur_entry->delta) + max_size = cur_entry->delta_size-1; if (sizediff >= max_size) return -1; delta_buf = diff_delta(old->data, oldsize, @@ -897,7 +873,6 @@ static int try_delta(struct unpacked *cur, struct unpacked *old, unsigned max_de cur_entry->delta = old_entry; cur_entry->delta_size = delta_size; cur_entry->depth = old_entry->depth + 1; - cur_entry->based_on_preferred = old_preferred; free(delta_buf); return 0; } @@ -966,6 +941,15 @@ static void find_deltas(struct object_entry **list, int window, int depth) if (try_delta(n, m, depth) < 0) break; } +#if 0 + /* if we made n a delta, and if n is already at max + * depth, leaving it in the window is pointless. we + * should evict it first. + * ... in theory only; somehow this makes things worse. 
+ */ + if (entry->delta && depth <= entry->depth) + continue; +#endif idx++; if (idx >= window) idx = 0; diff --git a/rev-list.c b/rev-list.c index 8e4d83efba..812d237f47 100644 --- a/rev-list.c +++ b/rev-list.c @@ -190,7 +190,7 @@ static int count_distance(struct commit_list *entry) if (commit->object.flags & (UNINTERESTING | COUNTED)) break; - if (!revs.paths || (commit->object.flags & TREECHANGE)) + if (!revs.prune_fn || (commit->object.flags & TREECHANGE)) nr++; commit->object.flags |= COUNTED; p = commit->parents; @@ -224,7 +224,7 @@ static struct commit_list *find_bisection(struct commit_list *list) nr = 0; p = list; while (p) { - if (!revs.paths || (p->item->object.flags & TREECHANGE)) + if (!revs.prune_fn || (p->item->object.flags & TREECHANGE)) nr++; p = p->next; } @@ -234,7 +234,7 @@ static struct commit_list *find_bisection(struct commit_list *list) for (p = list; p; p = p->next) { int distance; - if (revs.paths && !(p->item->object.flags & TREECHANGE)) + if (revs.prune_fn && !(p->item->object.flags & TREECHANGE)) continue; distance = count_distance(p); diff --git a/revision.c b/revision.c index c8d93ff106..12cd0529a5 100644 --- a/revision.c +++ b/revision.c @@ -199,31 +199,27 @@ static int everybody_uninteresting(struct commit_list *orig) return 1; } -#define TREE_SAME 0 -#define TREE_NEW 1 -#define TREE_DIFFERENT 2 -static int tree_difference = TREE_SAME; +static int tree_difference = REV_TREE_SAME; static void file_add_remove(struct diff_options *options, int addremove, unsigned mode, const unsigned char *sha1, const char *base, const char *path) { - int diff = TREE_DIFFERENT; + int diff = REV_TREE_DIFFERENT; /* - * Is it an add of a new file? It means that - * the old tree didn't have it at all, so we - * will turn "TREE_SAME" -> "TREE_NEW", but - * leave any "TREE_DIFFERENT" alone (and if - * it already was "TREE_NEW", we'll keep it - * "TREE_NEW" of course). + * Is it an add of a new file? It means that the old tree + * didn't have it at all, so we will turn "REV_TREE_SAME" -> + * "REV_TREE_NEW", but leave any "REV_TREE_DIFFERENT" alone + * (and if it already was "REV_TREE_NEW", we'll keep it + * "REV_TREE_NEW" of course). 
*/ if (addremove == '+') { diff = tree_difference; - if (diff != TREE_SAME) + if (diff != REV_TREE_SAME) return; - diff = TREE_NEW; + diff = REV_TREE_NEW; } tree_difference = diff; } @@ -234,7 +230,7 @@ static void file_change(struct diff_options *options, const unsigned char *new_sha1, const char *base, const char *path) { - tree_difference = TREE_DIFFERENT; + tree_difference = REV_TREE_DIFFERENT; } static struct diff_options diff_opt = { @@ -243,19 +239,19 @@ static struct diff_options diff_opt = { .change = file_change, }; -static int compare_tree(struct tree *t1, struct tree *t2) +int rev_compare_tree(struct tree *t1, struct tree *t2) { if (!t1) - return TREE_NEW; + return REV_TREE_NEW; if (!t2) - return TREE_DIFFERENT; - tree_difference = TREE_SAME; + return REV_TREE_DIFFERENT; + tree_difference = REV_TREE_SAME; if (diff_tree_sha1(t1->object.sha1, t2->object.sha1, "", &diff_opt) < 0) - return TREE_DIFFERENT; + return REV_TREE_DIFFERENT; return tree_difference; } -static int same_tree_as_empty(struct tree *t1) +int rev_same_tree_as_empty(struct tree *t1) { int retval; void *tree; @@ -288,7 +284,7 @@ static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit) return; if (!commit->parents) { - if (!same_tree_as_empty(commit->tree)) + if (!rev_same_tree_as_empty(commit->tree)) commit->object.flags |= TREECHANGE; return; } @@ -298,8 +294,8 @@ static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit) struct commit *p = parent->item; parse_commit(p); - switch (compare_tree(p->tree, commit->tree)) { - case TREE_SAME: + switch (rev_compare_tree(p->tree, commit->tree)) { + case REV_TREE_SAME: if (p->object.flags & UNINTERESTING) { /* Even if a merge with an uninteresting * side branch brought the entire change @@ -314,13 +310,22 @@ static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit) commit->parents = parent; return; - case TREE_NEW: - if (revs->remove_empty_trees && same_tree_as_empty(p->tree)) { - *pp = parent->next; - continue; + case REV_TREE_NEW: + if (revs->remove_empty_trees && + rev_same_tree_as_empty(p->tree)) { + /* We are adding all the specified + * paths from this parent, so the + * history beyond this parent is not + * interesting. Remove its parents + * (they are grandparents for us). + * IOW, we pretend this parent is a + * "root" commit. + */ + parse_commit(p); + p->parents = NULL; } /* fallthrough */ - case TREE_DIFFERENT: + case REV_TREE_DIFFERENT: tree_changed = 1; pp = &parent->next; continue; @@ -368,8 +373,8 @@ static void add_parents_to_list(struct rev_info *revs, struct commit *commit, st * simplify the commit history and find the parent * that has no differences in the path set if one exists. 
*/ - if (revs->paths) - try_to_simplify_commit(revs, commit); + if (revs->prune_fn) + revs->prune_fn(revs, commit); parent = commit->parents; while (parent) { @@ -391,9 +396,6 @@ static void limit_list(struct rev_info *revs) struct commit_list *newlist = NULL; struct commit_list **p = &newlist; - if (revs->paths) - diff_tree_setup_paths(revs->paths); - while (list) { struct commit_list *entry = list; struct commit *commit = list->item; @@ -445,6 +447,23 @@ static void handle_all(struct rev_info *revs, unsigned flags) for_each_ref(handle_one_ref); } +void init_revisions(struct rev_info *revs) +{ + memset(revs, 0, sizeof(*revs)); + revs->lifo = 1; + revs->dense = 1; + revs->prefix = setup_git_directory(); + revs->max_age = -1; + revs->min_age = -1; + revs->max_count = -1; + + revs->prune_fn = NULL; + revs->prune_data = NULL; + + revs->topo_setter = topo_sort_default_setter; + revs->topo_getter = topo_sort_default_getter; +} + /* * Parse revision information, filling in the "rev_info" structure, * and removing the used arguments from the argument list. @@ -458,13 +477,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, const ch const char **unrecognized = argv + 1; int left = 1; - memset(revs, 0, sizeof(*revs)); - revs->lifo = 1; - revs->dense = 1; - revs->prefix = setup_git_directory(); - revs->max_age = -1; - revs->min_age = -1; - revs->max_count = -1; + init_revisions(revs); /* First, search for "--" */ seen_dashdash = 0; @@ -474,7 +487,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, const ch continue; argv[i] = NULL; argc = i; - revs->paths = get_pathspec(revs->prefix, argv + i + 1); + revs->prune_data = get_pathspec(revs->prefix, argv + i + 1); seen_dashdash = 1; break; } @@ -638,7 +651,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, const ch if (lstat(argv[j], &st) < 0) die("'%s': %s", arg, strerror(errno)); } - revs->paths = get_pathspec(revs->prefix, argv + i); + revs->prune_data = get_pathspec(revs->prefix, argv + i); break; } commit = get_commit_reference(revs, arg, sha1, flags ^ local_flags); @@ -652,8 +665,13 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, const ch commit = get_commit_reference(revs, def, sha1, 0); add_one_commit(commit, revs); } - if (revs->paths) + + if (revs->prune_data) { + diff_tree_setup_paths(revs->prune_data); + revs->prune_fn = try_to_simplify_commit; revs->limited = 1; + } + return left; } @@ -663,7 +681,9 @@ void prepare_revision_walk(struct rev_info *revs) if (revs->limited) limit_list(revs); if (revs->topo_order) - sort_in_topological_order(&revs->commits, revs->lifo); + sort_in_topological_order_fn(&revs->commits, revs->lifo, + revs->topo_setter, + revs->topo_getter); } static int rewrite_one(struct commit **pp) @@ -719,7 +739,7 @@ struct commit *get_revision(struct rev_info *revs) return NULL; if (revs->no_merges && commit->parents && commit->parents->next) goto next; - if (revs->paths && revs->dense) { + if (revs->prune_fn && revs->dense) { if (!(commit->object.flags & TREECHANGE)) goto next; rewrite_parents(commit); diff --git a/revision.h b/revision.h index 31e8f61567..6c2becad13 100644 --- a/revision.h +++ b/revision.h @@ -7,6 +7,10 @@ #define SHOWN (1u<<3) #define TMP_MARK (1u<<4) /* for isolated cases; clean after use */ +struct rev_info; + +typedef void (prune_fn_t)(struct rev_info *revs, struct commit *commit); + struct rev_info { /* Starting list */ struct commit_list *commits; @@ -14,7 +18,8 @@ struct rev_info { /* Basic 
information */ const char *prefix; - const char **paths; + void *prune_data; + prune_fn_t *prune_fn; /* Traversal flags */ unsigned int dense:1, @@ -33,9 +38,20 @@ struct rev_info { int max_count; unsigned long max_age; unsigned long min_age; + + topo_sort_set_fn_t topo_setter; + topo_sort_get_fn_t topo_getter; }; +#define REV_TREE_SAME 0 +#define REV_TREE_NEW 1 +#define REV_TREE_DIFFERENT 2 + /* revision.c */ +extern int rev_same_tree_as_empty(struct tree *t1); +extern int rev_compare_tree(struct tree *t1, struct tree *t2); + +extern void init_revisions(struct rev_info *revs); extern int setup_revisions(int argc, const char **argv, struct rev_info *revs, const char *def); extern void prepare_revision_walk(struct rev_info *revs); extern struct commit *get_revision(struct rev_info *revs); diff --git a/sha1_file.c b/sha1_file.c index a80d849f15..58edec0bb6 100644 --- a/sha1_file.c +++ b/sha1_file.c @@ -973,6 +973,16 @@ static void *unpack_delta_entry(unsigned char *base_sha1, if (left < 20) die("truncated pack file"); + + /* The base entry _must_ be in the same pack */ + if (!find_pack_entry_one(base_sha1, &base_ent, p)) + die("failed to find delta-pack base object %s", + sha1_to_hex(base_sha1)); + base = unpack_entry_gently(&base_ent, type, &base_size); + if (!base) + die("failed to read delta-pack base object %s", + sha1_to_hex(base_sha1)); + data = base_sha1 + 20; data_size = left - 20; delta_data = xmalloc(delta_size); @@ -990,14 +1000,6 @@ static void *unpack_delta_entry(unsigned char *base_sha1, if ((st != Z_STREAM_END) || stream.total_out != delta_size) die("delta data unpack failed"); - /* The base entry _must_ be in the same pack */ - if (!find_pack_entry_one(base_sha1, &base_ent, p)) - die("failed to find delta-pack base object %s", - sha1_to_hex(base_sha1)); - base = unpack_entry_gently(&base_ent, type, &base_size); - if (!base) - die("failed to read delta-pack base object %s", - sha1_to_hex(base_sha1)); result = patch_delta(base, base_size, delta_data, delta_size, &result_size); diff --git a/sha1_name.c b/sha1_name.c index d67de18ba5..3adaec3167 100644 --- a/sha1_name.c +++ b/sha1_name.c @@ -235,14 +235,21 @@ static int ambiguous_path(const char *path, int len) static int get_sha1_basic(const char *str, int len, unsigned char *sha1) { - static const char *prefix[] = { - "", - "refs", - "refs/tags", - "refs/heads", + static const char *fmt[] = { + "/%.*s", + "refs/%.*s", + "refs/tags/%.*s", + "refs/heads/%.*s", + "refs/remotes/%.*s", + "refs/remotes/%.*s/HEAD", NULL }; const char **p; + const char *warning = "warning: refname '%.*s' is ambiguous.\n"; + char *pathname; + int already_found = 0; + unsigned char *this_result; + unsigned char sha1_from_ref[20]; if (len == 40 && !get_sha1_hex(str, sha1)) return 0; @@ -251,11 +258,22 @@ static int get_sha1_basic(const char *str, int len, unsigned char *sha1) if (ambiguous_path(str, len)) return -1; - for (p = prefix; *p; p++) { - char *pathname = git_path("%s/%.*s", *p, len, str); - if (!read_ref(pathname, sha1)) - return 0; + for (p = fmt; *p; p++) { + this_result = already_found ? 
sha1_from_ref : sha1; + pathname = git_path(*p, len, str); + if (!read_ref(pathname, this_result)) { + if (warn_ambiguous_refs) { + if (already_found && + !memcmp(sha1, sha1_from_ref, 20)) + fprintf(stderr, warning, len, str); + already_found++; + } + else + return 0; + } } + if (already_found) + return 0; return -1; } diff --git a/t/annotate-tests.sh b/t/annotate-tests.sh index 9c5a15a15e..114938c3ff 100644 --- a/t/annotate-tests.sh +++ b/t/annotate-tests.sh @@ -94,7 +94,7 @@ test_expect_success \ test_expect_success \ 'merge-setup part 4' \ 'echo "evil merge." >>file && - EDITOR=: git commit -a --amend' + EDITOR=: VISUAL=: git commit -a --amend' test_expect_success \ 'Two lines blamed on A, one on B, two on B1, one on B2, one on A U Thor' \ |
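The http.c and http.h hunks above give each request slot a `finished` pointer so that `run_active_slot()` waits on a flag owned by the caller instead of on `slot->in_use`, which may already have been handed to a new request by the time the waiter looks at it. Below is a minimal, libcurl-free sketch of that completion-flag pattern; `struct slot`, `finish_slot()` and `run_slot()` are invented names for illustration only, not git's API.

------------
#include <stdio.h>

/* Minimal stand-in for the request slot: only the fields the
 * completion-notification pattern needs.
 */
struct slot {
	int in_use;
	int *finished;	/* points at the waiter's local flag, if any */
};

/* Called when the transfer attached to the slot completes.  Setting
 * the waiter's local flag (rather than relying on in_use alone)
 * means the slot can be handed straight to a new request without the
 * original waiter spinning forever.
 */
static void finish_slot(struct slot *s)
{
	s->in_use = 0;
	if (s->finished)
		*s->finished = 1;
}

static void run_slot(struct slot *s)
{
	int finished = 0;

	s->finished = &finished;
	while (!finished) {
		/* real code would pump the transfer machinery here */
		finish_slot(s);	/* pretend the transfer completed */
	}
	s->finished = NULL;
}

int main(void)
{
	struct slot s = { 1, NULL };

	run_slot(&s);
	printf("slot done, in_use=%d\n", s.in_use);
	return 0;
}
------------

The point of the extra indirection is that completion is reported to whoever is currently waiting, even if the slot itself is immediately reused.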
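The imap-send.c hunk changes the `execl()` terminator from `0` to `NULL`. Because `execl()` is variadic, the compiler cannot convert the sentinel for you, and a bare `0` is an `int` that may be narrower than a pointer on 64-bit platforms. A tiny standalone sketch (the shell command is made up) using the fully portable `(char *)NULL` spelling:

------------
#include <unistd.h>
#include <stdio.h>

/* execl() takes a NULL-terminated argument list; the terminator must
 * be a null *pointer*, which is why (char *)NULL is the safest way
 * to spell it.
 */
int main(void)
{
	execl("/bin/sh", "sh", "-c", "echo hello from the tunnel sketch",
	      (char *)NULL);
	perror("execl");	/* only reached if exec fails */
	return 127;
}
------------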
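The ls-files.c and ls-tree.c hunks add an `--abbrev[=<n>]` option that switches the SHA-1 output from `sha1_to_hex()` to `find_unique_abbrev()` and clamps the requested length. The sketch below isolates just the argument parsing and clamping; `MINIMUM_ABBREV` and `DEFAULT_ABBREV` are given illustrative values here, whereas the real constants come from git's headers.

------------
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative values; the real constants live in git's cache.h. */
#define MINIMUM_ABBREV 4
#define DEFAULT_ABBREV 7

/* Parse "--abbrev" or "--abbrev=<n>" the way the new option does:
 * a bare --abbrev picks the default, an explicit length is clamped
 * to the [MINIMUM_ABBREV, 40] range, and 0 keeps full 40-hex output.
 */
static int parse_abbrev_arg(const char *arg, int current)
{
	if (!strcmp(arg, "--abbrev"))
		return DEFAULT_ABBREV;
	if (!strncmp(arg, "--abbrev=", 9)) {
		int n = strtoul(arg + 9, NULL, 10);
		if (n && n < MINIMUM_ABBREV)
			n = MINIMUM_ABBREV;
		else if (n > 40)
			n = 40;
		return n;
	}
	return current;	/* not an --abbrev option at all */
}

int main(void)
{
	const char *samples[] = {
		"--abbrev", "--abbrev=2", "--abbrev=64", "--abbrev=0"
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(*samples); i++)
		printf("%-12s -> %d\n", samples[i],
		       parse_abbrev_arg(samples[i], 0));
	return 0;
}
------------

Running it prints the effective abbreviation length for each sample argument: `--abbrev=2` is raised to the minimum and `--abbrev=64` is capped at 40.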
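The pack-check.c hunk accumulates a histogram of delta chain lengths, reusing bucket 0 as the overflow bucket for chains of `MAX_CHAIN` or longer, which is why the report prints ">=" for that bucket. The following standalone sketch reproduces the counting and reporting logic on a handful of made-up chain lengths.

------------
#include <stdio.h>
#include <string.h>

#define MAX_CHAIN 40

/* Buckets 1..MAX_CHAIN-1 hold exact chain lengths; bucket 0 collects
 * everything of length MAX_CHAIN or more.
 */
static unsigned int chain_histogram[MAX_CHAIN];

static void count_delta(unsigned int delta_chain_length)
{
	if (!delta_chain_length)
		return;			/* not a delta at all */
	if (delta_chain_length < MAX_CHAIN)
		chain_histogram[delta_chain_length]++;
	else
		chain_histogram[0]++;
}

int main(void)
{
	/* hypothetical chain lengths for a handful of pack entries */
	unsigned int lengths[] = { 0, 1, 1, 2, 5, 41, 2 };

	memset(chain_histogram, 0, sizeof(chain_histogram));
	for (size_t i = 0; i < sizeof(lengths) / sizeof(*lengths); i++)
		count_delta(lengths[i]);

	for (int i = 0; i < MAX_CHAIN; i++) {
		if (!chain_histogram[i])
			continue;
		printf("chain length %s %d: %u object%s\n",
		       i ? "=" : ">=",
		       i ? i : MAX_CHAIN,
		       chain_histogram[i],
		       1 < chain_histogram[i] ? "s" : "");
	}
	return 0;
}
------------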
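The rev-list.c, revision.c and revision.h hunks replace direct tests of `revs->paths` with a `prune_fn` hook that `setup_revisions()` installs only when a pathspec was given, stashing the pathspec itself in `prune_data`. Here is a compact sketch of that shape; the struct and function names are simplified stand-ins, not the real `rev_info` layout.

------------
#include <stdio.h>

struct commit;	/* opaque for the sketch */

struct rev_info_sketch {
	const char **prune_data;
	void (*prune_fn)(struct rev_info_sketch *revs, struct commit *commit);
	int limited;
};

/* Stand-in for try_to_simplify_commit(). */
static void simplify(struct rev_info_sketch *revs, struct commit *commit)
{
	(void)revs;
	(void)commit;
	printf("simplifying a commit against the pathspec\n");
}

/* Install the hook only when there is something to prune by; the
 * walker then tests the function pointer instead of peeking at a
 * pathspec field directly.
 */
static void setup(struct rev_info_sketch *revs, const char **pathspec)
{
	revs->prune_data = NULL;
	revs->prune_fn = NULL;
	revs->limited = 0;
	if (pathspec && *pathspec) {
		revs->prune_data = pathspec;
		revs->prune_fn = simplify;
		revs->limited = 1;
	}
}

static void walk_one(struct rev_info_sketch *revs, struct commit *commit)
{
	if (revs->prune_fn)
		revs->prune_fn(revs, commit);
}

int main(void)
{
	const char *paths[] = { "Documentation/", NULL };
	struct rev_info_sketch revs;

	setup(&revs, paths);
	walk_one(&revs, NULL);
	return 0;
}
------------

Keeping the policy behind a function pointer lets callers that never prune leave the hook NULL, and lets future callers substitute their own simplification without touching the walker.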
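The sha1_name.c hunk grows the list of ref namespaces a short name is tried against and, when `warn_ambiguous_refs` is set, keeps scanning after the first hit so a clash can be reported. The sketch below shows the search-order idea against a made-up ref table with a shortened namespace list; the warning condition is simplified (it fires whenever a later namespace resolves to a different object), so it is not the exact test used in the patch.

------------
#include <stdio.h>
#include <string.h>

/* A toy stand-in for the refs on disk; real code calls read_ref()
 * on git_path("<fmt>", name).  Names and values here are made up.
 */
struct fake_ref { const char *path; const char *sha1; };
static const struct fake_ref refs[] = {
	{ "refs/heads/next", "1111111111111111111111111111111111111111" },
	{ "refs/tags/next",  "2222222222222222222222222222222222222222" },
};

static int warn_ambiguous_refs = 1;

static int read_fake_ref(const char *path, const char **sha1)
{
	for (size_t i = 0; i < sizeof(refs) / sizeof(*refs); i++)
		if (!strcmp(refs[i].path, path)) {
			*sha1 = refs[i].sha1;
			return 0;
		}
	return -1;
}

/* Resolve a short name by trying each namespace in priority order.
 * Without the warning we can return on the first hit; with it we
 * keep looking so a second, different hit can be reported.
 */
static const char *resolve(const char *name)
{
	static const char *fmt[] = {
		"%s", "refs/%s", "refs/tags/%s", "refs/heads/%s",
		"refs/remotes/%s", NULL
	};
	const char *found = NULL;
	char path[256];

	for (const char **p = fmt; *p; p++) {
		const char *sha1;

		snprintf(path, sizeof(path), *p, name);
		if (read_fake_ref(path, &sha1))
			continue;
		if (!found)
			found = sha1;
		else if (warn_ambiguous_refs && strcmp(found, sha1))
			fprintf(stderr, "warning: refname '%s' is ambiguous.\n",
				name);
		if (!warn_ambiguous_refs)
			return found;
	}
	return found;
}

int main(void)
{
	const char *sha1 = resolve("next");

	printf("next -> %s\n", sha1 ? sha1 : "(not found)");
	return 0;
}
------------

With the fake table above, `next` resolves to the tag (earlier in the search order) and a warning is printed to stderr because a branch of the same name points elsewhere.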