265 files changed, 5704 insertions, 2414 deletions
diff --git a/.gitignore b/.gitignore index 0d77ea5894..7374587f9d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +/fuzz-commit-graph /fuzz_corpora /fuzz-pack-headers /fuzz-pack-idx @@ -124,7 +125,6 @@ /git-rebase--am /git-rebase--common /git-rebase--interactive -/git-rebase--merge /git-rebase--preserve-merges /git-receive-pack /git-reflog @@ -229,3 +229,4 @@ *.pdb /Debug/ /Release/ +*.dSYM diff --git a/.travis.yml b/.travis.yml index 03c8e4c613..36cbdea7f4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,6 +8,8 @@ os: - linux - osx +osx_image: xcode10.1 + compiler: - clang - gcc diff --git a/Documentation/RelNotes/2.21.0.txt b/Documentation/RelNotes/2.21.0.txt index 4596d9e01a..5ad2233c99 100644 --- a/Documentation/RelNotes/2.21.0.txt +++ b/Documentation/RelNotes/2.21.0.txt @@ -59,6 +59,9 @@ UI, Workflows & Features * Custom userformat "log --format" learned %S atom that stands for the tip the traversal reached the commit from, i.e. --source. + * "git instaweb" learned to drive http.server that comes with + "batteries included" Python installation (both Python2 & 3). + Performance, Internal Implementation, Development Support etc. @@ -101,6 +104,26 @@ Performance, Internal Implementation, Development Support etc. * The code to walk tree objects has been taught that we may be working with object names that are not computed with SHA-1. + * The in-core repository instances are passed through more codepaths. + + * Update the protocol message specification to allow only the limited + use of scaled quantities. This is ensure potential compatibility + issues will not go out of hand. + + * Micro-optimize the code that prepares commit objects to be walked + by "git rev-list" when the commit-graph is available. + + * "git fetch" and "git upload-pack" learned to send all exchange over + the sideband channel while talking the v2 protocol. + + * The codepath to write out commit-graph has been optimized by + following the usual pattern of visiting objects in in-pack order. + + * The codepath to show progress meter while writing out commit-graph + file has been improved. + + * Cocci rules have been updated to encourage use of strbuf_addbuf(). + Fixes since v2.20 ----------------- @@ -221,6 +244,71 @@ Fixes since v2.20 temporary file. (merge fa6f225e01 js/add-e-clear-patch-before-stating later to maint). + * "git p4" failed to update a shelved change when there were moved + files, which has been corrected. + (merge 7a10946ab9 ld/git-p4-shelve-update-fix later to maint). + + * The codepath to read from the commit-graph file attempted to read + past the end of it when the file's table-of-contents was corrupt. + + * The compat/obstack code had casts that -Wcast-function-type + compilation option found questionable. + (merge 764473d257 sg/obstack-cast-function-type-fix later to maint). + + * An obvious typo in an assertion error message has been fixed. + (merge 3c27e2e059 cc/test-ref-store-typofix later to maint). + + * In Git for Windows, "git clone \\server\share\path" etc. that uses + UNC paths from command line had bad interaction with its shell + emulation. + + * "git add --ignore-errors" did not work as advertised and instead + worked as an unintended synonym for "git add --renormalize", which + has been fixed. + (merge 9e5da3d055 jk/add-ignore-errors-bit-assignment-fix later to maint). + + * On a case-insensitive filesystem, we failed to compare the part of + the path that is above the worktree directory in an absolute + pathname, which has been corrected. 
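As a quick illustration of the new `%S` placeholder mentioned above (the branch names here are only examples; any starting refs work), it reports which ref the traversal reached each commit from, much like `--source`:

------------
$ git log --format='%S %h %s' maint master
------------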
+ + * Asking "git check-attr" about a macro (e.g. "binary") on a specific + path did not work correctly, even though "git check-attr -a" listed + such a macro correctly. This has been corrected. + (merge 7b95849be4 jk/attr-macro-fix later to maint). + + * "git pack-objects" incorrectly used uninitialized mutex, which has + been corrected. + (merge edb673cf10 ph/pack-objects-mutex-fix later to maint). + + * "git checkout -b <new> [HEAD]" to create a new branch from the + current commit and check it out ought to be a no-op in the index + and the working tree in normal cases, but there are corner cases + that do require updates to the index and the working tree. Running + it immediately after "git clone --no-checkout" is one of these + cases that an earlier optimization kicked in incorrectly, which has + been fixed. + (merge 8424bfd45b bp/checkout-new-branch-optim later to maint). + + * "git diff --color-moved --cc --stat -p" did not work well due to + funny interaction between a bug in color-moved and the rest, which + has been fixed. + (merge dac03b5518 jk/diff-cc-stat-fixes later to maint). + + * When GIT_SEQUENCE_EDITOR is set, the command was incorrectly + started when modes of "git rebase" that implicitly uses the + machinery for the interactive rebase are run, which has been + corrected. + (merge 891d4a0313 pw/no-editor-in-rebase-i-implicit later to maint). + + * The commit-graph facility did not work when in-core objects that + are promoted from unknown type to commit (e.g. a commit that is + accessed via a tag that refers to it) were involved, which has been + corrected. + (merge 4468d4435c sg/object-as-type-commit-graph-fix later to maint). + + * "git fetch" output cleanup. + (merge dc40b24df4 nd/fetch-compact-update later to maint). + * Code cleanup, docfix, build fix, etc. (merge 89ba9a79ae hb/t0061-dot-in-path-fix later to maint). (merge d173e799ea sb/diff-color-moved-config-option-fixup later to maint). @@ -240,3 +328,5 @@ Fixes since v2.20 (merge 1747125e2c cc/parial-clone-doc-typofix later to maint). (merge e01378753d cc/fetch-error-message-fix later to maint). (merge 54e8c11215 jk/remote-insteadof-cleanup later to maint). + (merge d609615f48 js/test-git-installed later to maint). + (merge ba170517be ja/doc-style-fix later to maint). diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index d0e6635fe0..7e9b6c8f4c 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -121,11 +121,14 @@ core.quotePath:: core.eol:: Sets the line ending type to use in the working directory for - files that have the `text` property set when core.autocrlf is false. + files that are marked as text (either by having the `text` + attribute set, or by having `text=auto` and Git auto-detecting + the contents as text). Alternatives are 'lf', 'crlf' and 'native', which uses the platform's native line ending. The default value is `native`. See linkgit:gitattributes[5] for more information on end-of-line - conversion. + conversion. Note that this value is ignored if `core.autocrlf` + is set to `true` or `input`. core.safecrlf:: If true, makes Git check if converting `CRLF` is reversible when diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt index edac75c83f..425c73aa52 100644 --- a/Documentation/config/pack.txt +++ b/Documentation/config/pack.txt @@ -105,6 +105,15 @@ pack.useBitmaps:: true. You should not generally need to turn this off unless you are debugging pack bitmaps. 
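To make the clarified `core.eol` behaviour concrete, here is one minimal way to get LF endings in the working tree on any platform; note that `core.autocrlf` has to stay unset or `false` for `core.eol` to apply (an illustrative setup, not the only one):

------------
$ git config core.autocrlf false
$ git config core.eol lf
$ echo '* text=auto' >.gitattributes
------------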
+pack.useSparse:: + When true, git will default to using the '--sparse' option in + 'git pack-objects' when the '--revs' option is present. This + algorithm only walks trees that appear in paths that introduce new + objects. This can have significant performance benefits when + computing a pack to send a small change. However, it is possible + that extra objects are added to the pack-file if the included + commits contain certain types of direct renames. + pack.writeBitmaps (deprecated):: This is a deprecated synonym for `repack.writeBitmaps`. diff --git a/Documentation/doc-diff b/Documentation/doc-diff index dfd9418778..32c83dd26f 100755 --- a/Documentation/doc-diff +++ b/Documentation/doc-diff @@ -39,8 +39,7 @@ do shift done -cd_to_toplevel -tmp=Documentation/tmp-doc-diff +tmp="$(git rev-parse --show-toplevel)/Documentation/tmp-doc-diff" || exit 1 if test -n "$clean" then @@ -109,7 +108,7 @@ render_tree () { make -j$parallel -C "$tmp/worktree" \ GIT_VERSION=omitted \ SOURCE_DATE_EPOCH=0 \ - DESTDIR="$PWD/$tmp/installed/$1+" \ + DESTDIR="$tmp/installed/$1+" \ install-man && mv "$tmp/installed/$1+" "$tmp/installed/$1" fi && diff --git a/Documentation/git-add.txt b/Documentation/git-add.txt index 45652fe4a6..37bcab94d5 100644 --- a/Documentation/git-add.txt +++ b/Documentation/git-add.txt @@ -58,9 +58,9 @@ OPTIONS specifying `dir` will record not just a file `dir/file1` modified in the working tree, a file `dir/file2` added to the working tree, but also a file `dir/file3` removed from - the working tree. Note that older versions of Git used + the working tree). Note that older versions of Git used to ignore removed files; use `--no-all` option if you want - to add modified or new files but ignore removed ones. + to add modified or new files but ignore removed ones. + For more details about the <pathspec> syntax, see the 'pathspec' entry in linkgit:gitglossary[7]. @@ -124,7 +124,7 @@ subdirectories). --no-ignore-removal:: Update the index not only where the working tree has a file matching <pathspec> but also where the index already has an - entry. This adds, modifies, and removes index entries to + entry. This adds, modifies, and removes index entries to match the working tree. + If no <pathspec> is given when `-A` option is used, all @@ -206,7 +206,7 @@ EXAMPLES -------- * Adds content from all `*.txt` files under `Documentation` directory -and its subdirectories: + and its subdirectories: + ------------ $ git add Documentation/\*.txt diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt index bf5316ffa9..3bd83a7cbd 100644 --- a/Documentation/git-branch.txt +++ b/Documentation/git-branch.txt @@ -297,7 +297,7 @@ $ git checkout my2.6.14 ------------ + <1> This step and the next one could be combined into a single step with -"checkout -b my2.6.14 v2.6.14". + "checkout -b my2.6.14 v2.6.14". Delete an unneeded branch:: + @@ -309,10 +309,10 @@ $ git branch -D test <2> ------------ + <1> Delete the remote-tracking branches "todo", "html" and "man". The next -'fetch' or 'pull' will create them again unless you configure them not to. -See linkgit:git-fetch[1]. + 'fetch' or 'pull' will create them again unless you configure them not to. + See linkgit:git-fetch[1]. <2> Delete the "test" branch even if the "master" branch (or whichever branch -is currently checked out) does not have all commits from the test branch. + is currently checked out) does not have all commits from the test branch. 
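The sparse packing described for `pack.useSparse` above can also be tried by hand; the following sketch (the revision range and output name are arbitrary) enables the setting and builds a small pack for the latest commit:

------------
$ git config pack.useSparse true
$ printf '%s\n' HEAD ^HEAD~1 | git pack-objects --revs --sparse --stdout >small.pack
------------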
NOTES diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt index 9a2e9cdafb..8eca671b82 100644 --- a/Documentation/git-cat-file.txt +++ b/Documentation/git-cat-file.txt @@ -252,6 +252,12 @@ the repository, then `cat-file` will ignore any custom format and print: <object> SP missing LF ------------ +If a name is specified that might refer to more than one object (an ambiguous short sha), then `cat-file` will ignore any custom format and print: + +------------ +<object> SP ambiguous LF +------------ + If --follow-symlinks is used, and a symlink in the repository points outside the repository, then `cat-file` will ignore any custom format and print: diff --git a/Documentation/git-checkout.txt b/Documentation/git-checkout.txt index 6acc3d98e7..9a396498d1 100644 --- a/Documentation/git-checkout.txt +++ b/Documentation/git-checkout.txt @@ -424,14 +424,14 @@ $ git tag foo <3> ------------ <1> creates a new branch 'foo', which refers to commit 'f', and then -updates HEAD to refer to branch 'foo'. In other words, we'll no longer -be in detached HEAD state after this command. + updates HEAD to refer to branch 'foo'. In other words, we'll no longer + be in detached HEAD state after this command. <2> similarly creates a new branch 'foo', which refers to commit 'f', -but leaves HEAD detached. + but leaves HEAD detached. <3> creates a new tag 'foo', which refers to commit 'f', -leaving HEAD detached. + leaving HEAD detached. If we have moved away from commit 'f', then we must first recover its object name (typically by using git reflog), and then we can create a reference to @@ -459,8 +459,8 @@ EXAMPLES -------- . The following sequence checks out the `master` branch, reverts -the `Makefile` to two revisions back, deletes hello.c by -mistake, and gets it back from the index. + the `Makefile` to two revisions back, deletes hello.c by + mistake, and gets it back from the index. + ------------ $ git checkout master <1> @@ -494,7 +494,7 @@ $ git checkout -- hello.c ------------ . After working in the wrong branch, switching to the correct -branch would be done using: + branch would be done using: + ------------ $ git checkout mytopic @@ -522,7 +522,7 @@ registered in your index file, so `git diff` would show you what changes you made since the tip of the new branch. . When a merge conflict happens during switching branches with -the `-m` option, you would see something like this: + the `-m` option, you would see something like this: + ------------ $ git checkout -m mytopic diff --git a/Documentation/git-cherry-pick.txt b/Documentation/git-cherry-pick.txt index d35d771fc8..b8cfeec67e 100644 --- a/Documentation/git-cherry-pick.txt +++ b/Documentation/git-cherry-pick.txt @@ -213,16 +213,16 @@ $ git reset --merge ORIG_HEAD <3> $ git cherry-pick -Xpatience topic^ <4> ------------ <1> apply the change that would be shown by `git show topic^`. -In this example, the patch does not apply cleanly, so -information about the conflict is written to the index and -working tree and no new commit results. + In this example, the patch does not apply cleanly, so + information about the conflict is written to the index and + working tree and no new commit results. <2> summarize changes to be reconciled <3> cancel the cherry-pick. In other words, return to the -pre-cherry-pick state, preserving any local modifications you had in -the working tree. + pre-cherry-pick state, preserving any local modifications + you had in the working tree. 
<4> try to apply the change introduced by `topic^` again, -spending extra time to avoid mistakes based on incorrectly matching -context lines. + spending extra time to avoid mistakes based on incorrectly + matching context lines. SEE ALSO -------- diff --git a/Documentation/git-commit.txt b/Documentation/git-commit.txt index f970a43422..a85c2c2a4c 100644 --- a/Documentation/git-commit.txt +++ b/Documentation/git-commit.txt @@ -17,16 +17,20 @@ SYNOPSIS DESCRIPTION ----------- -Stores the current contents of the index in a new commit along -with a log message from the user describing the changes. +Create a new commit containing the current contents of the index and +the given log message describing the changes. The new commit is a +direct child of HEAD, usually the tip of the current branch, and the +branch is updated to point to it (unless no branch is associated with +the working tree, in which case HEAD is "detached" as described in +linkgit:git-checkout[1]). -The content to be added can be specified in several ways: +The content to be committed can be specified in several ways: -1. by using 'git add' to incrementally "add" changes to the - index before using the 'commit' command (Note: even modified - files must be "added"); +1. by using linkgit:git-add[1] to incrementally "add" changes to the + index before using the 'commit' command (Note: even modified files + must be "added"); -2. by using 'git rm' to remove files from the working tree +2. by using linkgit:git-rm[1] to remove files from the working tree and the index, again before using the 'commit' command; 3. by listing files as arguments to the 'commit' command diff --git a/Documentation/git-diff-tree.txt b/Documentation/git-diff-tree.txt index 2319b2b192..43daa7c046 100644 --- a/Documentation/git-diff-tree.txt +++ b/Documentation/git-diff-tree.txt @@ -31,10 +31,7 @@ include::diff-options.txt[] <path>...:: If provided, the results are limited to a subset of files - matching one of these prefix strings. - i.e., file matches `/^<pattern1>|<pattern2>|.../` - Note that this parameter does not provide any wildcard or regexp - features. + matching one of the provided pathspecs. -r:: recurse into sub-trees @@ -114,52 +111,6 @@ include::pretty-options.txt[] include::pretty-formats.txt[] - - -LIMITING OUTPUT ---------------- -If you're only interested in differences in a subset of files, for -example some architecture-specific files, you might do: - - git diff-tree -r <tree-ish> <tree-ish> arch/ia64 include/asm-ia64 - -and it will only show you what changed in those two directories. - -Or if you are searching for what changed in just `kernel/sched.c`, just do - - git diff-tree -r <tree-ish> <tree-ish> kernel/sched.c - -and it will ignore all differences to other files. - -The pattern is always the prefix, and is matched exactly. There are no -wildcards. Even stricter, it has to match a complete path component. -I.e. "foo" does not pick up `foobar.h`. "foo" does match `foo/bar.h` -so it can be used to name subdirectories. - -An example of normal usage is: - - torvalds@ppc970:~/git> git diff-tree --abbrev 5319e4 - :100664 100664 ac348b... a01513... 
git-fsck-objects.c - -which tells you that the last commit changed just one file (it's from -this one: - ------------------------------------------------------------------------------ -commit 3c6f7ca19ad4043e9e72fa94106f352897e651a8 -tree 5319e4d609cdd282069cc4dce33c1db559539b03 -parent b4e628ea30d5ab3606119d2ea5caeab141d38df7 -author Linus Torvalds <torvalds@ppc970.osdl.org> Sat Apr 9 12:02:30 2005 -committer Linus Torvalds <torvalds@ppc970.osdl.org> Sat Apr 9 12:02:30 2005 - -Make "git-fsck-objects" print out all the root commits it finds. - -Once I do the reference tracking, I'll also make it print out all the -HEAD commits it finds, which is even more interesting. ------------------------------------------------------------------------------ - -in case you care). - - include::diff-format.txt[] GIT diff --git a/Documentation/git-diff.txt b/Documentation/git-diff.txt index 030f162f30..72179d993c 100644 --- a/Documentation/git-diff.txt +++ b/Documentation/git-diff.txt @@ -132,9 +132,9 @@ $ git diff HEAD <3> + <1> Changes in the working tree not yet staged for the next commit. <2> Changes between the index and your last commit; what you -would be committing if you run "git commit" without "-a" option. + would be committing if you run "git commit" without "-a" option. <3> Changes in the working tree since your last commit; what you -would be committing if you run "git commit -a" + would be committing if you run "git commit -a" Comparing with arbitrary commits:: + @@ -145,10 +145,10 @@ $ git diff HEAD^ HEAD <3> ------------ + <1> Instead of using the tip of the current branch, compare with the -tip of "test" branch. + tip of "test" branch. <2> Instead of comparing with the tip of "test" branch, compare with -the tip of the current branch, but limit the comparison to the -file "test". + the tip of the current branch, but limit the comparison to the + file "test". <3> Compare the version before the last commit and the last commit. Comparing branches:: @@ -162,7 +162,7 @@ $ git diff topic...master <3> <1> Changes between the tips of the topic and the master branches. <2> Same as above. <3> Changes that occurred on the master branch since when the topic -branch was started off it. + branch was started off it. Limiting the diff output:: + @@ -173,9 +173,9 @@ $ git diff arch/i386 include/asm-i386 <3> ------------ + <1> Show only modification, rename, and copy, but not addition -or deletion. + or deletion. <2> Show only names and the nature of change, but not actual -diff output. + diff output. <3> Limit diff output to named subtrees. Munging the diff output:: @@ -186,7 +186,7 @@ $ git diff -R <2> ------------ + <1> Spend extra cycles to find renames, copies and complete -rewrites (very expensive). + rewrites (very expensive). <2> Output diff in reverse. SEE ALSO diff --git a/Documentation/git-fetch.txt b/Documentation/git-fetch.txt index e319935597..266d63cf11 100644 --- a/Documentation/git-fetch.txt +++ b/Documentation/git-fetch.txt @@ -266,7 +266,7 @@ The `pu` branch will be updated even if it is does not fast-forward, because it is prefixed with a plus sign; `tmp` will not be. 
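The same forced, non-fast-forward update can be requested for a single fetch by giving the plus-prefixed refspec on the command line (assuming the remote actually has a `pu` branch):

------------
$ git fetch origin '+refs/heads/pu:refs/remotes/origin/pu'
------------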
* Peek at a remote's branch, without configuring the remote in your local -repository: + repository: + ------------------------------------------------ $ git fetch git://git.kernel.org/pub/scm/git/git.git maint diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt index 27304428a1..1af85d404f 100644 --- a/Documentation/git-format-patch.txt +++ b/Documentation/git-format-patch.txt @@ -504,9 +504,9 @@ Toggle it to make sure it is set to `false`. Also, search for "mailnews.wraplength" and set the value to 0. 3. Disable the use of format=flowed: -Edit..Preferences..Advanced..Config Editor. Search for -"mailnews.send_plaintext_flowed". -Toggle it to make sure it is set to `false`. + Edit..Preferences..Advanced..Config Editor. Search for + "mailnews.send_plaintext_flowed". + Toggle it to make sure it is set to `false`. After that is done, you should be able to compose email as you otherwise would (cut + paste, 'git format-patch' | 'git imap-send', etc), @@ -629,14 +629,14 @@ EXAMPLES -------- * Extract commits between revisions R1 and R2, and apply them on top of -the current branch using 'git am' to cherry-pick them: + the current branch using 'git am' to cherry-pick them: + ------------ $ git format-patch -k --stdout R1..R2 | git am -3 -k ------------ * Extract all commits which are in the current branch but not in the -origin branch: + origin branch: + ------------ $ git format-patch origin @@ -645,7 +645,7 @@ $ git format-patch origin For each commit a separate file is created in the current directory. * Extract all commits that lead to 'origin' since the inception of the -project: + project: + ------------ $ git format-patch --root origin @@ -664,7 +664,7 @@ Note that non-Git "patch" programs won't understand renaming patches, so use it only when you know the recipient uses Git to apply your patch. * Extract three topmost commits from the current branch and format them -as e-mailable patches: + as e-mailable patches: + ------------ $ git format-patch -3 diff --git a/Documentation/git-fsck.txt b/Documentation/git-fsck.txt index ab9a93fb9b..55950d9eea 100644 --- a/Documentation/git-fsck.txt +++ b/Documentation/git-fsck.txt @@ -140,9 +140,9 @@ dangling <type> <object>:: The <type> object <object>, is present in the database but never 'directly' used. A dangling commit could be a root node. -sha1 mismatch <object>:: - The database has an object who's sha1 doesn't match the - database value. +hash mismatch <object>:: + The database has an object whose hash doesn't match the + object database value. This indicates a serious data integrity problem. Environment Variables diff --git a/Documentation/git-help.txt b/Documentation/git-help.txt index aab5453bbb..c318bf87e1 100644 --- a/Documentation/git-help.txt +++ b/Documentation/git-help.txt @@ -118,9 +118,9 @@ format is chosen. The following values are currently supported: * "man": use the 'man' program as usual, * "woman": use 'emacsclient' to launch the "woman" mode in emacs -(this only works starting with emacsclient versions 22), + (this only works starting with emacsclient versions 22), * "konqueror": use 'kfmclient' to open the man page in a new konqueror -tab (see 'Note about konqueror' below). + tab (see 'Note about konqueror' below). Values for other tools can be used if there is a corresponding `man.<tool>.cmd` configuration entry (see below). 
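For example, a viewer can be wired up through that mechanism as follows (the konqueror command shown is merely one plausible value):

------------
$ git config --global man.viewer konqueror
$ git config --global man.konqueror.cmd 'kfmclient newTab'
------------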
diff --git a/Documentation/git-init.txt b/Documentation/git-init.txt index 057076ca38..32880aafb0 100644 --- a/Documentation/git-init.txt +++ b/Documentation/git-init.txt @@ -128,7 +128,7 @@ The template directory will be one of the following (in order): The default template directory includes some directory structure, suggested "exclude patterns" (see linkgit:gitignore[5]), and sample hook files. -The sample hooks are all disabled by default, To enable one of the +The sample hooks are all disabled by default. To enable one of the sample hooks rename it by removing its `.sample` suffix. See linkgit:githooks[5] for more general info on hook execution. diff --git a/Documentation/git-instaweb.txt b/Documentation/git-instaweb.txt index e8ecdbf927..a54fe4401b 100644 --- a/Documentation/git-instaweb.txt +++ b/Documentation/git-instaweb.txt @@ -29,7 +29,8 @@ OPTIONS The HTTP daemon command-line that will be executed. Command-line options may be specified here, and the configuration file will be added at the end of the command-line. - Currently apache2, lighttpd, mongoose, plackup and webrick are supported. + Currently apache2, lighttpd, mongoose, plackup, python and + webrick are supported. (Default: lighttpd) -m:: diff --git a/Documentation/git-log.txt b/Documentation/git-log.txt index 90761f1694..b02e922dc3 100644 --- a/Documentation/git-log.txt +++ b/Documentation/git-log.txt @@ -192,6 +192,10 @@ log.date:: Default format for human-readable dates. (Compare the `--date` option.) Defaults to "default", which means to write dates like `Sat May 8 19:35:34 2010 -0500`. ++ +If the format is set to "auto:foo" and the pager is in use, format +"foo" will be the used for the date format. Otherwise "default" will +be used. log.follow:: If `true`, `git log` will act as if the `--follow` option was used when diff --git a/Documentation/git-p4.txt b/Documentation/git-p4.txt index f0a0280954..3494a1db3e 100644 --- a/Documentation/git-p4.txt +++ b/Documentation/git-p4.txt @@ -71,12 +71,12 @@ $ git p4 clone //depot/path/project ------------ This: -1. Creates an empty Git repository in a subdirectory called 'project'. +1. Creates an empty Git repository in a subdirectory called 'project'. + -2. Imports the full contents of the head revision from the given p4 -depot path into a single commit in the Git branch 'refs/remotes/p4/master'. +2. Imports the full contents of the head revision from the given p4 + depot path into a single commit in the Git branch 'refs/remotes/p4/master'. + -3. Creates a local branch, 'master' from this remote and checks it out. +3. Creates a local branch, 'master' from this remote and checks it out. To reproduce the entire p4 history in Git, use the '@all' modifier on the depot path: diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt index 40c825c381..e45f3e680d 100644 --- a/Documentation/git-pack-objects.txt +++ b/Documentation/git-pack-objects.txt @@ -14,7 +14,7 @@ SYNOPSIS [--local] [--incremental] [--window=<n>] [--depth=<n>] [--revs [--unpacked | --all]] [--keep-pack=<pack-name>] [--stdout [--filter=<filter-spec>] | base-name] - [--shallow] [--keep-true-parents] < object-list + [--shallow] [--keep-true-parents] [--sparse] < object-list DESCRIPTION @@ -196,6 +196,15 @@ depth is 4095. Add --no-reuse-object if you want to force a uniform compression level on all data no matter the source. +--sparse:: + Use the "sparse" algorithm to determine which objects to include in + the pack, when combined with the "--revs" option. 
This algorithm + only walks trees that appear in paths that introduce new objects. + This can have significant performance benefits when computing + a pack to send a small change. However, it is possible that extra + objects are added to the pack-file if the included commits contain + certain types of direct renames. + --thin:: Create a "thin" pack by omitting the common objects between a sender and a receiver in order to reduce network transfer. This diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index 4dd5853d6e..7e695b30e4 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -515,15 +515,7 @@ See also INCOMPATIBLE OPTIONS below. INCOMPATIBLE OPTIONS -------------------- -git-rebase has many flags that are incompatible with each other, -predominantly due to the fact that it has three different underlying -implementations: - - * one based on linkgit:git-am[1] (the default) - * one based on git-merge-recursive (merge backend) - * one based on linkgit:git-cherry-pick[1] (interactive backend) - -Flags only understood by the am backend: +The following options: * --committer-date-is-author-date * --ignore-date @@ -531,15 +523,12 @@ Flags only understood by the am backend: * --ignore-whitespace * -C -Flags understood by both merge and interactive backends: +are incompatible with the following options: * --merge * --strategy * --strategy-option * --allow-empty-message - -Flags only understood by the interactive backend: - * --[no-]autosquash * --rebase-merges * --preserve-merges @@ -550,7 +539,7 @@ Flags only understood by the interactive backend: * --edit-todo * --root when used in combination with --onto -Other incompatible flag pairs: +In addition, the following pairs of options are incompatible: * --preserve-merges and --interactive * --preserve-merges and --signoff diff --git a/Documentation/git-reset.txt b/Documentation/git-reset.txt index 9f69ae8b69..132f8e55f6 100644 --- a/Documentation/git-reset.txt +++ b/Documentation/git-reset.txt @@ -115,17 +115,17 @@ $ git pull git://info.example.com/ nitfol <4> ------------ + <1> You are happily working on something, and find the changes -in these files are in good order. You do not want to see them -when you run `git diff`, because you plan to work on other files -and changes with these files are distracting. + in these files are in good order. You do not want to see them + when you run `git diff`, because you plan to work on other files + and changes with these files are distracting. <2> Somebody asks you to pull, and the changes sound worthy of merging. <3> However, you already dirtied the index (i.e. your index does -not match the `HEAD` commit). But you know the pull you are going -to make does not affect `frotz.c` or `filfre.c`, so you revert the -index changes for these two files. Your changes in working tree -remain there. + not match the `HEAD` commit). But you know the pull you are going + to make does not affect `frotz.c` or `filfre.c`, so you revert the + index changes for these two files. Your changes in working tree + remain there. <4> Then you can pull and merge, leaving `frotz.c` and `filfre.c` -changes still in the working tree. + changes still in the working tree. Undo a commit and redo:: + @@ -137,12 +137,12 @@ $ git commit -a -c ORIG_HEAD <3> ------------ + <1> This is most often done when you remembered what you -just committed is incomplete, or you misspelled your commit -message, or both. Leaves working tree as it was before "reset". 
+ just committed is incomplete, or you misspelled your commit + message, or both. Leaves working tree as it was before "reset". <2> Make corrections to working tree files. <3> "reset" copies the old head to `.git/ORIG_HEAD`; redo the -commit by starting with its log message. If you do not need to -edit the message further, you can give `-C` option instead. + commit by starting with its log message. If you do not need to + edit the message further, you can give `-C` option instead. + See also the `--amend` option to linkgit:git-commit[1]. @@ -155,9 +155,9 @@ $ git checkout topic/wip <3> ------------ + <1> You have made some commits, but realize they were premature -to be in the `master` branch. You want to continue polishing -them in a topic branch, so create `topic/wip` branch off of the -current `HEAD`. + to be in the `master` branch. You want to continue polishing + them in a topic branch, so create `topic/wip` branch off of the + current `HEAD`. <2> Rewind the master branch to get rid of those three commits. <3> Switch to `topic/wip` branch and keep working. @@ -169,10 +169,10 @@ $ git reset --hard HEAD~3 <1> ------------ + <1> The last three commits (`HEAD`, `HEAD^`, and `HEAD~2`) were bad -and you do not want to ever see them again. Do *not* do this if -you have already given these commits to somebody else. (See the -"RECOVERING FROM UPSTREAM REBASE" section in linkgit:git-rebase[1] for -the implications of doing so.) + and you do not want to ever see them again. Do *not* do this if + you have already given these commits to somebody else. (See the + "RECOVERING FROM UPSTREAM REBASE" section in linkgit:git-rebase[1] + for the implications of doing so.) Undo a merge or pull:: + @@ -189,18 +189,18 @@ $ git reset --hard ORIG_HEAD <4> ------------ + <1> Try to update from the upstream resulted in a lot of -conflicts; you were not ready to spend a lot of time merging -right now, so you decide to do that later. + conflicts; you were not ready to spend a lot of time merging + right now, so you decide to do that later. <2> "pull" has not made merge commit, so `git reset --hard` -which is a synonym for `git reset --hard HEAD` clears the mess -from the index file and the working tree. + which is a synonym for `git reset --hard HEAD` clears the mess + from the index file and the working tree. <3> Merge a topic branch into the current branch, which resulted -in a fast-forward. + in a fast-forward. <4> But you decided that the topic branch is not ready for public -consumption yet. "pull" or "merge" always leaves the original -tip of the current branch in `ORIG_HEAD`, so resetting hard to it -brings your index file and the working tree back to that state, -and resets the tip of the branch to that commit. + consumption yet. "pull" or "merge" always leaves the original + tip of the current branch in `ORIG_HEAD`, so resetting hard to it + brings your index file and the working tree back to that state, + and resets the tip of the branch to that commit. Undo a merge or pull inside a dirty working tree:: + @@ -214,14 +214,14 @@ $ git reset --merge ORIG_HEAD <2> ------------ + <1> Even if you may have local modifications in your -working tree, you can safely say `git pull` when you know -that the change in the other branch does not overlap with -them. + working tree, you can safely say `git pull` when you know + that the change in the other branch does not overlap with + them. <2> After inspecting the result of the merge, you may find -that the change in the other branch is unsatisfactory. 
Running -`git reset --hard ORIG_HEAD` will let you go back to where you -were, but it will discard your local changes, which you do not -want. `git reset --merge` keeps your local changes. + that the change in the other branch is unsatisfactory. Running + `git reset --hard ORIG_HEAD` will let you go back to where you + were, but it will discard your local changes, which you do not + want. `git reset --merge` keeps your local changes. Interrupted workflow:: diff --git a/Documentation/git-send-email.txt b/Documentation/git-send-email.txt index 62c6c76f27..1afe9fc858 100644 --- a/Documentation/git-send-email.txt +++ b/Documentation/git-send-email.txt @@ -33,7 +33,7 @@ This is what linkgit:git-format-patch[1] generates. Most headers and MIME formatting are ignored. 2. The original format used by Greg Kroah-Hartman's 'send_lots_of_email.pl' -script + script + This format expects the first line of the file to contain the "Cc:" value and the "Subject:" of the message as the second line. diff --git a/Documentation/git-tag.txt b/Documentation/git-tag.txt index f2d644e3af..a74e7b926d 100644 --- a/Documentation/git-tag.txt +++ b/Documentation/git-tag.txt @@ -237,16 +237,16 @@ your repository directly), then others will have already seen the old tag. In that case you can do one of two things: . The sane thing. -Just admit you screwed up, and use a different name. Others have -already seen one tag-name, and if you keep the same name, you -may be in the situation that two people both have "version X", -but they actually have 'different' "X"'s. So just call it "X.1" -and be done with it. + Just admit you screwed up, and use a different name. Others have + already seen one tag-name, and if you keep the same name, you + may be in the situation that two people both have "version X", + but they actually have 'different' "X"'s. So just call it "X.1" + and be done with it. . The insane thing. -You really want to call the new version "X" too, 'even though' -others have already seen the old one. So just use 'git tag -f' -again, as if you hadn't already published the old one. + You really want to call the new version "X" too, 'even though' + others have already seen the old one. So just use 'git tag -f' + again, as if you hadn't already published the old one. However, Git does *not* (and it should not) change tags behind users back. So if somebody already got the old tag, doing a diff --git a/Documentation/git-upload-pack.txt b/Documentation/git-upload-pack.txt index 998f52d3df..9822c1eb1a 100644 --- a/Documentation/git-upload-pack.txt +++ b/Documentation/git-upload-pack.txt @@ -22,7 +22,6 @@ The UI for the protocol is on the 'git fetch-pack' side, and the program pair is meant to be used to pull updates from a remote repository. For push operations, see 'git send-pack'. - OPTIONS ------- diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt index b8392fc330..9b41f81c06 100644 --- a/Documentation/gitattributes.txt +++ b/Documentation/gitattributes.txt @@ -124,7 +124,9 @@ text file is normalized, its line endings are converted to LF in the repository. To control what line ending style is used in the working directory, use the `eol` attribute for a single file and the `core.eol` configuration variable for all text files. -Note that `core.autocrlf` overrides `core.eol` +Note that setting `core.autocrlf` to `true` or `input` overrides +`core.eol` (see the definitions of those options in +linkgit:git-config[1]). Set:: @@ -344,7 +346,9 @@ automatic line ending conversion based on your platform. 
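Individual paths can still pin their own endings with the `eol` attribute, which takes precedence over `core.eol` for the matched files (the patterns below are only examples):

------------
$ echo '*.sh  text eol=lf'   >>.gitattributes
$ echo '*.bat text eol=crlf' >>.gitattributes
------------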
Use the following attributes if your '*.ps1' files are UTF-16 little endian encoded without BOM and you want Git to use Windows line endings -in the working directory. Please note, it is highly recommended to +in the working directory (use `UTF-16-LE-BOM` instead of `UTF-16LE` if +you want UTF-16 little endian with BOM). +Please note, it is highly recommended to explicitly define the line endings with `eol` if the `working-tree-encoding` attribute is used to avoid ambiguity. diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt index 98b538bc77..cad711ce0a 100644 --- a/Documentation/rev-list-options.txt +++ b/Documentation/rev-list-options.txt @@ -270,13 +270,13 @@ depending on a few rules: + -- 1. If the starting point is specified as `ref@{Nth}`, show the index -format. + format. + 2. If the starting point was specified as `ref@{now}`, show the -timestamp format. + timestamp format. + 3. If neither was used, but `--date` was given on the command line, show -the timestamp in the format requested by `--date`. + the timestamp in the format requested by `--date`. + 4. Otherwise, show the index format. -- @@ -730,8 +730,13 @@ specification contained in <path>. + The form '--filter=tree:<depth>' omits all blobs and trees whose depth from the root tree is >= <depth> (minimum depth if an object is located -at multiple depths in the commits traversed). Currently, only <depth>=0 -is supported, which omits all blobs and trees. +at multiple depths in the commits traversed). <depth>=0 will not include +any trees or blobs unless included explicitly in the command-line (or +standard input when --stdin is used). <depth>=1 will include only the +tree and blobs which are referenced directly by a commit reachable from +<commit> or an explicitly-given object. <depth>=2 is like <depth>=1 +while also including trees and blobs one more level removed from an +explicitly-given commit or tree. --no-filter:: Turn off any previous `--filter=` argument. @@ -831,6 +836,13 @@ Note that the `-local` option does not affect the seconds-since-epoch value (which is always measured in UTC), but does switch the accompanying timezone value. + +`--date=human` shows the timezone if the timezone does not match the +current time-zone, and doesn't print the whole date if that matches +(ie skip printing year for dates that are "this year", but also skip +the whole date itself if it's in the last few days and we can just say +what weekday it was). For older dates the hour and minute is also +omitted. ++ `--date=unix` shows the date as a Unix epoch timestamp (seconds since 1970). As with `--raw`, this is always in UTC and therefore `-local` has no effect. diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/technical/commit-graph-format.txt index cc0474ba3e..16452a0504 100644 --- a/Documentation/technical/commit-graph-format.txt +++ b/Documentation/technical/commit-graph-format.txt @@ -76,7 +76,7 @@ CHUNK DATA: of the ith commit. Stores value 0x7000000 if no parent in that position. If there are more than two parents, the second value has its most-significant bit on and the other bits store an array - position into the Large Edge List chunk. + position into the Extra Edge List chunk. * The next 8 bytes store the generation number of the commit and the commit time in seconds since EPOCH. The generation number uses the higher 30 bits of the first 4 bytes, while the commit @@ -84,7 +84,7 @@ CHUNK DATA: 2 bits of the lowest byte, storing the 33rd and 34th bit of the commit time. 
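The extended `tree:<depth>` filter can be exercised directly with `git rev-list`; these illustrative invocations list progressively more trees and blobs as the depth grows:

------------
$ git rev-list --objects --filter=tree:0 HEAD
$ git rev-list --objects --filter=tree:1 HEAD
------------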
- Large Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional] + Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional] This list of 4-byte values store the second through nth parents for all octopus merges. The second parent value in the commit data stores an array position within this list along with the most-significant bit diff --git a/Documentation/technical/pack-protocol.txt b/Documentation/technical/pack-protocol.txt index 6ac774d5f6..7a2375a55d 100644 --- a/Documentation/technical/pack-protocol.txt +++ b/Documentation/technical/pack-protocol.txt @@ -22,6 +22,16 @@ protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless otherwise noted the usual pkt-line LF rules apply: the sender SHOULD include a LF, but the receiver MUST NOT complain if it is not present. +An error packet is a special pkt-line that contains an error string. + +---- + error-line = PKT-LINE("ERR" SP explanation-text) +---- + +Throughout the protocol, where `PKT-LINE(...)` is expected, an error packet MAY +be sent. Once this packet is sent by a client or a server, the data transfer +process defined in this protocol is terminated. + Transports ---------- There are three transports over which the packfile protocol is @@ -89,13 +99,6 @@ process on the server side over the Git protocol is this: "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | nc -v example.com 9418 -If the server refuses the request for some reasons, it could abort -gracefully with an error message. - ----- - error-line = PKT-LINE("ERR" SP explanation-text) ----- - SSH Transport ------------- @@ -398,12 +401,11 @@ from the client). Then the server will start sending its packfile data. ---- - server-response = *ack_multi ack / nak / error-line + server-response = *ack_multi ack / nak ack_multi = PKT-LINE("ACK" SP obj-id ack_status) ack_status = "continue" / "common" / "ready" ack = PKT-LINE("ACK" SP obj-id) nak = PKT-LINE("NAK") - error-line = PKT-LINE("ERR" SP explanation-text) ---- A simple clone may look like this (with no 'have' lines): diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/technical/protocol-v2.txt index 09e4e0273f..ead85ce35c 100644 --- a/Documentation/technical/protocol-v2.txt +++ b/Documentation/technical/protocol-v2.txt @@ -296,7 +296,13 @@ included in the client's request: Request that various objects from the packfile be omitted using one of several filtering techniques. These are intended for use with partial clone and partial fetch operations. See - `rev-list` for possible "filter-spec" values. + `rev-list` for possible "filter-spec" values. When communicating + with other processes, senders SHOULD translate scaled integers + (e.g. "1k") into a fully-expanded form (e.g. "1024") to aid + interoperability with older receivers that may not understand + newly-invented scaling suffixes. However, receivers SHOULD + accept the following suffixes: 'k', 'm', and 'g' for 1024, + 1048576, and 1073741824, respectively. If the 'ref-in-want' feature is advertised, the following argument can be included in the client's request as well as the potential addition of @@ -307,6 +313,16 @@ the 'wanted-refs' section in the server's response as explained below. particular ref, where <ref> is the full name of a ref on the server. +If the 'sideband-all' feature is advertised, the following argument can be +included in the client's request: + + sideband-all + Instruct the server to send the whole response multiplexed, not just + the packfile section. 
All non-flush and non-delim PKT-LINE in the + response (not only in the packfile section) will then start with a byte + indicating its sideband (1, 2, or 3), and the server may send "0005\2" + (a PKT-LINE of sideband 2 with no payload) as a keepalive packet. + The response of `fetch` is broken into a number of sections separated by delimiter packets (0001), with each section beginning with its section header. @@ -634,7 +634,6 @@ SCRIPT_LIB += git-parse-remote SCRIPT_LIB += git-rebase--am SCRIPT_LIB += git-rebase--common SCRIPT_LIB += git-rebase--preserve-merges -SCRIPT_LIB += git-rebase--merge SCRIPT_LIB += git-sh-setup SCRIPT_LIB += git-sh-i18n @@ -690,6 +689,7 @@ SCRIPTS = $(SCRIPT_SH_INS) \ ETAGS_TARGET = TAGS +FUZZ_OBJS += fuzz-commit-graph.o FUZZ_OBJS += fuzz-pack-headers.o FUZZ_OBJS += fuzz-pack-idx.o @@ -763,6 +763,7 @@ TEST_BUILTINS_OBJS += test-submodule-config.o TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o TEST_BUILTINS_OBJS += test-subprocess.o TEST_BUILTINS_OBJS += test-urlmatch-normalization.o +TEST_BUILTINS_OBJS += test-xml-encode.o TEST_BUILTINS_OBJS += test-wildmatch.o TEST_BUILTINS_OBJS += test-windows-named-pipe.o TEST_BUILTINS_OBJS += test-write-cache.o @@ -2948,6 +2949,16 @@ rpm:: @false .PHONY: rpm +artifacts-tar:: $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) $(OTHER_PROGRAMS) \ + GIT-BUILD-OPTIONS $(TEST_PROGRAMS) $(test_bindir_programs) \ + $(NO_INSTALL) $(MOFILES) + $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) \ + SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)' + test -n "$(ARTIFACTS_DIRECTORY)" + mkdir -p "$(ARTIFACTS_DIRECTORY)" + $(TAR) czf "$(ARTIFACTS_DIRECTORY)/artifacts.tar.gz" $^ templates/blt/ +.PHONY: artifacts-tar + htmldocs = git-htmldocs-$(GIT_VERSION) manpages = git-manpages-$(GIT_VERSION) .PHONY: dist-doc distclean @@ -3125,7 +3136,7 @@ cover_db_html: cover_db # An example command to build against libFuzzer from LLVM 4.0.0: # # make CC=clang CXX=clang++ \ -# FUZZ_CXXFLAGS="-fsanitize-coverage=trace-pc-guard -fsanitize=address" \ +# CFLAGS="-fsanitize-coverage=trace-pc-guard -fsanitize=address" \ # LIB_FUZZING_ENGINE=/usr/lib/llvm-4.0/lib/libFuzzer.a \ # fuzz-all # @@ -1,3 +1,5 @@ +[![Build Status](https://dev.azure.com/git/git/_apis/build/status/test-git.git)](https://dev.azure.com/git/git/_build/latest?definitionId=2) + Git - fast, scalable, distributed revision control system ========================================================= @@ -99,18 +99,23 @@ void *alloc_object_node(struct repository *r) return obj; } -unsigned int alloc_commit_index(struct repository *r) +static unsigned int alloc_commit_index(struct repository *r) { return r->parsed_objects->commit_count++; } -void *alloc_commit_node(struct repository *r) +void init_commit_node(struct repository *r, struct commit *c) { - struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit)); c->object.type = OBJ_COMMIT; c->index = alloc_commit_index(r); c->graph_pos = COMMIT_NOT_FROM_GRAPH; c->generation = GENERATION_NUMBER_INFINITY; +} + +void *alloc_commit_node(struct repository *r) +{ + struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit)); + init_commit_node(r, c); return c; } @@ -9,11 +9,11 @@ struct repository; void *alloc_blob_node(struct repository *r); void *alloc_tree_node(struct repository *r); +void init_commit_node(struct repository *r, struct commit *c); void *alloc_commit_node(struct repository *r); void *alloc_tag_node(struct repository *r); void *alloc_object_node(struct repository *r); void 
alloc_report(struct repository *r); -unsigned int alloc_commit_index(struct repository *r); struct alloc_state *allocate_alloc_state(void); void clear_alloc_state(struct alloc_state *s); @@ -467,7 +467,6 @@ static char *squash_slash(char *name) static char *find_name_gnu(struct apply_state *state, const char *line, - const char *def, int p_value) { struct strbuf name = STRBUF_INIT; @@ -714,7 +713,7 @@ static char *find_name(struct apply_state *state, int terminate) { if (*line == '"') { - char *name = find_name_gnu(state, line, def, p_value); + char *name = find_name_gnu(state, line, p_value); if (name) return name; } @@ -731,7 +730,7 @@ static char *find_name_traditional(struct apply_state *state, size_t date_len; if (*line == '"') { - char *name = find_name_gnu(state, line, def, p_value); + char *name = find_name_gnu(state, line, p_value); if (name) return name; } @@ -3183,7 +3182,7 @@ static int apply_binary(struct apply_state *state, return 0; /* deletion patch */ } - if (has_sha1_file(oid.hash)) { + if (has_object_file(&oid)) { /* We already have the postimage */ enum object_type type; unsigned long size; @@ -4020,7 +4019,7 @@ static int read_apply_cache(struct apply_state *state) return read_index_from(state->repo->index, state->index_file, get_git_dir()); else - return read_index(state->repo->index); + return repo_read_index(state->repo); } /* This function tries to read the object name from the current index */ @@ -4713,7 +4712,8 @@ static int apply_patch(struct apply_state *state, state->index_file, LOCK_DIE_ON_ERROR); else - hold_locked_index(&state->lock_file, LOCK_DIE_ON_ERROR); + repo_hold_locked_index(state->repo, &state->lock_file, + LOCK_DIE_ON_ERROR); } if (state->check_index && read_apply_cache(state) < 0) { @@ -7,7 +7,6 @@ * an insanely large number of attributes. 
*/ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "exec-cmd.h" @@ -1092,7 +1091,7 @@ static void collect_some_attrs(const struct index_state *istate, const char *path, struct attr_check *check) { - int i, pathlen, rem, dirlen; + int pathlen, rem, dirlen; const char *cp, *last_slash = NULL; int basename_offset; @@ -1113,20 +1112,6 @@ static void collect_some_attrs(const struct index_state *istate, all_attrs_init(&g_attr_hashmap, check); determine_macros(check->all_attrs, check->stack); - if (check->nr) { - rem = 0; - for (i = 0; i < check->nr; i++) { - int n = check->items[i].attr->attr_nr; - struct all_attrs_item *item = &check->all_attrs[n]; - if (item->macro) { - item->value = ATTR__UNSET; - rem++; - } - } - if (rem == check->nr) - return; - } - rem = check->all_attrs_nr; fill(path, pathlen, basename_offset, check->stack, check->all_attrs, rem); } diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 0000000000..c329b7218b --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,387 @@ +resources: +- repo: self + fetchDepth: 1 + +jobs: +- job: windows_build + displayName: Windows Build + condition: succeeded() + pool: Hosted + timeoutInMinutes: 240 + steps: + - powershell: | + if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") { + net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no + cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\ + } + displayName: 'Mount test-cache' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - powershell: | + $urlbase = "https://dev.azure.com/git-for-windows/git/_apis/build/builds" + $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=22&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id + $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[1].resource.downloadUrl + (New-Object Net.WebClient).DownloadFile($downloadUrl,"git-sdk-64-minimal.zip") + Expand-Archive git-sdk-64-minimal.zip -DestinationPath . -Force + Remove-Item git-sdk-64-minimal.zip + + # Let Git ignore the SDK and the test-cache + "/git-sdk-64-minimal/`n/test-cache/`n" | Out-File -NoNewLine -Encoding ascii -Append "$(Build.SourcesDirectory)\.git\info\exclude" + displayName: 'Download git-sdk-64-minimal' + - powershell: | + & git-sdk-64-minimal\usr\bin\bash.exe -lc @" + ci/make-test-artifacts.sh artifacts + "@ + if (!$?) 
{ exit(1) } + displayName: Build + env: + HOME: $(Build.SourcesDirectory) + MSYSTEM: MINGW64 + DEVELOPER: 1 + NO_PERL: 1 + - task: PublishPipelineArtifact@0 + displayName: 'Publish Pipeline Artifact: test artifacts' + inputs: + artifactName: 'windows-artifacts' + targetPath: '$(Build.SourcesDirectory)\artifacts' + - task: PublishPipelineArtifact@0 + displayName: 'Publish Pipeline Artifact: git-sdk-64-minimal' + inputs: + artifactName: 'git-sdk-64-minimal' + targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal' + - powershell: | + if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") { + cmd /c rmdir "$(Build.SourcesDirectory)\test-cache" + } + displayName: 'Unmount test-cache' + condition: true + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + +- job: windows_test + displayName: Windows Test + dependsOn: windows_build + condition: succeeded() + pool: Hosted + timeoutInMinutes: 240 + strategy: + parallel: 10 + steps: + - powershell: | + if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") { + net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no + cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\ + } + displayName: 'Mount test-cache' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: DownloadPipelineArtifact@0 + displayName: 'Download Pipeline Artifact: test artifacts' + inputs: + artifactName: 'windows-artifacts' + targetPath: '$(Build.SourcesDirectory)' + - task: DownloadPipelineArtifact@0 + displayName: 'Download Pipeline Artifact: git-sdk-64-minimal' + inputs: + artifactName: 'git-sdk-64-minimal' + targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal' + - powershell: | + & git-sdk-64-minimal\usr\bin\bash.exe -lc @" + test -f artifacts.tar.gz || { + echo No test artifacts found\; skipping >&2 + exit 0 + } + tar xf artifacts.tar.gz || exit 1 + + # Let Git ignore the SDK and the test-cache + printf '%s\n' /git-sdk-64-minimal/ /test-cache/ >>.git/info/exclude + + ci/run-test-slice.sh `$SYSTEM_JOBPOSITIONINPHASE `$SYSTEM_TOTALJOBSINPHASE || { + ci/print-test-failures.sh + exit 1 + } + "@ + if (!$?) 
{ exit(1) } + displayName: 'Test (parallel)' + env: + HOME: $(Build.SourcesDirectory) + MSYSTEM: MINGW64 + NO_SVN_TESTS: 1 + GIT_TEST_SKIP_REBASE_P: 1 + - powershell: | + if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") { + cmd /c rmdir "$(Build.SourcesDirectory)\test-cache" + } + displayName: 'Unmount test-cache' + condition: true + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: PublishTestResults@2 + displayName: 'Publish Test Results **/TEST-*.xml' + inputs: + mergeTestResults: true + testRunTitle: 'windows' + platform: Windows + publishRunAttachments: false + condition: succeededOrFailed() + - task: PublishBuildArtifacts@1 + displayName: 'Publish trash directories of failed tests' + condition: failed() + inputs: + PathtoPublish: t/failed-test-artifacts + ArtifactName: failed-test-artifacts + +- job: linux_clang + displayName: linux-clang + condition: succeeded() + pool: Hosted Ubuntu 1604 + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + sudo apt-get update && + sudo apt-get -y install git gcc make libssl-dev libcurl4-openssl-dev libexpat-dev tcl tk gettext git-email zlib1g-dev apache2-bin && + + export CC=clang || exit 1 + + ci/install-dependencies.sh || exit 1 + ci/run-build-and-tests.sh || { + ci/print-test-failures.sh + exit 1 + } + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1 + displayName: 'ci/run-build-and-tests.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: PublishTestResults@2 + displayName: 'Publish Test Results **/TEST-*.xml' + inputs: + mergeTestResults: true + testRunTitle: 'linux-clang' + platform: Linux + publishRunAttachments: false + condition: succeededOrFailed() + - task: PublishBuildArtifacts@1 + displayName: 'Publish trash directories of failed tests' + condition: failed() + inputs: + PathtoPublish: t/failed-test-artifacts + ArtifactName: failed-test-artifacts + +- job: linux_gcc + displayName: linux-gcc + condition: succeeded() + pool: Hosted Ubuntu 1604 + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + sudo add-apt-repository ppa:ubuntu-toolchain-r/test && + sudo apt-get update && + sudo apt-get -y install git gcc make libssl-dev libcurl4-openssl-dev libexpat-dev tcl tk gettext git-email zlib1g-dev apache2 language-pack-is git-svn gcc-8 || exit 1 + + ci/install-dependencies.sh || exit 1 + ci/run-build-and-tests.sh || { + ci/print-test-failures.sh + exit 1 + } + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1 + displayName: 'ci/run-build-and-tests.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: PublishTestResults@2 + displayName: 'Publish Test Results **/TEST-*.xml' + inputs: + mergeTestResults: true + testRunTitle: 'linux-gcc' + platform: Linux + publishRunAttachments: false + condition: succeededOrFailed() + - task: PublishBuildArtifacts@1 + displayName: 'Publish trash directories of failed tests' + condition: failed() + inputs: + PathtoPublish: t/failed-test-artifacts + ArtifactName: failed-test-artifacts + +- job: osx_clang + displayName: osx-clang + condition: succeeded() + pool: Hosted macOS + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh 
//gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + export CC=clang + + ci/install-dependencies.sh || exit 1 + ci/run-build-and-tests.sh || { + ci/print-test-failures.sh + exit 1 + } + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || umount "$HOME/test-cache" || exit 1 + displayName: 'ci/run-build-and-tests.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: PublishTestResults@2 + displayName: 'Publish Test Results **/TEST-*.xml' + inputs: + mergeTestResults: true + testRunTitle: 'osx-clang' + platform: macOS + publishRunAttachments: false + condition: succeededOrFailed() + - task: PublishBuildArtifacts@1 + displayName: 'Publish trash directories of failed tests' + condition: failed() + inputs: + PathtoPublish: t/failed-test-artifacts + ArtifactName: failed-test-artifacts + +- job: osx_gcc + displayName: osx-gcc + condition: succeeded() + pool: Hosted macOS + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + ci/install-dependencies.sh || exit 1 + ci/run-build-and-tests.sh || { + ci/print-test-failures.sh + exit 1 + } + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || umount "$HOME/test-cache" || exit 1 + displayName: 'ci/run-build-and-tests.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: PublishTestResults@2 + displayName: 'Publish Test Results **/TEST-*.xml' + inputs: + mergeTestResults: true + testRunTitle: 'osx-gcc' + platform: macOS + publishRunAttachments: false + condition: succeededOrFailed() + - task: PublishBuildArtifacts@1 + displayName: 'Publish trash directories of failed tests' + condition: failed() + inputs: + PathtoPublish: t/failed-test-artifacts + ArtifactName: failed-test-artifacts + +- job: gettext_poison + displayName: GETTEXT_POISON + condition: succeeded() + pool: Hosted Ubuntu 1604 + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + sudo apt-get update && + sudo apt-get -y install git gcc make libssl-dev libcurl4-openssl-dev libexpat-dev tcl tk gettext git-email zlib1g-dev && + + export jobname=GETTEXT_POISON || exit 1 + + ci/run-build-and-tests.sh || { + ci/print-test-failures.sh + exit 1 + } + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1 + displayName: 'ci/run-build-and-tests.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: PublishTestResults@2 + displayName: 'Publish Test Results **/TEST-*.xml' + inputs: + mergeTestResults: true + testRunTitle: 'gettext-poison' + platform: Linux + publishRunAttachments: false + condition: succeededOrFailed() + - task: PublishBuildArtifacts@1 + displayName: 'Publish trash directories of failed tests' + condition: failed() + inputs: + PathtoPublish: t/failed-test-artifacts + ArtifactName: failed-test-artifacts + +- job: linux32 + displayName: Linux32 + condition: succeeded() + pool: Hosted Ubuntu 1604 + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + res=0 + sudo AGENT_OS="$AGENT_OS" BUILD_BUILDNUMBER="$BUILD_BUILDNUMBER" BUILD_REPOSITORY_URI="$BUILD_REPOSITORY_URI" BUILD_SOURCEBRANCH="$BUILD_SOURCEBRANCH" 
BUILD_SOURCEVERSION="$BUILD_SOURCEVERSION" SYSTEM_PHASENAME="$SYSTEM_PHASENAME" SYSTEM_TASKDEFINITIONSURI="$SYSTEM_TASKDEFINITIONSURI" SYSTEM_TEAMPROJECT="$SYSTEM_TEAMPROJECT" CC=$CC MAKEFLAGS="$MAKEFLAGS" bash -lxc ci/run-linux32-docker.sh || res=1 + + sudo chmod a+r t/out/TEST-*.xml + test ! -d t/failed-test-artifacts || sudo chmod a+r t/failed-test-artifacts + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || res=1 + exit $res + displayName: 'ci/run-linux32-docker.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + - task: PublishTestResults@2 + displayName: 'Publish Test Results **/TEST-*.xml' + inputs: + mergeTestResults: true + testRunTitle: 'linux32' + platform: Linux + publishRunAttachments: false + condition: succeededOrFailed() + - task: PublishBuildArtifacts@1 + displayName: 'Publish trash directories of failed tests' + condition: failed() + inputs: + PathtoPublish: t/failed-test-artifacts + ArtifactName: failed-test-artifacts + +- job: static_analysis + displayName: StaticAnalysis + condition: succeeded() + pool: Hosted Ubuntu 1604 + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + sudo apt-get update && + sudo apt-get install -y coccinelle && + + export jobname=StaticAnalysis && + + ci/run-static-analysis.sh || exit 1 + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1 + displayName: 'ci/run-static-analysis.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) + +- job: documentation + displayName: Documentation + condition: succeeded() + pool: Hosted Ubuntu 1604 + steps: + - bash: | + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1 + + sudo apt-get update && + sudo apt-get install -y asciidoc xmlto asciidoctor && + + export ALREADY_HAVE_ASCIIDOCTOR=yes. && + export jobname=Documentation && + + ci/test-documentation.sh || exit 1 + + test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1 + displayName: 'ci/test-documentation.sh' + env: + GITFILESHAREPWD: $(gitfileshare.pwd) @@ -658,7 +658,7 @@ static void bisect_common(struct rev_info *revs) if (prepare_revision_walk(revs)) die("revision walk setup failed"); if (revs->tree_objects) - mark_edges_uninteresting(revs, NULL); + mark_edges_uninteresting(revs, NULL, 0); } static void exit_if_skipped_commits(struct commit_list *tried, @@ -188,7 +188,7 @@ static struct commit *fake_working_tree_commit(struct repository *r, unsigned mode; struct strbuf msg = STRBUF_INIT; - read_index(r->index); + repo_read_index(r); time(&now); commit = alloc_commit_node(r); commit->object.parsed = 1; @@ -270,7 +270,7 @@ static struct commit *fake_working_tree_commit(struct repository *r, * want to run "diff-index --cached". 
*/ discard_index(r->index); - read_index(r->index); + repo_read_index(r); len = strlen(path); if (!mode) { diff --git a/builtin/add.c b/builtin/add.c index d461ba08b9..db2dfa4350 100644 --- a/builtin/add.c +++ b/builtin/add.c @@ -3,6 +3,7 @@ * * Copyright (C) 2006 Linus Torvalds */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "builtin.h" @@ -137,7 +138,7 @@ static int renormalize_tracked_files(const struct pathspec *pathspec, int flags) continue; /* do not touch non blobs */ if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL)) continue; - retval |= add_file_to_cache(ce->name, flags | HASH_RENORMALIZE); + retval |= add_file_to_cache(ce->name, flags | ADD_CACHE_RENORMALIZE); } return retval; diff --git a/builtin/am.c b/builtin/am.c index 95370313b6..58a2aef28b 100644 --- a/builtin/am.c +++ b/builtin/am.c @@ -3,6 +3,7 @@ * * Based on git-am.sh by Junio C Hamano. */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "builtin.h" @@ -35,22 +36,6 @@ #include "repository.h" /** - * Returns 1 if the file is empty or does not exist, 0 otherwise. - */ -static int is_empty_file(const char *filename) -{ - struct stat st; - - if (stat(filename, &st) < 0) { - if (errno == ENOENT) - return 1; - die_errno(_("could not stat %s"), filename); - } - - return !st.st_size; -} - -/** * Returns the length of the first line of msg. */ static int linelen(const char *msg) @@ -527,7 +512,7 @@ static int copy_notes_for_rebase(const struct am_state *state) } finish: - finish_copy_notes_for_rewrite(c, msg); + finish_copy_notes_for_rewrite(the_repository, c, msg); fclose(fp); strbuf_release(&sb); return ret; @@ -1220,7 +1205,7 @@ static int parse_mail(struct am_state *state, const char *mail) goto finish; } - if (is_empty_file(am_path(state, "patch"))) { + if (is_empty_or_missing_file(am_path(state, "patch"))) { printf_ln(_("Patch is empty.")); die_user_resolve(state); } @@ -1545,7 +1530,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa * changes. */ - init_merge_options(&o); + init_merge_options(&o, the_repository); o.branch1 = "HEAD"; their_tree_name = xstrfmt("%.*s", linelen(state->msg), state->msg); @@ -1719,7 +1704,7 @@ static void am_run(struct am_state *state, int resume) refresh_and_write_cache(); - if (index_has_changes(&the_index, NULL, &sb)) { + if (repo_index_has_changes(the_repository, NULL, &sb)) { write_state_bool(state, "dirtyindex", 1); die(_("Dirty index: cannot apply patches (dirty: %s)"), sb.buf); } @@ -1777,7 +1762,7 @@ static void am_run(struct am_state *state, int resume) * the result may have produced the same tree as ours. 
*/ if (!apply_status && - !index_has_changes(&the_index, NULL, NULL)) { + !repo_index_has_changes(the_repository, NULL, NULL)) { say(state, stdout, _("No changes -- Patch already applied.")); goto next; } @@ -1803,7 +1788,7 @@ next: resume = 0; } - if (!is_empty_file(am_path(state, "rewritten"))) { + if (!is_empty_or_missing_file(am_path(state, "rewritten"))) { assert(state->rebasing); copy_notes_for_rebase(state); run_post_rewrite_hook(state); @@ -1831,7 +1816,7 @@ static void am_resolve(struct am_state *state) say(state, stdout, _("Applying: %.*s"), linelen(state->msg), state->msg); - if (!index_has_changes(&the_index, NULL, NULL)) { + if (!repo_index_has_changes(the_repository, NULL, NULL)) { printf_ln(_("No changes - did you forget to use 'git add'?\n" "If there is nothing left to stage, chances are that something else\n" "already introduced the same changes; you might want to skip this patch.")); @@ -2000,6 +1985,15 @@ static void am_skip(struct am_state *state) if (clean_index(&head, &head)) die(_("failed to clean index")); + if (state->rebasing) { + FILE *fp = xfopen(am_path(state, "rewritten"), "a"); + + assert(!is_null_oid(&state->orig_commit)); + fprintf(fp, "%s ", oid_to_hex(&state->orig_commit)); + fprintf(fp, "%s\n", oid_to_hex(&head)); + fclose(fp); + } + am_next(state); am_load(state); am_run(state, 0); @@ -2278,7 +2272,7 @@ int cmd_am(int argc, const char **argv, const char *prefix) /* Ensure a valid committer ident can be constructed */ git_committer_info(IDENT_STRICT); - if (read_index_preload(&the_index, NULL, 0) < 0) + if (repo_read_index_preload(the_repository, NULL, 0) < 0) die(_("failed to read the index")); if (in_progress) { diff --git a/builtin/archive.c b/builtin/archive.c index d2455237ce..45d11669aa 100644 --- a/builtin/archive.c +++ b/builtin/archive.c @@ -27,10 +27,10 @@ static int run_remote_archiver(int argc, const char **argv, const char *remote, const char *exec, const char *name_hint) { - char *buf; int fd[2], i, rv; struct transport *transport; struct remote *_remote; + struct packet_reader reader; _remote = remote_get(remote); if (!_remote->url[0]) @@ -53,18 +53,19 @@ static int run_remote_archiver(int argc, const char **argv, packet_write_fmt(fd[1], "argument %s\n", argv[i]); packet_flush(fd[1]); - buf = packet_read_line(fd[0], NULL); - if (!buf) + packet_reader_init(&reader, fd[0], NULL, 0, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_DIE_ON_ERR_PACKET); + + if (packet_reader_read(&reader) != PACKET_READ_NORMAL) die(_("git archive: expected ACK/NAK, got a flush packet")); - if (strcmp(buf, "ACK")) { - if (starts_with(buf, "NACK ")) - die(_("git archive: NACK %s"), buf + 5); - if (starts_with(buf, "ERR ")) - die(_("remote error: %s"), buf + 4); + if (strcmp(reader.line, "ACK")) { + if (starts_with(reader.line, "NACK ")) + die(_("git archive: NACK %s"), reader.line + 5); die(_("git archive: protocol error")); } - if (packet_read_line(fd[0], NULL)) + if (packet_reader_read(&reader) != PACKET_READ_FLUSH) die(_("git archive: expected a flush")); /* Now, start reading from fd[0] and spit it out to stdout */ diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c index 417d141c09..c1cff32661 100644 --- a/builtin/bisect--helper.c +++ b/builtin/bisect--helper.c @@ -3,18 +3,58 @@ #include "parse-options.h" #include "bisect.h" #include "refs.h" +#include "dir.h" +#include "argv-array.h" +#include "run-command.h" +#include "prompt.h" +#include "quote.h" static GIT_PATH_FUNC(git_path_bisect_terms, "BISECT_TERMS") static 
GIT_PATH_FUNC(git_path_bisect_expected_rev, "BISECT_EXPECTED_REV") static GIT_PATH_FUNC(git_path_bisect_ancestors_ok, "BISECT_ANCESTORS_OK") +static GIT_PATH_FUNC(git_path_bisect_start, "BISECT_START") +static GIT_PATH_FUNC(git_path_bisect_head, "BISECT_HEAD") +static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG") +static GIT_PATH_FUNC(git_path_head_name, "head-name") +static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES") static const char * const git_bisect_helper_usage[] = { N_("git bisect--helper --next-all [--no-checkout]"), N_("git bisect--helper --write-terms <bad_term> <good_term>"), N_("git bisect--helper --bisect-clean-state"), + N_("git bisect--helper --bisect-reset [<commit>]"), + N_("git bisect--helper --bisect-write [--no-log] <state> <revision> <good_term> <bad_term>"), + N_("git bisect--helper --bisect-check-and-set-terms <command> <good_term> <bad_term>"), + N_("git bisect--helper --bisect-next-check <good_term> <bad_term> [<term>]"), + N_("git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]"), + N_("git bisect--helper --bisect-start [--term-{old,good}=<term> --term-{new,bad}=<term>]" + "[--no-checkout] [<bad> [<good>...]] [--] [<paths>...]"), NULL }; +struct bisect_terms { + char *term_good; + char *term_bad; +}; + +static void free_terms(struct bisect_terms *terms) +{ + FREE_AND_NULL(terms->term_good); + FREE_AND_NULL(terms->term_bad); +} + +static void set_terms(struct bisect_terms *terms, const char *bad, + const char *good) +{ + free((void *)terms->term_good); + terms->term_good = xstrdup(good); + free((void *)terms->term_bad); + terms->term_bad = xstrdup(bad); +} + +static const char *vocab_bad = "bad|new"; +static const char *vocab_good = "good|old"; + /* * Check whether the string `term` belongs to the set of strings * included in the variable arguments. @@ -106,15 +146,482 @@ static void check_expected_revs(const char **revs, int rev_nr) } } +static int bisect_reset(const char *commit) +{ + struct strbuf branch = STRBUF_INIT; + + if (!commit) { + if (strbuf_read_file(&branch, git_path_bisect_start(), 0) < 1) { + printf(_("We are not bisecting.\n")); + return 0; + } + strbuf_rtrim(&branch); + } else { + struct object_id oid; + + if (get_oid_commit(commit, &oid)) + return error(_("'%s' is not a valid commit"), commit); + strbuf_addstr(&branch, commit); + } + + if (!file_exists(git_path_bisect_head())) { + struct argv_array argv = ARGV_ARRAY_INIT; + + argv_array_pushl(&argv, "checkout", branch.buf, "--", NULL); + if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) { + strbuf_release(&branch); + argv_array_clear(&argv); + return error(_("could not check out original" + " HEAD '%s'. 
Try 'git bisect" + "reset <commit>'."), branch.buf); + } + argv_array_clear(&argv); + } + + strbuf_release(&branch); + return bisect_clean_state(); +} + +static void log_commit(FILE *fp, char *fmt, const char *state, + struct commit *commit) +{ + struct pretty_print_context pp = {0}; + struct strbuf commit_msg = STRBUF_INIT; + char *label = xstrfmt(fmt, state); + + format_commit_message(commit, "%s", &commit_msg, &pp); + + fprintf(fp, "# %s: [%s] %s\n", label, oid_to_hex(&commit->object.oid), + commit_msg.buf); + + strbuf_release(&commit_msg); + free(label); +} + +static int bisect_write(const char *state, const char *rev, + const struct bisect_terms *terms, int nolog) +{ + struct strbuf tag = STRBUF_INIT; + struct object_id oid; + struct commit *commit; + FILE *fp = NULL; + int retval = 0; + + if (!strcmp(state, terms->term_bad)) { + strbuf_addf(&tag, "refs/bisect/%s", state); + } else if (one_of(state, terms->term_good, "skip", NULL)) { + strbuf_addf(&tag, "refs/bisect/%s-%s", state, rev); + } else { + retval = error(_("Bad bisect_write argument: %s"), state); + goto finish; + } + + if (get_oid(rev, &oid)) { + retval = error(_("couldn't get the oid of the rev '%s'"), rev); + goto finish; + } + + if (update_ref(NULL, tag.buf, &oid, NULL, 0, + UPDATE_REFS_MSG_ON_ERR)) { + retval = -1; + goto finish; + } + + fp = fopen(git_path_bisect_log(), "a"); + if (!fp) { + retval = error_errno(_("couldn't open the file '%s'"), git_path_bisect_log()); + goto finish; + } + + commit = lookup_commit_reference(the_repository, &oid); + log_commit(fp, "%s", state, commit); + + if (!nolog) + fprintf(fp, "git bisect %s %s\n", state, rev); + +finish: + if (fp) + fclose(fp); + strbuf_release(&tag); + return retval; +} + +static int check_and_set_terms(struct bisect_terms *terms, const char *cmd) +{ + int has_term_file = !is_empty_or_missing_file(git_path_bisect_terms()); + + if (one_of(cmd, "skip", "start", "terms", NULL)) + return 0; + + if (has_term_file && strcmp(cmd, terms->term_bad) && + strcmp(cmd, terms->term_good)) + return error(_("Invalid command: you're currently in a " + "%s/%s bisect"), terms->term_bad, + terms->term_good); + + if (!has_term_file) { + if (one_of(cmd, "bad", "good", NULL)) { + set_terms(terms, "bad", "good"); + return write_terms(terms->term_bad, terms->term_good); + } + if (one_of(cmd, "new", "old", NULL)) { + set_terms(terms, "new", "old"); + return write_terms(terms->term_bad, terms->term_good); + } + } + + return 0; +} + +static int mark_good(const char *refname, const struct object_id *oid, + int flag, void *cb_data) +{ + int *m_good = (int *)cb_data; + *m_good = 0; + return 1; +} + +static const char *need_bad_and_good_revision_warning = + N_("You need to give me at least one %s and %s revision.\n" + "You can use \"git bisect %s\" and \"git bisect %s\" for that."); + +static const char *need_bisect_start_warning = + N_("You need to start by \"git bisect start\".\n" + "You then need to give me at least one %s and %s revision.\n" + "You can use \"git bisect %s\" and \"git bisect %s\" for that."); + +static int bisect_next_check(const struct bisect_terms *terms, + const char *current_term) +{ + int missing_good = 1, missing_bad = 1, retval = 0; + const char *bad_ref = xstrfmt("refs/bisect/%s", terms->term_bad); + const char *good_glob = xstrfmt("%s-*", terms->term_good); + + if (ref_exists(bad_ref)) + missing_bad = 0; + + for_each_glob_ref_in(mark_good, good_glob, "refs/bisect/", + (void *) &missing_good); + + if (!missing_good && !missing_bad) + goto finish; + + if 
(!current_term) { + retval = -1; + goto finish; + } + + if (missing_good && !missing_bad && + !strcmp(current_term, terms->term_good)) { + char *yesno; + /* + * have bad (or new) but not good (or old). We could bisect + * although this is less optimum. + */ + warning(_("bisecting only with a %s commit"), terms->term_bad); + if (!isatty(0)) + goto finish; + /* + * TRANSLATORS: Make sure to include [Y] and [n] in your + * translation. The program will only accept English input + * at this point. + */ + yesno = git_prompt(_("Are you sure [Y/n]? "), PROMPT_ECHO); + if (starts_with(yesno, "N") || starts_with(yesno, "n")) + retval = -1; + goto finish; + } + if (!is_empty_or_missing_file(git_path_bisect_start())) { + retval = error(_(need_bad_and_good_revision_warning), + vocab_bad, vocab_good, vocab_bad, vocab_good); + } else { + retval = error(_(need_bisect_start_warning), + vocab_good, vocab_bad, vocab_good, vocab_bad); + } + +finish: + free((void *) good_glob); + free((void *) bad_ref); + return retval; +} + +static int get_terms(struct bisect_terms *terms) +{ + struct strbuf str = STRBUF_INIT; + FILE *fp = NULL; + int res = 0; + + fp = fopen(git_path_bisect_terms(), "r"); + if (!fp) { + res = -1; + goto finish; + } + + free_terms(terms); + strbuf_getline_lf(&str, fp); + terms->term_bad = strbuf_detach(&str, NULL); + strbuf_getline_lf(&str, fp); + terms->term_good = strbuf_detach(&str, NULL); + +finish: + if (fp) + fclose(fp); + strbuf_release(&str); + return res; +} + +static int bisect_terms(struct bisect_terms *terms, const char *option) +{ + if (get_terms(terms)) + return error(_("no terms defined")); + + if (option == NULL) { + printf(_("Your current terms are %s for the old state\n" + "and %s for the new state.\n"), + terms->term_good, terms->term_bad); + return 0; + } + if (one_of(option, "--term-good", "--term-old", NULL)) + printf("%s\n", terms->term_good); + else if (one_of(option, "--term-bad", "--term-new", NULL)) + printf("%s\n", terms->term_bad); + else + return error(_("invalid argument %s for 'git bisect terms'.\n" + "Supported options are: " + "--term-good|--term-old and " + "--term-bad|--term-new."), option); + + return 0; +} + +static int bisect_append_log_quoted(const char **argv) +{ + int retval = 0; + FILE *fp = fopen(git_path_bisect_log(), "a"); + struct strbuf orig_args = STRBUF_INIT; + + if (!fp) + return -1; + + if (fprintf(fp, "git bisect start") < 1) { + retval = -1; + goto finish; + } + + sq_quote_argv(&orig_args, argv); + if (fprintf(fp, "%s\n", orig_args.buf) < 1) + retval = -1; + +finish: + fclose(fp); + strbuf_release(&orig_args); + return retval; +} + +static int bisect_start(struct bisect_terms *terms, int no_checkout, + const char **argv, int argc) +{ + int i, has_double_dash = 0, must_write_terms = 0, bad_seen = 0; + int flags, pathspec_pos, retval = 0; + struct string_list revs = STRING_LIST_INIT_DUP; + struct string_list states = STRING_LIST_INIT_DUP; + struct strbuf start_head = STRBUF_INIT; + struct strbuf bisect_names = STRBUF_INIT; + struct object_id head_oid; + struct object_id oid; + const char *head; + + if (is_bare_repository()) + no_checkout = 1; + + /* + * Check for one bad and then some good revisions + */ + for (i = 0; i < argc; i++) { + if (!strcmp(argv[i], "--")) { + has_double_dash = 1; + break; + } + } + + for (i = 0; i < argc; i++) { + const char *arg = argv[i]; + if (!strcmp(argv[i], "--")) { + break; + } else if (!strcmp(arg, "--no-checkout")) { + no_checkout = 1; + } else if (!strcmp(arg, "--term-good") || + !strcmp(arg, 
"--term-old")) { + must_write_terms = 1; + free((void *) terms->term_good); + terms->term_good = xstrdup(argv[++i]); + } else if (skip_prefix(arg, "--term-good=", &arg) || + skip_prefix(arg, "--term-old=", &arg)) { + must_write_terms = 1; + free((void *) terms->term_good); + terms->term_good = xstrdup(arg); + } else if (!strcmp(arg, "--term-bad") || + !strcmp(arg, "--term-new")) { + must_write_terms = 1; + free((void *) terms->term_bad); + terms->term_bad = xstrdup(argv[++i]); + } else if (skip_prefix(arg, "--term-bad=", &arg) || + skip_prefix(arg, "--term-new=", &arg)) { + must_write_terms = 1; + free((void *) terms->term_bad); + terms->term_bad = xstrdup(arg); + } else if (starts_with(arg, "--") && + !one_of(arg, "--term-good", "--term-bad", NULL)) { + return error(_("unrecognized option: '%s'"), arg); + } else { + char *commit_id = xstrfmt("%s^{commit}", arg); + if (get_oid(commit_id, &oid) && has_double_dash) + die(_("'%s' does not appear to be a valid " + "revision"), arg); + + string_list_append(&revs, oid_to_hex(&oid)); + free(commit_id); + } + } + pathspec_pos = i; + + /* + * The user ran "git bisect start <sha1> <sha1>", hence did not + * explicitly specify the terms, but we are already starting to + * set references named with the default terms, and won't be able + * to change afterwards. + */ + if (revs.nr) + must_write_terms = 1; + for (i = 0; i < revs.nr; i++) { + if (bad_seen) { + string_list_append(&states, terms->term_good); + } else { + bad_seen = 1; + string_list_append(&states, terms->term_bad); + } + } + + /* + * Verify HEAD + */ + head = resolve_ref_unsafe("HEAD", 0, &head_oid, &flags); + if (!head) + if (get_oid("HEAD", &head_oid)) + return error(_("bad HEAD - I need a HEAD")); + + /* + * Check if we are bisecting + */ + if (!is_empty_or_missing_file(git_path_bisect_start())) { + /* Reset to the rev from where we started */ + strbuf_read_file(&start_head, git_path_bisect_start(), 0); + strbuf_trim(&start_head); + if (!no_checkout) { + struct argv_array argv = ARGV_ARRAY_INIT; + + argv_array_pushl(&argv, "checkout", start_head.buf, + "--", NULL); + if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) { + retval = error(_("checking out '%s' failed." + " Try 'git bisect start " + "<valid-branch>'."), + start_head.buf); + goto finish; + } + } + } else { + /* Get the rev from where we start. */ + if (!get_oid(head, &head_oid) && + !starts_with(head, "refs/heads/")) { + strbuf_reset(&start_head); + strbuf_addstr(&start_head, oid_to_hex(&head_oid)); + } else if (!get_oid(head, &head_oid) && + skip_prefix(head, "refs/heads/", &head)) { + /* + * This error message should only be triggered by + * cogito usage, and cogito users should understand + * it relates to cg-seek. + */ + if (!is_empty_or_missing_file(git_path_head_name())) + return error(_("won't bisect on cg-seek'ed tree")); + strbuf_addstr(&start_head, head); + } else { + return error(_("bad HEAD - strange symbolic ref")); + } + } + + /* + * Get rid of any old bisect state. + */ + if (bisect_clean_state()) + return -1; + + /* + * In case of mistaken revs or checkout error, or signals received, + * "bisect_auto_next" below may exit or misbehave. + * We have to trap this to be able to clean up using + * "bisect_clean_state". 
+ */ + + /* + * Write new start state + */ + write_file(git_path_bisect_start(), "%s\n", start_head.buf); + + if (no_checkout) { + get_oid(start_head.buf, &oid); + if (update_ref(NULL, "BISECT_HEAD", &oid, NULL, 0, + UPDATE_REFS_MSG_ON_ERR)) { + retval = -1; + goto finish; + } + } + + if (pathspec_pos < argc - 1) + sq_quote_argv(&bisect_names, argv + pathspec_pos); + write_file(git_path_bisect_names(), "%s\n", bisect_names.buf); + + for (i = 0; i < states.nr; i++) + if (bisect_write(states.items[i].string, + revs.items[i].string, terms, 1)) { + retval = -1; + goto finish; + } + + if (must_write_terms && write_terms(terms->term_bad, + terms->term_good)) { + retval = -1; + goto finish; + } + + retval = bisect_append_log_quoted(argv); + if (retval) + retval = -1; + +finish: + string_list_clear(&revs, 0); + string_list_clear(&states, 0); + strbuf_release(&start_head); + strbuf_release(&bisect_names); + return retval; +} + int cmd_bisect__helper(int argc, const char **argv, const char *prefix) { enum { NEXT_ALL = 1, WRITE_TERMS, BISECT_CLEAN_STATE, - CHECK_EXPECTED_REVS + CHECK_EXPECTED_REVS, + BISECT_RESET, + BISECT_WRITE, + CHECK_AND_SET_TERMS, + BISECT_NEXT_CHECK, + BISECT_TERMS, + BISECT_START } cmdmode = 0; - int no_checkout = 0; + int no_checkout = 0, res = 0, nolog = 0; struct option options[] = { OPT_CMDMODE(0, "next-all", &cmdmode, N_("perform 'git bisect next'"), NEXT_ALL), @@ -124,13 +631,29 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) N_("cleanup the bisection state"), BISECT_CLEAN_STATE), OPT_CMDMODE(0, "check-expected-revs", &cmdmode, N_("check for expected revs"), CHECK_EXPECTED_REVS), + OPT_CMDMODE(0, "bisect-reset", &cmdmode, + N_("reset the bisection state"), BISECT_RESET), + OPT_CMDMODE(0, "bisect-write", &cmdmode, + N_("write out the bisection state in BISECT_LOG"), BISECT_WRITE), + OPT_CMDMODE(0, "check-and-set-terms", &cmdmode, + N_("check and set terms in a bisection state"), CHECK_AND_SET_TERMS), + OPT_CMDMODE(0, "bisect-next-check", &cmdmode, + N_("check whether bad or good terms exist"), BISECT_NEXT_CHECK), + OPT_CMDMODE(0, "bisect-terms", &cmdmode, + N_("print out the bisect terms"), BISECT_TERMS), + OPT_CMDMODE(0, "bisect-start", &cmdmode, + N_("start the bisect session"), BISECT_START), OPT_BOOL(0, "no-checkout", &no_checkout, N_("update BISECT_HEAD instead of checking out the current commit")), + OPT_BOOL(0, "no-log", &nolog, + N_("no log for BISECT_WRITE ")), OPT_END() }; + struct bisect_terms terms = { .term_good = NULL, .term_bad = NULL }; argc = parse_options(argc, argv, prefix, options, - git_bisect_helper_usage, 0); + git_bisect_helper_usage, + PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN); if (!cmdmode) usage_with_options(git_bisect_helper_usage, options); @@ -149,8 +672,40 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) case CHECK_EXPECTED_REVS: check_expected_revs(argv, argc); return 0; + case BISECT_RESET: + if (argc > 1) + return error(_("--bisect-reset requires either no argument or a commit")); + return !!bisect_reset(argc ? 
argv[0] : NULL); + case BISECT_WRITE: + if (argc != 4 && argc != 5) + return error(_("--bisect-write requires either 4 or 5 arguments")); + set_terms(&terms, argv[3], argv[2]); + res = bisect_write(argv[0], argv[1], &terms, nolog); + break; + case CHECK_AND_SET_TERMS: + if (argc != 3) + return error(_("--check-and-set-terms requires 3 arguments")); + set_terms(&terms, argv[2], argv[1]); + res = check_and_set_terms(&terms, argv[0]); + break; + case BISECT_NEXT_CHECK: + if (argc != 2 && argc != 3) + return error(_("--bisect-next-check requires 2 or 3 arguments")); + set_terms(&terms, argv[1], argv[0]); + res = bisect_next_check(&terms, argc == 3 ? argv[2] : NULL); + break; + case BISECT_TERMS: + if (argc > 1) + return error(_("--bisect-terms requires 0 or 1 argument")); + res = bisect_terms(&terms, argc == 1 ? argv[0] : NULL); + break; + case BISECT_START: + set_terms(&terms, "bad", "good"); + res = bisect_start(&terms, no_checkout, argv, argc); + break; default: return error("BUG: unknown subcommand '%d'", cmdmode); } - return 0; + free_terms(&terms); + return !!res; } diff --git a/builtin/blame.c b/builtin/blame.c index 6d798f9939..581de0d832 100644 --- a/builtin/blame.c +++ b/builtin/blame.c @@ -925,6 +925,10 @@ parse_done: */ blame_date_width = utf8_strwidth(_("4 years, 11 months ago")) + 1; /* add the null */ break; + case DATE_HUMAN: + /* If the year is shown, no time is shown */ + blame_date_width = sizeof("Thu Oct 19 16:00"); + break; case DATE_NORMAL: blame_date_width = sizeof("Thu Oct 19 16:00:04 2006 -0700"); break; @@ -1007,7 +1011,8 @@ parse_done: long bottom, top; if (parse_range_arg(range_list.items[range_i].string, nth_line_cb, &sb, lno, anchor, - &bottom, &top, sb.path, &the_index)) + &bottom, &top, sb.path, + the_repository->index)) usage(blame_usage); if ((!lno && (top || bottom)) || lno < bottom) die(Q_("file %s has only %lu line", diff --git a/builtin/bundle.c b/builtin/bundle.c index 9e9c65d9c6..1ea4bfdfc1 100644 --- a/builtin/bundle.c +++ b/builtin/bundle.c @@ -56,8 +56,7 @@ int cmd_bundle(int argc, const char **argv, const char *prefix) } if (!startup_info->have_repository) die(_("Need a repository to create a bundle.")); - return !!create_bundle(the_repository, &header, - bundle_file, argc, argv); + return !!create_bundle(the_repository, bundle_file, argc, argv); } else if (!strcmp(cmd, "unbundle")) { if (!startup_info->have_repository) die(_("Need a repository to unbundle.")); diff --git a/builtin/cat-file.c b/builtin/cat-file.c index 2ca56fd086..0f092382e1 100644 --- a/builtin/cat-file.c +++ b/builtin/cat-file.c @@ -3,6 +3,7 @@ * * Copyright (C) Linus Torvalds, 2005 */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "builtin.h" @@ -73,7 +74,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, if (unknown_type) flags |= OBJECT_INFO_ALLOW_UNKNOWN_TYPE; - if (get_oid_with_context(obj_name, GET_OID_RECORD_PATH, + if (get_oid_with_context(the_repository, obj_name, + GET_OID_RECORD_PATH, &oid, &obj_context)) die("Not a valid object name %s", obj_name); @@ -209,14 +211,14 @@ struct expand_data { /* * After a mark_query run, this object_info is set up to be - * passed to sha1_object_info_extended. It will point to the data + * passed to oid_object_info_extended. It will point to the data * elements above, so you can retrieve the response from there. 
*/ struct object_info info; /* * This flag will be true if the requested batch format and options - * don't require us to call sha1_object_info, which can then be + * don't require us to call oid_object_info, which can then be * optimized out. */ unsigned skip_object_info : 1; @@ -380,14 +382,18 @@ static void batch_one_object(const char *obj_name, { struct object_context ctx; int flags = opt->follow_symlinks ? GET_OID_FOLLOW_SYMLINKS : 0; - enum follow_symlinks_result result; + enum get_oid_result result; - result = get_oid_with_context(obj_name, flags, &data->oid, &ctx); + result = get_oid_with_context(the_repository, obj_name, + flags, &data->oid, &ctx); if (result != FOUND) { switch (result) { case MISSING_OBJECT: printf("%s missing\n", obj_name); break; + case SHORT_NAME_AMBIGUOUS: + printf("%s ambiguous\n", obj_name); + break; case DANGLING_SYMLINK: printf("dangling %"PRIuMAX"\n%s\n", (uintmax_t)strlen(obj_name), obj_name); @@ -490,7 +496,7 @@ static int batch_objects(struct batch_options *opt) /* * Expand once with our special mark_query flag, which will prime the - * object_info to be handed to sha1_object_info_extended for each + * object_info to be handed to oid_object_info_extended for each * object. */ memset(&data, 0, sizeof(data)); diff --git a/builtin/check-attr.c b/builtin/check-attr.c index 30a2f84274..dd83397786 100644 --- a/builtin/check-attr.c +++ b/builtin/check-attr.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "cache.h" #include "config.h" diff --git a/builtin/check-ignore.c b/builtin/check-ignore.c index ec9a959e08..599097304b 100644 --- a/builtin/check-ignore.c +++ b/builtin/check-ignore.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "cache.h" #include "config.h" diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c index a2a726ad8d..1ac1cc290e 100644 --- a/builtin/checkout-index.c +++ b/builtin/checkout-index.c @@ -4,6 +4,7 @@ * Copyright (C) 2005 Linus Torvalds * */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "config.h" #include "lockfile.h" diff --git a/builtin/checkout.c b/builtin/checkout.c index 6fadf412e8..ece4eb14bf 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "config.h" #include "checkout.h" @@ -288,7 +289,7 @@ static int checkout_paths(const struct checkout_opts *opts, return run_add_interactive(revision, "--patch=checkout", &opts->pathspec); - hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR); + repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR); if (read_cache_preload(&opts->pathspec) < 0) return error(_("index file corrupt")); @@ -592,6 +593,14 @@ static int skip_merge_working_tree(const struct checkout_opts *opts, * Remaining variables are not checkout options but used to track state */ + /* + * Do the merge if this is the initial checkout. We cannot use + * is_cache_unborn() here because the index hasn't been loaded yet + * so cache_nr and timestamp.sec are always zero. + */ + if (!file_exists(get_index_file())) + return 0; + return 1; } @@ -693,7 +702,7 @@ static int merge_working_tree(const struct checkout_opts *opts, * a pain; plumb in an option to set * o.renormalize? 
*/ - init_merge_options(&o); + init_merge_options(&o, the_repository); o.verbosity = 0; work = write_tree_from_memory(&o); diff --git a/builtin/clean.c b/builtin/clean.c index bbcdeb2d9e..aaba4af3c2 100644 --- a/builtin/clean.c +++ b/builtin/clean.c @@ -6,6 +6,7 @@ * Based on git-clean.sh by Pavel Roskin */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "cache.h" #include "config.h" diff --git a/builtin/clone.c b/builtin/clone.c index 7c7f98c72c..50bde99618 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -8,6 +8,7 @@ * Clone a repository into a different directory that does not yet exist. */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "config.h" #include "lockfile.h" @@ -1136,9 +1137,13 @@ int cmd_clone(int argc, const char **argv, const char *prefix) option_upload_pack); if (filter_options.choice) { + struct strbuf expanded_filter_spec = STRBUF_INIT; + expand_list_objects_filter_spec(&filter_options, + &expanded_filter_spec); transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, - filter_options.filter_spec); + expanded_filter_spec.buf); transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + strbuf_release(&expanded_filter_spec); } if (transport->smart_options && !deepen && !filter_options.choice) diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c index c02a3f1221..4ae502754c 100644 --- a/builtin/commit-graph.c +++ b/builtin/commit-graph.c @@ -110,8 +110,8 @@ static int graph_read(int argc, const char **argv) printf(" oid_lookup"); if (graph->chunk_commit_data) printf(" commit_metadata"); - if (graph->chunk_large_edges) - printf(" large_edges"); + if (graph->chunk_extra_edges) + printf(" extra_edges"); printf("\n"); UNLEAK(graph); diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c index 9ec36a82b6..12cc403bd7 100644 --- a/builtin/commit-tree.c +++ b/builtin/commit-tree.c @@ -66,7 +66,13 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix) continue; } - if (skip_prefix(arg, "-S", &sign_commit)) + if (!strcmp(arg, "--gpg-sign")) { + sign_commit = ""; + continue; + } + + if (skip_prefix(arg, "-S", &sign_commit) || + skip_prefix(arg, "--gpg-sign=", &sign_commit)) continue; if (!strcmp(arg, "--no-gpg-sign")) { diff --git a/builtin/commit.c b/builtin/commit.c index 7d2e0b61e5..2986553d5f 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -5,6 +5,7 @@ * Based on git-commit.sh by Junio C Hamano and Linus Torvalds */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "lockfile.h" @@ -1368,7 +1369,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) if (status_format != STATUS_FORMAT_PORCELAIN && status_format != STATUS_FORMAT_PORCELAIN_V2) progress_flag = REFRESH_PROGRESS; - read_index(&the_index); + repo_read_index(the_repository); refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED|progress_flag, &s.pathspec, NULL, NULL); @@ -1397,7 +1398,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) wt_status_collect(&s); if (0 <= fd) - update_index_if_able(&the_index, &index_lock); + repo_update_index_if_able(the_repository, &index_lock); if (s.relative_paths) s.prefix = prefix; @@ -1675,7 +1676,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix) run_command_v_opt(argv_gc_auto, RUN_GIT_CMD); run_commit_hook(use_editor, get_index_file(), "post-commit", NULL); if (amend && !no_post_rewrite) { - commit_post_rewrite(current_head, &oid); + commit_post_rewrite(the_repository, current_head, 
&oid); } if (!quiet) { unsigned int flags = 0; diff --git a/builtin/describe.c b/builtin/describe.c index cc118448ee..1409cedce2 100644 --- a/builtin/describe.c +++ b/builtin/describe.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "lockfile.h" @@ -629,12 +630,13 @@ int cmd_describe(int argc, const char **argv, const char *prefix) struct argv_array args = ARGV_ARRAY_INIT; int fd, result; + setup_work_tree(); read_cache(); refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL); fd = hold_locked_index(&index_lock, 0); if (0 <= fd) - update_index_if_able(&the_index, &index_lock); + repo_update_index_if_able(the_repository, &index_lock); repo_init_revisions(the_repository, &revs, prefix); argv_array_pushv(&args, diff_index_args); diff --git a/builtin/diff-files.c b/builtin/diff-files.c index 48cfcb935d..86ae474fbf 100644 --- a/builtin/diff-files.c +++ b/builtin/diff-files.c @@ -3,6 +3,7 @@ * * Copyright (C) Linus Torvalds, 2005 */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "diff.h" diff --git a/builtin/diff-index.c b/builtin/diff-index.c index fcccd1f10d..93ec642423 100644 --- a/builtin/diff-index.c +++ b/builtin/diff-index.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "diff.h" diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c index ef996126d7..a90681bcba 100644 --- a/builtin/diff-tree.c +++ b/builtin/diff-tree.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "diff.h" @@ -165,7 +166,7 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix) if (opt->diffopt.detect_rename) { if (!the_index.cache) - read_index(&the_index); + repo_read_index(the_repository); opt->diffopt.setup |= DIFF_SETUP_USE_SIZE_CACHE; } while (fgets(line, sizeof(line), stdin)) { diff --git a/builtin/diff.c b/builtin/diff.c index f0393bba23..9f6109224b 100644 --- a/builtin/diff.c +++ b/builtin/diff.c @@ -3,6 +3,7 @@ * * Copyright (c) 2006 Junio C Hamano */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "lockfile.h" @@ -102,7 +103,7 @@ static int builtin_diff_blobs(struct rev_info *revs, int argc, const char **argv, struct object_array_entry **blob) { - unsigned mode = canon_mode(S_IFREG | 0644); + const unsigned mode = canon_mode(S_IFREG | 0644); if (argc > 1) usage(builtin_diff_usage); @@ -212,7 +213,7 @@ static void refresh_index_quietly(void) discard_cache(); read_cache(); refresh_cache(REFRESH_QUIET|REFRESH_UNMERGED); - update_index_if_able(&the_index, &lock_file); + repo_update_index_if_able(the_repository, &lock_file); } static int builtin_diff_files(struct rev_info *revs, int argc, const char **argv) diff --git a/builtin/difftool.c b/builtin/difftool.c index 71318c26e1..a3ea60ea71 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -11,6 +11,7 @@ * * Copyright (C) 2016 Johannes Schindelin */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "builtin.h" diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c index f6a513495e..153a2bd282 100644 --- a/builtin/fetch-pack.c +++ b/builtin/fetch-pack.c @@ -218,7 +218,8 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) packet_reader_init(&reader, fd[0], NULL, 0, PACKET_READ_CHOMP_NEWLINE | - PACKET_READ_GENTLE_ON_EOF); + PACKET_READ_GENTLE_ON_EOF | + PACKET_READ_DIE_ON_ERR_PACKET); version = 
discover_version(&reader); switch (version) { diff --git a/builtin/fetch.c b/builtin/fetch.c index c316c03ba2..5a09fe24cd 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -317,8 +317,7 @@ static void find_non_local_tags(const struct ref *refs, !has_object_file_with_flags(&ref->old_oid, OBJECT_INFO_QUICK) && !will_fetch(head, ref->old_oid.hash) && - !has_sha1_file_with_flags(item->oid.hash, - OBJECT_INFO_QUICK) && + !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) && !will_fetch(head, item->oid.hash)) oidclr(&item->oid); item = NULL; @@ -332,7 +331,7 @@ static void find_non_local_tags(const struct ref *refs, * fetch. */ if (item && - !has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) && + !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) && !will_fetch(head, item->oid.hash)) oidclr(&item->oid); @@ -353,7 +352,7 @@ static void find_non_local_tags(const struct ref *refs, * checked to see if it needs fetching. */ if (item && - !has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) && + !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) && !will_fetch(head, item->oid.hash)) oidclr(&item->oid); @@ -629,9 +628,14 @@ static int find_and_replace(struct strbuf *haystack, const char *needle, const char *placeholder) { - const char *p = strstr(haystack->buf, needle); + const char *p = NULL; int plen, nlen; + nlen = strlen(needle); + if (ends_with(haystack->buf, needle)) + p = haystack->buf + haystack->len - nlen; + else + p = strstr(haystack->buf, needle); if (!p) return 0; @@ -639,7 +643,6 @@ static int find_and_replace(struct strbuf *haystack, return 0; plen = strlen(p); - nlen = strlen(needle); if (plen > nlen && p[nlen] != '/') return 0; @@ -1165,6 +1168,7 @@ static void add_negotiation_tips(struct git_transport_options *smart_options) static struct transport *prepare_transport(struct remote *remote, int deepen) { struct transport *transport; + transport = transport_get(remote, NULL); transport_set_verbosity(transport, verbosity, progress); transport->family = family; @@ -1184,9 +1188,13 @@ static struct transport *prepare_transport(struct remote *remote, int deepen) if (update_shallow) set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes"); if (filter_options.choice) { + struct strbuf expanded_filter_spec = STRBUF_INIT; + expand_list_objects_filter_spec(&filter_options, + &expanded_filter_spec); set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, - filter_options.filter_spec); + expanded_filter_spec.buf); set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + strbuf_release(&expanded_filter_spec); } if (negotiation_tip.nr) { if (transport->smart_options) diff --git a/builtin/fsck.c b/builtin/fsck.c index bf5ddff43f..bb4227bebc 100644 --- a/builtin/fsck.c +++ b/builtin/fsck.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "cache.h" #include "repository.h" @@ -401,7 +402,8 @@ out: if (obj->type == OBJ_TREE) free_tree_buffer((struct tree *)obj); if (obj->type == OBJ_COMMIT) - free_commit_buffer((struct commit *)obj); + free_commit_buffer(the_repository->parsed_objects, + (struct commit *)obj); return err; } diff --git a/builtin/grep.c b/builtin/grep.c index dd52ea968b..580fd38f41 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -3,6 +3,7 @@ * * Copyright (c) 2006 Junio C Hamano */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "repository.h" #include "config.h" @@ -393,21 +394,22 @@ static void run_pager(struct grep_opt *opt, const char *prefix) exit(status); } -static int 
grep_cache(struct grep_opt *opt, struct repository *repo, +static int grep_cache(struct grep_opt *opt, const struct pathspec *pathspec, int cached); static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec, struct tree_desc *tree, struct strbuf *base, int tn_len, - int check_attr, struct repository *repo); + int check_attr); -static int grep_submodule(struct grep_opt *opt, struct repository *superproject, +static int grep_submodule(struct grep_opt *opt, const struct pathspec *pathspec, const struct object_id *oid, const char *filename, const char *path) { struct repository subrepo; + struct repository *superproject = opt->repo; const struct submodule *sub = submodule_from_path(superproject, &null_oid, path); - + struct grep_opt subopt; int hit; /* @@ -443,6 +445,9 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject, add_to_alternates_memory(subrepo.objects->odb->path); grep_read_unlock(); + memcpy(&subopt, opt, sizeof(subopt)); + subopt.repo = &subrepo; + if (oid) { struct object *object; struct tree_desc tree; @@ -464,21 +469,22 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject, strbuf_addch(&base, '/'); init_tree_desc(&tree, data, size); - hit = grep_tree(opt, pathspec, &tree, &base, base.len, - object->type == OBJ_COMMIT, &subrepo); + hit = grep_tree(&subopt, pathspec, &tree, &base, base.len, + object->type == OBJ_COMMIT); strbuf_release(&base); free(data); } else { - hit = grep_cache(opt, &subrepo, pathspec, 1); + hit = grep_cache(&subopt, pathspec, 1); } repo_clear(&subrepo); return hit; } -static int grep_cache(struct grep_opt *opt, struct repository *repo, +static int grep_cache(struct grep_opt *opt, const struct pathspec *pathspec, int cached) { + struct repository *repo = opt->repo; int hit = 0; int nr; struct strbuf name = STRBUF_INIT; @@ -516,7 +522,7 @@ static int grep_cache(struct grep_opt *opt, struct repository *repo, } } else if (recurse_submodules && S_ISGITLINK(ce->ce_mode) && submodule_path_match(repo->index, pathspec, name.buf, NULL)) { - hit |= grep_submodule(opt, repo, pathspec, NULL, ce->name, ce->name); + hit |= grep_submodule(opt, pathspec, NULL, ce->name, ce->name); } else { continue; } @@ -538,8 +544,9 @@ static int grep_cache(struct grep_opt *opt, struct repository *repo, static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec, struct tree_desc *tree, struct strbuf *base, int tn_len, - int check_attr, struct repository *repo) + int check_attr) { + struct repository *repo = opt->repo; int hit = 0; enum interesting match = entry_not_interesting; struct name_entry entry; @@ -586,10 +593,10 @@ static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec, strbuf_addch(base, '/'); init_tree_desc(&sub, data, size); hit |= grep_tree(opt, pathspec, &sub, base, tn_len, - check_attr, repo); + check_attr); free(data); } else if (recurse_submodules && S_ISGITLINK(entry.mode)) { - hit |= grep_submodule(opt, repo, pathspec, &entry.oid, + hit |= grep_submodule(opt, pathspec, &entry.oid, base->buf, base->buf + tn_len); } @@ -631,7 +638,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec, } init_tree_desc(&tree, data, size); hit = grep_tree(opt, pathspec, &tree, &base, base.len, - obj->type == OBJ_COMMIT, the_repository); + obj->type == OBJ_COMMIT); strbuf_release(&base); free(data); return hit; @@ -648,12 +655,12 @@ static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec, for (i = 0; i < nr; i++) { struct object 
*real_obj; - real_obj = deref_tag(the_repository, list->objects[i].item, + real_obj = deref_tag(opt->repo, list->objects[i].item, NULL, 0); /* load the gitmodules file for this rev */ if (recurse_submodules) { - submodule_free(the_repository); + submodule_free(opt->repo); gitmodules_config_oid(&real_obj->oid); } if (grep_object(opt, pathspec, real_obj, list->objects[i].name, @@ -678,9 +685,9 @@ static int grep_directory(struct grep_opt *opt, const struct pathspec *pathspec, if (exc_std) setup_standard_excludes(&dir); - fill_directory(&dir, &the_index, pathspec); + fill_directory(&dir, opt->repo->index, pathspec); for (i = 0; i < dir.nr; i++) { - if (!dir_path_match(&the_index, dir.entries[i], pathspec, 0, NULL)) + if (!dir_path_match(opt->repo->index, dir.entries[i], pathspec, 0, NULL)) continue; hit |= grep_file(opt, dir.entries[i]->name); if (hit && opt->status_only) @@ -1018,7 +1025,8 @@ int cmd_grep(int argc, const char **argv, const char *prefix) break; } - if (get_oid_with_context(arg, GET_OID_RECORD_PATH, + if (get_oid_with_context(the_repository, arg, + GET_OID_RECORD_PATH, &oid, &oc)) { if (seen_dashdash) die(_("unable to resolve revision: %s"), arg); @@ -1121,7 +1129,7 @@ int cmd_grep(int argc, const char **argv, const char *prefix) if (!cached) setup_work_tree(); - hit = grep_cache(&opt, the_repository, &pathspec, cached); + hit = grep_cache(&opt, &pathspec, cached); } else { if (cached) die(_("both --cached and trees are given")); diff --git a/builtin/hash-object.c b/builtin/hash-object.c index d6f06ea32f..e055c11103 100644 --- a/builtin/hash-object.c +++ b/builtin/hash-object.c @@ -40,7 +40,8 @@ static void hash_fd(int fd, const char *type, const char *path, unsigned flags, if (fstat(fd, &st) < 0 || (literally ? hash_literally(&oid, fd, type, flags) - : index_fd(&the_index, &oid, fd, &st, type_from_string(type), path, flags))) + : index_fd(the_repository->index, &oid, fd, &st, + type_from_string(type), path, flags))) die((flags & HASH_WRITE_OBJECT) ? "Unable to add %s to database" : "Unable to hash %s", path); diff --git a/builtin/index-pack.c b/builtin/index-pack.c index ac1f4ea9a7..31046c7a0a 100644 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -772,7 +772,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry, if (startup_info->have_repository) { read_lock(); collision_test_needed = - has_sha1_file_with_flags(oid->hash, OBJECT_INFO_QUICK); + has_object_file_with_flags(oid, OBJECT_INFO_QUICK); read_unlock(); } diff --git a/builtin/log.c b/builtin/log.c index a479642eb9..57869267d8 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -4,6 +4,7 @@ * (C) Copyright 2006 Linus Torvalds * 2006 Junio Hamano */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "refs.h" @@ -397,7 +398,8 @@ static int cmd_log_walk(struct rev_info *rev) * We may show a given commit multiple times when * walking the reflogs. 
*/ - free_commit_buffer(commit); + free_commit_buffer(the_repository->parsed_objects, + commit); free_commit_list(commit->parents); commit->parents = NULL; } @@ -508,7 +510,8 @@ static int show_blob_object(const struct object_id *oid, struct rev_info *rev, c !rev->diffopt.flags.allow_textconv) return stream_blob_to_fd(1, oid, NULL, 0); - if (get_oid_with_context(obj_name, GET_OID_RECORD_PATH, + if (get_oid_with_context(the_repository, obj_name, + GET_OID_RECORD_PATH, &oidc, &obj_context)) die(_("Not a valid object name %s"), obj_name); if (!obj_context.path || @@ -1940,7 +1943,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) open_next_file(rev.numbered_files ? NULL : commit, NULL, &rev, quiet)) die(_("Failed to create output files")); shown = log_tree_commit(&rev, commit); - free_commit_buffer(commit); + free_commit_buffer(the_repository->parsed_objects, + commit); /* We put one extra blank line between formatted * patches and this flag is used by log-tree code diff --git a/builtin/ls-files.c b/builtin/ls-files.c index cde87cbeeb..29a8762d46 100644 --- a/builtin/ls-files.c +++ b/builtin/ls-files.c @@ -5,7 +5,6 @@ * * Copyright (C) Linus Torvalds, 2005 */ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "repository.h" #include "config.h" diff --git a/builtin/merge-index.c b/builtin/merge-index.c index c99443b095..38ea6ad6ca 100644 --- a/builtin/merge-index.c +++ b/builtin/merge-index.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "run-command.h" diff --git a/builtin/merge-ours.c b/builtin/merge-ours.c index 0b07263415..4594507420 100644 --- a/builtin/merge-ours.c +++ b/builtin/merge-ours.c @@ -7,6 +7,7 @@ * * Pretend we resolved the heads, but declare our tree trumps everybody else. */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "git-compat-util.h" #include "builtin.h" #include "diff.h" diff --git a/builtin/merge-recursive.c b/builtin/merge-recursive.c index 7545136c2a..5b910e351e 100644 --- a/builtin/merge-recursive.c +++ b/builtin/merge-recursive.c @@ -29,7 +29,7 @@ int cmd_merge_recursive(int argc, const char **argv, const char *prefix) char *better1, *better2; struct commit *result; - init_merge_options(&o); + init_merge_options(&o, the_repository); if (argv[0] && ends_with(argv[0], "-subtree")) o.subtree_shift = ""; diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c index 5541ad091e..34ca0258b1 100644 --- a/builtin/merge-tree.c +++ b/builtin/merge-tree.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "tree-walk.h" #include "xdiff-interface.h" @@ -76,7 +77,8 @@ static void *result(struct merge_list *entry, unsigned long *size) their = NULL; if (entry) their = entry->blob; - return merge_blobs(&the_index, path, base, our, their, size); + return merge_blobs(the_repository->index, path, + base, our, their, size); } static void *origin(struct merge_list *entry, unsigned long *size) diff --git a/builtin/merge.c b/builtin/merge.c index dc0b7cc521..e47d77baee 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -6,6 +6,7 @@ * Based on git-merge.sh by Junio C Hamano. 
*/ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "parse-options.h" @@ -702,7 +703,7 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common, return 2; } - init_merge_options(&o); + init_merge_options(&o, the_repository); if (!strcmp(strategy, "subtree")) o.subtree_shift = ""; diff --git a/builtin/mv.c b/builtin/mv.c index 80bb967a63..be15ba7044 100644 --- a/builtin/mv.c +++ b/builtin/mv.c @@ -3,6 +3,7 @@ * * Copyright (C) 2006 Johannes Schindelin */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "config.h" #include "pathspec.h" diff --git a/builtin/notes.c b/builtin/notes.c index 4996a670f7..02e97f55c5 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -330,10 +330,10 @@ static int notes_copy_from_stdin(int force, const char *rewrite_cmd) } if (!rewrite_cmd) { - commit_notes(t, msg); + commit_notes(the_repository, t, msg); free_notes(t); } else { - finish_copy_notes_for_rewrite(c, msg); + finish_copy_notes_for_rewrite(the_repository, c, msg); } strbuf_release(&buf); return ret; @@ -469,12 +469,14 @@ static int add(int argc, const char **argv, const char *prefix) write_note_data(&d, &new_note); if (add_note(t, &object, &new_note, combine_notes_overwrite)) BUG("combine_notes_overwrite failed"); - commit_notes(t, "Notes added by 'git notes add'"); + commit_notes(the_repository, t, + "Notes added by 'git notes add'"); } else { fprintf(stderr, _("Removing note for object %s\n"), oid_to_hex(&object)); remove_note(t, object.hash); - commit_notes(t, "Notes removed by 'git notes add'"); + commit_notes(the_repository, t, + "Notes removed by 'git notes add'"); } free_note_data(&d); @@ -552,7 +554,8 @@ static int copy(int argc, const char **argv, const char *prefix) if (add_note(t, &object, from_note, combine_notes_overwrite)) BUG("combine_notes_overwrite failed"); - commit_notes(t, "Notes added by 'git notes copy'"); + commit_notes(the_repository, t, + "Notes added by 'git notes copy'"); out: free_notes(t); return retval; @@ -636,7 +639,7 @@ static int append_edit(int argc, const char **argv, const char *prefix) remove_note(t, object.hash); logmsg = xstrfmt("Notes removed by 'git notes %s'", argv[0]); } - commit_notes(t, logmsg); + commit_notes(the_repository, t, logmsg); free(logmsg); free_note_data(&d); @@ -937,7 +940,8 @@ static int remove_cmd(int argc, const char **argv, const char *prefix) strbuf_release(&sb); } if (!retval) - commit_notes(t, "Notes removed by 'git notes remove'"); + commit_notes(the_repository, t, + "Notes removed by 'git notes remove'"); free_notes(t); return retval; } @@ -965,7 +969,8 @@ static int prune(int argc, const char **argv, const char *prefix) prune_notes(t, (verbose ? NOTES_PRUNE_VERBOSE : 0) | (show_only ? 
NOTES_PRUNE_VERBOSE|NOTES_PRUNE_DRYRUN : 0) ); if (!show_only) - commit_notes(t, "Notes removed by 'git notes prune'"); + commit_notes(the_repository, t, + "Notes removed by 'git notes prune'"); free_notes(t); return 0; } diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 0a70d04604..a9fac7c128 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -84,6 +84,7 @@ static unsigned long pack_size_limit; static int depth = 50; static int delta_search_threads; static int pack_to_stdout; +static int sparse; static int thin; static int num_preferred_base; static struct progress *progress_state; @@ -970,7 +971,7 @@ static int no_try_delta(const char *path) if (!check) check = attr_check_initl("delta", NULL); - git_check_attr(&the_index, path, check); + git_check_attr(the_repository->index, path, check); if (ATTR_FALSE(check->items[0].value)) return 1; return 0; @@ -1642,7 +1643,7 @@ static void check_object(struct object_entry *entry) /* * No choice but to fall back to the recursive delta walk - * with sha1_object_info() to find about the object type + * with oid_object_info() to find about the object type * at this point... */ give_up: @@ -1718,7 +1719,7 @@ static void drop_reused_delta(struct object_entry *entry) if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) { /* * We failed to get the info from this pack for some reason; - * fall back to sha1_object_info, which may find another copy. + * fall back to oid_object_info, which may find another copy. * And if that fails, the error will be recorded in oe_type(entry) * and dealt with in prepare_pack(). */ @@ -1901,10 +1902,10 @@ static int type_size_sort(const void *_a, const void *_b) { const struct object_entry *a = *(struct object_entry **)_a; const struct object_entry *b = *(struct object_entry **)_b; - enum object_type a_type = oe_type(a); - enum object_type b_type = oe_type(b); - unsigned long a_size = SIZE(a); - unsigned long b_size = SIZE(b); + const enum object_type a_type = oe_type(a); + const enum object_type b_type = oe_type(b); + const unsigned long a_size = SIZE(a); + const unsigned long b_size = SIZE(b); if (a_type > b_type) return -1; @@ -1919,7 +1920,7 @@ static int type_size_sort(const void *_a, const void *_b) if (a->preferred_base < b->preferred_base) return 1; if (use_delta_islands) { - int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid); + const int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid); if (island_cmp) return island_cmp; } @@ -1953,11 +1954,6 @@ static int delta_cacheable(unsigned long src_size, unsigned long trg_size, return 0; } -/* Protect access to object database */ -static pthread_mutex_t read_mutex; -#define read_lock() pthread_mutex_lock(&read_mutex) -#define read_unlock() pthread_mutex_unlock(&read_mutex) - /* Protect delta_cache_size */ static pthread_mutex_t cache_mutex; #define cache_lock() pthread_mutex_lock(&cache_mutex) @@ -1993,11 +1989,11 @@ unsigned long oe_get_size_slow(struct packing_data *pack, unsigned long used, avail, size; if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) { - read_lock(); + packing_data_lock(&to_pack); if (oid_object_info(the_repository, &e->idx.oid, &size) < 0) die(_("unable to get size of %s"), oid_to_hex(&e->idx.oid)); - read_unlock(); + packing_data_unlock(&to_pack); return size; } @@ -2005,7 +2001,7 @@ unsigned long oe_get_size_slow(struct packing_data *pack, if (!p) BUG("when e->type is a delta, it must belong to a pack"); - read_lock(); + packing_data_lock(&to_pack); w_curs = 
NULL; buf = use_pack(p, &w_curs, e->in_pack_offset, &avail); used = unpack_object_header_buffer(buf, avail, &type, &size); @@ -2014,7 +2010,7 @@ unsigned long oe_get_size_slow(struct packing_data *pack, oid_to_hex(&e->idx.oid)); unuse_pack(&w_curs); - read_unlock(); + packing_data_unlock(&to_pack); return size; } @@ -2076,9 +2072,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, /* Load data if not already done */ if (!trg->data) { - read_lock(); + packing_data_lock(&to_pack); trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz); - read_unlock(); + packing_data_unlock(&to_pack); if (!trg->data) die(_("object %s cannot be read"), oid_to_hex(&trg_entry->idx.oid)); @@ -2089,9 +2085,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, *mem_usage += sz; } if (!src->data) { - read_lock(); + packing_data_lock(&to_pack); src->data = read_object_file(&src_entry->idx.oid, &type, &sz); - read_unlock(); + packing_data_unlock(&to_pack); if (!src->data) { if (src_entry->preferred_base) { static int warned = 0; @@ -2171,7 +2167,7 @@ static unsigned int check_delta_limit(struct object_entry *me, unsigned int n) struct object_entry *child = DELTA_CHILD(me); unsigned int m = n; while (child) { - unsigned int c = check_delta_limit(child, n + 1); + const unsigned int c = check_delta_limit(child, n + 1); if (m < c) m = c; child = DELTA_SIBLING(child); @@ -2226,7 +2222,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size, while (window_memory_limit && mem_usage > window_memory_limit && count > 1) { - uint32_t tail = (idx + window - count) % window; + const uint32_t tail = (idx + window - count) % window; mem_usage -= free_unpacked(array + tail); count--; } @@ -2337,9 +2333,9 @@ static void find_deltas(struct object_entry **list, unsigned *list_size, static void try_to_free_from_threads(size_t size) { - read_lock(); + packing_data_lock(&to_pack); release_pack_memory(size); - read_unlock(); + packing_data_unlock(&to_pack); } static try_to_free_t old_try_to_free_routine; @@ -2381,7 +2377,6 @@ static pthread_cond_t progress_cond; */ static void init_threaded_search(void) { - init_recursive_mutex(&read_mutex); pthread_mutex_init(&cache_mutex, NULL); pthread_mutex_init(&progress_mutex, NULL); pthread_cond_init(&progress_cond, NULL); @@ -2392,7 +2387,6 @@ static void cleanup_threaded_search(void) { set_try_to_free_routine(old_try_to_free_routine); pthread_cond_destroy(&progress_cond); - pthread_mutex_destroy(&read_mutex); pthread_mutex_destroy(&cache_mutex); pthread_mutex_destroy(&progress_mutex); } @@ -2710,6 +2704,10 @@ static int git_pack_config(const char *k, const char *v, void *cb) use_bitmap_index_default = git_config_bool(k, v); return 0; } + if (!strcmp(k, "pack.usesparse")) { + sparse = git_config_bool(k, v); + return 0; + } if (!strcmp(k, "pack.threads")) { delta_search_threads = git_config_int(k, v); if (delta_search_threads < 0) @@ -3137,7 +3135,7 @@ static void get_object_list(int ac, const char **av) if (prepare_revision_walk(&revs)) die(_("revision walk setup failed")); - mark_edges_uninteresting(&revs, show_edge); + mark_edges_uninteresting(&revs, show_edge, sparse); if (!fn_show_object) fn_show_object = show_object; @@ -3294,6 +3292,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"), N_("unpack unreachable objects newer than <time>"), PARSE_OPT_OPTARG, option_parse_unpack_unreachable }, + OPT_BOOL(0, "sparse", &sparse, + N_("use the sparse 
reachability algorithm")), OPT_BOOL(0, "thin", &thin, N_("create thin packs")), OPT_BOOL(0, "shallow", &shallow, @@ -3326,6 +3326,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) read_replace_refs = 0; + sparse = git_env_bool("GIT_TEST_PACK_SPARSE", 0); reset_pack_idx_option(&pack_idx_opts); git_config(git_pack_config, NULL); diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c index cf9a9aabd4..11bc514566 100644 --- a/builtin/pack-redundant.c +++ b/builtin/pack-redundant.c @@ -166,7 +166,7 @@ redo_from_start: l = (hint == NULL) ? list->front : hint; prev = NULL; while (l) { - int cmp = oidcmp(l->oid, oid); + const int cmp = oidcmp(l->oid, oid); if (cmp > 0) /* not in list, since sorted */ return prev; if (!cmp) { /* found */ @@ -264,7 +264,7 @@ static void cmp_two_packs(struct pack_list *p1, struct pack_list *p2) while (p1_off < p1->pack->num_objects * p1_step && p2_off < p2->pack->num_objects * p2_step) { - int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off); + const int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off); /* cmp ~ p1 - p2 */ if (cmp == 0) { p1_hint = llist_sorted_remove(p1->unique_objects, diff --git a/builtin/pull.c b/builtin/pull.c index 74808b9455..701d1473dc 100644 --- a/builtin/pull.c +++ b/builtin/pull.c @@ -5,6 +5,7 @@ * * Fetch one or more remote refs and merge it/them into the current HEAD. */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "builtin.h" diff --git a/builtin/read-tree.c b/builtin/read-tree.c index ac255ad2c2..9083dcfa28 100644 --- a/builtin/read-tree.c +++ b/builtin/read-tree.c @@ -4,6 +4,7 @@ * Copyright (C) Linus Torvalds, 2005 */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "lockfile.h" diff --git a/builtin/rebase--interactive.c b/builtin/rebase--interactive.c index c2c2d51a3b..888390f911 100644 --- a/builtin/rebase--interactive.c +++ b/builtin/rebase--interactive.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "cache.h" #include "config.h" diff --git a/builtin/rebase.c b/builtin/rebase.c index 774264bae8..b9d61771ab 100644 --- a/builtin/rebase.c +++ b/builtin/rebase.c @@ -4,6 +4,7 @@ * Copyright (c) 2018 Pratik Karki */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "run-command.h" #include "exec-cmd.h" @@ -123,7 +124,7 @@ static void imply_interactive(struct rebase_options *opts, const char *option) case REBASE_PRESERVE_MERGES: break; case REBASE_MERGE: - /* we silently *upgrade* --merge to --interactive if needed */ + /* we now implement --merge via --interactive */ default: opts->type = REBASE_INTERACTIVE; /* implied */ break; @@ -186,10 +187,7 @@ static int read_basic_state(struct rebase_options *opts) if (get_oid(buf.buf, &opts->orig_head)) return error(_("invalid orig-head: '%s'"), buf.buf); - strbuf_reset(&buf); - if (read_one(state_dir_path("quiet", opts), &buf)) - return -1; - if (buf.len) + if (file_exists(state_dir_path("quiet", opts))) opts->flags &= ~REBASE_NO_QUIET; else opts->flags |= REBASE_NO_QUIET; @@ -247,6 +245,37 @@ static int read_basic_state(struct rebase_options *opts) return 0; } +static int write_basic_state(struct rebase_options *opts) +{ + write_file(state_dir_path("head-name", opts), "%s", + opts->head_name ? opts->head_name : "detached HEAD"); + write_file(state_dir_path("onto", opts), "%s", + opts->onto ? 
oid_to_hex(&opts->onto->object.oid) : ""); + write_file(state_dir_path("orig-head", opts), "%s", + oid_to_hex(&opts->orig_head)); + write_file(state_dir_path("quiet", opts), "%s", + opts->flags & REBASE_NO_QUIET ? "" : "t"); + if (opts->flags & REBASE_VERBOSE) + write_file(state_dir_path("verbose", opts), "%s", ""); + if (opts->strategy) + write_file(state_dir_path("strategy", opts), "%s", + opts->strategy); + if (opts->strategy_opts) + write_file(state_dir_path("strategy_opts", opts), "%s", + opts->strategy_opts); + if (opts->allow_rerere_autoupdate >= 0) + write_file(state_dir_path("allow_rerere_autoupdate", opts), + "-%s-rerere-autoupdate", + opts->allow_rerere_autoupdate ? "" : "-no"); + if (opts->gpg_sign_opt) + write_file(state_dir_path("gpg_sign_opt", opts), "%s", + opts->gpg_sign_opt); + if (opts->signoff) + write_file(state_dir_path("strategy", opts), "--signoff"); + + return 0; +} + static int apply_autostash(struct rebase_options *opts) { const char *path = state_dir_path("autostash", opts); @@ -334,6 +363,161 @@ static void add_var(struct strbuf *buf, const char *name, const char *value) } } +#define GIT_REFLOG_ACTION_ENVIRONMENT "GIT_REFLOG_ACTION" + +#define RESET_HEAD_DETACH (1<<0) +#define RESET_HEAD_HARD (1<<1) +#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2) +#define RESET_HEAD_REFS_ONLY (1<<3) + +static int reset_head(struct object_id *oid, const char *action, + const char *switch_to_branch, unsigned flags, + const char *reflog_orig_head, const char *reflog_head) +{ + unsigned detach_head = flags & RESET_HEAD_DETACH; + unsigned reset_hard = flags & RESET_HEAD_HARD; + unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK; + unsigned refs_only = flags & RESET_HEAD_REFS_ONLY; + struct object_id head_oid; + struct tree_desc desc[2] = { { NULL }, { NULL } }; + struct lock_file lock = LOCK_INIT; + struct unpack_trees_options unpack_tree_opts; + struct tree *tree; + const char *reflog_action; + struct strbuf msg = STRBUF_INIT; + size_t prefix_len; + struct object_id *orig = NULL, oid_orig, + *old_orig = NULL, oid_old_orig; + int ret = 0, nr = 0; + + if (switch_to_branch && !starts_with(switch_to_branch, "refs/")) + BUG("Not a fully qualified branch: '%s'", switch_to_branch); + + if (!refs_only && hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) { + ret = -1; + goto leave_reset_head; + } + + if ((!oid || !reset_hard) && get_oid("HEAD", &head_oid)) { + ret = error(_("could not determine HEAD revision")); + goto leave_reset_head; + } + + if (!oid) + oid = &head_oid; + + if (refs_only) + goto reset_head_refs; + + memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts)); + setup_unpack_trees_porcelain(&unpack_tree_opts, action); + unpack_tree_opts.head_idx = 1; + unpack_tree_opts.src_index = the_repository->index; + unpack_tree_opts.dst_index = the_repository->index; + unpack_tree_opts.fn = reset_hard ? 
oneway_merge : twoway_merge; + unpack_tree_opts.update = 1; + unpack_tree_opts.merge = 1; + if (!detach_head) + unpack_tree_opts.reset = 1; + + if (repo_read_index_unmerged(the_repository) < 0) { + ret = error(_("could not read index")); + goto leave_reset_head; + } + + if (!reset_hard && !fill_tree_descriptor(&desc[nr++], &head_oid)) { + ret = error(_("failed to find tree of %s"), + oid_to_hex(&head_oid)); + goto leave_reset_head; + } + + if (!fill_tree_descriptor(&desc[nr++], oid)) { + ret = error(_("failed to find tree of %s"), oid_to_hex(oid)); + goto leave_reset_head; + } + + if (unpack_trees(nr, desc, &unpack_tree_opts)) { + ret = -1; + goto leave_reset_head; + } + + tree = parse_tree_indirect(oid); + prime_cache_tree(the_repository, the_repository->index, tree); + + if (write_locked_index(the_repository->index, &lock, COMMIT_LOCK) < 0) { + ret = error(_("could not write index")); + goto leave_reset_head; + } + +reset_head_refs: + reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT); + strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : "rebase"); + prefix_len = msg.len; + + if (!get_oid("ORIG_HEAD", &oid_old_orig)) + old_orig = &oid_old_orig; + if (!get_oid("HEAD", &oid_orig)) { + orig = &oid_orig; + if (!reflog_orig_head) { + strbuf_addstr(&msg, "updating ORIG_HEAD"); + reflog_orig_head = msg.buf; + } + update_ref(reflog_orig_head, "ORIG_HEAD", orig, old_orig, 0, + UPDATE_REFS_MSG_ON_ERR); + } else if (old_orig) + delete_ref(NULL, "ORIG_HEAD", old_orig, 0); + if (!reflog_head) { + strbuf_setlen(&msg, prefix_len); + strbuf_addstr(&msg, "updating HEAD"); + reflog_head = msg.buf; + } + if (!switch_to_branch) + ret = update_ref(reflog_head, "HEAD", oid, orig, + detach_head ? REF_NO_DEREF : 0, + UPDATE_REFS_MSG_ON_ERR); + else { + ret = update_ref(reflog_orig_head, switch_to_branch, oid, + NULL, 0, UPDATE_REFS_MSG_ON_ERR); + if (!ret) + ret = create_symref("HEAD", switch_to_branch, + reflog_head); + } + if (run_hook) + run_hook_le(NULL, "post-checkout", + oid_to_hex(orig ? 
orig : &null_oid), + oid_to_hex(oid), "1", NULL); + +leave_reset_head: + strbuf_release(&msg); + rollback_lock_file(&lock); + while (nr) + free((void *)desc[--nr].buffer); + return ret; +} + +static int move_to_original_branch(struct rebase_options *opts) +{ + struct strbuf orig_head_reflog = STRBUF_INIT, head_reflog = STRBUF_INIT; + int ret; + + if (!opts->head_name) + return 0; /* nothing to move back to */ + + if (!opts->onto) + BUG("move_to_original_branch without onto"); + + strbuf_addf(&orig_head_reflog, "rebase finished: %s onto %s", + opts->head_name, oid_to_hex(&opts->onto->object.oid)); + strbuf_addf(&head_reflog, "rebase finished: returning to %s", + opts->head_name); + ret = reset_head(NULL, "", opts->head_name, RESET_HEAD_REFS_ONLY, + orig_head_reflog.buf, head_reflog.buf); + + strbuf_release(&orig_head_reflog); + strbuf_release(&head_reflog); + return ret; +} + static const char *resolvemsg = N_("Resolve all conflicts manually, mark them as resolved with\n" "\"git add/rm <conflicted_files>\", then run \"git rebase --continue\".\n" @@ -341,6 +525,126 @@ N_("Resolve all conflicts manually, mark them as resolved with\n" "To abort and get back to the state before \"git rebase\", run " "\"git rebase --abort\"."); +static int run_am(struct rebase_options *opts) +{ + struct child_process am = CHILD_PROCESS_INIT; + struct child_process format_patch = CHILD_PROCESS_INIT; + struct strbuf revisions = STRBUF_INIT; + int status; + char *rebased_patches; + + am.git_cmd = 1; + argv_array_push(&am.args, "am"); + + if (opts->action && !strcmp("continue", opts->action)) { + argv_array_push(&am.args, "--resolved"); + argv_array_pushf(&am.args, "--resolvemsg=%s", resolvemsg); + if (opts->gpg_sign_opt) + argv_array_push(&am.args, opts->gpg_sign_opt); + status = run_command(&am); + if (status) + return status; + + return move_to_original_branch(opts); + } + if (opts->action && !strcmp("skip", opts->action)) { + argv_array_push(&am.args, "--skip"); + argv_array_pushf(&am.args, "--resolvemsg=%s", resolvemsg); + status = run_command(&am); + if (status) + return status; + + return move_to_original_branch(opts); + } + if (opts->action && !strcmp("show-current-patch", opts->action)) { + argv_array_push(&am.args, "--show-current-patch"); + return run_command(&am); + } + + strbuf_addf(&revisions, "%s...%s", + oid_to_hex(opts->root ? 
+ /* this is now equivalent to !opts->upstream */ + &opts->onto->object.oid : + &opts->upstream->object.oid), + oid_to_hex(&opts->orig_head)); + + rebased_patches = xstrdup(git_path("rebased-patches")); + format_patch.out = open(rebased_patches, + O_WRONLY | O_CREAT | O_TRUNC, 0666); + if (format_patch.out < 0) { + status = error_errno(_("could not open '%s' for writing"), + rebased_patches); + free(rebased_patches); + argv_array_clear(&am.args); + return status; + } + + format_patch.git_cmd = 1; + argv_array_pushl(&format_patch.args, "format-patch", "-k", "--stdout", + "--full-index", "--cherry-pick", "--right-only", + "--src-prefix=a/", "--dst-prefix=b/", "--no-renames", + "--no-cover-letter", "--pretty=mboxrd", "--topo-order", NULL); + if (opts->git_format_patch_opt.len) + argv_array_split(&format_patch.args, + opts->git_format_patch_opt.buf); + argv_array_push(&format_patch.args, revisions.buf); + if (opts->restrict_revision) + argv_array_pushf(&format_patch.args, "^%s", + oid_to_hex(&opts->restrict_revision->object.oid)); + + status = run_command(&format_patch); + if (status) { + unlink(rebased_patches); + free(rebased_patches); + argv_array_clear(&am.args); + + reset_head(&opts->orig_head, "checkout", opts->head_name, 0, + "HEAD", NULL); + error(_("\ngit encountered an error while preparing the " + "patches to replay\n" + "these revisions:\n" + "\n %s\n\n" + "As a result, git cannot rebase them."), + opts->revisions); + + strbuf_release(&revisions); + return status; + } + strbuf_release(&revisions); + + am.in = open(rebased_patches, O_RDONLY); + if (am.in < 0) { + status = error_errno(_("could not open '%s' for reading"), + rebased_patches); + free(rebased_patches); + argv_array_clear(&am.args); + return status; + } + + argv_array_pushv(&am.args, opts->git_am_opts.argv); + argv_array_push(&am.args, "--rebasing"); + argv_array_pushf(&am.args, "--resolvemsg=%s", resolvemsg); + argv_array_push(&am.args, "--patch-format=mboxrd"); + if (opts->allow_rerere_autoupdate > 0) + argv_array_push(&am.args, "--rerere-autoupdate"); + else if (opts->allow_rerere_autoupdate == 0) + argv_array_push(&am.args, "--no-rerere-autoupdate"); + if (opts->gpg_sign_opt) + argv_array_push(&am.args, opts->gpg_sign_opt); + status = run_command(&am); + unlink(rebased_patches); + free(rebased_patches); + + if (!status) { + return move_to_original_branch(opts); + } + + if (is_directory(opts->state_dir)) + write_basic_state(opts); + + return status; +} + static int run_specific_rebase(struct rebase_options *opts) { const char *argv[] = { NULL, NULL }; @@ -355,7 +659,8 @@ static int run_specific_rebase(struct rebase_options *opts) argv_array_pushf(&child.env_array, "GIT_CHERRY_PICK_HELP=%s", resolvemsg); if (!(opts->flags & REBASE_INTERACTIVE_EXPLICIT)) { - argv_array_push(&child.env_array, "GIT_EDITOR=:"); + argv_array_push(&child.env_array, + "GIT_SEQUENCE_EDITOR=:"); opts->autosquash = 0; } @@ -423,6 +728,11 @@ static int run_specific_rebase(struct rebase_options *opts) goto finished_rebase; } + if (opts->type == REBASE_AM) { + status = run_am(opts); + goto finished_rebase; + } + add_var(&script_snippet, "GIT_DIR", absolute_path(get_git_dir())); add_var(&script_snippet, "state_dir", opts->state_dir); @@ -478,7 +788,7 @@ static int run_specific_rebase(struct rebase_options *opts) if (is_interactive(opts) && !(opts->flags & REBASE_INTERACTIVE_EXPLICIT)) { strbuf_addstr(&script_snippet, - "GIT_EDITOR=:; export GIT_EDITOR; "); + "GIT_SEQUENCE_EDITOR=:; export GIT_SEQUENCE_EDITOR; "); opts->autosquash = 0; } @@ -487,10 
+797,6 @@ static int run_specific_rebase(struct rebase_options *opts) backend = "git-rebase--am"; backend_func = "git_rebase__am"; break; - case REBASE_MERGE: - backend = "git-rebase--merge"; - backend_func = "git_rebase__merge"; - break; case REBASE_PRESERVE_MERGES: backend = "git-rebase--preserve-merges"; backend_func = "git_rebase__preserve_merges"; @@ -529,131 +835,6 @@ finished_rebase: return status ? -1 : 0; } -#define GIT_REFLOG_ACTION_ENVIRONMENT "GIT_REFLOG_ACTION" - -#define RESET_HEAD_DETACH (1<<0) -#define RESET_HEAD_HARD (1<<1) -#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2) - -static int reset_head(struct object_id *oid, const char *action, - const char *switch_to_branch, unsigned flags, - const char *reflog_orig_head, const char *reflog_head) -{ - unsigned detach_head = flags & RESET_HEAD_DETACH; - unsigned reset_hard = flags & RESET_HEAD_HARD; - unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK; - struct object_id head_oid; - struct tree_desc desc[2] = { { NULL }, { NULL } }; - struct lock_file lock = LOCK_INIT; - struct unpack_trees_options unpack_tree_opts; - struct tree *tree; - const char *reflog_action; - struct strbuf msg = STRBUF_INIT; - size_t prefix_len; - struct object_id *orig = NULL, oid_orig, - *old_orig = NULL, oid_old_orig; - int ret = 0, nr = 0; - - if (switch_to_branch && !starts_with(switch_to_branch, "refs/")) - BUG("Not a fully qualified branch: '%s'", switch_to_branch); - - if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) { - ret = -1; - goto leave_reset_head; - } - - if ((!oid || !reset_hard) && get_oid("HEAD", &head_oid)) { - ret = error(_("could not determine HEAD revision")); - goto leave_reset_head; - } - - if (!oid) - oid = &head_oid; - - memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts)); - setup_unpack_trees_porcelain(&unpack_tree_opts, action); - unpack_tree_opts.head_idx = 1; - unpack_tree_opts.src_index = the_repository->index; - unpack_tree_opts.dst_index = the_repository->index; - unpack_tree_opts.fn = reset_hard ? oneway_merge : twoway_merge; - unpack_tree_opts.update = 1; - unpack_tree_opts.merge = 1; - if (!detach_head) - unpack_tree_opts.reset = 1; - - if (read_index_unmerged(the_repository->index) < 0) { - ret = error(_("could not read index")); - goto leave_reset_head; - } - - if (!reset_hard && !fill_tree_descriptor(&desc[nr++], &head_oid)) { - ret = error(_("failed to find tree of %s"), - oid_to_hex(&head_oid)); - goto leave_reset_head; - } - - if (!fill_tree_descriptor(&desc[nr++], oid)) { - ret = error(_("failed to find tree of %s"), oid_to_hex(oid)); - goto leave_reset_head; - } - - if (unpack_trees(nr, desc, &unpack_tree_opts)) { - ret = -1; - goto leave_reset_head; - } - - tree = parse_tree_indirect(oid); - prime_cache_tree(the_repository, the_repository->index, tree); - - if (write_locked_index(the_repository->index, &lock, COMMIT_LOCK) < 0) { - ret = error(_("could not write index")); - goto leave_reset_head; - } - - reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT); - strbuf_addf(&msg, "%s: ", reflog_action ? 
reflog_action : "rebase"); - prefix_len = msg.len; - - if (!get_oid("ORIG_HEAD", &oid_old_orig)) - old_orig = &oid_old_orig; - if (!get_oid("HEAD", &oid_orig)) { - orig = &oid_orig; - if (!reflog_orig_head) { - strbuf_addstr(&msg, "updating ORIG_HEAD"); - reflog_orig_head = msg.buf; - } - update_ref(reflog_orig_head, "ORIG_HEAD", orig, old_orig, 0, - UPDATE_REFS_MSG_ON_ERR); - } else if (old_orig) - delete_ref(NULL, "ORIG_HEAD", old_orig, 0); - if (!reflog_head) { - strbuf_setlen(&msg, prefix_len); - strbuf_addstr(&msg, "updating HEAD"); - reflog_head = msg.buf; - } - if (!switch_to_branch) - ret = update_ref(reflog_head, "HEAD", oid, orig, - detach_head ? REF_NO_DEREF : 0, - UPDATE_REFS_MSG_ON_ERR); - else { - ret = create_symref("HEAD", switch_to_branch, msg.buf); - if (!ret) - ret = update_ref(reflog_head, "HEAD", oid, NULL, 0, - UPDATE_REFS_MSG_ON_ERR); - } - if (run_hook) - run_hook_le(NULL, "post-checkout", - oid_to_hex(orig ? orig : &null_oid), - oid_to_hex(oid), "1", NULL); - -leave_reset_head: - strbuf_release(&msg); - rollback_lock_file(&lock); - while (nr) - free((void *)desc[--nr].buffer); - return ret; -} - static int rebase_config(const char *var, const char *value, void *data) { struct rebase_options *opts = data; @@ -824,6 +1005,19 @@ static void set_reflog_action(struct rebase_options *options) strbuf_release(&buf); } +static int check_exec_cmd(const char *cmd) +{ + if (strchr(cmd, '\n')) + return error(_("exec commands cannot contain newlines")); + + /* Does the command consist purely of whitespace? */ + if (!cmd[strspn(cmd, " \t\r\f\v")]) + return error(_("empty exec command")); + + return 0; +} + + int cmd_rebase(int argc, const char **argv, const char *prefix) { struct rebase_options options = { @@ -1053,13 +1247,12 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) die(_("Cannot read HEAD")); fd = hold_locked_index(&lock_file, 0); - if (read_index(the_repository->index) < 0) + if (repo_read_index(the_repository) < 0) die(_("could not read index")); refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL); if (0 <= fd) - update_index_if_able(the_repository->index, - &lock_file); + repo_update_index_if_able(the_repository, &lock_file); rollback_lock_file(&lock_file); if (has_unstaged_changes(the_repository, 1)) { @@ -1168,6 +1361,10 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) } } + for (i = 0; i < exec.nr; i++) + if (check_exec_cmd(exec.items[i].string)) + exit(1); + if (!(options.flags & REBASE_NO_QUIET)) argv_array_push(&options.git_am_opts, "-q"); @@ -1232,6 +1429,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) } } + if (options.type == REBASE_MERGE) + imply_interactive(&options, "--merge"); + if (options.root && !options.onto_name) imply_interactive(&options, "--root without --onto"); @@ -1264,14 +1464,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) break; if (is_interactive(&options) && i >= 0) - die(_("error: cannot combine interactive options " - "(--interactive, --exec, --rebase-merges, " - "--preserve-merges, --keep-empty, --root + " - "--onto) with am options (%s)"), buf.buf); - if (options.type == REBASE_MERGE && i >= 0) - die(_("error: cannot combine merge options (--merge, " - "--strategy, --strategy-option) with am options " - "(%s)"), buf.buf); + die(_("cannot combine am options with either " + "interactive or merge options")); } if (options.signoff) { @@ -1289,7 +1483,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) * git-rebase.txt caveats 
with "unless you know what you are doing" */ if (options.rebase_merges) - die(_("error: cannot combine '--preserve-merges' with " + die(_("cannot combine '--preserve-merges' with " "'--rebase-merges'")); if (options.reschedule_failed_exec) @@ -1299,10 +1493,10 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) if (options.rebase_merges) { if (strategy_options.nr) - die(_("error: cannot combine '--rebase-merges' with " + die(_("cannot combine '--rebase-merges' with " "'--strategy-option'")); if (options.strategy) - die(_("error: cannot combine '--rebase-merges' with " + die(_("cannot combine '--rebase-merges' with " "'--strategy'")); } @@ -1414,7 +1608,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) get_fork_point(options.upstream_name, head); } - if (read_index(the_repository->index) < 0) + if (repo_read_index(the_repository) < 0) die(_("could not read index")); if (options.autostash) { @@ -1424,7 +1618,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) fd = hold_locked_index(&lock_file, 0); refresh_cache(REFRESH_QUIET); if (0 <= fd) - update_index_if_able(&the_index, &lock_file); + repo_update_index_if_able(the_repository, &lock_file); rollback_lock_file(&lock_file); if (has_unstaged_changes(the_repository, 1) || @@ -1469,7 +1663,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) putchar('\n'); if (discard_index(the_repository->index) < 0 || - read_index(the_repository->index) < 0) + repo_read_index(the_repository) < 0) die(_("could not read index")); } } diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c index 33187bd8e9..d58b7750b6 100644 --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@ -1569,30 +1569,29 @@ static void queue_commands_from_cert(struct command **tail, } } -static struct command *read_head_info(struct oid_array *shallow) +static struct command *read_head_info(struct packet_reader *reader, + struct oid_array *shallow) { struct command *commands = NULL; struct command **p = &commands; for (;;) { - char *line; - int len, linelen; + int linelen; - line = packet_read_line(0, &len); - if (!line) + if (packet_reader_read(reader) != PACKET_READ_NORMAL) break; - if (len > 8 && starts_with(line, "shallow ")) { + if (reader->pktlen > 8 && starts_with(reader->line, "shallow ")) { struct object_id oid; - if (get_oid_hex(line + 8, &oid)) + if (get_oid_hex(reader->line + 8, &oid)) die("protocol error: expected shallow sha, got '%s'", - line + 8); + reader->line + 8); oid_array_append(shallow, &oid); continue; } - linelen = strlen(line); - if (linelen < len) { - const char *feature_list = line + linelen + 1; + linelen = strlen(reader->line); + if (linelen < reader->pktlen) { + const char *feature_list = reader->line + linelen + 1; if (parse_feature_request(feature_list, "report-status")) report_status = 1; if (parse_feature_request(feature_list, "side-band-64k")) @@ -1607,28 +1606,32 @@ static struct command *read_head_info(struct oid_array *shallow) use_push_options = 1; } - if (!strcmp(line, "push-cert")) { + if (!strcmp(reader->line, "push-cert")) { int true_flush = 0; - char certbuf[1024]; + int saved_options = reader->options; + reader->options &= ~PACKET_READ_CHOMP_NEWLINE; for (;;) { - len = packet_read(0, NULL, NULL, - certbuf, sizeof(certbuf), 0); - if (!len) { + packet_reader_read(reader); + if (reader->status == PACKET_READ_FLUSH) { true_flush = 1; break; } - if (!strcmp(certbuf, "push-cert-end\n")) + if (reader->status != PACKET_READ_NORMAL) { + die("protocol error: got an unexpected 
packet"); + } + if (!strcmp(reader->line, "push-cert-end\n")) break; /* end of cert */ - strbuf_addstr(&push_cert, certbuf); + strbuf_addstr(&push_cert, reader->line); } + reader->options = saved_options; if (true_flush) break; continue; } - p = queue_command(p, line, linelen); + p = queue_command(p, reader->line, linelen); } if (push_cert.len) @@ -1637,18 +1640,14 @@ static struct command *read_head_info(struct oid_array *shallow) return commands; } -static void read_push_options(struct string_list *options) +static void read_push_options(struct packet_reader *reader, + struct string_list *options) { while (1) { - char *line; - int len; - - line = packet_read_line(0, &len); - - if (!line) + if (packet_reader_read(reader) != PACKET_READ_NORMAL) break; - string_list_append(options, line); + string_list_append(options, reader->line); } } @@ -1924,6 +1923,7 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix) struct oid_array shallow = OID_ARRAY_INIT; struct oid_array ref = OID_ARRAY_INIT; struct shallow_info si; + struct packet_reader reader; struct option options[] = { OPT__QUIET(&quiet, N_("quiet")), @@ -1986,12 +1986,16 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix) if (advertise_refs) return 0; - if ((commands = read_head_info(&shallow)) != NULL) { + packet_reader_init(&reader, 0, NULL, 0, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_DIE_ON_ERR_PACKET); + + if ((commands = read_head_info(&reader, &shallow)) != NULL) { const char *unpack_status = NULL; struct string_list push_options = STRING_LIST_INIT_DUP; if (use_push_options) - read_push_options(&push_options); + read_push_options(&reader, &push_options); if (!check_cert_push_options(&push_options)) { struct command *cmd; for (cmd = commands; cmd; cmd = cmd->next) diff --git a/builtin/reflog.c b/builtin/reflog.c index 1f1010e2d9..4d3430900d 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -94,7 +94,7 @@ static int tree_is_complete(const struct object_id *oid) init_tree_desc(&desc, tree->buffer, tree->size); complete = 1; while (tree_entry(&desc, &entry)) { - if (!has_sha1_file(entry.oid.hash) || + if (!has_object_file(&entry.oid) || (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) { tree->object.flags |= INCOMPLETE; complete = 0; diff --git a/builtin/replace.c b/builtin/replace.c index affcdfb416..5b80b7f211 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -295,7 +295,7 @@ static int import_object(struct object_id *oid, enum object_type type, close(fd); return -1; } - if (index_fd(&the_index, oid, fd, &st, type, NULL, flags) < 0) + if (index_fd(the_repository->index, oid, fd, &st, type, NULL, flags) < 0) return error(_("unable to write object to database")); /* index_fd close()s fd for us */ } diff --git a/builtin/reset.c b/builtin/reset.c index 59898c972e..4d18a461fa 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -7,6 +7,7 @@ * * Copyright (c) 2005, 2006 Linus Torvalds and Junio C Hamano */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "config.h" #include "lockfile.h" diff --git a/builtin/rev-list.c b/builtin/rev-list.c index 51e9e1267e..5b5b6dbb1c 100644 --- a/builtin/rev-list.c +++ b/builtin/rev-list.c @@ -197,7 +197,8 @@ static void finish_commit(struct commit *commit, void *data) free_commit_list(commit->parents); commit->parents = NULL; } - free_commit_buffer(commit); + free_commit_buffer(the_repository->parsed_objects, + commit); } static inline void finish_object__ma(struct object *obj) @@ -545,7 +546,7 @@ int cmd_rev_list(int 
argc, const char **argv, const char *prefix) if (prepare_revision_walk(&revs)) die("revision walk setup failed"); if (revs.tree_objects) - mark_edges_uninteresting(&revs, show_edge); + mark_edges_uninteresting(&revs, show_edge, 0); if (bisect_list) { int reaches, all; diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c index 10d4dab894..f8bbe6d47e 100644 --- a/builtin/rev-parse.c +++ b/builtin/rev-parse.c @@ -3,6 +3,7 @@ * * Copyright (C) Linus Torvalds, 2005 */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "commit.h" @@ -933,7 +934,8 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix) name++; type = REVERSED; } - if (!get_oid_with_context(name, flags, &oid, &unused)) { + if (!get_oid_with_context(the_repository, name, + flags, &oid, &unused)) { if (verify) revs_count++; else diff --git a/builtin/rm.c b/builtin/rm.c index 17086d3d97..db85b33982 100644 --- a/builtin/rm.c +++ b/builtin/rm.c @@ -3,6 +3,7 @@ * * Copyright (C) Linus Torvalds 2006 */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "config.h" #include "lockfile.h" diff --git a/builtin/send-pack.c b/builtin/send-pack.c index 8e3c7490f7..098ebf22d0 100644 --- a/builtin/send-pack.c +++ b/builtin/send-pack.c @@ -250,7 +250,8 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix) packet_reader_init(&reader, fd[0], NULL, 0, PACKET_READ_CHOMP_NEWLINE | - PACKET_READ_GENTLE_ON_EOF); + PACKET_READ_GENTLE_ON_EOF | + PACKET_READ_DIE_ON_ERR_PACKET); switch (discover_version(&reader)) { case protocol_v2: diff --git a/builtin/show-ref.c b/builtin/show-ref.c index ed888ffa48..6a706c02a6 100644 --- a/builtin/show-ref.c +++ b/builtin/show-ref.c @@ -23,7 +23,7 @@ static void show_one(const char *refname, const struct object_id *oid) const char *hex; struct object_id peeled; - if (!has_sha1_file(oid->hash)) + if (!has_object_file(oid)) die("git show-ref: bad ref %s (%s)", refname, oid_to_hex(oid)); diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index 0e140f176c..b80fc4ba3d 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "repository.h" #include "cache.h" diff --git a/builtin/update-index.c b/builtin/update-index.c index e19da77edc..02ace602b9 100644 --- a/builtin/update-index.c +++ b/builtin/update-index.c @@ -3,6 +3,7 @@ * * Copyright (C) Linus Torvalds, 2005 */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "lockfile.h" diff --git a/builtin/write-tree.c b/builtin/write-tree.c index cdcbf8264e..3d46d22ee5 100644 --- a/builtin/write-tree.c +++ b/builtin/write-tree.c @@ -3,6 +3,7 @@ * * Copyright (C) Linus Torvalds, 2005 */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "builtin.h" #include "cache.h" #include "config.h" diff --git a/bulk-checkin.c b/bulk-checkin.c index 409ecb566b..39ee7d6107 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -67,7 +67,7 @@ static int already_written(struct bulk_checkin_state *state, struct object_id *o int i; /* The object may already exist in the repository */ - if (has_sha1_file(oid->hash)) + if (has_object_file(oid)) return 1; /* Might want to keep the list sorted */ @@ -424,8 +424,8 @@ static int write_bundle_refs(int bundle_fd, struct rev_info *revs) return ref_count; } -int create_bundle(struct repository *r, struct bundle_header *header, - const char *path, int argc, const char **argv) +int create_bundle(struct 
repository *r, const char *path, + int argc, const char **argv) { struct lock_file lock = LOCK_INIT; int bundle_fd = -1; @@ -18,8 +18,8 @@ struct bundle_header { int is_bundle(const char *path, int quiet); int read_bundle_header(const char *path, struct bundle_header *header); -int create_bundle(struct repository *r, struct bundle_header *header, - const char *path, int argc, const char **argv); +int create_bundle(struct repository *r, const char *path, + int argc, const char **argv); int verify_bundle(struct repository *r, struct bundle_header *header, int verbose); #define BUNDLE_VERBOSE 1 int unbundle(struct repository *r, struct bundle_header *header, diff --git a/cache-tree.c b/cache-tree.c index c4b8a1fa16..b13bfaf71e 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -225,7 +225,7 @@ int cache_tree_fully_valid(struct cache_tree *it) int i; if (!it) return 0; - if (it->entry_count < 0 || !has_sha1_file(it->oid.hash)) + if (it->entry_count < 0 || !has_object_file(&it->oid)) return 0; for (i = 0; i < it->subtree_nr; i++) { if (!cache_tree_fully_valid(it->down[i]->cache_tree)) @@ -253,7 +253,7 @@ static int update_one(struct cache_tree *it, *skip_count = 0; - if (0 <= it->entry_count && has_sha1_file(it->oid.hash)) + if (0 <= it->entry_count && has_object_file(&it->oid)) return it->entry_count; /* diff --git a/cache-tree.h b/cache-tree.h index 326209198b..757bbc48bc 100644 --- a/cache-tree.h +++ b/cache-tree.h @@ -51,7 +51,7 @@ void prime_cache_tree(struct repository *, struct index_state *, struct tree *); int cache_tree_matches_traversal(struct cache_tree *, struct name_entry *ent, struct traverse_info *info); -#ifndef NO_THE_INDEX_COMPATIBILITY_MACROS +#ifdef USE_THE_INDEX_COMPATIBILITY_MACROS static inline int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix) { return write_index_as_tree(oid, &the_index, get_index_file(), flags, prefix); @@ -348,8 +348,6 @@ struct index_state { struct mem_pool *ce_mem_pool; }; -extern struct index_state the_index; - /* Name hashing */ extern int test_lazy_init_name_hash(struct index_state *istate, int try_threaded); extern void add_name_hash(struct index_state *istate, struct cache_entry *ce); @@ -411,18 +409,20 @@ struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_s */ void validate_cache_entries(const struct index_state *istate); -#ifndef NO_THE_INDEX_COMPATIBILITY_MACROS +#ifdef USE_THE_INDEX_COMPATIBILITY_MACROS +extern struct index_state the_index; + #define active_cache (the_index.cache) #define active_nr (the_index.cache_nr) #define active_alloc (the_index.cache_alloc) #define active_cache_changed (the_index.cache_changed) #define active_cache_tree (the_index.cache_tree) -#define read_cache() read_index(&the_index) +#define read_cache() repo_read_index(the_repository) #define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir())) -#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec), 0) +#define read_cache_preload(pathspec) repo_read_index_preload(the_repository, (pathspec), 0) #define is_cache_unborn() is_index_unborn(&the_index) -#define read_cache_unmerged() read_index_unmerged(&the_index) +#define read_cache_unmerged() repo_read_index_unmerged(the_repository) #define discard_cache() discard_index(&the_index) #define unmerged_cache() unmerged_index(&the_index) #define cache_name_pos(name, namelen) index_name_pos(&the_index,(name),(namelen)) @@ -443,6 +443,7 @@ void validate_cache_entries(const struct index_state *istate); #define 
unmerge_cache_entry_at(at) unmerge_index_entry_at(&the_index, at) #define unmerge_cache(pathspec) unmerge_index(&the_index, pathspec) #define read_blob_data_from_cache(path, sz) read_blob_data_from_index(&the_index, (path), (sz)) +#define hold_locked_index(lock_file, flags) repo_hold_locked_index(the_repository, (lock_file), (flags)) #endif #define TYPE_BITS 3 @@ -670,19 +671,14 @@ extern int daemonize(void); /* Initialize and use the cache information */ struct lock_file; -extern int read_index(struct index_state *); extern void preload_index(struct index_state *index, const struct pathspec *pathspec, unsigned int refresh_flags); -extern int read_index_preload(struct index_state *, - const struct pathspec *pathspec, - unsigned int refresh_flags); extern int do_read_index(struct index_state *istate, const char *path, int must_exist); /* for testting only! */ extern int read_index_from(struct index_state *, const char *path, const char *gitdir); extern int is_index_unborn(struct index_state *); -extern int read_index_unmerged(struct index_state *); /* For use with `write_locked_index()`. */ #define COMMIT_LOCK (1 << 0) @@ -720,9 +716,9 @@ extern int unmerged_index(const struct index_state *); * provided, the space-separated list of files that differ will be appended * to it. */ -extern int index_has_changes(struct index_state *istate, - struct tree *tree, - struct strbuf *sb); +extern int repo_index_has_changes(struct repository *repo, + struct tree *tree, + struct strbuf *sb); extern int verify_path(const char *path, unsigned mode); extern int strcmp_offset(const char *s1, const char *s2, size_t *first_change); @@ -755,6 +751,7 @@ extern int index_name_pos(const struct index_state *, const char *name, int name #define ADD_CACHE_JUST_APPEND 8 /* Append only; tree.c::read_tree() */ #define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */ #define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */ +#define ADD_CACHE_RENORMALIZE 64 /* Pass along HASH_RENORMALIZE */ extern int add_index_entry(struct index_state *, struct cache_entry *ce, int option); extern void rename_index_entry_at(struct index_state *, int pos, const char *new_name); @@ -837,13 +834,6 @@ extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st); extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg); extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int); -/* - * Opportunistically update the index but do not complain if we can't. - * The lockfile is always committed or rolled back. 
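The cache.h changes above invert the old default: the_index and its convenience macros are no longer available everywhere, but only in files that explicitly define USE_THE_INDEX_COMPATIBILITY_MACROS, and the macros now forward to the repo_* API. A minimal sketch of what a builtin that opts in can rely on, assuming the in-tree headers; update_files() is a hypothetical example, not code from this patch:

#define USE_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "repository.h"
#include "lockfile.h"

/* Hypothetical example of the legacy spellings and their expansions. */
static int update_files(void)
{
	struct lock_file lock = LOCK_INIT;

	/* read_cache() now expands to repo_read_index(the_repository) */
	if (read_cache() < 0)
		return error("could not read index");

	/*
	 * hold_locked_index(&lock, flags) now expands to
	 * repo_hold_locked_index(the_repository, &lock, flags)
	 */
	if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0)
		return error("could not lock index");

	rollback_lock_file(&lock);
	return 0;
}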
- */ -extern void update_index_if_able(struct index_state *, struct lock_file *); - -extern int hold_locked_index(struct lock_file *, int); extern void set_alternate_index_output(const char *); extern int verify_index_checksum; @@ -1281,8 +1271,8 @@ extern char *xdg_cache_home(const char *filename); extern int git_open_cloexec(const char *name, int flags); #define git_open(name) git_open_cloexec(name, O_RDONLY) -extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz); -extern int parse_sha1_header(const char *hdr, unsigned long *sizep); +extern int unpack_loose_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz); +extern int parse_loose_header(const char *hdr, unsigned long *sizep); extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type); @@ -1344,6 +1334,24 @@ struct object_context { GET_OID_TREE | GET_OID_TREEISH | \ GET_OID_BLOB) +enum get_oid_result { + FOUND = 0, + MISSING_OBJECT = -1, /* The requested object is missing */ + SHORT_NAME_AMBIGUOUS = -2, + /* The following only apply when symlinks are followed */ + DANGLING_SYMLINK = -4, /* + * The initial symlink is there, but + * (transitively) points to a missing + * in-tree file + */ + SYMLINK_LOOP = -5, + NOT_DIR = -6, /* + * Somewhere along the symlink chain, a path is + * requested which contains a file as a + * non-final element. + */ +}; + extern int get_oid(const char *str, struct object_id *oid); extern int get_oid_commit(const char *str, struct object_id *oid); extern int get_oid_committish(const char *str, struct object_id *oid); @@ -1351,8 +1359,9 @@ extern int get_oid_tree(const char *str, struct object_id *oid); extern int get_oid_treeish(const char *str, struct object_id *oid); extern int get_oid_blob(const char *str, struct object_id *oid); extern void maybe_die_on_misspelt_object_name(const char *name, const char *prefix); -extern int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc); - +extern enum get_oid_result get_oid_with_context(struct repository *repo, const char *str, + unsigned flags, struct object_id *oid, + struct object_context *oc); typedef int each_abbrev_fn(const struct object_id *oid, void *); extern int for_each_abbrev(const char *prefix, each_abbrev_fn, void *); @@ -1454,6 +1463,7 @@ extern struct object *peel_to_type(const char *name, int namelen, enum date_mode_type { DATE_NORMAL = 0, + DATE_HUMAN, DATE_RELATIVE, DATE_SHORT, DATE_ISO8601, @@ -1479,7 +1489,9 @@ struct date_mode { struct date_mode *date_mode_from_type(enum date_mode_type type); const char *show_date(timestamp_t time, int timezone, const struct date_mode *mode); -void show_date_relative(timestamp_t time, int tz, const struct timeval *now, +void show_date_relative(timestamp_t time, const struct timeval *now, + struct strbuf *timebuf); +void show_date_human(timestamp_t time, int tz, const struct timeval *now, struct strbuf *timebuf); int parse_date(const char *date, struct strbuf *out); int parse_date_basic(const char *date, timestamp_t *timestamp, int *offset); @@ -1607,7 +1619,7 @@ extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern); extern int odb_pack_keep(const char *name); /* - * Set this to 0 to prevent sha1_object_info_extended() from fetching missing + * Set this to 0 to prevent oid_object_info_extended() from fetching missing * blobs. 
This has a difference only if extensions.partialClone is set. * * Its default value is 1. @@ -1803,4 +1815,7 @@ void safe_create_dir(const char *dir, int share); */ extern int print_sha1_ellipsis(void); +/* Return 1 if the file is empty or does not exists, 0 otherwise. */ +extern int is_empty_or_missing_file(const char *filename); + #endif /* CACHE_H */ diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh index 06c3546e1e..d64667fcbf 100755 --- a/ci/install-dependencies.sh +++ b/ci/install-dependencies.sh @@ -3,7 +3,7 @@ # Install dependencies required to build and test Git on Linux and macOS # -. ${0%/*}/lib-travisci.sh +. ${0%/*}/lib.sh P4WHENCE=http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION LFSWHENCE=https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VERSION @@ -34,12 +34,18 @@ linux-clang|linux-gcc) popd ;; osx-clang|osx-gcc) - brew update --quiet + brew update >/dev/null # Uncomment this if you want to run perf tests: # brew install gnu-time - brew install git-lfs gettext + test -z "$BREW_INSTALL_PACKAGES" || + brew install $BREW_INSTALL_PACKAGES brew link --force gettext brew install caskroom/cask/perforce + case "$jobname" in + osx-gcc) + brew link gcc@8 + ;; + esac ;; StaticAnalysis) sudo apt-get -q update diff --git a/ci/lib-travisci.sh b/ci/lib-travisci.sh deleted file mode 100755 index 69dff4d1ec..0000000000 --- a/ci/lib-travisci.sh +++ /dev/null @@ -1,129 +0,0 @@ -# Library of functions shared by all CI scripts - -skip_branch_tip_with_tag () { - # Sometimes, a branch is pushed at the same time the tag that points - # at the same commit as the tip of the branch is pushed, and building - # both at the same time is a waste. - # - # Travis gives a tagname e.g. v2.14.0 in $TRAVIS_BRANCH when - # the build is triggered by a push to a tag. Let's see if - # $TRAVIS_BRANCH is exactly at a tag, and if so, if it is - # different from $TRAVIS_BRANCH. That way, we can tell if - # we are building the tip of a branch that is tagged and - # we can skip the build because we won't be skipping a build - # of a tag. - - if TAG=$(git describe --exact-match "$TRAVIS_BRANCH" 2>/dev/null) && - test "$TAG" != "$TRAVIS_BRANCH" - then - echo "$(tput setaf 2)Tip of $TRAVIS_BRANCH is exactly at $TAG$(tput sgr0)" - exit 0 - fi -} - -# Save some info about the current commit's tree, so we can skip the build -# job if we encounter the same tree again and can provide a useful info -# message. -save_good_tree () { - echo "$(git rev-parse $TRAVIS_COMMIT^{tree}) $TRAVIS_COMMIT $TRAVIS_JOB_NUMBER $TRAVIS_JOB_ID" >>"$good_trees_file" - # limit the file size - tail -1000 "$good_trees_file" >"$good_trees_file".tmp - mv "$good_trees_file".tmp "$good_trees_file" -} - -# Skip the build job if the same tree has already been built and tested -# successfully before (e.g. because the branch got rebased, changing only -# the commit messages). -skip_good_tree () { - if ! good_tree_info="$(grep "^$(git rev-parse $TRAVIS_COMMIT^{tree}) " "$good_trees_file")" - then - # Haven't seen this tree yet, or no cached good trees file yet. - # Continue the build job. - return - fi - - echo "$good_tree_info" | { - read tree prev_good_commit prev_good_job_number prev_good_job_id - - if test "$TRAVIS_JOB_ID" = "$prev_good_job_id" - then - cat <<-EOF - $(tput setaf 2)Skipping build job for commit $TRAVIS_COMMIT.$(tput sgr0) - This commit has already been built and tested successfully by this build job. - To force a re-build delete the branch's cache and then hit 'Restart job'. 
- EOF - else - cat <<-EOF - $(tput setaf 2)Skipping build job for commit $TRAVIS_COMMIT.$(tput sgr0) - This commit's tree has already been built and tested successfully in build job $prev_good_job_number for commit $prev_good_commit. - The log of that build job is available at https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$prev_good_job_id - To force a re-build delete the branch's cache and then hit 'Restart job'. - EOF - fi - } - - exit 0 -} - -check_unignored_build_artifacts () -{ - ! git ls-files --other --exclude-standard --error-unmatch \ - -- ':/*' 2>/dev/null || - { - echo "$(tput setaf 1)error: found unignored build artifacts$(tput sgr0)" - false - } -} - -# Set 'exit on error' for all CI scripts to let the caller know that -# something went wrong. -# Set tracing executed commands, primarily setting environment variables -# and installing dependencies. -set -ex - -cache_dir="$HOME/travis-cache" -good_trees_file="$cache_dir/good-trees" - -mkdir -p "$cache_dir" - -skip_branch_tip_with_tag -skip_good_tree - -if test -z "$jobname" -then - jobname="$TRAVIS_OS_NAME-$CC" -fi - -export DEVELOPER=1 -export DEFAULT_TEST_TARGET=prove -export GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save" -export GIT_TEST_OPTS="--verbose-log -x --immediate" -export GIT_TEST_CLONE_2GB=YesPlease -if [ "$jobname" = linux-gcc ]; then - export CC=gcc-8 -fi - -case "$jobname" in -linux-clang|linux-gcc) - export GIT_TEST_HTTPD=YesPlease - - # The Linux build installs the defined dependency versions below. - # The OS X build installs the latest available versions. Keep that - # in mind when you encounter a broken OS X build! - export LINUX_P4_VERSION="16.2" - export LINUX_GIT_LFS_VERSION="1.5.2" - - P4_PATH="$HOME/custom/p4" - GIT_LFS_PATH="$HOME/custom/git-lfs" - export PATH="$GIT_LFS_PATH:$P4_PATH:$PATH" - ;; -osx-clang|osx-gcc) - # t9810 occasionally fails on Travis CI OS X - # t9816 occasionally fails with "TAP out of sequence errors" on - # Travis CI OS X - export GIT_SKIP_TESTS="t9810 t9816" - ;; -GIT_TEST_GETTEXT_POISON) - export GIT_TEST_GETTEXT_POISON=YesPlease - ;; -esac diff --git a/ci/lib.sh b/ci/lib.sh new file mode 100755 index 0000000000..16f4ecbc67 --- /dev/null +++ b/ci/lib.sh @@ -0,0 +1,188 @@ +# Library of functions shared by all CI scripts + +skip_branch_tip_with_tag () { + # Sometimes, a branch is pushed at the same time the tag that points + # at the same commit as the tip of the branch is pushed, and building + # both at the same time is a waste. + # + # When the build is triggered by a push to a tag, $CI_BRANCH will + # have that tagname, e.g. v2.14.0. Let's see if $CI_BRANCH is + # exactly at a tag, and if so, if it is different from $CI_BRANCH. + # That way, we can tell if we are building the tip of a branch that + # is tagged and we can skip the build because we won't be skipping a + # build of a tag. + + if TAG=$(git describe --exact-match "$CI_BRANCH" 2>/dev/null) && + test "$TAG" != "$CI_BRANCH" + then + echo "$(tput setaf 2)Tip of $CI_BRANCH is exactly at $TAG$(tput sgr0)" + exit 0 + fi +} + +# Save some info about the current commit's tree, so we can skip the build +# job if we encounter the same tree again and can provide a useful info +# message. 
+save_good_tree () { + echo "$(git rev-parse $CI_COMMIT^{tree}) $CI_COMMIT $CI_JOB_NUMBER $CI_JOB_ID" >>"$good_trees_file" + # limit the file size + tail -1000 "$good_trees_file" >"$good_trees_file".tmp + mv "$good_trees_file".tmp "$good_trees_file" +} + +# Skip the build job if the same tree has already been built and tested +# successfully before (e.g. because the branch got rebased, changing only +# the commit messages). +skip_good_tree () { + if ! good_tree_info="$(grep "^$(git rev-parse $CI_COMMIT^{tree}) " "$good_trees_file")" + then + # Haven't seen this tree yet, or no cached good trees file yet. + # Continue the build job. + return + fi + + echo "$good_tree_info" | { + read tree prev_good_commit prev_good_job_number prev_good_job_id + + if test "$CI_JOB_ID" = "$prev_good_job_id" + then + cat <<-EOF + $(tput setaf 2)Skipping build job for commit $CI_COMMIT.$(tput sgr0) + This commit has already been built and tested successfully by this build job. + To force a re-build delete the branch's cache and then hit 'Restart job'. + EOF + else + cat <<-EOF + $(tput setaf 2)Skipping build job for commit $CI_COMMIT.$(tput sgr0) + This commit's tree has already been built and tested successfully in build job $prev_good_job_number for commit $prev_good_commit. + The log of that build job is available at $(url_for_job_id $prev_good_job_id) + To force a re-build delete the branch's cache and then hit 'Restart job'. + EOF + fi + } + + exit 0 +} + +check_unignored_build_artifacts () +{ + ! git ls-files --other --exclude-standard --error-unmatch \ + -- ':/*' 2>/dev/null || + { + echo "$(tput setaf 1)error: found unignored build artifacts$(tput sgr0)" + false + } +} + +# Set 'exit on error' for all CI scripts to let the caller know that +# something went wrong. +# Set tracing executed commands, primarily setting environment variables +# and installing dependencies. +set -ex + +if test true = "$TRAVIS" +then + CI_TYPE=travis + # When building a PR, TRAVIS_BRANCH refers to the *target* branch. Not + # what we want here. We want the source branch instead. 
+ CI_BRANCH="${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" + CI_COMMIT="$TRAVIS_COMMIT" + CI_JOB_ID="$TRAVIS_JOB_ID" + CI_JOB_NUMBER="$TRAVIS_JOB_NUMBER" + CI_OS_NAME="$TRAVIS_OS_NAME" + CI_REPO_SLUG="$TRAVIS_REPO_SLUG" + + cache_dir="$HOME/travis-cache" + + url_for_job_id () { + echo "https://travis-ci.org/$CI_REPO_SLUG/jobs/$1" + } + + BREW_INSTALL_PACKAGES="git-lfs gettext" + export GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save" + export GIT_TEST_OPTS="--verbose-log -x --immediate" + export MAKEFLAGS="--jobs=2" +elif test -n "$SYSTEM_COLLECTIONURI" || test -n "$SYSTEM_TASKDEFINITIONSURI" +then + CI_TYPE=azure-pipelines + # We are running in Azure Pipelines + CI_BRANCH="$BUILD_SOURCEBRANCH" + CI_COMMIT="$BUILD_SOURCEVERSION" + CI_JOB_ID="$BUILD_BUILDID" + CI_JOB_NUMBER="$BUILD_BUILDNUMBER" + CI_OS_NAME="$(echo "$AGENT_OS" | tr A-Z a-z)" + test darwin != "$CI_OS_NAME" || CI_OS_NAME=osx + CI_REPO_SLUG="$(expr "$BUILD_REPOSITORY_URI" : '.*/\([^/]*/[^/]*\)$')" + CC="${CC:-gcc}" + + # use a subdirectory of the cache dir (because the file share is shared + # among *all* phases) + cache_dir="$HOME/test-cache/$SYSTEM_PHASENAME" + + url_for_job_id () { + echo "$SYSTEM_TASKDEFINITIONSURI$SYSTEM_TEAMPROJECT/_build/results?buildId=$1" + } + + BREW_INSTALL_PACKAGES=gcc@8 + export GIT_PROVE_OPTS="--timer --jobs 10 --state=failed,slow,save" + export GIT_TEST_OPTS="--verbose-log -x --write-junit-xml" + export MAKEFLAGS="--jobs=10" + test windows_nt != "$CI_OS_NAME" || + GIT_TEST_OPTS="--no-chain-lint --no-bin-wrappers $GIT_TEST_OPTS" +else + echo "Could not identify CI type" >&2 + exit 1 +fi + +good_trees_file="$cache_dir/good-trees" + +mkdir -p "$cache_dir" + +skip_branch_tip_with_tag +skip_good_tree + +if test -z "$jobname" +then + jobname="$CI_OS_NAME-$CC" +fi + +export DEVELOPER=1 +export DEFAULT_TEST_TARGET=prove +export GIT_TEST_CLONE_2GB=YesPlease + +case "$jobname" in +linux-clang|linux-gcc) + if [ "$jobname" = linux-gcc ] + then + export CC=gcc-8 + fi + + export GIT_TEST_HTTPD=YesPlease + + # The Linux build installs the defined dependency versions below. + # The OS X build installs the latest available versions. Keep that + # in mind when you encounter a broken OS X build! + export LINUX_P4_VERSION="16.2" + export LINUX_GIT_LFS_VERSION="1.5.2" + + P4_PATH="$HOME/custom/p4" + GIT_LFS_PATH="$HOME/custom/git-lfs" + export PATH="$GIT_LFS_PATH:$P4_PATH:$PATH" + ;; +osx-clang|osx-gcc) + if [ "$jobname" = osx-gcc ] + then + export CC=gcc-8 + fi + + # t9810 occasionally fails on Travis CI OS X + # t9816 occasionally fails with "TAP out of sequence errors" on + # Travis CI OS X + export GIT_SKIP_TESTS="t9810 t9816" + ;; +GIT_TEST_GETTEXT_POISON) + export GIT_TEST_GETTEXT_POISON=YesPlease + ;; +esac + +export MAKEFLAGS="CC=${CC:-cc}" diff --git a/ci/make-test-artifacts.sh b/ci/make-test-artifacts.sh new file mode 100755 index 0000000000..646967481f --- /dev/null +++ b/ci/make-test-artifacts.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# +# Build Git and store artifacts for testing +# + +mkdir -p "$1" # in case ci/lib.sh decides to quit early + +. 
${0%/*}/lib.sh + +make artifacts-tar ARTIFACTS_DIRECTORY="$1" + +check_unignored_build_artifacts diff --git a/ci/mount-fileshare.sh b/ci/mount-fileshare.sh new file mode 100755 index 0000000000..26b58a8096 --- /dev/null +++ b/ci/mount-fileshare.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +die () { + echo "$*" >&2 + exit 1 +} + +test $# = 4 || +die "Usage: $0 <share> <username> <password> <mountpoint>" + +mkdir -p "$4" || die "Could not create $4" + +case "$(uname -s)" in +Linux) + sudo mount -t cifs -o vers=3.0,username="$2",password="$3",dir_mode=0777,file_mode=0777,serverino "$1" "$4" + ;; +Darwin) + pass="$(echo "$3" | sed -e 's/\//%2F/g' -e 's/+/%2B/g')" && + mount -t smbfs,soft "smb://$2:$pass@${1#//}" "$4" + ;; +*) + die "No support for $(uname -s)" + ;; +esac || +die "Could not mount $4" diff --git a/ci/print-test-failures.sh b/ci/print-test-failures.sh index d55460a212..e688a26f0d 100755 --- a/ci/print-test-failures.sh +++ b/ci/print-test-failures.sh @@ -3,7 +3,7 @@ # Print output of failing tests # -. ${0%/*}/lib-travisci.sh +. ${0%/*}/lib.sh # Tracing executed commands would produce too much noise in the loop below. set +x @@ -38,6 +38,19 @@ do test_name="${TEST_EXIT%.exit}" test_name="${test_name##*/}" trash_dir="trash directory.$test_name" + case "$CI_TYPE" in + travis) + ;; + azure-pipelines) + mkdir -p failed-test-artifacts + mv "$trash_dir" failed-test-artifacts + continue + ;; + *) + echo "Unhandled CI type: $CI_TYPE" >&2 + exit 1 + ;; + esac trash_tgz_b64="trash.$test_name.base64" if [ -d "$trash_dir" ] then diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh index cda170d5c2..cdd2913440 100755 --- a/ci/run-build-and-tests.sh +++ b/ci/run-build-and-tests.sh @@ -3,12 +3,15 @@ # Build and test Git # -. ${0%/*}/lib-travisci.sh +. ${0%/*}/lib.sh -ln -s "$cache_dir/.prove" t/.prove +case "$CI_OS_NAME" in +windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";; +*) ln -s "$cache_dir/.prove" t/.prove;; +esac -make --jobs=2 -make --quiet test +make +make test if test "$jobname" = "linux-gcc" then export GIT_TEST_SPLIT_INDEX=yes @@ -17,7 +20,7 @@ then export GIT_TEST_OE_DELTA_SIZE=5 export GIT_TEST_COMMIT_GRAPH=1 export GIT_TEST_MULTI_PACK_INDEX=1 - make --quiet test + make test fi check_unignored_build_artifacts diff --git a/ci/run-linux32-build.sh b/ci/run-linux32-build.sh index 2c60d2e70a..e3a193adbc 100755 --- a/ci/run-linux32-build.sh +++ b/ci/run-linux32-build.sh @@ -55,6 +55,6 @@ linux32 --32bit i386 su -m -l $CI_USER -c ' set -ex cd /usr/src/git test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove - make --jobs=2 - make --quiet test + make + make test ' diff --git a/ci/run-linux32-docker.sh b/ci/run-linux32-docker.sh index 21637903ce..751acfcf8a 100755 --- a/ci/run-linux32-docker.sh +++ b/ci/run-linux32-docker.sh @@ -3,7 +3,7 @@ # Download and run Docker image to build and test 32-bit Git # -. ${0%/*}/lib-travisci.sh +. ${0%/*}/lib.sh docker pull daald/ubuntu32:xenial diff --git a/ci/run-static-analysis.sh b/ci/run-static-analysis.sh index 5688f261d0..a19aa7ebbc 100755 --- a/ci/run-static-analysis.sh +++ b/ci/run-static-analysis.sh @@ -3,9 +3,9 @@ # Perform various static code analysis checks # -. ${0%/*}/lib-travisci.sh +. ${0%/*}/lib.sh -make --jobs=2 coccicheck +make coccicheck set +x diff --git a/ci/run-test-slice.sh b/ci/run-test-slice.sh new file mode 100755 index 0000000000..f8c2c3106a --- /dev/null +++ b/ci/run-test-slice.sh @@ -0,0 +1,17 @@ +#!/bin/sh +# +# Test Git in parallel +# + +. 
${0%/*}/lib.sh + +case "$CI_OS_NAME" in +windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";; +*) ln -s "$cache_dir/.prove" t/.prove;; +esac + +make --quiet -C t T="$(cd t && + ./helper/test-tool path-utils slice-tests "$1" "$2" t[0-9]*.sh | + tr '\n' ' ')" + +check_unignored_build_artifacts diff --git a/ci/run-windows-build.sh b/ci/run-windows-build.sh index d99a180e52..a73a4eca0a 100755 --- a/ci/run-windows-build.sh +++ b/ci/run-windows-build.sh @@ -6,7 +6,7 @@ # supported) and a commit hash. # -. ${0%/*}/lib-travisci.sh +. ${0%/*}/lib.sh test $# -ne 2 && echo "Unexpected number of parameters" && exit 1 test -z "$GFW_CI_TOKEN" && echo "GFW_CI_TOKEN not defined" && exit diff --git a/ci/test-documentation.sh b/ci/test-documentation.sh index a20de9ca12..be3b7d376a 100755 --- a/ci/test-documentation.sh +++ b/ci/test-documentation.sh @@ -3,15 +3,16 @@ # Perform sanity checks on documentation and build it. # -. ${0%/*}/lib-travisci.sh +. ${0%/*}/lib.sh +test -n "$ALREADY_HAVE_ASCIIDOCTOR" || gem install asciidoctor make check-builtins make check-docs # Build docs with AsciiDoc -make --jobs=2 doc > >(tee stdout.log) 2> >(tee stderr.log >&2) +make doc > >(tee stdout.log) 2> >(tee stderr.log >&2) ! test -s stderr.log test -s Documentation/git.html test -s Documentation/git.xml @@ -23,7 +24,7 @@ check_unignored_build_artifacts # Build docs with AsciiDoctor make clean -make --jobs=2 USE_ASCIIDOCTOR=1 doc > >(tee stdout.log) 2> >(tee stderr.log >&2) +make USE_ASCIIDOCTOR=1 doc > >(tee stdout.log) 2> >(tee stderr.log >&2) sed '/^GIT_VERSION = / d' stderr.log ! test -s stderr.log test -s Documentation/git.html @@ -21,7 +21,7 @@ struct column_data { }; /* return length of 's' in letters, ANSI escapes stripped */ -static int item_length(unsigned int colopts, const char *s) +static int item_length(const char *s) { int len, i = 0; struct strbuf str = STRBUF_INIT; @@ -167,7 +167,7 @@ static void display_table(const struct string_list *list, ALLOC_ARRAY(data.len, list->nr); for (i = 0; i < list->nr; i++) - data.len[i] = item_length(colopts, list->items[i].string); + data.len[i] = item_length(list->items[i].string); layout(&data, &initial_width); diff --git a/combine-diff.c b/combine-diff.c index a143c00634..23d8fabe75 100644 --- a/combine-diff.c +++ b/combine-diff.c @@ -1321,6 +1321,14 @@ static const char *path_path(void *obj) return path->path; } +/* + * Diff stat formats which we always compute solely against the first parent. + */ +#define STAT_FORMAT_MASK (DIFF_FORMAT_NUMSTAT \ + | DIFF_FORMAT_SHORTSTAT \ + | DIFF_FORMAT_SUMMARY \ + | DIFF_FORMAT_DIRSTAT \ + | DIFF_FORMAT_DIFFSTAT) /* find set of paths that every parent touches */ static struct combine_diff_path *find_paths_generic(const struct object_id *oid, @@ -1342,8 +1350,7 @@ static struct combine_diff_path *find_paths_generic(const struct object_id *oid, * show stat against the first parent even when doing * combined diff. */ - int stat_opt = (output_format & - (DIFF_FORMAT_NUMSTAT|DIFF_FORMAT_DIFFSTAT)); + int stat_opt = output_format & STAT_FORMAT_MASK; if (i == 0 && stat_opt) opt->output_format = stat_opt; else @@ -1470,8 +1477,7 @@ void diff_tree_combined(const struct object_id *oid, * show stat against the first parent even * when doing combined diff. 
*/ - stat_opt = (opt->output_format & - (DIFF_FORMAT_NUMSTAT|DIFF_FORMAT_DIFFSTAT)); + stat_opt = opt->output_format & STAT_FORMAT_MASK; if (stat_opt) { diffopts.output_format = stat_opt; @@ -1515,8 +1521,7 @@ void diff_tree_combined(const struct object_id *oid, show_raw_diff(p, num_parent, rev); needsep = 1; } - else if (opt->output_format & - (DIFF_FORMAT_NUMSTAT|DIFF_FORMAT_DIFFSTAT)) + else if (opt->output_format & STAT_FORMAT_MASK) needsep = 1; else if (opt->output_format & DIFF_FORMAT_CALLBACK) handle_combined_callback(opt, paths, num_parent, num_paths); diff --git a/commit-graph.c b/commit-graph.c index 3f9c03afd5..47e9be0a3a 100644 --- a/commit-graph.c +++ b/commit-graph.c @@ -21,14 +21,14 @@ #define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */ #define GRAPH_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */ #define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */ -#define GRAPH_CHUNKID_LARGEEDGES 0x45444745 /* "EDGE" */ +#define GRAPH_CHUNKID_EXTRAEDGES 0x45444745 /* "EDGE" */ #define GRAPH_DATA_WIDTH (the_hash_algo->rawsz + 16) #define GRAPH_VERSION_1 0x1 #define GRAPH_VERSION GRAPH_VERSION_1 -#define GRAPH_OCTOPUS_EDGES_NEEDED 0x80000000 +#define GRAPH_EXTRA_EDGES_NEEDED 0x80000000 #define GRAPH_EDGE_LAST_MASK 0x7fffffff #define GRAPH_PARENT_NONE 0x70000000 @@ -83,16 +83,10 @@ static int commit_graph_compatible(struct repository *r) struct commit_graph *load_commit_graph_one(const char *graph_file) { void *graph_map; - const unsigned char *data, *chunk_lookup; size_t graph_size; struct stat st; - uint32_t i; - struct commit_graph *graph; + struct commit_graph *ret; int fd = git_open(graph_file); - uint64_t last_chunk_offset; - uint32_t last_chunk_id; - uint32_t graph_signature; - unsigned char graph_version, hash_version; if (fd < 0) return NULL; @@ -107,27 +101,55 @@ struct commit_graph *load_commit_graph_one(const char *graph_file) die(_("graph file %s is too small"), graph_file); } graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0); + ret = parse_commit_graph(graph_map, fd, graph_size); + + if (!ret) { + munmap(graph_map, graph_size); + close(fd); + exit(1); + } + + return ret; +} + +struct commit_graph *parse_commit_graph(void *graph_map, int fd, + size_t graph_size) +{ + const unsigned char *data, *chunk_lookup; + uint32_t i; + struct commit_graph *graph; + uint64_t last_chunk_offset; + uint32_t last_chunk_id; + uint32_t graph_signature; + unsigned char graph_version, hash_version; + + if (!graph_map) + return NULL; + + if (graph_size < GRAPH_MIN_SIZE) + return NULL; + data = (const unsigned char *)graph_map; graph_signature = get_be32(data); if (graph_signature != GRAPH_SIGNATURE) { error(_("graph signature %X does not match signature %X"), graph_signature, GRAPH_SIGNATURE); - goto cleanup_fail; + return NULL; } graph_version = *(unsigned char*)(data + 4); if (graph_version != GRAPH_VERSION) { error(_("graph version %X does not match version %X"), graph_version, GRAPH_VERSION); - goto cleanup_fail; + return NULL; } hash_version = *(unsigned char*)(data + 5); if (hash_version != oid_version()) { error(_("hash version %X does not match version %X"), hash_version, oid_version()); - goto cleanup_fail; + return NULL; } graph = alloc_commit_graph(); @@ -142,16 +164,27 @@ struct commit_graph *load_commit_graph_one(const char *graph_file) last_chunk_offset = 8; chunk_lookup = data + 8; for (i = 0; i < graph->num_chunks; i++) { - uint32_t chunk_id = get_be32(chunk_lookup + 0); - uint64_t chunk_offset = get_be64(chunk_lookup + 4); + uint32_t chunk_id; + uint64_t chunk_offset; 
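
Note: the refactoring above splits the validation step out of load_commit_graph_one() into parse_commit_graph(), which now reports corruption by returning NULL instead of calling exit(1). A minimal sketch of a caller that relies on that behaviour follows; the helper name probe_commit_graph_buffer and the use of fd = -1 are my own illustration, not part of this patch.

    #include "cache.h"
    #include "commit-graph.h"

    /*
     * Sketch: feed an arbitrary in-memory buffer to the parser and treat
     * NULL as "rejected" (bad signature, version, hash version, or chunk
     * table) rather than dying, as the old load_commit_graph_one() path
     * would have done.
     */
    static int probe_commit_graph_buffer(void *buf, size_t len)
    {
            struct commit_graph *g = parse_commit_graph(buf, -1, len);

            if (!g)
                    return -1;
            free(g);    /* chunk pointers reference 'buf', which the caller still owns */
            return 0;
    }
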
int chunk_repeated = 0; + if (data + graph_size - chunk_lookup < + GRAPH_CHUNKLOOKUP_WIDTH) { + error(_("chunk lookup table entry missing; graph file may be incomplete")); + free(graph); + return NULL; + } + + chunk_id = get_be32(chunk_lookup + 0); + chunk_offset = get_be64(chunk_lookup + 4); + chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH; if (chunk_offset > graph_size - the_hash_algo->rawsz) { error(_("improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32), (uint32_t)chunk_offset); - goto cleanup_fail; + free(graph); + return NULL; } switch (chunk_id) { @@ -176,17 +209,18 @@ struct commit_graph *load_commit_graph_one(const char *graph_file) graph->chunk_commit_data = data + chunk_offset; break; - case GRAPH_CHUNKID_LARGEEDGES: - if (graph->chunk_large_edges) + case GRAPH_CHUNKID_EXTRAEDGES: + if (graph->chunk_extra_edges) chunk_repeated = 1; else - graph->chunk_large_edges = data + chunk_offset; + graph->chunk_extra_edges = data + chunk_offset; break; } if (chunk_repeated) { error(_("chunk id %08x appears multiple times"), chunk_id); - goto cleanup_fail; + free(graph); + return NULL; } if (last_chunk_id == GRAPH_CHUNKID_OIDLOOKUP) @@ -200,11 +234,6 @@ struct commit_graph *load_commit_graph_one(const char *graph_file) } return graph; - -cleanup_fail: - munmap(graph_map, graph_size); - close(fd); - exit(1); } static void prepare_commit_graph_one(struct repository *r, const char *obj_dir) @@ -288,7 +317,8 @@ static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t g->chunk_oid_lookup, g->hash_len, pos); } -static struct commit_list **insert_parent_or_die(struct commit_graph *g, +static struct commit_list **insert_parent_or_die(struct repository *r, + struct commit_graph *g, uint64_t pos, struct commit_list **pptr) { @@ -299,7 +329,7 @@ static struct commit_list **insert_parent_or_die(struct commit_graph *g, die("invalid parent position %"PRIu64, pos); hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos); - c = lookup_commit(the_repository, &oid); + c = lookup_commit(r, &oid); if (!c) die(_("could not find commit %s"), oid_to_hex(&oid)); c->graph_pos = pos; @@ -313,7 +343,9 @@ static void fill_commit_graph_info(struct commit *item, struct commit_graph *g, item->generation = get_be32(commit_data + g->hash_len + 8) >> 2; } -static int fill_commit_in_graph(struct commit *item, struct commit_graph *g, uint32_t pos) +static int fill_commit_in_graph(struct repository *r, + struct commit *item, + struct commit_graph *g, uint32_t pos) { uint32_t edge_value; uint32_t *parent_data_ptr; @@ -337,21 +369,21 @@ static int fill_commit_in_graph(struct commit *item, struct commit_graph *g, uin edge_value = get_be32(commit_data + g->hash_len); if (edge_value == GRAPH_PARENT_NONE) return 1; - pptr = insert_parent_or_die(g, edge_value, pptr); + pptr = insert_parent_or_die(r, g, edge_value, pptr); edge_value = get_be32(commit_data + g->hash_len + 4); if (edge_value == GRAPH_PARENT_NONE) return 1; - if (!(edge_value & GRAPH_OCTOPUS_EDGES_NEEDED)) { - pptr = insert_parent_or_die(g, edge_value, pptr); + if (!(edge_value & GRAPH_EXTRA_EDGES_NEEDED)) { + pptr = insert_parent_or_die(r, g, edge_value, pptr); return 1; } - parent_data_ptr = (uint32_t*)(g->chunk_large_edges + + parent_data_ptr = (uint32_t*)(g->chunk_extra_edges + 4 * (uint64_t)(edge_value & GRAPH_EDGE_LAST_MASK)); do { edge_value = get_be32(parent_data_ptr); - pptr = insert_parent_or_die(g, + pptr = insert_parent_or_die(r, g, edge_value & GRAPH_EDGE_LAST_MASK, pptr); parent_data_ptr++; @@ -370,7 +402,9 @@ static int 
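
The new bounds check above guards the table of contents that maps chunk IDs to their offsets. For orientation, here is the entry layout being iterated over; the struct is purely illustrative (the real code reads the big-endian fields directly from the mapped file with get_be32()/get_be64()), and the 12-byte width is inferred from those two reads rather than quoted from a header.

    #include <stdint.h>

    /*
     * One table-of-contents entry, as scanned in the loop above. On disk
     * the fields are big-endian and packed back to back, so the stride
     * (GRAPH_CHUNKLOOKUP_WIDTH) works out to 4 + 8 = 12 bytes per entry.
     * The guard
     *     data + graph_size - chunk_lookup < GRAPH_CHUNKLOOKUP_WIDTH
     * refuses to read an entry that would run past the end of the mapped
     * file instead of walking off the end of a truncated table.
     */
    struct graph_toc_entry {
            uint32_t id;      /* "OIDF", "OIDL", "CDAT" or "EDGE" */
            uint64_t offset;  /* byte offset of the chunk from the start of the file */
    };
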
find_commit_in_graph(struct commit *item, struct commit_graph *g, uin } } -static int parse_commit_in_graph_one(struct commit_graph *g, struct commit *item) +static int parse_commit_in_graph_one(struct repository *r, + struct commit_graph *g, + struct commit *item) { uint32_t pos; @@ -378,7 +412,7 @@ static int parse_commit_in_graph_one(struct commit_graph *g, struct commit *item return 1; if (find_commit_in_graph(item, g, &pos)) - return fill_commit_in_graph(item, g, pos); + return fill_commit_in_graph(r, item, g, pos); return 0; } @@ -387,7 +421,7 @@ int parse_commit_in_graph(struct repository *r, struct commit *item) { if (!prepare_commit_graph(r)) return 0; - return parse_commit_in_graph_one(r->objects->commit_graph, item); + return parse_commit_in_graph_one(r, r->objects->commit_graph, item); } void load_commit_graph_info(struct repository *r, struct commit *item) @@ -399,19 +433,22 @@ void load_commit_graph_info(struct repository *r, struct commit *item) fill_commit_graph_info(item, r->objects->commit_graph, pos); } -static struct tree *load_tree_for_commit(struct commit_graph *g, struct commit *c) +static struct tree *load_tree_for_commit(struct repository *r, + struct commit_graph *g, + struct commit *c) { struct object_id oid; const unsigned char *commit_data = g->chunk_commit_data + GRAPH_DATA_WIDTH * (c->graph_pos); hashcpy(oid.hash, commit_data); - c->maybe_tree = lookup_tree(the_repository, &oid); + c->maybe_tree = lookup_tree(r, &oid); return c->maybe_tree; } -static struct tree *get_commit_tree_in_graph_one(struct commit_graph *g, +static struct tree *get_commit_tree_in_graph_one(struct repository *r, + struct commit_graph *g, const struct commit *c) { if (c->maybe_tree) @@ -419,17 +456,19 @@ static struct tree *get_commit_tree_in_graph_one(struct commit_graph *g, if (c->graph_pos == COMMIT_NOT_FROM_GRAPH) BUG("get_commit_tree_in_graph_one called from non-commit-graph commit"); - return load_tree_for_commit(g, (struct commit *)c); + return load_tree_for_commit(r, g, (struct commit *)c); } struct tree *get_commit_tree_in_graph(struct repository *r, const struct commit *c) { - return get_commit_tree_in_graph_one(r->objects->commit_graph, c); + return get_commit_tree_in_graph_one(r, r->objects->commit_graph, c); } static void write_graph_chunk_fanout(struct hashfile *f, struct commit **commits, - int nr_commits) + int nr_commits, + struct progress *progress, + uint64_t *progress_cnt) { int i, count = 0; struct commit **list = commits; @@ -443,6 +482,7 @@ static void write_graph_chunk_fanout(struct hashfile *f, while (count < nr_commits) { if ((*list)->object.oid.hash[0] != i) break; + display_progress(progress, ++*progress_cnt); count++; list++; } @@ -452,12 +492,16 @@ static void write_graph_chunk_fanout(struct hashfile *f, } static void write_graph_chunk_oids(struct hashfile *f, int hash_len, - struct commit **commits, int nr_commits) + struct commit **commits, int nr_commits, + struct progress *progress, + uint64_t *progress_cnt) { struct commit **list = commits; int count; - for (count = 0; count < nr_commits; count++, list++) + for (count = 0; count < nr_commits; count++, list++) { + display_progress(progress, ++*progress_cnt); hashwrite(f, (*list)->object.oid.hash, (int)hash_len); + } } static const unsigned char *commit_to_sha1(size_t index, void *table) @@ -467,7 +511,9 @@ static const unsigned char *commit_to_sha1(size_t index, void *table) } static void write_graph_chunk_data(struct hashfile *f, int hash_len, - struct commit **commits, int nr_commits) + struct commit 
**commits, int nr_commits, + struct progress *progress, + uint64_t *progress_cnt) { struct commit **list = commits; struct commit **last = commits + nr_commits; @@ -477,6 +523,7 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len, struct commit_list *parent; int edge_value; uint32_t packedDate[2]; + display_progress(progress, ++*progress_cnt); parse_commit(*list); hashwrite(f, get_commit_tree_oid(*list)->hash, hash_len); @@ -505,7 +552,7 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len, if (!parent) edge_value = GRAPH_PARENT_NONE; else if (parent->next) - edge_value = GRAPH_OCTOPUS_EDGES_NEEDED | num_extra_edges; + edge_value = GRAPH_EXTRA_EDGES_NEEDED | num_extra_edges; else { edge_value = sha1_pos(parent->item->object.oid.hash, commits, @@ -519,7 +566,7 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len, hashwrite_be32(f, edge_value); - if (edge_value & GRAPH_OCTOPUS_EDGES_NEEDED) { + if (edge_value & GRAPH_EXTRA_EDGES_NEEDED) { do { num_extra_edges++; parent = parent->next; @@ -540,9 +587,11 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len, } } -static void write_graph_chunk_large_edges(struct hashfile *f, +static void write_graph_chunk_extra_edges(struct hashfile *f, struct commit **commits, - int nr_commits) + int nr_commits, + struct progress *progress, + uint64_t *progress_cnt) { struct commit **list = commits; struct commit **last = commits + nr_commits; @@ -550,6 +599,9 @@ static void write_graph_chunk_large_edges(struct hashfile *f, while (list < last) { int num_parents = 0; + + display_progress(progress, ++*progress_cnt); + for (parent = (*list)->parents; num_parents < 3 && parent; parent = parent->next) num_parents++; @@ -643,15 +695,15 @@ static void add_missing_parents(struct packed_oid_list *oids, struct commit *com static void close_reachable(struct packed_oid_list *oids, int report_progress) { - int i, j; + int i; struct commit *commit; struct progress *progress = NULL; if (report_progress) progress = start_delayed_progress( - _("Loading known commits in commit graph"), j = 0); + _("Loading known commits in commit graph"), oids->nr); for (i = 0; i < oids->nr; i++) { - display_progress(progress, ++j); + display_progress(progress, i + 1); commit = lookup_commit(the_repository, &oids->list[i]); if (commit) commit->object.flags |= UNINTERESTING; @@ -665,9 +717,9 @@ static void close_reachable(struct packed_oid_list *oids, int report_progress) */ if (report_progress) progress = start_delayed_progress( - _("Expanding reachable commits in commit graph"), j = 0); + _("Expanding reachable commits in commit graph"), oids->nr); for (i = 0; i < oids->nr; i++) { - display_progress(progress, ++j); + display_progress(progress, i + 1); commit = lookup_commit(the_repository, &oids->list[i]); if (commit && !parse_commit(commit)) @@ -677,9 +729,9 @@ static void close_reachable(struct packed_oid_list *oids, int report_progress) if (report_progress) progress = start_delayed_progress( - _("Clearing commit marks in commit graph"), j = 0); + _("Clearing commit marks in commit graph"), oids->nr); for (i = 0; i < oids->nr; i++) { - display_progress(progress, ++j); + display_progress(progress, i + 1); commit = lookup_commit(the_repository, &oids->list[i]); if (commit) @@ -774,12 +826,16 @@ void write_commit_graph(const char *obj_dir, struct commit_list *parent; struct progress *progress = NULL; const unsigned hashsz = the_hash_algo->rawsz; + uint64_t progress_cnt = 0; + struct strbuf progress_title = STRBUF_INIT; + 
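
For readers new to the on-disk format: the rename from "large"/"octopus" edges to "extra" edges above changes only the naming, not the encoding. The helper below restates, as a standalone sketch, how the second edge value written by write_graph_chunk_data() is interpreted on the read side; the function and its name are mine, only the two mask values come from the code above.

    #include <stdint.h>

    #define GRAPH_EXTRA_EDGES_NEEDED 0x80000000u
    #define GRAPH_EDGE_LAST_MASK     0x7fffffffu

    /*
     * Returns 1 if 'edge_value' refers to the EDGE ("extra edges") chunk,
     * i.e. the commit has more than two parents and *pos is an index of
     * be32 entries within that chunk; returns 0 if *pos is simply the
     * lexicographic position of the second parent.
     */
    static int second_edge_is_extra(uint32_t edge_value, uint32_t *pos)
    {
            *pos = edge_value & GRAPH_EDGE_LAST_MASK;
            return (edge_value & GRAPH_EXTRA_EDGES_NEEDED) != 0;
    }
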
unsigned long approx_nr_objects; if (!commit_graph_compatible(the_repository)) return; oids.nr = 0; - oids.alloc = approximate_object_count() / 32; + approx_nr_objects = approximate_object_count(); + oids.alloc = approx_nr_objects / 32; oids.progress = NULL; oids.progress_done = 0; @@ -809,8 +865,12 @@ void write_commit_graph(const char *obj_dir, strbuf_addf(&packname, "%s/pack/", obj_dir); dirlen = packname.len; if (report_progress) { - oids.progress = start_delayed_progress( - _("Finding commits for commit graph"), 0); + strbuf_addf(&progress_title, + Q_("Finding commits for commit graph in %d pack", + "Finding commits for commit graph in %d packs", + pack_indexes->nr), + pack_indexes->nr); + oids.progress = start_delayed_progress(progress_title.buf, 0); oids.progress_done = 0; } for (i = 0; i < pack_indexes->nr; i++) { @@ -822,19 +882,26 @@ void write_commit_graph(const char *obj_dir, die(_("error adding pack %s"), packname.buf); if (open_pack_index(p)) die(_("error opening index for %s"), packname.buf); - for_each_object_in_pack(p, add_packed_commits, &oids, 0); + for_each_object_in_pack(p, add_packed_commits, &oids, + FOR_EACH_OBJECT_PACK_ORDER); close_pack(p); free(p); } stop_progress(&oids.progress); + strbuf_reset(&progress_title); strbuf_release(&packname); } if (commit_hex) { - if (report_progress) - progress = start_delayed_progress( - _("Finding commits for commit graph"), - commit_hex->nr); + if (report_progress) { + strbuf_addf(&progress_title, + Q_("Finding commits for commit graph from %d ref", + "Finding commits for commit graph from %d refs", + commit_hex->nr), + commit_hex->nr); + progress = start_delayed_progress(progress_title.buf, + commit_hex->nr); + } for (i = 0; i < commit_hex->nr; i++) { const char *end; struct object_id oid; @@ -854,25 +921,36 @@ void write_commit_graph(const char *obj_dir, } } stop_progress(&progress); + strbuf_reset(&progress_title); } if (!pack_indexes && !commit_hex) { if (report_progress) oids.progress = start_delayed_progress( - _("Finding commits for commit graph"), 0); - for_each_packed_object(add_packed_commits, &oids, 0); + _("Finding commits for commit graph among packed objects"), + approx_nr_objects); + for_each_packed_object(add_packed_commits, &oids, + FOR_EACH_OBJECT_PACK_ORDER); + if (oids.progress_done < approx_nr_objects) + display_progress(oids.progress, approx_nr_objects); stop_progress(&oids.progress); } close_reachable(&oids, report_progress); + if (report_progress) + progress = start_delayed_progress( + _("Counting distinct commits in commit graph"), + oids.nr); + display_progress(progress, 0); /* TODO: Measure QSORT() progress */ QSORT(oids.list, oids.nr, commit_compare); - count_distinct = 1; for (i = 1; i < oids.nr; i++) { + display_progress(progress, i + 1); if (!oideq(&oids.list[i - 1], &oids.list[i])) count_distinct++; } + stop_progress(&progress); if (count_distinct >= GRAPH_EDGE_LAST_MASK) die(_("the commit graph format cannot write %d commits"), count_distinct); @@ -882,8 +960,13 @@ void write_commit_graph(const char *obj_dir, ALLOC_ARRAY(commits.list, commits.alloc); num_extra_edges = 0; + if (report_progress) + progress = start_delayed_progress( + _("Finding extra edges in commit graph"), + oids.nr); for (i = 0; i < oids.nr; i++) { int num_parents = 0; + display_progress(progress, i + 1); if (i > 0 && oideq(&oids.list[i - 1], &oids.list[i])) continue; @@ -900,6 +983,7 @@ void write_commit_graph(const char *obj_dir, commits.nr++; } num_chunks = num_extra_edges ? 
4 : 3; + stop_progress(&progress); if (commits.nr >= GRAPH_EDGE_LAST_MASK) die(_("too many commits to write graph")); @@ -927,7 +1011,7 @@ void write_commit_graph(const char *obj_dir, chunk_ids[1] = GRAPH_CHUNKID_OIDLOOKUP; chunk_ids[2] = GRAPH_CHUNKID_DATA; if (num_extra_edges) - chunk_ids[3] = GRAPH_CHUNKID_LARGEEDGES; + chunk_ids[3] = GRAPH_CHUNKID_EXTRAEDGES; else chunk_ids[3] = 0; chunk_ids[4] = 0; @@ -947,10 +1031,23 @@ void write_commit_graph(const char *obj_dir, hashwrite(f, chunk_write, 12); } - write_graph_chunk_fanout(f, commits.list, commits.nr); - write_graph_chunk_oids(f, hashsz, commits.list, commits.nr); - write_graph_chunk_data(f, hashsz, commits.list, commits.nr); - write_graph_chunk_large_edges(f, commits.list, commits.nr); + if (report_progress) { + strbuf_addf(&progress_title, + Q_("Writing out commit graph in %d pass", + "Writing out commit graph in %d passes", + num_chunks), + num_chunks); + progress = start_delayed_progress( + progress_title.buf, + num_chunks * commits.nr); + } + write_graph_chunk_fanout(f, commits.list, commits.nr, progress, &progress_cnt); + write_graph_chunk_oids(f, hashsz, commits.list, commits.nr, progress, &progress_cnt); + write_graph_chunk_data(f, hashsz, commits.list, commits.nr, progress, &progress_cnt); + if (num_extra_edges) + write_graph_chunk_extra_edges(f, commits.list, commits.nr, progress, &progress_cnt); + stop_progress(&progress); + strbuf_release(&progress_title); close_commit_graph(the_repository); finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC); @@ -1035,7 +1132,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g) } graph_commit = lookup_commit(r, &cur_oid); - if (!parse_commit_in_graph_one(g, graph_commit)) + if (!parse_commit_in_graph_one(r, g, graph_commit)) graph_report("failed to parse %s from commit-graph", oid_to_hex(&cur_oid)); } @@ -1071,7 +1168,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g) continue; } - if (!oideq(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid, + if (!oideq(&get_commit_tree_in_graph_one(r, g, graph_commit)->object.oid, get_commit_tree_oid(odb_commit))) graph_report("root tree OID for commit %s in commit-graph is %s != %s", oid_to_hex(&cur_oid), diff --git a/commit-graph.h b/commit-graph.h index 9db40b4d3a..096d8bac34 100644 --- a/commit-graph.h +++ b/commit-graph.h @@ -49,11 +49,14 @@ struct commit_graph { const uint32_t *chunk_oid_fanout; const unsigned char *chunk_oid_lookup; const unsigned char *chunk_commit_data; - const unsigned char *chunk_large_edges; + const unsigned char *chunk_extra_edges; }; struct commit_graph *load_commit_graph_one(const char *graph_file); +struct commit_graph *parse_commit_graph(void *graph_map, int fd, + size_t graph_size); + /* * Return 1 if and only if the repository has a commit-graph * file and generation numbers are computed in that file. diff --git a/commit-reach.c b/commit-reach.c index d5a39defd3..3ea174788a 100644 --- a/commit-reach.c +++ b/commit-reach.c @@ -30,7 +30,8 @@ static int queue_has_nonstale(struct prio_queue *queue) } /* all input commits in one and twos[] must have been parsed! 
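
The chunk-writing code above now drives one shared progress meter across all three or four passes. It uses the stock progress API; a compressed sketch of that pattern follows for readers unfamiliar with it (the title string and total are placeholders, not values from this patch).

    #include "cache.h"
    #include "progress.h"

    /*
     * Sketch of the progress pattern used above: start_delayed_progress()
     * only shows the meter if the work takes noticeably long,
     * display_progress() bumps the counter, and stop_progress() prints
     * the closing ", done." and releases the meter.
     */
    static void do_example_work(unsigned int total)
    {
            struct progress *progress;
            unsigned int i;

            progress = start_delayed_progress("Doing example work", total);
            for (i = 0; i < total; i++)
                    display_progress(progress, i + 1);
            stop_progress(&progress);
    }
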
*/ -static struct commit_list *paint_down_to_common(struct commit *one, int n, +static struct commit_list *paint_down_to_common(struct repository *r, + struct commit *one, int n, struct commit **twos, int min_generation) { @@ -83,7 +84,7 @@ static struct commit_list *paint_down_to_common(struct commit *one, int n, parents = parents->next; if ((p->object.flags & flags) == flags) continue; - if (parse_commit(p)) + if (repo_parse_commit(r, p)) return NULL; p->object.flags |= flags; prio_queue_put(&queue, p); @@ -94,7 +95,9 @@ static struct commit_list *paint_down_to_common(struct commit *one, int n, return result; } -static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos) +static struct commit_list *merge_bases_many(struct repository *r, + struct commit *one, int n, + struct commit **twos) { struct commit_list *list = NULL; struct commit_list *result = NULL; @@ -109,14 +112,14 @@ static struct commit_list *merge_bases_many(struct commit *one, int n, struct co return commit_list_insert(one, &result); } - if (parse_commit(one)) + if (repo_parse_commit(r, one)) return NULL; for (i = 0; i < n; i++) { - if (parse_commit(twos[i])) + if (repo_parse_commit(r, twos[i])) return NULL; } - list = paint_down_to_common(one, n, twos, 0); + list = paint_down_to_common(r, one, n, twos, 0); while (list) { struct commit *commit = pop_commit(&list); @@ -153,7 +156,7 @@ struct commit_list *get_octopus_merge_bases(struct commit_list *in) return ret; } -static int remove_redundant(struct commit **array, int cnt) +static int remove_redundant(struct repository *r, struct commit **array, int cnt) { /* * Some commit in the array may be an ancestor of @@ -171,7 +174,7 @@ static int remove_redundant(struct commit **array, int cnt) ALLOC_ARRAY(filled_index, cnt - 1); for (i = 0; i < cnt; i++) - parse_commit(array[i]); + repo_parse_commit(r, array[i]); for (i = 0; i < cnt; i++) { struct commit_list *common; uint32_t min_generation = array[i]->generation; @@ -187,8 +190,8 @@ static int remove_redundant(struct commit **array, int cnt) if (array[j]->generation < min_generation) min_generation = array[j]->generation; } - common = paint_down_to_common(array[i], filled, work, - min_generation); + common = paint_down_to_common(r, array[i], filled, + work, min_generation); if (array[i]->object.flags & PARENT2) redundant[i] = 1; for (j = 0; j < filled; j++) @@ -213,7 +216,8 @@ static int remove_redundant(struct commit **array, int cnt) return filled; } -static struct commit_list *get_merge_bases_many_0(struct commit *one, +static struct commit_list *get_merge_bases_many_0(struct repository *r, + struct commit *one, int n, struct commit **twos, int cleanup) @@ -223,7 +227,7 @@ static struct commit_list *get_merge_bases_many_0(struct commit *one, struct commit_list *result; int cnt, i; - result = merge_bases_many(one, n, twos); + result = merge_bases_many(r, one, n, twos); for (i = 0; i < n; i++) { if (one == twos[i]) return result; @@ -246,7 +250,7 @@ static struct commit_list *get_merge_bases_many_0(struct commit *one, clear_commit_marks(one, all_flags); clear_commit_marks_many(n, twos, all_flags); - cnt = remove_redundant(rslt, cnt); + cnt = remove_redundant(r, rslt, cnt); result = NULL; for (i = 0; i < cnt; i++) commit_list_insert_by_date(rslt[i], &result); @@ -254,23 +258,27 @@ static struct commit_list *get_merge_bases_many_0(struct commit *one, return result; } -struct commit_list *get_merge_bases_many(struct commit *one, - int n, - struct commit **twos) +struct commit_list 
*repo_get_merge_bases_many(struct repository *r, + struct commit *one, + int n, + struct commit **twos) { - return get_merge_bases_many_0(one, n, twos, 1); + return get_merge_bases_many_0(r, one, n, twos, 1); } -struct commit_list *get_merge_bases_many_dirty(struct commit *one, - int n, - struct commit **twos) +struct commit_list *repo_get_merge_bases_many_dirty(struct repository *r, + struct commit *one, + int n, + struct commit **twos) { - return get_merge_bases_many_0(one, n, twos, 0); + return get_merge_bases_many_0(r, one, n, twos, 0); } -struct commit_list *get_merge_bases(struct commit *one, struct commit *two) +struct commit_list *repo_get_merge_bases(struct repository *r, + struct commit *one, + struct commit *two) { - return get_merge_bases_many_0(one, 1, &two, 1); + return get_merge_bases_many_0(r, one, 1, &two, 1); } /* @@ -304,16 +312,17 @@ int is_descendant_of(struct commit *commit, struct commit_list *with_commit) /* * Is "commit" an ancestor of one of the "references"? */ -int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference) +int repo_in_merge_bases_many(struct repository *r, struct commit *commit, + int nr_reference, struct commit **reference) { struct commit_list *bases; int ret = 0, i; uint32_t min_generation = GENERATION_NUMBER_INFINITY; - if (parse_commit(commit)) + if (repo_parse_commit(r, commit)) return ret; for (i = 0; i < nr_reference; i++) { - if (parse_commit(reference[i])) + if (repo_parse_commit(r, reference[i])) return ret; if (reference[i]->generation < min_generation) min_generation = reference[i]->generation; @@ -322,7 +331,9 @@ int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit * if (commit->generation > min_generation) return ret; - bases = paint_down_to_common(commit, nr_reference, reference, commit->generation); + bases = paint_down_to_common(r, commit, + nr_reference, reference, + commit->generation); if (commit->object.flags & PARENT2) ret = 1; clear_commit_marks(commit, all_flags); @@ -334,9 +345,11 @@ int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit * /* * Is "commit" an ancestor of (i.e. reachable from) the "reference"? 
*/ -int in_merge_bases(struct commit *commit, struct commit *reference) +int repo_in_merge_bases(struct repository *r, + struct commit *commit, + struct commit *reference) { - return in_merge_bases_many(commit, 1, &reference); + return repo_in_merge_bases_many(r, commit, 1, &reference); } struct commit_list *reduce_heads(struct commit_list *heads) @@ -365,7 +378,7 @@ struct commit_list *reduce_heads(struct commit_list *heads) p->item->object.flags &= ~STALE; } } - num_head = remove_redundant(array, num_head); + num_head = remove_redundant(the_repository, array, num_head); for (i = 0; i < num_head; i++) tail = &commit_list_insert(array[i], tail)->next; free(array); diff --git a/commit-reach.h b/commit-reach.h index fb8082a2ec..99a43e8b64 100644 --- a/commit-reach.h +++ b/commit-reach.h @@ -9,21 +9,35 @@ struct ref_filter; struct object_id; struct object_array; -struct commit_list *get_merge_bases_many(struct commit *one, - int n, - struct commit **twos); -struct commit_list *get_merge_bases_many_dirty(struct commit *one, - int n, - struct commit **twos); -struct commit_list *get_merge_bases(struct commit *one, struct commit *two); -struct commit_list *get_octopus_merge_bases(struct commit_list *in); - +struct commit_list *repo_get_merge_bases(struct repository *r, + struct commit *rev1, + struct commit *rev2); +struct commit_list *repo_get_merge_bases_many(struct repository *r, + struct commit *one, int n, + struct commit **twos); /* To be used only when object flags after this call no longer matter */ -struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos); +struct commit_list *repo_get_merge_bases_many_dirty(struct repository *r, + struct commit *one, int n, + struct commit **twos); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define get_merge_bases(r1, r2) repo_get_merge_bases(the_repository, r1, r2) +#define get_merge_bases_many(one, n, two) repo_get_merge_bases_many(the_repository, one, n, two) +#define get_merge_bases_many_dirty(one, n, twos) repo_get_merge_bases_many_dirty(the_repository, one, n, twos) +#endif + +struct commit_list *get_octopus_merge_bases(struct commit_list *in); int is_descendant_of(struct commit *commit, struct commit_list *with_commit); -int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference); -int in_merge_bases(struct commit *commit, struct commit *reference); +int repo_in_merge_bases(struct repository *r, + struct commit *commit, + struct commit *reference); +int repo_in_merge_bases_many(struct repository *r, + struct commit *commit, + int nr_reference, struct commit **reference); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define in_merge_bases(c1, c2) repo_in_merge_bases(the_repository, c1, c2) +#define in_merge_bases_many(c1, n, cs) repo_in_merge_bases_many(the_repository, c1, n, cs) +#endif /* * Takes a list of commits and returns a new list where those @@ -299,13 +299,15 @@ const void *get_cached_commit_buffer(struct repository *r, const struct commit * return v->buffer; } -const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep) +const void *repo_get_commit_buffer(struct repository *r, + const struct commit *commit, + unsigned long *sizep) { - const void *ret = get_cached_commit_buffer(the_repository, commit, sizep); + const void *ret = get_cached_commit_buffer(r, commit, sizep); if (!ret) { enum object_type type; unsigned long size; - ret = read_object_file(&commit->object.oid, &type, &size); + ret = repo_read_object_file(r, 
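
The repo_in_merge_bases*() family above, together with the NO_THE_REPOSITORY_COMPATIBILITY_MACROS fallbacks, lets new call sites name the repository explicitly while existing callers keep compiling unchanged. A hypothetical caller could look like the sketch below; tip_contains() and the submodule framing are my illustration, not code from this series.

    #include "cache.h"
    #include "commit.h"
    #include "commit-reach.h"

    /*
     * Hypothetical helper: answer "is 'needle' reachable from 'tip'?" for
     * commits that live in an arbitrary repository 'r' (for example one
     * obtained for a submodule), without going through the_repository.
     */
    static int tip_contains(struct repository *r,
                            struct commit *tip, struct commit *needle)
    {
            return repo_in_merge_bases(r, needle, tip);
    }
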
&commit->object.oid, &type, &size); if (!ret) die("cannot read commit object %s", oid_to_hex(&commit->object.oid)); @@ -318,18 +320,20 @@ const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep) return ret; } -void unuse_commit_buffer(const struct commit *commit, const void *buffer) +void repo_unuse_commit_buffer(struct repository *r, + const struct commit *commit, + const void *buffer) { struct commit_buffer *v = buffer_slab_peek( - the_repository->parsed_objects->buffer_slab, commit); + r->parsed_objects->buffer_slab, commit); if (!(v && v->buffer == buffer)) free((void *)buffer); } -void free_commit_buffer(struct commit *commit) +void free_commit_buffer(struct parsed_object_pool *pool, struct commit *commit) { struct commit_buffer *v = buffer_slab_peek( - the_repository->parsed_objects->buffer_slab, commit); + pool->buffer_slab, commit); if (v) { FREE_AND_NULL(v->buffer); v->size = 0; @@ -352,13 +356,12 @@ struct object_id *get_commit_tree_oid(const struct commit *commit) return &get_commit_tree(commit)->object.oid; } -void release_commit_memory(struct commit *c) +void release_commit_memory(struct parsed_object_pool *pool, struct commit *c) { c->maybe_tree = NULL; c->index = 0; - free_commit_buffer(c); + free_commit_buffer(pool, c); free_commit_list(c->parents); - /* TODO: what about commit->util? */ c->object.parsed = 0; } @@ -445,7 +448,10 @@ int parse_commit_buffer(struct repository *r, struct commit *item, const void *b return 0; } -int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph) +int repo_parse_commit_internal(struct repository *r, + struct commit *item, + int quiet_on_missing, + int use_commit_graph) { enum object_type type; void *buffer; @@ -456,9 +462,9 @@ int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_com return -1; if (item->object.parsed) return 0; - if (use_commit_graph && parse_commit_in_graph(the_repository, item)) + if (use_commit_graph && parse_commit_in_graph(r, item)) return 0; - buffer = read_object_file(&item->object.oid, &type, &size); + buffer = repo_read_object_file(r, &item->object.oid, &type, &size); if (!buffer) return quiet_on_missing ? 
-1 : error("Could not read %s", @@ -469,18 +475,19 @@ int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_com oid_to_hex(&item->object.oid)); } - ret = parse_commit_buffer(the_repository, item, buffer, size, 0); + ret = parse_commit_buffer(r, item, buffer, size, 0); if (save_commit_buffer && !ret) { - set_commit_buffer(the_repository, item, buffer, size); + set_commit_buffer(r, item, buffer, size); return 0; } free(buffer); return ret; } -int parse_commit_gently(struct commit *item, int quiet_on_missing) +int repo_parse_commit_gently(struct repository *r, + struct commit *item, int quiet_on_missing) { - return parse_commit_internal(item, quiet_on_missing, 1); + return repo_parse_commit_internal(r, item, quiet_on_missing, 1); } void parse_commit_or_die(struct commit *item) @@ -80,12 +80,21 @@ struct commit *lookup_commit_reference_by_name(const char *name); struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref_name); int parse_commit_buffer(struct repository *r, struct commit *item, const void *buffer, unsigned long size, int check_graph); -int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph); -int parse_commit_gently(struct commit *item, int quiet_on_missing); -static inline int parse_commit(struct commit *item) +int repo_parse_commit_internal(struct repository *r, struct commit *item, + int quiet_on_missing, int use_commit_graph); +int repo_parse_commit_gently(struct repository *r, + struct commit *item, + int quiet_on_missing); +static inline int repo_parse_commit(struct repository *r, struct commit *item) { - return parse_commit_gently(item, 0); + return repo_parse_commit_gently(r, item, 0); } +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define parse_commit_internal(item, quiet, use) repo_parse_commit_internal(the_repository, item, quiet, use) +#define parse_commit_gently(item, quiet) repo_parse_commit_gently(the_repository, item, quiet) +#define parse_commit(item) repo_parse_commit(the_repository, item) +#endif + void parse_commit_or_die(struct commit *item); struct buffer_slab; @@ -109,7 +118,12 @@ const void *get_cached_commit_buffer(struct repository *, const struct commit *, * from disk. The resulting memory should not be modified, and must be given * to unuse_commit_buffer when the caller is done. */ -const void *get_commit_buffer(const struct commit *, unsigned long *size); +const void *repo_get_commit_buffer(struct repository *r, + const struct commit *, + unsigned long *size); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define get_commit_buffer(c, s) repo_get_commit_buffer(the_repository, c, s) +#endif /* * Tell the commit subsytem that we are done with a particular commit buffer. @@ -117,12 +131,17 @@ const void *get_commit_buffer(const struct commit *, unsigned long *size); * from an earlier call to get_commit_buffer. The buffer may or may not be * freed by this call; callers should not access the memory afterwards. */ -void unuse_commit_buffer(const struct commit *, const void *buffer); +void repo_unuse_commit_buffer(struct repository *r, + const struct commit *, + const void *buffer); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define unuse_commit_buffer(c, b) repo_unuse_commit_buffer(the_repository, c, b) +#endif /* * Free any cached object buffer associated with the commit. 
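
The commit buffer API follows the same conversion pattern: repo_get_commit_buffer() and repo_unuse_commit_buffer() should be paired against the same repository, since the buffer may be cached in that repository's parsed-object pool rather than freshly allocated. A small sketch, with a helper name of my own choosing:

    #include "cache.h"
    #include "commit.h"

    /*
     * Sketch: look at a commit's raw object buffer in repository 'r'.
     * The buffer may come from r's cache or from a fresh read; either
     * way, hand it back through the matching unuse call rather than
     * calling free() on it directly.
     */
    static void inspect_commit_buffer(struct repository *r, struct commit *c)
    {
            unsigned long size;
            const void *buf = repo_get_commit_buffer(r, c, &size);

            /* ... examine the 'size' bytes at 'buf' ... */

            repo_unuse_commit_buffer(r, c, buf);
    }
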
*/ -void free_commit_buffer(struct commit *); +void free_commit_buffer(struct parsed_object_pool *pool, struct commit *); struct tree *get_commit_tree(const struct commit *); struct object_id *get_commit_tree_oid(const struct commit *); @@ -131,7 +150,7 @@ struct object_id *get_commit_tree_oid(const struct commit *); * Release memory related to a commit, including the parent list and * any cached object buffer. */ -void release_commit_memory(struct commit *c); +void release_commit_memory(struct parsed_object_pool *pool, struct commit *c); /* * Disassociate any cached object buffer from the commit, but do not free it. @@ -162,6 +181,14 @@ extern int has_non_ascii(const char *text); extern const char *logmsg_reencode(const struct commit *commit, char **commit_encoding, const char *output_encoding); +const char *repo_logmsg_reencode(struct repository *r, + const struct commit *commit, + char **commit_encoding, + const char *output_encoding); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define logmsg_reencode(c, enc, out) repo_logmsg_reencode(the_repository, c, enc, out) +#endif + extern const char *skip_blank_lines(const char *msg); /** Removes the first commit from a list sorted by date, and adds all diff --git a/compat/mingw.c b/compat/mingw.c index b459e1a291..4276297595 100644 --- a/compat/mingw.c +++ b/compat/mingw.c @@ -7,6 +7,7 @@ #include "../cache.h" #include "win32/lazyload.h" #include "../config.h" +#include "dir.h" #define HCAST(type, handle) ((type)(intptr_t)handle) @@ -1031,7 +1032,7 @@ char *mingw_getcwd(char *pointer, int len) * See "Parsing C++ Command-Line Arguments" at Microsoft's Docs: * https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments */ -static const char *quote_arg(const char *arg) +static const char *quote_arg_msvc(const char *arg) { /* count chars to quote */ int len = 0, n = 0; @@ -1086,6 +1087,37 @@ static const char *quote_arg(const char *arg) return q; } +#include "quote.h" + +static const char *quote_arg_msys2(const char *arg) +{ + struct strbuf buf = STRBUF_INIT; + const char *p2 = arg, *p; + + for (p = arg; *p; p++) { + int ws = isspace(*p); + if (!ws && *p != '\\' && *p != '"' && *p != '{') + continue; + if (!buf.len) + strbuf_addch(&buf, '"'); + if (p != p2) + strbuf_add(&buf, p2, p - p2); + if (!ws && *p != '{') + strbuf_addch(&buf, '\\'); + p2 = p; + } + + if (p == arg) + strbuf_addch(&buf, '"'); + else if (!buf.len) + return arg; + else + strbuf_add(&buf, p2, p - p2), + + strbuf_addch(&buf, '"'); + return strbuf_detach(&buf, 0); +} + static const char *parse_interpreter(const char *cmd) { static char buf[100]; @@ -1317,6 +1349,47 @@ struct pinfo_t { static struct pinfo_t *pinfo = NULL; CRITICAL_SECTION pinfo_cs; +/* Used to match and chomp off path components */ +static inline int match_last_path_component(const char *path, size_t *len, + const char *component) +{ + size_t component_len = strlen(component); + if (*len < component_len + 1 || + !is_dir_sep(path[*len - component_len - 1]) || + fspathncmp(path + *len - component_len, component, component_len)) + return 0; + *len -= component_len + 1; + /* chomp off repeated dir separators */ + while (*len > 0 && is_dir_sep(path[*len - 1])) + (*len)--; + return 1; +} + +static int is_msys2_sh(const char *cmd) +{ + if (cmd && !strcmp(cmd, "sh")) { + static int ret = -1; + char *p; + + if (ret >= 0) + return ret; + + p = path_lookup(cmd, 0); + if (!p) + ret = 0; + else { + size_t len = strlen(p); + + ret = match_last_path_component(p, &len, "sh.exe") && + 
match_last_path_component(p, &len, "bin") && + match_last_path_component(p, &len, "usr"); + free(p); + } + return ret; + } + return 0; +} + static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaenv, const char *dir, int prepend_cmd, int fhin, int fhout, int fherr) @@ -1328,6 +1401,8 @@ static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaen unsigned flags = CREATE_UNICODE_ENVIRONMENT; BOOL ret; HANDLE cons; + const char *(*quote_arg)(const char *arg) = + is_msys2_sh(*argv) ? quote_arg_msys2 : quote_arg_msvc; do_unset_environment_variables(); @@ -2100,7 +2175,7 @@ static void stop_timer_thread(void) if (timer_event) SetEvent(timer_event); /* tell thread to terminate */ if (timer_thread) { - int rc = WaitForSingleObject(timer_thread, 1000); + int rc = WaitForSingleObject(timer_thread, 10000); if (rc == WAIT_TIMEOUT) error("timer thread did not terminate timely"); else if (rc != WAIT_OBJECT_0) diff --git a/compat/obstack.c b/compat/obstack.c index 4d1d95beeb..27cd5c1ea1 100644 --- a/compat/obstack.c +++ b/compat/obstack.c @@ -112,15 +112,15 @@ compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0); # define CALL_CHUNKFUN(h, size) \ (((h) -> use_extra_arg) \ - ? (*(h)->chunkfun) ((h)->extra_arg, (size)) \ - : (*(struct _obstack_chunk *(*) (long)) (h)->chunkfun) ((size))) + ? (*(h)->chunkfun.extra) ((h)->extra_arg, (size)) \ + : (*(h)->chunkfun.plain) ((size))) # define CALL_FREEFUN(h, old_chunk) \ do { \ if ((h) -> use_extra_arg) \ - (*(h)->freefun) ((h)->extra_arg, (old_chunk)); \ + (*(h)->freefun.extra) ((h)->extra_arg, (old_chunk)); \ else \ - (*(void (*) (void *)) (h)->freefun) ((old_chunk)); \ + (*(h)->freefun.plain) ((old_chunk)); \ } while (0) @@ -159,8 +159,8 @@ _obstack_begin (struct obstack *h, size = 4096 - extra; } - h->chunkfun = (struct _obstack_chunk * (*)(void *, long)) chunkfun; - h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun; + h->chunkfun.plain = chunkfun; + h->freefun.plain = freefun; h->chunk_size = size; h->alignment_mask = alignment - 1; h->use_extra_arg = 0; @@ -206,8 +206,9 @@ _obstack_begin_1 (struct obstack *h, int size, int alignment, size = 4096 - extra; } - h->chunkfun = (struct _obstack_chunk * (*)(void *,long)) chunkfun; - h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun; + h->chunkfun.extra = (struct _obstack_chunk * (*)(void *,long)) chunkfun; + h->freefun.extra = (void (*) (void *, struct _obstack_chunk *)) freefun; + h->chunk_size = size; h->alignment_mask = alignment - 1; h->extra_arg = arg; diff --git a/compat/obstack.h b/compat/obstack.h index 6bc24b7644..ced94d0118 100644 --- a/compat/obstack.h +++ b/compat/obstack.h @@ -160,11 +160,15 @@ struct obstack /* control current object in current chunk */ void *tempptr; } temp; /* Temporary for some macros. */ int alignment_mask; /* Mask of alignment for each object. */ - /* These prototypes vary based on `use_extra_arg', and we use - casts to the prototypeless function type in all assignments, - but having prototypes here quiets -Wstrict-prototypes. */ - struct _obstack_chunk *(*chunkfun) (void *, long); - void (*freefun) (void *, struct _obstack_chunk *); + /* These prototypes vary based on `use_extra_arg'. 
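
The effect of quote_arg_msys2() is easiest to see from a few worked examples. These are traced by hand from the loop above, so treat them as illustrative rather than as captured test output; they show why arguments headed for MSYS2's /usr/bin/sh.exe need POSIX-shell escaping instead of the MSVC runtime rules.

    /*
     * Hand-traced examples for quote_arg_msys2() (illustrative only):
     *
     *   foo          ->  foo                 (no special characters: passed through)
     *   a b          ->  "a b"               (whitespace forces quoting)
     *   C:\dir\file  ->  "C:\\dir\\file"     (backslashes doubled inside "...")
     *   say "hi"     ->  "say \"hi\""        (embedded double quotes escaped)
     *
     * quote_arg_msvc() applies the MSVC runtime's rules instead, which
     * treat backslashes and quotes differently, so mingw_spawnve_fd()
     * now selects the quoting function via is_msys2_sh(argv[0]).
     */
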
*/ + union { + void *(*plain) (long); + struct _obstack_chunk *(*extra) (void *, long); + } chunkfun; + union { + void (*plain) (void *); + void (*extra) (void *, struct _obstack_chunk *); + } freefun; void *extra_arg; /* first arg for chunk alloc/dealloc funcs */ unsigned use_extra_arg:1; /* chunk alloc/dealloc funcs take extra arg */ unsigned maybe_empty_object:1;/* There is a possibility that the current @@ -235,10 +239,10 @@ extern void (*obstack_alloc_failed_handler) (void); (void (*) (void *, void *)) (freefun), (arg)) #define obstack_chunkfun(h, newchunkfun) \ - ((h) -> chunkfun = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun)) + ((h)->chunkfun.extra = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun)) #define obstack_freefun(h, newfreefun) \ - ((h) -> freefun = (void (*)(void *, struct _obstack_chunk *)) (newfreefun)) + ((h)->freefun.extra = (void (*)(void *, struct _obstack_chunk *)) (newfreefun)) #define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = (achar)) diff --git a/compat/precompose_utf8.c b/compat/precompose_utf8.c index de61c15d34..136250fbf6 100644 --- a/compat/precompose_utf8.c +++ b/compat/precompose_utf8.c @@ -79,7 +79,7 @@ void precompose_argv(int argc, const char **argv) size_t namelen; oldarg = argv[i]; if (has_non_ascii(oldarg, (size_t)-1, &namelen)) { - newarg = reencode_string_iconv(oldarg, namelen, ic_precompose, NULL); + newarg = reencode_string_iconv(oldarg, namelen, ic_precompose, 0, NULL); if (newarg) argv[i] = newarg; } @@ -2565,7 +2565,6 @@ static ssize_t write_pair(int fd, const char *key, const char *value, * entry (which all are to be removed). */ static void maybe_remove_section(struct config_store_data *store, - const char *contents, size_t *begin_offset, size_t *end_offset, int *seen_ptr) { @@ -2850,7 +2849,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename, replace_end = store.parsed[j].end; copy_end = store.parsed[j].begin; if (!value) - maybe_remove_section(&store, contents, + maybe_remove_section(&store, ©_end, &replace_end, &i); /* @@ -296,7 +296,6 @@ struct ref **get_remote_heads(struct packet_reader *reader, struct ref **orig_list = list; int len = 0; enum get_remote_heads_state state = EXPECTING_FIRST_REF; - const char *arg; *list = NULL; @@ -306,8 +305,6 @@ struct ref **get_remote_heads(struct packet_reader *reader, die_initial_contact(1); case PACKET_READ_NORMAL: len = reader->pktlen; - if (len > 4 && skip_prefix(reader->line, "ERR ", &arg)) - die(_("remote error: %s"), arg); break; case PACKET_READ_FLUSH: state = EXPECTING_DONE; diff --git a/contrib/coccinelle/strbuf.cocci b/contrib/coccinelle/strbuf.cocci index e34eada1ad..d9ada69b43 100644 --- a/contrib/coccinelle/strbuf.cocci +++ b/contrib/coccinelle/strbuf.cocci @@ -13,6 +13,36 @@ constant fmt !~ "%"; ); @@ +expression E; +struct strbuf SB; +format F =~ "s"; +@@ +- strbuf_addf(E, "%@F@", SB.buf); ++ strbuf_addbuf(E, &SB); + +@@ +expression E; +struct strbuf *SBP; +format F =~ "s"; +@@ +- strbuf_addf(E, "%@F@", SBP->buf); ++ strbuf_addbuf(E, SBP); + +@@ +expression E; +struct strbuf SB; +@@ +- strbuf_addstr(E, SB.buf); ++ strbuf_addbuf(E, &SB); + +@@ +expression E; +struct strbuf *SBP; +@@ +- strbuf_addstr(E, SBP->buf); ++ strbuf_addbuf(E, SBP); + +@@ expression E1, E2; format F =~ "s"; @@ diff --git a/contrib/coccinelle/the_repository.pending.cocci b/contrib/coccinelle/the_repository.pending.cocci new file mode 100644 index 0000000000..2ee702ecf7 --- /dev/null +++ b/contrib/coccinelle/the_repository.pending.cocci @@ -0,0 +1,144 @@ +// 
This file is used for the ongoing refactoring of +// bringing the index or repository struct in all of +// our code base. + +@@ +expression E; +expression F; +expression G; +@@ +- read_object_file( ++ repo_read_object_file(the_repository, + E, F, G) + +@@ +expression E; +@@ +- has_sha1_file( ++ repo_has_sha1_file(the_repository, + E) + +@@ +expression E; +expression F; +@@ +- has_sha1_file_with_flags( ++ repo_has_sha1_file_with_flags(the_repository, + E) + +@@ +expression E; +@@ +- has_object_file( ++ repo_has_object_file(the_repository, + E) + +@@ +expression E; +expression F; +@@ +- has_object_file_with_flags( ++ repo_has_object_file_with_flags(the_repository, + E) + +@@ +expression E; +expression F; +expression G; +@@ +- parse_commit_internal( ++ repo_parse_commit_internal(the_repository, + E, F, G) + +@@ +expression E; +expression F; +@@ +- parse_commit_gently( ++ repo_parse_commit_gently(the_repository, + E, F) + +@@ +expression E; +@@ +- parse_commit( ++ repo_parse_commit(the_repository, + E) + +@@ +expression E; +expression F; +@@ +- get_merge_bases( ++ repo_get_merge_bases(the_repository, + E, F); + +@@ +expression E; +expression F; +expression G; +@@ +- get_merge_bases_many( ++ repo_get_merge_bases_many(the_repository, + E, F, G); + +@@ +expression E; +expression F; +expression G; +@@ +- get_merge_bases_many_dirty( ++ repo_get_merge_bases_many_dirty(the_repository, + E, F, G); + +@@ +expression E; +expression F; +@@ +- in_merge_bases( ++ repo_in_merge_bases(the_repository, + E, F); + +@@ +expression E; +expression F; +expression G; +@@ +- in_merge_bases_many( ++ repo_in_merge_bases_many(the_repository, + E, F, G); + +@@ +expression E; +expression F; +@@ +- get_commit_buffer( ++ repo_get_commit_buffer(the_repository, + E, F); + +@@ +expression E; +expression F; +@@ +- unuse_commit_buffer( ++ repo_unuse_commit_buffer(the_repository, + E, F); + +@@ +expression E; +expression F; +expression G; +@@ +- logmsg_reencode( ++ repo_logmsg_reencode(the_repository, + E, F, G); + +@@ +expression E; +expression F; +expression G; +expression H; +@@ +- format_commit_message( ++ repo_format_commit_message(the_repository, + E, F, G, H); @@ -1,4 +1,3 @@ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "object-store.h" @@ -92,7 +91,7 @@ static void gather_stats(const char *buf, unsigned long size, struct text_stat * * The same heuristics as diff.c::mmfile_is_binary() * We treat files with bare CR as binary */ -static int convert_is_binary(unsigned long size, const struct text_stat *stats) +static int convert_is_binary(const struct text_stat *stats) { if (stats->lonecr) return 1; @@ -110,7 +109,7 @@ static unsigned int gather_convert_stats(const char *data, unsigned long size) if (!data || !size) return 0; gather_stats(data, size, &stats); - if (convert_is_binary(size, &stats)) + if (convert_is_binary(&stats)) ret |= CONVERT_STAT_BITS_BIN; if (stats.crlf) ret |= CONVERT_STAT_BITS_TXT_CRLF; @@ -245,7 +244,7 @@ static int has_crlf_in_index(const struct index_state *istate, const char *path) return has_crlf; } -static int will_convert_lf_to_crlf(size_t len, struct text_stat *stats, +static int will_convert_lf_to_crlf(struct text_stat *stats, enum crlf_action crlf_action) { if (output_eol(crlf_action) != EOL_CRLF) @@ -260,7 +259,7 @@ static int will_convert_lf_to_crlf(size_t len, struct text_stat *stats, if (stats->lonecr || stats->crlf) return 0; - if (convert_is_binary(len, stats)) + if (convert_is_binary(stats)) return 0; } return 1; @@ -527,7 +526,7 @@ static 
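
To make the intent of the_repository.pending.cocci above concrete, this is what its read_object_file rule does to a call site once the semantic patch is applied with spatch; the surrounding variable names are hypothetical.

    /*
     * Example of the rewrite performed by the rule above (names are
     * hypothetical):
     *
     *   before:  buffer = read_object_file(&oid, &type, &size);
     *   after:   buffer = repo_read_object_file(the_repository,
     *                                           &oid, &type, &size);
     *
     * The other rules follow the same shape: each helper that implicitly
     * used the_repository becomes its repo_*() counterpart with
     * the_repository passed as the explicit first argument.
     */
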
int crlf_to_git(const struct index_state *istate, convert_crlf_into_lf = !!stats.crlf; if (crlf_action == CRLF_AUTO || crlf_action == CRLF_AUTO_INPUT || crlf_action == CRLF_AUTO_CRLF) { - if (convert_is_binary(len, &stats)) + if (convert_is_binary(&stats)) return 0; /* * If the file in the index has any CR in it, do not @@ -549,7 +548,7 @@ static int crlf_to_git(const struct index_state *istate, new_stats.crlf = 0; } /* simulate "git checkout" */ - if (will_convert_lf_to_crlf(len, &new_stats, crlf_action)) { + if (will_convert_lf_to_crlf(&new_stats, crlf_action)) { new_stats.crlf += new_stats.lonelf; new_stats.lonelf = 0; } @@ -591,7 +590,7 @@ static int crlf_to_git(const struct index_state *istate, return 1; } -static int crlf_to_worktree(const char *path, const char *src, size_t len, +static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf, enum crlf_action crlf_action) { char *to_free = NULL; @@ -601,7 +600,7 @@ static int crlf_to_worktree(const char *path, const char *src, size_t len, return 0; gather_stats(src, len, &stats); - if (!will_convert_lf_to_crlf(len, &stats, crlf_action)) + if (!will_convert_lf_to_crlf(&stats, crlf_action)) return 0; /* are we "faking" in place editing ? */ @@ -1091,7 +1090,7 @@ static int count_ident(const char *cp, unsigned long size) return cnt; } -static int ident_to_git(const char *path, const char *src, size_t len, +static int ident_to_git(const char *src, size_t len, struct strbuf *buf, int ident) { char *dst, *dollar; @@ -1135,7 +1134,7 @@ static int ident_to_git(const char *path, const char *src, size_t len, return 1; } -static int ident_to_worktree(const char *path, const char *src, size_t len, +static int ident_to_worktree(const char *src, size_t len, struct strbuf *buf, int ident) { struct object_id oid; @@ -1416,7 +1415,7 @@ int convert_to_git(const struct index_state *istate, len = dst->len; } } - return ret | ident_to_git(path, src, len, dst, ca.ident); + return ret | ident_to_git(src, len, dst, ca.ident); } void convert_to_git_filter_fd(const struct index_state *istate, @@ -1434,7 +1433,7 @@ void convert_to_git_filter_fd(const struct index_state *istate, encode_to_git(path, dst->buf, dst->len, dst, ca.working_tree_encoding, conv_flags); crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags); - ident_to_git(path, dst->buf, dst->len, dst, ca.ident); + ident_to_git(dst->buf, dst->len, dst, ca.ident); } static int convert_to_working_tree_internal(const struct index_state *istate, @@ -1447,7 +1446,7 @@ static int convert_to_working_tree_internal(const struct index_state *istate, convert_attrs(istate, &ca, path); - ret |= ident_to_worktree(path, src, len, dst, ca.ident); + ret |= ident_to_worktree(src, len, dst, ca.ident); if (ret) { src = dst->buf; len = dst->len; @@ -1458,7 +1457,7 @@ static int convert_to_working_tree_internal(const struct index_state *istate, * support smudge). The filters might expect CRLFs. */ if ((ca.drv && (ca.drv->smudge || ca.drv->process)) || !normalizing) { - ret |= crlf_to_worktree(path, src, len, dst, ca.crlf_action); + ret |= crlf_to_worktree(src, len, dst, ca.crlf_action); if (ret) { src = dst->buf; len = dst->len; @@ -77,22 +77,16 @@ static struct tm *time_to_tm_local(timestamp_t time) } /* - * What value of "tz" was in effect back then at "time" in the - * local timezone? + * Fill in the localtime 'struct tm' for the supplied time, + * and return the local tz. 
*/ -static int local_tzoffset(timestamp_t time) +static int local_time_tzoffset(time_t t, struct tm *tm) { - time_t t, t_local; - struct tm tm; + time_t t_local; int offset, eastwest; - if (date_overflows(time)) - die("Timestamp too large for this system: %"PRItime, time); - - t = (time_t)time; - localtime_r(&t, &tm); - t_local = tm_to_time_t(&tm); - + localtime_r(&t, tm); + t_local = tm_to_time_t(tm); if (t_local == -1) return 0; /* error; just use +0000 */ if (t_local < t) { @@ -107,9 +101,36 @@ static int local_tzoffset(timestamp_t time) return offset * eastwest; } -void show_date_relative(timestamp_t time, int tz, - const struct timeval *now, - struct strbuf *timebuf) +/* + * What value of "tz" was in effect back then at "time" in the + * local timezone? + */ +static int local_tzoffset(timestamp_t time) +{ + struct tm tm; + + if (date_overflows(time)) + die("Timestamp too large for this system: %"PRItime, time); + + return local_time_tzoffset((time_t)time, &tm); +} + +static void get_time(struct timeval *now) +{ + const char *x; + + x = getenv("GIT_TEST_DATE_NOW"); + if (x) { + now->tv_sec = atoi(x); + now->tv_usec = 0; + } + else + gettimeofday(now, NULL); +} + +void show_date_relative(timestamp_t time, + const struct timeval *now, + struct strbuf *timebuf) { timestamp_t diff; if (now->tv_sec < time) { @@ -191,9 +212,80 @@ struct date_mode *date_mode_from_type(enum date_mode_type type) return &mode; } +static void show_date_normal(struct strbuf *buf, timestamp_t time, struct tm *tm, int tz, struct tm *human_tm, int human_tz, int local) +{ + struct { + unsigned int year:1, + date:1, + wday:1, + time:1, + seconds:1, + tz:1; + } hide = { 0 }; + + hide.tz = local || tz == human_tz; + hide.year = tm->tm_year == human_tm->tm_year; + if (hide.year) { + if (tm->tm_mon == human_tm->tm_mon) { + if (tm->tm_mday > human_tm->tm_mday) { + /* Future date: think timezones */ + } else if (tm->tm_mday == human_tm->tm_mday) { + hide.date = hide.wday = 1; + } else if (tm->tm_mday + 5 > human_tm->tm_mday) { + /* Leave just weekday if it was a few days ago */ + hide.date = 1; + } + } + } + + /* Show "today" times as just relative times */ + if (hide.wday) { + struct timeval now; + get_time(&now); + show_date_relative(time, &now, buf); + return; + } + + /* + * Always hide seconds for human-readable. + * Hide timezone if showing date. + * Hide weekday and time if showing year. + * + * The logic here is two-fold: + * (a) only show details when recent enough to matter + * (b) keep the maximum length "similar", and in check + */ + if (human_tm->tm_year) { + hide.seconds = 1; + hide.tz |= !hide.date; + hide.wday = hide.time = !hide.year; + } + + if (!hide.wday) + strbuf_addf(buf, "%.3s ", weekday_names[tm->tm_wday]); + if (!hide.date) + strbuf_addf(buf, "%.3s %d ", month_names[tm->tm_mon], tm->tm_mday); + + /* Do we want AM/PM depending on locale? 
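Aside: the "human" output is still produced through the same show_date() entry point; the hide flags above only decide how much of the date to print relative to the current time returned by get_time(). A small usage sketch, illustrative only (print_when() is invented for the example; parse_date_format() and show_date() are the functions shown in this file, and "human" relies on the DATE_HUMAN handling added further down):

    #include "cache.h"

    static void print_when(timestamp_t t, int tz)
    {
        struct date_mode mode = { DATE_NORMAL };

        /* "auto:human" would pick "human" only when writing to a tty/pager */
        parse_date_format("human", &mode);
        puts(show_date(t, tz, &mode));
    }
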
*/ + if (!hide.time) { + strbuf_addf(buf, "%02d:%02d", tm->tm_hour, tm->tm_min); + if (!hide.seconds) + strbuf_addf(buf, ":%02d", tm->tm_sec); + } else + strbuf_rtrim(buf); + + if (!hide.year) + strbuf_addf(buf, " %d", tm->tm_year + 1900); + + if (!hide.tz) + strbuf_addf(buf, " %+05d", tz); +} + const char *show_date(timestamp_t time, int tz, const struct date_mode *mode) { struct tm *tm; + struct tm human_tm = { 0 }; + int human_tz = -1; static struct strbuf timebuf = STRBUF_INIT; if (mode->type == DATE_UNIX) { @@ -202,6 +294,15 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode) return timebuf.buf; } + if (mode->type == DATE_HUMAN) { + struct timeval now; + + get_time(&now); + + /* Fill in the data for "current time" in human_tz and human_tm */ + human_tz = local_time_tzoffset(now.tv_sec, &human_tm); + } + if (mode->local) tz = local_tzoffset(time); @@ -215,8 +316,8 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode) struct timeval now; strbuf_reset(&timebuf); - gettimeofday(&now, NULL); - show_date_relative(time, tz, &now, &timebuf); + get_time(&now); + show_date_relative(time, &now, &timebuf); return timebuf.buf; } @@ -258,14 +359,7 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode) strbuf_addftime(&timebuf, mode->strftime_fmt, tm, tz, !mode->local); else - strbuf_addf(&timebuf, "%.3s %.3s %d %02d:%02d:%02d %d%c%+05d", - weekday_names[tm->tm_wday], - month_names[tm->tm_mon], - tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec, - tm->tm_year + 1900, - mode->local ? 0 : ' ', - tz); + show_date_normal(&timebuf, time, tm, tz, &human_tm, human_tz, mode->local); return timebuf.buf; } @@ -819,6 +913,8 @@ static enum date_mode_type parse_date_type(const char *format, const char **end) return DATE_SHORT; if (skip_prefix(format, "default", end)) return DATE_NORMAL; + if (skip_prefix(format, "human", end)) + return DATE_HUMAN; if (skip_prefix(format, "raw", end)) return DATE_RAW; if (skip_prefix(format, "unix", end)) @@ -833,6 +929,14 @@ void parse_date_format(const char *format, struct date_mode *mode) { const char *p; + /* "auto:foo" is "if tty/pager, then foo, otherwise normal" */ + if (skip_prefix(format, "auto:", &p)) { + if (isatty(1) || pager_in_use()) + format = p; + else + format = "default"; + } + /* historical alias */ if (!strcmp(format, "local")) format = "default-local"; @@ -1205,7 +1309,7 @@ timestamp_t approxidate_careful(const char *date, int *error_ret) return timestamp; } - gettimeofday(&tv, NULL); + get_time(&tv); return approxidate_str(date, &tv, error_ret); } @@ -5962,8 +5962,10 @@ static void diff_flush_patch_all_file_pairs(struct diff_options *o) for (i = 0; i < esm.nr; i++) free((void *)esm.buf[i].line); + esm.nr = 0; + + o->emitted_symbols = NULL; } - esm.nr = 0; } void diff_flush(struct diff_options *options) @@ -7,7 +7,6 @@ * Copyright (C) Linus Torvalds, 2005-2006 * Junio Hamano, 2005-2006 */ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "dir.h" diff --git a/fetch-pack.c b/fetch-pack.c index a92621a388..812be15d7e 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -135,38 +135,42 @@ enum ack_type { ACK_ready }; -static void consume_shallow_list(struct fetch_pack_args *args, int fd) +static void consume_shallow_list(struct fetch_pack_args *args, + struct packet_reader *reader) { if (args->stateless_rpc && args->deepen) { /* If we sent a depth we will get back "duplicate" * shallow and unshallow commands every time there * is a block of have 
lines exchanged. */ - char *line; - while ((line = packet_read_line(fd, NULL))) { - if (starts_with(line, "shallow ")) + while (packet_reader_read(reader) == PACKET_READ_NORMAL) { + if (starts_with(reader->line, "shallow ")) continue; - if (starts_with(line, "unshallow ")) + if (starts_with(reader->line, "unshallow ")) continue; die(_("git fetch-pack: expected shallow list")); } + if (reader->status != PACKET_READ_FLUSH) + die(_("git fetch-pack: expected a flush packet after shallow list")); } } -static enum ack_type get_ack(int fd, struct object_id *result_oid) +static enum ack_type get_ack(struct packet_reader *reader, + struct object_id *result_oid) { int len; - char *line = packet_read_line(fd, &len); const char *arg; - if (!line) + if (packet_reader_read(reader) != PACKET_READ_NORMAL) die(_("git fetch-pack: expected ACK/NAK, got a flush packet")); - if (!strcmp(line, "NAK")) + len = reader->pktlen; + + if (!strcmp(reader->line, "NAK")) return NAK; - if (skip_prefix(line, "ACK ", &arg)) { + if (skip_prefix(reader->line, "ACK ", &arg)) { if (!get_oid_hex(arg, result_oid)) { arg += 40; - len -= arg - line; + len -= arg - reader->line; if (len < 1) return ACK; if (strstr(arg, "continue")) @@ -178,9 +182,7 @@ static enum ack_type get_ack(int fd, struct object_id *result_oid) return ACK; } } - if (skip_prefix(line, "ERR ", &arg)) - die(_("remote error: %s"), arg); - die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line); + die(_("git fetch-pack: expected ACK/NAK, got '%s'"), reader->line); } static void send_request(struct fetch_pack_args *args, @@ -248,10 +250,15 @@ static int find_common(struct fetch_negotiator *negotiator, int got_ready = 0; struct strbuf req_buf = STRBUF_INIT; size_t state_len = 0; + struct packet_reader reader; if (args->stateless_rpc && multi_ack == 1) die(_("--stateless-rpc requires multi_ack_detailed")); + packet_reader_init(&reader, fd[0], NULL, 0, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_DIE_ON_ERR_PACKET); + if (!args->no_dependents) { mark_tips(negotiator, args->negotiation_tips); for_each_cached_alternate(negotiator, insert_one_alternate_object); @@ -329,38 +336,42 @@ static int find_common(struct fetch_negotiator *negotiator, packet_buf_write(&req_buf, "deepen-not %s", s->string); } } - if (server_supports_filtering && args->filter_options.choice) + if (server_supports_filtering && args->filter_options.choice) { + struct strbuf expanded_filter_spec = STRBUF_INIT; + expand_list_objects_filter_spec(&args->filter_options, + &expanded_filter_spec); packet_buf_write(&req_buf, "filter %s", - args->filter_options.filter_spec); + expanded_filter_spec.buf); + strbuf_release(&expanded_filter_spec); + } packet_buf_flush(&req_buf); state_len = req_buf.len; if (args->deepen) { - char *line; const char *arg; struct object_id oid; send_request(args, fd[1], &req_buf); - while ((line = packet_read_line(fd[0], NULL))) { - if (skip_prefix(line, "shallow ", &arg)) { + while (packet_reader_read(&reader) == PACKET_READ_NORMAL) { + if (skip_prefix(reader.line, "shallow ", &arg)) { if (get_oid_hex(arg, &oid)) - die(_("invalid shallow line: %s"), line); + die(_("invalid shallow line: %s"), reader.line); register_shallow(the_repository, &oid); continue; } - if (skip_prefix(line, "unshallow ", &arg)) { + if (skip_prefix(reader.line, "unshallow ", &arg)) { if (get_oid_hex(arg, &oid)) - die(_("invalid unshallow line: %s"), line); + die(_("invalid unshallow line: %s"), reader.line); if (!lookup_object(the_repository, oid.hash)) - die(_("object not found: %s"), line); + die(_("object 
not found: %s"), reader.line); /* make sure that it is parsed as shallow */ if (!parse_object(the_repository, &oid)) - die(_("error in object: %s"), line); + die(_("error in object: %s"), reader.line); if (unregister_shallow(&oid)) - die(_("no shallow found: %s"), line); + die(_("no shallow found: %s"), reader.line); continue; } - die(_("expected shallow/unshallow, got %s"), line); + die(_("expected shallow/unshallow, got %s"), reader.line); } } else if (!args->stateless_rpc) send_request(args, fd[1], &req_buf); @@ -397,9 +408,9 @@ static int find_common(struct fetch_negotiator *negotiator, if (!args->stateless_rpc && count == INITIAL_FLUSH) continue; - consume_shallow_list(args, fd[0]); + consume_shallow_list(args, &reader); do { - ack = get_ack(fd[0], result_oid); + ack = get_ack(&reader, result_oid); if (ack) print_verbose(args, _("got %s %d %s"), "ack", ack, oid_to_hex(result_oid)); @@ -469,9 +480,9 @@ done: strbuf_release(&req_buf); if (!got_ready || !no_done) - consume_shallow_list(args, fd[0]); + consume_shallow_list(args, &reader); while (flushes || multi_ack) { - int ack = get_ack(fd[0], result_oid); + int ack = get_ack(&reader, result_oid); if (ack) { print_verbose(args, _("got %s (%d) %s"), "ack", ack, oid_to_hex(result_oid)); @@ -1086,7 +1097,8 @@ static int add_haves(struct fetch_negotiator *negotiator, static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out, const struct fetch_pack_args *args, const struct ref *wants, struct oidset *common, - int *haves_to_send, int *in_vain) + int *haves_to_send, int *in_vain, + int sideband_all) { int ret = 0; struct strbuf req_buf = STRBUF_INIT; @@ -1112,6 +1124,8 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out, packet_buf_write(&req_buf, "include-tag"); if (prefer_ofs_delta) packet_buf_write(&req_buf, "ofs-delta"); + if (sideband_all) + packet_buf_write(&req_buf, "sideband-all"); /* Add shallow-info and deepen request */ if (server_supports_feature("fetch", "shallow", 0)) @@ -1122,9 +1136,13 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out, /* Add filter */ if (server_supports_feature("fetch", "filter", 0) && args->filter_options.choice) { + struct strbuf expanded_filter_spec = STRBUF_INIT; print_verbose(args, _("Server supports filter")); + expand_list_objects_filter_spec(&args->filter_options, + &expanded_filter_spec); packet_buf_write(&req_buf, "filter %s", - args->filter_options.filter_spec); + expanded_filter_spec.buf); + strbuf_release(&expanded_filter_spec); } else if (args->filter_options.choice) { warning("filtering not recognized by server, ignoring"); } @@ -1327,7 +1345,13 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, struct fetch_negotiator negotiator; fetch_negotiator_init(&negotiator, negotiation_algorithm); packet_reader_init(&reader, fd[0], NULL, 0, - PACKET_READ_CHOMP_NEWLINE); + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_DIE_ON_ERR_PACKET); + if (git_env_bool("GIT_TEST_SIDEBAND_ALL", 1) && + server_supports_feature("fetch", "sideband-all", 0)) { + reader.use_sideband = 1; + reader.me = "fetch-pack"; + } while (state != FETCH_DONE) { switch (state) { @@ -1361,7 +1385,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, case FETCH_SEND_REQUEST: if (send_fetch_request(&negotiator, fd[1], args, ref, &common, - &haves_to_send, &in_vain)) + &haves_to_send, &in_vain, + reader.use_sideband)) state = FETCH_GET_PACK; else state = FETCH_PROCESS_ACKS; diff --git a/fuzz-commit-graph.c b/fuzz-commit-graph.c 
new file mode 100644 index 0000000000..cf790c9d04 --- /dev/null +++ b/fuzz-commit-graph.c @@ -0,0 +1,16 @@ +#include "commit-graph.h" + +struct commit_graph *parse_commit_graph(void *graph_map, int fd, + size_t graph_size); + +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size); + +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) +{ + struct commit_graph *g; + + g = parse_commit_graph((void *)data, -1, size); + free(g); + + return 0; +} diff --git a/git-bisect.sh b/git-bisect.sh index 54cbfecc5a..efee12b8b1 100755 --- a/git-bisect.sh +++ b/git-bisect.sh @@ -71,122 +71,7 @@ bisect_autostart() { } bisect_start() { - # - # Check for one bad and then some good revisions. - # - has_double_dash=0 - for arg; do - case "$arg" in --) has_double_dash=1; break ;; esac - done - orig_args=$(git rev-parse --sq-quote "$@") - bad_seen=0 - eval='' - must_write_terms=0 - revs='' - if test "z$(git rev-parse --is-bare-repository)" != zfalse - then - mode=--no-checkout - else - mode='' - fi - while [ $# -gt 0 ]; do - arg="$1" - case "$arg" in - --) - shift - break - ;; - --no-checkout) - mode=--no-checkout - shift ;; - --term-good|--term-old) - shift - must_write_terms=1 - TERM_GOOD=$1 - shift ;; - --term-good=*|--term-old=*) - must_write_terms=1 - TERM_GOOD=${1#*=} - shift ;; - --term-bad|--term-new) - shift - must_write_terms=1 - TERM_BAD=$1 - shift ;; - --term-bad=*|--term-new=*) - must_write_terms=1 - TERM_BAD=${1#*=} - shift ;; - --*) - die "$(eval_gettext "unrecognised option: '\$arg'")" ;; - *) - rev=$(git rev-parse -q --verify "$arg^{commit}") || { - test $has_double_dash -eq 1 && - die "$(eval_gettext "'\$arg' does not appear to be a valid revision")" - break - } - revs="$revs $rev" - shift - ;; - esac - done - - for rev in $revs - do - # The user ran "git bisect start <sha1> - # <sha1>", hence did not explicitly specify - # the terms, but we are already starting to - # set references named with the default terms, - # and won't be able to change afterwards. - must_write_terms=1 - - case $bad_seen in - 0) state=$TERM_BAD ; bad_seen=1 ;; - *) state=$TERM_GOOD ;; - esac - eval="$eval bisect_write '$state' '$rev' 'nolog' &&" - done - # - # Verify HEAD. - # - head=$(GIT_DIR="$GIT_DIR" git symbolic-ref -q HEAD) || - head=$(GIT_DIR="$GIT_DIR" git rev-parse --verify HEAD) || - die "$(gettext "Bad HEAD - I need a HEAD")" - - # - # Check if we are bisecting. - # - start_head='' - if test -s "$GIT_DIR/BISECT_START" - then - # Reset to the rev from where we started. - start_head=$(cat "$GIT_DIR/BISECT_START") - if test "z$mode" != "z--no-checkout" - then - git checkout "$start_head" -- || - die "$(eval_gettext "Checking out '\$start_head' failed. Try 'git bisect reset <valid-branch>'.")" - fi - else - # Get rev from where we start. - case "$head" in - refs/heads/*|$_x40) - # This error message should only be triggered by - # cogito usage, and cogito users should understand - # it relates to cg-seek. - [ -s "$GIT_DIR/head-name" ] && - die "$(gettext "won't bisect on cg-seek'ed tree")" - start_head="${head#refs/heads/}" - ;; - *) - die "$(gettext "Bad HEAD - strange symbolic ref")" - ;; - esac - fi - - # - # Get rid of any old bisect state. - # - git bisect--helper --bisect-clean-state || exit + git bisect--helper --bisect-start $@ || exit # # Change state. @@ -199,44 +84,14 @@ bisect_start() { trap 'exit 255' 1 2 3 15 # - # Write new start state. 
- # - echo "$start_head" >"$GIT_DIR/BISECT_START" && { - test "z$mode" != "z--no-checkout" || - git update-ref --no-deref BISECT_HEAD "$start_head" - } && - git rev-parse --sq-quote "$@" >"$GIT_DIR/BISECT_NAMES" && - eval "$eval true" && - if test $must_write_terms -eq 1 - then - git bisect--helper --write-terms "$TERM_BAD" "$TERM_GOOD" || exit - fi && - echo "git bisect start$orig_args" >>"$GIT_DIR/BISECT_LOG" || exit - # # Check if we can proceed to the next bisect state. # + get_terms bisect_auto_next trap '-' 0 } -bisect_write() { - state="$1" - rev="$2" - nolog="$3" - case "$state" in - "$TERM_BAD") - tag="$state" ;; - "$TERM_GOOD"|skip) - tag="$state"-"$rev" ;; - *) - die "$(eval_gettext "Bad bisect_write argument: \$state")" ;; - esac - git update-ref "refs/bisect/$tag" "$rev" || exit - echo "# $state: $(git show-branch $rev)" >>"$GIT_DIR/BISECT_LOG" - test -n "$nolog" || echo "git bisect $state $rev" >>"$GIT_DIR/BISECT_LOG" -} - bisect_skip() { all='' for arg in "$@" @@ -255,7 +110,8 @@ bisect_skip() { bisect_state() { bisect_autostart state=$1 - check_and_set_terms $state + git bisect--helper --check-and-set-terms $state $TERM_GOOD $TERM_BAD || exit + get_terms case "$#,$state" in 0,*) die "Please call 'bisect_state' with at least one argument." ;; @@ -263,7 +119,7 @@ bisect_state() { bisected_head=$(bisect_head) rev=$(git rev-parse --verify "$bisected_head") || die "$(eval_gettext "Bad rev input: \$bisected_head")" - bisect_write "$state" "$rev" + git bisect--helper --bisect-write "$state" "$rev" "$TERM_GOOD" "$TERM_BAD" || exit git bisect--helper --check-expected-revs "$rev" ;; 2,"$TERM_BAD"|*,"$TERM_GOOD"|*,skip) shift @@ -276,7 +132,7 @@ bisect_state() { done for rev in $hash_list do - bisect_write "$state" "$rev" + git bisect--helper --bisect-write "$state" "$rev" "$TERM_GOOD" "$TERM_BAD" || exit done git bisect--helper --check-expected-revs $hash_list ;; *,"$TERM_BAD") @@ -287,59 +143,14 @@ bisect_state() { bisect_auto_next } -bisect_next_check() { - missing_good= missing_bad= - git show-ref -q --verify refs/bisect/$TERM_BAD || missing_bad=t - test -n "$(git for-each-ref "refs/bisect/$TERM_GOOD-*")" || missing_good=t - - case "$missing_good,$missing_bad,$1" in - ,,*) - : have both $TERM_GOOD and $TERM_BAD - ok - ;; - *,) - # do not have both but not asked to fail - just report. - false - ;; - t,,"$TERM_GOOD") - # have bad (or new) but not good (or old). we could bisect although - # this is less optimum. - eval_gettextln "Warning: bisecting only with a \$TERM_BAD commit." >&2 - if test -t 0 - then - # TRANSLATORS: Make sure to include [Y] and [n] in your - # translation. The program will only accept English input - # at this point. - gettext "Are you sure [Y/n]? " >&2 - read yesno - case "$yesno" in [Nn]*) exit 1 ;; esac - fi - : bisect without $TERM_GOOD... - ;; - *) - bad_syn=$(bisect_voc bad) - good_syn=$(bisect_voc good) - if test -s "$GIT_DIR/BISECT_START" - then - - eval_gettextln "You need to give me at least one \$bad_syn and one \$good_syn revision. -(You can use \"git bisect \$bad_syn\" and \"git bisect \$good_syn\" for that.)" >&2 - else - eval_gettextln "You need to start by \"git bisect start\". -You then need to give me at least one \$good_syn and one \$bad_syn revision. 
-(You can use \"git bisect \$bad_syn\" and \"git bisect \$good_syn\" for that.)" >&2 - fi - exit 1 ;; - esac -} - bisect_auto_next() { - bisect_next_check && bisect_next || : + git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD && bisect_next || : } bisect_next() { case "$#" in 0) ;; *) usage ;; esac bisect_autostart - bisect_next_check $TERM_GOOD + git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD $TERM_GOOD|| exit # Perform all bisection computation, display and checkout git bisect--helper --next-all $(test -f "$GIT_DIR/BISECT_HEAD" && echo --no-checkout) @@ -371,7 +182,7 @@ bisect_next() { } bisect_visualize() { - bisect_next_check fail + git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit if test $# = 0 then @@ -393,35 +204,11 @@ bisect_visualize() { eval '"$@"' --bisect -- $(cat "$GIT_DIR/BISECT_NAMES") } -bisect_reset() { - test -s "$GIT_DIR/BISECT_START" || { - gettextln "We are not bisecting." - return - } - case "$#" in - 0) branch=$(cat "$GIT_DIR/BISECT_START") ;; - 1) git rev-parse --quiet --verify "$1^{commit}" >/dev/null || { - invalid="$1" - die "$(eval_gettext "'\$invalid' is not a valid commit")" - } - branch="$1" ;; - *) - usage ;; - esac - - if ! test -f "$GIT_DIR/BISECT_HEAD" && ! git checkout "$branch" -- - then - die "$(eval_gettext "Could not check out original HEAD '\$branch'. -Try 'git bisect reset <commit>'.")" - fi - git bisect--helper --bisect-clean-state || exit -} - bisect_replay () { file="$1" test "$#" -eq 1 || die "$(gettext "No logfile given")" test -r "$file" || die "$(eval_gettext "cannot read \$file for replaying")" - bisect_reset + git bisect--helper --bisect-reset || exit while read git bisect command rev do test "$git $bisect" = "git bisect" || test "$git" = "git-bisect" || continue @@ -431,15 +218,16 @@ bisect_replay () { command="$bisect" fi get_terms - check_and_set_terms "$command" + git bisect--helper --check-and-set-terms "$command" "$TERM_GOOD" "$TERM_BAD" || exit + get_terms case "$command" in start) cmd="bisect_start $rev" eval "$cmd" ;; "$TERM_GOOD"|"$TERM_BAD"|skip) - bisect_write "$command" "$rev" ;; + git bisect--helper --bisect-write "$command" "$rev" "$TERM_GOOD" "$TERM_BAD" || exit;; terms) - bisect_terms $rev ;; + git bisect--helper --bisect-terms $rev || exit;; *) die "$(gettext "?? what are you talking about?")" ;; esac @@ -448,7 +236,7 @@ bisect_replay () { } bisect_run () { - bisect_next_check fail + git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit test -n "$*" || die "$(gettext "bisect run failed: no command provided.")" @@ -523,74 +311,6 @@ get_terms () { fi } -check_and_set_terms () { - cmd="$1" - case "$cmd" in - skip|start|terms) ;; - *) - if test -s "$GIT_DIR/BISECT_TERMS" && test "$cmd" != "$TERM_BAD" && test "$cmd" != "$TERM_GOOD" - then - die "$(eval_gettext "Invalid command: you're currently in a \$TERM_BAD/\$TERM_GOOD bisect.")" - fi - case "$cmd" in - bad|good) - if ! test -s "$GIT_DIR/BISECT_TERMS" - then - TERM_BAD=bad - TERM_GOOD=good - git bisect--helper --write-terms "$TERM_BAD" "$TERM_GOOD" || exit - fi - ;; - new|old) - if ! test -s "$GIT_DIR/BISECT_TERMS" - then - TERM_BAD=new - TERM_GOOD=old - git bisect--helper --write-terms "$TERM_BAD" "$TERM_GOOD" || exit - fi - ;; - esac ;; - esac -} - -bisect_voc () { - case "$1" in - bad) echo "bad|new" ;; - good) echo "good|old" ;; - esac -} - -bisect_terms () { - get_terms - if ! 
test -s "$GIT_DIR/BISECT_TERMS" - then - die "$(gettext "no terms defined")" - fi - case "$#" in - 0) - gettextln "Your current terms are $TERM_GOOD for the old state -and $TERM_BAD for the new state." - ;; - 1) - arg=$1 - case "$arg" in - --term-good|--term-old) - printf '%s\n' "$TERM_GOOD" - ;; - --term-bad|--term-new) - printf '%s\n' "$TERM_BAD" - ;; - *) - die "$(eval_gettext "invalid argument \$arg for 'git bisect terms'. -Supported options are: --term-good|--term-old and --term-bad|--term-new.")" - ;; - esac - ;; - *) - usage ;; - esac -} - case "$#" in 0) usage ;; @@ -613,7 +333,7 @@ case "$#" in visualize|view) bisect_visualize "$@" ;; reset) - bisect_reset "$@" ;; + git bisect--helper --bisect-reset "$@" ;; replay) bisect_replay "$@" ;; log) @@ -621,7 +341,7 @@ case "$#" in run) bisect_run "$@" ;; terms) - bisect_terms "$@" ;; + git bisect--helper --bisect-terms "$@" || exit;; *) usage ;; esac diff --git a/git-instaweb.sh b/git-instaweb.sh index eec264e630..7c55229773 100755 --- a/git-instaweb.sh +++ b/git-instaweb.sh @@ -67,6 +67,13 @@ resolve_full_httpd () { httpd_only="${httpd%% *}" # cut on first space return ;; + *python*) + # server is started by running via generated gitweb.py in + # $fqgitdir/gitweb + full_httpd="$fqgitdir/gitweb/gitweb.py" + httpd_only="${httpd%% *}" # cut on first space + return + ;; esac httpd_only="$(echo $httpd | cut -f1 -d' ')" @@ -110,7 +117,7 @@ start_httpd () { # don't quote $full_httpd, there can be arguments to it (-f) case "$httpd" in - *mongoose*|*plackup*) + *mongoose*|*plackup*|*python*) #These servers don't have a daemon mode so we'll have to fork it $full_httpd "$conf" & #Save the pid before doing anything else (we'll print it later) @@ -595,6 +602,121 @@ EOF rm -f "$conf" } +python_conf() { + # Python's builtin http.server and its CGI support is very limited. + # CGI handler is capable of running CGI script only from inside a directory. + # Trying to set cgi_directories=["/"] will add double slash to SCRIPT_NAME + # and that in turn breaks gitweb's relative link generation. 
+ + # create a simple web root where $fqgitdir/gitweb/$httpd_only is our root + mkdir -p "$fqgitdir/gitweb/$httpd_only/cgi-bin" + # Python http.server follows the symlinks + ln -sf "$root/gitweb.cgi" "$fqgitdir/gitweb/$httpd_only/cgi-bin/gitweb.cgi" + ln -sf "$root/static" "$fqgitdir/gitweb/$httpd_only/" + + # generate a standalone 'python http.server' script in $fqgitdir/gitweb + # This asumes that python is in user's $PATH + # This script is Python 2 and 3 compatible + cat > "$fqgitdir/gitweb/gitweb.py" <<EOF +#!/usr/bin/env python +import os +import sys + +# Open log file in line buffering mode +accesslogfile = open("$fqgitdir/gitweb/access.log", 'a', buffering=1) +errorlogfile = open("$fqgitdir/gitweb/error.log", 'a', buffering=1) + +# and replace our stdout and stderr with log files +# also do a lowlevel duplicate of the logfile file descriptors so that +# our CGI child process writes any stderr warning also to the log file +_orig_stdout_fd = sys.stdout.fileno() +sys.stdout.close() +os.dup2(accesslogfile.fileno(), _orig_stdout_fd) +sys.stdout = accesslogfile + +_orig_stderr_fd = sys.stderr.fileno() +sys.stderr.close() +os.dup2(errorlogfile.fileno(), _orig_stderr_fd) +sys.stderr = errorlogfile + +from functools import partial + +if sys.version_info < (3, 0): # Python 2 + from CGIHTTPServer import CGIHTTPRequestHandler + from BaseHTTPServer import HTTPServer as ServerClass +else: # Python 3 + from http.server import CGIHTTPRequestHandler + from http.server import HTTPServer as ServerClass + + +# Those environment variables will be passed to the cgi script +os.environ.update({ + "GIT_EXEC_PATH": "$GIT_EXEC_PATH", + "GIT_DIR": "$GIT_DIR", + "GITWEB_CONFIG": "$GITWEB_CONFIG" +}) + + +class GitWebRequestHandler(CGIHTTPRequestHandler): + + def log_message(self, format, *args): + # Write access logs to stdout + sys.stdout.write("%s - - [%s] %s\n" % + (self.address_string(), + self.log_date_time_string(), + format%args)) + + def do_HEAD(self): + self.redirect_path() + CGIHTTPRequestHandler.do_HEAD(self) + + def do_GET(self): + if self.path == "/": + self.send_response(303, "See Other") + self.send_header("Location", "/cgi-bin/gitweb.cgi") + self.end_headers() + return + self.redirect_path() + CGIHTTPRequestHandler.do_GET(self) + + def do_POST(self): + self.redirect_path() + CGIHTTPRequestHandler.do_POST(self) + + # rewrite path of every request that is not gitweb.cgi to out of cgi-bin + def redirect_path(self): + if not self.path.startswith("/cgi-bin/gitweb.cgi"): + self.path = self.path.replace("/cgi-bin/", "/") + + # gitweb.cgi is the only thing that is ever going to be run here. 
+ # Ignore everything else + def is_cgi(self): + result = False + if self.path.startswith('/cgi-bin/gitweb.cgi'): + result = CGIHTTPRequestHandler.is_cgi(self) + return result + + +bind = "127.0.0.1" +if "$local" == "true": + bind = "0.0.0.0" + +# Set our http root directory +# This is a work around for a missing directory argument in older Python versions +# as this was added to SimpleHTTPRequestHandler in Python 3.7 +os.chdir("$fqgitdir/gitweb/$httpd_only/") + +GitWebRequestHandler.protocol_version = "HTTP/1.0" +httpd = ServerClass((bind, $port), GitWebRequestHandler) + +sa = httpd.socket.getsockname() +print("Serving HTTP on", sa[0], "port", sa[1], "...") +httpd.serve_forever() +EOF + + chmod a+x "$fqgitdir/gitweb/gitweb.py" +} + gitweb_conf() { cat > "$fqgitdir/gitweb/gitweb_config.perl" <<EOF #!@@PERL@@ @@ -623,6 +745,9 @@ configure_httpd() { *plackup*) plackup_conf ;; + *python*) + python_conf + ;; *) echo "Unknown httpd specified: $httpd" exit 1 diff --git a/git-legacy-rebase.sh b/git-legacy-rebase.sh index 3bb0682db5..8d6c9aca65 100755 --- a/git-legacy-rebase.sh +++ b/git-legacy-rebase.sh @@ -118,7 +118,7 @@ read_basic_state () { else orig_head=$(cat "$state_dir"/head) fi && - GIT_QUIET=$(cat "$state_dir"/quiet) && + test -f "$state_dir"/quiet && GIT_QUIET=t test -f "$state_dir"/verbose && verbose=t test -f "$state_dir"/strategy && strategy="$(cat "$state_dir"/strategy)" test -f "$state_dir"/strategy_opts && @@ -176,8 +176,8 @@ run_interactive () { run_specific_rebase () { if [ "$interactive_rebase" = implied ]; then - GIT_EDITOR=: - export GIT_EDITOR + GIT_SEQUENCE_EDITOR=: + export GIT_SEQUENCE_EDITOR autosquash= fi @@ -226,6 +226,7 @@ then state_dir="$apply_dir" elif test -d "$merge_dir" then + type=interactive if test -d "$merge_dir"/rewritten then type=preserve-merges @@ -233,10 +234,7 @@ then preserve_merges=t elif test -f "$merge_dir"/interactive then - type=interactive interactive_rebase=explicit - else - type=merge fi state_dir="$merge_dir" fi @@ -496,6 +494,7 @@ then test -z "$interactive_rebase" && interactive_rebase=implied fi +actually_interactive= if test -n "$interactive_rebase" then if test -z "$preserve_merges" @@ -504,11 +503,12 @@ then else type=preserve-merges fi - + actually_interactive=t state_dir="$merge_dir" elif test -n "$do_merge" then - type=merge + interactive_rebase=implied + type=interactive state_dir="$merge_dir" else type=am @@ -520,28 +520,20 @@ then git_format_patch_opt="$git_format_patch_opt --progress" fi -if test -n "$git_am_opt"; then - incompatible_opts=$(echo " $git_am_opt " | \ - sed -e 's/ -q / /g' -e 's/^ \(.*\) $/\1/') - if test -n "$interactive_rebase" +incompatible_opts=$(echo " $git_am_opt " | \ + sed -e 's/ -q / /g' -e 's/^ \(.*\) $/\1/') +if test -n "$incompatible_opts" +then + if test -n "$actually_interactive" || test "$do_merge" then - if test -n "$incompatible_opts" - then - die "$(gettext "error: cannot combine interactive options (--interactive, --exec, --rebase-merges, --preserve-merges, --keep-empty, --root + --onto) with am options ($incompatible_opts)")" - fi - fi - if test -n "$do_merge"; then - if test -n "$incompatible_opts" - then - die "$(gettext "error: cannot combine merge options (--merge, --strategy, --strategy-option) with am options ($incompatible_opts)")" - fi + die "$(gettext "fatal: cannot combine am options with either interactive or merge options")" fi fi if test -n "$signoff" then test -n "$preserve_merges" && - die "$(gettext "error: cannot combine '--signoff' with '--preserve-merges'")" + die "$(gettext 
"fatal: cannot combine '--signoff' with '--preserve-merges'")" git_am_opt="$git_am_opt $signoff" force_rebase=t fi @@ -552,7 +544,7 @@ then # Note: incompatibility with --interactive is just a strong warning; # git-rebase.txt caveats with "unless you know what you are doing" test -n "$rebase_merges" && - die "$(gettext "error: cannot combine '--preserve-merges' with '--rebase-merges'")" + die "$(gettext "fatal: cannot combine '--preserve-merges' with '--rebase-merges'")" test -n "$reschedule_failed_exec" && die "$(gettext "error: cannot combine '--preserve-merges' with '--reschedule-failed-exec'")" @@ -561,9 +553,9 @@ fi if test -n "$rebase_merges" then test -n "$strategy_opts" && - die "$(gettext "error: cannot combine '--rebase-merges' with '--strategy-option'")" + die "$(gettext "fatal: cannot combine '--rebase-merges' with '--strategy-option'")" test -n "$strategy" && - die "$(gettext "error: cannot combine '--rebase-merges' with '--strategy'")" + die "$(gettext "fatal: cannot combine '--rebase-merges' with '--strategy'")" fi if test -z "$rebase_root" @@ -702,7 +694,7 @@ require_clean_work_tree "rebase" "$(gettext "Please commit or stash them.")" # but this should be done only when upstream and onto are the same # and if this is not an interactive rebase. mb=$(git merge-base "$onto" "$orig_head") -if test -z "$interactive_rebase" && test "$upstream" = "$onto" && +if test -z "$actually_interactive" && test "$upstream" = "$onto" && test "$mb" = "$onto" && test -z "$restrict_revision" && # linear history? ! (git rev-list --parents "$onto".."$orig_head" | sane_grep " .* ") > /dev/null @@ -752,6 +744,19 @@ then GIT_PAGER='' git diff --stat --summary "$mb_tree" "$onto" fi +if test -z "$actually_interactive" && test "$mb" = "$orig_head" +then + say "$(eval_gettext "Fast-forwarded \$branch_name to \$onto_name.")" + GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \ + git checkout -q "$onto^0" || die "could not detach HEAD" + # If the $onto is a proper descendant of the tip of the branch, then + # we just fast-forwarded. + git update-ref ORIG_HEAD $orig_head + move_to_original_branch + finish_rebase + exit 0 +fi + test -n "$interactive_rebase" && run_specific_rebase # Detach HEAD and reset the tree @@ -761,16 +766,6 @@ GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \ git checkout -q "$onto^0" || die "could not detach HEAD" git update-ref ORIG_HEAD $orig_head -# If the $onto is a proper descendant of the tip of the branch, then -# we just fast-forwarded. 
-if test "$mb" = "$orig_head" -then - say "$(eval_gettext "Fast-forwarded \$branch_name to \$onto_name.")" - move_to_original_branch - finish_rebase - exit 0 -fi - if test -n "$rebase_root" then revisions="$onto..$orig_head" @@ -1861,6 +1861,7 @@ class P4Submit(Command, P4UserMap): filesToAdd.remove(path) elif modifier == "C": src, dest = diff['src'], diff['dst'] + all_files.append(dest) p4_integrate(src, dest) pureRenameCopy.add(dest) if diff['src_sha1'] != diff['dst_sha1']: @@ -1877,6 +1878,7 @@ class P4Submit(Command, P4UserMap): editedFiles.add(dest) elif modifier == "R": src, dest = diff['src'], diff['dst'] + all_files.append(dest) if self.p4HasMoveCommand: p4_edit(src) # src must be open before move p4_move(src, dest) # opens for (move/delete, move/add) diff --git a/git-rebase--am.sh b/git-rebase--am.sh index 99b8c17787..6416716ee6 100644 --- a/git-rebase--am.sh +++ b/git-rebase--am.sh @@ -36,7 +36,7 @@ rm -f "$GIT_DIR/rebased-patches" git format-patch -k --stdout --full-index --cherry-pick --right-only \ --src-prefix=a/ --dst-prefix=b/ --no-renames --no-cover-letter \ - --pretty=mboxrd \ + --pretty=mboxrd --topo-order \ $git_format_patch_opt \ "$revisions" ${restrict_revision+^$restrict_revision} \ >"$GIT_DIR/rebased-patches" diff --git a/git-rebase--common.sh b/git-rebase--common.sh index a8a44608e0..f00e13e5d0 100644 --- a/git-rebase--common.sh +++ b/git-rebase--common.sh @@ -10,7 +10,7 @@ write_basic_state () { echo "$head_name" > "$state_dir"/head-name && echo "$onto" > "$state_dir"/onto && echo "$orig_head" > "$state_dir"/orig-head && - echo "$GIT_QUIET" > "$state_dir"/quiet && + test t = "$GIT_QUIET" && : > "$state_dir"/quiet test t = "$verbose" && : > "$state_dir"/verbose test -n "$strategy" && echo "$strategy" > "$state_dir"/strategy test -n "$strategy_opts" && echo "$strategy_opts" > \ diff --git a/git-rebase--merge.sh b/git-rebase--merge.sh deleted file mode 100644 index aa2f2f0872..0000000000 --- a/git-rebase--merge.sh +++ /dev/null @@ -1,164 +0,0 @@ -# This shell script fragment is sourced by git-rebase to implement -# its merge-based non-interactive mode that copes well with renamed -# files. -# -# Copyright (c) 2010 Junio C Hamano. -# - -prec=4 - -read_state () { - onto_name=$(cat "$state_dir"/onto_name) && - end=$(cat "$state_dir"/end) && - msgnum=$(cat "$state_dir"/msgnum) -} - -continue_merge () { - test -d "$state_dir" || die "$state_dir directory does not exist" - - unmerged=$(git ls-files -u) - if test -n "$unmerged" - then - echo "You still have unmerged paths in your index" - echo "did you forget to use git add?" - die "$resolvemsg" - fi - - cmt=$(cat "$state_dir/current") - if ! git diff-index --quiet --ignore-submodules HEAD -- - then - if ! 
git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $signoff $allow_empty_message \ - --no-verify -C "$cmt" - then - echo "Commit failed, please do not call \"git commit\"" - echo "directly, but instead do one of the following: " - die "$resolvemsg" - fi - if test -z "$GIT_QUIET" - then - printf "Committed: %0${prec}d " $msgnum - fi - echo "$cmt $(git rev-parse HEAD^0)" >> "$state_dir/rewritten" - else - if test -z "$GIT_QUIET" - then - printf "Already applied: %0${prec}d " $msgnum - fi - fi - test -z "$GIT_QUIET" && - GIT_PAGER='' git log --format=%s -1 "$cmt" - - # onto the next patch: - msgnum=$(($msgnum + 1)) - echo "$msgnum" >"$state_dir/msgnum" -} - -call_merge () { - msgnum="$1" - echo "$msgnum" >"$state_dir/msgnum" - cmt="$(cat "$state_dir/cmt.$msgnum")" - echo "$cmt" > "$state_dir/current" - git update-ref REBASE_HEAD "$cmt" - hd=$(git rev-parse --verify HEAD) - cmt_name=$(git symbolic-ref HEAD 2> /dev/null || echo HEAD) - eval GITHEAD_$cmt='"${cmt_name##refs/heads/}~$(($end - $msgnum))"' - eval GITHEAD_$hd='$onto_name' - export GITHEAD_$cmt GITHEAD_$hd - if test -n "$GIT_QUIET" - then - GIT_MERGE_VERBOSITY=1 && export GIT_MERGE_VERBOSITY - fi - test -z "$strategy" && strategy=recursive - # If cmt doesn't have a parent, don't include it as a base - base=$(git rev-parse --verify --quiet $cmt^) - eval 'git merge-$strategy' $strategy_opts $base ' -- "$hd" "$cmt"' - rv=$? - case "$rv" in - 0) - unset GITHEAD_$cmt GITHEAD_$hd - return - ;; - 1) - git rerere $allow_rerere_autoupdate - die "$resolvemsg" - ;; - 2) - echo "Strategy: $strategy failed, try another" 1>&2 - die "$resolvemsg" - ;; - *) - die "Unknown exit code ($rv) from command:" \ - "git merge-$strategy $cmt^ -- HEAD $cmt" - ;; - esac -} - -finish_rb_merge () { - move_to_original_branch - if test -s "$state_dir"/rewritten - then - git notes copy --for-rewrite=rebase <"$state_dir"/rewritten - hook="$(git rev-parse --git-path hooks/post-rewrite)" - test -x "$hook" && "$hook" rebase <"$state_dir"/rewritten - fi - say All done. 
-} - -git_rebase__merge () { - -case "$action" in -continue) - read_state - continue_merge - while test "$msgnum" -le "$end" - do - call_merge "$msgnum" - continue_merge - done - finish_rb_merge - return - ;; -skip) - read_state - git rerere clear - msgnum=$(($msgnum + 1)) - while test "$msgnum" -le "$end" - do - call_merge "$msgnum" - continue_merge - done - finish_rb_merge - return - ;; -show-current-patch) - exec git show REBASE_HEAD -- - ;; -esac - -mkdir -p "$state_dir" -echo "$onto_name" > "$state_dir/onto_name" -write_basic_state -rm -f "$(git rev-parse --git-path REBASE_HEAD)" - -msgnum=0 -for cmt in $(git rev-list --reverse --no-merges "$revisions") -do - msgnum=$(($msgnum + 1)) - echo "$cmt" > "$state_dir/cmt.$msgnum" -done - -echo 1 >"$state_dir/msgnum" -echo $msgnum >"$state_dir/end" - -end=$msgnum -msgnum=1 - -while test "$msgnum" -le "$end" -do - call_merge "$msgnum" - continue_merge -done - -finish_rb_merge - -} diff --git a/git-submodule.sh b/git-submodule.sh index 5e608f8bad..b5f2beee60 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -548,7 +548,7 @@ cmd_update() do die_if_unmatched "$quickabort" "$sha1" - git submodule--helper ensure-core-worktree "$sm_path" + git submodule--helper ensure-core-worktree "$sm_path" || exit 1 update_module=$(git submodule--helper update-module-mode $just_cloned "$sm_path" $update) @@ -850,8 +850,11 @@ cmd_summary() { ;; esac - sha1_abbr_src=$(echo $sha1_src | cut -c1-7) - sha1_abbr_dst=$(echo $sha1_dst | cut -c1-7) + sha1_abbr_src=$(GIT_DIR="$name/.git" git rev-parse --short $sha1_src 2>/dev/null || + echo $sha1_src | cut -c1-7) + sha1_abbr_dst=$(GIT_DIR="$name/.git" git rev-parse --short $sha1_dst 2>/dev/null || + echo $sha1_dst | cut -c1-7) + if test $status = T then blob="$(gettext "blob")" @@ -418,9 +418,9 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) trace_argv_printf(argv, "trace: built-in: git"); - validate_cache_entries(&the_index); + validate_cache_entries(the_repository->index); status = p->fn(argc, argv, prefix); - validate_cache_entries(&the_index); + validate_cache_entries(the_repository->index); if (status) return status; @@ -85,7 +85,8 @@ static void print_command_list(const struct cmdname_help *cmds, if (cmds[i].category & mask) { size_t len = strlen(cmds[i].name); printf(" %s ", cmds[i].name); - mput_char(' ', longest > len ? 
longest - len : 1); + if (longest > len) + mput_char(' ', longest - len); puts(_(cmds[i].help)); } } diff --git a/http-push.c b/http-push.c index bb802d80ee..b22c7caea0 100644 --- a/http-push.c +++ b/http-push.c @@ -255,7 +255,7 @@ static void start_fetch_loose(struct transfer_request *request) struct active_request_slot *slot; struct http_object_request *obj_req; - obj_req = new_http_object_request(repo->url, request->obj->oid.hash); + obj_req = new_http_object_request(repo->url, &request->obj->oid); if (obj_req == NULL) { request->state = ABORTED; return; @@ -1933,7 +1933,7 @@ int cmd_main(int argc, const char **argv) pushing = 0; if (prepare_revision_walk(&revs)) die("revision walk setup failed"); - mark_edges_uninteresting(&revs, NULL); + mark_edges_uninteresting(&revs, NULL, 0); objects_to_send = get_delta(&revs, ref_lock); finish_all_active_slots(); diff --git a/http-walker.c b/http-walker.c index 0a392c85b6..8ae5d76c6a 100644 --- a/http-walker.c +++ b/http-walker.c @@ -58,7 +58,7 @@ static void start_object_request(struct walker *walker, struct active_request_slot *slot; struct http_object_request *req; - req = new_http_object_request(obj_req->repo->base, obj_req->oid.hash); + req = new_http_object_request(obj_req->repo->base, &obj_req->oid); if (req == NULL) { obj_req->state = ABORTED; return; @@ -131,7 +131,7 @@ static int fill_active_slot(struct walker *walker) list_for_each_safe(pos, tmp, head) { obj_req = list_entry(pos, struct object_request, node); if (obj_req->state == WAITING) { - if (has_sha1_file(obj_req->oid.hash)) + if (has_object_file(&obj_req->oid)) obj_req->state = COMPLETE; else { start_object_request(walker, obj_req); @@ -489,7 +489,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1) if (obj_req == NULL) return error("Couldn't find request for %s in the queue", hex); - if (has_sha1_file(obj_req->oid.hash)) { + if (has_object_file(&obj_req->oid)) { if (obj_req->req != NULL) abort_http_object_request(obj_req->req); abort_object_request(obj_req); @@ -543,11 +543,11 @@ static int fetch_object(struct walker *walker, unsigned char *sha1) } else if (req->zret != Z_STREAM_END) { walker->corrupt_object_found++; ret = error("File %s (%s) corrupt", hex, req->url); - } else if (!hasheq(obj_req->oid.hash, req->real_sha1)) { + } else if (!oideq(&obj_req->oid, &req->real_oid)) { ret = error("File %s has bad hash", hex); } else if (req->rename < 0) { struct strbuf buf = STRBUF_INIT; - loose_object_path(the_repository, &buf, req->sha1); + loose_object_path(the_repository, &buf, &req->oid); ret = error("unable to write sha1 filename %s", buf.buf); strbuf_release(&buf); } @@ -2343,9 +2343,9 @@ static size_t fwrite_sha1_file(char *ptr, size_t eltsize, size_t nmemb, } struct http_object_request *new_http_object_request(const char *base_url, - unsigned char *sha1) + const struct object_id *oid) { - char *hex = sha1_to_hex(sha1); + char *hex = oid_to_hex(oid); struct strbuf filename = STRBUF_INIT; struct strbuf prevfile = STRBUF_INIT; int prevlocal; @@ -2356,10 +2356,10 @@ struct http_object_request *new_http_object_request(const char *base_url, freq = xcalloc(1, sizeof(*freq)); strbuf_init(&freq->tmpfile, 0); - hashcpy(freq->sha1, sha1); + oidcpy(&freq->oid, oid); freq->localfile = -1; - loose_object_path(the_repository, &filename, sha1); + loose_object_path(the_repository, &filename, oid); strbuf_addf(&freq->tmpfile, "%s.temp", filename.buf); strbuf_addf(&prevfile, "%s.prev", filename.buf); @@ -2501,16 +2501,16 @@ int finish_http_object_request(struct 
http_object_request *freq) } git_inflate_end(&freq->stream); - git_SHA1_Final(freq->real_sha1, &freq->c); + git_SHA1_Final(freq->real_oid.hash, &freq->c); if (freq->zret != Z_STREAM_END) { unlink_or_warn(freq->tmpfile.buf); return -1; } - if (!hasheq(freq->sha1, freq->real_sha1)) { + if (!oideq(&freq->oid, &freq->real_oid)) { unlink_or_warn(freq->tmpfile.buf); return -1; } - loose_object_path(the_repository, &filename, freq->sha1); + loose_object_path(the_repository, &filename, &freq->oid); freq->rename = finalize_object_file(freq->tmpfile.buf, filename.buf); strbuf_release(&filename); @@ -223,8 +223,8 @@ struct http_object_request { CURLcode curl_result; char errorstr[CURL_ERROR_SIZE]; long http_code; - unsigned char sha1[20]; - unsigned char real_sha1[20]; + struct object_id oid; + struct object_id real_oid; git_SHA_CTX c; git_zstream stream; int zret; @@ -233,7 +233,7 @@ struct http_object_request { }; extern struct http_object_request *new_http_object_request( - const char *base_url, unsigned char *sha1); + const char *base_url, const struct object_id *oid); extern void process_http_object_request(struct http_object_request *freq); extern int finish_http_object_request(struct http_object_request *freq); extern void abort_http_object_request(struct http_object_request *freq); diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c index e8da2e8581..b71bd1fb65 100644 --- a/list-objects-filter-options.c +++ b/list-objects-filter-options.c @@ -18,8 +18,9 @@ * See Documentation/rev-list-options.txt for allowed values for <arg>. * * Capture the given arg as the "filter_spec". This can be forwarded to - * subordinate commands when necessary. We also "intern" the arg for - * the convenience of the current command. + * subordinate commands when necessary (although it's better to pass it through + * expand_list_objects_filter_spec() first). We also "intern" the arg for the + * convenience of the current command. */ static int gently_parse_list_objects_filter( struct list_objects_filter_options *filter_options, @@ -50,16 +51,15 @@ static int gently_parse_list_objects_filter( } } else if (skip_prefix(arg, "tree:", &v0)) { - unsigned long depth; - if (!git_parse_ulong(v0, &depth) || depth != 0) { + if (!git_parse_ulong(v0, &filter_options->tree_exclude_depth)) { if (errbuf) { strbuf_addstr( errbuf, - _("only 'tree:0' is supported")); + _("expected 'tree:<depth>'")); } return 1; } - filter_options->choice = LOFC_TREE_NONE; + filter_options->choice = LOFC_TREE_DEPTH; return 0; } else if (skip_prefix(arg, "sparse:oid=", &v0)) { @@ -71,7 +71,7 @@ static int gently_parse_list_objects_filter( * command, but DO NOT complain if we don't have the blob or * ref locally. 
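Aside: with the tree:<depth> parsing above and the expand_list_objects_filter_spec() helper introduced just below, callers that forward a filter to another process canonicalize the spec first, so an abbreviation such as "blob:limit=1k" goes out as "blob:limit=1024". A usage sketch, illustrative only (forward_filter() and the "tree:2" value are invented for the example; the other functions appear in these hunks or in list-objects-filter-options.h):

    #include "list-objects-filter-options.h"

    static void forward_filter(const char *arg)
    {
        struct list_objects_filter_options opts = { NULL };
        struct strbuf spec = STRBUF_INIT;

        parse_list_objects_filter(&opts, arg);          /* e.g. "tree:2" or "blob:limit=1k" */
        expand_list_objects_filter_spec(&opts, &spec);  /* canonical form, e.g. "blob:limit=1024" */

        /* ... hand spec.buf to the subprocess or write it to the wire ... */

        strbuf_release(&spec);
        list_objects_filter_release(&opts);
    }
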
*/ - if (!get_oid_with_context(v0, GET_OID_BLOB, + if (!get_oid_with_context(the_repository, v0, GET_OID_BLOB, &sparse_oid, &oc)) filter_options->sparse_oid_value = oiddup(&sparse_oid); filter_options->choice = LOFC_SPARSE_OID; @@ -112,6 +112,21 @@ int opt_parse_list_objects_filter(const struct option *opt, return parse_list_objects_filter(filter_options, arg); } +void expand_list_objects_filter_spec( + const struct list_objects_filter_options *filter, + struct strbuf *expanded_spec) +{ + strbuf_init(expanded_spec, strlen(filter->filter_spec)); + if (filter->choice == LOFC_BLOB_LIMIT) + strbuf_addf(expanded_spec, "blob:limit=%lu", + filter->blob_limit_value); + else if (filter->choice == LOFC_TREE_DEPTH) + strbuf_addf(expanded_spec, "tree:%lu", + filter->tree_exclude_depth); + else + strbuf_addstr(expanded_spec, filter->filter_spec); +} + void list_objects_filter_release( struct list_objects_filter_options *filter_options) { diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h index af64e5c66f..e3adc78ebf 100644 --- a/list-objects-filter-options.h +++ b/list-objects-filter-options.h @@ -2,6 +2,7 @@ #define LIST_OBJECTS_FILTER_OPTIONS_H #include "parse-options.h" +#include "strbuf.h" /* * The list of defined filters for list-objects. @@ -10,7 +11,7 @@ enum list_objects_filter_choice { LOFC_DISABLED = 0, LOFC_BLOB_NONE, LOFC_BLOB_LIMIT, - LOFC_TREE_NONE, + LOFC_TREE_DEPTH, LOFC_SPARSE_OID, LOFC_SPARSE_PATH, LOFC__COUNT /* must be last */ @@ -20,8 +21,9 @@ struct list_objects_filter_options { /* * 'filter_spec' is the raw argument value given on the command line * or protocol request. (The part after the "--keyword=".) For - * commands that launch filtering sub-processes, this value should be - * passed to them as received by the current process. + * commands that launch filtering sub-processes, or for communication + * over the network, don't use this value; use the result of + * expand_list_objects_filter_spec() instead. */ char *filter_spec; @@ -44,6 +46,7 @@ struct list_objects_filter_options { struct object_id *sparse_oid_value; char *sparse_path_value; unsigned long blob_limit_value; + unsigned long tree_exclude_depth; }; /* Normalized command line arguments */ @@ -61,6 +64,17 @@ int opt_parse_list_objects_filter(const struct option *opt, N_("object filtering"), 0, \ opt_parse_list_objects_filter } +/* + * Translates abbreviated numbers in the filter's filter_spec into their + * fully-expanded forms (e.g., "limit:blob=1k" becomes "limit:blob=1024"). + * + * This form should be used instead of the raw filter_spec field when + * communicating with a remote process or subprocess. + */ +void expand_list_objects_filter_spec( + const struct list_objects_filter_options *filter, + struct strbuf *expanded_spec); + void list_objects_filter_release( struct list_objects_filter_options *filter_options); diff --git a/list-objects-filter.c b/list-objects-filter.c index a62624a1ce..ee449de3f7 100644 --- a/list-objects-filter.c +++ b/list-objects-filter.c @@ -10,6 +10,7 @@ #include "list-objects.h" #include "list-objects-filter.h" #include "list-objects-filter-options.h" +#include "oidmap.h" #include "oidset.h" #include "object-store.h" @@ -84,11 +85,44 @@ static void *filter_blobs_none__init( * A filter for list-objects to omit ALL trees and blobs from the traversal. * Can OPTIONALLY collect a list of the omitted OIDs. */ -struct filter_trees_none_data { +struct filter_trees_depth_data { struct oidset *omits; + + /* + * Maps trees to the minimum depth at which they were seen. 
It is not + * necessary to re-traverse a tree at deeper or equal depths than it has + * already been traversed. + * + * We can't use LOFR_MARK_SEEN for tree objects since this will prevent + * it from being traversed at shallower depths. + */ + struct oidmap seen_at_depth; + + unsigned long exclude_depth; + unsigned long current_depth; }; -static enum list_objects_filter_result filter_trees_none( +struct seen_map_entry { + struct oidmap_entry base; + size_t depth; +}; + +/* Returns 1 if the oid was in the omits set before it was invoked. */ +static int filter_trees_update_omits( + struct object *obj, + struct filter_trees_depth_data *filter_data, + int include_it) +{ + if (!filter_data->omits) + return 0; + + if (include_it) + return oidset_remove(filter_data->omits, &obj->oid); + else + return oidset_insert(filter_data->omits, &obj->oid); +} + +static enum list_objects_filter_result filter_trees_depth( struct repository *r, enum list_objects_filter_situation filter_situation, struct object *obj, @@ -96,43 +130,91 @@ static enum list_objects_filter_result filter_trees_none( const char *filename, void *filter_data_) { - struct filter_trees_none_data *filter_data = filter_data_; + struct filter_trees_depth_data *filter_data = filter_data_; + struct seen_map_entry *seen_info; + int include_it = filter_data->current_depth < + filter_data->exclude_depth; + int filter_res; + int already_seen; + + /* + * Note that we do not use _MARK_SEEN in order to allow re-traversal in + * case we encounter a tree or blob again at a shallower depth. + */ switch (filter_situation) { default: BUG("unknown filter_situation: %d", filter_situation); - case LOFS_BEGIN_TREE: + case LOFS_END_TREE: + assert(obj->type == OBJ_TREE); + filter_data->current_depth--; + return LOFR_ZERO; + case LOFS_BLOB: - if (filter_data->omits) { - oidset_insert(filter_data->omits, &obj->oid); - /* _MARK_SEEN but not _DO_SHOW (hard omit) */ - return LOFR_MARK_SEEN; + filter_trees_update_omits(obj, filter_data, include_it); + return include_it ? LOFR_MARK_SEEN | LOFR_DO_SHOW : LOFR_ZERO; + + case LOFS_BEGIN_TREE: + seen_info = oidmap_get( + &filter_data->seen_at_depth, &obj->oid); + if (!seen_info) { + seen_info = xcalloc(1, sizeof(*seen_info)); + oidcpy(&seen_info->base.oid, &obj->oid); + seen_info->depth = filter_data->current_depth; + oidmap_put(&filter_data->seen_at_depth, seen_info); + already_seen = 0; } else { - /* - * Not collecting omits so no need to to traverse tree. - */ - return LOFR_SKIP_TREE | LOFR_MARK_SEEN; + already_seen = + filter_data->current_depth >= seen_info->depth; } - case LOFS_END_TREE: - assert(obj->type == OBJ_TREE); - return LOFR_ZERO; + if (already_seen) { + filter_res = LOFR_SKIP_TREE; + } else { + int been_omitted = filter_trees_update_omits( + obj, filter_data, include_it); + seen_info->depth = filter_data->current_depth; + + if (include_it) + filter_res = LOFR_DO_SHOW; + else if (filter_data->omits && !been_omitted) + /* + * Must update omit information of children + * recursively; they have not been omitted yet. 
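Aside: the comment above describes the core trick of the tree:<depth> filter: an oidmap keyed by tree id remembers the shallowest depth at which each tree was visited, and a tree is only re-traversed when it shows up again at a shallower depth. A reduced sketch of that bookkeeping, illustrative only (depth_entry and seen_shallower_or_equal() are invented names; the oidmap calls are the same ones used in this hunk):

    #include "cache.h"
    #include "oidmap.h"

    struct depth_entry {
        struct oidmap_entry base;   /* must be first: the map keys on base.oid */
        size_t depth;
    };

    /* Returns 1 if this tree was already traversed at this depth or shallower. */
    static int seen_shallower_or_equal(struct oidmap *map,
                                       const struct object_id *oid, size_t depth)
    {
        struct depth_entry *e = oidmap_get(map, oid);

        if (!e) {
            e = xcalloc(1, sizeof(*e));
            oidcpy(&e->base.oid, oid);
            e->depth = depth;
            oidmap_put(map, e);
            return 0;
        }
        if (depth >= e->depth)
            return 1;
        e->depth = depth;           /* seen again, but shallower: traverse again */
        return 0;
    }
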
+ */ + filter_res = LOFR_ZERO; + else + filter_res = LOFR_SKIP_TREE; + } + filter_data->current_depth++; + return filter_res; } } -static void* filter_trees_none__init( +static void filter_trees_free(void *filter_data) { + struct filter_trees_depth_data *d = filter_data; + if (!d) + return; + oidmap_free(&d->seen_at_depth, 1); + free(d); +} + +static void *filter_trees_depth__init( struct oidset *omitted, struct list_objects_filter_options *filter_options, filter_object_fn *filter_fn, filter_free_fn *filter_free_fn) { - struct filter_trees_none_data *d = xcalloc(1, sizeof(*d)); + struct filter_trees_depth_data *d = xcalloc(1, sizeof(*d)); d->omits = omitted; + oidmap_init(&d->seen_at_depth, 0); + d->exclude_depth = filter_options->tree_exclude_depth; + d->current_depth = 0; - *filter_fn = filter_trees_none; - *filter_free_fn = free; + *filter_fn = filter_trees_depth; + *filter_free_fn = filter_trees_free; return d; } @@ -430,7 +512,7 @@ static filter_init_fn s_filters[] = { NULL, filter_blobs_none__init, filter_blobs_limit__init, - filter_trees_none__init, + filter_trees_depth__init, filter_sparse_oid__init, filter_sparse_path__init, }; diff --git a/list-objects.c b/list-objects.c index a2296a8e7b..dc77361e11 100644 --- a/list-objects.c +++ b/list-objects.c @@ -226,25 +226,73 @@ static void mark_edge_parents_uninteresting(struct commit *commit, } } -void mark_edges_uninteresting(struct rev_info *revs, show_edge_fn show_edge) +static void add_edge_parents(struct commit *commit, + struct rev_info *revs, + show_edge_fn show_edge, + struct oidset *set) +{ + struct commit_list *parents; + + for (parents = commit->parents; parents; parents = parents->next) { + struct commit *parent = parents->item; + struct tree *tree = get_commit_tree(parent); + + if (!tree) + continue; + + oidset_insert(set, &tree->object.oid); + + if (!(parent->object.flags & UNINTERESTING)) + continue; + tree->object.flags |= UNINTERESTING; + + if (revs->edge_hint && !(parent->object.flags & SHOWN)) { + parent->object.flags |= SHOWN; + show_edge(parent); + } + } +} + +void mark_edges_uninteresting(struct rev_info *revs, + show_edge_fn show_edge, + int sparse) { struct commit_list *list; int i; - for (list = revs->commits; list; list = list->next) { - struct commit *commit = list->item; + if (sparse) { + struct oidset set; + oidset_init(&set, 16); - if (commit->object.flags & UNINTERESTING) { - mark_tree_uninteresting(revs->repo, - get_commit_tree(commit)); - if (revs->edge_hint_aggressive && !(commit->object.flags & SHOWN)) { - commit->object.flags |= SHOWN; - show_edge(commit); + for (list = revs->commits; list; list = list->next) { + struct commit *commit = list->item; + struct tree *tree = get_commit_tree(commit); + + if (commit->object.flags & UNINTERESTING) + tree->object.flags |= UNINTERESTING; + + oidset_insert(&set, &tree->object.oid); + add_edge_parents(commit, revs, show_edge, &set); + } + + mark_trees_uninteresting_sparse(revs->repo, &set); + oidset_clear(&set); + } else { + for (list = revs->commits; list; list = list->next) { + struct commit *commit = list->item; + if (commit->object.flags & UNINTERESTING) { + mark_tree_uninteresting(revs->repo, + get_commit_tree(commit)); + if (revs->edge_hint_aggressive && !(commit->object.flags & SHOWN)) { + commit->object.flags |= SHOWN; + show_edge(commit); + } + continue; } - continue; + mark_edge_parents_uninteresting(commit, revs, show_edge); } - mark_edge_parents_uninteresting(commit, revs, show_edge); } + if (revs->edge_hint_aggressive) { for (i = 0; i < 
revs->cmdline.nr; i++) { struct object *obj = revs->cmdline.rev[i].item; diff --git a/list-objects.h b/list-objects.h index ad40762926..a952680e46 100644 --- a/list-objects.h +++ b/list-objects.h @@ -10,7 +10,9 @@ typedef void (*show_object_fn)(struct object *, const char *, void *); void traverse_commit_list(struct rev_info *, show_commit_fn, show_object_fn, void *); typedef void (*show_edge_fn)(struct commit *); -void mark_edges_uninteresting(struct rev_info *, show_edge_fn); +void mark_edges_uninteresting(struct rev_info *revs, + show_edge_fn show_edge, + int sparse); struct oidset; struct list_objects_filter_options; @@ -44,7 +44,7 @@ static int send_ref(const char *refname, const struct object_id *oid, if (ref_is_hidden(refname_nons, refname)) return 0; - if (!ref_match(&data->prefixes, refname)) + if (!ref_match(&data->prefixes, refname_nons)) return 0; strbuf_addf(&refline, "%s %s", oid_to_hex(oid), refname_nons); diff --git a/match-trees.c b/match-trees.c index 18ab825bef..ddc4d39845 100644 --- a/match-trees.c +++ b/match-trees.c @@ -3,7 +3,7 @@ #include "tree-walk.h" #include "object-store.h" -static int score_missing(unsigned mode, const char *path) +static int score_missing(unsigned mode) { int score; @@ -16,7 +16,7 @@ static int score_missing(unsigned mode, const char *path) return score; } -static int score_differs(unsigned mode1, unsigned mode2, const char *path) +static int score_differs(unsigned mode1, unsigned mode2) { int score; @@ -29,7 +29,7 @@ static int score_differs(unsigned mode1, unsigned mode2, const char *path) return score; } -static int score_matches(unsigned mode1, unsigned mode2, const char *path) +static int score_matches(unsigned mode1, unsigned mode2) { int score; @@ -98,24 +98,22 @@ static int score_trees(const struct object_id *hash1, const struct object_id *ha if (cmp < 0) { /* path1 does not appear in two */ - score += score_missing(one.entry.mode, one.entry.path); + score += score_missing(one.entry.mode); update_tree_entry(&one); } else if (cmp > 0) { /* path2 does not appear in one */ - score += score_missing(two.entry.mode, two.entry.path); + score += score_missing(two.entry.mode); update_tree_entry(&two); } else { /* path appears in both */ if (!oideq(&one.entry.oid, &two.entry.oid)) { /* they are different */ score += score_differs(one.entry.mode, - two.entry.mode, - one.entry.path); + two.entry.mode); } else { /* same subtree or blob */ score += score_matches(one.entry.mode, - two.entry.mode, - one.entry.path); + two.entry.mode); } update_tree_entry(&one); update_tree_entry(&two); diff --git a/merge-recursive.c b/merge-recursive.c index 59ba4b4a1a..4851825aeb 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -146,7 +146,8 @@ static int err(struct merge_options *o, const char *err, ...) 
return -1; } -static struct tree *shift_tree_object(struct tree *one, struct tree *two, +static struct tree *shift_tree_object(struct repository *repo, + struct tree *one, struct tree *two, const char *subtree_shift) { struct object_id shifted; @@ -159,12 +160,14 @@ static struct tree *shift_tree_object(struct tree *one, struct tree *two, } if (oideq(&two->object.oid, &shifted)) return two; - return lookup_tree(the_repository, &shifted); + return lookup_tree(repo, &shifted); } -static struct commit *make_virtual_commit(struct tree *tree, const char *comment) +static struct commit *make_virtual_commit(struct repository *repo, + struct tree *tree, + const char *comment) { - struct commit *commit = alloc_commit_node(the_repository); + struct commit *commit = alloc_commit_node(repo); set_merge_remote_desc(commit, comment, (struct object *)commit); commit->maybe_tree = tree; @@ -343,22 +346,24 @@ static int add_cacheinfo(struct merge_options *o, unsigned int mode, const struct object_id *oid, const char *path, int stage, int refresh, int options) { + struct index_state *istate = o->repo->index; struct cache_entry *ce; int ret; - ce = make_cache_entry(&the_index, mode, oid ? oid : &null_oid, path, stage, 0); + ce = make_cache_entry(istate, mode, oid ? oid : &null_oid, path, stage, 0); if (!ce) return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path); - ret = add_cache_entry(ce, options); + ret = add_index_entry(istate, ce, options); if (refresh) { struct cache_entry *nce; - nce = refresh_cache_entry(&the_index, ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING); + nce = refresh_cache_entry(istate, ce, + CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING); if (!nce) return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path); if (nce != ce) - ret = add_cache_entry(nce, options); + ret = add_index_entry(istate, nce, options); } return ret; } @@ -386,7 +391,7 @@ static int unpack_trees_start(struct merge_options *o, o->unpack_opts.merge = 1; o->unpack_opts.head_idx = 2; o->unpack_opts.fn = threeway_merge; - o->unpack_opts.src_index = &the_index; + o->unpack_opts.src_index = o->repo->index; o->unpack_opts.dst_index = &tmp_index; o->unpack_opts.aggressive = !merge_detect_rename(o); setup_unpack_trees_porcelain(&o->unpack_opts, "merge"); @@ -396,16 +401,16 @@ static int unpack_trees_start(struct merge_options *o, init_tree_desc_from_tree(t+2, merge); rc = unpack_trees(3, t, &o->unpack_opts); - cache_tree_free(&active_cache_tree); + cache_tree_free(&o->repo->index->cache_tree); /* - * Update the_index to match the new results, AFTER saving a copy + * Update o->repo->index to match the new results, AFTER saving a copy * in o->orig_index. Update src_index to point to the saved copy. * (verify_uptodate() checks src_index, and the original index is * the one that had the necessary modification timestamps.) 
*/ - o->orig_index = the_index; - the_index = tmp_index; + o->orig_index = *o->repo->index; + *o->repo->index = tmp_index; o->unpack_opts.src_index = &o->orig_index; return rc; @@ -420,12 +425,13 @@ static void unpack_trees_finish(struct merge_options *o) struct tree *write_tree_from_memory(struct merge_options *o) { struct tree *result = NULL; + struct index_state *istate = o->repo->index; - if (unmerged_cache()) { + if (unmerged_index(istate)) { int i; fprintf(stderr, "BUG: There are unmerged index entries:\n"); - for (i = 0; i < active_nr; i++) { - const struct cache_entry *ce = active_cache[i]; + for (i = 0; i < istate->cache_nr; i++) { + const struct cache_entry *ce = istate->cache[i]; if (ce_stage(ce)) fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce), (int)ce_namelen(ce), ce->name); @@ -433,16 +439,16 @@ struct tree *write_tree_from_memory(struct merge_options *o) BUG("unmerged index entries in merge-recursive.c"); } - if (!active_cache_tree) - active_cache_tree = cache_tree(); + if (!istate->cache_tree) + istate->cache_tree = cache_tree(); - if (!cache_tree_fully_valid(active_cache_tree) && - cache_tree_update(&the_index, 0) < 0) { + if (!cache_tree_fully_valid(istate->cache_tree) && + cache_tree_update(istate, 0) < 0) { err(o, _("error building trees")); return NULL; } - result = lookup_tree(the_repository, &active_cache_tree->oid); + result = lookup_tree(o->repo, &istate->cache_tree->oid); return result; } @@ -513,17 +519,17 @@ static struct stage_data *insert_stage_data(const char *path, * Create a dictionary mapping file names to stage_data objects. The * dictionary contains one entry for every path with a non-zero stage entry. */ -static struct string_list *get_unmerged(void) +static struct string_list *get_unmerged(struct index_state *istate) { struct string_list *unmerged = xcalloc(1, sizeof(struct string_list)); int i; unmerged->strdup_strings = 1; - for (i = 0; i < active_nr; i++) { + for (i = 0; i < istate->cache_nr; i++) { struct string_list_item *item; struct stage_data *e; - const struct cache_entry *ce = active_cache[i]; + const struct cache_entry *ce = istate->cache[i]; if (!ce_stage(ce)) continue; @@ -683,7 +689,7 @@ static int update_stages(struct merge_options *opt, const char *path, int clear = 1; int options = ADD_CACHE_OK_TO_ADD | ADD_CACHE_SKIP_DFCHECK; if (clear) - if (remove_file_from_cache(path)) + if (remove_file_from_index(opt->repo->index, path)) return -1; if (o) if (add_cacheinfo(opt, o->mode, &o->oid, path, 1, 0, options)) @@ -718,13 +724,14 @@ static int remove_file(struct merge_options *o, int clean, int update_working_directory = !o->call_depth && !no_wd; if (update_cache) { - if (remove_file_from_cache(path)) + if (remove_file_from_index(o->repo->index, path)) return -1; } if (update_working_directory) { if (ignore_case) { struct cache_entry *ce; - ce = cache_file_exists(path, strlen(path), ignore_case); + ce = index_file_exists(o->repo->index, path, strlen(path), + ignore_case); if (ce && ce_stage(ce) == 0 && strcmp(path, ce->name)) return 0; } @@ -774,7 +781,8 @@ static char *unique_path(struct merge_options *o, const char *path, const char * * check the working directory. If empty_ok is non-zero, also return * 0 in the case where the working-tree dir exists but is empty. 
*/ -static int dir_in_way(const char *path, int check_working_copy, int empty_ok) +static int dir_in_way(struct index_state *istate, const char *path, + int check_working_copy, int empty_ok) { int pos; struct strbuf dirpath = STRBUF_INIT; @@ -783,12 +791,12 @@ static int dir_in_way(const char *path, int check_working_copy, int empty_ok) strbuf_addstr(&dirpath, path); strbuf_addch(&dirpath, '/'); - pos = cache_name_pos(dirpath.buf, dirpath.len); + pos = index_name_pos(istate, dirpath.buf, dirpath.len); if (pos < 0) pos = -1 - pos; - if (pos < active_nr && - !strncmp(dirpath.buf, active_cache[pos]->name, dirpath.len)) { + if (pos < istate->cache_nr && + !strncmp(dirpath.buf, istate->cache[pos]->name, dirpath.len)) { strbuf_release(&dirpath); return 1; } @@ -831,8 +839,10 @@ static int was_tracked(struct merge_options *o, const char *path) return 0; } -static int would_lose_untracked(const char *path) +static int would_lose_untracked(struct merge_options *o, const char *path) { + struct index_state *istate = o->repo->index; + /* * This may look like it can be simplified to: * return !was_tracked(o, path) && file_exists(path) @@ -850,19 +860,19 @@ static int would_lose_untracked(const char *path) * update_file()/would_lose_untracked(); see every comment in this * file which mentions "update_stages". */ - int pos = cache_name_pos(path, strlen(path)); + int pos = index_name_pos(istate, path, strlen(path)); if (pos < 0) pos = -1 - pos; - while (pos < active_nr && - !strcmp(path, active_cache[pos]->name)) { + while (pos < istate->cache_nr && + !strcmp(path, istate->cache[pos]->name)) { /* * If stage #0, it is definitely tracked. * If it has stage #2 then it was tracked * before this merge started. All other * cases the path was not tracked. */ - switch (ce_stage(active_cache[pos])) { + switch (ce_stage(istate->cache[pos])) { case 0: case 2: return 0; @@ -922,7 +932,7 @@ static int make_room_for_path(struct merge_options *o, const char *path) * Do not unlink a file in the work tree if we are not * tracking it. 
*/ - if (would_lose_untracked(path)) + if (would_lose_untracked(o, path)) return err(o, _("refusing to lose untracked file at '%s'"), path); @@ -972,7 +982,7 @@ static int update_file_flags(struct merge_options *o, } if (S_ISREG(mode)) { struct strbuf strbuf = STRBUF_INIT; - if (convert_to_working_tree(&the_index, path, buf, size, &strbuf)) { + if (convert_to_working_tree(o->repo->index, path, buf, size, &strbuf)) { free(buf); size = strbuf.len; buf = strbuf_detach(&strbuf, NULL); @@ -1092,7 +1102,7 @@ static int merge_3way(struct merge_options *o, merge_status = ll_merge(result_buf, a->path, &orig, base_name, &src1, name1, &src2, name2, - &the_index, &ll_opts); + o->repo->index, &ll_opts); free(base_name); free(name1); @@ -1103,7 +1113,8 @@ static int merge_3way(struct merge_options *o, return merge_status; } -static int find_first_merges(struct object_array *result, const char *path, +static int find_first_merges(struct repository *repo, + struct object_array *result, const char *path, struct commit *a, struct commit *b) { int i, j; @@ -1123,7 +1134,7 @@ static int find_first_merges(struct object_array *result, const char *path, /* get all revisions that merge commit a */ xsnprintf(merged_revision, sizeof(merged_revision), "^%s", oid_to_hex(&a->object.oid)); - repo_init_revisions(the_repository, &revs, NULL); + repo_init_revisions(repo, &revs, NULL); rev_opts.submodule = path; /* FIXME: can't handle linked worktrees in submodules yet */ revs.single_worktree = path != NULL; @@ -1201,9 +1212,9 @@ static int merge_submodule(struct merge_options *o, return 0; } - if (!(commit_base = lookup_commit_reference(the_repository, base)) || - !(commit_a = lookup_commit_reference(the_repository, a)) || - !(commit_b = lookup_commit_reference(the_repository, b))) { + if (!(commit_base = lookup_commit_reference(o->repo, base)) || + !(commit_a = lookup_commit_reference(o->repo, a)) || + !(commit_b = lookup_commit_reference(o->repo, b))) { output(o, 1, _("Failed to merge submodule %s (commits not present)"), path); return 0; } @@ -1253,7 +1264,8 @@ static int merge_submodule(struct merge_options *o, return 0; /* find commit which merges them */ - parent_count = find_first_merges(&merges, path, commit_a, commit_b); + parent_count = find_first_merges(o->repo, &merges, path, + commit_a, commit_b); switch (parent_count) { case 0: output(o, 1, _("Failed to merge submodule %s (merge following commits not found)"), path); @@ -1401,7 +1413,7 @@ static int handle_rename_via_dir(struct merge_options *o, */ const struct diff_filespec *dest = pair->two; - if (!o->call_depth && would_lose_untracked(dest->path)) { + if (!o->call_depth && would_lose_untracked(o, dest->path)) { char *alt_path = unique_path(o, dest->path, rename_branch); output(o, 1, _("Error: Refusing to lose untracked file at %s; " @@ -1439,8 +1451,8 @@ static int handle_change_delete(struct merge_options *o, const char *update_path = path; int ret = 0; - if (dir_in_way(path, !o->call_depth, 0) || - (!o->call_depth && would_lose_untracked(path))) { + if (dir_in_way(o->repo->index, path, !o->call_depth, 0) || + (!o->call_depth && would_lose_untracked(o, path))) { update_path = alt_path = unique_path(o, path, change_branch); } @@ -1450,7 +1462,7 @@ static int handle_change_delete(struct merge_options *o, * correct; since there is no true "middle point" between * them, simply reuse the base version for virtual merge base. 
*/ - ret = remove_file_from_cache(path); + ret = remove_file_from_index(o->repo->index, path); if (!ret) ret = update_file(o, 0, o_oid, o_mode, update_path); } else { @@ -1526,7 +1538,7 @@ static int handle_rename_delete(struct merge_options *o, return -1; if (o->call_depth) - return remove_file_from_cache(dest->path); + return remove_file_from_index(o->repo->index, dest->path); else return update_stages(o, dest->path, NULL, rename_branch == o->branch1 ? dest : NULL, @@ -1607,10 +1619,10 @@ static int handle_file_collision(struct merge_options *o, /* Remove rename sources if rename/add or rename/rename(2to1) */ if (prev_path1) remove_file(o, 1, prev_path1, - o->call_depth || would_lose_untracked(prev_path1)); + o->call_depth || would_lose_untracked(o, prev_path1)); if (prev_path2) remove_file(o, 1, prev_path2, - o->call_depth || would_lose_untracked(prev_path2)); + o->call_depth || would_lose_untracked(o, prev_path2)); /* * Remove the collision path, if it wouldn't cause dirty contents @@ -1621,7 +1633,7 @@ static int handle_file_collision(struct merge_options *o, output(o, 1, _("Refusing to lose dirty file at %s"), collide_path); update_path = alt_path = unique_path(o, collide_path, "merged"); - } else if (would_lose_untracked(collide_path)) { + } else if (would_lose_untracked(o, collide_path)) { /* * Only way we get here is if both renames were from * a directory rename AND user had an untracked file @@ -1717,12 +1729,12 @@ static char *find_path_for_conflict(struct merge_options *o, const char *branch2) { char *new_path = NULL; - if (dir_in_way(path, !o->call_depth, 0)) { + if (dir_in_way(o->repo->index, path, !o->call_depth, 0)) { new_path = unique_path(o, path, branch1); output(o, 1, _("%s is a directory in %s adding " "as %s instead"), path, branch2, new_path); - } else if (would_lose_untracked(path)) { + } else if (would_lose_untracked(o, path)) { new_path = unique_path(o, path, branch1); output(o, 1, _("Refusing to lose untracked file" " at %s; adding as %s instead"), @@ -1783,14 +1795,14 @@ static int handle_rename_rename_1to2(struct merge_options *o, return -1; } else - remove_file_from_cache(a->path); + remove_file_from_index(o->repo->index, a->path); add = filespec_from_entry(&other, ci->dst_entry2, 3 ^ 1); if (add) { if (update_file(o, 0, &add->oid, add->mode, b->path)) return -1; } else - remove_file_from_cache(b->path); + remove_file_from_index(o->repo->index, b->path); } else { /* * For each destination path, we need to see if there is a @@ -1887,7 +1899,7 @@ static struct diff_queue_struct *get_diffpairs(struct merge_options *o, struct diff_queue_struct *ret; struct diff_options opts; - repo_diff_setup(the_repository, &opts); + repo_diff_setup(o->repo, &opts); opts.flags.recursive = 1; opts.flags.rename_empty = 0; opts.detect_rename = merge_detect_rename(o); @@ -3042,8 +3054,8 @@ static int blob_unchanged(struct merge_options *opt, * performed. Comparison can be skipped if both files are * unchanged since their sha1s have already been compared. 
*/ - if (renormalize_buffer(&the_index, path, o.buf, o.len, &o) | - renormalize_buffer(&the_index, path, a.buf, a.len, &a)) + if (renormalize_buffer(opt->repo->index, path, o.buf, o.len, &o) | + renormalize_buffer(opt->repo->index, path, a.buf, a.len, &a)) ret = (o.len == a.len && !memcmp(o.buf, a.buf, o.len)); error_return: @@ -3124,7 +3136,7 @@ static int handle_content_merge(struct merge_options *o, a.path = (char *)path1; b.path = (char *)path2; - if (dir_in_way(path, !o->call_depth, + if (dir_in_way(o->repo->index, path, !o->call_depth, S_ISGITLINK(pair1->two->mode))) df_conflict_remains = 1; } @@ -3158,8 +3170,8 @@ static int handle_content_merge(struct merge_options *o, pos = index_name_pos(&o->orig_index, path, strlen(path)); ce = o->orig_index.cache[pos]; if (ce_skip_worktree(ce)) { - pos = index_name_pos(&the_index, path, strlen(path)); - ce = the_index.cache[pos]; + pos = index_name_pos(o->repo->index, path, strlen(path)); + ce = o->repo->index->cache[pos]; ce->ce_flags |= CE_SKIP_WORKTREE; } return mfi.clean; @@ -3178,7 +3190,7 @@ static int handle_content_merge(struct merge_options *o, if (df_conflict_remains || is_dirty) { char *new_path; if (o->call_depth) { - remove_file_from_cache(path); + remove_file_from_index(o->repo->index, path); } else { if (!mfi.clean) { if (update_stages(o, path, &one, &a, &b)) @@ -3338,7 +3350,7 @@ static int process_entry(struct merge_options *o, oid = b_oid; conf = _("directory/file"); } - if (dir_in_way(path, + if (dir_in_way(o->repo->index, path, !o->call_depth && !S_ISGITLINK(a_mode), 0)) { char *new_path = unique_path(o, path, add_branch); @@ -3349,7 +3361,7 @@ static int process_entry(struct merge_options *o, if (update_file(o, 0, oid, mode, new_path)) clean_merge = -1; else if (o->call_depth) - remove_file_from_cache(path); + remove_file_from_index(o->repo->index, path); free(new_path); } else { output(o, 2, _("Adding %s"), path); @@ -3397,18 +3409,19 @@ int merge_trees(struct merge_options *o, struct tree *common, struct tree **result) { + struct index_state *istate = o->repo->index; int code, clean; struct strbuf sb = STRBUF_INIT; - if (!o->call_depth && index_has_changes(&the_index, head, &sb)) { + if (!o->call_depth && repo_index_has_changes(o->repo, head, &sb)) { err(o, _("Your local changes to the following files would be overwritten by merge:\n %s"), sb.buf); return -1; } if (o->subtree_shift) { - merge = shift_tree_object(head, merge, o->subtree_shift); - common = shift_tree_object(head, common, o->subtree_shift); + merge = shift_tree_object(o->repo, head, merge, o->subtree_shift); + common = shift_tree_object(o->repo, head, common, o->subtree_shift); } if (oid_eq(&common->object.oid, &merge->object.oid)) { @@ -3428,7 +3441,7 @@ int merge_trees(struct merge_options *o, return -1; } - if (unmerged_cache()) { + if (unmerged_index(istate)) { struct string_list *entries; struct rename_info re_info; int i; @@ -3443,7 +3456,7 @@ int merge_trees(struct merge_options *o, get_files_dirs(o, head); get_files_dirs(o, merge); - entries = get_unmerged(); + entries = get_unmerged(o->repo->index); clean = detect_and_process_renames(o, common, head, merge, entries, &re_info); record_df_conflict_files(o, entries); @@ -3544,8 +3557,8 @@ int merge_recursive(struct merge_options *o, /* if there is no common ancestor, use an empty tree */ struct tree *tree; - tree = lookup_tree(the_repository, the_repository->hash_algo->empty_tree); - merged_common_ancestors = make_virtual_commit(tree, "ancestor"); + tree = lookup_tree(o->repo, 
o->repo->hash_algo->empty_tree); + merged_common_ancestors = make_virtual_commit(o->repo, tree, "ancestor"); } for (iter = ca; iter; iter = iter->next) { @@ -3559,7 +3572,7 @@ int merge_recursive(struct merge_options *o, * overwritten it: the committed "conflicts" were * already resolved. */ - discard_cache(); + discard_index(o->repo->index); saved_b1 = o->branch1; saved_b2 = o->branch2; o->branch1 = "Temporary merge branch 1"; @@ -3575,9 +3588,9 @@ int merge_recursive(struct merge_options *o, return err(o, _("merge returned no commit")); } - discard_cache(); + discard_index(o->repo->index); if (!o->call_depth) - read_cache(); + repo_read_index(o->repo); o->ancestor = "merged common ancestors"; clean = merge_trees(o, get_commit_tree(h1), get_commit_tree(h2), @@ -3589,7 +3602,7 @@ int merge_recursive(struct merge_options *o, } if (o->call_depth) { - *result = make_virtual_commit(mrtree, "merged tree"); + *result = make_virtual_commit(o->repo, mrtree, "merged tree"); commit_list_insert(h1, &(*result)->parents); commit_list_insert(h2, &(*result)->parents->next); } @@ -3602,17 +3615,17 @@ int merge_recursive(struct merge_options *o, return clean; } -static struct commit *get_ref(const struct object_id *oid, const char *name) +static struct commit *get_ref(struct repository *repo, const struct object_id *oid, + const char *name) { struct object *object; - object = deref_tag(the_repository, parse_object(the_repository, oid), - name, - strlen(name)); + object = deref_tag(repo, parse_object(repo, oid), + name, strlen(name)); if (!object) return NULL; if (object->type == OBJ_TREE) - return make_virtual_commit((struct tree*)object, name); + return make_virtual_commit(repo, (struct tree*)object, name); if (object->type != OBJ_COMMIT) return NULL; if (parse_commit((struct commit *)object)) @@ -3629,22 +3642,22 @@ int merge_recursive_generic(struct merge_options *o, { int clean; struct lock_file lock = LOCK_INIT; - struct commit *head_commit = get_ref(head, o->branch1); - struct commit *next_commit = get_ref(merge, o->branch2); + struct commit *head_commit = get_ref(o->repo, head, o->branch1); + struct commit *next_commit = get_ref(o->repo, merge, o->branch2); struct commit_list *ca = NULL; if (base_list) { int i; for (i = 0; i < num_base_list; ++i) { struct commit *base; - if (!(base = get_ref(base_list[i], oid_to_hex(base_list[i])))) + if (!(base = get_ref(o->repo, base_list[i], oid_to_hex(base_list[i])))) return err(o, _("Could not parse object '%s'"), oid_to_hex(base_list[i])); commit_list_insert(base, &ca); } } - hold_locked_index(&lock, LOCK_DIE_ON_ERROR); + repo_hold_locked_index(o->repo, &lock, LOCK_DIE_ON_ERROR); clean = merge_recursive(o, head_commit, next_commit, ca, result); if (clean < 0) { @@ -3652,7 +3665,7 @@ int merge_recursive_generic(struct merge_options *o, return clean; } - if (write_locked_index(&the_index, &lock, + if (write_locked_index(o->repo->index, &lock, COMMIT_LOCK | SKIP_IF_UNCHANGED)) return err(o, _("Unable to write index.")); @@ -3676,10 +3689,12 @@ static void merge_recursive_config(struct merge_options *o) git_config(git_xmerge_config, NULL); } -void init_merge_options(struct merge_options *o) +void init_merge_options(struct merge_options *o, + struct repository *repo) { const char *merge_verbosity; memset(o, 0, sizeof(struct merge_options)); + o->repo = repo; o->verbosity = 2; o->buffer_output = 1; o->diff_rename_limit = -1; diff --git a/merge-recursive.h b/merge-recursive.h index e6a0828eca..c2b7bb65c6 100644 --- a/merge-recursive.h +++ b/merge-recursive.h @@ -6,6 
+6,8 @@ struct commit; +struct repository; + struct merge_options { const char *ancestor; const char *branch1; @@ -34,6 +36,7 @@ struct merge_options { struct string_list df_conflict_file_set; struct unpack_trees_options unpack_opts; struct index_state orig_index; + struct repository *repo; }; /* @@ -92,7 +95,8 @@ int merge_recursive_generic(struct merge_options *o, const struct object_id **ca, struct commit **result); -void init_merge_options(struct merge_options *o); +void init_merge_options(struct merge_options *o, + struct repository *repo); struct tree *write_tree_from_memory(struct merge_options *o); int parse_merge_opt(struct merge_options *out, const char *s); @@ -37,7 +37,7 @@ int try_merge_command(struct repository *r, argv_array_clear(&args); discard_index(r->index); - if (read_index(r->index) < 0) + if (repo_read_index(r) < 0) die(_("failed to read the cache")); resolve_undo_clear_index(r->index); @@ -58,7 +58,7 @@ int checkout_fast_forward(struct repository *r, refresh_index(r->index, REFRESH_QUIET, NULL, NULL, NULL); - if (hold_locked_index(&lock_file, LOCK_REPORT_ON_ERROR) < 0) + if (repo_hold_locked_index(r, &lock_file, LOCK_REPORT_ON_ERROR) < 0) return -1; memset(&trees, 0, sizeof(trees)); diff --git a/name-hash.c b/name-hash.c index 623ca6923a..b4861bc7b0 100644 --- a/name-hash.c +++ b/name-hash.c @@ -5,7 +5,6 @@ * * Copyright (C) 2008 Linus Torvalds */ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "thread-utils.h" diff --git a/notes-merge.c b/notes-merge.c index 72688d301b..280aa8e6c1 100644 --- a/notes-merge.c +++ b/notes-merge.c @@ -649,7 +649,7 @@ int notes_merge(struct notes_merge_options *o, struct commit_list *parents = NULL; commit_list_insert(remote, &parents); /* LIFO order */ commit_list_insert(local, &parents); - create_notes_commit(local_tree, parents, o->commit_msg.buf, + create_notes_commit(o->repo, local_tree, parents, o->commit_msg.buf, o->commit_msg.len, result_oid); } @@ -724,7 +724,7 @@ int notes_merge_commit(struct notes_merge_options *o, strbuf_setlen(&path, baselen); } - create_notes_commit(partial_tree, partial_commit->parents, msg, + create_notes_commit(o->repo, partial_tree, partial_commit->parents, msg, strlen(msg), result_oid); unuse_commit_buffer(partial_commit, buffer); if (o->verbosity >= 4) diff --git a/notes-utils.c b/notes-utils.c index 14ea03178e..a819410698 100644 --- a/notes-utils.c +++ b/notes-utils.c @@ -5,7 +5,9 @@ #include "notes-utils.h" #include "repository.h" -void create_notes_commit(struct notes_tree *t, struct commit_list *parents, +void create_notes_commit(struct repository *r, + struct notes_tree *t, + struct commit_list *parents, const char *msg, size_t msg_len, struct object_id *result_oid) { @@ -20,8 +22,7 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, /* Deduce parent commit from t->ref */ struct object_id parent_oid; if (!read_ref(t->ref, &parent_oid)) { - struct commit *parent = lookup_commit(the_repository, - &parent_oid); + struct commit *parent = lookup_commit(r, &parent_oid); if (parse_commit(parent)) die("Failed to find/parse commit %s", t->ref); commit_list_insert(parent, &parents); @@ -34,7 +35,7 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, die("Failed to commit notes tree to database"); } -void commit_notes(struct notes_tree *t, const char *msg) +void commit_notes(struct repository *r, struct notes_tree *t, const char *msg) { struct strbuf buf = STRBUF_INIT; struct object_id commit_oid; @@ -50,7 +51,7 @@ void 
commit_notes(struct notes_tree *t, const char *msg) strbuf_addstr(&buf, msg); strbuf_complete_line(&buf); - create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid); + create_notes_commit(r, t, NULL, buf.buf, buf.len, &commit_oid); strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */ update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0, UPDATE_REFS_DIE_ON_ERR); @@ -171,11 +172,13 @@ int copy_note_for_rewrite(struct notes_rewrite_cfg *c, return ret; } -void finish_copy_notes_for_rewrite(struct notes_rewrite_cfg *c, const char *msg) +void finish_copy_notes_for_rewrite(struct repository *r, + struct notes_rewrite_cfg *c, + const char *msg) { int i; for (i = 0; c->trees[i]; i++) { - commit_notes(c->trees[i], msg); + commit_notes(r, c->trees[i], msg); free_notes(c->trees[i]); } free(c->trees); diff --git a/notes-utils.h b/notes-utils.h index 5408306528..d9b3c09eaf 100644 --- a/notes-utils.h +++ b/notes-utils.h @@ -5,6 +5,7 @@ struct commit_list; struct object_id; +struct repository; /* * Create new notes commit from the given notes tree @@ -17,11 +18,13 @@ struct object_id; * * The resulting commit SHA1 is stored in result_sha1. */ -void create_notes_commit(struct notes_tree *t, struct commit_list *parents, +void create_notes_commit(struct repository *r, + struct notes_tree *t, + struct commit_list *parents, const char *msg, size_t msg_len, struct object_id *result_oid); -void commit_notes(struct notes_tree *t, const char *msg); +void commit_notes(struct repository *r, struct notes_tree *t, const char *msg); enum notes_merge_strategy { NOTES_MERGE_RESOLVE_MANUAL = 0, @@ -45,6 +48,8 @@ int parse_notes_merge_strategy(const char *v, enum notes_merge_strategy *s); struct notes_rewrite_cfg *init_copy_notes_for_rewrite(const char *cmd); int copy_note_for_rewrite(struct notes_rewrite_cfg *c, const struct object_id *from_obj, const struct object_id *to_obj); -void finish_copy_notes_for_rewrite(struct notes_rewrite_cfg *c, const char *msg); +void finish_copy_notes_for_rewrite(struct repository *r, + struct notes_rewrite_cfg *c, + const char *msg); #endif diff --git a/object-store.h b/object-store.h index e16aa38cae..14fc935bd1 100644 --- a/object-store.h +++ b/object-store.h @@ -154,19 +154,28 @@ void raw_object_store_clear(struct raw_object_store *o); /* * Put in `buf` the name of the file in the local object database that - * would be used to store a loose object with the specified sha1. + * would be used to store a loose object with the specified oid. 
*/ -const char *loose_object_path(struct repository *r, struct strbuf *buf, const unsigned char *sha1); +const char *loose_object_path(struct repository *r, struct strbuf *buf, + const struct object_id *oid); -void *map_sha1_file(struct repository *r, const unsigned char *sha1, unsigned long *size); +void *map_loose_object(struct repository *r, const struct object_id *oid, + unsigned long *size); -extern void *read_object_file_extended(const struct object_id *oid, +extern void *read_object_file_extended(struct repository *r, + const struct object_id *oid, enum object_type *type, unsigned long *size, int lookup_replace); -static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size) +static inline void *repo_read_object_file(struct repository *r, + const struct object_id *oid, + enum object_type *type, + unsigned long *size) { - return read_object_file_extended(oid, type, size, 1); + return read_object_file_extended(r, oid, type, size, 1); } +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define read_object_file(oid, type, size) repo_read_object_file(the_repository, oid, type, size) +#endif /* Read and unpack an object file into memory, write memory to an object file */ int oid_object_info(struct repository *r, const struct object_id *, unsigned long *); @@ -199,20 +208,19 @@ int read_loose_object(const char *path, unsigned long *size, void **contents); -/* - * Convenience for sha1_object_info_extended() with a NULL struct - * object_info. OBJECT_INFO_SKIP_CACHED is automatically set; pass - * nonzero flags to also set other flags. - */ -extern int has_sha1_file_with_flags(const unsigned char *sha1, int flags); -static inline int has_sha1_file(const unsigned char *sha1) -{ - return has_sha1_file_with_flags(sha1, 0); -} +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define has_sha1_file_with_flags(sha1, flags) repo_has_sha1_file_with_flags(the_repository, sha1, flags) +#define has_sha1_file(sha1) repo_has_sha1_file(the_repository, sha1) +#endif /* Same as the above, except for struct object_id. 
*/ -extern int has_object_file(const struct object_id *oid); -extern int has_object_file_with_flags(const struct object_id *oid, int flags); +int repo_has_object_file(struct repository *r, const struct object_id *oid); +int repo_has_object_file_with_flags(struct repository *r, + const struct object_id *oid, int flags); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define has_object_file(oid) repo_has_object_file(the_repository, oid) +#define has_object_file_with_flags(oid, flags) repo_has_object_file_with_flags(the_repository, oid, flags) +#endif /* * Return true iff an alternate object database has a loose object @@ -164,8 +164,9 @@ void *object_as_type(struct repository *r, struct object *obj, enum object_type return obj; else if (obj->type == OBJ_NONE) { if (type == OBJ_COMMIT) - ((struct commit *)obj)->index = alloc_commit_index(r); - obj->type = type; + init_commit_node(r, (struct commit *) obj); + else + obj->type = type; return obj; } else { @@ -259,22 +260,22 @@ struct object *parse_object(struct repository *r, const struct object_id *oid) if (obj && obj->parsed) return obj; - if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) || - (!obj && has_object_file(oid) && + if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) || + (!obj && repo_has_object_file(r, oid) && oid_object_info(r, oid, NULL) == OBJ_BLOB)) { if (check_object_signature(repl, NULL, 0, NULL) < 0) { - error(_("sha1 mismatch %s"), oid_to_hex(oid)); + error(_("hash mismatch %s"), oid_to_hex(oid)); return NULL; } parse_blob_buffer(lookup_blob(r, oid), NULL, 0); return lookup_object(r, oid->hash); } - buffer = read_object_file(oid, &type, &size); + buffer = repo_read_object_file(r, oid, &type, &size); if (buffer) { if (check_object_signature(repl, buffer, size, type_name(type)) < 0) { free(buffer); - error(_("sha1 mismatch %s"), oid_to_hex(repl)); + error(_("hash mismatch %s"), oid_to_hex(repl)); return NULL; } @@ -540,7 +541,7 @@ void parsed_object_pool_clear(struct parsed_object_pool *o) if (obj->type == OBJ_TREE) free_tree_buffer((struct tree*)obj); else if (obj->type == OBJ_COMMIT) - release_commit_memory((struct commit*)obj); + release_commit_memory(o, (struct commit*)obj); else if (obj->type == OBJ_TAG) release_tag_memory((struct tag*)obj); } diff --git a/pack-objects.c b/pack-objects.c index 9c45842df3..e7cd337bee 100644 --- a/pack-objects.c +++ b/pack-objects.c @@ -150,9 +150,7 @@ void prepare_packing_data(struct repository *r, struct packing_data *pdata) 1U << OE_SIZE_BITS); pdata->oe_delta_size_limit = git_env_ulong("GIT_TEST_OE_DELTA_SIZE", 1UL << OE_DELTA_SIZE_BITS); -#ifndef NO_PTHREADS - pthread_mutex_init(&pdata->lock, NULL); -#endif + init_recursive_mutex(&pdata->odb_lock); } struct object_entry *packlist_alloc(struct packing_data *pdata, diff --git a/pack-objects.h b/pack-objects.h index 3cd8d1f00a..6bfacc7d2c 100644 --- a/pack-objects.h +++ b/pack-objects.h @@ -148,7 +148,11 @@ struct packing_data { struct packed_git **in_pack_by_idx; struct packed_git **in_pack; - pthread_mutex_t lock; + /* + * During packing with multiple threads, protect the in-core + * object database from concurrent accesses. 
+ */ + pthread_mutex_t odb_lock; /* * This list contains entries for bases which we know the other side @@ -168,13 +172,14 @@ struct packing_data { void prepare_packing_data(struct repository *r, struct packing_data *pdata); +/* Protect access to object database */ static inline void packing_data_lock(struct packing_data *pdata) { - pthread_mutex_lock(&pdata->lock); + pthread_mutex_lock(&pdata->odb_lock); } static inline void packing_data_unlock(struct packing_data *pdata) { - pthread_mutex_unlock(&pdata->lock); + pthread_mutex_unlock(&pdata->odb_lock); } struct object_entry *packlist_alloc(struct packing_data *pdata, diff --git a/pack-revindex.c b/pack-revindex.c index 3c58784a5f..50891f77a2 100644 --- a/pack-revindex.c +++ b/pack-revindex.c @@ -119,7 +119,7 @@ static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max) */ static void create_pack_revindex(struct packed_git *p) { - unsigned num_ent = p->num_objects; + const unsigned num_ent = p->num_objects; unsigned i; const char *index = p->index_data; const unsigned hashsz = the_hash_algo->rawsz; @@ -132,7 +132,7 @@ static void create_pack_revindex(struct packed_git *p) (uint32_t *)(index + 8 + p->num_objects * (hashsz + 4)); const uint32_t *off_64 = off_32 + p->num_objects; for (i = 0; i < num_ent; i++) { - uint32_t off = ntohl(*off_32++); + const uint32_t off = ntohl(*off_32++); if (!(off & 0x80000000)) { p->revindex[i].offset = off; } else { @@ -143,7 +143,7 @@ static void create_pack_revindex(struct packed_git *p) } } else { for (i = 0; i < num_ent; i++) { - uint32_t hl = *((uint32_t *)(index + (hashsz + 4) * i)); + const uint32_t hl = *((uint32_t *)(index + (hashsz + 4) * i)); p->revindex[i].offset = ntohl(hl); p->revindex[i].nr = i; } @@ -168,10 +168,10 @@ int find_revindex_position(struct packed_git *p, off_t ofs) { int lo = 0; int hi = p->num_objects + 1; - struct revindex_entry *revindex = p->revindex; + const struct revindex_entry *revindex = p->revindex; do { - unsigned mi = lo + (hi - lo) / 2; + const unsigned mi = lo + (hi - lo) / 2; if (revindex[mi].offset == ofs) { return mi; } else if (ofs < revindex[mi].offset) diff --git a/packfile.c b/packfile.c index ac6bb64bc3..16bcb75262 100644 --- a/packfile.c +++ b/packfile.c @@ -1148,12 +1148,13 @@ void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1) p->num_bad_objects++; } -const struct packed_git *has_packed_and_bad(const unsigned char *sha1) +const struct packed_git *has_packed_and_bad(struct repository *r, + const unsigned char *sha1) { struct packed_git *p; unsigned i; - for (p = the_repository->objects->packed_git; p; p = p->next) + for (p = r->objects->packed_git; p; p = p->next) for (i = 0; i < p->num_bad_objects; i++) if (hasheq(sha1, p->bad_object_sha1 + the_hash_algo->rawsz * i)) diff --git a/packfile.h b/packfile.h index 6c4037605d..d70c6d9afb 100644 --- a/packfile.h +++ b/packfile.h @@ -146,7 +146,7 @@ extern int packed_object_info(struct repository *r, off_t offset, struct object_info *); extern void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1); -extern const struct packed_git *has_packed_and_bad(const unsigned char *sha1); +extern const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1); /* * Iff a pack file in the given repository contains the object named by sha1, @@ -165,7 +165,7 @@ extern void report_linked_checkout_garbage(void); const char *git_path_##var(struct repository *r) \ { \ if (!r->cached_paths.var) \ - r->cached_paths.var = 
git_pathdup(filename); \ + r->cached_paths.var = repo_git_path(r, filename); \ return r->cached_paths.var; \ } diff --git a/pathspec.c b/pathspec.c index e85298f68c..12c2b322b3 100644 --- a/pathspec.c +++ b/pathspec.c @@ -1,4 +1,3 @@ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "dir.h" diff --git a/pkt-line.c b/pkt-line.c index 04d10bbd03..d4b71d3e82 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -129,12 +129,14 @@ static void set_packet_header(char *buf, const int size) #undef hex } -static void format_packet(struct strbuf *out, const char *fmt, va_list args) +static void format_packet(struct strbuf *out, const char *prefix, + const char *fmt, va_list args) { size_t orig_len, n; orig_len = out->len; strbuf_addstr(out, "0000"); + strbuf_addstr(out, prefix); strbuf_vaddf(out, fmt, args); n = out->len - orig_len; @@ -145,13 +147,13 @@ static void format_packet(struct strbuf *out, const char *fmt, va_list args) packet_trace(out->buf + orig_len + 4, n - 4, 1); } -static int packet_write_fmt_1(int fd, int gently, +static int packet_write_fmt_1(int fd, int gently, const char *prefix, const char *fmt, va_list args) { static struct strbuf buf = STRBUF_INIT; strbuf_reset(&buf); - format_packet(&buf, fmt, args); + format_packet(&buf, prefix, fmt, args); if (write_in_full(fd, buf.buf, buf.len) < 0) { if (!gently) { check_pipe(errno); @@ -168,7 +170,7 @@ void packet_write_fmt(int fd, const char *fmt, ...) va_list args; va_start(args, fmt); - packet_write_fmt_1(fd, 0, fmt, args); + packet_write_fmt_1(fd, 0, "", fmt, args); va_end(args); } @@ -178,7 +180,7 @@ int packet_write_fmt_gently(int fd, const char *fmt, ...) va_list args; va_start(args, fmt); - status = packet_write_fmt_1(fd, 1, fmt, args); + status = packet_write_fmt_1(fd, 1, "", fmt, args); va_end(args); return status; } @@ -211,7 +213,7 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) 
va_list args; va_start(args, fmt); - format_packet(buf, fmt, args); + format_packet(buf, "", fmt, args); va_end(args); } @@ -346,6 +348,10 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer, return PACKET_READ_EOF; } + if ((options & PACKET_READ_DIE_ON_ERR_PACKET) && + starts_with(buffer, "ERR ")) + die(_("remote error: %s"), buffer + 4); + if ((options & PACKET_READ_CHOMP_NEWLINE) && len && buffer[len-1] == '\n') len--; @@ -433,6 +439,29 @@ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out) return sb_out->len - orig_len; } +int recv_sideband(const char *me, int in_stream, int out) +{ + char buf[LARGE_PACKET_MAX + 1]; + int len; + struct strbuf scratch = STRBUF_INIT; + enum sideband_type sideband_type; + + while (1) { + len = packet_read(in_stream, NULL, NULL, buf, LARGE_PACKET_MAX, + 0); + if (!demultiplex_sideband(me, buf, len, 0, &scratch, + &sideband_type)) + continue; + switch (sideband_type) { + case SIDEBAND_PRIMARY: + write_or_die(out, buf + 1, len - 1); + break; + default: /* errors: message already written */ + return sideband_type; + } + } +} + /* Packet Reader Functions */ void packet_reader_init(struct packet_reader *reader, int fd, char *src_buffer, size_t src_len, @@ -446,25 +475,43 @@ void packet_reader_init(struct packet_reader *reader, int fd, reader->buffer = packet_buffer; reader->buffer_size = sizeof(packet_buffer); reader->options = options; + reader->me = "git"; } enum packet_read_status packet_reader_read(struct packet_reader *reader) { + struct strbuf scratch = STRBUF_INIT; + if (reader->line_peeked) { reader->line_peeked = 0; return reader->status; } - reader->status = packet_read_with_status(reader->fd, - &reader->src_buffer, - &reader->src_len, - reader->buffer, - reader->buffer_size, - &reader->pktlen, - reader->options); + /* + * Consume all progress packets until a primary payload packet is + * received + */ + while (1) { + enum sideband_type sideband_type; + reader->status = packet_read_with_status(reader->fd, + &reader->src_buffer, + &reader->src_len, + reader->buffer, + reader->buffer_size, + &reader->pktlen, + reader->options); + if (!reader->use_sideband) + break; + if (demultiplex_sideband(reader->me, reader->buffer, + reader->pktlen, 1, &scratch, + &sideband_type)) + break; + } if (reader->status == PACKET_READ_NORMAL) - reader->line = reader->buffer; + /* Skip the sideband designator if sideband is used */ + reader->line = reader->use_sideband ? + reader->buffer + 1 : reader->buffer; else reader->line = NULL; @@ -482,3 +529,39 @@ enum packet_read_status packet_reader_peek(struct packet_reader *reader) reader->line_peeked = 1; return reader->status; } + +void packet_writer_init(struct packet_writer *writer, int dest_fd) +{ + writer->dest_fd = dest_fd; + writer->use_sideband = 0; +} + +void packet_writer_write(struct packet_writer *writer, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + packet_write_fmt_1(writer->dest_fd, 0, + writer->use_sideband ? "\001" : "", fmt, args); + va_end(args); +} + +void packet_writer_error(struct packet_writer *writer, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + packet_write_fmt_1(writer->dest_fd, 0, + writer->use_sideband ? 
"\003" : "ERR ", fmt, args); + va_end(args); +} + +void packet_writer_delim(struct packet_writer *writer) +{ + packet_delim(writer->dest_fd); +} + +void packet_writer_flush(struct packet_writer *writer) +{ + packet_flush(writer->dest_fd); +} diff --git a/pkt-line.h b/pkt-line.h index 5b28d43472..ad9a4a2cd7 100644 --- a/pkt-line.h +++ b/pkt-line.h @@ -3,6 +3,7 @@ #include "git-compat-util.h" #include "strbuf.h" +#include "sideband.h" /* * Write a packetized stream, where each line is preceded by @@ -62,9 +63,13 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out); * * If options contains PACKET_READ_CHOMP_NEWLINE, a trailing newline (if * present) is removed from the buffer before returning. + * + * If options contains PACKET_READ_DIE_ON_ERR_PACKET, it dies when it sees an + * ERR packet. */ -#define PACKET_READ_GENTLE_ON_EOF (1u<<0) -#define PACKET_READ_CHOMP_NEWLINE (1u<<1) +#define PACKET_READ_GENTLE_ON_EOF (1u<<0) +#define PACKET_READ_CHOMP_NEWLINE (1u<<1) +#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2) int packet_read(int fd, char **src_buffer, size_t *src_len, char *buffer, unsigned size, int options); @@ -116,6 +121,21 @@ char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size); */ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out); +/* + * Receive multiplexed output stream over git native protocol. + * in_stream is the input stream from the remote, which carries data + * in pkt_line format with band designator. Demultiplex it into out + * and err and return error appropriately. Band #1 carries the + * primary payload. Things coming over band #2 is not necessarily + * error; they are usually informative message on the standard error + * stream, aka "verbose"). A message over band #3 is a signal that + * the remote died unexpectedly. A flush() concludes the stream. + * + * Returns SIDEBAND_FLUSH upon a normal conclusion, and SIDEBAND_PROTOCOL_ERROR + * or SIDEBAND_REMOTE_ERROR if an error occurred. + */ +int recv_sideband(const char *me, int in_stream, int out); + struct packet_reader { /* source file descriptor */ int fd; @@ -142,6 +162,9 @@ struct packet_reader { /* indicates if a line has been peeked */ int line_peeked; + + unsigned use_sideband : 1; + const char *me; }; /* @@ -179,4 +202,19 @@ extern enum packet_read_status packet_reader_peek(struct packet_reader *reader); #define LARGE_PACKET_DATA_MAX (LARGE_PACKET_MAX - 4) extern char packet_buffer[LARGE_PACKET_MAX]; +struct packet_writer { + int dest_fd; + unsigned use_sideband : 1; +}; + +void packet_writer_init(struct packet_writer *writer, int dest_fd); + +/* These functions die upon failure. 
*/ +__attribute__((format (printf, 2, 3))) +void packet_writer_write(struct packet_writer *writer, const char *fmt, ...); +__attribute__((format (printf, 2, 3))) +void packet_writer_error(struct packet_writer *writer, const char *fmt, ...); +void packet_writer_delim(struct packet_writer *writer); +void packet_writer_flush(struct packet_writer *writer); + #endif diff --git a/preload-index.c b/preload-index.c index c7dc3f2b9f..e73600ee78 100644 --- a/preload-index.c +++ b/preload-index.c @@ -8,6 +8,7 @@ #include "config.h" #include "progress.h" #include "thread-utils.h" +#include "repository.h" /* * Mostly randomly chosen maximum thread counts: we @@ -146,12 +147,12 @@ void preload_index(struct index_state *index, trace_performance_leave("preload index"); } -int read_index_preload(struct index_state *index, - const struct pathspec *pathspec, - unsigned int refresh_flags) +int repo_read_index_preload(struct repository *repo, + const struct pathspec *pathspec, + unsigned int refresh_flags) { - int retval = read_index(index); + int retval = repo_read_index(repo); - preload_index(index, pathspec, refresh_flags); + preload_index(repo->index, pathspec, refresh_flags); return retval; } @@ -595,14 +595,15 @@ static char *replace_encoding_header(char *buf, const char *encoding) return strbuf_detach(&tmp, NULL); } -const char *logmsg_reencode(const struct commit *commit, - char **commit_encoding, - const char *output_encoding) +const char *repo_logmsg_reencode(struct repository *r, + const struct commit *commit, + char **commit_encoding, + const char *output_encoding) { static const char *utf8 = "UTF-8"; const char *use_encoding; char *encoding; - const char *msg = get_commit_buffer(commit, NULL); + const char *msg = repo_get_commit_buffer(r, commit, NULL); char *out; if (!output_encoding || !*output_encoding) { @@ -630,7 +631,7 @@ const char *logmsg_reencode(const struct commit *commit, * the cached copy from get_commit_buffer, we need to duplicate it * to avoid munging the cached copy. 
*/ - if (msg == get_cached_commit_buffer(the_repository, commit, NULL)) + if (msg == get_cached_commit_buffer(r, commit, NULL)) out = xstrdup(msg); else out = (char *)msg; @@ -644,7 +645,7 @@ const char *logmsg_reencode(const struct commit *commit, */ out = reencode_string(msg, output_encoding, use_encoding); if (out) - unuse_commit_buffer(commit, msg); + repo_unuse_commit_buffer(r, commit, msg); } /* @@ -1527,9 +1528,10 @@ void userformat_find_requirements(const char *fmt, struct userformat_want *w) strbuf_release(&dummy); } -void format_commit_message(const struct commit *commit, - const char *format, struct strbuf *sb, - const struct pretty_print_context *pretty_ctx) +void repo_format_commit_message(struct repository *r, + const struct commit *commit, + const char *format, struct strbuf *sb, + const struct pretty_print_context *pretty_ctx) { struct format_commit_context context; const char *output_enc = pretty_ctx->output_encoding; @@ -1543,9 +1545,9 @@ void format_commit_message(const struct commit *commit, * convert a commit message to UTF-8 first * as far as 'format_commit_item' assumes it in UTF-8 */ - context.message = logmsg_reencode(commit, - &context.commit_encoding, - utf8); + context.message = repo_logmsg_reencode(r, commit, + &context.commit_encoding, + utf8); strbuf_expand(sb, format, format_commit_item, &context); rewrap_message_tail(sb, &context, 0, 0, 0); @@ -1569,7 +1571,7 @@ void format_commit_message(const struct commit *commit, } free(context.commit_encoding); - unuse_commit_buffer(commit, context.message); + repo_unuse_commit_buffer(r, commit, context.message); } static void pp_header(struct pretty_print_context *pp, @@ -104,9 +104,14 @@ void pp_remainder(struct pretty_print_context *pp, const char **msg_p, * Put the result to "sb". * Please use this function for custom formats. */ -void format_commit_message(const struct commit *commit, +void repo_format_commit_message(struct repository *r, + const struct commit *commit, const char *format, struct strbuf *sb, const struct pretty_print_context *context); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define format_commit_message(c, f, s, con) \ + repo_format_commit_message(the_repository, c, f, s, con) +#endif /* * Parse given arguments from "arg", check it for correctness and diff --git a/read-cache.c b/read-cache.c index bfff271a3d..a66c87bc7a 100644 --- a/read-cache.c +++ b/read-cache.c @@ -3,7 +3,6 @@ * * Copyright (C) Linus Torvalds, 2005 */ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "config.h" #include "diff.h" @@ -95,7 +94,6 @@ static struct mem_pool *find_mem_pool(struct index_state *istate) return *pool_ptr; } -struct index_state the_index; static const char *alternate_index_output; static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce) @@ -703,10 +701,10 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st, int intent_only = flags & ADD_CACHE_INTENT; int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE| (intent_only ? 
ADD_CACHE_NEW_ONLY : 0)); - int newflags = HASH_WRITE_OBJECT; + int hash_flags = HASH_WRITE_OBJECT; - if (flags & HASH_RENORMALIZE) - newflags |= HASH_RENORMALIZE; + if (flags & ADD_CACHE_RENORMALIZE) + hash_flags |= HASH_RENORMALIZE; if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode)) return error(_("%s: can only add regular files, symbolic links or git-directories"), path); @@ -762,7 +760,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st, } } if (!intent_only) { - if (index_path(istate, &ce->oid, path, st, newflags)) { + if (index_path(istate, &ce->oid, path, st, hash_flags)) { discard_cache_entry(ce); return error(_("unable to index file '%s'"), path); } @@ -1733,16 +1731,6 @@ static int read_index_extension(struct index_state *istate, return 0; } -int hold_locked_index(struct lock_file *lk, int lock_flags) -{ - return hold_lock_file_for_update(lk, get_index_file(), lock_flags); -} - -int read_index(struct index_state *istate) -{ - return read_index_from(istate, get_index_file(), get_git_dir()); -} - static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool, unsigned int version, struct ondisk_cache_entry *ondisk, @@ -2375,22 +2363,20 @@ int unmerged_index(const struct index_state *istate) return 0; } -int index_has_changes(struct index_state *istate, - struct tree *tree, - struct strbuf *sb) +int repo_index_has_changes(struct repository *repo, + struct tree *tree, + struct strbuf *sb) { + struct index_state *istate = repo->index; struct object_id cmp; int i; - if (istate != &the_index) { - BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first."); - } if (tree) cmp = tree->object.oid; if (tree || !get_oid_tree("HEAD", &cmp)) { struct diff_options opt; - repo_diff_setup(the_repository, &opt); + repo_diff_setup(repo, &opt); opt.flags.exit_with_status = 1; if (!sb) opt.flags.quick = 1; @@ -2664,9 +2650,9 @@ out: return 0; } -static int verify_index(const struct index_state *istate) +static int repo_verify_index(struct repository *repo) { - return verify_index_from(istate, get_index_file()); + return verify_index_from(repo->index, repo->index_file); } static int has_racy_timestamp(struct index_state *istate) @@ -2682,11 +2668,13 @@ static int has_racy_timestamp(struct index_state *istate) return 0; } -void update_index_if_able(struct index_state *istate, struct lock_file *lockfile) +void repo_update_index_if_able(struct repository *repo, + struct lock_file *lockfile) { - if ((istate->cache_changed || has_racy_timestamp(istate)) && - verify_index(istate)) - write_locked_index(istate, lockfile, COMMIT_LOCK); + if ((repo->index->cache_changed || + has_racy_timestamp(repo->index)) && + repo_verify_index(repo)) + write_locked_index(repo->index, lockfile, COMMIT_LOCK); else rollback_lock_file(lockfile); } @@ -3223,12 +3211,14 @@ out: * state can call this and check its return value, instead of calling * read_cache(). 
*/ -int read_index_unmerged(struct index_state *istate) +int repo_read_index_unmerged(struct repository *repo) { + struct index_state *istate; int i; int unmerged = 0; - read_index(istate); + repo_read_index(repo); + istate = repo->index; for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce = istate->cache[i]; struct cache_entry *new_ce; @@ -188,7 +188,7 @@ int ref_resolves_to_object(const char *refname, { if (flags & REF_ISBROKEN) return 0; - if (!has_sha1_file(oid->hash)) { + if (!has_object_file(oid)) { error(_("%s does not point to a valid object!"), refname); return 0; } diff --git a/remote-curl.c b/remote-curl.c index 6ff9c66b90..2e04d53ac8 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -204,7 +204,8 @@ static struct ref *parse_git_refs(struct discovery *heads, int for_push) packet_reader_init(&reader, -1, heads->buf, heads->len, PACKET_READ_CHOMP_NEWLINE | - PACKET_READ_GENTLE_ON_EOF); + PACKET_READ_GENTLE_ON_EOF | + PACKET_READ_DIE_ON_ERR_PACKET); heads->version = discover_version(&reader); switch (heads->version) { @@ -408,28 +409,37 @@ static struct discovery *discover_refs(const char *service, int for_push) if (maybe_smart && (5 <= last->len && last->buf[4] == '#') && !strbuf_cmp(&exp, &type)) { - char *line; + struct packet_reader reader; + packet_reader_init(&reader, -1, last->buf, last->len, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_DIE_ON_ERR_PACKET); /* * smart HTTP response; validate that the service * pkt-line matches our request. */ - line = packet_read_line_buf(&last->buf, &last->len, NULL); - if (!line) + if (packet_reader_read(&reader) != PACKET_READ_NORMAL) die("invalid server response; expected service, got flush packet"); strbuf_reset(&exp); strbuf_addf(&exp, "# service=%s", service); - if (strcmp(line, exp.buf)) - die("invalid server response; got '%s'", line); + if (strcmp(reader.line, exp.buf)) + die("invalid server response; got '%s'", reader.line); strbuf_release(&exp); /* The header can include additional metadata lines, up * until a packet flush marker. Ignore these now, but * in the future we might start to scan them. */ - while (packet_read_line_buf(&last->buf, &last->len, NULL)) - ; + for (;;) { + packet_reader_read(&reader); + if (reader.pktlen <= 0) { + break; + } + } + + last->buf = reader.src_buffer; + last->len = reader.src_len; last->proto_git = 1; } else if (maybe_smart && @@ -1194,7 +1204,8 @@ static void proxy_state_init(struct proxy_state *p, const char *service_name, p->headers = curl_slist_append(p->headers, buf.buf); packet_reader_init(&p->reader, p->in, NULL, 0, - PACKET_READ_GENTLE_ON_EOF); + PACKET_READ_GENTLE_ON_EOF | + PACKET_READ_DIE_ON_ERR_PACKET); strbuf_release(&buf); } diff --git a/repository.c b/repository.c index 20c509a922..65e6f8b8fd 100644 --- a/repository.c +++ b/repository.c @@ -1,13 +1,20 @@ +/* + * not really _using_ the compat macros, just make sure the_index + * declaration matches the definition in this file. 
+ */ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "repository.h" #include "object-store.h" #include "config.h" #include "object.h" +#include "lockfile.h" #include "submodule-config.h" /* The main repository */ static struct repository the_repo; struct repository *the_repository; +struct index_state the_index; void initialize_the_repository(void) { @@ -256,3 +263,12 @@ int repo_read_index(struct repository *repo) return read_index_from(repo->index, repo->index_file, repo->gitdir); } + +int repo_hold_locked_index(struct repository *repo, + struct lock_file *lf, + int flags) +{ + if (!repo->index_file) + BUG("the repo hasn't been setup"); + return hold_lock_file_for_update(lf, repo->index_file, flags); +} diff --git a/repository.h b/repository.h index 0e482b7d49..8981649d43 100644 --- a/repository.h +++ b/repository.h @@ -6,6 +6,8 @@ struct config_set; struct git_hash_algo; struct index_state; +struct lock_file; +struct pathspec; struct raw_object_store; struct submodule_cache; @@ -138,5 +140,19 @@ void repo_clear(struct repository *repo); * populated then the number of entries will simply be returned. */ int repo_read_index(struct repository *repo); +int repo_hold_locked_index(struct repository *repo, + struct lock_file *lf, + int flags); + +int repo_read_index_preload(struct repository *, + const struct pathspec *pathspec, + unsigned refresh_flags); +int repo_read_index_unmerged(struct repository *); +/* + * Opportunistically update the index but do not complain if we can't. + * The lockfile is always committed or rolled back. + */ +void repo_update_index_if_able(struct repository *, struct lock_file *); + #endif /* REPOSITORY_H */ @@ -561,7 +561,7 @@ static int find_conflict(struct repository *r, struct string_list *conflict) { int i; - if (read_index(r->index) < 0) + if (repo_read_index(r) < 0) return error(_("index file corrupt")); for (i = 0; i < r->index->cache_nr;) { @@ -595,7 +595,7 @@ int rerere_remaining(struct repository *r, struct string_list *merge_rr) if (setup_rerere(r, merge_rr, RERERE_READONLY)) return 0; - if (read_index(r->index) < 0) + if (repo_read_index(r) < 0) return error(_("index file corrupt")); for (i = 0; i < r->index->cache_nr;) { @@ -705,7 +705,7 @@ static void update_paths(struct repository *r, struct string_list *update) struct lock_file index_lock = LOCK_INIT; int i; - hold_locked_index(&index_lock, LOCK_DIE_ON_ERROR); + repo_hold_locked_index(r, &index_lock, LOCK_DIE_ON_ERROR); for (i = 0; i < update->nr; i++) { struct string_list_item *item = &update->items[i]; @@ -1107,7 +1107,7 @@ int rerere_forget(struct repository *r, struct pathspec *pathspec) struct string_list conflict = STRING_LIST_INIT_DUP; struct string_list merge_rr = STRING_LIST_INIT_DUP; - if (read_index(r->index) < 0) + if (repo_read_index(r) < 0) return error(_("index file corrupt")); fd = setup_rerere(r, &merge_rr, RERERE_NOAUTOUPDATE); diff --git a/revision.c b/revision.c index 119947ced0..162d511d46 100644 --- a/revision.c +++ b/revision.c @@ -27,6 +27,7 @@ #include "commit-reach.h" #include "commit-graph.h" #include "prio-queue.h" +#include "hashmap.h" volatile show_early_output_fn_t show_early_output; @@ -99,6 +100,148 @@ void mark_tree_uninteresting(struct repository *r, struct tree *tree) mark_tree_contents_uninteresting(r, tree); } +struct path_and_oids_entry { + struct hashmap_entry ent; + char *path; + struct oidset trees; +}; + +static int path_and_oids_cmp(const void *hashmap_cmp_fn_data, + const struct path_and_oids_entry *e1, + const struct 
path_and_oids_entry *e2, + const void *keydata) +{ + return strcmp(e1->path, e2->path); +} + +static void paths_and_oids_init(struct hashmap *map) +{ + hashmap_init(map, (hashmap_cmp_fn) path_and_oids_cmp, NULL, 0); +} + +static void paths_and_oids_clear(struct hashmap *map) +{ + struct hashmap_iter iter; + struct path_and_oids_entry *entry; + hashmap_iter_init(map, &iter); + + while ((entry = (struct path_and_oids_entry *)hashmap_iter_next(&iter))) { + oidset_clear(&entry->trees); + free(entry->path); + } + + hashmap_free(map, 1); +} + +static void paths_and_oids_insert(struct hashmap *map, + const char *path, + const struct object_id *oid) +{ + int hash = strhash(path); + struct path_and_oids_entry key; + struct path_and_oids_entry *entry; + + hashmap_entry_init(&key, hash); + + /* use a shallow copy for the lookup */ + key.path = (char *)path; + oidset_init(&key.trees, 0); + + if (!(entry = (struct path_and_oids_entry *)hashmap_get(map, &key, NULL))) { + entry = xcalloc(1, sizeof(struct path_and_oids_entry)); + hashmap_entry_init(entry, hash); + entry->path = xstrdup(key.path); + oidset_init(&entry->trees, 16); + hashmap_put(map, entry); + } + + oidset_insert(&entry->trees, oid); +} + +static void add_children_by_path(struct repository *r, + struct tree *tree, + struct hashmap *map) +{ + struct tree_desc desc; + struct name_entry entry; + + if (!tree) + return; + + if (parse_tree_gently(tree, 1) < 0) + return; + + init_tree_desc(&desc, tree->buffer, tree->size); + while (tree_entry(&desc, &entry)) { + switch (object_type(entry.mode)) { + case OBJ_TREE: + paths_and_oids_insert(map, entry.path, &entry.oid); + + if (tree->object.flags & UNINTERESTING) { + struct tree *child = lookup_tree(r, &entry.oid); + if (child) + child->object.flags |= UNINTERESTING; + } + break; + case OBJ_BLOB: + if (tree->object.flags & UNINTERESTING) { + struct blob *child = lookup_blob(r, &entry.oid); + if (child) + child->object.flags |= UNINTERESTING; + } + break; + default: + /* Subproject commit - not in this repository */ + break; + } + } + + free_tree_buffer(tree); +} + +void mark_trees_uninteresting_sparse(struct repository *r, + struct oidset *trees) +{ + unsigned has_interesting = 0, has_uninteresting = 0; + struct hashmap map; + struct hashmap_iter map_iter; + struct path_and_oids_entry *entry; + struct object_id *oid; + struct oidset_iter iter; + + oidset_iter_init(trees, &iter); + while ((!has_interesting || !has_uninteresting) && + (oid = oidset_iter_next(&iter))) { + struct tree *tree = lookup_tree(r, oid); + + if (!tree) + continue; + + if (tree->object.flags & UNINTERESTING) + has_uninteresting = 1; + else + has_interesting = 1; + } + + /* Do not walk unless we have both types of trees. */ + if (!has_uninteresting || !has_interesting) + return; + + paths_and_oids_init(&map); + + oidset_iter_init(trees, &iter); + while ((oid = oidset_iter_next(&iter))) { + struct tree *tree = lookup_tree(r, oid); + add_children_by_path(r, tree, &map); + } + + hashmap_iter_init(&map, &map_iter); + while ((entry = hashmap_iter_next(&map_iter))) + mark_trees_uninteresting_sparse(r, &entry->trees); + + paths_and_oids_clear(&map); +} + struct commit_stack { struct commit **items; size_t nr, alloc; @@ -213,7 +356,20 @@ static struct object *get_reference(struct rev_info *revs, const char *name, { struct object *object; - object = parse_object(revs->repo, oid); + /* + * If the repository has commit graphs, repo_parse_commit() avoids + * reading the object buffer, so use it whenever possible. 
+ */ + if (oid_object_info(revs->repo, oid, NULL) == OBJ_COMMIT) { + struct commit *c = lookup_commit(revs->repo, oid); + if (!repo_parse_commit(revs->repo, c)) + object = (struct object *) c; + else + object = NULL; + } else { + object = parse_object(revs->repo, oid); + } + if (!object) { if (revs->ignore_missing) return object; @@ -1384,7 +1540,7 @@ void add_index_objects_to_pending(struct rev_info *revs, unsigned int flags) { struct worktree **worktrees, **p; - read_index(revs->repo->index); + repo_read_index(revs->repo); do_add_index_objects_to_pending(revs, revs->repo->index, flags); if (revs->single_worktree) @@ -1531,7 +1687,7 @@ static void prepare_show_merge(struct rev_info *revs) head->object.flags |= SYMMETRIC_LEFT; if (!istate->cache_nr) - read_index(istate); + repo_read_index(revs->repo); for (i = 0; i < istate->cache_nr; i++) { const struct cache_entry *ce = istate->cache[i]; if (!ce_stage(ce)) @@ -1590,8 +1746,8 @@ static int handle_dotdot_1(const char *arg, char *dotdot, if (!*b_name) b_name = "HEAD"; - if (get_oid_with_context(a_name, oc_flags, &a_oid, a_oc) || - get_oid_with_context(b_name, oc_flags, &b_oid, b_oc)) + if (get_oid_with_context(revs->repo, a_name, oc_flags, &a_oid, a_oc) || + get_oid_with_context(revs->repo, b_name, oc_flags, &b_oid, b_oc)) return -1; if (!cant_be_filename) { @@ -1725,7 +1881,7 @@ int handle_revision_arg(const char *arg_, struct rev_info *revs, int flags, unsi if (revarg_opt & REVARG_COMMITTISH) get_sha1_flags |= GET_OID_COMMITTISH; - if (get_oid_with_context(arg, get_sha1_flags, &oid, &oc)) + if (get_oid_with_context(revs->repo, arg, get_sha1_flags, &oid, &oc)) return revs->ignore_missing ? 0 : -1; if (!cant_be_filename) verify_non_filename(revs->prefix, arg); @@ -2458,7 +2614,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s struct object_id oid; struct object *object; struct object_context oc; - if (get_oid_with_context(revs->def, 0, &oid, &oc)) + if (get_oid_with_context(revs->repo, revs->def, 0, &oid, &oc)) diagnose_missing_default(revs->def); object = get_reference(revs, revs->def, &oid, 0); add_pending_object_with_mode(revs, object, revs->def, oc.mode); diff --git a/revision.h b/revision.h index 52e5a88ff5..d32d62abc6 100644 --- a/revision.h +++ b/revision.h @@ -67,6 +67,7 @@ struct rev_cmdline_info { #define REVISION_WALK_NO_WALK_SORTED 1 #define REVISION_WALK_NO_WALK_UNSORTED 2 +struct oidset; struct topo_walk_info; struct rev_info { @@ -327,6 +328,7 @@ void put_revision_mark(const struct rev_info *revs, void mark_parents_uninteresting(struct commit *commit); void mark_tree_uninteresting(struct repository *r, struct tree *tree); +void mark_trees_uninteresting_sparse(struct repository *r, struct oidset *trees); void show_object_with_name(FILE *, struct object *, const char *); diff --git a/send-pack.c b/send-pack.c index f692686770..6dc16c3211 100644 --- a/send-pack.c +++ b/send-pack.c @@ -40,7 +40,7 @@ int option_parse_push_signed(const struct option *opt, static void feed_object(const struct object_id *oid, FILE *fh, int negative) { - if (negative && !has_sha1_file(oid->hash)) + if (negative && !has_object_file(oid)) return; if (negative) @@ -135,38 +135,36 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc return 0; } -static int receive_unpack_status(int in) +static int receive_unpack_status(struct packet_reader *reader) { - const char *line = packet_read_line(in, NULL); - if (!line) + if (packet_reader_read(reader) != PACKET_READ_NORMAL) return 
error(_("unexpected flush packet while reading remote unpack status")); - if (!skip_prefix(line, "unpack ", &line)) - return error(_("unable to parse remote unpack status: %s"), line); - if (strcmp(line, "ok")) - return error(_("remote unpack failed: %s"), line); + if (!skip_prefix(reader->line, "unpack ", &reader->line)) + return error(_("unable to parse remote unpack status: %s"), reader->line); + if (strcmp(reader->line, "ok")) + return error(_("remote unpack failed: %s"), reader->line); return 0; } -static int receive_status(int in, struct ref *refs) +static int receive_status(struct packet_reader *reader, struct ref *refs) { struct ref *hint; int ret; hint = NULL; - ret = receive_unpack_status(in); + ret = receive_unpack_status(reader); while (1) { - char *refname; + const char *refname; char *msg; - char *line = packet_read_line(in, NULL); - if (!line) + if (packet_reader_read(reader) != PACKET_READ_NORMAL) break; - if (!starts_with(line, "ok ") && !starts_with(line, "ng ")) { - error("invalid ref status from remote: %s", line); + if (!starts_with(reader->line, "ok ") && !starts_with(reader->line, "ng ")) { + error("invalid ref status from remote: %s", reader->line); ret = -1; break; } - refname = line + 3; + refname = reader->line + 3; msg = strchr(refname, ' '); if (msg) *msg++ = '\0'; @@ -187,7 +185,7 @@ static int receive_status(int in, struct ref *refs) continue; } - if (line[0] == 'o' && line[1] == 'k') + if (reader->line[0] == 'o' && reader->line[1] == 'k') hint->status = REF_STATUS_OK; else { hint->status = REF_STATUS_REMOTE_REJECT; @@ -390,6 +388,7 @@ int send_pack(struct send_pack_args *args, int ret; struct async demux; const char *push_cert_nonce = NULL; + struct packet_reader reader; /* Does the other end support the reporting? */ if (server_supports("report-status")) @@ -559,6 +558,10 @@ int send_pack(struct send_pack_args *args, in = demux.out; } + packet_reader_init(&reader, in, NULL, 0, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_DIE_ON_ERR_PACKET); + if (need_pack_data && cmds_sent) { if (pack_objects(out, remote_refs, extra_have, args) < 0) { for (ref = remote_refs; ref; ref = ref->next) @@ -573,7 +576,7 @@ int send_pack(struct send_pack_args *args, * are failing, and just want the error() side effects. 
*/ if (status_report) - receive_unpack_status(in); + receive_unpack_status(&reader); if (use_sideband) { close(demux.out); @@ -590,7 +593,7 @@ int send_pack(struct send_pack_args *args, packet_flush(out); if (status_report && cmds_sent) - ret = receive_status(in, remote_refs); + ret = receive_status(&reader, remote_refs); else ret = 0; if (args->stateless_rpc) diff --git a/sequencer.c b/sequencer.c index 213815dbfc..972402e8c0 100644 --- a/sequencer.c +++ b/sequencer.c @@ -150,6 +150,7 @@ static GIT_PATH_FUNC(rebase_path_refs_to_delete, "rebase-merge/refs-to-delete") static GIT_PATH_FUNC(rebase_path_gpg_sign_opt, "rebase-merge/gpg_sign_opt") static GIT_PATH_FUNC(rebase_path_orig_head, "rebase-merge/orig-head") static GIT_PATH_FUNC(rebase_path_verbose, "rebase-merge/verbose") +static GIT_PATH_FUNC(rebase_path_quiet, "rebase-merge/quiet") static GIT_PATH_FUNC(rebase_path_signoff, "rebase-merge/signoff") static GIT_PATH_FUNC(rebase_path_head_name, "rebase-merge/head-name") static GIT_PATH_FUNC(rebase_path_onto, "rebase-merge/onto") @@ -157,7 +158,6 @@ static GIT_PATH_FUNC(rebase_path_autostash, "rebase-merge/autostash") static GIT_PATH_FUNC(rebase_path_strategy, "rebase-merge/strategy") static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts") static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate") -static GIT_PATH_FUNC(rebase_path_quiet, "rebase-merge/quiet") static GIT_PATH_FUNC(rebase_path_reschedule_failed_exec, "rebase-merge/reschedule-failed-exec") static int git_sequencer_config(const char *k, const char *v, void *cb) @@ -447,9 +447,9 @@ static struct tree *empty_tree(struct repository *r) return lookup_tree(r, the_hash_algo->empty_tree); } -static int error_dirty_index(struct index_state *istate, struct replay_opts *opts) +static int error_dirty_index(struct repository *repo, struct replay_opts *opts) { - if (read_index_unmerged(istate)) + if (repo_read_index_unmerged(repo)) return error_resolve_conflict(_(action_name(opts))); error(_("your local changes would be overwritten by %s."), @@ -484,7 +484,7 @@ static int fast_forward_to(struct repository *r, struct strbuf sb = STRBUF_INIT; struct strbuf err = STRBUF_INIT; - read_index(r->index); + repo_read_index(r); if (checkout_fast_forward(r, from, to, 1)) return -1; /* the callee should have complained already */ @@ -541,12 +541,12 @@ static int do_recursive_merge(struct repository *r, char **xopt; struct lock_file index_lock = LOCK_INIT; - if (hold_locked_index(&index_lock, LOCK_REPORT_ON_ERROR) < 0) + if (repo_hold_locked_index(r, &index_lock, LOCK_REPORT_ON_ERROR) < 0) return -1; - read_index(r->index); + repo_read_index(r); - init_merge_options(&o); + init_merge_options(&o, r); o.ancestor = base ? base_label : "(empty tree)"; o.branch1 = "HEAD"; o.branch2 = next ? 
next_label : "(empty tree)"; @@ -1116,7 +1116,8 @@ static int run_rewrite_hook(const struct object_id *oldoid, return finish_command(&proc); } -void commit_post_rewrite(const struct commit *old_head, +void commit_post_rewrite(struct repository *r, + const struct commit *old_head, const struct object_id *new_head) { struct notes_rewrite_cfg *cfg; @@ -1125,7 +1126,7 @@ void commit_post_rewrite(const struct commit *old_head, if (cfg) { /* we are amending, so old_head is not NULL */ copy_note_for_rewrite(cfg, &old_head->object.oid, new_head); - finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'"); + finish_copy_notes_for_rewrite(r, cfg, "Notes added by 'git commit --amend'"); } run_rewrite_hook(&old_head->object.oid, new_head); } @@ -1406,7 +1407,7 @@ static int try_to_commit(struct repository *r, } if (flags & AMEND_MSG) - commit_post_rewrite(current_head, oid); + commit_post_rewrite(r, current_head, oid); out: free_commit_extra_headers(extra); @@ -1768,7 +1769,7 @@ static int do_pick_commit(struct repository *r, oidcpy(&head, the_hash_algo->empty_tree); if (index_differs_from(r, unborn ? empty_tree_oid_hex() : "HEAD", NULL, 0)) - return error_dirty_index(r->index, opts); + return error_dirty_index(r, opts); } discard_index(r->index); @@ -1998,8 +1999,8 @@ static int read_and_refresh_cache(struct repository *r, struct replay_opts *opts) { struct lock_file index_lock = LOCK_INIT; - int index_fd = hold_locked_index(&index_lock, 0); - if (read_index(r->index) < 0) { + int index_fd = repo_hold_locked_index(r, &index_lock, 0); + if (repo_read_index(r) < 0) { rollback_lock_file(&index_lock); return error(_("git %s: failed to read the index"), _(action_name(opts))); @@ -2390,6 +2391,9 @@ static int read_populate_opts(struct replay_opts *opts) if (file_exists(rebase_path_verbose())) opts->verbose = 1; + if (file_exists(rebase_path_quiet())) + opts->quiet = 1; + if (file_exists(rebase_path_signoff())) { opts->allow_ff = 0; opts->signoff = 1; @@ -2460,9 +2464,6 @@ int write_basic_state(struct replay_opts *opts, const char *head_name, if (quiet) write_file(rebase_path_quiet(), "%s\n", quiet); - else - write_file(rebase_path_quiet(), "\n"); - if (opts->verbose) write_file(rebase_path_verbose(), "%s", ""); if (opts->strategy) @@ -2865,7 +2866,7 @@ static int do_exec(struct repository *r, const char *command_line) child_env.argv); /* force re-reading of the cache */ - if (discard_index(r->index) < 0 || read_index(r->index) < 0) + if (discard_index(r->index) < 0 || repo_read_index(r) < 0) return error(_("could not read index")); dirty = require_clean_work_tree(r, "rebase", NULL, 1, 1); @@ -2989,7 +2990,7 @@ static int do_reset(struct repository *r, struct unpack_trees_options unpack_tree_opts; int ret = 0; - if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) + if (repo_hold_locked_index(r, &lock, LOCK_REPORT_ON_ERROR) < 0) return -1; if (len == 10 && !strncmp("[new root]", name, len)) { @@ -3034,7 +3035,7 @@ static int do_reset(struct repository *r, unpack_tree_opts.merge = 1; unpack_tree_opts.update = 1; - if (read_index_unmerged(r->index)) { + if (repo_read_index_unmerged(r)) { rollback_lock_file(&lock); strbuf_release(&ref_name); return error_resolve_conflict(_(action_name(opts))); @@ -3107,7 +3108,7 @@ static int do_merge(struct repository *r, static struct lock_file lock; const char *p; - if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) { + if (repo_hold_locked_index(r, &lock, LOCK_REPORT_ON_ERROR) < 0) { ret = -1; goto leave_merge; } @@ -3288,7 +3289,7 @@ static int 
do_merge(struct repository *r, /* force re-reading of the cache */ if (!ret && (discard_index(r->index) < 0 || - read_index(r->index) < 0)) + repo_read_index(r) < 0)) ret = error(_("could not read index")); goto leave_merge; } @@ -3310,8 +3311,8 @@ static int do_merge(struct repository *r, commit_list_insert(j->item, &reversed); free_commit_list(bases); - read_index(r->index); - init_merge_options(&o); + repo_read_index(r); + init_merge_options(&o, r); o.branch1 = "HEAD"; o.branch2 = ref_name.buf; o.buffer_output = 2; @@ -3555,10 +3556,11 @@ static int pick_commits(struct repository *r, fprintf(f, "%d\n", todo_list->done_nr); fclose(f); } - fprintf(stderr, "Rebasing (%d/%d)%s", - todo_list->done_nr, - todo_list->total_nr, - opts->verbose ? "\n" : "\r"); + if (!opts->quiet) + fprintf(stderr, "Rebasing (%d/%d)%s", + todo_list->done_nr, + todo_list->total_nr, + opts->verbose ? "\n" : "\r"); } unlink(rebase_path_message()); unlink(rebase_path_author_script()); @@ -3792,8 +3794,10 @@ cleanup_head_ref: } apply_autostash(opts); - fprintf(stderr, "Successfully rebased and updated %s.\n", - head_ref.buf); + if (!opts->quiet) + fprintf(stderr, + "Successfully rebased and updated %s.\n", + head_ref.buf); strbuf_release(&buf); strbuf_release(&head_ref); @@ -3984,7 +3988,7 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts) goto release_todo_list; } if (index_differs_from(r, "HEAD", NULL, 0)) { - res = error_dirty_index(r->index, opts); + res = error_dirty_index(r, opts); goto release_todo_list; } todo_list.current++; diff --git a/sequencer.h b/sequencer.h index 5d7bc04751..93e891309b 100644 --- a/sequencer.h +++ b/sequencer.h @@ -40,6 +40,7 @@ struct replay_opts { int allow_empty_message; int keep_redundant_commits; int verbose; + int quiet; int reschedule_failed_exec; int mainline; @@ -125,7 +126,8 @@ int update_head_with_reflog(const struct commit *old_head, const struct object_id *new_head, const char *action, const struct strbuf *msg, struct strbuf *err); -void commit_post_rewrite(const struct commit *current_head, +void commit_post_rewrite(struct repository *r, + const struct commit *current_head, const struct object_id *new_head); int prepare_branch_to_be_rebased(struct replay_opts *opts, const char *commit); @@ -167,7 +167,8 @@ static int process_request(void) packet_reader_init(&reader, 0, NULL, 0, PACKET_READ_CHOMP_NEWLINE | - PACKET_READ_GENTLE_ON_EOF); + PACKET_READ_GENTLE_ON_EOF | + PACKET_READ_DIE_ON_ERR_PACKET); /* * Check to see if the client closed their end before sending another @@ -175,7 +176,7 @@ static int process_request(void) */ if (packet_reader_peek(&reader) == PACKET_READ_EOF) return 1; - reader.options = PACKET_READ_CHOMP_NEWLINE; + reader.options &= ~PACKET_READ_GENTLE_ON_EOF; while (state != PROCESS_REQUEST_DONE) { switch (packet_reader_peek(&reader)) { @@ -39,7 +39,7 @@ static int abspath_part_inside_repo(char *path) off = offset_1st_component(path); /* check if work tree is already the prefix */ - if (wtlen <= len && !strncmp(path, work_tree, wtlen)) { + if (wtlen <= len && !fspathncmp(path, work_tree, wtlen)) { if (path[wtlen] == '/') { memmove(path, path + wtlen + 1, len - wtlen); return 0; @@ -59,7 +59,7 @@ static int abspath_part_inside_repo(char *path) path++; if (*path == '/') { *path = '\0'; - if (strcmp(real_path(path0), work_tree) == 0) { + if (fspathcmp(real_path(path0), work_tree) == 0) { memmove(path0, path + 1, len - (path - path0)); return 0; } @@ -68,7 +68,7 @@ static int abspath_part_inside_repo(char *path) } /* check whole path */ - 
if (strcmp(real_path(path0), work_tree) == 0) { + if (fspathcmp(real_path(path0), work_tree) == 0) { *path0 = '\0'; return 0; } diff --git a/sha1-file.c b/sha1-file.c index 10f9e9936a..494606f771 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -192,7 +192,7 @@ int hash_algo_by_id(uint32_t format_id) /* * This is meant to hold a *small* number of objects that you would - * want read_sha1_file() to be able to return, but yet you do not want + * want read_object_file() to be able to return, but yet you do not want * to write them into the object store (e.g. a browse-only * application). */ @@ -401,12 +401,12 @@ out: return ret; } -static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1) +static void fill_loose_path(struct strbuf *buf, const struct object_id *oid) { int i; for (i = 0; i < the_hash_algo->rawsz; i++) { static char hex[] = "0123456789abcdef"; - unsigned int val = sha1[i]; + unsigned int val = oid->hash[i]; strbuf_addch(buf, hex[val >> 4]); strbuf_addch(buf, hex[val & 0xf]); if (!i) @@ -416,19 +416,19 @@ static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1) static const char *odb_loose_path(struct object_directory *odb, struct strbuf *buf, - const unsigned char *sha1) + const struct object_id *oid) { strbuf_reset(buf); strbuf_addstr(buf, odb->path); strbuf_addch(buf, '/'); - fill_sha1_path(buf, sha1); + fill_loose_path(buf, oid); return buf->buf; } const char *loose_object_path(struct repository *r, struct strbuf *buf, - const unsigned char *sha1) + const struct object_id *oid) { - return odb_loose_path(r->objects->odb, buf, sha1); + return odb_loose_path(r->objects->odb, buf, oid); } /* @@ -789,7 +789,7 @@ static int check_and_freshen_odb(struct object_directory *odb, int freshen) { static struct strbuf path = STRBUF_INIT; - odb_loose_path(odb, &path, oid->hash); + odb_loose_path(odb, &path, oid); return check_and_freshen_file(path.buf, freshen); } @@ -866,8 +866,8 @@ void *xmmap(void *start, size_t length, /* * With an in-core object data in "map", rehash it to make sure the - * object name actually matches "sha1" to detect object corruption. - * With "map" == NULL, try reading the object named with "sha1" using + * object name actually matches "oid" to detect object corruption. + * With "map" == NULL, try reading the object named with "oid" using * the streaming interface and rehash it to do the same. */ int check_object_signature(const struct object_id *oid, void *map, @@ -940,22 +940,22 @@ int git_open_cloexec(const char *name, int flags) } /* - * Find "sha1" as a loose object in the local repository or in an alternate. + * Find "oid" as a loose object in the local repository or in an alternate. * Returns 0 on success, negative on failure. * * The "path" out-parameter will give the path of the object we found (if any). * Note that it may point to static storage and is only valid until another - * call to stat_sha1_file(). + * call to stat_loose_object(). 
*/ -static int stat_sha1_file(struct repository *r, const unsigned char *sha1, - struct stat *st, const char **path) +static int stat_loose_object(struct repository *r, const struct object_id *oid, + struct stat *st, const char **path) { struct object_directory *odb; static struct strbuf buf = STRBUF_INIT; prepare_alt_odb(r); for (odb = r->objects->odb; odb; odb = odb->next) { - *path = odb_loose_path(odb, &buf, sha1); + *path = odb_loose_path(odb, &buf, oid); if (!lstat(*path, st)) return 0; } @@ -964,11 +964,11 @@ static int stat_sha1_file(struct repository *r, const unsigned char *sha1, } /* - * Like stat_sha1_file(), but actually open the object and return the + * Like stat_loose_object(), but actually open the object and return the * descriptor. See the caveats on the "path" parameter above. */ -static int open_sha1_file(struct repository *r, - const unsigned char *sha1, const char **path) +static int open_loose_object(struct repository *r, + const struct object_id *oid, const char **path) { int fd; struct object_directory *odb; @@ -977,7 +977,7 @@ static int open_sha1_file(struct repository *r, prepare_alt_odb(r); for (odb = r->objects->odb; odb; odb = odb->next) { - *path = odb_loose_path(odb, &buf, sha1); + *path = odb_loose_path(odb, &buf, oid); fd = git_open(*path); if (fd >= 0) return fd; @@ -990,16 +990,13 @@ static int open_sha1_file(struct repository *r, } static int quick_has_loose(struct repository *r, - const unsigned char *sha1) + const struct object_id *oid) { - struct object_id oid; struct object_directory *odb; - hashcpy(oid.hash, sha1); - prepare_alt_odb(r); for (odb = r->objects->odb; odb; odb = odb->next) { - if (oid_array_lookup(odb_loose_cache(odb, &oid), &oid) >= 0) + if (oid_array_lookup(odb_loose_cache(odb, oid), oid) >= 0) return 1; } return 0; @@ -1007,10 +1004,10 @@ static int quick_has_loose(struct repository *r, /* * Map the loose object at "path" if it is not NULL, or the path found by - * searching for a loose object named "sha1". + * searching for a loose object named "oid". 
*/ -static void *map_sha1_file_1(struct repository *r, const char *path, - const unsigned char *sha1, unsigned long *size) +static void *map_loose_object_1(struct repository *r, const char *path, + const struct object_id *oid, unsigned long *size) { void *map; int fd; @@ -1018,7 +1015,7 @@ static void *map_sha1_file_1(struct repository *r, const char *path, if (path) fd = git_open(path); else - fd = open_sha1_file(r, sha1, &path); + fd = open_loose_object(r, oid, &path); map = NULL; if (fd >= 0) { struct stat st; @@ -1038,15 +1035,16 @@ static void *map_sha1_file_1(struct repository *r, const char *path, return map; } -void *map_sha1_file(struct repository *r, - const unsigned char *sha1, unsigned long *size) +void *map_loose_object(struct repository *r, + const struct object_id *oid, + unsigned long *size) { - return map_sha1_file_1(r, NULL, sha1, size); + return map_loose_object_1(r, NULL, oid, size); } -static int unpack_sha1_short_header(git_zstream *stream, - unsigned char *map, unsigned long mapsize, - void *buffer, unsigned long bufsiz) +static int unpack_loose_short_header(git_zstream *stream, + unsigned char *map, unsigned long mapsize, + void *buffer, unsigned long bufsiz) { /* Get the data stream */ memset(stream, 0, sizeof(*stream)); @@ -1059,12 +1057,12 @@ static int unpack_sha1_short_header(git_zstream *stream, return git_inflate(stream, 0); } -int unpack_sha1_header(git_zstream *stream, - unsigned char *map, unsigned long mapsize, - void *buffer, unsigned long bufsiz) +int unpack_loose_header(git_zstream *stream, + unsigned char *map, unsigned long mapsize, + void *buffer, unsigned long bufsiz) { - int status = unpack_sha1_short_header(stream, map, mapsize, - buffer, bufsiz); + int status = unpack_loose_short_header(stream, map, mapsize, + buffer, bufsiz); if (status < Z_OK) return status; @@ -1075,13 +1073,13 @@ int unpack_sha1_header(git_zstream *stream, return 0; } -static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map, - unsigned long mapsize, void *buffer, - unsigned long bufsiz, struct strbuf *header) +static int unpack_loose_header_to_strbuf(git_zstream *stream, unsigned char *map, + unsigned long mapsize, void *buffer, + unsigned long bufsiz, struct strbuf *header) { int status; - status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz); + status = unpack_loose_short_header(stream, map, mapsize, buffer, bufsiz); if (status < Z_OK) return -1; @@ -1111,7 +1109,9 @@ static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map, return -1; } -static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1) +static void *unpack_loose_rest(git_zstream *stream, + void *buffer, unsigned long size, + const struct object_id *oid) { int bytes = strlen(buffer) + 1; unsigned char *buf = xmallocz(size); @@ -1148,10 +1148,10 @@ static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long s } if (status < 0) - error(_("corrupt loose object '%s'"), sha1_to_hex(sha1)); + error(_("corrupt loose object '%s'"), oid_to_hex(oid)); else if (stream->avail_in) error(_("garbage at end of loose object '%s'"), - sha1_to_hex(sha1)); + oid_to_hex(oid)); free(buf); return NULL; } @@ -1161,8 +1161,8 @@ static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long s * too permissive for what we want to check. So do an anal * object header parse by hand. 
*/ -static int parse_sha1_header_extended(const char *hdr, struct object_info *oi, - unsigned int flags) +static int parse_loose_header_extended(const char *hdr, struct object_info *oi, + unsigned int flags) { const char *type_buf = hdr; unsigned long size; @@ -1222,17 +1222,17 @@ static int parse_sha1_header_extended(const char *hdr, struct object_info *oi, return *hdr ? -1 : type; } -int parse_sha1_header(const char *hdr, unsigned long *sizep) +int parse_loose_header(const char *hdr, unsigned long *sizep) { struct object_info oi = OBJECT_INFO_INIT; oi.sizep = sizep; - return parse_sha1_header_extended(hdr, &oi, 0); + return parse_loose_header_extended(hdr, &oi, 0); } -static int sha1_loose_object_info(struct repository *r, - const unsigned char *sha1, - struct object_info *oi, int flags) +static int loose_object_info(struct repository *r, + const struct object_id *oid, + struct object_info *oi, int flags) { int status = 0; unsigned long mapsize; @@ -1257,15 +1257,15 @@ static int sha1_loose_object_info(struct repository *r, const char *path; struct stat st; if (!oi->disk_sizep && (flags & OBJECT_INFO_QUICK)) - return quick_has_loose(r, sha1) ? 0 : -1; - if (stat_sha1_file(r, sha1, &st, &path) < 0) + return quick_has_loose(r, oid) ? 0 : -1; + if (stat_loose_object(r, oid, &st, &path) < 0) return -1; if (oi->disk_sizep) *oi->disk_sizep = st.st_size; return 0; } - map = map_sha1_file(r, sha1, &mapsize); + map = map_loose_object(r, oid, &mapsize); if (!map) return -1; @@ -1275,24 +1275,24 @@ static int sha1_loose_object_info(struct repository *r, if (oi->disk_sizep) *oi->disk_sizep = mapsize; if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) { - if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0) + if (unpack_loose_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0) status = error(_("unable to unpack %s header with --allow-unknown-type"), - sha1_to_hex(sha1)); - } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) + oid_to_hex(oid)); + } else if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) status = error(_("unable to unpack %s header"), - sha1_to_hex(sha1)); + oid_to_hex(oid)); if (status < 0) ; /* Do nothing */ else if (hdrbuf.len) { - if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0) + if ((status = parse_loose_header_extended(hdrbuf.buf, oi, flags)) < 0) status = error(_("unable to parse %s header with --allow-unknown-type"), - sha1_to_hex(sha1)); - } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0) - status = error(_("unable to parse %s header"), sha1_to_hex(sha1)); + oid_to_hex(oid)); + } else if ((status = parse_loose_header_extended(hdr, oi, flags)) < 0) + status = error(_("unable to parse %s header"), oid_to_hex(oid)); if (status >= 0 && oi->contentp) { - *oi->contentp = unpack_sha1_rest(&stream, hdr, - *oi->sizep, sha1); + *oi->contentp = unpack_loose_rest(&stream, hdr, + *oi->sizep, oid); if (!*oi->contentp) { git_inflate_end(&stream); status = -1; @@ -1358,7 +1358,7 @@ int oid_object_info_extended(struct repository *r, const struct object_id *oid, return -1; /* Most likely it's a loose object. */ - if (!sha1_loose_object_info(r, real->hash, oi, flags)) + if (!loose_object_info(r, real, oi, flags)) return 0; /* Not a loose object; someone else may have just packed it. 
*/ @@ -1421,19 +1421,17 @@ int oid_object_info(struct repository *r, return type; } -static void *read_object(const unsigned char *sha1, enum object_type *type, +static void *read_object(struct repository *r, + const struct object_id *oid, enum object_type *type, unsigned long *size) { - struct object_id oid; struct object_info oi = OBJECT_INFO_INIT; void *content; oi.typep = type; oi.sizep = size; oi.contentp = &content; - hashcpy(oid.hash, sha1); - - if (oid_object_info_extended(the_repository, &oid, &oi, 0) < 0) + if (oid_object_info_extended(r, oid, &oi, 0) < 0) return NULL; return content; } @@ -1444,7 +1442,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type, struct cached_object *co; hash_object_file(buf, len, type_name(type), oid); - if (has_sha1_file(oid->hash) || find_cached_object(oid)) + if (has_object_file(oid) || find_cached_object(oid)) return 0; ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc); co = &cached_objects[cached_object_nr++]; @@ -1461,7 +1459,8 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type, * deal with them should arrange to call read_object() and give error * messages themselves. */ -void *read_object_file_extended(const struct object_id *oid, +void *read_object_file_extended(struct repository *r, + const struct object_id *oid, enum object_type *type, unsigned long *size, int lookup_replace) @@ -1471,10 +1470,10 @@ void *read_object_file_extended(const struct object_id *oid, const char *path; struct stat st; const struct object_id *repl = lookup_replace ? - lookup_replace_object(the_repository, oid) : oid; + lookup_replace_object(r, oid) : oid; errno = 0; - data = read_object(repl->hash, type, size); + data = read_object(r, repl, type, size); if (data) return data; @@ -1486,11 +1485,11 @@ void *read_object_file_extended(const struct object_id *oid, die(_("replacement %s not found for %s"), oid_to_hex(repl), oid_to_hex(oid)); - if (!stat_sha1_file(the_repository, repl->hash, &st, &path)) + if (!stat_loose_object(r, repl, &st, &path)) die(_("loose object %s (stored in %s) is corrupt"), oid_to_hex(repl), path); - if ((p = has_packed_and_bad(repl->hash)) != NULL) + if ((p = has_packed_and_bad(r, repl->hash)) != NULL) die(_("packed object %s (stored in %s) is corrupt"), oid_to_hex(repl), p->pack_name); @@ -1593,7 +1592,7 @@ int finalize_object_file(const char *tmpfile, const char *filename) unlink_or_warn(tmpfile); if (ret) { if (ret != EEXIST) { - return error_errno(_("unable to write sha1 filename %s"), filename); + return error_errno(_("unable to write file %s"), filename); } /* FIXME!!! Collision check here ? */ } @@ -1621,12 +1620,12 @@ int hash_object_file(const void *buf, unsigned long len, const char *type, } /* Finalize a file on disk, and close it. 
*/ -static void close_sha1_file(int fd) +static void close_loose_object(int fd) { if (fsync_object_files) - fsync_or_die(fd, "sha1 file"); + fsync_or_die(fd, "loose object file"); if (close(fd) != 0) - die_errno(_("error when closing sha1 file")); + die_errno(_("error when closing loose object file")); } /* Size of directory component, including the ending '/' */ @@ -1686,7 +1685,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr, static struct strbuf tmp_file = STRBUF_INIT; static struct strbuf filename = STRBUF_INIT; - loose_object_path(the_repository, &filename, oid->hash); + loose_object_path(the_repository, &filename, oid); fd = create_tmpfile(&tmp_file, filename.buf); if (fd < 0) { @@ -1717,7 +1716,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr, ret = git_deflate(&stream, Z_FINISH); the_hash_algo->update_fn(&c, in0, stream.next_in - in0); if (write_buffer(fd, compressed, stream.next_out - compressed) < 0) - die(_("unable to write sha1 file")); + die(_("unable to write loose object file")); stream.next_out = compressed; stream.avail_out = sizeof(compressed); } while (ret == Z_OK); @@ -1734,7 +1733,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr, die(_("confused by unstable object source data for %s"), oid_to_hex(oid)); - close_sha1_file(fd); + close_loose_object(fd); if (mtime) { struct utimbuf utb; @@ -1814,9 +1813,9 @@ int force_object_loose(const struct object_id *oid, time_t mtime) if (has_loose_object(oid)) return 0; - buf = read_object(oid->hash, &type, &len); + buf = read_object(the_repository, oid, &type, &len); if (!buf) - return error(_("cannot read sha1_file for %s"), oid_to_hex(oid)); + return error(_("cannot read object for %s"), oid_to_hex(oid)); hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1; ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime); free(buf); @@ -1824,24 +1823,19 @@ int force_object_loose(const struct object_id *oid, time_t mtime) return ret; } -int has_sha1_file_with_flags(const unsigned char *sha1, int flags) +int repo_has_object_file_with_flags(struct repository *r, + const struct object_id *oid, int flags) { - struct object_id oid; if (!startup_info->have_repository) return 0; - hashcpy(oid.hash, sha1); - return oid_object_info_extended(the_repository, &oid, NULL, + return oid_object_info_extended(r, oid, NULL, flags | OBJECT_INFO_SKIP_CACHED) >= 0; } -int has_object_file(const struct object_id *oid) +int repo_has_object_file(struct repository *r, + const struct object_id *oid) { - return has_sha1_file(oid->hash); -} - -int has_object_file_with_flags(const struct object_id *oid, int flags) -{ - return has_sha1_file_with_flags(oid->hash, flags); + return repo_has_object_file_with_flags(r, oid, 0); } static void check_tree(const void *buf, size_t size) @@ -2252,14 +2246,14 @@ void odb_clear_loose_cache(struct object_directory *odb) sizeof(odb->loose_objects_subdir_seen)); } -static int check_stream_sha1(git_zstream *stream, - const char *hdr, - unsigned long size, - const char *path, - const unsigned char *expected_sha1) +static int check_stream_oid(git_zstream *stream, + const char *hdr, + unsigned long size, + const char *path, + const struct object_id *expected_oid) { git_hash_ctx c; - unsigned char real_sha1[GIT_MAX_RAWSZ]; + struct object_id real_oid; unsigned char buf[4096]; unsigned long total_read; int status = Z_OK; @@ -2275,7 +2269,7 @@ static int check_stream_sha1(git_zstream *stream, /* * This size comparison must be 
"<=" to read the final zlib packets; - * see the comment in unpack_sha1_rest for details. + * see the comment in unpack_loose_rest for details. */ while (total_read <= size && (status == Z_OK || @@ -2291,19 +2285,19 @@ static int check_stream_sha1(git_zstream *stream, git_inflate_end(stream); if (status != Z_STREAM_END) { - error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1)); + error(_("corrupt loose object '%s'"), oid_to_hex(expected_oid)); return -1; } if (stream->avail_in) { error(_("garbage at end of loose object '%s'"), - sha1_to_hex(expected_sha1)); + oid_to_hex(expected_oid)); return -1; } - the_hash_algo->final_fn(real_sha1, &c); - if (!hasheq(expected_sha1, real_sha1)) { - error(_("sha1 mismatch for %s (expected %s)"), path, - sha1_to_hex(expected_sha1)); + the_hash_algo->final_fn(real_oid.hash, &c); + if (!oideq(expected_oid, &real_oid)) { + error(_("hash mismatch for %s (expected %s)"), path, + oid_to_hex(expected_oid)); return -1; } @@ -2324,18 +2318,18 @@ int read_loose_object(const char *path, *contents = NULL; - map = map_sha1_file_1(the_repository, path, NULL, &mapsize); + map = map_loose_object_1(the_repository, path, NULL, &mapsize); if (!map) { error_errno(_("unable to mmap %s"), path); goto out; } - if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) { + if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) { error(_("unable to unpack header of %s"), path); goto out; } - *type = parse_sha1_header(hdr, size); + *type = parse_loose_header(hdr, size); if (*type < 0) { error(_("unable to parse header of %s"), path); git_inflate_end(&stream); @@ -2343,10 +2337,10 @@ int read_loose_object(const char *path, } if (*type == OBJ_BLOB && *size > big_file_threshold) { - if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0) + if (check_stream_oid(&stream, hdr, *size, path, expected_oid) < 0) goto out; } else { - *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash); + *contents = unpack_loose_rest(&stream, hdr, *size, expected_oid); if (!*contents) { error(_("unable to unpack contents of %s"), path); git_inflate_end(&stream); @@ -2354,7 +2348,7 @@ int read_loose_object(const char *path, } if (check_object_signature(expected_oid, *contents, *size, type_name(*type))) { - error(_("sha1 mismatch for %s (expected %s)"), path, + error(_("hash mismatch for %s (expected %s)"), path, oid_to_hex(expected_oid)); free(*contents); goto out; diff --git a/sha1-name.c b/sha1-name.c index a656481c6a..d1cc77c124 100644 --- a/sha1-name.c +++ b/sha1-name.c @@ -190,9 +190,6 @@ static void find_short_packed_object(struct disambiguate_state *ds) unique_in_pack(p, ds); } -#define SHORT_NAME_NOT_FOUND (-1) -#define SHORT_NAME_AMBIGUOUS (-2) - static int finish_object_disambiguation(struct disambiguate_state *ds, struct object_id *oid) { @@ -200,7 +197,7 @@ static int finish_object_disambiguation(struct disambiguate_state *ds, return SHORT_NAME_AMBIGUOUS; if (!ds->candidate_exists) - return SHORT_NAME_NOT_FOUND; + return MISSING_OBJECT; if (!ds->candidate_checked) /* @@ -414,8 +411,9 @@ static int sort_ambiguous(const void *a, const void *b) return a_type_sort > b_type_sort ? 
1 : -1; } -static int get_short_oid(const char *name, int len, struct object_id *oid, - unsigned flags) +static enum get_oid_result get_short_oid(const char *name, int len, + struct object_id *oid, + unsigned flags) { int status; struct disambiguate_state ds; @@ -733,7 +731,7 @@ static inline int push_mark(const char *string, int len) return at_mark(string, len, suffix, ARRAY_SIZE(suffix)); } -static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags); +static enum get_oid_result get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags); static int interpret_nth_prior_checkout(const char *name, int namelen, struct strbuf *buf); static int get_oid_basic(const char *str, int len, struct object_id *oid, @@ -883,11 +881,12 @@ static int get_oid_basic(const char *str, int len, struct object_id *oid, return 0; } -static int get_parent(const char *name, int len, - struct object_id *result, int idx) +static enum get_oid_result get_parent(const char *name, int len, + struct object_id *result, int idx) { struct object_id oid; - int ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH); + enum get_oid_result ret = get_oid_1(name, len, &oid, + GET_OID_COMMITTISH); struct commit *commit; struct commit_list *p; @@ -895,24 +894,25 @@ static int get_parent(const char *name, int len, return ret; commit = lookup_commit_reference(the_repository, &oid); if (parse_commit(commit)) - return -1; + return MISSING_OBJECT; if (!idx) { oidcpy(result, &commit->object.oid); - return 0; + return FOUND; } p = commit->parents; while (p) { if (!--idx) { oidcpy(result, &p->item->object.oid); - return 0; + return FOUND; } p = p->next; } - return -1; + return MISSING_OBJECT; } -static int get_nth_ancestor(const char *name, int len, - struct object_id *result, int generation) +static enum get_oid_result get_nth_ancestor(const char *name, int len, + struct object_id *result, + int generation) { struct object_id oid; struct commit *commit; @@ -923,15 +923,15 @@ static int get_nth_ancestor(const char *name, int len, return ret; commit = lookup_commit_reference(the_repository, &oid); if (!commit) - return -1; + return MISSING_OBJECT; while (generation--) { if (parse_commit(commit) || !commit->parents) - return -1; + return MISSING_OBJECT; commit = commit->parents->item; } oidcpy(result, &commit->object.oid); - return 0; + return FOUND; } struct object *peel_to_type(const char *name, int namelen, @@ -1077,7 +1077,9 @@ static int get_describe_name(const char *name, int len, struct object_id *oid) return -1; } -static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags) +static enum get_oid_result get_oid_1(const char *name, int len, + struct object_id *oid, + unsigned lookup_flags) { int ret, has_suffix; const char *cp; @@ -1111,16 +1113,16 @@ static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned ret = peel_onion(name, len, oid, lookup_flags); if (!ret) - return 0; + return FOUND; ret = get_oid_basic(name, len, oid, lookup_flags); if (!ret) - return 0; + return FOUND; /* It could be describe output that is "SOMETHING-gXXXX" */ ret = get_describe_name(name, len, oid); if (!ret) - return 0; + return FOUND; return get_short_oid(name, len, oid, lookup_flags); } @@ -1513,7 +1515,7 @@ int strbuf_check_branch_ref(struct strbuf *sb, const char *name) int get_oid(const char *name, struct object_id *oid) { struct object_context unused; - return get_oid_with_context(name, 0, oid, &unused); + return 
get_oid_with_context(the_repository, name, 0, oid, &unused); } @@ -1530,35 +1532,40 @@ int get_oid(const char *name, struct object_id *oid) int get_oid_committish(const char *name, struct object_id *oid) { struct object_context unused; - return get_oid_with_context(name, GET_OID_COMMITTISH, + return get_oid_with_context(the_repository, + name, GET_OID_COMMITTISH, oid, &unused); } int get_oid_treeish(const char *name, struct object_id *oid) { struct object_context unused; - return get_oid_with_context(name, GET_OID_TREEISH, + return get_oid_with_context(the_repository, + name, GET_OID_TREEISH, oid, &unused); } int get_oid_commit(const char *name, struct object_id *oid) { struct object_context unused; - return get_oid_with_context(name, GET_OID_COMMIT, + return get_oid_with_context(the_repository, + name, GET_OID_COMMIT, oid, &unused); } int get_oid_tree(const char *name, struct object_id *oid) { struct object_context unused; - return get_oid_with_context(name, GET_OID_TREE, + return get_oid_with_context(the_repository, + name, GET_OID_TREE, oid, &unused); } int get_oid_blob(const char *name, struct object_id *oid) { struct object_context unused; - return get_oid_with_context(name, GET_OID_BLOB, + return get_oid_with_context(the_repository, + name, GET_OID_BLOB, oid, &unused); } @@ -1597,7 +1604,8 @@ static void diagnose_invalid_oid_path(const char *prefix, } /* Must be called only when :stage:filename doesn't exist. */ -static void diagnose_invalid_index_path(int stage, +static void diagnose_invalid_index_path(struct index_state *istate, + int stage, const char *prefix, const char *filename) { @@ -1610,11 +1618,11 @@ static void diagnose_invalid_index_path(int stage, prefix = ""; /* Wrong stage number? */ - pos = cache_name_pos(filename, namelen); + pos = index_name_pos(istate, filename, namelen); if (pos < 0) pos = -pos - 1; - if (pos < active_nr) { - ce = active_cache[pos]; + if (pos < istate->cache_nr) { + ce = istate->cache[pos]; if (ce_namelen(ce) == namelen && !memcmp(ce->name, filename, namelen)) die("Path '%s' is in the index, but not at stage %d.\n" @@ -1626,11 +1634,11 @@ static void diagnose_invalid_index_path(int stage, /* Confusion between relative and absolute filenames? 
*/ strbuf_addstr(&fullname, prefix); strbuf_addstr(&fullname, filename); - pos = cache_name_pos(fullname.buf, fullname.len); + pos = index_name_pos(istate, fullname.buf, fullname.len); if (pos < 0) pos = -pos - 1; - if (pos < active_nr) { - ce = active_cache[pos]; + if (pos < istate->cache_nr) { + ce = istate->cache[pos]; if (ce_namelen(ce) == fullname.len && !memcmp(ce->name, fullname.buf, fullname.len)) die("Path '%s' is in the index, but not '%s'.\n" @@ -1664,7 +1672,8 @@ static char *resolve_relative_path(const char *rel) rel); } -static int get_oid_with_context_1(const char *name, +static enum get_oid_result get_oid_with_context_1(struct repository *repo, + const char *name, unsigned flags, const char *prefix, struct object_id *oid, @@ -1723,13 +1732,13 @@ static int get_oid_with_context_1(const char *name, if (flags & GET_OID_RECORD_PATH) oc->path = xstrdup(cp); - if (!active_cache) - read_cache(); - pos = cache_name_pos(cp, namelen); + if (!repo->index->cache) + repo_read_index(the_repository); + pos = index_name_pos(repo->index, cp, namelen); if (pos < 0) pos = -pos - 1; - while (pos < active_nr) { - ce = active_cache[pos]; + while (pos < repo->index->cache_nr) { + ce = repo->index->cache[pos]; if (ce_namelen(ce) != namelen || memcmp(ce->name, cp, namelen)) break; @@ -1742,7 +1751,7 @@ static int get_oid_with_context_1(const char *name, pos++; } if (only_to_die && name[1] && name[1] != '/') - diagnose_invalid_index_path(stage, prefix, cp); + diagnose_invalid_index_path(repo->index, stage, prefix, cp); free(new_path); return -1; } @@ -1807,12 +1816,15 @@ void maybe_die_on_misspelt_object_name(const char *name, const char *prefix) { struct object_context oc; struct object_id oid; - get_oid_with_context_1(name, GET_OID_ONLY_TO_DIE, prefix, &oid, &oc); + get_oid_with_context_1(the_repository, name, GET_OID_ONLY_TO_DIE, + prefix, &oid, &oc); } -int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc) +int get_oid_with_context(struct repository *repo, const char *str, + unsigned flags, struct object_id *oid, + struct object_context *oc) { if (flags & GET_OID_FOLLOW_SYMLINKS && flags & GET_OID_ONLY_TO_DIE) BUG("incompatible flags for get_sha1_with_context"); - return get_oid_with_context_1(str, flags, NULL, oid, oc); + return get_oid_with_context_1(repo, str, flags, NULL, oid, oc); } diff --git a/sideband.c b/sideband.c index 7c3d33d3f8..ef851113c4 100644 --- a/sideband.c +++ b/sideband.c @@ -1,7 +1,6 @@ #include "cache.h" #include "color.h" #include "config.h" -#include "pkt-line.h" #include "sideband.h" #include "help.h" @@ -110,109 +109,104 @@ static void maybe_colorize_sideband(struct strbuf *dest, const char *src, int n) } -/* - * Receive multiplexed output stream over git native protocol. - * in_stream is the input stream from the remote, which carries data - * in pkt_line format with band designator. Demultiplex it into out - * and err and return error appropriately. Band #1 carries the - * primary payload. Things coming over band #2 is not necessarily - * error; they are usually informative message on the standard error - * stream, aka "verbose"). A message over band #3 is a signal that - * the remote died unexpectedly. A flush() concludes the stream. 
- */ - #define DISPLAY_PREFIX "remote: " #define ANSI_SUFFIX "\033[K" #define DUMB_SUFFIX " " -int recv_sideband(const char *me, int in_stream, int out) +int demultiplex_sideband(const char *me, char *buf, int len, + int die_on_error, + struct strbuf *scratch, + enum sideband_type *sideband_type) { - const char *suffix; - char buf[LARGE_PACKET_MAX + 1]; - struct strbuf outbuf = STRBUF_INIT; - int retval = 0; - - if (isatty(2) && !is_terminal_dumb()) - suffix = ANSI_SUFFIX; - else - suffix = DUMB_SUFFIX; - - while (!retval) { - const char *b, *brk; - int band, len; - len = packet_read(in_stream, NULL, NULL, buf, LARGE_PACKET_MAX, 0); - if (len == 0) - break; - if (len < 1) { - strbuf_addf(&outbuf, - "%s%s: protocol error: no band designator", - outbuf.len ? "\n" : "", me); - retval = SIDEBAND_PROTOCOL_ERROR; - break; - } - band = buf[0] & 0xff; - buf[len] = '\0'; - len--; - switch (band) { - case 3: - strbuf_addf(&outbuf, "%s%s", outbuf.len ? "\n" : "", - DISPLAY_PREFIX); - maybe_colorize_sideband(&outbuf, buf + 1, len); - - retval = SIDEBAND_REMOTE_ERROR; - break; - case 2: - b = buf + 1; - - /* - * Append a suffix to each nonempty line to clear the - * end of the screen line. - * - * The output is accumulated in a buffer and - * each line is printed to stderr using - * write(2) to ensure inter-process atomicity. - */ - while ((brk = strpbrk(b, "\n\r"))) { - int linelen = brk - b; - - if (!outbuf.len) - strbuf_addstr(&outbuf, DISPLAY_PREFIX); - if (linelen > 0) { - maybe_colorize_sideband(&outbuf, b, linelen); - strbuf_addstr(&outbuf, suffix); - } - - strbuf_addch(&outbuf, *brk); - xwrite(2, outbuf.buf, outbuf.len); - strbuf_reset(&outbuf); - - b = brk + 1; - } + static const char *suffix; + const char *b, *brk; + int band; + + if (!suffix) { + if (isatty(2) && !is_terminal_dumb()) + suffix = ANSI_SUFFIX; + else + suffix = DUMB_SUFFIX; + } - if (*b) { - strbuf_addstr(&outbuf, outbuf.len ? - "" : DISPLAY_PREFIX); - maybe_colorize_sideband(&outbuf, b, strlen(b)); + if (len == 0) { + *sideband_type = SIDEBAND_FLUSH; + goto cleanup; + } + if (len < 1) { + strbuf_addf(scratch, + "%s%s: protocol error: no band designator", + scratch->len ? "\n" : "", me); + *sideband_type = SIDEBAND_PROTOCOL_ERROR; + goto cleanup; + } + band = buf[0] & 0xff; + buf[len] = '\0'; + len--; + switch (band) { + case 3: + if (die_on_error) + die("remote error: %s", buf + 1); + strbuf_addf(scratch, "%s%s", scratch->len ? "\n" : "", + DISPLAY_PREFIX); + maybe_colorize_sideband(scratch, buf + 1, len); + + *sideband_type = SIDEBAND_REMOTE_ERROR; + break; + case 2: + b = buf + 1; + + /* + * Append a suffix to each nonempty line to clear the + * end of the screen line. + * + * The output is accumulated in a buffer and + * each line is printed to stderr using + * write(2) to ensure inter-process atomicity. + */ + while ((brk = strpbrk(b, "\n\r"))) { + int linelen = brk - b; + + if (!scratch->len) + strbuf_addstr(scratch, DISPLAY_PREFIX); + if (linelen > 0) { + maybe_colorize_sideband(scratch, b, linelen); + strbuf_addstr(scratch, suffix); } - break; - case 1: - write_or_die(out, buf + 1, len); - break; - default: - strbuf_addf(&outbuf, "%s%s: protocol error: bad band #%d", - outbuf.len ? "\n" : "", me, band); - retval = SIDEBAND_PROTOCOL_ERROR; - break; + + strbuf_addch(scratch, *brk); + xwrite(2, scratch->buf, scratch->len); + strbuf_reset(scratch); + + b = brk + 1; + } + + if (*b) { + strbuf_addstr(scratch, scratch->len ? 
+ "" : DISPLAY_PREFIX); + maybe_colorize_sideband(scratch, b, strlen(b)); } + return 0; + case 1: + *sideband_type = SIDEBAND_PRIMARY; + break; + default: + strbuf_addf(scratch, "%s%s: protocol error: bad band #%d", + scratch->len ? "\n" : "", me, band); + *sideband_type = SIDEBAND_PROTOCOL_ERROR; + break; } - if (outbuf.len) { - strbuf_addch(&outbuf, '\n'); - xwrite(2, outbuf.buf, outbuf.len); +cleanup: + if (die_on_error && *sideband_type == SIDEBAND_PROTOCOL_ERROR) + die("%s", scratch->buf); + if (scratch->len) { + strbuf_addch(scratch, '\n'); + xwrite(2, scratch->buf, scratch->len); } - strbuf_release(&outbuf); - return retval; + strbuf_release(scratch); + return 1; } /* diff --git a/sideband.h b/sideband.h index 7a8146f161..227740a58e 100644 --- a/sideband.h +++ b/sideband.h @@ -1,10 +1,29 @@ #ifndef SIDEBAND_H #define SIDEBAND_H -#define SIDEBAND_PROTOCOL_ERROR -2 -#define SIDEBAND_REMOTE_ERROR -1 +enum sideband_type { + SIDEBAND_PROTOCOL_ERROR = -2, + SIDEBAND_REMOTE_ERROR = -1, + SIDEBAND_FLUSH = 0, + SIDEBAND_PRIMARY = 1 +}; + +/* + * Inspects a multiplexed packet read from the remote. If this packet is a + * progress packet and thus should not be processed by the caller, returns 0. + * Otherwise, returns 1, releases scratch, and sets sideband_type. + * + * If this packet is SIDEBAND_PROTOCOL_ERROR, SIDEBAND_REMOTE_ERROR, or a + * progress packet, also prints a message to stderr. + * + * scratch must be a struct strbuf allocated by the caller. It is used to store + * progress messages split across multiple packets. + */ +int demultiplex_sideband(const char *me, char *buf, int len, + int die_on_error, + struct strbuf *scratch, + enum sideband_type *sideband_type); -int recv_sideband(const char *me, int in_stream, int out); void send_sideband(int fd, int band, const char *data, ssize_t sz, int packet_max); #endif diff --git a/streaming.c b/streaming.c index ac7c7a22f9..fcd6303219 100644 --- a/streaming.c +++ b/streaming.c @@ -338,16 +338,16 @@ static struct stream_vtbl loose_vtbl = { static open_method_decl(loose) { - st->u.loose.mapped = map_sha1_file(the_repository, - oid->hash, &st->u.loose.mapsize); + st->u.loose.mapped = map_loose_object(the_repository, + oid, &st->u.loose.mapsize); if (!st->u.loose.mapped) return -1; - if ((unpack_sha1_header(&st->z, - st->u.loose.mapped, - st->u.loose.mapsize, - st->u.loose.hdr, - sizeof(st->u.loose.hdr)) < 0) || - (parse_sha1_header(st->u.loose.hdr, &st->size) < 0)) { + if ((unpack_loose_header(&st->z, + st->u.loose.mapped, + st->u.loose.mapsize, + st->u.loose.hdr, + sizeof(st->u.loose.hdr)) < 0) || + (parse_loose_header(st->u.loose.hdr, &st->size) < 0)) { git_inflate_end(&st->z); munmap(st->u.loose.mapped, st->u.loose.mapsize); return -1; @@ -499,7 +499,7 @@ static struct stream_vtbl incore_vtbl = { static open_method_decl(incore) { - st->u.incore.buf = read_object_file_extended(oid, type, &st->size, 0); + st->u.incore.buf = read_object_file_extended(the_repository, oid, type, &st->size, 0); st->u.incore.read_ptr = 0; st->vtbl = &incore_vtbl; diff --git a/submodule.c b/submodule.c index 7b5cea8522..934ecfa294 100644 --- a/submodule.c +++ b/submodule.c @@ -1,4 +1,3 @@ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "repository.h" @@ -456,7 +455,7 @@ static int prepare_submodule_summary(struct rev_info *rev, const char *path, return prepare_revision_walk(rev); } -static void print_submodule_summary(struct rev_info *rev, struct diff_options *o) +static void print_submodule_summary(struct repository *r, struct rev_info 
*rev, struct diff_options *o) { static const char format[] = " %m %s"; struct strbuf sb = STRBUF_INIT; @@ -467,7 +466,8 @@ static void print_submodule_summary(struct rev_info *rev, struct diff_options *o ctx.date_mode = rev->date_mode; ctx.output_encoding = get_log_output_encoding(); strbuf_setlen(&sb, 0); - format_commit_message(commit, format, &sb, &ctx); + repo_format_commit_message(r, commit, format, &sb, + &ctx); strbuf_addch(&sb, '\n'); if (commit->object.flags & SYMMETRIC_LEFT) diff_emit_submodule_del(o, sb.buf); @@ -500,14 +500,46 @@ static void prepare_submodule_repo_env_in_gitdir(struct argv_array *out) argv_array_pushf(out, "%s=.", GIT_DIR_ENVIRONMENT); } -/* Helper function to display the submodule header line prior to the full - * summary output. If it can locate the submodule objects directory it will - * attempt to lookup both the left and right commits and put them into the - * left and right pointers. +/* + * Initialize a repository struct for a submodule based on the provided 'path'. + * + * Unlike repo_submodule_init, this tolerates submodules not present + * in .gitmodules. This function exists only to preserve historical behavior, + * + * Returns the repository struct on success, + * NULL when the submodule is not present. + */ +static struct repository *open_submodule(const char *path) +{ + struct strbuf sb = STRBUF_INIT; + struct repository *out = xmalloc(sizeof(*out)); + + if (submodule_to_gitdir(&sb, path) || repo_init(out, sb.buf, NULL)) { + strbuf_release(&sb); + free(out); + return NULL; + } + + /* Mark it as a submodule */ + out->submodule_prefix = xstrdup(path); + + strbuf_release(&sb); + return out; +} + +/* + * Helper function to display the submodule header line prior to the full + * summary output. + * + * If it can locate the submodule git directory it will create a repository + * handle for the submodule and lookup both the left and right commits and + * put them into the left and right pointers. */ -static void show_submodule_header(struct diff_options *o, const char *path, +static void show_submodule_header(struct diff_options *o, + const char *path, struct object_id *one, struct object_id *two, unsigned dirty_submodule, + struct repository *sub, struct commit **left, struct commit **right, struct commit_list **merge_bases) { @@ -526,7 +558,7 @@ static void show_submodule_header(struct diff_options *o, const char *path, else if (is_null_oid(two)) message = "(submodule deleted)"; - if (add_submodule_odb(path)) { + if (!sub) { if (!message) message = "(commits not present)"; goto output_header; @@ -536,8 +568,8 @@ static void show_submodule_header(struct diff_options *o, const char *path, * Attempt to lookup the commit references, and determine if this is * a fast forward or fast backwards update. 
*/ - *left = lookup_commit_reference(the_repository, one); - *right = lookup_commit_reference(the_repository, two); + *left = lookup_commit_reference(sub, one); + *right = lookup_commit_reference(sub, two); /* * Warn about missing commits in the submodule project, but only if @@ -547,7 +579,7 @@ static void show_submodule_header(struct diff_options *o, const char *path, (!is_null_oid(two) && !*right)) message = "(commits not present)"; - *merge_bases = get_merge_bases(*left, *right); + *merge_bases = repo_get_merge_bases(sub, *left, *right); if (*merge_bases) { if ((*merge_bases)->item == *left) fast_forward = 1; @@ -581,16 +613,18 @@ void show_submodule_summary(struct diff_options *o, const char *path, struct rev_info rev; struct commit *left = NULL, *right = NULL; struct commit_list *merge_bases = NULL; + struct repository *sub; + sub = open_submodule(path); show_submodule_header(o, path, one, two, dirty_submodule, - &left, &right, &merge_bases); + sub, &left, &right, &merge_bases); /* * If we don't have both a left and a right pointer, there is no * reason to try and display a summary. The header line should contain * all the information the user needs. */ - if (!left || !right) + if (!left || !right || !sub) goto out; /* Treat revision walker failure the same as missing commits */ @@ -599,13 +633,17 @@ void show_submodule_summary(struct diff_options *o, const char *path, goto out; } - print_submodule_summary(&rev, o); + print_submodule_summary(sub, &rev, o); out: if (merge_bases) free_commit_list(merge_bases); clear_commit_marks(left, ~0); clear_commit_marks(right, ~0); + if (sub) { + repo_clear(sub); + free(sub); + } } void show_submodule_inline_diff(struct diff_options *o, const char *path, @@ -617,9 +655,11 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path, struct commit_list *merge_bases = NULL; struct child_process cp = CHILD_PROCESS_INIT; struct strbuf sb = STRBUF_INIT; + struct repository *sub; + sub = open_submodule(path); show_submodule_header(o, path, one, two, dirty_submodule, - &left, &right, &merge_bases); + sub, &left, &right, &merge_bases); /* We need a valid left and right commit to display a difference */ if (!(left || is_null_oid(one)) || @@ -680,6 +720,10 @@ done: clear_commit_marks(left, ~0); if (right) clear_commit_marks(right, ~0); + if (sub) { + repo_clear(sub); + free(sub); + } } int should_update_submodules(void) @@ -1004,9 +1048,6 @@ static int push_submodule(const char *path, const struct string_list *push_options, int dry_run) { - if (add_submodule_odb(path)) - return 1; - if (for_each_remote_ref_submodule(path, has_remote, NULL) > 0) { struct child_process cp = CHILD_PROCESS_INIT; argv_array_push(&cp.args, "push"); diff --git a/t/.gitignore b/t/.gitignore index 348715f0e4..91cf5772fe 100644 --- a/t/.gitignore +++ b/t/.gitignore @@ -2,3 +2,4 @@ /test-results /.prove /chainlinttmp +/out/ @@ -170,6 +170,15 @@ appropriately before running "make". implied by other options like --valgrind and GIT_TEST_INSTALLED. +--no-bin-wrappers:: + By default, the test suite uses the wrappers in + `../bin-wrappers/` to execute `git` and friends. With this option, + `../git` and friends are run directly. This is not recommended + in general, as the wrappers contain safeguards to ensure that no + files from an installed Git are used, but can speed up test runs + especially on platforms where running shell scripts is expensive + (most notably, Windows). 
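One possible invocation, as a sketch (the test script named here is arbitrary):

    $ cd t
    $ ./t0000-basic.sh --no-bin-wrappers -v

or, for a full run driven by make:

    $ make -C t GIT_TEST_OPTS='--no-bin-wrappers'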
+ --root=<directory>:: Create "trash" directories used to store all temporary data during testing under <directory>, instead of the t/ directory. @@ -358,6 +367,10 @@ GIT_TEST_INDEX_VERSION=<n> exercises the index read/write code path for the index version specified. Can be set to any valid version (currently 2, 3, or 4). +GIT_TEST_PACK_SPARSE=<boolean> if enabled will default the pack-objects +builtin to use the sparse object walk. This can still be overridden by +the --no-sparse command-line argument. + GIT_TEST_PRELOAD_INDEX=<boolean> exercises the preload-index code path by overriding the minimum number of cache entries required per thread. @@ -374,6 +387,11 @@ GIT_TEST_MULTI_PACK_INDEX=<boolean>, when true, forces the multi-pack- index to be written after every 'git repack' command, and overrides the 'core.multiPackIndex' setting to true. +GIT_TEST_SIDEBAND_ALL=<boolean>, when true, overrides the +'uploadpack.allowSidebandAll' setting to true, and when false, forces +fetch-pack to not request sideband-all (even if the server advertises +sideband-all). + Naming Tests ------------ diff --git a/t/check-non-portable-shell.pl b/t/check-non-portable-shell.pl index 8037eef777..166d64d4a2 100755 --- a/t/check-non-portable-shell.pl +++ b/t/check-non-portable-shell.pl @@ -36,7 +36,7 @@ while (<>) { } /\bcp\s+-a/ and err 'cp -a is not portable'; - /\bsed\s+-i/ and err 'sed -i is not portable'; + /\bsed\s+-[^efn]\s+/ and err 'sed option not portable (use only -n, -e, -f)'; /\becho\s+-[neE]/ and err 'echo with option is not portable (use printf)'; /^\s*declare\s+/ and err 'arrays/declare not portable'; /^\s*[^#]\s*which\s/ and err 'which is not portable (use type)'; diff --git a/t/helper/test-date.c b/t/helper/test-date.c index a0837371ab..b3253803ac 100644 --- a/t/helper/test-date.c +++ b/t/helper/test-date.c @@ -3,10 +3,12 @@ static const char *usage_msg = "\n" " test-tool date relative [time_t]...\n" +" test-tool date human [time_t]...\n" " test-tool date show:<format> [time_t]...\n" " test-tool date parse [date]...\n" " test-tool date approxidate [date]...\n" " test-tool date timestamp [date]...\n" +" test-tool date getnanos [start-nanos]\n" " test-tool date is64bit\n" " test-tool date time_t-is64bit\n"; @@ -16,12 +18,20 @@ static void show_relative_dates(const char **argv, struct timeval *now) for (; *argv; argv++) { time_t t = atoi(*argv); - show_date_relative(t, 0, now, &buf); + show_date_relative(t, now, &buf); printf("%s -> %s\n", *argv, buf.buf); } strbuf_release(&buf); } +static void show_human_dates(const char **argv) +{ + for (; *argv; argv++) { + time_t t = atoi(*argv); + printf("%s -> %s\n", *argv, show_date(t, 0, DATE_MODE(HUMAN))); + } +} + static void show_dates(const char **argv, const char *format) { struct date_mode mode; @@ -82,12 +92,21 @@ static void parse_approx_timestamp(const char **argv, struct timeval *now) } } +static void getnanos(const char **argv) +{ + double seconds = getnanotime() / 1.0e9; + + if (*argv) + seconds -= strtod(*argv, NULL); + printf("%lf\n", seconds); +} + int cmd__date(int argc, const char **argv) { struct timeval now; const char *x; - x = getenv("TEST_DATE_NOW"); + x = getenv("GIT_TEST_DATE_NOW"); if (x) { now.tv_sec = atoi(x); now.tv_usec = 0; @@ -100,6 +119,8 @@ int cmd__date(int argc, const char **argv) usage(usage_msg); if (!strcmp(*argv, "relative")) show_relative_dates(argv+1, &now); + else if (!strcmp(*argv, "human")) + show_human_dates(argv+1); else if (skip_prefix(*argv, "show:", &x)) show_dates(argv+1, x); else if (!strcmp(*argv, 
"parse")) @@ -108,6 +129,8 @@ int cmd__date(int argc, const char **argv) parse_approxidate(argv+1, &now); else if (!strcmp(*argv, "timestamp")) parse_approx_timestamp(argv+1, &now); + else if (!strcmp(*argv, "getnanos")) + getnanos(argv+1); else if (!strcmp(*argv, "is64bit")) return sizeof(timestamp_t) == 8 ? 0 : 1; else if (!strcmp(*argv, "time_t-is64bit")) diff --git a/t/helper/test-dump-fsmonitor.c b/t/helper/test-dump-fsmonitor.c index 08e3684aff..2786f47088 100644 --- a/t/helper/test-dump-fsmonitor.c +++ b/t/helper/test-dump-fsmonitor.c @@ -3,11 +3,11 @@ int cmd__dump_fsmonitor(int ac, const char **av) { - struct index_state *istate = &the_index; + struct index_state *istate = the_repository->index; int i; setup_git_directory(); - if (do_read_index(istate, get_index_file(), 0) < 0) + if (do_read_index(istate, the_repository->index_file, 0) < 0) die("unable to read index file"); if (!istate->fsmonitor_last_update) { printf("no fsmonitor\n"); diff --git a/t/helper/test-dump-untracked-cache.c b/t/helper/test-dump-untracked-cache.c index 52870ebbb3..cf0f2c7228 100644 --- a/t/helper/test-dump-untracked-cache.c +++ b/t/helper/test-dump-untracked-cache.c @@ -1,3 +1,4 @@ +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "test-tool.h" #include "cache.h" #include "dir.h" diff --git a/t/helper/test-path-utils.c b/t/helper/test-path-utils.c index ae091d9b3e..5d543ad21f 100644 --- a/t/helper/test-path-utils.c +++ b/t/helper/test-path-utils.c @@ -177,6 +177,14 @@ static int is_dotgitmodules(const char *path) return is_hfs_dotgitmodules(path) || is_ntfs_dotgitmodules(path); } +static int cmp_by_st_size(const void *a, const void *b) +{ + intptr_t x = (intptr_t)((struct string_list_item *)a)->util; + intptr_t y = (intptr_t)((struct string_list_item *)b)->util; + + return x > y ? -1 : (x < y ? +1 : 0); +} + int cmd__path_utils(int argc, const char **argv) { if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) { @@ -291,6 +299,62 @@ int cmd__path_utils(int argc, const char **argv) return !!res; } + if (argc > 2 && !strcmp(argv[1], "file-size")) { + int res = 0, i; + struct stat st; + + for (i = 2; i < argc; i++) + if (stat(argv[i], &st)) + res = error_errno("Cannot stat '%s'", argv[i]); + else + printf("%"PRIuMAX"\n", (uintmax_t)st.st_size); + return !!res; + } + + if (argc == 4 && !strcmp(argv[1], "skip-n-bytes")) { + int fd = open(argv[2], O_RDONLY), offset = atoi(argv[3]); + char buffer[65536]; + + if (fd < 0) + die_errno("could not open '%s'", argv[2]); + if (lseek(fd, offset, SEEK_SET) < 0) + die_errno("could not skip %d bytes", offset); + for (;;) { + ssize_t count = read(fd, buffer, sizeof(buffer)); + if (count < 0) + die_errno("could not read '%s'", argv[2]); + if (!count) + break; + if (write(1, buffer, count) < 0) + die_errno("could not write to stdout"); + } + close(fd); + return 0; + } + + if (argc > 5 && !strcmp(argv[1], "slice-tests")) { + int res = 0; + long offset, stride, i; + struct string_list list = STRING_LIST_INIT_NODUP; + struct stat st; + + offset = strtol(argv[2], NULL, 10); + stride = strtol(argv[3], NULL, 10); + if (stride < 1) + stride = 1; + for (i = 4; i < argc; i++) + if (stat(argv[i], &st)) + res = error_errno("Cannot stat '%s'", argv[i]); + else + string_list_append(&list, argv[i])->util = + (void *)(intptr_t)st.st_size; + QSORT(list.items, list.nr, cmp_by_st_size); + for (i = offset; i < list.nr; i+= stride) + printf("%s\n", list.items[i].string); + + return !!res; + } + fprintf(stderr, "%s: unknown function name: %s\n", argv[0], argv[1] ? 
argv[1] : "(there was none)"); return 1; diff --git a/t/helper/test-ref-store.c b/t/helper/test-ref-store.c index e9e0541276..799fc00aa1 100644 --- a/t/helper/test-ref-store.c +++ b/t/helper/test-ref-store.c @@ -233,7 +233,7 @@ static int cmd_update_ref(struct ref_store *refs, const char **argv) { const char *msg = notnull(*argv++, "msg"); const char *refname = notnull(*argv++, "refname"); - const char *new_sha1_buf = notnull(*argv++, "old-sha1"); + const char *new_sha1_buf = notnull(*argv++, "new-sha1"); const char *old_sha1_buf = notnull(*argv++, "old-sha1"); unsigned int flags = arg_flags(*argv++, "flags"); struct object_id old_oid; diff --git a/t/helper/test-repository.c b/t/helper/test-repository.c index 6a84a53efb..f7f8618445 100644 --- a/t/helper/test-repository.c +++ b/t/helper/test-repository.c @@ -17,6 +17,11 @@ static void test_parse_commit_in_graph(const char *gitdir, const char *worktree, setup_git_env(gitdir); + memset(the_repository, 0, sizeof(*the_repository)); + + /* TODO: Needed for temporary hack in hashcmp, see 183a638b7da. */ + repo_set_hash_algo(the_repository, GIT_HASH_SHA1); + if (repo_init(&r, gitdir, worktree)) die("Couldn't init repo"); @@ -43,6 +48,11 @@ static void test_get_commit_tree_in_graph(const char *gitdir, setup_git_env(gitdir); + memset(the_repository, 0, sizeof(*the_repository)); + + /* TODO: Needed for temporary hack in hashcmp, see 183a638b7da. */ + repo_set_hash_algo(the_repository, GIT_HASH_SHA1); + if (repo_init(&r, gitdir, worktree)) die("Couldn't init repo"); diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c index 5b137874e1..50c55f8b1a 100644 --- a/t/helper/test-tool.c +++ b/t/helper/test-tool.c @@ -51,6 +51,7 @@ static struct test_cmd cmds[] = { { "submodule-nested-repo-config", cmd__submodule_nested_repo_config }, { "subprocess", cmd__subprocess }, { "urlmatch-normalization", cmd__urlmatch_normalization }, + { "xml-encode", cmd__xml_encode }, { "wildmatch", cmd__wildmatch }, #ifdef GIT_WINDOWS_NATIVE { "windows-named-pipe", cmd__windows_named_pipe }, diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h index ca5c88edb2..a563df49bf 100644 --- a/t/helper/test-tool.h +++ b/t/helper/test-tool.h @@ -1,6 +1,7 @@ #ifndef TEST_TOOL_H #define TEST_TOOL_H +#define USE_THE_INDEX_COMPATIBILITY_MACROS #include "git-compat-util.h" int cmd__chmtime(int argc, const char **argv); @@ -47,6 +48,7 @@ int cmd__submodule_config(int argc, const char **argv); int cmd__submodule_nested_repo_config(int argc, const char **argv); int cmd__subprocess(int argc, const char **argv); int cmd__urlmatch_normalization(int argc, const char **argv); +int cmd__xml_encode(int argc, const char **argv); int cmd__wildmatch(int argc, const char **argv); #ifdef GIT_WINDOWS_NATIVE int cmd__windows_named_pipe(int argc, const char **argv); diff --git a/t/helper/test-xml-encode.c b/t/helper/test-xml-encode.c new file mode 100644 index 0000000000..a648bbd961 --- /dev/null +++ b/t/helper/test-xml-encode.c @@ -0,0 +1,80 @@ +#include "test-tool.h" + +static const char *utf8_replace_character = "�"; + +/* + * Encodes (possibly incorrect) UTF-8 on <stdin> to <stdout>, to be embedded + * in an XML file. 
+ */ +int cmd__xml_encode(int argc, const char **argv) +{ + unsigned char buf[1024], tmp[4], *tmp2 = NULL; + ssize_t cur = 0, len = 1, remaining = 0; + unsigned char ch; + + for (;;) { + if (++cur == len) { + len = xread(0, buf, sizeof(buf)); + if (!len) + return 0; + if (len < 0) + die_errno("Could not read <stdin>"); + cur = 0; + } + ch = buf[cur]; + + if (tmp2) { + if ((ch & 0xc0) != 0x80) { + fputs(utf8_replace_character, stdout); + tmp2 = NULL; + cur--; + continue; + } + *tmp2 = ch; + tmp2++; + if (--remaining == 0) { + fwrite(tmp, tmp2 - tmp, 1, stdout); + tmp2 = NULL; + } + continue; + } + + if (!(ch & 0x80)) { + /* 0xxxxxxx */ + if (ch == '&') + fputs("&", stdout); + else if (ch == '\'') + fputs("'", stdout); + else if (ch == '"') + fputs(""", stdout); + else if (ch == '<') + fputs("<", stdout); + else if (ch == '>') + fputs(">", stdout); + else if (ch >= 0x20) + fputc(ch, stdout); + else if (ch == 0x09 || ch == 0x0a || ch == 0x0d) + fprintf(stdout, "&#x%02x;", ch); + else + fputs(utf8_replace_character, stdout); + } else if ((ch & 0xe0) == 0xc0) { + /* 110XXXXx 10xxxxxx */ + tmp[0] = ch; + remaining = 1; + tmp2 = tmp + 1; + } else if ((ch & 0xf0) == 0xe0) { + /* 1110XXXX 10Xxxxxx 10xxxxxx */ + tmp[0] = ch; + remaining = 2; + tmp2 = tmp + 1; + } else if ((ch & 0xf8) == 0xf0) { + /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */ + tmp[0] = ch; + remaining = 3; + tmp2 = tmp + 1; + } else + fputs(utf8_replace_character, stdout); + } + + return 0; +} diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf index cc4b87507e..5d63ed90c5 100644 --- a/t/lib-httpd/apache.conf +++ b/t/lib-httpd/apache.conf @@ -78,6 +78,7 @@ PassEnv GNUPGHOME PassEnv ASAN_OPTIONS PassEnv GIT_TRACE PassEnv GIT_CONFIG_NOSYSTEM +PassEnv GIT_TEST_SIDEBAND_ALL SetEnvIf Git-Protocol ".*" GIT_PROTOCOL=$0 diff --git a/t/t0003-attributes.sh b/t/t0003-attributes.sh index 22499bce5f..71e63d8b50 100755 --- a/t/t0003-attributes.sh +++ b/t/t0003-attributes.sh @@ -322,4 +322,24 @@ test_expect_success 'bare repository: test info/attributes' ' ) ' +test_expect_success 'binary macro expanded by -a' ' + echo "file binary" >.gitattributes && + cat >expect <<-\EOF && + file: binary: set + file: diff: unset + file: merge: unset + file: text: unset + EOF + git check-attr -a file >actual && + test_cmp expect actual +' + + +test_expect_success 'query binary macro directly' ' + echo "file binary" >.gitattributes && + echo file: binary: set >expect && + git check-attr binary file >actual && + test_cmp expect actual +' + test_done diff --git a/t/t0006-date.sh b/t/t0006-date.sh index ffb2975e48..d9fcc829a9 100755 --- a/t/t0006-date.sh +++ b/t/t0006-date.sh @@ -4,10 +4,10 @@ test_description='test date parsing and printing' . 
./test-lib.sh # arbitrary reference time: 2009-08-30 19:20:00 -TEST_DATE_NOW=1251660000; export TEST_DATE_NOW +GIT_TEST_DATE_NOW=1251660000; export GIT_TEST_DATE_NOW check_relative() { - t=$(($TEST_DATE_NOW - $1)) + t=$(($GIT_TEST_DATE_NOW - $1)) echo "$t -> $2" >expect test_expect_${3:-success} "relative date ($2)" " test-tool date relative $t >actual && @@ -128,4 +128,22 @@ check_approxidate '6AM, June 7, 2009' '2009-06-07 06:00:00' check_approxidate '2008-12-01' '2008-12-01 19:20:00' check_approxidate '2009-12-01' '2009-12-01 19:20:00' +check_date_format_human() { + t=$(($GIT_TEST_DATE_NOW - $1)) + echo "$t -> $2" >expect + test_expect_success "human date $t" ' + test-tool date human $t >actual && + test_i18ncmp expect actual +' +} + +check_date_format_human 18000 "5 hours ago" # 5 hours ago +check_date_format_human 432000 "Tue Aug 25 19:20" # 5 days ago +check_date_format_human 1728000 "Mon Aug 10 19:20" # 3 weeks ago +check_date_format_human 13000000 "Thu Apr 2 08:13" # 5 months ago +check_date_format_human 31449600 "Aug 31 2008" # 12 months ago +check_date_format_human 37500000 "Jun 22 2008" # 1 year, 2 months ago +check_date_format_human 55188000 "Dec 1 2007" # 1 year, 9 months ago +check_date_format_human 630000000 "Sep 13 1989" # 20 years ago + test_done diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index fd5f1ac649..e10f5f787f 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -24,7 +24,7 @@ generate_random_characters () { } file_size () { - perl -e 'print -s $ARGV[0]' "$1" + test-tool path-utils file-size "$1" } filter_git () { diff --git a/t/t0025-crlf-renormalize.sh b/t/t0025-crlf-renormalize.sh index 9d9e02a211..e13363ade5 100755 --- a/t/t0025-crlf-renormalize.sh +++ b/t/t0025-crlf-renormalize.sh @@ -27,4 +27,13 @@ test_expect_success 'renormalize CRLF in repo' ' test_cmp expect actual ' +test_expect_success 'ignore-errors not mistaken for renormalize' ' + git reset --hard && + echo "*.txt text=auto" >.gitattributes && + git ls-files --eol >expect && + git add --ignore-errors "*.txt" && + git ls-files --eol >actual && + test_cmp expect actual +' + test_done diff --git a/t/t0028-working-tree-encoding.sh b/t/t0028-working-tree-encoding.sh index 7e87b5a200..e58ecbfc44 100755 --- a/t/t0028-working-tree-encoding.sh +++ b/t/t0028-working-tree-encoding.sh @@ -11,9 +11,12 @@ test_expect_success 'setup test files' ' text="hallo there!\ncan you read me?" 
&& echo "*.utf16 text working-tree-encoding=utf-16" >.gitattributes && + echo "*.utf16lebom text working-tree-encoding=UTF-16LE-BOM" >>.gitattributes && printf "$text" >test.utf8.raw && printf "$text" | iconv -f UTF-8 -t UTF-16 >test.utf16.raw && printf "$text" | iconv -f UTF-8 -t UTF-32 >test.utf32.raw && + printf "\377\376" >test.utf16lebom.raw && + printf "$text" | iconv -f UTF-8 -t UTF-32LE >>test.utf16lebom.raw && # Line ending tests printf "one\ntwo\nthree\n" >lf.utf8.raw && @@ -32,7 +35,8 @@ test_expect_success 'setup test files' ' # Add only UTF-16 file, we will add the UTF-32 file later cp test.utf16.raw test.utf16 && cp test.utf32.raw test.utf32 && - git add .gitattributes test.utf16 && + cp test.utf16lebom.raw test.utf16lebom && + git add .gitattributes test.utf16 test.utf16lebom && git commit -m initial ' @@ -51,6 +55,12 @@ test_expect_success 're-encode to UTF-16 on checkout' ' test_cmp_bin test.utf16.raw test.utf16 ' +test_expect_success 're-encode to UTF-16-LE-BOM on checkout' ' + rm test.utf16lebom && + git checkout test.utf16lebom && + test_cmp_bin test.utf16lebom.raw test.utf16lebom +' + test_expect_success 'check $GIT_DIR/info/attributes support' ' test_when_finished "rm -f test.utf32.git" && test_when_finished "git reset --hard HEAD" && diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh index 99a614bc7c..ebc49561ac 100755 --- a/t/t0061-run-command.sh +++ b/t/t0061-run-command.sh @@ -166,7 +166,8 @@ test_trace () { expect="$1" shift GIT_TRACE=1 test-tool run-command "$@" run-command true 2>&1 >/dev/null | \ - sed -e 's/.* run_command: //' -e '/trace: .*/d' >actual && + sed -e 's/.* run_command: //' -e '/trace: .*/d' \ + -e '/RUNTIME_PREFIX requested/d' >actual && echo "$expect true" >expect && test_cmp expect actual } @@ -199,4 +200,14 @@ test_expect_success 'GIT_TRACE with environment variables' ' ) ' +test_expect_success MINGW 'verify curlies are quoted properly' ' + : force the rev-parse through the MSYS2 Bash && + git -c alias.r="!git rev-parse" r -- a{b}c >actual && + cat >expect <<-\EOF && + -- + a{b}c + EOF + test_cmp expect actual +' + test_done diff --git a/t/t1050-large.sh b/t/t1050-large.sh index 1a9b21b293..dcb4dbba67 100755 --- a/t/t1050-large.sh +++ b/t/t1050-large.sh @@ -8,7 +8,7 @@ test_description='adding and checking out large blobs' # This should be moved to test-lib.sh together with the # copy in t0021 after both topics have graduated to 'master'. 
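# The helper being swapped in below can also be run by hand; a sketch (it
# assumes test-tool is reachable the same way it is inside the test suite,
# and the file names are arbitrary):
test-tool path-utils file-size one.pack two.txt   # prints one decimal size per line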
file_size () { - perl -e 'print -s $ARGV[0]' "$1" + test-tool path-utils file-size "$1" } test_expect_success setup ' diff --git a/t/t1450-fsck.sh b/t/t1450-fsck.sh index 2e5e979336..c61f972141 100755 --- a/t/t1450-fsck.sh +++ b/t/t1450-fsck.sh @@ -406,7 +406,7 @@ test_expect_success 'rev-list --verify-objects with bad sha1' ' test_might_fail git rev-list --verify-objects refs/heads/bogus >/dev/null 2>out && cat out && - test_i18ngrep -q "error: sha1 mismatch 63ffffffffffffffffffffffffffffffffffffff" out + test_i18ngrep -q "error: hash mismatch 63ffffffffffffffffffffffffffffffffffffff" out ' test_expect_success 'force fsck to ignore double author' ' diff --git a/t/t1512-rev-parse-disambiguation.sh b/t/t1512-rev-parse-disambiguation.sh index e4d5b56014..c19fb500cb 100755 --- a/t/t1512-rev-parse-disambiguation.sh +++ b/t/t1512-rev-parse-disambiguation.sh @@ -388,4 +388,14 @@ test_expect_success C_LOCALE_OUTPUT 'ambiguous commits are printed by type first done ' +test_expect_success 'cat-file --batch and --batch-check show ambiguous' ' + echo "0000 ambiguous" >expect && + echo 0000 | git cat-file --batch-check >actual 2>err && + test_cmp expect actual && + test_i18ngrep hint: err && + echo 0000 | git cat-file --batch >actual 2>err && + test_cmp expect actual && + test_i18ngrep hint: err +' + test_done diff --git a/t/t2018-checkout-branch.sh b/t/t2018-checkout-branch.sh index 2131fb2a56..c5014ad9a6 100755 --- a/t/t2018-checkout-branch.sh +++ b/t/t2018-checkout-branch.sh @@ -198,4 +198,13 @@ test_expect_success 'checkout -B to the current branch works' ' test_dirty_mergeable ' +test_expect_success 'checkout -b after clone --no-checkout does a checkout of HEAD' ' + git init src && + test_commit -C src a && + rev="$(git -C src rev-parse HEAD)" && + git clone --no-checkout src dest && + git -C dest checkout "$rev" -b branch && + test_path_is_file dest/a.t +' + test_done diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh index 7a440e08d8..52fa41c707 100755 --- a/t/t3404-rebase-interactive.sh +++ b/t/t3404-rebase-interactive.sh @@ -147,6 +147,25 @@ test_expect_success 'rebase -i with the exec command checks tree cleanness' ' git rebase --continue ' +test_expect_success 'rebase -x with empty command fails' ' + test_when_finished "git rebase --abort ||:" && + test_must_fail git rebase -x "" @ 2>actual && + test_write_lines "error: empty exec command" >expected && + test_i18ncmp expected actual && + test_must_fail git rebase -x " " @ 2>actual && + test_i18ncmp expected actual +' + +LF=' +' +test_expect_success 'rebase -x with newline in command fails' ' + test_when_finished "git rebase --abort ||:" && + test_must_fail git rebase -x "a${LF}b" @ 2>actual && + test_write_lines "error: exec commands cannot contain newlines" \ + >expected && + test_i18ncmp expected actual +' + test_expect_success 'rebase -i with exec of inexistent command' ' git checkout master && test_when_finished "git rebase --abort" && @@ -156,6 +175,11 @@ test_expect_success 'rebase -i with exec of inexistent command' ' ! 
grep "Maybe git-rebase is broken" actual ' +test_expect_success 'implicit interactive rebase does not invoke sequence editor' ' + test_when_finished "git rebase --abort ||:" && + GIT_SEQUENCE_EDITOR="echo bad >" git rebase -x"echo one" @^ +' + test_expect_success 'no changes are a nop' ' git checkout branch2 && set_fake_editor && diff --git a/t/t3406-rebase-message.sh b/t/t3406-rebase-message.sh index f64b130cb8..b393e1e9fe 100755 --- a/t/t3406-rebase-message.sh +++ b/t/t3406-rebase-message.sh @@ -17,14 +17,9 @@ test_expect_success 'setup' ' git tag start ' -cat >expect <<\EOF -Already applied: 0001 A -Already applied: 0002 B -Committed: 0003 Z -EOF - test_expect_success 'rebase -m' ' git rebase -m master >report && + >expect && sed -n -e "/^Already applied: /p" \ -e "/^Committed: /p" report >actual && test_cmp expect actual diff --git a/t/t3420-rebase-autostash.sh b/t/t3420-rebase-autostash.sh index 4c7494cc8f..2d1094e483 100755 --- a/t/t3420-rebase-autostash.sh +++ b/t/t3420-rebase-autostash.sh @@ -53,41 +53,6 @@ create_expected_success_interactive () { EOF } -create_expected_success_merge () { - cat >expected <<-EOF - $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual) - HEAD is now at $(git rev-parse --short feature-branch) third commit - First, rewinding head to replay your work on top of it... - Merging unrelated-onto-branch with HEAD~1 - Merging: - $(git rev-parse --short unrelated-onto-branch) unrelated commit - $(git rev-parse --short feature-branch^) second commit - found 1 common ancestor: - $(git rev-parse --short feature-branch~2) initial commit - [detached HEAD $(git rev-parse --short rebased-feature-branch~1)] second commit - Author: A U Thor <author@example.com> - Date: Thu Apr 7 15:14:13 2005 -0700 - 2 files changed, 2 insertions(+) - create mode 100644 file1 - create mode 100644 file2 - Committed: 0001 second commit - Merging unrelated-onto-branch with HEAD~0 - Merging: - $(git rev-parse --short rebased-feature-branch~1) second commit - $(git rev-parse --short feature-branch) third commit - found 1 common ancestor: - $(git rev-parse --short feature-branch~1) second commit - [detached HEAD $(git rev-parse --short rebased-feature-branch)] third commit - Author: A U Thor <author@example.com> - Date: Thu Apr 7 15:15:13 2005 -0700 - 1 file changed, 1 insertion(+) - create mode 100644 file3 - Committed: 0002 third commit - All done. - Applied autostash. - EOF -} - create_expected_failure_am () { cat >expected <<-EOF $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual) @@ -112,43 +77,6 @@ create_expected_failure_interactive () { EOF } -create_expected_failure_merge () { - cat >expected <<-EOF - $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual) - HEAD is now at $(git rev-parse --short feature-branch) third commit - First, rewinding head to replay your work on top of it... 
- Merging unrelated-onto-branch with HEAD~1 - Merging: - $(git rev-parse --short unrelated-onto-branch) unrelated commit - $(git rev-parse --short feature-branch^) second commit - found 1 common ancestor: - $(git rev-parse --short feature-branch~2) initial commit - [detached HEAD $(git rev-parse --short rebased-feature-branch~1)] second commit - Author: A U Thor <author@example.com> - Date: Thu Apr 7 15:14:13 2005 -0700 - 2 files changed, 2 insertions(+) - create mode 100644 file1 - create mode 100644 file2 - Committed: 0001 second commit - Merging unrelated-onto-branch with HEAD~0 - Merging: - $(git rev-parse --short rebased-feature-branch~1) second commit - $(git rev-parse --short feature-branch) third commit - found 1 common ancestor: - $(git rev-parse --short feature-branch~1) second commit - [detached HEAD $(git rev-parse --short rebased-feature-branch)] third commit - Author: A U Thor <author@example.com> - Date: Thu Apr 7 15:15:13 2005 -0700 - 1 file changed, 1 insertion(+) - create mode 100644 file3 - Committed: 0002 third commit - All done. - Applying autostash resulted in conflicts. - Your changes are safe in the stash. - You can run "git stash pop" or "git stash drop" at any time. - EOF -} - testrebase () { type=$1 dotest=$2 @@ -177,6 +105,9 @@ testrebase () { test_expect_success "rebase$type --autostash: check output" ' test_when_finished git branch -D rebased-feature-branch && suffix=${type#\ --} && suffix=${suffix:-am} && + if test ${suffix} = "merge"; then + suffix=interactive + fi && create_expected_success_$suffix && test_i18ncmp expected actual ' @@ -274,6 +205,9 @@ testrebase () { test_expect_success "rebase$type: check output with conflicting stash" ' test_when_finished git branch -D rebased-feature-branch && suffix=${type#\ --} && suffix=${suffix:-am} && + if test ${suffix} = "merge"; then + suffix=interactive + fi && create_expected_failure_$suffix && test_i18ncmp expected actual ' diff --git a/t/t3421-rebase-topology-linear.sh b/t/t3421-rebase-topology-linear.sh index 23ad4cff35..7274dca40b 100755 --- a/t/t3421-rebase-topology-linear.sh +++ b/t/t3421-rebase-topology-linear.sh @@ -111,7 +111,7 @@ test_run_rebase () { " } test_run_rebase success '' -test_run_rebase failure -m +test_run_rebase success -m test_run_rebase success -i test_have_prereq !REBASE_P || test_run_rebase success -p @@ -126,7 +126,7 @@ test_run_rebase () { " } test_run_rebase success '' -test_run_rebase failure -m +test_run_rebase success -m test_run_rebase success -i test_have_prereq !REBASE_P || test_run_rebase success -p @@ -141,7 +141,7 @@ test_run_rebase () { " } test_run_rebase success '' -test_run_rebase failure -m +test_run_rebase success -m test_run_rebase success -i test_have_prereq !REBASE_P || test_run_rebase success -p @@ -284,7 +284,7 @@ test_run_rebase () { " } test_run_rebase success '' -test_run_rebase failure -m +test_run_rebase success -m test_run_rebase success -i test_have_prereq !REBASE_P || test_run_rebase success -p @@ -315,7 +315,7 @@ test_run_rebase () { " } test_run_rebase success '' -test_run_rebase failure -m +test_run_rebase success -m test_run_rebase success -i test_have_prereq !REBASE_P || test_run_rebase failure -p diff --git a/t/t3425-rebase-topology-merges.sh b/t/t3425-rebase-topology-merges.sh index 5f892e33d7..fd8efe84fe 100755 --- a/t/t3425-rebase-topology-merges.sh +++ b/t/t3425-rebase-topology-merges.sh @@ -70,9 +70,8 @@ test_run_rebase () { test_linear_range "\'"$expected"\'" d.. 
" } -#TODO: make order consistent across all flavors of rebase -test_run_rebase success 'e n o' '' -test_run_rebase success 'e n o' -m +test_run_rebase success 'n o e' '' +test_run_rebase success 'n o e' -m test_run_rebase success 'n o e' -i test_run_rebase () { @@ -87,9 +86,8 @@ test_run_rebase () { test_linear_range "\'"$expected"\'" c.. " } -#TODO: make order consistent across all flavors of rebase -test_run_rebase success 'd e n o' '' -test_run_rebase success 'd e n o' -m +test_run_rebase success 'd n o e' '' +test_run_rebase success 'd n o e' -m test_run_rebase success 'd n o e' -i test_run_rebase () { @@ -104,9 +102,8 @@ test_run_rebase () { test_linear_range "\'"$expected"\'" c.. " } -#TODO: make order consistent across all flavors of rebase -test_run_rebase success 'd e n o' '' -test_run_rebase success 'd e n o' -m +test_run_rebase success 'd n o e' '' +test_run_rebase success 'd n o e' -m test_run_rebase success 'd n o e' -i if ! test_have_prereq REBASE_P; then diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh index cc5646836f..4c69255ee6 100755 --- a/t/t3430-rebase-merges.sh +++ b/t/t3430-rebase-merges.sh @@ -125,7 +125,7 @@ test_expect_success '`reset` refuses to overwrite untracked files' ' : >dont-overwrite-untracked.t && echo "reset refs/tags/dont-overwrite-untracked" >script-from-scratch && test_config sequence.editor \""$PWD"/replace-editor.sh\" && - test_must_fail git rebase -r HEAD && + test_must_fail git rebase -ir HEAD && git rebase --abort ' diff --git a/t/t3700-add.sh b/t/t3700-add.sh index 37729ba258..be582a513b 100755 --- a/t/t3700-add.sh +++ b/t/t3700-add.sh @@ -402,4 +402,11 @@ test_expect_success 'all statuses changed in folder if . is given' ' test $(git ls-files --stage | grep ^100755 | wc -l) -eq 0 ' +test_expect_success CASE_INSENSITIVE_FS 'path is case-insensitive' ' + path="$(pwd)/BLUB" && + touch "$path" && + downcased="$(echo "$path" | tr A-Z a-z)" && + git add "$downcased" +' + test_done diff --git a/t/t4006-diff-mode.sh b/t/t4006-diff-mode.sh index a8e01eccd1..03489aff14 100755 --- a/t/t4006-diff-mode.sh +++ b/t/t4006-diff-mode.sh @@ -32,28 +32,37 @@ test_expect_success 'prepare binary file' ' git commit -m binbin ' -# test_expect_success '--stat output after text chmod' ' -# test_chmod -x rezrov && -# echo " 0 files changed" >expect && -# git diff HEAD --stat >actual && -# test_i18ncmp expect actual -# ' -# -# test_expect_success '--shortstat output after text chmod' ' -# git diff HEAD --shortstat >actual && -# test_i18ncmp expect actual -# ' -# -# test_expect_success '--stat output after binary chmod' ' -# test_chmod +x binbin && -# echo " 0 files changed" >expect && -# git diff HEAD --stat >actual && -# test_i18ncmp expect actual -# ' -# -# test_expect_success '--shortstat output after binary chmod' ' -# git diff HEAD --shortstat >actual && -# test_i18ncmp expect actual -# ' +test_expect_success '--stat output after text chmod' ' + test_chmod -x rezrov && + cat >expect <<-\EOF && + rezrov | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) + EOF + git diff HEAD --stat >actual && + test_i18ncmp expect actual +' + +test_expect_success '--shortstat output after text chmod' ' + tail -n 1 <expect >expect.short && + git diff HEAD --shortstat >actual && + test_i18ncmp expect.short actual +' + +test_expect_success '--stat output after binary chmod' ' + test_chmod +x binbin && + cat >expect <<-EOF && + binbin | Bin + rezrov | 0 + 2 files changed, 0 insertions(+), 0 deletions(-) + EOF + git diff HEAD --stat >actual && + test_i18ncmp expect 
actual +' + +test_expect_success '--shortstat output after binary chmod' ' + tail -n 1 <expect >expect.short && + git diff HEAD --shortstat >actual && + test_i18ncmp expect.short actual +' test_done diff --git a/t/t4013-diff-various.sh b/t/t4013-diff-various.sh index 7d985ff6b1..9f8f0e84ad 100755 --- a/t/t4013-diff-various.sh +++ b/t/t4013-diff-various.sh @@ -98,6 +98,12 @@ test_expect_success setup ' git commit -m "update mode" && git checkout -f master && + # Same merge as master, but with parents reversed. Hide it in a + # pseudo-ref to avoid impacting tests with --all. + commit=$(echo reverse | + git commit-tree -p master^2 -p master^1 master^{tree}) && + git update-ref REVERSE $commit && + git config diff.renames false && git show-branch @@ -239,6 +245,8 @@ diff-tree --cc --stat --summary master # stat summary should show the diffstat and summary with the first parent diff-tree -c --stat --summary side diff-tree --cc --stat --summary side +diff-tree --cc --shortstat master +diff-tree --cc --summary REVERSE # improved by Timo's patch diff-tree --cc --patch-with-stat master # improved by Timo's patch @@ -350,6 +358,7 @@ diff --line-prefix=abc master master^ side diff --dirstat master~1 master~2 diff --dirstat initial rearrange diff --dirstat-by-file initial rearrange +diff --dirstat --cc master~1 master # No-index --abbrev and --no-abbrev diff --raw initial :noellipses diff --raw initial diff --git a/t/t4013/diff.diff-tree_--cc_--shortstat_master b/t/t4013/diff.diff-tree_--cc_--shortstat_master new file mode 100644 index 0000000000..a4ca42df2a --- /dev/null +++ b/t/t4013/diff.diff-tree_--cc_--shortstat_master @@ -0,0 +1,4 @@ +$ git diff-tree --cc --shortstat master +59d314ad6f356dd08601a4cd5e530381da3e3c64 + 2 files changed, 5 insertions(+) +$ diff --git a/t/t4013/diff.diff-tree_--cc_--summary_REVERSE b/t/t4013/diff.diff-tree_--cc_--summary_REVERSE new file mode 100644 index 0000000000..e208dd5682 --- /dev/null +++ b/t/t4013/diff.diff-tree_--cc_--summary_REVERSE @@ -0,0 +1,6 @@ +$ git diff-tree --cc --summary REVERSE +2562325a7ee916efb2481da93073b82cec801cbc + create mode 100644 file1 + delete mode 100644 file2 + delete mode 100644 file3 +$ diff --git a/t/t4013/diff.diff_--dirstat_--cc_master~1_master b/t/t4013/diff.diff_--dirstat_--cc_master~1_master new file mode 100644 index 0000000000..fba4e34175 --- /dev/null +++ b/t/t4013/diff.diff_--dirstat_--cc_master~1_master @@ -0,0 +1,3 @@ +$ git diff --dirstat --cc master~1 master + 40.0% dir/ +$ diff --git a/t/t4066-diff-emit-delay.sh b/t/t4066-diff-emit-delay.sh new file mode 100755 index 0000000000..5df6b5e64e --- /dev/null +++ b/t/t4066-diff-emit-delay.sh @@ -0,0 +1,79 @@ +#!/bin/sh + +test_description='test combined/stat/moved interaction' +. ./test-lib.sh + +# This test covers a weird 3-way interaction between "--cc -p", which will run +# the combined diff code, along with "--stat", which will be computed as a +# first-parent stat during the combined diff, and "--color-moved", which +# enables the emitted_symbols list to store the diff in memory. 
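# A minimal way to exercise the same three-way interaction by hand, as a
# sketch (it assumes the current repository already contains a merge commit):
git log --cc -p --stat --color-moved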
+ +test_expect_success 'set up history with a merge' ' + test_commit A && + test_commit B && + git checkout -b side HEAD^ && + test_commit C && + git merge -m M master && + test_commit D +' + +test_expect_success 'log --cc -p --stat --color-moved' ' + cat >expect <<-\EOF && + commit D + --- + D.t | 1 + + 1 file changed, 1 insertion(+) + + diff --git a/D.t b/D.t + new file mode 100644 + index 0000000..1784810 + --- /dev/null + +++ b/D.t + @@ -0,0 +1 @@ + +D + commit M + + B.t | 1 + + 1 file changed, 1 insertion(+) + commit C + --- + C.t | 1 + + 1 file changed, 1 insertion(+) + + diff --git a/C.t b/C.t + new file mode 100644 + index 0000000..3cc58df + --- /dev/null + +++ b/C.t + @@ -0,0 +1 @@ + +C + commit B + --- + B.t | 1 + + 1 file changed, 1 insertion(+) + + diff --git a/B.t b/B.t + new file mode 100644 + index 0000000..223b783 + --- /dev/null + +++ b/B.t + @@ -0,0 +1 @@ + +B + commit A + --- + A.t | 1 + + 1 file changed, 1 insertion(+) + + diff --git a/A.t b/A.t + new file mode 100644 + index 0000000..f70f10e + --- /dev/null + +++ b/A.t + @@ -0,0 +1 @@ + +A + EOF + git log --format="commit %s" --cc -p --stat --color-moved >actual && + test_cmp expect actual +' + +test_done diff --git a/t/t5315-pack-objects-compression.sh b/t/t5315-pack-objects-compression.sh index 34c47dae09..df970d7584 100755 --- a/t/t5315-pack-objects-compression.sh +++ b/t/t5315-pack-objects-compression.sh @@ -7,7 +7,7 @@ test_description='pack-object compression configuration' # This should be moved to test-lib.sh together with the # copy in t0021 after both topics have graduated to 'master'. file_size () { - perl -e 'print -s $ARGV[0]' "$1" + test-tool path-utils file-size "$1" } test_expect_success setup ' diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh index 5fe21db99f..16d10ebce8 100755 --- a/t/t5318-commit-graph.sh +++ b/t/t5318-commit-graph.sh @@ -122,7 +122,7 @@ test_expect_success 'write graph with merges' ' cd "$TRASH_DIRECTORY/full" && git commit-graph write && test_path_is_file $objdir/info/commit-graph && - graph_read_expect "10" "large_edges" + graph_read_expect "10" "extra_edges" ' graph_git_behavior 'merge 1 vs 2' full merge/1 merge/2 @@ -157,7 +157,7 @@ test_expect_success 'write graph with new commit' ' cd "$TRASH_DIRECTORY/full" && git commit-graph write && test_path_is_file $objdir/info/commit-graph && - graph_read_expect "11" "large_edges" + graph_read_expect "11" "extra_edges" ' graph_git_behavior 'full graph, commit 8 vs merge 1' full commits/8 merge/1 @@ -167,7 +167,7 @@ test_expect_success 'write graph with nothing new' ' cd "$TRASH_DIRECTORY/full" && git commit-graph write && test_path_is_file $objdir/info/commit-graph && - graph_read_expect "11" "large_edges" + graph_read_expect "11" "extra_edges" ' graph_git_behavior 'cleared graph, commit 8 vs merge 1' full commits/8 merge/1 @@ -177,7 +177,7 @@ test_expect_success 'build graph from latest pack with closure' ' cd "$TRASH_DIRECTORY/full" && cat new-idx | git commit-graph write --stdin-packs && test_path_is_file $objdir/info/commit-graph && - graph_read_expect "9" "large_edges" + graph_read_expect "9" "extra_edges" ' graph_git_behavior 'graph from pack, commit 8 vs merge 1' full commits/8 merge/1 @@ -200,7 +200,7 @@ test_expect_success 'build graph from commits with append' ' cd "$TRASH_DIRECTORY/full" && git rev-parse merge/3 | git commit-graph write --stdin-commits --append && test_path_is_file $objdir/info/commit-graph && - graph_read_expect "10" "large_edges" + graph_read_expect "10" "extra_edges" ' graph_git_behavior 
'append graph, commit 8 vs merge 1' full commits/8 merge/1 @@ -210,7 +210,7 @@ test_expect_success 'build graph using --reachable' ' cd "$TRASH_DIRECTORY/full" && git commit-graph write --reachable && test_path_is_file $objdir/info/commit-graph && - graph_read_expect "11" "large_edges" + graph_read_expect "11" "extra_edges" ' graph_git_behavior 'append graph, commit 8 vs merge 1' full commits/8 merge/1 @@ -231,7 +231,7 @@ test_expect_success 'write graph in bare repo' ' cd "$TRASH_DIRECTORY/bare" && git commit-graph write && test_path_is_file $baredir/info/commit-graph && - graph_read_expect "11" "large_edges" + graph_read_expect "11" "extra_edges" ' graph_git_behavior 'bare repo with graph, commit 8 vs merge 1' bare commits/8 merge/1 @@ -366,9 +366,10 @@ GRAPH_OCTOPUS_DATA_OFFSET=$(($GRAPH_COMMIT_DATA_OFFSET + \ GRAPH_BYTE_OCTOPUS=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4)) GRAPH_BYTE_FOOTER=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4 * $NUM_OCTOPUS_EDGES)) -# usage: corrupt_graph_and_verify <position> <data> <string> +# usage: corrupt_graph_and_verify <position> <data> <string> [<zero_pos>] # Manipulates the commit-graph file at the position -# by inserting the data, then runs 'git commit-graph verify' +# by inserting the data, optionally zeroing the file +# starting at <zero_pos>, then runs 'git commit-graph verify' # and places the output in the file 'err'. Test 'err' for # the given string. corrupt_graph_and_verify() { @@ -376,11 +377,15 @@ corrupt_graph_and_verify() { data="${2:-\0}" grepstr=$3 cd "$TRASH_DIRECTORY/full" && + orig_size=$(wc -c < $objdir/info/commit-graph) && + zero_pos=${4:-${orig_size}} && test_when_finished mv commit-graph-backup $objdir/info/commit-graph && cp $objdir/info/commit-graph commit-graph-backup && printf "$data" | dd of="$objdir/info/commit-graph" bs=1 seek="$pos" conv=notrunc && + dd of="$objdir/info/commit-graph" bs=1 seek="$zero_pos" count=0 && + dd if=/dev/zero of="$objdir/info/commit-graph" bs=1 seek="$zero_pos" count=$(($orig_size - $zero_pos)) && test_must_fail git commit-graph verify 2>test_err && - grep -v "^+" test_err >err + grep -v "^+" test_err >err && test_i18ngrep "$grepstr" err } @@ -484,6 +489,11 @@ test_expect_success 'detect invalid checksum hash' ' "incorrect checksum" ' +test_expect_success 'detect incorrect chunk count' ' + corrupt_graph_and_verify $GRAPH_BYTE_CHUNK_COUNT "\377" \ + "chunk lookup table entry missing" $GRAPH_CHUNK_LOOKUP_OFFSET +' + test_expect_success 'git fsck (checks commit-graph)' ' cd "$TRASH_DIRECTORY/full" && git fsck && diff --git a/t/t5322-pack-objects-sparse.sh b/t/t5322-pack-objects-sparse.sh new file mode 100755 index 0000000000..7124b5581a --- /dev/null +++ b/t/t5322-pack-objects-sparse.sh @@ -0,0 +1,136 @@ +#!/bin/sh + +test_description='pack-objects object selection using sparse algorithm' +. ./test-lib.sh + +test_expect_success 'setup repo' ' + test_commit initial && + for i in $(test_seq 1 3) + do + mkdir f$i && + for j in $(test_seq 1 3) + do + mkdir f$i/f$j && + echo $j >f$i/f$j/data.txt + done + done && + git add . 
&& + git commit -m "Initialized trees" && + for i in $(test_seq 1 3) + do + git checkout -b topic$i master && + echo change-$i >f$i/f$i/data.txt && + git commit -a -m "Changed f$i/f$i/data.txt" + done && + cat >packinput.txt <<-EOF && + topic1 + ^topic2 + ^topic3 + EOF + git rev-parse \ + topic1 \ + topic1^{tree} \ + topic1:f1 \ + topic1:f1/f1 \ + topic1:f1/f1/data.txt | sort >expect_objects.txt +' + +test_expect_success 'non-sparse pack-objects' ' + git pack-objects --stdout --revs --no-sparse <packinput.txt >nonsparse.pack && + git index-pack -o nonsparse.idx nonsparse.pack && + git show-index <nonsparse.idx | awk "{print \$2}" >nonsparse_objects.txt && + test_cmp expect_objects.txt nonsparse_objects.txt +' + +test_expect_success 'sparse pack-objects' ' + git pack-objects --stdout --revs --sparse <packinput.txt >sparse.pack && + git index-pack -o sparse.idx sparse.pack && + git show-index <sparse.idx | awk "{print \$2}" >sparse_objects.txt && + test_cmp expect_objects.txt sparse_objects.txt +' + +test_expect_success 'duplicate a folder from f3 and commit to topic1' ' + git checkout topic1 && + echo change-3 >f3/f3/data.txt && + git commit -a -m "Changed f3/f3/data.txt" && + git rev-parse \ + topic1~1 \ + topic1~1^{tree} \ + topic1^{tree} \ + topic1 \ + topic1:f1 \ + topic1:f1/f1 \ + topic1:f1/f1/data.txt | sort >required_objects.txt +' + +test_expect_success 'non-sparse pack-objects' ' + git pack-objects --stdout --revs --no-sparse <packinput.txt >nonsparse.pack && + git index-pack -o nonsparse.idx nonsparse.pack && + git show-index <nonsparse.idx | awk "{print \$2}" >nonsparse_objects.txt && + comm -1 -2 required_objects.txt nonsparse_objects.txt >nonsparse_required_objects.txt && + test_cmp required_objects.txt nonsparse_required_objects.txt +' + +test_expect_success 'sparse pack-objects' ' + git pack-objects --stdout --revs --sparse <packinput.txt >sparse.pack && + git index-pack -o sparse.idx sparse.pack && + git show-index <sparse.idx | awk "{print \$2}" >sparse_objects.txt && + comm -1 -2 required_objects.txt sparse_objects.txt >sparse_required_objects.txt && + test_cmp required_objects.txt sparse_required_objects.txt +' + +# Demonstrate that the algorithms differ when we copy a tree wholesale +# from one folder to another. 
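# Equivalent ways of selecting the sparse walk exercised by these tests, as a
# sketch (out.pack is an arbitrary output name):
git pack-objects --stdout --revs --sparse <packinput.txt >out.pack
git -c pack.useSparse=true pack-objects --stdout --revs <packinput.txt >out.pack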
+ +test_expect_success 'duplicate a folder from f1 into f3' ' + mkdir f3/f4 && + cp -r f1/f1/* f3/f4 && + git add f3/f4 && + git commit -m "Copied f1/f1 to f3/f4" && + cat >packinput.txt <<-EOF && + topic1 + ^topic1~1 + EOF + git rev-parse \ + topic1 \ + topic1^{tree} \ + topic1:f3 | sort >required_objects.txt +' + +test_expect_success 'non-sparse pack-objects' ' + git pack-objects --stdout --revs --no-sparse <packinput.txt >nonsparse.pack && + git index-pack -o nonsparse.idx nonsparse.pack && + git show-index <nonsparse.idx | awk "{print \$2}" >nonsparse_objects.txt && + comm -1 -2 required_objects.txt nonsparse_objects.txt >nonsparse_required_objects.txt && + test_cmp required_objects.txt nonsparse_required_objects.txt +' + +test_expect_success 'sparse pack-objects' ' + git rev-parse \ + topic1 \ + topic1^{tree} \ + topic1:f3 \ + topic1:f3/f4 \ + topic1:f3/f4/data.txt | sort >expect_sparse_objects.txt && + git pack-objects --stdout --revs --sparse <packinput.txt >sparse.pack && + git index-pack -o sparse.idx sparse.pack && + git show-index <sparse.idx | awk "{print \$2}" >sparse_objects.txt && + test_cmp expect_sparse_objects.txt sparse_objects.txt +' + +test_expect_success 'pack.useSparse enables algorithm' ' + git config pack.useSparse true && + git pack-objects --stdout --revs <packinput.txt >sparse.pack && + git index-pack -o sparse.idx sparse.pack && + git show-index <sparse.idx | awk "{print \$2}" >sparse_objects.txt && + test_cmp expect_sparse_objects.txt sparse_objects.txt +' + +test_expect_success 'pack.useSparse overridden' ' + git pack-objects --stdout --revs --no-sparse <packinput.txt >sparse.pack && + git index-pack -o sparse.idx sparse.pack && + git show-index <sparse.idx | awk "{print \$2}" >sparse_objects.txt && + test_cmp required_objects.txt sparse_objects.txt +' + +test_done diff --git a/t/t5407-post-rewrite-hook.sh b/t/t5407-post-rewrite-hook.sh index 9b2a274c71..a4a5903cba 100755 --- a/t/t5407-post-rewrite-hook.sh +++ b/t/t5407-post-rewrite-hook.sh @@ -78,6 +78,7 @@ test_expect_success 'git rebase --skip' ' git rebase --continue && echo rebase >expected.args && cat >expected.data <<-EOF && + $(git rev-parse C) $(git rev-parse HEAD^) $(git rev-parse D) $(git rev-parse HEAD) EOF verify_hook_input @@ -91,6 +92,7 @@ test_expect_success 'git rebase --skip the last one' ' echo rebase >expected.args && cat >expected.data <<-EOF && $(git rev-parse E) $(git rev-parse HEAD) + $(git rev-parse F) $(git rev-parse HEAD) EOF verify_hook_input ' @@ -120,6 +122,38 @@ test_expect_success 'git rebase -m --skip' ' git rebase --continue && echo rebase >expected.args && cat >expected.data <<-EOF && + $(git rev-parse C) $(git rev-parse HEAD^) + $(git rev-parse D) $(git rev-parse HEAD) + EOF + verify_hook_input +' + +test_expect_success 'git rebase with implicit use of interactive backend' ' + git reset --hard D && + clear_hook_input && + test_must_fail git rebase --keep --onto A B && + echo C > foo && + git add foo && + git rebase --continue && + echo rebase >expected.args && + cat >expected.data <<-EOF && + $(git rev-parse C) $(git rev-parse HEAD^) + $(git rev-parse D) $(git rev-parse HEAD) + EOF + verify_hook_input +' + +test_expect_success 'git rebase --skip with implicit use of interactive backend' ' + git reset --hard D && + clear_hook_input && + test_must_fail git rebase --keep --onto A B && + test_must_fail git rebase --skip && + echo D > foo && + git add foo && + git rebase --continue && + echo rebase >expected.args && + cat >expected.data <<-EOF && + $(git rev-parse C) $(git 
rev-parse HEAD^) $(git rev-parse D) $(git rev-parse HEAD) EOF verify_hook_input diff --git a/t/t5537-fetch-shallow.sh b/t/t5537-fetch-shallow.sh index 6faf17e17a..6caf628efa 100755 --- a/t/t5537-fetch-shallow.sh +++ b/t/t5537-fetch-shallow.sh @@ -243,7 +243,8 @@ test_expect_success 'shallow fetches check connectivity before writing shallow f "$(git -C "$REPO" rev-parse HEAD)" \ "$(git -C "$REPO" rev-parse HEAD^)" \ >"$HTTPD_ROOT_PATH/one-time-sed" && - test_must_fail git -C client fetch --depth=1 "$HTTPD_URL/one_time_sed/repo" \ + test_must_fail env GIT_TEST_SIDEBAND_ALL=0 git -C client \ + fetch --depth=1 "$HTTPD_URL/one_time_sed/repo" \ master:a_branch && # Ensure that the one-time-sed script was used. diff --git a/t/t5580-clone-push-unc.sh b/t/t5580-clone-push-unc.sh index ba548df4a9..217adf3a63 100755 --- a/t/t5580-clone-push-unc.sh +++ b/t/t5580-clone-push-unc.sh @@ -40,6 +40,11 @@ test_expect_success clone ' git clone "file://$UNCPATH" clone ' +test_expect_success 'clone with backslashed path' ' + BACKSLASHED="$(echo "$UNCPATH" | tr / \\\\)" && + git clone "$BACKSLASHED" backslashed +' + test_expect_success push ' ( cd clone && diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh index ae79c6bbc0..fe45bf828d 100755 --- a/t/t5701-git-serve.sh +++ b/t/t5701-git-serve.sh @@ -14,7 +14,7 @@ test_expect_success 'test capability advertisement' ' 0000 EOF - git serve --advertise-capabilities >out && + GIT_TEST_SIDEBAND_ALL=0 git serve --advertise-capabilities >out && test-tool pkt-line unpack <out >actual && test_cmp expect actual ' diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh index a738c0c4ce..db4ae09f2f 100755 --- a/t/t5702-protocol-v2.sh +++ b/t/t5702-protocol-v2.sh @@ -561,6 +561,27 @@ test_expect_success 'fetch with http:// using protocol v2' ' grep "git< version 2" log ' +test_expect_success 'fetch from namespaced repo respects namespaces' ' + test_when_finished "rm -f log" && + + git init "$HTTPD_DOCUMENT_ROOT_PATH/nsrepo" && + test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/nsrepo" one && + test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/nsrepo" two && + git -C "$HTTPD_DOCUMENT_ROOT_PATH/nsrepo" \ + update-ref refs/namespaces/ns/refs/heads/master one && + + GIT_TRACE_PACKET="$(pwd)/log" git -C http_child -c protocol.version=2 \ + fetch "$HTTPD_URL/smart_namespace/nsrepo" \ + refs/heads/master:refs/heads/theirs && + + # Server responded using protocol v2 + grep "fetch< version 2" log && + + git -C "$HTTPD_DOCUMENT_ROOT_PATH/nsrepo" rev-parse one >expect && + git -C http_child rev-parse theirs >actual && + test_cmp expect actual +' + test_expect_success 'push with http:// and a config of v2 does not request v2' ' test_when_finished "rm -f log" && # Till v2 for push is designed, make sure that if a client has @@ -630,8 +651,8 @@ test_expect_success 'when server does not send "ready", expect FLUSH' ' test_must_fail env GIT_TRACE_PACKET="$(pwd)/log" git -C http_child \ -c protocol.version=2 \ fetch "$HTTPD_URL/one_time_sed/http_parent" 2> err && - grep "fetch< acknowledgments" log && - ! grep "fetch< ready" log && + grep "fetch< .*acknowledgments" log && + ! grep "fetch< .*ready" log && test_i18ngrep "expected no other sections to be sent after no .ready." 
err ' diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh index 7053899cb5..f87b2f6df3 100755 --- a/t/t5703-upload-pack-ref-in-want.sh +++ b/t/t5703-upload-pack-ref-in-want.sh @@ -208,7 +208,7 @@ test_expect_success 'server is initially ahead - no ref in want' ' cp -r "$LOCAL_PRISTINE" local && inconsistency master 1234567890123456789012345678901234567890 && test_must_fail git -C local fetch 2>err && - test_i18ngrep "ERR upload-pack: not our ref" err + test_i18ngrep "fatal: remote error: upload-pack: not our ref" err ' test_expect_success 'server is initially ahead - ref in want' ' @@ -254,7 +254,7 @@ test_expect_success 'server loses a ref - ref in want' ' echo "s/master/raster/" >"$HTTPD_ROOT_PATH/one-time-sed" && test_must_fail git -C local fetch 2>err && - test_i18ngrep "ERR unknown ref refs/heads/raster" err + test_i18ngrep "fatal: remote error: unknown ref refs/heads/raster" err ' stop_httpd diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh index f84ff941c3..55835ee4a4 100755 --- a/t/t6030-bisect-porcelain.sh +++ b/t/t6030-bisect-porcelain.sh @@ -802,7 +802,7 @@ test_expect_success 'bisect terms needs 0 or 1 argument' ' test_must_fail git bisect terms only-one && test_must_fail git bisect terms 1 2 && test_must_fail git bisect terms 2>actual && - echo "no terms defined" >expected && + echo "error: no terms defined" >expected && test_i18ncmp expected actual ' diff --git a/t/t6042-merge-rename-corner-cases.sh b/t/t6042-merge-rename-corner-cases.sh index 7cc34e7579..09dfa8bd92 100755 --- a/t/t6042-merge-rename-corner-cases.sh +++ b/t/t6042-merge-rename-corner-cases.sh @@ -1175,7 +1175,7 @@ test_expect_success 'setup nested conflicts from rename/rename(2to1)' ' # Handle the left side git checkout L && - git mv one three && + git rm one two && mv -f file_v2 three && mv -f file_v5 two && git add two three && @@ -1183,7 +1183,7 @@ test_expect_success 'setup nested conflicts from rename/rename(2to1)' ' # Handle the right side git checkout R && - git mv two three && + git rm one two && mv -f file_v3 one && mv -f file_v6 three && git add one three && diff --git a/t/t6112-rev-list-filters-objects.sh b/t/t6112-rev-list-filters-objects.sh index eb32505a6e..9c11427719 100755 --- a/t/t6112-rev-list-filters-objects.sh +++ b/t/t6112-rev-list-filters-objects.sh @@ -283,7 +283,7 @@ test_expect_success 'verify tree:0 includes trees in "filtered" output' ' # Make sure tree:0 does not iterate through any trees. -test_expect_success 'filter a GIANT tree through tree:0' ' +test_expect_success 'verify skipping tree iteration when not collecting omits' ' GIT_TRACE=1 git -C r3 rev-list \ --objects --filter=tree:0 HEAD 2>filter_trace && grep "Skipping contents of tree [.][.][.]" filter_trace >actual && @@ -294,6 +294,126 @@ test_expect_success 'filter a GIANT tree through tree:0' ' ! grep "Skipping contents of tree [^.]" filter_trace ' +# Test tree:# filters. + +expect_has () { + commit=$1 && + name=$2 && + + hash=$(git -C r3 rev-parse $commit:$name) && + grep "^$hash $name$" actual +} + +test_expect_success 'verify tree:1 includes root trees' ' + git -C r3 rev-list --objects --filter=tree:1 HEAD >actual && + + # We should get two root directories and two commits. 
+ expect_has HEAD "" && + expect_has HEAD~1 "" && + test_line_count = 4 actual +' + +test_expect_success 'verify tree:2 includes root trees and immediate children' ' + git -C r3 rev-list --objects --filter=tree:2 HEAD >actual && + + expect_has HEAD "" && + expect_has HEAD~1 "" && + expect_has HEAD dir1 && + expect_has HEAD pattern && + expect_has HEAD sparse1 && + expect_has HEAD sparse2 && + + # There are also 2 commit objects + test_line_count = 8 actual +' + +test_expect_success 'verify tree:3 includes everything expected' ' + git -C r3 rev-list --objects --filter=tree:3 HEAD >actual && + + expect_has HEAD "" && + expect_has HEAD~1 "" && + expect_has HEAD dir1 && + expect_has HEAD dir1/sparse1 && + expect_has HEAD dir1/sparse2 && + expect_has HEAD pattern && + expect_has HEAD sparse1 && + expect_has HEAD sparse2 && + + # There are also 2 commit objects + test_line_count = 10 actual +' + +# Test provisional omit collection logic with a repo that has objects appearing +# at multiple depths - first deeper than the filter's threshold, then shallow. + +test_expect_success 'setup r4' ' + git init r4 && + + echo foo > r4/foo && + mkdir r4/subdir && + echo bar > r4/subdir/bar && + + mkdir r4/filt && + cp -r r4/foo r4/subdir r4/filt && + + git -C r4 add foo subdir filt && + git -C r4 commit -m "commit msg" +' + +expect_has_with_different_name () { + repo=$1 && + name=$2 && + + hash=$(git -C $repo rev-parse HEAD:$name) && + ! grep "^$hash $name$" actual && + grep "^$hash " actual && + ! grep "~$hash" actual +} + +test_expect_success 'test tree:# filter provisional omit for blob and tree' ' + git -C r4 rev-list --objects --filter-print-omitted --filter=tree:2 \ + HEAD >actual && + expect_has_with_different_name r4 filt/foo && + expect_has_with_different_name r4 filt/subdir +' + +test_expect_success 'verify skipping tree iteration when collecting omits' ' + GIT_TRACE=1 git -C r4 rev-list --filter-print-omitted \ + --objects --filter=tree:0 HEAD 2>filter_trace && + grep "^Skipping contents of tree " filter_trace >actual && + + echo "Skipping contents of tree subdir/..." >expect && + test_cmp expect actual +' + +# Test tree:<depth> where a tree is iterated to twice - once where a subentry is +# too deep to be included, and again where the blob inside it is shallow enough +# to be included. This makes sure we don't use LOFR_MARK_SEEN incorrectly (we +# can't use it because a tree can be iterated over again at a lower depth). + +test_expect_success 'tree:<depth> where we iterate over tree at two levels' ' + git init r5 && + + mkdir -p r5/a/subdir/b && + echo foo > r5/a/subdir/b/foo && + + mkdir -p r5/subdir/b && + echo foo > r5/subdir/b/foo && + + git -C r5 add a subdir && + git -C r5 commit -m "commit msg" && + + git -C r5 rev-list --objects --filter=tree:4 HEAD >actual && + expect_has_with_different_name r5 a/subdir/b/foo +' + +test_expect_success 'tree:<depth> which filters out blob but given as arg' ' + blob_hash=$(git -C r4 rev-parse HEAD:subdir/bar) && + + git -C r4 rev-list --objects --filter=tree:1 HEAD $blob_hash >actual && + grep ^$blob_hash actual +' + # Delete some loose objects and use rev-list, but WITHOUT any filtering. # This models previously omitted objects that we did not receive. @@ -324,4 +444,21 @@ test_expect_success 'rev-list W/ missing=allow-any' ' git -C r1 rev-list --quiet --missing=allow-any --objects HEAD ' +# Test expansion of filter specs. 
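The tests added just below verify that scaled filter arguments are canonicalized on the client before being sent over protocol v2. A minimal stand-alone sketch of the same behaviour, assuming a local repository named src with uploadpack.allowfilter enabled (the repository and output names here are illustrative, not taken from the test):

	git -C src config uploadpack.allowfilter 1
	GIT_TRACE_PACKET="$PWD/trace" git -c protocol.version=2 \
		clone --filter=blob:limit=1k "file://$PWD/src" dst
	grep "blob:limit=1024" trace	# the scaled 1k is sent as 1024 bytes
	! grep "blob:limit=1k" trace	# the unexpanded spelling never hits the wire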
+ +test_expect_success 'expand blob limit in protocol' ' + git -C r2 config --local uploadpack.allowfilter 1 && + GIT_TRACE_PACKET="$(pwd)/trace" git -c protocol.version=2 clone \ + --filter=blob:limit=1k "file://$(pwd)/r2" limit && + ! grep "blob:limit=1k" trace && + grep "blob:limit=1024" trace +' + +test_expect_success 'expand tree depth limit in protocol' ' + GIT_TRACE_PACKET="$(pwd)/tree_trace" git -c protocol.version=2 clone \ + --filter=tree:0k "file://$(pwd)/r2" tree && + ! grep "tree:0k" tree_trace && + grep "tree:0" tree_trace +' + test_done diff --git a/t/t6120-describe.sh b/t/t6120-describe.sh index d639d94696..ee5b03ee18 100755 --- a/t/t6120-describe.sh +++ b/t/t6120-describe.sh @@ -143,16 +143,46 @@ test_expect_success 'rename tag Q back to A' ' test_expect_success 'pack tag refs' 'git pack-refs' check_describe A-* HEAD +test_expect_success 'describe works from outside repo using --git-dir' ' + git clone --bare "$TRASH_DIRECTORY" "$TRASH_DIRECTORY/bare" && + git --git-dir "$TRASH_DIRECTORY/bare" describe >out && + grep "^A-[1-9][0-9]\?-g[0-9a-f]\+$" out +' + check_describe "A-*[0-9a-f]" --dirty +test_expect_success 'describe --dirty with --work-tree' ' + ( + cd "$TEST_DIRECTORY" && + git --git-dir "$TRASH_DIRECTORY/.git" --work-tree "$TRASH_DIRECTORY" describe --dirty >"$TRASH_DIRECTORY/out" + ) && + grep "^A-[1-9][0-9]\?-g[0-9a-f]\+$" out +' + test_expect_success 'set-up dirty work tree' ' echo >>file ' check_describe "A-*[0-9a-f]-dirty" --dirty +test_expect_success 'describe --dirty with --work-tree (dirty)' ' + ( + cd "$TEST_DIRECTORY" && + git --git-dir "$TRASH_DIRECTORY/.git" --work-tree "$TRASH_DIRECTORY" describe --dirty >"$TRASH_DIRECTORY/out" + ) && + grep "^A-[1-9][0-9]\?-g[0-9a-f]\+-dirty$" out +' + check_describe "A-*[0-9a-f].mod" --dirty=.mod +test_expect_success 'describe --dirty=.mod with --work-tree (dirty)' ' + ( + cd "$TEST_DIRECTORY" && + git --git-dir "$TRASH_DIRECTORY/.git" --work-tree "$TRASH_DIRECTORY" describe --dirty=.mod >"$TRASH_DIRECTORY/out" + ) && + grep "^A-[1-9][0-9]\?-g[0-9a-f]\+.mod$" out +' + test_expect_success 'describe --dirty HEAD' ' test_must_fail git describe --dirty HEAD ' @@ -303,8 +333,17 @@ test_expect_success 'describe chokes on severely broken submodules' ' mv .git/modules/sub1/ .git/modules/sub_moved && test_must_fail git describe --dirty ' + test_expect_success 'describe ignoring a broken submodule' ' git describe --broken >out && + grep broken out +' + +test_expect_success 'describe with --work-tree ignoring a broken submodule' ' + ( + cd "$TEST_DIRECTORY" && + git --git-dir "$TRASH_DIRECTORY/.git" --work-tree "$TRASH_DIRECTORY" describe --broken >"$TRASH_DIRECTORY/out" + ) && test_when_finished "mv .git/modules/sub_moved .git/modules/sub1" && grep broken out ' diff --git a/t/t7505-prepare-commit-msg-hook.sh b/t/t7505-prepare-commit-msg-hook.sh index ebfcad9c4c..ba8bd1b514 100755 --- a/t/t7505-prepare-commit-msg-hook.sh +++ b/t/t7505-prepare-commit-msg-hook.sh @@ -215,7 +215,7 @@ test_expect_success 'with hook and editor (merge)' ' test_rebase () { expect=$1 && mode=$2 && - test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase $mode)" ' + test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase ${mode:--i})" ' test_when_finished "\ git rebase --abort git checkout -f master @@ -225,7 +225,7 @@ test_rebase () { GIT_EDITOR="\"$FAKE_EDITOR\"" && ( export GIT_SEQUENCE_EDITOR GIT_EDITOR && - test_must_fail git rebase $mode b && + test_must_fail git rebase -i $mode b && echo x >a && git add a && test_must_fail git rebase --continue && 
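The ${mode:--i} parameter expansion introduced in test_rebase substitutes -i whenever $mode is empty or unset, which is what lets the suite call test_rebase success with no argument for the interactive-backend case. A tiny illustration of the idiom (hypothetical variable, not part of the test script):

	mode=
	echo "rebase ${mode:--i}"	# prints: rebase -i
	mode=-p
	echo "rebase ${mode:--i}"	# prints: rebase -p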
@@ -241,18 +241,18 @@ test_rebase () { git add b && git rebase --continue ) && - if test $mode = -p # reword amended after pick + if test "$mode" = -p # reword amended after pick then n=18 else n=17 fi && git log --pretty=%s -g -n$n HEAD@{1} >actual && - test_cmp "$TEST_DIRECTORY/t7505/expected-rebase$mode" actual + test_cmp "$TEST_DIRECTORY/t7505/expected-rebase${mode:--i}" actual ' } -test_rebase success -i +test_rebase success test_have_prereq !REBASE_P || test_rebase success -p test_expect_success 'with hook (cherry-pick)' ' diff --git a/t/t7510-signed-commit.sh b/t/t7510-signed-commit.sh index 86d3f93fa2..682b23a068 100755 --- a/t/t7510-signed-commit.sh +++ b/t/t7510-signed-commit.sh @@ -49,15 +49,28 @@ test_expect_success GPG 'create signed commits' ' git tag eighth-signed-alt && # commit.gpgsign is still on but this must not be signed - git tag ninth-unsigned $(echo 9 | git commit-tree HEAD^{tree}) && + echo 9 | git commit-tree HEAD^{tree} >oid && + test_line_count = 1 oid && + git tag ninth-unsigned $(cat oid) && # explicit -S of course must sign. - git tag tenth-signed $(echo 9 | git commit-tree -S HEAD^{tree}) + echo 10 | git commit-tree -S HEAD^{tree} >oid && + test_line_count = 1 oid && + git tag tenth-signed $(cat oid) && + + # --gpg-sign[=<key-id>] must sign. + echo 11 | git commit-tree --gpg-sign HEAD^{tree} >oid && + test_line_count = 1 oid && + git tag eleventh-signed $(cat oid) && + echo 12 | git commit-tree --gpg-sign=B7227189 HEAD^{tree} >oid && + test_line_count = 1 oid && + git tag twelfth-signed-alt $(cat oid) ' test_expect_success GPG 'verify and show signatures' ' ( for commit in initial second merge fourth-signed \ - fifth-signed sixth-signed seventh-signed tenth-signed + fifth-signed sixth-signed seventh-signed tenth-signed \ + eleventh-signed do git verify-commit $commit && git show --pretty=short --show-signature $commit >actual && @@ -78,7 +91,7 @@ test_expect_success GPG 'verify and show signatures' ' done ) && ( - for commit in eighth-signed-alt + for commit in eighth-signed-alt twelfth-signed-alt do git show --pretty=short --show-signature $commit >actual && grep "Good signature from" actual && diff --git a/t/t9303-fast-import-compression.sh b/t/t9303-fast-import-compression.sh index 856219f46a..5045f02a53 100755 --- a/t/t9303-fast-import-compression.sh +++ b/t/t9303-fast-import-compression.sh @@ -6,7 +6,7 @@ test_description='compression setting of fast-import utility' # This should be moved to test-lib.sh together with the # copy in t0021 after both topics have graduated to 'master'. file_size () { - perl -e 'print -s $ARGV[0]' "$1" + test-tool path-utils file-size "$1" } import_large () { diff --git a/t/t9807-git-p4-submit.sh b/t/t9807-git-p4-submit.sh index 2325599ee6..850d979119 100755 --- a/t/t9807-git-p4-submit.sh +++ b/t/t9807-git-p4-submit.sh @@ -500,6 +500,10 @@ test_expect_success 'submit --shelve' ' ) ' +last_shelve () { + p4 -G changes -s shelved -m 1 //depot/... | marshal_dump change +} + make_shelved_cl() { test_commit "$1" >/dev/null && git p4 submit --origin HEAD^ --shelve >/dev/null && @@ -533,12 +537,59 @@ test_expect_success 'submit --update-shelve' ' ) && ( cd "$cli" && - change=$(p4 -G changes -s shelved -m 1 //depot/... | \ - marshal_dump change) && + change=$(last_shelve) && p4 unshelve -c $change -s $change && grep -q updated-line shelf.t && p4 describe -S $change | grep added-file.t && - test_path_is_missing shelved-change-1.t + test_path_is_missing shelved-change-1.t && + p4 revert ... 
+ ) +' + +test_expect_success 'update a shelve involving moved and copied files' ' + test_when_finished cleanup_git && + ( + cd "$cli" && + : >file_to_move && + p4 add file_to_move && + p4 submit -d "change1" && + p4 edit file_to_move && + echo change >>file_to_move && + p4 submit -d "change2" && + p4 opened + ) && + git p4 clone --dest="$git" //depot && + ( + cd "$git" && + git config git-p4.detectCopies true && + git config git-p4.detectRenames true && + git config git-p4.skipSubmitEdit true && + mkdir moved && + cp file_to_move copy_of_file && + git add copy_of_file && + git mv file_to_move moved/ && + git commit -m "rename a file" && + git p4 submit -M --shelve --origin HEAD^ && + : >new_file && + git add new_file && + git commit --amend && + git show --stat HEAD && + change=$(last_shelve) && + git p4 submit -M --update-shelve $change --commit HEAD + ) && + ( + cd "$cli" && + change=$(last_shelve) && + echo change=$change && + p4 unshelve -s $change && + p4 submit -d "Testing update-shelve" && + test_path_is_file copy_of_file && + test_path_is_file moved/file_to_move && + test_path_is_missing file_to_move && + test_path_is_file new_file && + echo "unshelved and submitted change $change" && + p4 changes moved/file_to_move | grep "Testing update-shelve" && + p4 changes copy_of_file | grep "Testing update-shelve" ) ' diff --git a/t/t9903-bash-prompt.sh b/t/t9903-bash-prompt.sh index 81a5179e28..5cadedb2a9 100755 --- a/t/t9903-bash-prompt.sh +++ b/t/t9903-bash-prompt.sh @@ -180,7 +180,7 @@ test_expect_success 'prompt - interactive rebase' ' ' test_expect_success 'prompt - rebase merge' ' - printf " (b2|REBASE-m 1/3)" >expected && + printf " (b2|REBASE-i 1/3)" >expected && git checkout b2 && test_when_finished "git checkout master" && test_must_fail git rebase --merge b1 b2 && diff --git a/t/test-lib.sh b/t/test-lib.sh index a1abb1177a..42b1a0aa7f 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -111,6 +111,8 @@ do test -z "$HARNESS_ACTIVE" && quiet=t ;; --with-dashes) with_dashes=t ;; + --no-bin-wrappers) + no_bin_wrappers=t ;; --no-color) color= ;; --va|--val|--valg|--valgr|--valgri|--valgrin|--valgrind) @@ -139,6 +141,9 @@ do verbose_log=t tee=t ;; + --write-junit-xml) + write_junit_xml=t + ;; --stress) stress=t ;; --stress=*) @@ -622,11 +627,35 @@ trap 'exit $?' INT TERM HUP # the test_expect_* functions instead. 
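The two new test-lib.sh switches registered above can be combined when running a single script by hand. A hedged usage sketch, using the t5702-protocol-v2.sh script touched earlier in this series (the report path follows the junit_xml_path computation added further down, and $TEST_OUTPUT_DIRECTORY normally defaults to the t/ directory):

	cd t &&
	./t5702-protocol-v2.sh --no-bin-wrappers --write-junit-xml
	# JUnit-style report: $TEST_OUTPUT_DIRECTORY/out/TEST-t5702-protocol-v2.xml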
test_ok_ () { + if test -n "$write_junit_xml" + then + write_junit_xml_testcase "$*" + fi test_success=$(($test_success + 1)) say_color "" "ok $test_count - $@" } test_failure_ () { + if test -n "$write_junit_xml" + then + junit_insert="<failure message=\"not ok $test_count -" + junit_insert="$junit_insert $(xml_attr_encode "$1")\">" + junit_insert="$junit_insert $(xml_attr_encode \ + "$(if test -n "$GIT_TEST_TEE_OUTPUT_FILE" + then + test-tool path-utils skip-n-bytes \ + "$GIT_TEST_TEE_OUTPUT_FILE" $GIT_TEST_TEE_OFFSET + else + printf '%s\n' "$@" | sed 1d + fi)")" + junit_insert="$junit_insert</failure>" + if test -n "$GIT_TEST_TEE_OUTPUT_FILE" + then + junit_insert="$junit_insert<system-err>$(xml_attr_encode \ + "$(cat "$GIT_TEST_TEE_OUTPUT_FILE")")</system-err>" + fi + write_junit_xml_testcase "$1" " $junit_insert" + fi test_failure=$(($test_failure + 1)) say_color error "not ok $test_count - $1" shift @@ -635,11 +664,19 @@ test_failure_ () { } test_known_broken_ok_ () { + if test -n "$write_junit_xml" + then + write_junit_xml_testcase "$* (breakage fixed)" + fi test_fixed=$(($test_fixed+1)) say_color error "ok $test_count - $@ # TODO known breakage vanished" } test_known_broken_failure_ () { + if test -n "$write_junit_xml" + then + write_junit_xml_testcase "$* (known breakage)" + fi test_broken=$(($test_broken+1)) say_color warn "not ok $test_count - $@ # TODO known breakage" } @@ -897,12 +934,21 @@ test_start_ () { test_count=$(($test_count+1)) maybe_setup_verbose maybe_setup_valgrind + if test -n "$write_junit_xml" + then + junit_start=$(test-tool date getnanos) + fi } test_finish_ () { echo >&3 "" maybe_teardown_valgrind maybe_teardown_verbose + if test -n "$GIT_TEST_TEE_OFFSET" + then + GIT_TEST_TEE_OFFSET=$(test-tool path-utils file-size \ + "$GIT_TEST_TEE_OUTPUT_FILE") + fi } test_skip () { @@ -934,6 +980,13 @@ test_skip () { case "$to_skip" in t) + if test -n "$write_junit_xml" + then + message="$(xml_attr_encode "$skipped_reason")" + write_junit_xml_testcase "$1" \ + " <skipped message=\"$message\" />" + fi + say_color skip >&3 "skipping test: $@" say_color skip "ok $test_count # skip $1 ($skipped_reason)" : true @@ -949,9 +1002,51 @@ test_at_end_hook_ () { : } +write_junit_xml () { + case "$1" in + --truncate) + >"$junit_xml_path" + junit_have_testcase= + shift + ;; + esac + printf '%s\n' "$@" >>"$junit_xml_path" +} + +xml_attr_encode () { + printf '%s\n' "$@" | test-tool xml-encode +} + +write_junit_xml_testcase () { + junit_attrs="name=\"$(xml_attr_encode "$this_test.$test_count $1")\"" + shift + junit_attrs="$junit_attrs classname=\"$this_test\"" + junit_attrs="$junit_attrs time=\"$(test-tool \ + date getnanos $junit_start)\"" + write_junit_xml "$(printf '%s\n' \ + " <testcase $junit_attrs>" "$@" " </testcase>")" + junit_have_testcase=t +} + test_done () { GIT_EXIT_OK=t + if test -n "$write_junit_xml" && test -n "$junit_xml_path" + then + test -n "$junit_have_testcase" || { + junit_start=$(test-tool date getnanos) + write_junit_xml_testcase "all tests skipped" + } + + # adjust the overall time + junit_time=$(test-tool date getnanos $junit_suite_start) + sed "s/<testsuite [^>]*/& time=\"$junit_time\"/" \ + <"$junit_xml_path" >"$junit_xml_path.new" + mv "$junit_xml_path.new" "$junit_xml_path" + + write_junit_xml " </testsuite>" "</testsuites>" + fi + if test -z "$HARNESS_ACTIVE" then mkdir -p "$TEST_RESULTS_DIR" @@ -1011,7 +1106,11 @@ test_done () { error "Tests passed but trash directory already removed before test cleanup; aborting" cd "$TRASH_DIRECTORY/.." 
&& - rm -fr "$TRASH_DIRECTORY" || + rm -fr "$TRASH_DIRECTORY" || { + # try again in a bit + sleep 5; + rm -fr "$TRASH_DIRECTORY" + } || error "Tests passed but test cleanup failed; aborting" fi test_at_end_hook_ @@ -1117,20 +1216,25 @@ then PATH=$GIT_TEST_INSTALLED:$GIT_BUILD_DIR/t/helper:$PATH GIT_EXEC_PATH=${GIT_TEST_EXEC_PATH:-$GIT_EXEC_PATH} else # normal case, use ../bin-wrappers only unless $with_dashes: - git_bin_dir="$GIT_BUILD_DIR/bin-wrappers" - if ! test -x "$git_bin_dir/git" + if test -n "$no_bin_wrappers" then - if test -z "$with_dashes" + with_dashes=t + else + git_bin_dir="$GIT_BUILD_DIR/bin-wrappers" + if ! test -x "$git_bin_dir/git" then - say "$git_bin_dir/git is not executable; using GIT_EXEC_PATH" + if test -z "$with_dashes" + then + say "$git_bin_dir/git is not executable; using GIT_EXEC_PATH" + fi + with_dashes=t fi - with_dashes=t + PATH="$git_bin_dir:$PATH" fi - PATH="$git_bin_dir:$PATH" GIT_EXEC_PATH=$GIT_BUILD_DIR if test -n "$with_dashes" then - PATH="$GIT_BUILD_DIR:$PATH" + PATH="$GIT_BUILD_DIR:$GIT_BUILD_DIR/t/helper:$PATH" fi fi GIT_TEMPLATE_DIR="$GIT_BUILD_DIR"/templates/blt @@ -1154,7 +1258,7 @@ test -d "$GIT_BUILD_DIR"/templates/blt || { error "You haven't built things yet, have you?" } -if ! test -x "$GIT_BUILD_DIR"/t/helper/test-tool +if ! test -x "$GIT_BUILD_DIR"/t/helper/test-tool$X then echo >&2 'You need to build test-tool:' echo >&2 'Run "make t/helper/test-tool" in the source (toplevel) directory' @@ -1178,6 +1282,7 @@ then else mkdir -p "$TRASH_DIRECTORY" fi + # Use -P to resolve symlinks in our working directory so that the cwd # in subprocesses like git equals our $PWD (for pathname comparisons). cd -P "$TRASH_DIRECTORY" || exit 1 @@ -1191,6 +1296,23 @@ then test_done fi +if test -n "$write_junit_xml" +then + junit_xml_dir="$TEST_OUTPUT_DIRECTORY/out" + mkdir -p "$junit_xml_dir" + junit_xml_base=${0##*/} + junit_xml_path="$junit_xml_dir/TEST-${junit_xml_base%.sh}.xml" + junit_attrs="name=\"${junit_xml_base%.sh}\"" + junit_attrs="$junit_attrs timestamp=\"$(TZ=UTC \ + date +%Y-%m-%dT%H:%M:%S)\"" + write_junit_xml --truncate "<testsuites>" " <testsuite $junit_attrs>" + junit_suite_start=$(test-tool date getnanos) + if test -n "$GIT_TEST_TEE_OUTPUT_FILE" + then + GIT_TEST_TEE_OFFSET=0 + fi +fi + # Provide an implementation of the 'yes' utility yes () { if test $# = 0 diff --git a/transport-helper.c b/transport-helper.c index 6cf3bb324e..1f52c95fd8 100644 --- a/transport-helper.c +++ b/transport-helper.c @@ -679,10 +679,15 @@ static int fetch(struct transport *transport, if (data->transport_options.update_shallow) set_helper_option(transport, "update-shallow", "true"); - if (data->transport_options.filter_options.choice) - set_helper_option( - transport, "filter", - data->transport_options.filter_options.filter_spec); + if (data->transport_options.filter_options.choice) { + struct strbuf expanded_filter_spec = STRBUF_INIT; + expand_list_objects_filter_spec( + &data->transport_options.filter_options, + &expanded_filter_spec); + set_helper_option(transport, "filter", + expanded_filter_spec.buf); + strbuf_release(&expanded_filter_spec); + } if (data->transport_options.negotiation_tips) warning("Ignoring --negotiation-tip because the protocol does not support it."); diff --git a/transport.c b/transport.c index 99678153c1..e078812897 100644 --- a/transport.c +++ b/transport.c @@ -273,7 +273,8 @@ static struct ref *handshake(struct transport *transport, int for_push, packet_reader_init(&reader, data->fd[0], NULL, 0, PACKET_READ_CHOMP_NEWLINE | - 
PACKET_READ_GENTLE_ON_EOF); + PACKET_READ_GENTLE_ON_EOF | + PACKET_READ_DIE_ON_ERR_PACKET); data->version = discover_version(&reader); switch (data->version) { diff --git a/tree-walk.c b/tree-walk.c index 277e3b3243..1e4bbc8a0e 100644 --- a/tree-walk.c +++ b/tree-walk.c @@ -582,10 +582,10 @@ int get_tree_entry(const struct object_id *tree_oid, const char *name, struct ob * with the sha1 of the found object, and *mode will hold the mode of * the object. * - * See the code for enum follow_symlink_result for a description of + * See the code for enum get_oid_result for a description of * the return values. */ -enum follow_symlinks_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned *mode) +enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned *mode) { int retval = MISSING_OBJECT; struct dir_state *parents = NULL; diff --git a/tree-walk.h b/tree-walk.h index a4ad28ea5e..8225171866 100644 --- a/tree-walk.h +++ b/tree-walk.h @@ -51,23 +51,7 @@ struct traverse_info; typedef int (*traverse_callback_t)(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *); int traverse_trees(struct index_state *istate, int n, struct tree_desc *t, struct traverse_info *info); -enum follow_symlinks_result { - FOUND = 0, /* This includes out-of-tree links */ - MISSING_OBJECT = -1, /* The initial symlink is missing */ - DANGLING_SYMLINK = -2, /* - * The initial symlink is there, but - * (transitively) points to a missing - * in-tree file - */ - SYMLINK_LOOP = -3, - NOT_DIR = -4, /* - * Somewhere along the symlink chain, a path is - * requested which contains a file as a - * non-final element. 
- */ -}; - -enum follow_symlinks_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned *mode); +enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned *mode); struct traverse_info { const char *traverse_path; @@ -1,4 +1,3 @@ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "cache-tree.h" #include "tree.h" diff --git a/unpack-trees.c b/unpack-trees.c index 01c2175f7c..3563daae1a 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -1,4 +1,3 @@ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "argv-array.h" #include "repository.h" diff --git a/upload-pack.c b/upload-pack.c index 9df27b55a0..d098ef5982 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -70,6 +70,8 @@ static int allow_filter; static int allow_ref_in_want; static struct list_objects_filter_options filter_options; +static int allow_sideband_all; + static void reset_timeout(void) { alarm(timeout); @@ -139,14 +141,17 @@ static void create_pack_file(const struct object_array *have_obj, if (use_include_tag) argv_array_push(&pack_objects.args, "--include-tag"); if (filter_options.filter_spec) { + struct strbuf expanded_filter_spec = STRBUF_INIT; + expand_list_objects_filter_spec(&filter_options, + &expanded_filter_spec); if (pack_objects.use_shell) { struct strbuf buf = STRBUF_INIT; - sq_quote_buf(&buf, filter_options.filter_spec); + sq_quote_buf(&buf, expanded_filter_spec.buf); argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf); strbuf_release(&buf); } else { argv_array_pushf(&pack_objects.args, "--filter=%s", - filter_options.filter_spec); + expanded_filter_spec.buf); } } @@ -353,7 +358,8 @@ static int ok_to_give_up(const struct object_array *have_obj, min_generation); } -static int get_common_commits(struct object_array *have_obj, +static int get_common_commits(struct packet_reader *reader, + struct object_array *have_obj, struct object_array *want_obj) { struct object_id oid; @@ -365,12 +371,11 @@ static int get_common_commits(struct object_array *have_obj, save_commit_buffer = 0; for (;;) { - char *line = packet_read_line(0, NULL); const char *arg; reset_timeout(); - if (!line) { + if (packet_reader_read(reader) != PACKET_READ_NORMAL) { if (multi_ack == 2 && got_common && !got_other && ok_to_give_up(have_obj, want_obj)) { sent_ready = 1; @@ -389,7 +394,7 @@ static int get_common_commits(struct object_array *have_obj, got_other = 0; continue; } - if (skip_prefix(line, "have ", &arg)) { + if (skip_prefix(reader->line, "have ", &arg)) { switch (got_oid(arg, &oid, have_obj)) { case -1: /* they have what we do not */ got_other = 1; @@ -415,7 +420,7 @@ static int get_common_commits(struct object_array *have_obj, } continue; } - if (!strcmp(line, "done")) { + if (!strcmp(reader->line, "done")) { if (have_obj->nr > 0) { if (multi_ack) packet_write_fmt(1, "ACK %s\n", last_hex); @@ -424,7 +429,7 @@ static int get_common_commits(struct object_array *have_obj, packet_write_fmt(1, "NAK\n"); return -1; } - die("git upload-pack: expected SHA1 list, got '%s'", line); + die("git upload-pack: expected SHA1 list, got '%s'", reader->line); } } @@ -612,13 +617,14 @@ error: } } -static void send_shallow(struct commit_list *result) +static void send_shallow(struct packet_writer *writer, + struct commit_list *result) { while (result) { struct object *object = &result->item->object; if (!(object->flags & 
(CLIENT_SHALLOW|NOT_SHALLOW))) { - packet_write_fmt(1, "shallow %s", - oid_to_hex(&object->oid)); + packet_writer_write(writer, "shallow %s", + oid_to_hex(&object->oid)); register_shallow(the_repository, &object->oid); shallow_nr++; } @@ -626,7 +632,8 @@ static void send_shallow(struct commit_list *result) } } -static void send_unshallow(const struct object_array *shallows, +static void send_unshallow(struct packet_writer *writer, + const struct object_array *shallows, struct object_array *want_obj) { int i; @@ -635,8 +642,8 @@ static void send_unshallow(const struct object_array *shallows, struct object *object = shallows->objects[i].item; if (object->flags & NOT_SHALLOW) { struct commit_list *parents; - packet_write_fmt(1, "unshallow %s", - oid_to_hex(&object->oid)); + packet_writer_write(writer, "unshallow %s", + oid_to_hex(&object->oid)); object->flags &= ~CLIENT_SHALLOW; /* * We want to _register_ "object" as shallow, but we @@ -663,8 +670,7 @@ static void send_unshallow(const struct object_array *shallows, static int check_ref(const char *refname_full, const struct object_id *oid, int flag, void *cb_data); - -static void deepen(int depth, int deepen_relative, +static void deepen(struct packet_writer *writer, int depth, int deepen_relative, struct object_array *shallows, struct object_array *want_obj) { if (depth == INFINITE_DEPTH && !is_repository_shallow(the_repository)) { @@ -689,7 +695,7 @@ static void deepen(int depth, int deepen_relative, result = get_shallow_commits(&reachable_shallows, depth + 1, SHALLOW, NOT_SHALLOW); - send_shallow(result); + send_shallow(writer, result); free_commit_list(result); object_array_clear(&reachable_shallows); } else { @@ -697,14 +703,15 @@ static void deepen(int depth, int deepen_relative, result = get_shallow_commits(want_obj, depth, SHALLOW, NOT_SHALLOW); - send_shallow(result); + send_shallow(writer, result); free_commit_list(result); } - send_unshallow(shallows, want_obj); + send_unshallow(writer, shallows, want_obj); } -static void deepen_by_rev_list(int ac, const char **av, +static void deepen_by_rev_list(struct packet_writer *writer, int ac, + const char **av, struct object_array *shallows, struct object_array *want_obj) { @@ -712,13 +719,14 @@ static void deepen_by_rev_list(int ac, const char **av, close_commit_graph(the_repository); result = get_shallow_commits_by_rev_list(ac, av, SHALLOW, NOT_SHALLOW); - send_shallow(result); + send_shallow(writer, result); free_commit_list(result); - send_unshallow(shallows, want_obj); + send_unshallow(writer, shallows, want_obj); } /* Returns 1 if a shallow list is sent or 0 otherwise */ -static int send_shallow_list(int depth, int deepen_rev_list, +static int send_shallow_list(struct packet_writer *writer, + int depth, int deepen_rev_list, timestamp_t deepen_since, struct string_list *deepen_not, int deepen_relative, @@ -730,7 +738,7 @@ static int send_shallow_list(int depth, int deepen_rev_list, if (depth > 0 && deepen_rev_list) die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together"); if (depth > 0) { - deepen(depth, deepen_relative, shallows, want_obj); + deepen(writer, depth, deepen_relative, shallows, want_obj); ret = 1; } else if (deepen_rev_list) { struct argv_array av = ARGV_ARRAY_INIT; @@ -751,7 +759,7 @@ static int send_shallow_list(int depth, int deepen_rev_list, struct object *o = want_obj->objects[i].item; argv_array_push(&av, oid_to_hex(&o->oid)); } - deepen_by_rev_list(av.argc, av.argv, shallows, want_obj); + deepen_by_rev_list(writer, av.argc, av.argv, 
shallows, want_obj); argv_array_clear(&av); ret = 1; } else { @@ -836,7 +844,7 @@ static int process_deepen_not(const char *line, struct string_list *deepen_not, return 0; } -static void receive_needs(struct object_array *want_obj) +static void receive_needs(struct packet_reader *reader, struct object_array *want_obj) { struct object_array shallows = OBJECT_ARRAY_INIT; struct string_list deepen_not = STRING_LIST_INIT_DUP; @@ -845,39 +853,40 @@ static void receive_needs(struct object_array *want_obj) timestamp_t deepen_since = 0; int deepen_rev_list = 0; int deepen_relative = 0; + struct packet_writer writer; shallow_nr = 0; + packet_writer_init(&writer, 1); for (;;) { struct object *o; const char *features; struct object_id oid_buf; - char *line = packet_read_line(0, NULL); const char *arg; reset_timeout(); - if (!line) + if (packet_reader_read(reader) != PACKET_READ_NORMAL) break; - if (process_shallow(line, &shallows)) + if (process_shallow(reader->line, &shallows)) continue; - if (process_deepen(line, &depth)) + if (process_deepen(reader->line, &depth)) continue; - if (process_deepen_since(line, &deepen_since, &deepen_rev_list)) + if (process_deepen_since(reader->line, &deepen_since, &deepen_rev_list)) continue; - if (process_deepen_not(line, &deepen_not, &deepen_rev_list)) + if (process_deepen_not(reader->line, &deepen_not, &deepen_rev_list)) continue; - if (skip_prefix(line, "filter ", &arg)) { + if (skip_prefix(reader->line, "filter ", &arg)) { if (!filter_capability_requested) die("git upload-pack: filtering capability not negotiated"); parse_list_objects_filter(&filter_options, arg); continue; } - if (!skip_prefix(line, "want ", &arg) || + if (!skip_prefix(reader->line, "want ", &arg) || parse_oid_hex(arg, &oid_buf, &features)) die("git upload-pack: protocol error, " - "expected to get object ID, not '%s'", line); + "expected to get object ID, not '%s'", reader->line); if (parse_feature_request(features, "deepen-relative")) deepen_relative = 1; @@ -904,9 +913,9 @@ static void receive_needs(struct object_array *want_obj) o = parse_object(the_repository, &oid_buf); if (!o) { - packet_write_fmt(1, - "ERR upload-pack: not our ref %s", - oid_to_hex(&oid_buf)); + packet_writer_error(&writer, + "upload-pack: not our ref %s", + oid_to_hex(&oid_buf)); die("git upload-pack: not our ref %s", oid_to_hex(&oid_buf)); } @@ -935,7 +944,7 @@ static void receive_needs(struct object_array *want_obj) if (depth == 0 && !deepen_rev_list && shallows.nr == 0) return; - if (send_shallow_list(depth, deepen_rev_list, deepen_since, + if (send_shallow_list(&writer, depth, deepen_rev_list, deepen_since, &deepen_not, deepen_relative, &shallows, want_obj)) packet_flush(1); @@ -1053,6 +1062,8 @@ static int upload_pack_config(const char *var, const char *value, void *unused) allow_filter = git_config_bool(var, value); } else if (!strcmp("uploadpack.allowrefinwant", var)) { allow_ref_in_want = git_config_bool(var, value); + } else if (!strcmp("uploadpack.allowsidebandall", var)) { + allow_sideband_all = git_config_bool(var, value); } if (current_config_scope() != CONFIG_SCOPE_REPO) { @@ -1067,6 +1078,7 @@ void upload_pack(struct upload_pack_options *options) { struct string_list symref = STRING_LIST_INIT_DUP; struct object_array want_obj = OBJECT_ARRAY_INIT; + struct packet_reader reader; stateless_rpc = options->stateless_rpc; timeout = options->timeout; @@ -1090,10 +1102,14 @@ void upload_pack(struct upload_pack_options *options) if (options->advertise_refs) return; - receive_needs(&want_obj); + 
packet_reader_init(&reader, 0, NULL, 0, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_DIE_ON_ERR_PACKET); + + receive_needs(&reader, &want_obj); if (want_obj.nr) { struct object_array have_obj = OBJECT_ARRAY_INIT; - get_common_commits(&have_obj, &want_obj); + get_common_commits(&reader, &have_obj, &want_obj); create_pack_file(&have_obj, &want_obj); } } @@ -1110,6 +1126,8 @@ struct upload_pack_data { int deepen_rev_list; int deepen_relative; + struct packet_writer writer; + unsigned stateless_rpc : 1; unsigned use_thin_pack : 1; @@ -1133,6 +1151,7 @@ static void upload_pack_data_init(struct upload_pack_data *data) data->haves = haves; data->shallows = shallows; data->deepen_not = deepen_not; + packet_writer_init(&data->writer, 1); } static void upload_pack_data_clear(struct upload_pack_data *data) @@ -1144,7 +1163,8 @@ static void upload_pack_data_clear(struct upload_pack_data *data) string_list_clear(&data->deepen_not, 0); } -static int parse_want(const char *line, struct object_array *want_obj) +static int parse_want(struct packet_writer *writer, const char *line, + struct object_array *want_obj) { const char *arg; if (skip_prefix(line, "want ", &arg)) { @@ -1157,9 +1177,9 @@ static int parse_want(const char *line, struct object_array *want_obj) o = parse_object(the_repository, &oid); if (!o) { - packet_write_fmt(1, - "ERR upload-pack: not our ref %s", - oid_to_hex(&oid)); + packet_writer_error(writer, + "upload-pack: not our ref %s", + oid_to_hex(&oid)); die("git upload-pack: not our ref %s", oid_to_hex(&oid)); } @@ -1175,7 +1195,8 @@ static int parse_want(const char *line, struct object_array *want_obj) return 0; } -static int parse_want_ref(const char *line, struct string_list *wanted_refs, +static int parse_want_ref(struct packet_writer *writer, const char *line, + struct string_list *wanted_refs, struct object_array *want_obj) { const char *arg; @@ -1185,7 +1206,7 @@ static int parse_want_ref(const char *line, struct string_list *wanted_refs, struct object *o; if (read_ref(arg, &oid)) { - packet_write_fmt(1, "ERR unknown ref %s", arg); + packet_writer_error(writer, "unknown ref %s", arg); die("unknown ref %s", arg); } @@ -1228,10 +1249,11 @@ static void process_args(struct packet_reader *request, const char *p; /* process want */ - if (parse_want(arg, want_obj)) + if (parse_want(&data->writer, arg, want_obj)) continue; if (allow_ref_in_want && - parse_want_ref(arg, &data->wanted_refs, want_obj)) + parse_want_ref(&data->writer, arg, &data->wanted_refs, + want_obj)) continue; /* process have line */ if (parse_have(arg, &data->haves)) @@ -1280,6 +1302,13 @@ static void process_args(struct packet_reader *request, continue; } + if ((git_env_bool("GIT_TEST_SIDEBAND_ALL", 0) || + allow_sideband_all) && + !strcmp(arg, "sideband-all")) { + data->writer.use_sideband = 1; + continue; + } + /* ignore unknown lines maybe? 
*/ die("unexpected line: '%s'", arg); } @@ -1325,26 +1354,26 @@ static int process_haves(struct oid_array *haves, struct oid_array *common, return 0; } -static int send_acks(struct oid_array *acks, struct strbuf *response, +static int send_acks(struct packet_writer *writer, struct oid_array *acks, const struct object_array *have_obj, struct object_array *want_obj) { int i; - packet_buf_write(response, "acknowledgments\n"); + packet_writer_write(writer, "acknowledgments\n"); /* Send Acks */ if (!acks->nr) - packet_buf_write(response, "NAK\n"); + packet_writer_write(writer, "NAK\n"); for (i = 0; i < acks->nr; i++) { - packet_buf_write(response, "ACK %s\n", - oid_to_hex(&acks->oid[i])); + packet_writer_write(writer, "ACK %s\n", + oid_to_hex(&acks->oid[i])); } if (ok_to_give_up(have_obj, want_obj)) { /* Send Ready */ - packet_buf_write(response, "ready\n"); + packet_writer_write(writer, "ready\n"); return 1; } @@ -1356,25 +1385,20 @@ static int process_haves_and_send_acks(struct upload_pack_data *data, struct object_array *want_obj) { struct oid_array common = OID_ARRAY_INIT; - struct strbuf response = STRBUF_INIT; int ret = 0; process_haves(&data->haves, &common, have_obj); if (data->done) { ret = 1; - } else if (send_acks(&common, &response, have_obj, want_obj)) { - packet_buf_delim(&response); + } else if (send_acks(&data->writer, &common, have_obj, want_obj)) { + packet_writer_delim(&data->writer); ret = 1; } else { /* Add Flush */ - packet_buf_flush(&response); + packet_writer_flush(&data->writer); ret = 0; } - /* Send response */ - write_or_die(1, response.buf, response.len); - strbuf_release(&response); - oid_array_clear(&data->haves); oid_array_clear(&common); return ret; @@ -1387,15 +1411,15 @@ static void send_wanted_ref_info(struct upload_pack_data *data) if (!data->wanted_refs.nr) return; - packet_write_fmt(1, "wanted-refs\n"); + packet_writer_write(&data->writer, "wanted-refs\n"); for_each_string_list_item(item, &data->wanted_refs) { - packet_write_fmt(1, "%s %s\n", - oid_to_hex(item->util), - item->string); + packet_writer_write(&data->writer, "%s %s\n", + oid_to_hex(item->util), + item->string); } - packet_delim(1); + packet_writer_delim(&data->writer); } static void send_shallow_info(struct upload_pack_data *data, @@ -1406,15 +1430,16 @@ static void send_shallow_info(struct upload_pack_data *data, !is_repository_shallow(the_repository)) return; - packet_write_fmt(1, "shallow-info\n"); + packet_writer_write(&data->writer, "shallow-info\n"); - if (!send_shallow_list(data->depth, data->deepen_rev_list, + if (!send_shallow_list(&data->writer, data->depth, + data->deepen_rev_list, data->deepen_since, &data->deepen_not, data->deepen_relative, &data->shallows, want_obj) && is_repository_shallow(the_repository)) - deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows, - want_obj); + deepen(&data->writer, INFINITE_DEPTH, data->deepen_relative, + &data->shallows, want_obj); packet_delim(1); } @@ -1476,7 +1501,7 @@ int upload_pack_v2(struct repository *r, struct argv_array *keys, send_wanted_ref_info(&data); send_shallow_info(&data, &want_obj); - packet_write_fmt(1, "packfile\n"); + packet_writer_write(&data.writer, "packfile\n"); create_pack_file(&have_obj, &want_obj); state = FETCH_DONE; break; @@ -1497,6 +1522,7 @@ int upload_pack_advertise(struct repository *r, if (value) { int allow_filter_value; int allow_ref_in_want; + int allow_sideband_all_value; strbuf_addstr(value, "shallow"); @@ -1511,6 +1537,13 @@ int upload_pack_advertise(struct repository *r, &allow_ref_in_want) && 
allow_ref_in_want) strbuf_addstr(value, " ref-in-want"); + + if (git_env_bool("GIT_TEST_SIDEBAND_ALL", 0) || + (!repo_config_get_bool(the_repository, + "uploadpack.allowsidebandall", + &allow_sideband_all_value) && + allow_sideband_all_value)) + strbuf_addstr(value, " sideband-all"); } return 1; @@ -4,6 +4,11 @@ /* This code is originally from http://www.cl.cam.ac.uk/~mgk25/ucs/ */ +static const char utf16_be_bom[] = {'\xFE', '\xFF'}; +static const char utf16_le_bom[] = {'\xFF', '\xFE'}; +static const char utf32_be_bom[] = {'\0', '\0', '\xFE', '\xFF'}; +static const char utf32_le_bom[] = {'\xFF', '\xFE', '\0', '\0'}; + struct interval { ucs_char_t first; ucs_char_t last; @@ -470,16 +475,17 @@ int utf8_fprintf(FILE *stream, const char *format, ...) #else typedef char * iconv_ibp; #endif -char *reencode_string_iconv(const char *in, size_t insz, iconv_t conv, size_t *outsz_p) +char *reencode_string_iconv(const char *in, size_t insz, iconv_t conv, + size_t bom_len, size_t *outsz_p) { size_t outsz, outalloc; char *out, *outpos; iconv_ibp cp; outsz = insz; - outalloc = st_add(outsz, 1); /* for terminating NUL */ + outalloc = st_add(outsz, 1 + bom_len); /* for terminating NUL */ out = xmalloc(outalloc); - outpos = out; + outpos = out + bom_len; cp = (iconv_ibp)in; while (1) { @@ -540,10 +546,30 @@ char *reencode_string_len(const char *in, size_t insz, { iconv_t conv; char *out; + const char *bom_str = NULL; + size_t bom_len = 0; if (!in_encoding) return NULL; + /* UTF-16LE-BOM is the same as UTF-16 for reading */ + if (same_utf_encoding("UTF-16LE-BOM", in_encoding)) + in_encoding = "UTF-16"; + + /* + * For writing, UTF-16 iconv typically creates "UTF-16BE-BOM" + * Some users under Windows want the little endian version + */ + if (same_utf_encoding("UTF-16LE-BOM", out_encoding)) { + bom_str = utf16_le_bom; + bom_len = sizeof(utf16_le_bom); + out_encoding = "UTF-16LE"; + } else if (same_utf_encoding("UTF-16BE-BOM", out_encoding)) { + bom_str = utf16_be_bom; + bom_len = sizeof(utf16_be_bom); + out_encoding = "UTF-16BE"; + } + conv = iconv_open(out_encoding, in_encoding); if (conv == (iconv_t) -1) { in_encoding = fallback_encoding(in_encoding); @@ -553,9 +579,10 @@ char *reencode_string_len(const char *in, size_t insz, if (conv == (iconv_t) -1) return NULL; } - - out = reencode_string_iconv(in, insz, conv, outsz); + out = reencode_string_iconv(in, insz, conv, bom_len, outsz); iconv_close(conv); + if (out && bom_str && bom_len) + memcpy(out, bom_str, bom_len); return out; } #endif @@ -566,11 +593,6 @@ static int has_bom_prefix(const char *data, size_t len, return data && bom && (len >= bom_len) && !memcmp(data, bom, bom_len); } -static const char utf16_be_bom[] = {'\xFE', '\xFF'}; -static const char utf16_le_bom[] = {'\xFF', '\xFE'}; -static const char utf32_be_bom[] = {'\0', '\0', '\xFE', '\xFF'}; -static const char utf32_le_bom[] = {'\xFF', '\xFE', '\0', '\0'}; - int has_prohibited_utf_bom(const char *enc, const char *data, size_t len) { return ( @@ -27,7 +27,7 @@ void strbuf_utf8_replace(struct strbuf *sb, int pos, int width, #ifndef NO_ICONV char *reencode_string_iconv(const char *in, size_t insz, - iconv_t conv, size_t *outsz); + iconv_t conv, size_t bom_len, size_t *outsz); char *reencode_string_len(const char *in, size_t insz, const char *out_encoding, const char *in_encoding, @@ -690,3 +690,16 @@ int xgethostname(char *buf, size_t len) buf[len - 1] = 0; return ret; } + +int is_empty_or_missing_file(const char *filename) +{ + struct stat st; + + if (stat(filename, &st) < 0) { + if (errno == 
ENOENT) + return 1; + die_errno(_("could not stat %s"), filename); + } + + return !st.st_size; +} diff --git a/wt-status.c b/wt-status.c index 0fe3bcd4cd..1f564b12d2 100644 --- a/wt-status.c +++ b/wt-status.c @@ -2375,10 +2375,10 @@ int require_clean_work_tree(struct repository *r, struct lock_file lock_file = LOCK_INIT; int err = 0, fd; - fd = hold_locked_index(&lock_file, 0); + fd = repo_hold_locked_index(r, &lock_file, 0); refresh_index(r->index, REFRESH_QUIET, NULL, NULL, NULL); if (0 <= fd) - update_index_if_able(r->index, &lock_file); + repo_update_index_if_able(r, &lock_file); rollback_lock_file(&lock_file); if (has_unstaged_changes(r, ignore_submodules)) { |