diff options
594 files changed, 26164 insertions, 9612 deletions
diff --git a/.clang-format b/.clang-format index 12a89f95f9..de1c8b5c77 100644 --- a/.clang-format +++ b/.clang-format @@ -6,6 +6,8 @@ # Use tabs whenever we need to fill whitespace that spans at least from one tab # stop to the next one. +# +# These settings are mirrored in .editorconfig. Keep them in sync. UseTab: Always TabWidth: 8 IndentWidth: 8 diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..42cdc4bbfb --- /dev/null +++ b/.editorconfig @@ -0,0 +1,16 @@ +[*] +charset = utf-8 +insert_final_newline = true + +# The settings for C (*.c and *.h) files are mirrored in .clang-format. Keep +# them in sync. +[*.{c,h,sh,perl,pl,pm}] +indent_style = tab +tab_width = 8 + +[*.py] +indent_style = space +indent_size = 4 + +[COMMIT_EDITMSG] +max_line_length = 72 diff --git a/.gitattributes b/.gitattributes index 49b3051641..acf853e029 100644 --- a/.gitattributes +++ b/.gitattributes @@ -5,7 +5,7 @@ *.pl eof=lf diff=perl *.pm eol=lf diff=perl *.py eol=lf diff=python -/Documentation/git-*.txt eol=lf +/Documentation/**/*.txt eol=lf /command-list.txt eol=lf /GIT-VERSION-GEN eol=lf /mergetools/* eol=lf diff --git a/.gitignore b/.gitignore index ffceea7d59..0d77ea5894 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +/fuzz_corpora +/fuzz-pack-headers +/fuzz-pack-idx /GIT-BUILD-OPTIONS /GIT-CFLAGS /GIT-LDFLAGS @@ -78,6 +81,7 @@ /git-init-db /git-interpret-trailers /git-instaweb +/git-legacy-rebase /git-log /git-ls-files /git-ls-remote @@ -99,8 +103,9 @@ /git-mergetool--lib /git-mktag /git-mktree -/git-name-rev +/git-multi-pack-index /git-mv +/git-name-rev /git-notes /git-p4 /git-pack-redundant @@ -117,7 +122,7 @@ /git-read-tree /git-rebase /git-rebase--am -/git-rebase--helper +/git-rebase--common /git-rebase--interactive /git-rebase--merge /git-rebase--preserve-merges @@ -21,6 +21,8 @@ Anders Kaseorg <andersk@MIT.EDU> <andersk@mit.edu> Aneesh Kumar K.V <aneesh.kumar@gmail.com> Amos Waterland <apw@debian.org> <apw@rossby.metr.ou.edu> Amos Waterland <apw@debian.org> <apw@us.ibm.com> +Ben Peart <benpeart@microsoft.com> <Ben.Peart@microsoft.com> +Ben Peart <benpeart@microsoft.com> <peartben@gmail.com> Ben Walton <bdwalton@gmail.com> <bwalton@artsci.utoronto.ca> Benoit Sigoure <tsunanet@gmail.com> <tsuna@lrde.epita.fr> Bernt Hansen <bernt@norang.ca> <bernt@alumni.uwaterloo.ca> @@ -32,6 +34,7 @@ Bryan Larsen <bryan@larsen.st> <bryanlarsen@yahoo.com> Cheng Renquan <crquan@gmail.com> Chris Shoemaker <c.shoemaker@cox.net> Chris Wright <chrisw@sous-sol.org> <chrisw@osdl.org> +Christian Ludwig <chrissicool@gmail.com> <chrissicool@googlemail.com> Cord Seele <cowose@gmail.com> <cowose@googlemail.com> Christian Couder <chriscool@tuxfamily.org> <christian.couder@gmail.com> Christian Stimming <stimming@tuhh.de> <chs@ckiste.goetheallee> @@ -51,6 +54,7 @@ David Reiss <dreiss@facebook.com> <dreiss@dreiss-vmware.(none)> David S. 
Miller <davem@davemloft.net> David Turner <novalis@novalis.org> <dturner@twopensource.com> David Turner <novalis@novalis.org> <dturner@twosigma.com> +Derrick Stolee <dstolee@microsoft.com> <stolee@gmail.com> Deskin Miller <deskinm@umich.edu> Dirk Süsserott <newsletter@dirk.my1.cc> Eric Blake <eblake@redhat.com> <ebb9@byu.net> @@ -98,6 +102,7 @@ Jens Axboe <axboe@kernel.dk> <jens.axboe@oracle.com> Jens Lindström <jl@opera.com> Jens Lindstrom <jl@opera.com> Jim Meyering <jim@meyering.net> <meyering@redhat.com> Joachim Berdal Haga <cjhaga@fys.uio.no> +Joachim Jablon <joachim.jablon@people-doc.com> <ewjoachim@gmail.com> Johannes Schindelin <Johannes.Schindelin@gmx.de> <johannes.schindelin@gmx.de> Johannes Sixt <j6t@kdbg.org> <J.Sixt@eudaptics.com> Johannes Sixt <j6t@kdbg.org> <j.sixt@viscovery.net> @@ -150,6 +155,7 @@ Mark Levedahl <mdl123@verizon.net> <mlevedahl@gmail.com> Mark Rada <marada@uwaterloo.ca> Martin Langhoff <martin@laptop.org> <martin@catalyst.net.nz> Martin von Zweigbergk <martinvonz@gmail.com> <martin.von.zweigbergk@gmail.com> +Masaya Suzuki <masayasuzuki@google.com> <draftcode@gmail.com> Matt Draisey <matt@draisey.ca> <mattdraisey@sympatico.ca> Matt Kraai <kraai@ftbfs.org> <matt.kraai@amo.abbott.com> Matt McCutchen <matt@mattmccutchen.net> <hashproduct@gmail.com> @@ -157,6 +163,7 @@ Matthias Kestenholz <matthias@spinlock.ch> <mk@spinlock.ch> Matthias Rüster <matthias.ruester@gmail.com> Matthias Ruester Matthias Urlichs <matthias@urlichs.de> <smurf@kiste.(none)> Matthias Urlichs <matthias@urlichs.de> <smurf@smurf.noris.de> +Matthieu Moy <git@matthieu-moy.fr> <Matthieu.Moy@imag.fr> Michael Coleman <tutufan@gmail.com> Michael J Gruber <git@grubix.eu> <michaeljgruber+gmane@fastmail.fm> Michael J Gruber <git@grubix.eu> <git@drmicha.warpmail.net> @@ -180,7 +187,11 @@ Nick Stokoe <nick@noodlefactory.co.uk> Nick Woolley <nick@noodlefactory.co.uk> Nick Stokoe <nick@noodlefactory.co.uk> Nick Woolley <nickwoolley@yahoo.co.uk> Nicolas Morey-Chaisemartin <devel-git@morey-chaisemartin.com> <nicolas.morey@free.fr> Nicolas Morey-Chaisemartin <devel-git@morey-chaisemartin.com> <nmorey@kalray.eu> +Nicolas Morey-Chaisemartin <devel-git@morey-chaisemartin.com> <nicolas@morey-chaisemartin.com> +Nicolas Morey-Chaisemartin <devel-git@morey-chaisemartin.com> <NMoreyChaisemartin@suse.com> +Nicolas Morey-Chaisemartin <devel-git@morey-chaisemartin.com> <nmoreychaisemartin@suse.com> Nicolas Sebrecht <nicolas.s.dev@gmx.fr> <ni.s@laposte.net> +Orgad Shaneh <orgads@gmail.com> <orgad.shaneh@audiocodes.com> Paolo Bonzini <bonzini@gnu.org> <paolo.bonzini@lu.unisi.ch> Pascal Obry <pascal@obry.net> <pascal.obry@gmail.com> Pascal Obry <pascal@obry.net> <pascal.obry@wanadoo.fr> @@ -200,6 +211,7 @@ Philipp A. Hartmann <pah@qo.cx> <ph@sorgh.de> Philippe Bruhat <book@cpan.org> Ralf Thielow <ralf.thielow@gmail.com> <ralf.thielow@googlemail.com> Ramsay Jones <ramsay@ramsayjones.plus.com> <ramsay@ramsay1.demon.co.uk> +Randall S. 
Becker <randall.becker@nexbridge.ca> <rsbecker@nexbridge.com> René Scharfe <l.s.r@web.de> <rene.scharfe@lsrfire.ath.cx> René Scharfe <l.s.r@web.de> Rene Scharfe Richard Hansen <rhansen@rhansen.org> <hansenr@google.com> @@ -238,6 +250,7 @@ Steven Walter <stevenrwalter@gmail.com> <swalter@lpdev.prtdev.lexmark.com> Sven Verdoolaege <skimo@kotnet.org> <Sven.Verdoolaege@cs.kuleuven.ac.be> Sven Verdoolaege <skimo@kotnet.org> <skimo@liacs.nl> SZEDER Gábor <szeder.dev@gmail.com> <szeder@ira.uka.de> +Tao Qingyun <taoqy@ls-a.me> <845767657@qq.com> Tay Ray Chuan <rctay89@gmail.com> Ted Percival <ted@midg3t.net> <ted.percival@quest.com> Theodore Ts'o <tytso@mit.edu> diff --git a/.travis.yml b/.travis.yml index 8d2499739e..03c8e4c613 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,19 +12,9 @@ compiler: - clang - gcc -addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - language-pack-is - - git-svn - - apache2 - - gcc-8 - matrix: include: - - env: jobname=GETTEXT_POISON + - env: jobname=GIT_TEST_GETTEXT_POISON os: linux compiler: addons: @@ -50,22 +40,11 @@ matrix: - env: jobname=StaticAnalysis os: linux compiler: - addons: - apt: - packages: - - coccinelle - before_install: script: ci/run-static-analysis.sh after_failure: - env: jobname=Documentation os: linux compiler: - addons: - apt: - packages: - - asciidoc - - xmlto - before_install: script: ci/test-documentation.sh after_failure: diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines index 8dddb50a9d..8579530710 100644 --- a/Documentation/CodingGuidelines +++ b/Documentation/CodingGuidelines @@ -118,6 +118,24 @@ For shell scripts specifically (not exhaustive): do this fi + - If a command sequence joined with && or || or | spans multiple + lines, put each command on a separate line and put && and || and | + operators at the end of each line, rather than the start. This + means you don't need to use \ to join lines, since the above + operators imply the sequence isn't finished. + + (incorrect) + grep blob verify_pack_result \ + | awk -f print_1.awk \ + | sort >actual && + ... + + (correct) + grep blob verify_pack_result | + awk -f print_1.awk | + sort >actual && + ... + - We prefer "test" over "[ ... ]". 
- We do not write the noiseword "function" in front of shell diff --git a/Documentation/Makefile b/Documentation/Makefile index 95f6a321f2..d5d936e6a7 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -73,6 +73,7 @@ TECH_DOCS += technical/hash-function-transition TECH_DOCS += technical/http-protocol TECH_DOCS += technical/index-format TECH_DOCS += technical/long-running-process-protocol +TECH_DOCS += technical/multi-pack-index TECH_DOCS += technical/pack-format TECH_DOCS += technical/pack-heuristics TECH_DOCS += technical/pack-protocol @@ -285,7 +286,7 @@ docdep_prereqs = \ mergetools-list.made $(mergetools_txt) \ cmd-list.made $(cmds_txt) -doc.dep : $(docdep_prereqs) $(wildcard *.txt) build-docdep.perl +doc.dep : $(docdep_prereqs) $(wildcard *.txt) $(wildcard config/*.txt) build-docdep.perl $(QUIET_GEN)$(RM) $@+ $@ && \ $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \ mv $@+ $@ diff --git a/Documentation/RelNotes/2.20.0.txt b/Documentation/RelNotes/2.20.0.txt new file mode 100644 index 0000000000..f6bf3626fb --- /dev/null +++ b/Documentation/RelNotes/2.20.0.txt @@ -0,0 +1,676 @@ +Git 2.20 Release Notes +====================== + +Backward Compatibility Notes +---------------------------- + + * "git branch -l <foo>" used to be a way to ask a reflog to be + created while creating a new branch, but that is no longer the + case. It is a short-hand for "git branch --list <foo>" now. + + * "git push" into refs/tags/* hierarchy is rejected without getting + forced, but "git fetch" (misguidedly) used the "fast forwarding" + rule used for the refs/heads/* hierarchy; this has been corrected, + which means some fetches of tags that did not fail with older + version of Git will fail without "--force" with this version. + + * "git help -a" now gives verbose output (same as "git help -av"). + Those who want the old output may say "git help --no-verbose -a".. + + * "git cpn --help", when "cpn" is an alias to, say, "cherry-pick -n", + reported only the alias expansion of "cpn" in earlier versions of + Git. It now runs "git cherry-pick --help" to show the manual page + of the command, while sending the alias expansion to the standard + error stream. + + * "git send-email" learned to grab address-looking string on any + trailer whose name ends with "-by". This is a backward-incompatible + change. Adding "--suppress-cc=misc-by" on the command line, or + setting sendemail.suppresscc configuration variable to "misc-by", + can be used to disable this behaviour. + + +Updates since v2.19 +------------------- + +UI, Workflows & Features + + * Running "git clone" against a project that contain two files with + pathnames that differ only in cases on a case insensitive + filesystem would result in one of the files lost because the + underlying filesystem is incapable of holding both at the same + time. An attempt is made to detect such a case and warn. + + * "git checkout -b newbranch [HEAD]" should not have to do as much as + checking out a commit different from HEAD. An attempt is made to + optimize this special case. + + * "git rev-list --stdin </dev/null" used to be an error; it now shows + no output without an error. "git rev-list --stdin --default HEAD" + still falls back to the given default when nothing is given on the + standard input. + + * Lift code from GitHub to restrict delta computation so that an + object that exists in one fork is not made into a delta against + another object that does not appear in the same forked repository. 
+ + * "git format-patch" learned new "--interdiff" and "--range-diff" + options to explain the difference between this version and the + previous attempt in the cover letter (or after the three-dashes as + a comment). + + * "git mailinfo" used in "git am" learned to make a best-effort + recovery of a patch corrupted by MUA that sends text/plain with + format=flawed option. + (merge 3aa4d81f88 rs/mailinfo-format-flowed later to maint). + + * The rules used by "git push" and "git fetch" to determine if a ref + can or cannot be updated were inconsistent; specifically, fetching + to update existing tags were allowed even though tags are supposed + to be unmoving anchoring points. "git fetch" was taught to forbid + updates to existing tags without the "--force" option. + + * "git multi-pack-index" learned to detect corruption in the .midx + file it uses, and this feature has been integrated into "git fsck". + + * Generation of (experimental) commit-graph files have so far been + fairly silent, even though it takes noticeable amount of time in a + meaningfully large repository. The users will now see progress + output. + + * The minimum version of Windows supported by Windows port of Git is + now set to Vista. + + * The completion script (in contrib/) learned to complete a handful of + options "git stash list" command takes. + + * The completion script (in contrib/) learned that "git fetch + --multiple" only takes remote names as arguments and no refspecs. + + * "git status" learns to show progress bar when refreshing the index + takes a long time. + (merge ae9af12287 nd/status-refresh-progress later to maint). + + * "git help -a" and "git help -av" give different pieces of + information, and generally the "verbose" version is more friendly + to the new users. "git help -a" by default now uses the more + verbose output (with "--no-verbose", you can go back to the + original). Also "git help -av" now lists aliases and external + commands, which it did not used to. + + * Unlike "grep", "git grep" by default recurses to the whole tree. + The command learned "git grep --recursive" option, so that "git + grep --no-recursive" can serve as a synonym to setting the + max-depth to 0. + + * When pushing into a repository that borrows its objects from an + alternate object store, "git receive-pack" that responds to the + push request on the other side lists the tips of refs in the + alternate to reduce the amount of objects transferred. This + sometimes is detrimental when the number of refs in the alternate + is absurdly large, in which case the bandwidth saved in potentially + fewer objects transferred is wasted in excessively large ref + advertisement. The alternate refs that are advertised are now + configurable with a pair of configuration variables. + + * "git cmd --help" when "cmd" is aliased used to only say "cmd is + aliased to ...". Now it shows that to the standard error stream + and runs "git $cmd --help" where $cmd is the first word of the + alias expansion. + + * The documentation of "git gc" has been updated to mention that it + is no longer limited to "pruning away crufts" but also updates + ancillary files like commit-graph as a part of repository + optimization. + + * "git p4 unshelve" improvements. + + * The logic to select the default user name and e-mail on Windows has + been improved. + (merge 501afcb8b0 js/mingw-default-ident later to maint). + + * The "rev-list --filter" feature learned to exclude all trees via + "tree:0" filter. 
+ + * "git send-email" learned to grab address-looking string on any + trailer whose name ends with "-by"; --suppress-cc=misc-by on the + command line, or setting sendemail.suppresscc configuration + variable to "misc-by", can be used to disable this behaviour. + + * Developer builds now uses -Wunused-function compilation option. + + * One of our CI tests to run with "unusual/experimental/random" + settings now also uses commit-graph and midx. + + * "git mergetool" learned to take the "--[no-]gui" option, just like + "git difftool" does. + + * "git rebase -i" learned a new insn, 'break', that the user can + insert in the to-do list. Upon hitting it, the command returns + control back to the user. + + * New "--pretty=format:" placeholders %GF and %GP that show the GPG + key fingerprints have been invented. + + * On platforms with recent cURL library, http.sslBackend configuration + variable can be used to choose a different SSL backend at runtime. + The Windows port uses this mechanism to switch between OpenSSL and + Secure Channel while talking over the HTTPS protocol. + + * "git send-email" learned to disable SMTP authentication via the + "--smtp-auth=none" option, even when the smtp username is given + (which turns the authentication on by default). + + * A fourth class of configuration files (in addition to the + traditional "system wide", "per user in the $HOME directory" and + "per repository in the $GIT_DIR/config") has been introduced so + that different worktrees that share the same repository (hence the + same $GIT_DIR/config file) can use different customization. + + * A pattern with '**' that does not have a slash on either side used + to be an invalid one, but the code now treats such double-asterisks + the same way as two normal asterisks that happen to be adjacent to + each other. + (merge e5bbe09e88 nd/wildmatch-double-asterisk later to maint). + + * The "--no-patch" option, which can be used to get a high-level + overview without the actual line-by-line patch difference shown, of + the "range-diff" command was earlier broken, which has been + corrected. + + * The recently merged "rebase in C" has an escape hatch to use the + scripted version when necessary, but it hasn't been documented, + which has been corrected. + + +Performance, Internal Implementation, Development Support etc. + + * When there are too many packfiles in a repository (which is not + recommended), looking up an object in these would require + consulting many pack .idx files; a new mechanism to have a single + file that consolidates all of these .idx files is introduced. + + * "git submodule update" is getting rewritten piece-by-piece into C. + + * The code for computing history reachability has been shuffled, + obtained a bunch of new tests to cover them, and then being + improved. + + * The unpack_trees() API used in checking out a branch and merging + walks one or more trees along with the index. When the cache-tree + in the index tells us that we are walking a tree whose flattened + contents is known (i.e. matches a span in the index), as linearly + scanning a span in the index is much more efficient than having to + open tree objects recursively and listing their entries, the walk + can be optimized, which has been done. 
+ + * When creating a thin pack, which allows objects to be made into a + delta against another object that is not in the resulting pack but + is known to be present on the receiving end, the code learned to + take advantage of the reachability bitmap; this allows the server + to send a delta against a base beyond the "boundary" commit. + + * spatch transformation to replace boolean uses of !hashcmp() to + newly introduced oideq() is added, and applied, to regain + performance lost due to support of multiple hash algorithms. + + * Fix a bug in which the same path could be registered under multiple + worktree entries if the path was missing (for instance, was removed + manually). Also, as a convenience, expand the number of cases in + which --force is applicable. + + * Split Documentation/config.txt for easier maintenance. + (merge 6014363f0b nd/config-split later to maint). + + * Test helper binaries clean-up. + (merge c9a1f4161f nd/test-tool later to maint). + + * Various tests have been updated to make it easier to swap the + hash function used for object identification. + (merge ae0c89d41b bc/hash-independent-tests later to maint). + + * Update fsck.skipList implementation and documentation. + (merge 371a655074 ab/fsck-skiplist later to maint). + + * An alias that expands to another alias has so far been forbidden, + but now it is allowed to create such an alias. + + * Various test scripts have been updated for style and also correct + handling of exit status of various commands. + + * "gc --auto" ended up calling exit(-1) upon error, which has been + corrected to use exit(1). Also the error reporting behaviour when + daemonized has been updated to exit with zero status when stopping + due to a previously discovered error (which implies there is no + point running gc to improve the situation); we used to exit with + failure in such a case. + + * Various codepaths in the core-ish part learned to work on an + arbitrary in-core index structure, not necessarily the default + instance "the_index". + (merge b3c7eef9b0 nd/the-index later to maint). + + * Code clean-up in the internal machinery used by "git status" and + "git commit --dry-run". + (merge 73ba5d78b4 ss/wt-status-committable later to maint). + + * Some environment variables that control the runtime options of Git + used during tests are getting renamed for consistency. + (merge 4231d1ba99 bp/rename-test-env-var later to maint). + + * A new extension to the index file has been introduced, which allows + the index file to be read in parallel for performance. + + * The oidset API was built on top of the oidmap API which in turn is + on the hashmap API. Replace the implementation to build on top of + the khash API and gain performance. + + * Over some transports, fetching objects with an exact commit object + name can be done without first seeing the ref advertisements. The + code has been optimized to exploit this. + + * In a partial clone that will lazily be hydrated from the + originating repository, we generally want to avoid "does this + object exist (locally)?" on objects that we deliberately omitted + when we created the clone. The cache-tree codepath (which is used + to write a tree object out of the index) however insisted that the + object exists, even for paths that are outside of the partial + checkout area. The code has been updated to avoid such a check. + + * To help developers, an EditorConfig file that attempts to follow + the project convention has been added. + (merge b548d698a0 bc/editorconfig later to maint). 
+ + * The result of coverage test can be combined with "git blame" to + check the test coverage of code introduced recently with a new + 'coverage-diff' tool (in contrib/). + (merge 783faedd65 ds/coverage-diff later to maint). + + * An experiment to fuzz test a few areas, hopefully we can gain more + coverage to various areas. + + * More codepaths are moving away from hardcoded hash sizes. + + * The way the Windows port figures out the current directory has been + improved. + + * The way DLLs are loaded on the Windows port has been improved. + + * Some tests have been reorganized and renamed; "ls t/" now gives a + better overview of what is tested for these scripts than before. + + * "git rebase" and "git rebase -i" have been reimplemented in C. + + * Windows port learned to use nano-second resolution file timestamps. + + * The overly large Documentation/config.txt file have been split into + million little pieces. This potentially allows each individual piece + included into the manual page of the command it affects more easily. + + * Replace three string-list instances used as look-up tables in "git + fetch" with hashmaps. + + * Unify code to read the author-script used in "git am" and the + commands that use the sequencer machinery, e.g. "git rebase -i". + + * In preparation to the day when we can deprecate and remove the + "rebase -p", make sure we can skip and later remove tests for + it. + + * The history traversal used to implement the tag-following has been + optimized by introducing a new helper. + + * The helper function to refresh the cached stat information in the + in-core index has learned to perform the lstat() part of the + operation in parallel on multi-core platforms. + + * The code to traverse objects for reachability, used to decide what + objects are unreferenced and expendable, have been taught to also + consider per-worktree refs of other worktrees as starting points to + prevent data loss. + + * "git add" needs to internally run "diff-files" equivalent, and the + codepath learned the same optimization as "diff-files" has to run + lstat(2) in parallel to find which paths have been updated in the + working tree. + + * The procedure to install dependencies before testing at Travis CI + is getting revamped for both simplicity and flexibility, taking + advantage of the recent move to the vm-based environment. + + * The support for format-patch (and send-email) by the command-line + completion script (in contrib/) has been simplified a bit. + + * The revision walker machinery learned to take advantage of the + commit generation numbers stored in the commit-graph file. + + * The codebase has been cleaned up to reduce "#ifndef NO_PTHREADS". + + * The way -lcurl library gets linked has been simplified by taking + advantage of the fact that we can just ask curl-config command how. + + * Various functions have been audited for "-Wunused-parameter" warnings + and bugs in them got fixed. + + * A sanity check for start-up sequence has been added in the config + API codepath. + + * The build procedure to link for fuzzing test has been made + customizable with a new Makefile variable. + + * The way "git rebase" parses and forwards the command line options + meant for underlying "git am" has been revamped, which fixed for + options with parameters that were not passed correctly. + + * Our testing framework uses a special i18n "poisoned localization" + feature to find messages that ought to stay constant but are + incorrectly marked to be translated. 
This feature has been made + into a runtime option (it used to be a compile-time option). + + * "git push" used to check ambiguities between object-names and + refnames while processing the list of refs' old and new values, + which was unnecessary (as it knew that it is feeding raw object + names). This has been optimized out. + + * The xcurl_off_t() helper function is used to cast size_t to + curl_off_t, but some compilers gave warnings against the code to + ensure the casting is done without wraparound, when size_t is + narrower than curl_off_t. This warning has been squelched. + + * Code preparation to replace ulong vars with size_t vars where + appropriate continues. + + * The "test installed Git" mode of our test suite has been updated to + work better. + + * A coding convention around the Coccinelle semantic patches to have + two classes to ease code migration process has been proposed and + its support has been added to the Makefile. + + +Fixes since v2.19 +----------------- + + * "git interpret-trailers" and its underlying machinery had a buggy + code that attempted to ignore patch text after commit log message, + which triggered in various codepaths that will always get the log + message alone and never get such an input. + (merge 66e83d9b41 jk/trailer-fixes later to maint). + + * Malformed or crafted data in packstream can make our code attempt + to read or write past the allocated buffer and abort, instead of + reporting an error, which has been fixed. + + * "git rebase -i" did not clear the state files correctly when a run + of "squash/fixup" is aborted and then the user manually amended the + commit instead, which has been corrected. + (merge 10d2f35436 js/rebase-i-autosquash-fix later to maint). + + * When fsmonitor is in use, after operation on submodules updates + .gitmodules, we lost track of the fact that we did so and relied on + stale fsmonitor data. + (merge 43f1180814 bp/mv-submodules-with-fsmonitor later to maint). + + * Fix for a long-standing bug that leaves the index file corrupt when + it shrinks during a partial commit. + (merge 6c003d6ffb jk/reopen-tempfile-truncate later to maint). + + * Further fix for O_APPEND emulation on Windows + (merge eeaf7ddac7 js/mingw-o-append later to maint). + + * A corner case bugfix in "git rerere" code. + (merge ad2bf0d9b4 en/rerere-multi-stage-1-fix later to maint). + + * "git add ':(attr:foo)'" is not supported and is supposed to be + rejected while the command line arguments are parsed, but we fail + to reject such a command line upfront. + (merge 84d938b732 nd/attr-pathspec-fix later to maint). + + * Recent update broke the reachability algorithm when refs (e.g. + tags) that point at objects that are not commit were involved, + which has been fixed. + + * "git rebase" etc. in Git 2.19 fails to abort when given an empty + commit log message as result of editing, which has been corrected. + (merge a3ec9eaf38 en/sequencer-empty-edit-result-aborts later to maint). + + * The code to backfill objects in lazily cloned repository did not + work correctly, which has been corrected. + (merge e68302011c jt/lazy-object-fetch-fix later to maint). + + * Update error messages given by "git remote" and make them consistent. + (merge 5025425dff ms/remote-error-message-update later to maint). + + * "git update-ref" learned to make both "--no-deref" and "--stdin" + work at the same time. + (merge d345e9fbe7 en/update-ref-no-deref-stdin later to maint). 
+ + * Recently added "range-diff" had a corner-case bug to cause it + segfault, which has been corrected. + (merge e467a90c7a tg/range-diff-corner-case-fix later to maint). + + * The recently introduced commit-graph auxiliary data is incompatible + with mechanisms such as replace & grafts that "breaks" immutable + nature of the object reference relationship. Disable optimizations + based on its use (and updating existing commit-graph) when these + incompatible features are in use in the repository. + (merge 829a321569 ds/commit-graph-with-grafts later to maint). + + * The mailmap file update. + (merge 255eb03edf jn/mailmap-update later to maint). + + * The code in "git status" sometimes hit an assertion failure. This + was caused by a structure that was reused without cleaning the data + used for the first run, which has been corrected. + (merge 3e73cc62c0 en/status-multiple-renames-to-the-same-target-fix later to maint). + + * "git fetch $repo $object" in a partial clone did not correctly + fetch the asked-for object that is referenced by an object in + promisor packfile, which has been fixed. + + * A corner-case bugfix. + (merge c5cbb27cb5 sm/show-superproject-while-conflicted later to maint). + + * Various fixes to "diff --color-moved-ws". + + * A partial clone that is configured to lazily fetch missing objects + will on-demand issue a "git fetch" request to the originating + repository to fill not-yet-obtained objects. The request has been + optimized for requesting a tree object (and not the leaf blob + objects contained in it) by telling the originating repository that + no blobs are needed. + (merge 4c7f9567ea jt/non-blob-lazy-fetch later to maint). + + * The codepath to support the experimental split-index mode had + remaining "racily clean" issues fixed. + (merge 4c490f3d32 sg/split-index-racefix later to maint). + + * "git log --graph" showing an octopus merge sometimes miscounted the + number of display columns it is consuming to show the merge and its + parent commits, which has been corrected. + (merge 04005834ed np/log-graph-octopus-fix later to maint). + + * "git range-diff" did not work well when the compared ranges had + changes in submodules and the "--submodule=log" was used. + + * The implementation of run_command() API on the UNIX platforms had a + bug that caused a command not on $PATH to be found in the current + directory. + (merge f67b980771 jk/run-command-notdot later to maint). + + * A mutex used in "git pack-objects" were not correctly initialized + and this caused "git repack" to dump core on Windows. + (merge 34204c8166 js/pack-objects-mutex-init-fix later to maint). + + * Under certain circumstances, "git diff D:/a/b/c D:/a/b/d" on + Windows would strip initial parts from the paths because they + were not recognized as absolute, which has been corrected. + (merge ffd04e92e2 js/diff-notice-has-drive-prefix later to maint). + + * The receive.denyCurrentBranch=updateInstead codepath kicked in even + when the push should have been rejected due to other reasons, such + as it does not fast-forward or the update-hook rejects it, which + has been corrected. + (merge b072a25fad jc/receive-deny-current-branch-fix later to maint). + + * The logic to determine the archive type "git archive" uses did not + correctly kick in for "git archive --remote", which has been + corrected. + + * "git repack" in a shallow clone did not correctly update the + shallow points in the repository, leading to a repository that + does not pass fsck. 
+ (merge 5dcfbf564c js/shallow-and-fetch-prune later to maint). + + * Some codepaths failed to form a proper URL when .gitmodules record + the URL to a submodule repository as relative to the repository of + superproject, which has been corrected. + (merge e0a862fdaf sb/submodule-url-to-absolute later to maint). + + * "git fetch" over protocol v2 into a shallow repository failed to + fetch full history behind a new tip of history that was diverged + before the cut-off point of the history that was previously fetched + shallowly. + + * The command line completion machinery (in contrib/) has been + updated to allow the completion script to tweak the list of options + that are reported by the parse-options machinery correctly. + (merge 276b49ff34 nd/completion-negation later to maint). + + * Operations on promisor objects make sense in the context of only a + small subset of the commands that internally use the revisions + machinery, but the "--exclude-promisor-objects" option were taken + and led to nonsense results by commands like "log", to which it + didn't make much sense. This has been corrected. + (merge 669b1d2aae md/exclude-promisor-objects-fix later to maint). + + * The "container" mode of TravisCI is going away. Our .travis.yml + file is getting prepared for the transition. + (merge 32ee384be8 ss/travis-ci-force-vm-mode later to maint). + + * Our test scripts can now take the '-V' option as a synonym for the + '--verbose-log' option. + (merge a5f52c6dab sg/test-verbose-log later to maint). + + * A regression in Git 2.12 era made "git fsck" fall into an infinite + loop while processing truncated loose objects. + (merge 18ad13e5b2 jk/detect-truncated-zlib-input later to maint). + + * "git ls-remote $there foo" was broken by recent update for the + protocol v2 and stopped showing refs that match 'foo' that are not + refs/{heads,tags}/foo, which has been fixed. + (merge 6a139cdd74 jk/proto-v2-ref-prefix-fix later to maint). + + * Additional comment on a tricky piece of code to help developers. + (merge 0afbe3e806 jk/stream-pack-non-delta-clarification later to maint). + + * A couple of tests used to leave the repository in a state that is + deliberately corrupt, which have been corrected. + (merge aa984dbe5e ab/pack-tests-cleanup later to maint). + + * The submodule support has been updated to read from the blob at + HEAD:.gitmodules when the .gitmodules file is missing from the + working tree. + (merge 2b1257e463 ao/submodule-wo-gitmodules-checked-out later to maint). + + * "git fetch" was a bit loose in parsing responses from the other side + when talking over the protocol v2. + + * "git rev-parse --exclude=* --branches --branches" (i.e. first + saying "add only things that do not match '*' out of all branches" + and then adding all branches, without any exclusion this time") + worked as expected, but "--exclude=* --all --all" did not work the + same way, which has been fixed. + (merge 5221048092 ag/rev-parse-all-exclude-fix later to maint). + + * "git send-email --transfer-encoding=..." in recent versions of Git + sometimes produced an empty "Content-Transfer-Encoding:" header, + which has been corrected. + (merge 3c88e46f1a al/send-email-auto-cte-fixup later to maint). + + * The interface into "xdiff" library used to discover the offset and + size of a generated patch hunk by first formatting it into the + textual hunk header "@@ -n,m +k,l @@" and then parsing the numbers + out. A new interface has been introduced to allow callers a more + direct access to them. 
+ (merge 5eade0746e jk/xdiff-interface later to maint). + + * Pathspec matching against a tree object were buggy when negative + pathspec elements were involved, which has been fixed. + (merge b7845cebc0 nd/tree-walk-path-exclusion later to maint). + + * "git merge" and "git pull" that merges into an unborn branch used + to completely ignore "--verify-signatures", which has been + corrected. + (merge 01a31f3bca jk/verify-sig-merge-into-void later to maint). + + * "git rebase --autostash" did not correctly re-attach the HEAD at times. + + * "rev-parse --exclude=<pattern> --branches=<pattern>" etc. did not + quite work, which has been corrected. + (merge 9ab9b5df0e ra/rev-parse-exclude-glob later to maint). + + * When editing a patch in a "git add -i" session, a hunk could be + made to no-op. The "git apply" program used to reject a patch with + such a no-op hunk to catch user mistakes, but it is now updated to + explicitly allow a no-op hunk in an edited patch. + (merge 22cb3835b9 js/apply-recount-allow-noop later to maint). + + * The URL to an MSDN page in a comment has been updated. + (merge 2ef2ae2917 js/mingw-msdn-url later to maint). + + * "git ls-remote --sort=<thing>" can feed an object that is not yet + available into the comparison machinery and segfault, which has + been corrected to check such a request upfront and reject it. + + * When "git bundle" aborts due to an empty commit ranges + (i.e. resulting in an empty pack), it left a file descriptor to an + lockfile open, which resulted in leftover lockfile on Windows where + you cannot remove a file with an open file descriptor. This has + been corrected. + (merge 2c8ee1f53c jk/close-duped-fd-before-unlock-for-bundle later to maint). + + * "git format-patch --stat=<width>" can be used to specify the width + used by the diffstat (shown in the cover letter). + (merge 284aeb7e60 nd/format-patch-cover-letter-stat-width later to maint). + + * Code cleanup, docfix, build fix, etc. + (merge 96a7501aad ts/doc-build-manpage-xsl-quietly later to maint). + (merge b9b07efdb2 tg/conflict-marker-size later to maint). + (merge fa0aeea770 sg/doc-trace-appends later to maint). + (merge d64324cb60 tb/void-check-attr later to maint). + (merge c3b9bc94b9 en/double-semicolon-fix later to maint). + (merge 79336116f5 sg/t3701-tighten-trace later to maint). + (merge 801fa63a90 jk/dev-build-format-security later to maint). + (merge 0597dd62ba sb/string-list-remove-unused later to maint). + (merge db2d36fad8 bw/protocol-v2 later to maint). + (merge 456d7cd3a9 sg/split-index-test later to maint). + (merge 7b6057c852 tq/refs-internal-comment-fix later to maint). + (merge 29e8dc50ad tg/t5551-with-curl-7.61.1 later to maint). + (merge 55f6bce2c9 fe/doc-updates later to maint). + (merge 7987d2232d jk/check-everything-connected-is-long-gone later to maint). + (merge 4ba3c9be47 dz/credential-doc-url-matching-rules later to maint). + (merge 4c399442f7 ma/commit-graph-docs later to maint). + (merge fc0503b04e ma/t1400-undebug-test later to maint). + (merge e56b53553a nd/packobjectshook-doc-fix later to maint). + (merge c56170a0c4 ma/mailing-list-address-in-git-help later to maint). + (merge 6e8fc70fce rs/sequencer-oidset-insert-avoids-dups later to maint). + (merge ad0b8f9575 mw/doc-typofixes later to maint). + (merge d9f079ad1a jc/how-to-document-api later to maint). + (merge b1492bf315 ma/t7005-bash-workaround later to maint). + (merge ac1f98a0df du/rev-parse-is-plumbing later to maint). + (merge ca8ed443a5 mm/doc-no-dashed-git later to maint). 
+ (merge ce366a8144 du/get-tar-commit-id-is-plumbing later to maint). + (merge 61018fe9e0 du/cherry-is-plumbing later to maint). + (merge c7e5fe79b9 sb/strbuf-h-update later to maint). + (merge 8d2008196b tq/branch-create-wo-branch-get later to maint). + (merge 2e3c894f4b tq/branch-style-fix later to maint). + (merge c5d844af9c sg/doc-show-branch-typofix later to maint). + (merge 081d91618b ah/doc-updates later to maint). + (merge b84c783882 jc/cocci-preincr later to maint). + (merge 5e495f8122 uk/merge-subtree-doc-update later to maint). + (merge aaaa881822 jk/uploadpack-packobjectshook-fix later to maint). + (merge 3063477445 tb/char-may-be-unsigned later to maint). + (merge 8c64bc9420 sg/test-rebase-editor-fix later to maint). + (merge 71571cd7d6 ma/sequencer-do-reset-saner-loop-termination later to maint). + (merge 9a4cb8781e cb/notes-freeing-always-null-fix later to maint). diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches index b44fd51f27..ec8b205145 100644 --- a/Documentation/SubmittingPatches +++ b/Documentation/SubmittingPatches @@ -80,7 +80,9 @@ GitHub-Travis CI hints section for details. Do not forget to update the documentation to describe the updated behavior and make sure that the resulting documentation set formats -well. It is currently a liberal mixture of US and UK English norms for +well (try the Documentation/doc-diff script). + +We currently have a liberal mixture of US and UK English norms for spelling and grammar, which is somewhat unfortunate. A huge patch that touches the files all over the place only to correct the inconsistency is not welcome, though. Potential clashes with other changes that can diff --git a/Documentation/config.txt b/Documentation/config.txt index 09a2385280..d87846faa6 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -2,8 +2,9 @@ CONFIGURATION FILE ------------------ The Git configuration file contains a number of variables that affect -the Git commands' behavior. The `.git/config` file in each repository -is used to store the configuration for that repository, and +the Git commands' behavior. The files `.git/config` and optionally +`config.worktree` (see `extensions.worktreeConfig` below) in each +repository are used to store the configuration for that repository, and `$HOME/.gitconfig` is used to store a per-user configuration as fallback values for the `.git/config` file. The file `/etc/gitconfig` can be used to store a system-wide default configuration. @@ -225,7 +226,7 @@ boolean:: false;; Boolean false literals are `no`, `off`, `false`, `0` and the empty string. + -When converting value to the canonical form using `--bool` type +When converting a value to its canonical form using the `--type=bool` type specifier, 'git config' will ensure that the output is "true" or "false" (spelled in lowercase). @@ -287,3472 +288,152 @@ inventing new variables for use in your own tool, make sure their names do not conflict with those that are used by Git itself and other popular tools, and describe them in your documentation. +include::config/advice.txt[] -advice.*:: - These variables control various optional help messages designed to - aid new users. All 'advice.*' variables default to 'true', and you - can tell Git that you do not need help by setting these to 'false': -+ --- - pushUpdateRejected:: - Set this variable to 'false' if you want to disable - 'pushNonFFCurrent', - 'pushNonFFMatching', 'pushAlreadyExists', - 'pushFetchFirst', and 'pushNeedsForce' - simultaneously. 
- pushNonFFCurrent:: - Advice shown when linkgit:git-push[1] fails due to a - non-fast-forward update to the current branch. - pushNonFFMatching:: - Advice shown when you ran linkgit:git-push[1] and pushed - 'matching refs' explicitly (i.e. you used ':', or - specified a refspec that isn't your current branch) and - it resulted in a non-fast-forward error. - pushAlreadyExists:: - Shown when linkgit:git-push[1] rejects an update that - does not qualify for fast-forwarding (e.g., a tag.) - pushFetchFirst:: - Shown when linkgit:git-push[1] rejects an update that - tries to overwrite a remote ref that points at an - object we do not have. - pushNeedsForce:: - Shown when linkgit:git-push[1] rejects an update that - tries to overwrite a remote ref that points at an - object that is not a commit-ish, or make the remote - ref point at an object that is not a commit-ish. - statusHints:: - Show directions on how to proceed from the current - state in the output of linkgit:git-status[1], in - the template shown when writing commit messages in - linkgit:git-commit[1], and in the help message shown - by linkgit:git-checkout[1] when switching branch. - statusUoption:: - Advise to consider using the `-u` option to linkgit:git-status[1] - when the command takes more than 2 seconds to enumerate untracked - files. - commitBeforeMerge:: - Advice shown when linkgit:git-merge[1] refuses to - merge to avoid overwriting local changes. - resolveConflict:: - Advice shown by various commands when conflicts - prevent the operation from being performed. - implicitIdentity:: - Advice on how to set your identity configuration when - your information is guessed from the system username and - domain name. - detachedHead:: - Advice shown when you used linkgit:git-checkout[1] to - move to the detach HEAD state, to instruct how to create - a local branch after the fact. - checkoutAmbiguousRemoteBranchName:: - Advice shown when the argument to - linkgit:git-checkout[1] ambiguously resolves to a - remote tracking branch on more than one remote in - situations where an unambiguous argument would have - otherwise caused a remote-tracking branch to be - checked out. See the `checkout.defaultRemote` - configuration variable for how to set a given remote - to used by default in some situations where this - advice would be printed. - amWorkDir:: - Advice that shows the location of the patch file when - linkgit:git-am[1] fails to apply it. - rmHints:: - In case of failure in the output of linkgit:git-rm[1], - show directions on how to proceed from the current state. - addEmbeddedRepo:: - Advice on what to do when you've accidentally added one - git repo inside of another. - ignoredHook:: - Advice shown if a hook is ignored because the hook is not - set as executable. - waitingForEditor:: - Print a message to the terminal whenever Git is waiting for - editor input from the user. --- - -core.fileMode:: - Tells Git if the executable bit of files in the working tree - is to be honored. -+ -Some filesystems lose the executable bit when a file that is -marked as executable is checked out, or checks out a -non-executable file with executable bit on. -linkgit:git-clone[1] or linkgit:git-init[1] probe the filesystem -to see if it handles the executable bit correctly -and this variable is automatically set as necessary. -+ -A repository, however, may be on a filesystem that handles -the filemode correctly, and this variable is set to 'true' -when created, but later may be made accessible from another -environment that loses the filemode (e.g. 
exporting ext4 via -CIFS mount, visiting a Cygwin created repository with -Git for Windows or Eclipse). -In such a case it may be necessary to set this variable to 'false'. -See linkgit:git-update-index[1]. -+ -The default is true (when core.filemode is not specified in the config file). - -core.hideDotFiles:: - (Windows-only) If true, mark newly-created directories and files whose - name starts with a dot as hidden. If 'dotGitOnly', only the `.git/` - directory is hidden, but no other files starting with a dot. The - default mode is 'dotGitOnly'. - -core.ignoreCase:: - Internal variable which enables various workarounds to enable - Git to work better on filesystems that are not case sensitive, - like APFS, HFS+, FAT, NTFS, etc. For example, if a directory listing - finds "makefile" when Git expects "Makefile", Git will assume - it is really the same file, and continue to remember it as - "Makefile". -+ -The default is false, except linkgit:git-clone[1] or linkgit:git-init[1] -will probe and set core.ignoreCase true if appropriate when the repository -is created. -+ -Git relies on the proper configuration of this variable for your operating -and file system. Modifying this value may result in unexpected behavior. - -core.precomposeUnicode:: - This option is only used by Mac OS implementation of Git. - When core.precomposeUnicode=true, Git reverts the unicode decomposition - of filenames done by Mac OS. This is useful when sharing a repository - between Mac OS and Linux or Windows. - (Git for Windows 1.7.10 or higher is needed, or Git under cygwin 1.7). - When false, file names are handled fully transparent by Git, - which is backward compatible with older versions of Git. - -core.protectHFS:: - If set to true, do not allow checkout of paths that would - be considered equivalent to `.git` on an HFS+ filesystem. - Defaults to `true` on Mac OS, and `false` elsewhere. - -core.protectNTFS:: - If set to true, do not allow checkout of paths that would - cause problems with the NTFS filesystem, e.g. conflict with - 8.3 "short" names. - Defaults to `true` on Windows, and `false` elsewhere. - -core.fsmonitor:: - If set, the value of this variable is used as a command which - will identify all files that may have changed since the - requested date/time. This information is used to speed up git by - avoiding unnecessary processing of files that have not changed. - See the "fsmonitor-watchman" section of linkgit:githooks[5]. - -core.trustctime:: - If false, the ctime differences between the index and the - working tree are ignored; useful when the inode change time - is regularly modified by something outside Git (file system - crawlers and some backup systems). - See linkgit:git-update-index[1]. True by default. - -core.splitIndex:: - If true, the split-index feature of the index will be used. - See linkgit:git-update-index[1]. False by default. - -core.untrackedCache:: - Determines what to do about the untracked cache feature of the - index. It will be kept, if this variable is unset or set to - `keep`. It will automatically be added if set to `true`. And - it will automatically be removed, if set to `false`. Before - setting it to `true`, you should check that mtime is working - properly on your system. - See linkgit:git-update-index[1]. `keep` by default. - -core.checkStat:: - When missing or is set to `default`, many fields in the stat - structure are checked to detect if a file has been modified - since Git looked at it. 
When this configuration variable is - set to `minimal`, sub-second part of mtime and ctime, the - uid and gid of the owner of the file, the inode number (and - the device number, if Git was compiled to use it), are - excluded from the check among these fields, leaving only the - whole-second part of mtime (and ctime, if `core.trustCtime` - is set) and the filesize to be checked. -+ -There are implementations of Git that do not leave usable values in -some fields (e.g. JGit); by excluding these fields from the -comparison, the `minimal` mode may help interoperability when the -same repository is used by these other systems at the same time. - -core.quotePath:: - Commands that output paths (e.g. 'ls-files', 'diff'), will - quote "unusual" characters in the pathname by enclosing the - pathname in double-quotes and escaping those characters with - backslashes in the same way C escapes control characters (e.g. - `\t` for TAB, `\n` for LF, `\\` for backslash) or bytes with - values larger than 0x80 (e.g. octal `\302\265` for "micro" in - UTF-8). If this variable is set to false, bytes higher than - 0x80 are not considered "unusual" any more. Double-quotes, - backslash and control characters are always escaped regardless - of the setting of this variable. A simple space character is - not considered "unusual". Many commands can output pathnames - completely verbatim using the `-z` option. The default value - is true. - -core.eol:: - Sets the line ending type to use in the working directory for - files that have the `text` property set when core.autocrlf is false. - Alternatives are 'lf', 'crlf' and 'native', which uses the platform's - native line ending. The default value is `native`. See - linkgit:gitattributes[5] for more information on end-of-line - conversion. - -core.safecrlf:: - If true, makes Git check if converting `CRLF` is reversible when - end-of-line conversion is active. Git will verify if a command - modifies a file in the work tree either directly or indirectly. - For example, committing a file followed by checking out the - same file should yield the original file in the work tree. If - this is not the case for the current setting of - `core.autocrlf`, Git will reject the file. The variable can - be set to "warn", in which case Git will only warn about an - irreversible conversion but continue the operation. -+ -CRLF conversion bears a slight chance of corrupting data. -When it is enabled, Git will convert CRLF to LF during commit and LF to -CRLF during checkout. A file that contains a mixture of LF and -CRLF before the commit cannot be recreated by Git. For text -files this is the right thing to do: it corrects line endings -such that we have only LF line endings in the repository. -But for binary files that are accidentally classified as text the -conversion can corrupt data. -+ -If you recognize such corruption early you can easily fix it by -setting the conversion type explicitly in .gitattributes. Right -after committing you still have the original file in your work -tree and this file is not yet corrupted. You can explicitly tell -Git that this file is binary and Git will handle the file -appropriately. -+ -Unfortunately, the desired effect of cleaning up text files with -mixed line endings and the undesired effect of corrupting binary -files cannot be distinguished. In both cases CRLFs are removed -in an irreversible way. For text files this is the right thing -to do because CRLFs are line endings, while for binary files -converting CRLFs corrupts data. 
-+ -Note, this safety check does not mean that a checkout will generate a -file identical to the original file for a different setting of -`core.eol` and `core.autocrlf`, but only for the current one. For -example, a text file with `LF` would be accepted with `core.eol=lf` -and could later be checked out with `core.eol=crlf`, in which case the -resulting file would contain `CRLF`, although the original file -contained `LF`. However, in both work trees the line endings would be -consistent, that is either all `LF` or all `CRLF`, but never mixed. A -file with mixed line endings would be reported by the `core.safecrlf` -mechanism. - -core.autocrlf:: - Setting this variable to "true" is the same as setting - the `text` attribute to "auto" on all files and core.eol to "crlf". - Set to true if you want to have `CRLF` line endings in your - working directory and the repository has LF line endings. - This variable can be set to 'input', - in which case no output conversion is performed. - -core.checkRoundtripEncoding:: - A comma and/or whitespace separated list of encodings that Git - performs UTF-8 round trip checks on if they are used in an - `working-tree-encoding` attribute (see linkgit:gitattributes[5]). - The default value is `SHIFT-JIS`. - -core.symlinks:: - If false, symbolic links are checked out as small plain files that - contain the link text. linkgit:git-update-index[1] and - linkgit:git-add[1] will not change the recorded type to regular - file. Useful on filesystems like FAT that do not support - symbolic links. -+ -The default is true, except linkgit:git-clone[1] or linkgit:git-init[1] -will probe and set core.symlinks false if appropriate when the repository -is created. - -core.gitProxy:: - A "proxy command" to execute (as 'command host port') instead - of establishing direct connection to the remote server when - using the Git protocol for fetching. If the variable value is - in the "COMMAND for DOMAIN" format, the command is applied only - on hostnames ending with the specified domain string. This variable - may be set multiple times and is matched in the given order; - the first match wins. -+ -Can be overridden by the `GIT_PROXY_COMMAND` environment variable -(which always applies universally, without the special "for" -handling). -+ -The special string `none` can be used as the proxy command to -specify that no proxy be used for a given domain pattern. -This is useful for excluding servers inside a firewall from -proxy use, while defaulting to a common proxy for external domains. - -core.sshCommand:: - If this variable is set, `git fetch` and `git push` will - use the specified command instead of `ssh` when they need to - connect to a remote system. The command is in the same form as - the `GIT_SSH_COMMAND` environment variable and is overridden - when the environment variable is set. - -core.ignoreStat:: - If true, Git will avoid using lstat() calls to detect if files have - changed by setting the "assume-unchanged" bit for those tracked files - which it has updated identically in both the index and working tree. -+ -When files are modified outside of Git, the user will need to stage -the modified files explicitly (e.g. see 'Examples' section in -linkgit:git-update-index[1]). -Git will not normally detect changes to those files. -+ -This is useful on systems where lstat() calls are very slow, such as -CIFS/Microsoft Windows. -+ -False by default. 
- -core.preferSymlinkRefs:: - Instead of the default "symref" format for HEAD - and other symbolic reference files, use symbolic links. - This is sometimes needed to work with old scripts that - expect HEAD to be a symbolic link. - -core.bare:: - If true this repository is assumed to be 'bare' and has no - working directory associated with it. If this is the case a - number of commands that require a working directory will be - disabled, such as linkgit:git-add[1] or linkgit:git-merge[1]. -+ -This setting is automatically guessed by linkgit:git-clone[1] or -linkgit:git-init[1] when the repository was created. By default a -repository that ends in "/.git" is assumed to be not bare (bare = -false), while all other repositories are assumed to be bare (bare -= true). - -core.worktree:: - Set the path to the root of the working tree. - If `GIT_COMMON_DIR` environment variable is set, core.worktree - is ignored and not used for determining the root of working tree. - This can be overridden by the `GIT_WORK_TREE` environment - variable and the `--work-tree` command-line option. - The value can be an absolute path or relative to the path to - the .git directory, which is either specified by --git-dir - or GIT_DIR, or automatically discovered. - If --git-dir or GIT_DIR is specified but none of - --work-tree, GIT_WORK_TREE and core.worktree is specified, - the current working directory is regarded as the top level - of your working tree. -+ -Note that this variable is honored even when set in a configuration -file in a ".git" subdirectory of a directory and its value differs -from the latter directory (e.g. "/path/to/.git/config" has -core.worktree set to "/different/path"), which is most likely a -misconfiguration. Running Git commands in the "/path/to" directory will -still use "/different/path" as the root of the work tree and can cause -confusion unless you know what you are doing (e.g. you are creating a -read-only snapshot of the same index to a location different from the -repository's usual working tree). - -core.logAllRefUpdates:: - Enable the reflog. Updates to a ref <ref> is logged to the file - "`$GIT_DIR/logs/<ref>`", by appending the new and old - SHA-1, the date/time and the reason of the update, but - only when the file exists. If this configuration - variable is set to `true`, missing "`$GIT_DIR/logs/<ref>`" - file is automatically created for branch heads (i.e. under - `refs/heads/`), remote refs (i.e. under `refs/remotes/`), - note refs (i.e. under `refs/notes/`), and the symbolic ref `HEAD`. - If it is set to `always`, then a missing reflog is automatically - created for any ref under `refs/`. -+ -This information can be used to determine what commit -was the tip of a branch "2 days ago". -+ -This value is true by default in a repository that has -a working directory associated with it, and false by -default in a bare repository. - -core.repositoryFormatVersion:: - Internal variable identifying the repository format and layout - version. - -core.sharedRepository:: - When 'group' (or 'true'), the repository is made shareable between - several users in a group (making sure all the files and objects are - group-writable). When 'all' (or 'world' or 'everybody'), the - repository will be readable by all users, additionally to being - group-shareable. When 'umask' (or 'false'), Git will use permissions - reported by umask(2). When '0xxx', where '0xxx' is an octal number, - files in the repository will have this mode value. 
'0xxx' will override - user's umask value (whereas the other options will only override - requested parts of the user's umask value). Examples: '0660' will make - the repo read/write-able for the owner and group, but inaccessible to - others (equivalent to 'group' unless umask is e.g. '0022'). '0640' is a - repository that is group-readable but not group-writable. - See linkgit:git-init[1]. False by default. - -core.warnAmbiguousRefs:: - If true, Git will warn you if the ref name you passed it is ambiguous - and might match multiple refs in the repository. True by default. - -core.compression:: - An integer -1..9, indicating a default compression level. - -1 is the zlib default. 0 means no compression, - and 1..9 are various speed/size tradeoffs, 9 being slowest. - If set, this provides a default to other compression variables, - such as `core.looseCompression` and `pack.compression`. - -core.looseCompression:: - An integer -1..9, indicating the compression level for objects that - are not in a pack file. -1 is the zlib default. 0 means no - compression, and 1..9 are various speed/size tradeoffs, 9 being - slowest. If not set, defaults to core.compression. If that is - not set, defaults to 1 (best speed). - -core.packedGitWindowSize:: - Number of bytes of a pack file to map into memory in a - single mapping operation. Larger window sizes may allow - your system to process a smaller number of large pack files - more quickly. Smaller window sizes will negatively affect - performance due to increased calls to the operating system's - memory manager, but may improve performance when accessing - a large number of large pack files. -+ -Default is 1 MiB if NO_MMAP was set at compile time, otherwise 32 -MiB on 32 bit platforms and 1 GiB on 64 bit platforms. This should -be reasonable for all users/operating systems. You probably do -not need to adjust this value. -+ -Common unit suffixes of 'k', 'm', or 'g' are supported. +include::config/core.txt[] -core.packedGitLimit:: - Maximum number of bytes to map simultaneously into memory - from pack files. If Git needs to access more than this many - bytes at once to complete an operation it will unmap existing - regions to reclaim virtual address space within the process. -+ -Default is 256 MiB on 32 bit platforms and 32 TiB (effectively -unlimited) on 64 bit platforms. -This should be reasonable for all users/operating systems, except on -the largest projects. You probably do not need to adjust this value. -+ -Common unit suffixes of 'k', 'm', or 'g' are supported. - -core.deltaBaseCacheLimit:: - Maximum number of bytes to reserve for caching base objects - that may be referenced by multiple deltified objects. By storing the - entire decompressed base objects in a cache Git is able - to avoid unpacking and decompressing frequently used base - objects multiple times. -+ -Default is 96 MiB on all platforms. This should be reasonable -for all users/operating systems, except on the largest projects. -You probably do not need to adjust this value. -+ -Common unit suffixes of 'k', 'm', or 'g' are supported. - -core.bigFileThreshold:: - Files larger than this size are stored deflated, without - attempting delta compression. Storing large files without - delta compression avoids excessive memory usage, at the - slight expense of increased disk usage. Additionally files - larger than this size are always treated as binary. -+ -Default is 512 MiB on all platforms. 
This should be reasonable -for most projects as source code and other text files can still -be delta compressed, but larger binary media files won't be. -+ -Common unit suffixes of 'k', 'm', or 'g' are supported. - -core.excludesFile:: - Specifies the pathname to the file that contains patterns to - describe paths that are not meant to be tracked, in addition - to '.gitignore' (per-directory) and '.git/info/exclude'. - Defaults to `$XDG_CONFIG_HOME/git/ignore`. - If `$XDG_CONFIG_HOME` is either not set or empty, `$HOME/.config/git/ignore` - is used instead. See linkgit:gitignore[5]. - -core.askPass:: - Some commands (e.g. svn and http interfaces) that interactively - ask for a password can be told to use an external program given - via the value of this variable. Can be overridden by the `GIT_ASKPASS` - environment variable. If not set, fall back to the value of the - `SSH_ASKPASS` environment variable or, failing that, a simple password - prompt. The external program shall be given a suitable prompt as - command-line argument and write the password on its STDOUT. - -core.attributesFile:: - In addition to '.gitattributes' (per-directory) and - '.git/info/attributes', Git looks into this file for attributes - (see linkgit:gitattributes[5]). Path expansions are made the same - way as for `core.excludesFile`. Its default value is - `$XDG_CONFIG_HOME/git/attributes`. If `$XDG_CONFIG_HOME` is either not - set or empty, `$HOME/.config/git/attributes` is used instead. - -core.hooksPath:: - By default Git will look for your hooks in the - '$GIT_DIR/hooks' directory. Set this to different path, - e.g. '/etc/git/hooks', and Git will try to find your hooks in - that directory, e.g. '/etc/git/hooks/pre-receive' instead of - in '$GIT_DIR/hooks/pre-receive'. -+ -The path can be either absolute or relative. A relative path is -taken as relative to the directory where the hooks are run (see -the "DESCRIPTION" section of linkgit:githooks[5]). -+ -This configuration variable is useful in cases where you'd like to -centrally configure your Git hooks instead of configuring them on a -per-repository basis, or as a more flexible and centralized -alternative to having an `init.templateDir` where you've changed -default hooks. - -core.editor:: - Commands such as `commit` and `tag` that let you edit - messages by launching an editor use the value of this - variable when it is set, and the environment variable - `GIT_EDITOR` is not set. See linkgit:git-var[1]. - -core.commentChar:: - Commands such as `commit` and `tag` that let you edit - messages consider a line that begins with this character - commented, and removes them after the editor returns - (default '#'). -+ -If set to "auto", `git-commit` would select a character that is not -the beginning character of any line in existing commit messages. - -core.filesRefLockTimeout:: - The length of time, in milliseconds, to retry when trying to - lock an individual reference. Value 0 means not to retry at - all; -1 means to try indefinitely. Default is 100 (i.e., - retry for 100ms). - -core.packedRefsTimeout:: - The length of time, in milliseconds, to retry when trying to - lock the `packed-refs` file. Value 0 means not to retry at - all; -1 means to try indefinitely. Default is 1000 (i.e., - retry for 1 second). - -sequence.editor:: - Text editor used by `git rebase -i` for editing the rebase instruction file. - The value is meant to be interpreted by the shell when it is used. - It can be overridden by the `GIT_SEQUENCE_EDITOR` environment variable. 
- When not configured the default commit message editor is used instead. - -core.pager:: - Text viewer for use by Git commands (e.g., 'less'). The value - is meant to be interpreted by the shell. The order of preference - is the `$GIT_PAGER` environment variable, then `core.pager` - configuration, then `$PAGER`, and then the default chosen at - compile time (usually 'less'). -+ -When the `LESS` environment variable is unset, Git sets it to `FRX` -(if `LESS` environment variable is set, Git does not change it at -all). If you want to selectively override Git's default setting -for `LESS`, you can set `core.pager` to e.g. `less -S`. This will -be passed to the shell by Git, which will translate the final -command to `LESS=FRX less -S`. The environment does not set the -`S` option but the command line does, instructing less to truncate -long lines. Similarly, setting `core.pager` to `less -+F` will -deactivate the `F` option specified by the environment from the -command-line, deactivating the "quit if one screen" behavior of -`less`. One can specifically activate some flags for particular -commands: for example, setting `pager.blame` to `less -S` enables -line truncation only for `git blame`. -+ -Likewise, when the `LV` environment variable is unset, Git sets it -to `-c`. You can override this setting by exporting `LV` with -another value or setting `core.pager` to `lv +c`. - -core.whitespace:: - A comma separated list of common whitespace problems to - notice. 'git diff' will use `color.diff.whitespace` to - highlight them, and 'git apply --whitespace=error' will - consider them as errors. You can prefix `-` to disable - any of them (e.g. `-trailing-space`): -+ -* `blank-at-eol` treats trailing whitespaces at the end of the line - as an error (enabled by default). -* `space-before-tab` treats a space character that appears immediately - before a tab character in the initial indent part of the line as an - error (enabled by default). -* `indent-with-non-tab` treats a line that is indented with space - characters instead of the equivalent tabs as an error (not enabled by - default). -* `tab-in-indent` treats a tab character in the initial indent part of - the line as an error (not enabled by default). -* `blank-at-eof` treats blank lines added at the end of file as an error - (enabled by default). -* `trailing-space` is a short-hand to cover both `blank-at-eol` and - `blank-at-eof`. -* `cr-at-eol` treats a carriage-return at the end of line as - part of the line terminator, i.e. with it, `trailing-space` - does not trigger if the character before such a carriage-return - is not a whitespace (not enabled by default). -* `tabwidth=<n>` tells how many character positions a tab occupies; this - is relevant for `indent-with-non-tab` and when Git fixes `tab-in-indent` - errors. The default tab width is 8. Allowed values are 1 to 63. - -core.fsyncObjectFiles:: - This boolean will enable 'fsync()' when writing object files. -+ -This is a total waste of time and effort on a filesystem that orders -data writes properly, but can be useful for filesystems that do not use -journalling (traditional UNIX filesystems) or that only journal metadata -and not file contents (OS X's HFS+, or Linux ext3 with "data=writeback"). +include::config/add.txt[] -core.preloadIndex:: - Enable parallel index preload for operations like 'git diff' -+ -This can speed up operations like 'git diff' and 'git status' especially -on filesystems like NFS that have weak caching semantics and thus -relatively high IO latencies. 
When enabled, Git will do the -index comparison to the filesystem data in parallel, allowing -overlapping IO's. Defaults to true. - -core.createObject:: - You can set this to 'link', in which case a hardlink followed by - a delete of the source are used to make sure that object creation - will not overwrite existing objects. -+ -On some file system/operating system combinations, this is unreliable. -Set this config setting to 'rename' there; However, This will remove the -check that makes sure that existing object files will not get overwritten. - -core.notesRef:: - When showing commit messages, also show notes which are stored in - the given ref. The ref must be fully qualified. If the given - ref does not exist, it is not an error but means that no - notes should be printed. -+ -This setting defaults to "refs/notes/commits", and it can be overridden by -the `GIT_NOTES_REF` environment variable. See linkgit:git-notes[1]. - -core.commitGraph:: - If true, then git will read the commit-graph file (if it exists) - to parse the graph structure of commits. Defaults to false. See - linkgit:git-commit-graph[1] for more information. - -core.useReplaceRefs:: - If set to `false`, behave as if the `--no-replace-objects` - option was given on the command line. See linkgit:git[1] and - linkgit:git-replace[1] for more information. - -core.sparseCheckout:: - Enable "sparse checkout" feature. See section "Sparse checkout" in - linkgit:git-read-tree[1] for more information. - -core.abbrev:: - Set the length object names are abbreviated to. If - unspecified or set to "auto", an appropriate value is - computed based on the approximate number of packed objects - in your repository, which hopefully is enough for - abbreviated object names to stay unique for some time. - The minimum length is 4. - -add.ignoreErrors:: -add.ignore-errors (deprecated):: - Tells 'git add' to continue adding files when some files cannot be - added due to indexing errors. Equivalent to the `--ignore-errors` - option of linkgit:git-add[1]. `add.ignore-errors` is deprecated, - as it does not follow the usual naming convention for configuration - variables. - -alias.*:: - Command aliases for the linkgit:git[1] command wrapper - e.g. - after defining "alias.last = cat-file commit HEAD", the invocation - "git last" is equivalent to "git cat-file commit HEAD". To avoid - confusion and troubles with script usage, aliases that - hide existing Git commands are ignored. Arguments are split by - spaces, the usual shell quoting and escaping is supported. - A quote pair or a backslash can be used to quote them. -+ -If the alias expansion is prefixed with an exclamation point, -it will be treated as a shell command. For example, defining -"alias.new = !gitk --all --not ORIG_HEAD", the invocation -"git new" is equivalent to running the shell command -"gitk --all --not ORIG_HEAD". Note that shell commands will be -executed from the top-level directory of a repository, which may -not necessarily be the current directory. -`GIT_PREFIX` is set as returned by running 'git rev-parse --show-prefix' -from the original current directory. See linkgit:git-rev-parse[1]. - -am.keepcr:: - If true, git-am will call git-mailsplit for patches in mbox format - with parameter `--keep-cr`. In this case git-mailsplit will - not remove `\r` from lines ending with `\r\n`. Can be overridden - by giving `--no-keep-cr` from the command line. - See linkgit:git-am[1], linkgit:git-mailsplit[1]. 
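A few of the knobs above, shown as hedged `git config` invocations; the branch, path and repository names are placeholders, not part of the documentation:

----
# core.logAllRefUpdates: ask what a branch pointed at two days ago
git show 'master@{2 days ago}'

# core.sharedRepository: create a group-writable bare repository
git init --bare --shared=0660 project.git

# Central hooks directory and a long-line-truncating pager
git config --global core.hooksPath /etc/git/hooks
git config --global core.pager 'less -S'

# alias.*: the two aliases quoted in the description above
git config --global alias.last 'cat-file commit HEAD'
git config --global alias.new '!gitk --all --not ORIG_HEAD'
----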
- -am.threeWay:: - By default, `git am` will fail if the patch does not apply cleanly. When - set to true, this setting tells `git am` to fall back on 3-way merge if - the patch records the identity of blobs it is supposed to apply to and - we have those blobs available locally (equivalent to giving the `--3way` - option from the command line). Defaults to `false`. - See linkgit:git-am[1]. - -apply.ignoreWhitespace:: - When set to 'change', tells 'git apply' to ignore changes in - whitespace, in the same way as the `--ignore-space-change` - option. - When set to one of: no, none, never, false tells 'git apply' to - respect all whitespace differences. - See linkgit:git-apply[1]. - -apply.whitespace:: - Tells 'git apply' how to handle whitespaces, in the same way - as the `--whitespace` option. See linkgit:git-apply[1]. - -blame.blankBoundary:: - Show blank commit object name for boundary commits in - linkgit:git-blame[1]. This option defaults to false. - -blame.coloring:: - This determines the coloring scheme to be applied to blame - output. It can be 'repeatedLines', 'highlightRecent', - or 'none' which is the default. - -blame.date:: - Specifies the format used to output dates in linkgit:git-blame[1]. - If unset the iso format is used. For supported values, - see the discussion of the `--date` option at linkgit:git-log[1]. - -blame.showEmail:: - Show the author email instead of author name in linkgit:git-blame[1]. - This option defaults to false. - -blame.showRoot:: - Do not treat root commits as boundaries in linkgit:git-blame[1]. - This option defaults to false. - -branch.autoSetupMerge:: - Tells 'git branch' and 'git checkout' to set up new branches - so that linkgit:git-pull[1] will appropriately merge from the - starting point branch. Note that even if this option is not set, - this behavior can be chosen per-branch using the `--track` - and `--no-track` options. The valid settings are: `false` -- no - automatic setup is done; `true` -- automatic setup is done when the - starting point is a remote-tracking branch; `always` -- - automatic setup is done when the starting point is either a - local branch or remote-tracking - branch. This option defaults to true. - -branch.autoSetupRebase:: - When a new branch is created with 'git branch' or 'git checkout' - that tracks another branch, this variable tells Git to set - up pull to rebase instead of merge (see "branch.<name>.rebase"). - When `never`, rebase is never automatically set to true. - When `local`, rebase is set to true for tracked branches of - other local branches. - When `remote`, rebase is set to true for tracked branches of - remote-tracking branches. - When `always`, rebase will be set to true for all tracking - branches. - See "branch.autoSetupMerge" for details on how to set up a - branch to track another branch. - This option defaults to never. - -branch.sort:: - This variable controls the sort ordering of branches when displayed by - linkgit:git-branch[1]. Without the "--sort=<value>" option provided, the - value of this variable will be used as the default. - See linkgit:git-for-each-ref[1] field names for valid values. - -branch.<name>.remote:: - When on branch <name>, it tells 'git fetch' and 'git push' - which remote to fetch from/push to. The remote to push to - may be overridden with `remote.pushDefault` (for all branches). - The remote to push to, for the current branch, may be further - overridden by `branch.<name>.pushRemote`. 
If no remote is - configured, or if you are not on any branch, it defaults to - `origin` for fetching and `remote.pushDefault` for pushing. - Additionally, `.` (a period) is the current local repository - (a dot-repository), see `branch.<name>.merge`'s final note below. - -branch.<name>.pushRemote:: - When on branch <name>, it overrides `branch.<name>.remote` for - pushing. It also overrides `remote.pushDefault` for pushing - from branch <name>. When you pull from one place (e.g. your - upstream) and push to another place (e.g. your own publishing - repository), you would want to set `remote.pushDefault` to - specify the remote to push to for all branches, and use this - option to override it for a specific branch. - -branch.<name>.merge:: - Defines, together with branch.<name>.remote, the upstream branch - for the given branch. It tells 'git fetch'/'git pull'/'git rebase' which - branch to merge and can also affect 'git push' (see push.default). - When in branch <name>, it tells 'git fetch' the default - refspec to be marked for merging in FETCH_HEAD. The value is - handled like the remote part of a refspec, and must match a - ref which is fetched from the remote given by - "branch.<name>.remote". - The merge information is used by 'git pull' (which at first calls - 'git fetch') to lookup the default branch for merging. Without - this option, 'git pull' defaults to merge the first refspec fetched. - Specify multiple values to get an octopus merge. - If you wish to setup 'git pull' so that it merges into <name> from - another branch in the local repository, you can point - branch.<name>.merge to the desired branch, and use the relative path - setting `.` (a period) for branch.<name>.remote. - -branch.<name>.mergeOptions:: - Sets default options for merging into branch <name>. The syntax and - supported options are the same as those of linkgit:git-merge[1], but - option values containing whitespace characters are currently not - supported. - -branch.<name>.rebase:: - When true, rebase the branch <name> on top of the fetched branch, - instead of merging the default branch from the default remote when - "git pull" is run. See "pull.rebase" for doing this in a non - branch-specific manner. -+ -When `merges`, pass the `--rebase-merges` option to 'git rebase' -so that the local merge commits are included in the rebase (see -linkgit:git-rebase[1] for details). -+ -When preserve, also pass `--preserve-merges` along to 'git rebase' -so that locally committed merge commits will not be flattened -by running 'git pull'. -+ -When the value is `interactive`, the rebase is run in interactive mode. -+ -*NOTE*: this is a possibly dangerous operation; do *not* use -it unless you understand the implications (see linkgit:git-rebase[1] -for details). - -branch.<name>.description:: - Branch description, can be edited with - `git branch --edit-description`. Branch description is - automatically added in the format-patch cover letter or - request-pull summary. - -browser.<tool>.cmd:: - Specify the command to invoke the specified browser. The - specified command is evaluated in shell with the URLs passed - as arguments. (See linkgit:git-web{litdd}browse[1].) - -browser.<tool>.path:: - Override the path for the given tool that may be used to - browse HTML help (see `-w` option in linkgit:git-help[1]) or a - working repository in gitweb (see linkgit:git-instaweb[1]). 
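As a sketch only, the branch and remote settings above might be combined like this for a triangular workflow; `myfork` and `topic` are made-up names:

----
# Sort branch listings by committer date (use "-committerdate" in the
# config file for newest first)
git config branch.sort committerdate
git config blame.showEmail true

# Fetch from "origin", publish everything to a personal fork, and
# rebase "topic" with merge commits preserved via --rebase-merges
git config remote.pushDefault myfork
git config branch.topic.pushRemote myfork
git config branch.topic.rebase merges
----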
- -checkout.defaultRemote:: - When you run 'git checkout <something>' and only have one - remote, it may implicitly fall back on checking out and - tracking e.g. 'origin/<something>'. This stops working as soon - as you have more than one remote with a '<something>' - reference. This setting allows for setting the name of a - preferred remote that should always win when it comes to - disambiguation. The typical use-case is to set this to - `origin`. -+ -Currently this is used by linkgit:git-checkout[1] when 'git checkout -<something>' will checkout the '<something>' branch on another remote, -and by linkgit:git-worktree[1] when 'git worktree add' refers to a -remote branch. This setting might be used for other checkout-like -commands or functionality in the future. - -clean.requireForce:: - A boolean to make git-clean do nothing unless given -f, - -i or -n. Defaults to true. - -color.advice:: - A boolean to enable/disable color in hints (e.g. when a push - failed, see `advice.*` for a list). May be set to `always`, - `false` (or `never`) or `auto` (or `true`), in which case colors - are used only when the error output goes to a terminal. If - unset, then the value of `color.ui` is used (`auto` by default). - -color.advice.hint:: - Use customized color for hints. - -color.blame.highlightRecent:: - This can be used to color the metadata of a blame line depending - on age of the line. -+ -This setting should be set to a comma-separated list of color and date settings, -starting and ending with a color, the dates should be set from oldest to newest. -The metadata will be colored given the colors if the the line was introduced -before the given timestamp, overwriting older timestamped colors. -+ -Instead of an absolute timestamp relative timestamps work as well, e.g. -2.weeks.ago is valid to address anything older than 2 weeks. -+ -It defaults to 'blue,12 month ago,white,1 month ago,red', which colors -everything older than one year blue, recent changes between one month and -one year old are kept white, and lines introduced within the last month are -colored red. - -color.blame.repeatedLines:: - Use the customized color for the part of git-blame output that - is repeated meta information per line (such as commit id, - author name, date and timezone). Defaults to cyan. - -color.branch:: - A boolean to enable/disable color in the output of - linkgit:git-branch[1]. May be set to `always`, - `false` (or `never`) or `auto` (or `true`), in which case colors are used - only when the output is to a terminal. If unset, then the - value of `color.ui` is used (`auto` by default). - -color.branch.<slot>:: - Use customized color for branch coloration. `<slot>` is one of - `current` (the current branch), `local` (a local branch), - `remote` (a remote-tracking branch in refs/remotes/), - `upstream` (upstream tracking branch), `plain` (other - refs). - -color.diff:: - Whether to use ANSI escape sequences to add color to patches. - If this is set to `always`, linkgit:git-diff[1], - linkgit:git-log[1], and linkgit:git-show[1] will use color - for all patches. If it is set to `true` or `auto`, those - commands will only use color when output is to the terminal. - If unset, then the value of `color.ui` is used (`auto` by - default). -+ -This does not affect linkgit:git-format-patch[1] or the -'git-diff-{asterisk}' plumbing commands. Can be overridden on the -command line with the `--color[=<when>]` option. - -color.diff.<slot>:: - Use customized color for diff colorization. 
`<slot>` specifies - which part of the patch to use the specified color, and is one - of `context` (context text - `plain` is a historical synonym), - `meta` (metainformation), `frag` - (hunk header), 'func' (function in hunk header), `old` (removed lines), - `new` (added lines), `commit` (commit headers), `whitespace` - (highlighting whitespace errors), `oldMoved` (deleted lines), - `newMoved` (added lines), `oldMovedDimmed`, `oldMovedAlternative`, - `oldMovedAlternativeDimmed`, `newMovedDimmed`, `newMovedAlternative` - `newMovedAlternativeDimmed` (See the '<mode>' - setting of '--color-moved' in linkgit:git-diff[1] for details), - `contextDimmed`, `oldDimmed`, `newDimmed`, `contextBold`, - `oldBold`, and `newBold` (see linkgit:git-range-diff[1] for details). - -color.decorate.<slot>:: - Use customized color for 'git log --decorate' output. `<slot>` is one - of `branch`, `remoteBranch`, `tag`, `stash` or `HEAD` for local - branches, remote-tracking branches, tags, stash and HEAD, respectively - and `grafted` for grafted commits. - -color.grep:: - When set to `always`, always highlight matches. When `false` (or - `never`), never. When set to `true` or `auto`, use color only - when the output is written to the terminal. If unset, then the - value of `color.ui` is used (`auto` by default). - -color.grep.<slot>:: - Use customized color for grep colorization. `<slot>` specifies which - part of the line to use the specified color, and is one of -+ --- -`context`;; - non-matching text in context lines (when using `-A`, `-B`, or `-C`) -`filename`;; - filename prefix (when not using `-h`) -`function`;; - function name lines (when using `-p`) -`lineNumber`;; - line number prefix (when using `-n`) -`column`;; - column number prefix (when using `--column`) -`match`;; - matching text (same as setting `matchContext` and `matchSelected`) -`matchContext`;; - matching text in context lines -`matchSelected`;; - matching text in selected lines -`selected`;; - non-matching text in selected lines -`separator`;; - separators between fields on a line (`:`, `-`, and `=`) - and between hunks (`--`) --- - -color.interactive:: - When set to `always`, always use colors for interactive prompts - and displays (such as those used by "git-add --interactive" and - "git-clean --interactive"). When false (or `never`), never. - When set to `true` or `auto`, use colors only when the output is - to the terminal. If unset, then the value of `color.ui` is - used (`auto` by default). - -color.interactive.<slot>:: - Use customized color for 'git add --interactive' and 'git clean - --interactive' output. `<slot>` may be `prompt`, `header`, `help` - or `error`, for four distinct types of normal output from - interactive commands. - -color.pager:: - A boolean to enable/disable colored output when the pager is in - use (default is true). - -color.push:: - A boolean to enable/disable color in push errors. May be set to - `always`, `false` (or `never`) or `auto` (or `true`), in which - case colors are used only when the error output goes to a terminal. - If unset, then the value of `color.ui` is used (`auto` by default). - -color.push.error:: - Use customized color for push errors. - -color.remote:: - If set, keywords at the start of the line are highlighted. The - keywords are "error", "warning", "hint" and "success", and are - matched case-insensitively. May be set to `always`, `false` (or - `never`) or `auto` (or `true`). If unset, then the value of - `color.ui` is used (`auto` by default). 
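For illustration, a few of the checkout and color settings above; the `color.blame.highlightRecent` value is the documented default, the rest are arbitrary choices:

----
# Prefer "origin" when 'git checkout <something>' is ambiguous
git config checkout.defaultRemote origin

# Color blame metadata by line age, oldest blue to newest red
git config color.blame.highlightRecent 'blue,12 month ago,white,1 month ago,red'
git config blame.coloring highlightRecent

# Two of the color.diff.<slot> names listed above
git config color.diff.meta 'yellow bold'
git config color.diff.frag magenta
----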
- -color.remote.<slot>:: - Use customized color for each remote keyword. `<slot>` may be - `hint`, `warning`, `success` or `error` which match the - corresponding keyword. - -color.showBranch:: - A boolean to enable/disable color in the output of - linkgit:git-show-branch[1]. May be set to `always`, - `false` (or `never`) or `auto` (or `true`), in which case colors are used - only when the output is to a terminal. If unset, then the - value of `color.ui` is used (`auto` by default). - -color.status:: - A boolean to enable/disable color in the output of - linkgit:git-status[1]. May be set to `always`, - `false` (or `never`) or `auto` (or `true`), in which case colors are used - only when the output is to a terminal. If unset, then the - value of `color.ui` is used (`auto` by default). - -color.status.<slot>:: - Use customized color for status colorization. `<slot>` is - one of `header` (the header text of the status message), - `added` or `updated` (files which are added but not committed), - `changed` (files which are changed but not added in the index), - `untracked` (files which are not tracked by Git), - `branch` (the current branch), - `nobranch` (the color the 'no branch' warning is shown in, defaulting - to red), - `localBranch` or `remoteBranch` (the local and remote branch names, - respectively, when branch and tracking information is displayed in the - status short-format), or - `unmerged` (files which have unmerged changes). - -color.transport:: - A boolean to enable/disable color when pushes are rejected. May be - set to `always`, `false` (or `never`) or `auto` (or `true`), in which - case colors are used only when the error output goes to a terminal. - If unset, then the value of `color.ui` is used (`auto` by default). - -color.transport.rejected:: - Use customized color when a push was rejected. - -color.ui:: - This variable determines the default value for variables such - as `color.diff` and `color.grep` that control the use of color - per command family. Its scope will expand as more commands learn - configuration to set a default for the `--color` option. Set it - to `false` or `never` if you prefer Git commands not to use - color unless enabled explicitly with some other configuration - or the `--color` option. Set it to `always` if you want all - output not intended for machine consumption to use color, to - `true` or `auto` (this is the default since Git 1.8.4) if you - want such output to use color when written to the terminal. - -column.ui:: - Specify whether supported commands should output in columns. - This variable consists of a list of tokens separated by spaces - or commas: -+ -These options control when the feature should be enabled -(defaults to 'never'): -+ --- -`always`;; - always show in columns -`never`;; - never show in columns -`auto`;; - show in columns if the output is to the terminal --- -+ -These options control layout (defaults to 'column'). Setting any -of these implies 'always' if none of 'always', 'never', or 'auto' are -specified. -+ --- -`column`;; - fill columns before rows -`row`;; - fill rows before columns -`plain`;; - show in one column --- -+ -Finally, these options can be combined with a layout option (defaults -to 'nodense'): -+ --- -`dense`;; - make unequal size columns to utilize more space -`nodense`;; - make equal size columns --- - -column.branch:: - Specify whether to output branch listing in `git branch` in columns. - See `column.ui` for details. 
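A short, hedged sketch of the `column.*` and `color.*` toggles above; the particular values are only examples:

----
# Columnar listings only when writing to a terminal, densely packed
git config column.ui 'auto dense'
git config column.branch always

# Per-family color defaults and one status slot override
git config color.ui auto
git config color.status.untracked 'red bold'
----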
- -column.clean:: - Specify the layout when list items in `git clean -i`, which always - shows files and directories in columns. See `column.ui` for details. - -column.status:: - Specify whether to output untracked files in `git status` in columns. - See `column.ui` for details. - -column.tag:: - Specify whether to output tag listing in `git tag` in columns. - See `column.ui` for details. - -commit.cleanup:: - This setting overrides the default of the `--cleanup` option in - `git commit`. See linkgit:git-commit[1] for details. Changing the - default can be useful when you always want to keep lines that begin - with comment character `#` in your log message, in which case you - would do `git config commit.cleanup whitespace` (note that you will - have to remove the help lines that begin with `#` in the commit log - template yourself, if you do this). - -commit.gpgSign:: - - A boolean to specify whether all commits should be GPG signed. - Use of this option when doing operations such as rebase can - result in a large number of commits being signed. It may be - convenient to use an agent to avoid typing your GPG passphrase - several times. - -commit.status:: - A boolean to enable/disable inclusion of status information in the - commit message template when using an editor to prepare the commit - message. Defaults to true. - -commit.template:: - Specify the pathname of a file to use as the template for - new commit messages. - -commit.verbose:: - A boolean or int to specify the level of verbose with `git commit`. - See linkgit:git-commit[1]. - -credential.helper:: - Specify an external helper to be called when a username or - password credential is needed; the helper may consult external - storage to avoid prompting the user for the credentials. Note - that multiple helpers may be defined. See linkgit:gitcredentials[7] - for details. - -credential.useHttpPath:: - When acquiring credentials, consider the "path" component of an http - or https URL to be important. Defaults to false. See - linkgit:gitcredentials[7] for more information. - -credential.username:: - If no username is set for a network authentication, use this username - by default. See credential.<context>.* below, and - linkgit:gitcredentials[7]. - -credential.<url>.*:: - Any of the credential.* options above can be applied selectively to - some credentials. For example "credential.https://example.com.username" - would set the default username only for https connections to - example.com. See linkgit:gitcredentials[7] for details on how URLs are - matched. - -credentialCache.ignoreSIGHUP:: - Tell git-credential-cache--daemon to ignore SIGHUP, instead of quitting. - -completion.commands:: - This is only used by git-completion.bash to add or remove - commands from the list of completed commands. Normally only - porcelain commands and a few select others are completed. You - can add more commands, separated by space, in this - variable. Prefixing the command with '-' will remove it from - the existing list. - -include::diff-config.txt[] - -difftool.<tool>.path:: - Override the path for the given tool. This is useful in case - your tool is not in the PATH. - -difftool.<tool>.cmd:: - Specify the command to invoke the specified diff tool. - The specified command is evaluated in shell with the following - variables available: 'LOCAL' is set to the name of the temporary - file containing the contents of the diff pre-image and 'REMOTE' - is set to the name of the temporary file containing the contents - of the diff post-image. 
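Possible settings for the commit, credential and difftool variables above; the cache timeout and the tool name `mydiff` are invented for illustration:

----
# commit.cleanup: keep '#' lines in commit messages, be verbose
git config commit.cleanup whitespace
git config commit.verbose true

# Cache credentials in memory and key them on the full URL path
git config --global credential.helper 'cache --timeout=3600'
git config --global credential.useHttpPath true

# A difftool definition using the LOCAL/REMOTE variables named above
git config difftool.mydiff.cmd 'mydiff "$LOCAL" "$REMOTE"'
----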
- -difftool.prompt:: - Prompt before each invocation of the diff tool. - -fastimport.unpackLimit:: - If the number of objects imported by linkgit:git-fast-import[1] - is below this limit, then the objects will be unpacked into - loose object files. However if the number of imported objects - equals or exceeds this limit then the pack will be stored as a - pack. Storing the pack from a fast-import can make the import - operation complete faster, especially on slow filesystems. If - not set, the value of `transfer.unpackLimit` is used instead. - -fetch.recurseSubmodules:: - This option can be either set to a boolean value or to 'on-demand'. - Setting it to a boolean changes the behavior of fetch and pull to - unconditionally recurse into submodules when set to true or to not - recurse at all when set to false. When set to 'on-demand' (the default - value), fetch and pull will only recurse into a populated submodule - when its superproject retrieves a commit that updates the submodule's - reference. - -fetch.fsckObjects:: - If it is set to true, git-fetch-pack will check all fetched - objects. See `transfer.fsckObjects` for what's - checked. Defaults to false. If not set, the value of - `transfer.fsckObjects` is used instead. - -fetch.fsck.<msg-id>:: - Acts like `fsck.<msg-id>`, but is used by - linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See - the `fsck.<msg-id>` documentation for details. - -fetch.fsck.skipList:: - Acts like `fsck.skipList`, but is used by - linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See - the `fsck.skipList` documentation for details. - -fetch.unpackLimit:: - If the number of objects fetched over the Git native - transfer is below this - limit, then the objects will be unpacked into loose object - files. However if the number of received objects equals or - exceeds this limit then the received pack will be stored as - a pack, after adding any missing delta bases. Storing the - pack from a push can make the push operation complete faster, - especially on slow filesystems. If not set, the value of - `transfer.unpackLimit` is used instead. - -fetch.prune:: - If true, fetch will automatically behave as if the `--prune` - option was given on the command line. See also `remote.<name>.prune` - and the PRUNING section of linkgit:git-fetch[1]. - -fetch.pruneTags:: - If true, fetch will automatically behave as if the - `refs/tags/*:refs/tags/*` refspec was provided when pruning, - if not set already. This allows for setting both this option - and `fetch.prune` to maintain a 1=1 mapping to upstream - refs. See also `remote.<name>.pruneTags` and the PRUNING - section of linkgit:git-fetch[1]. - -fetch.output:: - Control how ref update status is printed. Valid values are - `full` and `compact`. Default value is `full`. See section - OUTPUT in linkgit:git-fetch[1] for detail. - -fetch.negotiationAlgorithm:: - Control how information about the commits in the local repository is - sent when negotiating the contents of the packfile to be sent by the - server. Set to "skipping" to use an algorithm that skips commits in an - effort to converge faster, but may result in a larger-than-necessary - packfile; The default is "default" which instructs Git to use the default algorithm - that never skips commits (unless the server has acknowledged it or one - of its descendants). - Unknown values will cause 'git fetch' to error out. -+ -See also the `--negotiation-tip` option for linkgit:git-fetch[1]. 
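The fetch-related variables above could be combined as follows; this is a sketch, not a recommended default:

----
# Prune deleted branches and tags on every fetch (1=1 mapping to
# upstream refs, as described above), with compact ref output
git config fetch.prune true
git config fetch.pruneTags true
git config fetch.output compact

# Check fetched objects and try the "skipping" negotiation algorithm
git config fetch.fsckObjects true
git config fetch.negotiationAlgorithm skipping
----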
- -format.attach:: - Enable multipart/mixed attachments as the default for - 'format-patch'. The value can also be a double quoted string - which will enable attachments as the default and set the - value as the boundary. See the --attach option in - linkgit:git-format-patch[1]. - -format.from:: - Provides the default value for the `--from` option to format-patch. - Accepts a boolean value, or a name and email address. If false, - format-patch defaults to `--no-from`, using commit authors directly in - the "From:" field of patch mails. If true, format-patch defaults to - `--from`, using your committer identity in the "From:" field of patch - mails and including a "From:" field in the body of the patch mail if - different. If set to a non-boolean value, format-patch uses that - value instead of your committer identity. Defaults to false. - -format.numbered:: - A boolean which can enable or disable sequence numbers in patch - subjects. It defaults to "auto" which enables it only if there - is more than one patch. It can be enabled or disabled for all - messages by setting it to "true" or "false". See --numbered - option in linkgit:git-format-patch[1]. - -format.headers:: - Additional email headers to include in a patch to be submitted - by mail. See linkgit:git-format-patch[1]. - -format.to:: -format.cc:: - Additional recipients to include in a patch to be submitted - by mail. See the --to and --cc options in - linkgit:git-format-patch[1]. - -format.subjectPrefix:: - The default for format-patch is to output files with the '[PATCH]' - subject prefix. Use this variable to change that prefix. - -format.signature:: - The default for format-patch is to output a signature containing - the Git version number. Use this variable to change that default. - Set this variable to the empty string ("") to suppress - signature generation. - -format.signatureFile:: - Works just like format.signature except the contents of the - file specified by this variable will be used as the signature. - -format.suffix:: - The default for format-patch is to output files with the suffix - `.patch`. Use this variable to change that suffix (make sure to - include the dot if you want it). - -format.pretty:: - The default pretty format for log/show/whatchanged command, - See linkgit:git-log[1], linkgit:git-show[1], - linkgit:git-whatchanged[1]. - -format.thread:: - The default threading style for 'git format-patch'. Can be - a boolean value, or `shallow` or `deep`. `shallow` threading - makes every mail a reply to the head of the series, - where the head is chosen from the cover letter, the - `--in-reply-to`, and the first patch mail, in this order. - `deep` threading makes every mail a reply to the previous one. - A true boolean value is the same as `shallow`, and a false - value disables threading. - -format.signOff:: - A boolean value which lets you enable the `-s/--signoff` option of - format-patch by default. *Note:* Adding the Signed-off-by: line to a - patch should be a conscious act and means that you certify you have - the rights to submit this work under the same open source license. - Please see the 'SubmittingPatches' document for further discussion. - -format.coverLetter:: - A boolean that controls whether to generate a cover-letter when - format-patch is invoked, but in addition can be set to "auto", to - generate a cover-letter only when there's more than one patch. - -format.outputDirectory:: - Set a custom directory to store the resulting files instead of the - current working directory. 
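A hedged example of format-patch defaults built from the variables above; the subject prefix and output directory are placeholders:

----
git config format.signOff true
git config format.coverLetter auto
git config format.numbered auto
git config format.subjectPrefix 'PATCH myproject'
git config format.outputDirectory outgoing/
----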
- -format.useAutoBase:: - A boolean value which lets you enable the `--base=auto` option of - format-patch by default. - -filter.<driver>.clean:: - The command which is used to convert the content of a worktree - file to a blob upon checkin. See linkgit:gitattributes[5] for - details. - -filter.<driver>.smudge:: - The command which is used to convert the content of a blob - object to a worktree file upon checkout. See - linkgit:gitattributes[5] for details. - -fsck.<msg-id>:: - During fsck git may find issues with legacy data which - wouldn't be generated by current versions of git, and which - wouldn't be sent over the wire if `transfer.fsckObjects` was - set. This feature is intended to support working with legacy - repositories containing such data. -+ -Setting `fsck.<msg-id>` will be picked up by linkgit:git-fsck[1], but -to accept pushes of such data set `receive.fsck.<msg-id>` instead, or -to clone or fetch it set `fetch.fsck.<msg-id>`. -+ -The rest of the documentation discusses `fsck.*` for brevity, but the -same applies for the corresponding `receive.fsck.*` and -`fetch.fsck.*` variables. -+ -Unlike variables like `color.ui` and `core.editor` the -`receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>` variables will not -fall back on the `fsck.<msg-id>` configuration if they aren't set. To -uniformly configure the same fsck settings in different circumstances, -all three of them must be set to the same values. -+ -When `fsck.<msg-id>` is set, errors can be switched to warnings and -vice versa by configuring the `fsck.<msg-id>` setting where the -`<msg-id>` is the fsck message ID and the value is one of `error`, -`warn` or `ignore`. For convenience, fsck prefixes the error/warning -with the message ID, e.g. "missingEmail: invalid author/committer line -- missing email" means that setting `fsck.missingEmail = ignore` will -hide that issue. -+ -In general, it is better to enumerate existing objects with problems -with `fsck.skipList`, instead of listing the kind of breakages these -problematic objects share to be ignored, as doing the latter will -allow new instances of the same breakages to go unnoticed. -+ -Setting an unknown `fsck.<msg-id>` value will cause fsck to die, but -doing the same for `receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>` -will only cause git to warn. - -fsck.skipList:: - The path to a sorted list of object names (i.e. one SHA-1 per - line) that are known to be broken in a non-fatal way and should - be ignored. This feature is useful when an established project - should be accepted despite early commits containing errors that - can be safely ignored such as invalid committer email addresses. - Note: corrupt objects cannot be skipped with this setting. -+ -Like `fsck.<msg-id>` this variable has corresponding -`receive.fsck.skipList` and `fetch.fsck.skipList` variants. -+ -Unlike variables like `color.ui` and `core.editor` the -`receive.fsck.skipList` and `fetch.fsck.skipList` variables will not -fall back on the `fsck.skipList` configuration if they aren't set. To -uniformly configure the same fsck settings in different circumstances, -all three of them must be set to the same values. - -gc.aggressiveDepth:: - The depth parameter used in the delta compression - algorithm used by 'git gc --aggressive'. This defaults - to 50. - -gc.aggressiveWindow:: - The window size parameter used in the delta compression - algorithm used by 'git gc --aggressive'. This defaults - to 250.
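To illustrate the fsck and filter variables above: a sketch that downgrades one message, points at a skip list, and defines a clean/smudge pair similar to the `indent` sample in linkgit:gitattributes[5]; the skip-list path is an arbitrary example:

----
# Ignore a known-benign problem and a list of known-bad objects
git config fsck.missingEmail ignore
git config fsck.skipList .git/fsck-skiplist
git fsck

# A filter driver: run "indent" on checkin, pass through on checkout
git config filter.indent.clean indent
git config filter.indent.smudge cat
----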
- -gc.auto:: - When there are approximately more than this many loose - objects in the repository, `git gc --auto` will pack them. - Some Porcelain commands use this command to perform a - light-weight garbage collection from time to time. The - default value is 6700. Setting this to 0 disables it. - -gc.autoPackLimit:: - When there are more than this many packs that are not - marked with `*.keep` file in the repository, `git gc - --auto` consolidates them into one larger pack. The - default value is 50. Setting this to 0 disables it. - -gc.autoDetach:: - Make `git gc --auto` return immediately and run in background - if the system supports it. Default is true. - -gc.bigPackThreshold:: - If non-zero, all packs larger than this limit are kept when - `git gc` is run. This is very similar to `--keep-base-pack` - except that all packs that meet the threshold are kept, not - just the base pack. Defaults to zero. Common unit suffixes of - 'k', 'm', or 'g' are supported. -+ -Note that if the number of kept packs is more than gc.autoPackLimit, -this configuration variable is ignored, all packs except the base pack -will be repacked. After this the number of packs should go below -gc.autoPackLimit and gc.bigPackThreshold should be respected again. - -gc.writeCommitGraph:: - If true, then gc will rewrite the commit-graph file when - linkgit:git-gc[1] is run. When using linkgit:git-gc[1] - '--auto' the commit-graph will be updated if housekeeping is - required. Default is false. See linkgit:git-commit-graph[1] - for details. - -gc.logExpiry:: - If the file gc.log exists, then `git gc --auto` won't run - unless that file is more than 'gc.logExpiry' old. Default is - "1.day". See `gc.pruneExpire` for more ways to specify its - value. - -gc.packRefs:: - Running `git pack-refs` in a repository renders it - unclonable by Git versions prior to 1.5.1.2 over dumb - transports such as HTTP. This variable determines whether - 'git gc' runs `git pack-refs`. This can be set to `notbare` - to enable it within all non-bare repos or it can be set to a - boolean value. The default is `true`. - -gc.pruneExpire:: - When 'git gc' is run, it will call 'prune --expire 2.weeks.ago'. - Override the grace period with this config variable. The value - "now" may be used to disable this grace period and always prune - unreachable objects immediately, or "never" may be used to - suppress pruning. This feature helps prevent corruption when - 'git gc' runs concurrently with another process writing to the - repository; see the "NOTES" section of linkgit:git-gc[1]. - -gc.worktreePruneExpire:: - When 'git gc' is run, it calls - 'git worktree prune --expire 3.months.ago'. - This config variable can be used to set a different grace - period. The value "now" may be used to disable the grace - period and prune `$GIT_DIR/worktrees` immediately, or "never" - may be used to suppress pruning. - -gc.reflogExpire:: -gc.<pattern>.reflogExpire:: - 'git reflog expire' removes reflog entries older than - this time; defaults to 90 days. The value "now" expires all - entries immediately, and "never" suppresses expiration - altogether. With "<pattern>" (e.g. - "refs/stash") in the middle the setting applies only to - the refs that match the <pattern>. - -gc.reflogExpireUnreachable:: -gc.<pattern>.reflogExpireUnreachable:: - 'git reflog expire' removes reflog entries older than - this time and are not reachable from the current tip; - defaults to 30 days. The value "now" expires all entries - immediately, and "never" suppresses expiration altogether. 
- With "<pattern>" (e.g. "refs/stash") - in the middle, the setting applies only to the refs that - match the <pattern>. - -gc.rerereResolved:: - Records of conflicted merge you resolved earlier are - kept for this many days when 'git rerere gc' is run. - You can also use more human-readable "1.month.ago", etc. - The default is 60 days. See linkgit:git-rerere[1]. - -gc.rerereUnresolved:: - Records of conflicted merge you have not resolved are - kept for this many days when 'git rerere gc' is run. - You can also use more human-readable "1.month.ago", etc. - The default is 15 days. See linkgit:git-rerere[1]. - -gitcvs.commitMsgAnnotation:: - Append this string to each commit message. Set to empty string - to disable this feature. Defaults to "via git-CVS emulator". - -gitcvs.enabled:: - Whether the CVS server interface is enabled for this repository. - See linkgit:git-cvsserver[1]. - -gitcvs.logFile:: - Path to a log file where the CVS server interface well... logs - various stuff. See linkgit:git-cvsserver[1]. - -gitcvs.usecrlfattr:: - If true, the server will look up the end-of-line conversion - attributes for files to determine the `-k` modes to use. If - the attributes force Git to treat a file as text, - the `-k` mode will be left blank so CVS clients will - treat it as text. If they suppress text conversion, the file - will be set with '-kb' mode, which suppresses any newline munging - the client might otherwise do. If the attributes do not allow - the file type to be determined, then `gitcvs.allBinary` is - used. See linkgit:gitattributes[5]. - -gitcvs.allBinary:: - This is used if `gitcvs.usecrlfattr` does not resolve - the correct '-kb' mode to use. If true, all - unresolved files are sent to the client in - mode '-kb'. This causes the client to treat them - as binary files, which suppresses any newline munging it - otherwise might do. Alternatively, if it is set to "guess", - then the contents of the file are examined to decide if - it is binary, similar to `core.autocrlf`. - -gitcvs.dbName:: - Database used by git-cvsserver to cache revision information - derived from the Git repository. The exact meaning depends on the - used database driver, for SQLite (which is the default driver) this - is a filename. Supports variable substitution (see - linkgit:git-cvsserver[1] for details). May not contain semicolons (`;`). - Default: '%Ggitcvs.%m.sqlite' - -gitcvs.dbDriver:: - Used Perl DBI driver. You can specify any available driver - for this here, but it might not work. git-cvsserver is tested - with 'DBD::SQLite', reported to work with 'DBD::Pg', and - reported *not* to work with 'DBD::mysql'. Experimental feature. - May not contain double colons (`:`). Default: 'SQLite'. - See linkgit:git-cvsserver[1]. - -gitcvs.dbUser, gitcvs.dbPass:: - Database user and password. Only useful if setting `gitcvs.dbDriver`, - since SQLite has no concept of database users and/or passwords. - 'gitcvs.dbUser' supports variable substitution (see - linkgit:git-cvsserver[1] for details). - -gitcvs.dbTableNamePrefix:: - Database table name prefix. Prepended to the names of any - database tables used, allowing a single database to be used - for several repositories. Supports variable substitution (see - linkgit:git-cvsserver[1] for details). Any non-alphabetic - characters will be replaced with underscores. 
- -All gitcvs variables except for `gitcvs.usecrlfattr` and -`gitcvs.allBinary` can also be specified as -'gitcvs.<access_method>.<varname>' (where 'access_method' -is one of "ext" and "pserver") to make them apply only for the given -access method. - -gitweb.category:: -gitweb.description:: -gitweb.owner:: -gitweb.url:: - See linkgit:gitweb[1] for description. - -gitweb.avatar:: -gitweb.blame:: -gitweb.grep:: -gitweb.highlight:: -gitweb.patches:: -gitweb.pickaxe:: -gitweb.remote_heads:: -gitweb.showSizes:: -gitweb.snapshot:: - See linkgit:gitweb.conf[5] for description. - -grep.lineNumber:: - If set to true, enable `-n` option by default. - -grep.column:: - If set to true, enable the `--column` option by default. - -grep.patternType:: - Set the default matching behavior. Using a value of 'basic', 'extended', - 'fixed', or 'perl' will enable the `--basic-regexp`, `--extended-regexp`, - `--fixed-strings`, or `--perl-regexp` option accordingly, while the - value 'default' will return to the default matching behavior. - -grep.extendedRegexp:: - If set to true, enable `--extended-regexp` option by default. This - option is ignored when the `grep.patternType` option is set to a value - other than 'default'. - -grep.threads:: - Number of grep worker threads to use. - See `grep.threads` in linkgit:git-grep[1] for more information. - -grep.fallbackToNoIndex:: - If set to true, fall back to git grep --no-index if git grep - is executed outside of a git repository. Defaults to false. - -gpg.program:: - Use this custom program instead of "`gpg`" found on `$PATH` when - making or verifying a PGP signature. The program must support the - same command-line interface as GPG, namely, to verify a detached - signature, "`gpg --verify $file - <$signature`" is run, and the - program is expected to signal a good signature by exiting with - code 0, and to generate an ASCII-armored detached signature, the - standard input of "`gpg -bsau $key`" is fed with the contents to be - signed, and the program is expected to send the result to its - standard output. - -gpg.format:: - Specifies which key format to use when signing with `--gpg-sign`. - Default is "openpgp" and another possible value is "x509". - -gpg.<format>.program:: - Use this to customize the program used for the signing format you - chose. (see `gpg.program` and `gpg.format`) `gpg.program` can still - be used as a legacy synonym for `gpg.openpgp.program`. The default - value for `gpg.x509.program` is "gpgsm". - -gui.commitMsgWidth:: - Defines how wide the commit message window is in the - linkgit:git-gui[1]. "75" is the default. - -gui.diffContext:: - Specifies how many context lines should be used in calls to diff - made by the linkgit:git-gui[1]. The default is "5". - -gui.displayUntracked:: - Determines if linkgit:git-gui[1] shows untracked files - in the file list. The default is "true". - -gui.encoding:: - Specifies the default encoding to use for displaying of - file contents in linkgit:git-gui[1] and linkgit:gitk[1]. - It can be overridden by setting the 'encoding' attribute - for relevant files (see linkgit:gitattributes[5]). - If this option is not set, the tools default to the - locale encoding. - -gui.matchTrackingBranch:: - Determines if new branches created with linkgit:git-gui[1] should - default to tracking remote branches with matching names or - not. Default: "false". - -gui.newBranchTemplate:: - Is used as suggested name when creating new branches using the - linkgit:git-gui[1]. 
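For illustration, the grep and gpg settings above; `gpgsm` is the documented default for `gpg.x509.program`, repeated here only to show the key layout:

----
# grep defaults: line numbers and Perl-compatible regexes
git config grep.lineNumber true
git config grep.patternType perl

# Sign with an X.509/S/MIME tool instead of OpenPGP gpg
git config gpg.format x509
git config gpg.x509.program gpgsm
----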
- -gui.pruneDuringFetch:: - "true" if linkgit:git-gui[1] should prune remote-tracking branches when - performing a fetch. The default value is "false". - -gui.trustmtime:: - Determines if linkgit:git-gui[1] should trust the file modification - timestamp or not. By default the timestamps are not trusted. - -gui.spellingDictionary:: - Specifies the dictionary used for spell checking commit messages in - the linkgit:git-gui[1]. When set to "none" spell checking is turned - off. - -gui.fastCopyBlame:: - If true, 'git gui blame' uses `-C` instead of `-C -C` for original - location detection. It makes blame significantly faster on huge - repositories at the expense of less thorough copy detection. - -gui.copyBlameThreshold:: - Specifies the threshold to use in 'git gui blame' original location - detection, measured in alphanumeric characters. See the - linkgit:git-blame[1] manual for more information on copy detection. - -gui.blamehistoryctx:: - Specifies the radius of history context in days to show in - linkgit:gitk[1] for the selected commit, when the `Show History - Context` menu item is invoked from 'git gui blame'. If this - variable is set to zero, the whole history is shown. - -guitool.<name>.cmd:: - Specifies the shell command line to execute when the corresponding item - of the linkgit:git-gui[1] `Tools` menu is invoked. This option is - mandatory for every tool. The command is executed from the root of - the working directory, and in the environment it receives the name of - the tool as `GIT_GUITOOL`, the name of the currently selected file as - 'FILENAME', and the name of the current branch as 'CUR_BRANCH' (if - the head is detached, 'CUR_BRANCH' is empty). - -guitool.<name>.needsFile:: - Run the tool only if a diff is selected in the GUI. It guarantees - that 'FILENAME' is not empty. - -guitool.<name>.noConsole:: - Run the command silently, without creating a window to display its - output. - -guitool.<name>.noRescan:: - Don't rescan the working directory for changes after the tool - finishes execution. - -guitool.<name>.confirm:: - Show a confirmation dialog before actually running the tool. - -guitool.<name>.argPrompt:: - Request a string argument from the user, and pass it to the tool - through the `ARGS` environment variable. Since requesting an - argument implies confirmation, the 'confirm' option has no effect - if this is enabled. If the option is set to 'true', 'yes', or '1', - the dialog uses a built-in generic prompt; otherwise the exact - value of the variable is used. - -guitool.<name>.revPrompt:: - Request a single valid revision from the user, and set the - `REVISION` environment variable. In other aspects this option - is similar to 'argPrompt', and can be used together with it. - -guitool.<name>.revUnmerged:: - Show only unmerged branches in the 'revPrompt' subdialog. - This is useful for tools similar to merge or rebase, but not - for things like checkout or reset. - -guitool.<name>.title:: - Specifies the title to use for the prompt dialog. The default - is the tool name. - -guitool.<name>.prompt:: - Specifies the general prompt string to display at the top of - the dialog, before subsections for 'argPrompt' and 'revPrompt'. - The default value includes the actual command. - -help.browser:: - Specify the browser that will be used to display help in the - 'web' format. See linkgit:git-help[1]. - -help.format:: - Override the default help format used by linkgit:git-help[1]. - Values 'man', 'info', 'web' and 'html' are supported. 'man' is - the default. 
'web' and 'html' are the same. - -help.autoCorrect:: - Automatically correct and execute mistyped commands after - waiting for the given number of deciseconds (0.1 sec). If more - than one command can be deduced from the entered text, nothing - will be executed. If the value of this option is negative, - the corrected command will be executed immediately. If the - value is 0 - the command will be just shown but not executed. - This is the default. - -help.htmlPath:: - Specify the path where the HTML documentation resides. File system paths - and URLs are supported. HTML pages will be prefixed with this path when - help is displayed in the 'web' format. This defaults to the documentation - path of your Git installation. - -http.proxy:: - Override the HTTP proxy, normally configured using the 'http_proxy', - 'https_proxy', and 'all_proxy' environment variables (see `curl(1)`). In - addition to the syntax understood by curl, it is possible to specify a - proxy string with a user name but no password, in which case git will - attempt to acquire one in the same way it does for other credentials. See - linkgit:gitcredentials[7] for more information. The syntax thus is - '[protocol://][user[:password]@]proxyhost[:port]'. This can be overridden - on a per-remote basis; see remote.<name>.proxy - -http.proxyAuthMethod:: - Set the method with which to authenticate against the HTTP proxy. This - only takes effect if the configured proxy string contains a user name part - (i.e. is of the form 'user@host' or 'user@host:port'). This can be - overridden on a per-remote basis; see `remote.<name>.proxyAuthMethod`. - Both can be overridden by the `GIT_HTTP_PROXY_AUTHMETHOD` environment - variable. Possible values are: -+ --- -* `anyauth` - Automatically pick a suitable authentication method. It is - assumed that the proxy answers an unauthenticated request with a 407 - status code and one or more Proxy-authenticate headers with supported - authentication methods. This is the default. -* `basic` - HTTP Basic authentication -* `digest` - HTTP Digest authentication; this prevents the password from being - transmitted to the proxy in clear text -* `negotiate` - GSS-Negotiate authentication (compare the --negotiate option - of `curl(1)`) -* `ntlm` - NTLM authentication (compare the --ntlm option of `curl(1)`) --- - -http.emptyAuth:: - Attempt authentication without seeking a username or password. This - can be used to attempt GSS-Negotiate authentication without specifying - a username in the URL, as libcurl normally requires a username for - authentication. - -http.delegation:: - Control GSSAPI credential delegation. The delegation is disabled - by default in libcurl since version 7.21.7. Set parameter to tell - the server what it is allowed to delegate when it comes to user - credentials. Used with GSS/kerberos. Possible values are: -+ --- -* `none` - Don't allow any delegation. -* `policy` - Delegates if and only if the OK-AS-DELEGATE flag is set in the - Kerberos service ticket, which is a matter of realm policy. -* `always` - Unconditionally allow the server to delegate. --- - - -http.extraHeader:: - Pass an additional HTTP header when communicating with a server. If - more than one such entry exists, all of them are added as extra - headers. To allow overriding the settings inherited from the system - config, an empty value will reset the extra headers to the empty list. 
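A sketch of a proxy setup combining `http.proxy`, `http.proxyAuthMethod` and `http.extraHeader`; the host, user name and header are placeholders, and `help.autoCorrect` is included only to show the decisecond unit. Because the proxy string carries a user name but no password, Git will obtain the password through the credential machinery described above:

----
[http]
	proxy = http://proxyuser@proxy.example.com:8080
	proxyAuthMethod = basic
	extraHeader = Accept-Language: en
	; an empty value (extraHeader =) would reset the inherited extra headers
[help]
	autoCorrect = 10	; run the corrected command after 1 second (10 deciseconds)
----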
- -http.cookieFile:: - The pathname of a file containing previously stored cookie lines, - which should be used - in the Git http session, if they match the server. The file format - of the file to read cookies from should be plain HTTP headers or - the Netscape/Mozilla cookie file format (see `curl(1)`). - NOTE that the file specified with http.cookieFile is used only as - input unless http.saveCookies is set. - -http.saveCookies:: - If set, store cookies received during requests to the file specified by - http.cookieFile. Has no effect if http.cookieFile is unset. - -http.sslVersion:: - The SSL version to use when negotiating an SSL connection, if you - want to force the default. The available and default version - depend on whether libcurl was built against NSS or OpenSSL and the - particular configuration of the crypto library in use. Internally - this sets the 'CURLOPT_SSL_VERSION' option; see the libcurl - documentation for more details on the format of this option and - for the ssl version supported. Actually the possible values of - this option are: - - - sslv2 - - sslv3 - - tlsv1 - - tlsv1.0 - - tlsv1.1 - - tlsv1.2 - - tlsv1.3 +include::config/alias.txt[] -+ -Can be overridden by the `GIT_SSL_VERSION` environment variable. -To force git to use libcurl's default ssl version and ignore any -explicit http.sslversion option, set `GIT_SSL_VERSION` to the -empty string. - -http.sslCipherList:: - A list of SSL ciphers to use when negotiating an SSL connection. - The available ciphers depend on whether libcurl was built against - NSS or OpenSSL and the particular configuration of the crypto - library in use. Internally this sets the 'CURLOPT_SSL_CIPHER_LIST' - option; see the libcurl documentation for more details on the format - of this list. -+ -Can be overridden by the `GIT_SSL_CIPHER_LIST` environment variable. -To force git to use libcurl's default cipher list and ignore any -explicit http.sslCipherList option, set `GIT_SSL_CIPHER_LIST` to the -empty string. - -http.sslVerify:: - Whether to verify the SSL certificate when fetching or pushing - over HTTPS. Defaults to true. Can be overridden by the - `GIT_SSL_NO_VERIFY` environment variable. - -http.sslCert:: - File containing the SSL certificate when fetching or pushing - over HTTPS. Can be overridden by the `GIT_SSL_CERT` environment - variable. - -http.sslKey:: - File containing the SSL private key when fetching or pushing - over HTTPS. Can be overridden by the `GIT_SSL_KEY` environment - variable. - -http.sslCertPasswordProtected:: - Enable Git's password prompt for the SSL certificate. Otherwise - OpenSSL will prompt the user, possibly many times, if the - certificate or private key is encrypted. Can be overridden by the - `GIT_SSL_CERT_PASSWORD_PROTECTED` environment variable. - -http.sslCAInfo:: - File containing the certificates to verify the peer with when - fetching or pushing over HTTPS. Can be overridden by the - `GIT_SSL_CAINFO` environment variable. - -http.sslCAPath:: - Path containing files with the CA certificates to verify the peer - with when fetching or pushing over HTTPS. Can be overridden - by the `GIT_SSL_CAPATH` environment variable. - -http.pinnedpubkey:: - Public key of the https service. It may either be the filename of - a PEM or DER encoded public key file or a string starting with - 'sha256//' followed by the base64 encoded sha256 hash of the - public key. See also libcurl 'CURLOPT_PINNEDPUBLICKEY'. git will - exit with an error if this option is set but not supported by - cURL. 
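As an illustration of the TLS-related keys above, a hypothetical client-certificate configuration; every path is a placeholder and the chosen SSL version is only an example:

----
[http]
	sslVersion = tlsv1.2
	sslVerify = true
	sslCert = /etc/pki/git/client.crt
	sslKey = /etc/pki/git/client.key
	sslCertPasswordProtected = true	; let Git, not OpenSSL, prompt for the key passphrase
	sslCAInfo = /etc/pki/git/internal-ca.pem
	cookieFile = /home/me/.gitcookies
	saveCookies = true		; write received cookies back to http.cookieFile
----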
- -http.sslTry:: - Attempt to use AUTH SSL/TLS and encrypted data transfers - when connecting via regular FTP protocol. This might be needed - if the FTP server requires it for security reasons or you wish - to connect securely whenever remote FTP server supports it. - Default is false since it might trigger certificate verification - errors on misconfigured servers. - -http.maxRequests:: - How many HTTP requests to launch in parallel. Can be overridden - by the `GIT_HTTP_MAX_REQUESTS` environment variable. Default is 5. - -http.minSessions:: - The number of curl sessions (counted across slots) to be kept across - requests. They will not be ended with curl_easy_cleanup() until - http_cleanup() is invoked. If USE_CURL_MULTI is not defined, this - value will be capped at 1. Defaults to 1. - -http.postBuffer:: - Maximum size in bytes of the buffer used by smart HTTP - transports when POSTing data to the remote system. - For requests larger than this buffer size, HTTP/1.1 and - Transfer-Encoding: chunked is used to avoid creating a - massive pack file locally. Default is 1 MiB, which is - sufficient for most requests. - -http.lowSpeedLimit, http.lowSpeedTime:: - If the HTTP transfer speed is less than 'http.lowSpeedLimit' - for longer than 'http.lowSpeedTime' seconds, the transfer is aborted. - Can be overridden by the `GIT_HTTP_LOW_SPEED_LIMIT` and - `GIT_HTTP_LOW_SPEED_TIME` environment variables. - -http.noEPSV:: - A boolean which disables using of EPSV ftp command by curl. - This can helpful with some "poor" ftp servers which don't - support EPSV mode. Can be overridden by the `GIT_CURL_FTP_NO_EPSV` - environment variable. Default is false (curl will use EPSV). - -http.userAgent:: - The HTTP USER_AGENT string presented to an HTTP server. The default - value represents the version of the client Git such as git/1.7.1. - This option allows you to override this value to a more common value - such as Mozilla/4.0. This may be necessary, for instance, if - connecting through a firewall that restricts HTTP connections to a set - of common USER_AGENT strings (but not including those like git/1.7.1). - Can be overridden by the `GIT_HTTP_USER_AGENT` environment variable. - -http.followRedirects:: - Whether git should follow HTTP redirects. If set to `true`, git - will transparently follow any redirect issued by a server it - encounters. If set to `false`, git will treat all redirects as - errors. If set to `initial`, git will follow redirects only for - the initial request to a remote, but not for subsequent - follow-up HTTP requests. Since git uses the redirected URL as - the base for the follow-up requests, this is generally - sufficient. The default is `initial`. - -http.<url>.*:: - Any of the http.* options above can be applied selectively to some URLs. - For a config key to match a URL, each element of the config key is - compared to that of the URL, in the following order: -+ --- -. Scheme (e.g., `https` in `https://example.com/`). This field - must match exactly between the config key and the URL. - -. Host/domain name (e.g., `example.com` in `https://example.com/`). - This field must match between the config key and the URL. It is - possible to specify a `*` as part of the host name to match all subdomains - at this level. `https://*.example.com/` for example would match - `https://foo.example.com/`, but not `https://foo.bar.example.com/`. - -. Port number (e.g., `8080` in `http://example.com:8080/`). - This field must match exactly between the config key and the URL. 
- Omitted port numbers are automatically converted to the correct - default for the scheme before matching. - -. Path (e.g., `repo.git` in `https://example.com/repo.git`). The - path field of the config key must match the path field of the URL - either exactly or as a prefix of slash-delimited path elements. This means - a config key with path `foo/` matches URL path `foo/bar`. A prefix can only - match on a slash (`/`) boundary. Longer matches take precedence (so a config - key with path `foo/bar` is a better match to URL path `foo/bar` than a config - key with just path `foo/`). - -. User name (e.g., `user` in `https://user@example.com/repo.git`). If - the config key has a user name it must match the user name in the - URL exactly. If the config key does not have a user name, that - config key will match a URL with any user name (including none), - but at a lower precedence than a config key with a user name. --- -+ -The list above is ordered by decreasing precedence; a URL that matches -a config key's path is preferred to one that matches its user name. For example, -if the URL is `https://user@example.com/foo/bar` a config key match of -`https://example.com/foo` will be preferred over a config key match of -`https://user@example.com`. -+ -All URLs are normalized before attempting any matching (the password part, -if embedded in the URL, is always ignored for matching purposes) so that -equivalent URLs that are simply spelled differently will match properly. -Environment variable settings always override any matches. The URLs that are -matched against are those given directly to Git commands. This means any URLs -visited as a result of a redirection do not participate in matching. - -ssh.variant:: - By default, Git determines the command line arguments to use - based on the basename of the configured SSH command (configured - using the environment variable `GIT_SSH` or `GIT_SSH_COMMAND` or - the config setting `core.sshCommand`). If the basename is - unrecognized, Git will attempt to detect support of OpenSSH - options by first invoking the configured SSH command with the - `-G` (print configuration) option and will subsequently use - OpenSSH options (if that is successful) or no options besides - the host and remote command (if it fails). -+ -The config variable `ssh.variant` can be set to override this detection. -Valid values are `ssh` (to use OpenSSH options), `plink`, `putty`, -`tortoiseplink`, `simple` (no options except the host and remote command). -The default auto-detection can be explicitly requested using the value -`auto`. Any other value is treated as `ssh`. This setting can also be -overridden via the environment variable `GIT_SSH_VARIANT`. -+ -The current command-line parameters used for each variant are as -follows: -+ --- +include::config/am.txt[] -* `ssh` - [-p port] [-4] [-6] [-o option] [username@]host command +include::config/apply.txt[] -* `simple` - [username@]host command +include::config/blame.txt[] -* `plink` or `putty` - [-P port] [-4] [-6] [username@]host command +include::config/branch.txt[] -* `tortoiseplink` - [-P port] [-4] [-6] -batch [username@]host command +include::config/browser.txt[] --- -+ -Except for the `simple` variant, command-line parameters are likely to -change as git gains new features. - -i18n.commitEncoding:: - Character encoding the commit messages are stored in; Git itself - does not care per se, but this information is necessary e.g. 
when - importing commits from emails or in the gitk graphical history - browser (and possibly at other places in the future or in other - porcelains). See e.g. linkgit:git-mailinfo[1]. Defaults to 'utf-8'. - -i18n.logOutputEncoding:: - Character encoding the commit messages are converted to when - running 'git log' and friends. - -imap:: - The configuration variables in the 'imap' section are described - in linkgit:git-imap-send[1]. - -index.version:: - Specify the version with which new index files should be - initialized. This does not affect existing repositories. - -init.templateDir:: - Specify the directory from which templates will be copied. - (See the "TEMPLATE DIRECTORY" section of linkgit:git-init[1].) - -instaweb.browser:: - Specify the program that will be used to browse your working - repository in gitweb. See linkgit:git-instaweb[1]. - -instaweb.httpd:: - The HTTP daemon command-line to start gitweb on your working - repository. See linkgit:git-instaweb[1]. - -instaweb.local:: - If true the web server started by linkgit:git-instaweb[1] will - be bound to the local IP (127.0.0.1). - -instaweb.modulePath:: - The default module path for linkgit:git-instaweb[1] to use - instead of /usr/lib/apache2/modules. Only used if httpd - is Apache. - -instaweb.port:: - The port number to bind the gitweb httpd to. See - linkgit:git-instaweb[1]. - -interactive.singleKey:: - In interactive commands, allow the user to provide one-letter - input with a single key (i.e., without hitting enter). - Currently this is used by the `--patch` mode of - linkgit:git-add[1], linkgit:git-checkout[1], linkgit:git-commit[1], - linkgit:git-reset[1], and linkgit:git-stash[1]. Note that this - setting is silently ignored if portable keystroke input - is not available; requires the Perl module Term::ReadKey. - -interactive.diffFilter:: - When an interactive command (such as `git add --patch`) shows - a colorized diff, git will pipe the diff through the shell - command defined by this configuration variable. The command may - mark up the diff further for human consumption, provided that it - retains a one-to-one correspondence with the lines in the - original diff. Defaults to disabled (no filtering). - -log.abbrevCommit:: - If true, makes linkgit:git-log[1], linkgit:git-show[1], and - linkgit:git-whatchanged[1] assume `--abbrev-commit`. You may - override this option with `--no-abbrev-commit`. - -log.date:: - Set the default date-time mode for the 'log' command. - Setting a value for log.date is similar to using 'git log''s - `--date` option. See linkgit:git-log[1] for details. - -log.decorate:: - Print out the ref names of any commits that are shown by the log - command. If 'short' is specified, the ref name prefixes 'refs/heads/', - 'refs/tags/' and 'refs/remotes/' will not be printed. If 'full' is - specified, the full ref name (including prefix) will be printed. - If 'auto' is specified, then if the output is going to a terminal, - the ref names are shown as if 'short' were given, otherwise no ref - names are shown. This is the same as the `--decorate` option - of the `git log`. - -log.follow:: - If `true`, `git log` will act as if the `--follow` option was used when - a single <path> is given. This has the same limitations as `--follow`, - i.e. it cannot be used to follow multiple files and does not work well - on non-linear history. - -log.graphColors:: - A list of colors, separated by commas, that can be used to draw - history lines in `git log --graph`. 
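A small sketch of the `interactive.*` and `log.*` preferences documented above; `diff-highlight` is assumed to be installed and on `$PATH` (it ships in Git's contrib/ directory), and the color list is arbitrary:

----
[interactive]
	singleKey = true	; needs the Perl module Term::ReadKey
	diffFilter = diff-highlight
[log]
	abbrevCommit = true
	date = iso
	decorate = auto
	follow = true
	graphColors = red,green,yellow,blue
----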
- -log.showRoot:: - If true, the initial commit will be shown as a big creation event. - This is equivalent to a diff against an empty tree. - Tools like linkgit:git-log[1] or linkgit:git-whatchanged[1], which - normally hide the root commit will now show it. True by default. - -log.showSignature:: - If true, makes linkgit:git-log[1], linkgit:git-show[1], and - linkgit:git-whatchanged[1] assume `--show-signature`. - -log.mailmap:: - If true, makes linkgit:git-log[1], linkgit:git-show[1], and - linkgit:git-whatchanged[1] assume `--use-mailmap`. - -mailinfo.scissors:: - If true, makes linkgit:git-mailinfo[1] (and therefore - linkgit:git-am[1]) act by default as if the --scissors option - was provided on the command-line. When active, this features - removes everything from the message body before a scissors - line (i.e. consisting mainly of ">8", "8<" and "-"). - -mailmap.file:: - The location of an augmenting mailmap file. The default - mailmap, located in the root of the repository, is loaded - first, then the mailmap file pointed to by this variable. - The location of the mailmap file may be in a repository - subdirectory, or somewhere outside of the repository itself. - See linkgit:git-shortlog[1] and linkgit:git-blame[1]. - -mailmap.blob:: - Like `mailmap.file`, but consider the value as a reference to a - blob in the repository. If both `mailmap.file` and - `mailmap.blob` are given, both are parsed, with entries from - `mailmap.file` taking precedence. In a bare repository, this - defaults to `HEAD:.mailmap`. In a non-bare repository, it - defaults to empty. - -man.viewer:: - Specify the programs that may be used to display help in the - 'man' format. See linkgit:git-help[1]. - -man.<tool>.cmd:: - Specify the command to invoke the specified man viewer. The - specified command is evaluated in shell with the man page - passed as argument. (See linkgit:git-help[1].) - -man.<tool>.path:: - Override the path for the given tool that may be used to - display help in the 'man' format. See linkgit:git-help[1]. - -include::merge-config.txt[] - -mergetool.<tool>.path:: - Override the path for the given tool. This is useful in case - your tool is not in the PATH. - -mergetool.<tool>.cmd:: - Specify the command to invoke the specified merge tool. The - specified command is evaluated in shell with the following - variables available: 'BASE' is the name of a temporary file - containing the common base of the files to be merged, if available; - 'LOCAL' is the name of a temporary file containing the contents of - the file on the current branch; 'REMOTE' is the name of a temporary - file containing the contents of the file from the branch being - merged; 'MERGED' contains the name of the file to which the merge - tool should write the results of a successful merge. - -mergetool.<tool>.trustExitCode:: - For a custom merge command, specify whether the exit code of - the merge command can be used to determine whether the merge was - successful. If this is not set to true then the merge target file - timestamp is checked and the merge assumed to have been successful - if the file has been updated, otherwise the user is prompted to - indicate the success of the merge. - -mergetool.meld.hasOutput:: - Older versions of `meld` do not support the `--output` option. - Git will attempt to detect whether `meld` supports `--output` - by inspecting the output of `meld --help`. Configuring - `mergetool.meld.hasOutput` will make Git skip these checks and - use the configured value instead. 
Setting `mergetool.meld.hasOutput` - to `true` tells Git to unconditionally use the `--output` option, - and `false` avoids using `--output`. - -mergetool.keepBackup:: - After performing a merge, the original file with conflict markers - can be saved as a file with a `.orig` extension. If this variable - is set to `false` then this file is not preserved. Defaults to - `true` (i.e. keep the backup files). - -mergetool.keepTemporaries:: - When invoking a custom merge tool, Git uses a set of temporary - files to pass to the tool. If the tool returns an error and this - variable is set to `true`, then these temporary files will be - preserved, otherwise they will be removed after the tool has - exited. Defaults to `false`. - -mergetool.writeToTemp:: - Git writes temporary 'BASE', 'LOCAL', and 'REMOTE' versions of - conflicting files in the worktree by default. Git will attempt - to use a temporary directory for these files when set `true`. - Defaults to `false`. - -mergetool.prompt:: - Prompt before each invocation of the merge resolution program. - -notes.mergeStrategy:: - Which merge strategy to choose by default when resolving notes - conflicts. Must be one of `manual`, `ours`, `theirs`, `union`, or - `cat_sort_uniq`. Defaults to `manual`. See "NOTES MERGE STRATEGIES" - section of linkgit:git-notes[1] for more information on each strategy. - -notes.<name>.mergeStrategy:: - Which merge strategy to choose when doing a notes merge into - refs/notes/<name>. This overrides the more general - "notes.mergeStrategy". See the "NOTES MERGE STRATEGIES" section in - linkgit:git-notes[1] for more information on the available strategies. - -notes.displayRef:: - The (fully qualified) refname from which to show notes when - showing commit messages. The value of this variable can be set - to a glob, in which case notes from all matching refs will be - shown. You may also specify this configuration variable - several times. A warning will be issued for refs that do not - exist, but a glob that does not match any refs is silently - ignored. -+ -This setting can be overridden with the `GIT_NOTES_DISPLAY_REF` -environment variable, which must be a colon separated list of refs or -globs. -+ -The effective value of "core.notesRef" (possibly overridden by -GIT_NOTES_REF) is also implicitly added to the list of refs to be -displayed. - -notes.rewrite.<command>:: - When rewriting commits with <command> (currently `amend` or - `rebase`) and this variable is set to `true`, Git - automatically copies your notes from the original to the - rewritten commit. Defaults to `true`, but see - "notes.rewriteRef" below. - -notes.rewriteMode:: - When copying notes during a rewrite (see the - "notes.rewrite.<command>" option), determines what to do if - the target commit already has a note. Must be one of - `overwrite`, `concatenate`, `cat_sort_uniq`, or `ignore`. - Defaults to `concatenate`. -+ -This setting can be overridden with the `GIT_NOTES_REWRITE_MODE` -environment variable. - -notes.rewriteRef:: - When copying notes during a rewrite, specifies the (fully - qualified) ref whose notes should be copied. The ref may be a - glob, in which case notes in all matching refs will be copied. - You may also specify this configuration several times. -+ -Does not have a default value; you must configure this variable to -enable note rewriting. Set it to `refs/notes/commits` to enable -rewriting for the default commit notes. 
-+ -This setting can be overridden with the `GIT_NOTES_REWRITE_REF` -environment variable, which must be a colon separated list of refs or -globs. - -pack.window:: - The size of the window used by linkgit:git-pack-objects[1] when no - window size is given on the command line. Defaults to 10. - -pack.depth:: - The maximum delta depth used by linkgit:git-pack-objects[1] when no - maximum depth is given on the command line. Defaults to 50. - Maximum value is 4095. - -pack.windowMemory:: - The maximum size of memory that is consumed by each thread - in linkgit:git-pack-objects[1] for pack window memory when - no limit is given on the command line. The value can be - suffixed with "k", "m", or "g". When left unconfigured (or - set explicitly to 0), there will be no limit. - -pack.compression:: - An integer -1..9, indicating the compression level for objects - in a pack file. -1 is the zlib default. 0 means no - compression, and 1..9 are various speed/size tradeoffs, 9 being - slowest. If not set, defaults to core.compression. If that is - not set, defaults to -1, the zlib default, which is "a default - compromise between speed and compression (currently equivalent - to level 6)." -+ -Note that changing the compression level will not automatically recompress -all existing objects. You can force recompression by passing the -F option -to linkgit:git-repack[1]. - -pack.deltaCacheSize:: - The maximum memory in bytes used for caching deltas in - linkgit:git-pack-objects[1] before writing them out to a pack. - This cache is used to speed up the writing object phase by not - having to recompute the final delta result once the best match - for all objects is found. Repacking large repositories on machines - which are tight with memory might be badly impacted by this though, - especially if this cache pushes the system into swapping. - A value of 0 means no limit. The smallest size of 1 byte may be - used to virtually disable this cache. Defaults to 256 MiB. - -pack.deltaCacheLimit:: - The maximum size of a delta, that is cached in - linkgit:git-pack-objects[1]. This cache is used to speed up the - writing object phase by not having to recompute the final delta - result once the best match for all objects is found. - Defaults to 1000. Maximum value is 65535. - -pack.threads:: - Specifies the number of threads to spawn when searching for best - delta matches. This requires that linkgit:git-pack-objects[1] - be compiled with pthreads otherwise this option is ignored with a - warning. This is meant to reduce packing time on multiprocessor - machines. The required amount of memory for the delta search window - is however multiplied by the number of threads. - Specifying 0 will cause Git to auto-detect the number of CPU's - and set the number of threads accordingly. - -pack.indexVersion:: - Specify the default pack index version. Valid values are 1 for - legacy pack index used by Git versions prior to 1.5.2, and 2 for - the new pack index with capabilities for packs larger than 4 GB - as well as proper protection against the repacking of corrupted - packs. Version 2 is the default. Note that version 2 is enforced - and this config option ignored whenever the corresponding pack is - larger than 2 GB. -+ -If you have an old Git that does not understand the version 2 `*.idx` file, -cloning or fetching over a non native protocol (e.g. "http") -that will copy both `*.pack` file and corresponding `*.idx` file from the -other side may give you a repository that cannot be accessed with your -older version of Git. 
If the `*.pack` file is smaller than 2 GB, however, -you can use linkgit:git-index-pack[1] on the *.pack file to regenerate -the `*.idx` file. - -pack.packSizeLimit:: - The maximum size of a pack. This setting only affects - packing to a file when repacking, i.e. the git:// protocol - is unaffected. It can be overridden by the `--max-pack-size` - option of linkgit:git-repack[1]. Reaching this limit results - in the creation of multiple packfiles; which in turn prevents - bitmaps from being created. - The minimum size allowed is limited to 1 MiB. - The default is unlimited. - Common unit suffixes of 'k', 'm', or 'g' are - supported. - -pack.useBitmaps:: - When true, git will use pack bitmaps (if available) when packing - to stdout (e.g., during the server side of a fetch). Defaults to - true. You should not generally need to turn this off unless - you are debugging pack bitmaps. - -pack.writeBitmaps (deprecated):: - This is a deprecated synonym for `repack.writeBitmaps`. - -pack.writeBitmapHashCache:: - When true, git will include a "hash cache" section in the bitmap - index (if one is written). This cache can be used to feed git's - delta heuristics, potentially leading to better deltas between - bitmapped and non-bitmapped objects (e.g., when serving a fetch - between an older, bitmapped pack and objects that have been - pushed since the last gc). The downside is that it consumes 4 - bytes per object of disk space, and that JGit's bitmap - implementation does not understand it, causing it to complain if - Git and JGit are used on the same repository. Defaults to false. - -pager.<cmd>:: - If the value is boolean, turns on or off pagination of the - output of a particular Git subcommand when writing to a tty. - Otherwise, turns on pagination for the subcommand using the - pager specified by the value of `pager.<cmd>`. If `--paginate` - or `--no-pager` is specified on the command line, it takes - precedence over this option. To disable pagination for all - commands, set `core.pager` or `GIT_PAGER` to `cat`. - -pretty.<name>:: - Alias for a --pretty= format string, as specified in - linkgit:git-log[1]. Any aliases defined here can be used just - as the built-in pretty formats could. For example, - running `git config pretty.changelog "format:* %H %s"` - would cause the invocation `git log --pretty=changelog` - to be equivalent to running `git log "--pretty=format:* %H %s"`. - Note that an alias with the same name as a built-in format - will be silently ignored. - -protocol.allow:: - If set, provide a user defined default policy for all protocols which - don't explicitly have a policy (`protocol.<name>.allow`). By default, - if unset, known-safe protocols (http, https, git, ssh, file) have a - default policy of `always`, known-dangerous protocols (ext) have a - default policy of `never`, and all other protocols have a default - policy of `user`. Supported policies: -+ --- +include::config/checkout.txt[] -* `always` - protocol is always able to be used. +include::config/clean.txt[] -* `never` - protocol is never able to be used. +include::config/color.txt[] -* `user` - protocol is only able to be used when `GIT_PROTOCOL_FROM_USER` is - either unset or has a value of 1. This policy should be used when you want a - protocol to be directly usable by the user but don't want it used by commands which - execute clone/fetch/push commands without user input, e.g. recursive - submodule initialization. 
+include::config/column.txt[] --- +include::config/commit.txt[] -protocol.<name>.allow:: - Set a policy to be used by protocol `<name>` with clone/fetch/push - commands. See `protocol.allow` above for the available policies. -+ -The protocol names currently used by git are: -+ --- - - `file`: any local file-based path (including `file://` URLs, - or local paths) - - - `git`: the anonymous git protocol over a direct TCP - connection (or proxy, if configured) - - - `ssh`: git over ssh (including `host:path` syntax, - `ssh://`, etc). - - - `http`: git over http, both "smart http" and "dumb http". - Note that this does _not_ include `https`; if you want to configure - both, you must do so individually. - - - any external helpers are named by their protocol (e.g., use - `hg` to allow the `git-remote-hg` helper) --- - -protocol.version:: - Experimental. If set, clients will attempt to communicate with a - server using the specified protocol version. If unset, no - attempt will be made by the client to communicate using a - particular protocol version, this results in protocol version 0 - being used. - Supported versions: -+ --- +include::config/credential.txt[] -* `0` - the original wire protocol. +include::config/completion.txt[] -* `1` - the original wire protocol with the addition of a version string - in the initial response from the server. +include::config/diff.txt[] -* `2` - link:technical/protocol-v2.html[wire protocol version 2]. +include::config/difftool.txt[] --- +include::config/fastimport.txt[] -pull.ff:: - By default, Git does not create an extra merge commit when merging - a commit that is a descendant of the current commit. Instead, the - tip of the current branch is fast-forwarded. When set to `false`, - this variable tells Git to create an extra merge commit in such - a case (equivalent to giving the `--no-ff` option from the command - line). When set to `only`, only such fast-forward merges are - allowed (equivalent to giving the `--ff-only` option from the - command line). This setting overrides `merge.ff` when pulling. +include::config/fetch.txt[] -pull.rebase:: - When true, rebase branches on top of the fetched branch, instead - of merging the default branch from the default remote when "git - pull" is run. See "branch.<name>.rebase" for setting this on a - per-branch basis. -+ -When `merges`, pass the `--rebase-merges` option to 'git rebase' -so that the local merge commits are included in the rebase (see -linkgit:git-rebase[1] for details). -+ -When preserve, also pass `--preserve-merges` along to 'git rebase' -so that locally committed merge commits will not be flattened -by running 'git pull'. -+ -When the value is `interactive`, the rebase is run in interactive mode. -+ -*NOTE*: this is a possibly dangerous operation; do *not* use -it unless you understand the implications (see linkgit:git-rebase[1] -for details). - -pull.octopus:: - The default merge strategy to use when pulling multiple branches - at once. - -pull.twohead:: - The default merge strategy to use when pulling a single branch. - -push.default:: - Defines the action `git push` should take if no refspec is - explicitly given. Different values are well-suited for - specific workflows; for instance, in a purely central workflow - (i.e. the fetch source is equal to the push destination), - `upstream` is probably what you want. Possible values are: -+ --- +include::config/format.txt[] -* `nothing` - do not push anything (error out) unless a refspec is - explicitly given. 
This is primarily meant for people who want to - avoid mistakes by always being explicit. +include::config/filter.txt[] -* `current` - push the current branch to update a branch with the same - name on the receiving end. Works in both central and non-central - workflows. +include::config/fsck.txt[] -* `upstream` - push the current branch back to the branch whose - changes are usually integrated into the current branch (which is - called `@{upstream}`). This mode only makes sense if you are - pushing to the same repository you would normally pull from - (i.e. central workflow). +include::config/gc.txt[] -* `tracking` - This is a deprecated synonym for `upstream`. +include::config/gitcvs.txt[] -* `simple` - in centralized workflow, work like `upstream` with an - added safety to refuse to push if the upstream branch's name is - different from the local one. -+ -When pushing to a remote that is different from the remote you normally -pull from, work as `current`. This is the safest option and is suited -for beginners. -+ -This mode has become the default in Git 2.0. - -* `matching` - push all branches having the same name on both ends. - This makes the repository you are pushing to remember the set of - branches that will be pushed out (e.g. if you always push 'maint' - and 'master' there and no other branches, the repository you push - to will have these two branches, and your local 'maint' and - 'master' will be pushed there). -+ -To use this mode effectively, you have to make sure _all_ the -branches you would push out are ready to be pushed out before -running 'git push', as the whole point of this mode is to allow you -to push all of the branches in one go. If you usually finish work -on only one branch and push out the result, while other branches are -unfinished, this mode is not for you. Also this mode is not -suitable for pushing into a shared central repository, as other -people may add new branches there, or update the tip of existing -branches outside your control. -+ -This used to be the default, but not since Git 2.0 (`simple` is the -new default). - --- - -push.followTags:: - If set to true enable `--follow-tags` option by default. You - may override this configuration at time of push by specifying - `--no-follow-tags`. - -push.gpgSign:: - May be set to a boolean value, or the string 'if-asked'. A true - value causes all pushes to be GPG signed, as if `--signed` is - passed to linkgit:git-push[1]. The string 'if-asked' causes - pushes to be signed if the server supports it, as if - `--signed=if-asked` is passed to 'git push'. A false value may - override a value from a lower-priority config file. An explicit - command-line flag always overrides this config option. - -push.pushOption:: - When no `--push-option=<option>` argument is given from the - command line, `git push` behaves as if each <value> of - this variable is given as `--push-option=<value>`. -+ -This is a multi-valued variable, and an empty value can be used in a -higher priority configuration file (e.g. `.git/config` in a -repository) to clear the values inherited from a lower priority -configuration files (e.g. `$HOME/.gitconfig`). -+ --- - -Example: - -/etc/gitconfig - push.pushoption = a - push.pushoption = b - -~/.gitconfig - push.pushoption = c - -repo/.git/config - push.pushoption = - push.pushoption = b - -This will result in only b (a and c are cleared). - --- - -push.recurseSubmodules:: - Make sure all submodule commits used by the revisions to be pushed - are available on a remote-tracking branch. 
If the value is 'check' - then Git will verify that all submodule commits that changed in the - revisions to be pushed are available on at least one remote of the - submodule. If any commits are missing, the push will be aborted and - exit with non-zero status. If the value is 'on-demand' then all - submodules that changed in the revisions to be pushed will be - pushed. If on-demand was not able to push all necessary revisions - it will also be aborted and exit with non-zero status. If the value - is 'no' then default behavior of ignoring submodules when pushing - is retained. You may override this configuration at time of push by - specifying '--recurse-submodules=check|on-demand|no'. - -include::rebase-config.txt[] - -receive.advertiseAtomic:: - By default, git-receive-pack will advertise the atomic push - capability to its clients. If you don't want to advertise this - capability, set this variable to false. - -receive.advertisePushOptions:: - When set to true, git-receive-pack will advertise the push options - capability to its clients. False by default. - -receive.autogc:: - By default, git-receive-pack will run "git-gc --auto" after - receiving data from git-push and updating refs. You can stop - it by setting this variable to false. - -receive.certNonceSeed:: - By setting this variable to a string, `git receive-pack` - will accept a `git push --signed` and verifies it by using - a "nonce" protected by HMAC using this string as a secret - key. - -receive.certNonceSlop:: - When a `git push --signed` sent a push certificate with a - "nonce" that was issued by a receive-pack serving the same - repository within this many seconds, export the "nonce" - found in the certificate to `GIT_PUSH_CERT_NONCE` to the - hooks (instead of what the receive-pack asked the sending - side to include). This may allow writing checks in - `pre-receive` and `post-receive` a bit easier. Instead of - checking `GIT_PUSH_CERT_NONCE_SLOP` environment variable - that records by how many seconds the nonce is stale to - decide if they want to accept the certificate, they only - can check `GIT_PUSH_CERT_NONCE_STATUS` is `OK`. - -receive.fsckObjects:: - If it is set to true, git-receive-pack will check all received - objects. See `transfer.fsckObjects` for what's checked. - Defaults to false. If not set, the value of - `transfer.fsckObjects` is used instead. - -receive.fsck.<msg-id>:: - Acts like `fsck.<msg-id>`, but is used by - linkgit:git-receive-pack[1] instead of - linkgit:git-fsck[1]. See the `fsck.<msg-id>` documentation for - details. - -receive.fsck.skipList:: - Acts like `fsck.skipList`, but is used by - linkgit:git-receive-pack[1] instead of - linkgit:git-fsck[1]. See the `fsck.skipList` documentation for - details. - -receive.keepAlive:: - After receiving the pack from the client, `receive-pack` may - produce no output (if `--quiet` was specified) while processing - the pack, causing some networks to drop the TCP connection. - With this option set, if `receive-pack` does not transmit - any data in this phase for `receive.keepAlive` seconds, it will - send a short keepalive packet. The default is 5 seconds; set - to 0 to disable keepalives entirely. - -receive.unpackLimit:: - If the number of objects received in a push is below this - limit then the objects will be unpacked into loose object - files. However if the number of received objects equals or - exceeds this limit then the received pack will be stored as - a pack, after adding any missing delta bases. 
Storing the - pack from a push can make the push operation complete faster, - especially on slow filesystems. If not set, the value of - `transfer.unpackLimit` is used instead. - -receive.maxInputSize:: - If the size of the incoming pack stream is larger than this - limit, then git-receive-pack will error out, instead of - accepting the pack file. If not set or set to 0, then the size - is unlimited. - -receive.denyDeletes:: - If set to true, git-receive-pack will deny a ref update that deletes - the ref. Use this to prevent such a ref deletion via a push. - -receive.denyDeleteCurrent:: - If set to true, git-receive-pack will deny a ref update that - deletes the currently checked out branch of a non-bare repository. - -receive.denyCurrentBranch:: - If set to true or "refuse", git-receive-pack will deny a ref update - to the currently checked out branch of a non-bare repository. - Such a push is potentially dangerous because it brings the HEAD - out of sync with the index and working tree. If set to "warn", - print a warning of such a push to stderr, but allow the push to - proceed. If set to false or "ignore", allow such pushes with no - message. Defaults to "refuse". -+ -Another option is "updateInstead" which will update the working -tree if pushing into the current branch. This option is -intended for synchronizing working directories when one side is not easily -accessible via interactive ssh (e.g. a live web site, hence the requirement -that the working directory be clean). This mode also comes in handy when -developing inside a VM to test and fix code on different Operating Systems. -+ -By default, "updateInstead" will refuse the push if the working tree or -the index have any difference from the HEAD, but the `push-to-checkout` -hook can be used to customize this. See linkgit:githooks[5]. - -receive.denyNonFastForwards:: - If set to true, git-receive-pack will deny a ref update which is - not a fast-forward. Use this to prevent such an update via a push, - even if that push is forced. This configuration variable is - set when initializing a shared repository. - -receive.hideRefs:: - This variable is the same as `transfer.hideRefs`, but applies - only to `receive-pack` (and so affects pushes, but not fetches). - An attempt to update or delete a hidden ref by `git push` is - rejected. - -receive.updateServerInfo:: - If set to true, git-receive-pack will run git-update-server-info - after receiving data from git-push and updating refs. - -receive.shallowUpdate:: - If set to true, .git/shallow can be updated when new refs - require new shallow roots. Otherwise those refs are rejected. - -remote.pushDefault:: - The remote to push to by default. Overrides - `branch.<name>.remote` for all branches, and is overridden by - `branch.<name>.pushRemote` for specific branches. - -remote.<name>.url:: - The URL of a remote repository. See linkgit:git-fetch[1] or - linkgit:git-push[1]. - -remote.<name>.pushurl:: - The push URL of a remote repository. See linkgit:git-push[1]. - -remote.<name>.proxy:: - For remotes that require curl (http, https and ftp), the URL to - the proxy to use for that remote. Set to the empty string to - disable proxying for that remote. - -remote.<name>.proxyAuthMethod:: - For remotes that require curl (http, https and ftp), the method to use for - authenticating against the proxy in use (probably set in - `remote.<name>.proxy`). See `http.proxyAuthMethod`. - -remote.<name>.fetch:: - The default set of "refspec" for linkgit:git-fetch[1]. See - linkgit:git-fetch[1]. 
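A hypothetical excerpt for the receiving repository that combines several of the `receive.*` keys above; the size limit is an arbitrary example (100 MiB expressed in bytes) and `origin` is simply the conventional remote name:

----
[receive]
	fsckObjects = true
	denyDeletes = true
	denyNonFastForwards = true
	denyCurrentBranch = updateInstead	; update the working tree when pushing into the checked-out branch
	maxInputSize = 104857600		; reject incoming packs larger than 100 MiB
[remote]
	pushDefault = origin
----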
- -remote.<name>.push:: - The default set of "refspec" for linkgit:git-push[1]. See - linkgit:git-push[1]. - -remote.<name>.mirror:: - If true, pushing to this remote will automatically behave - as if the `--mirror` option was given on the command line. - -remote.<name>.skipDefaultUpdate:: - If true, this remote will be skipped by default when updating - using linkgit:git-fetch[1] or the `update` subcommand of - linkgit:git-remote[1]. - -remote.<name>.skipFetchAll:: - If true, this remote will be skipped by default when updating - using linkgit:git-fetch[1] or the `update` subcommand of - linkgit:git-remote[1]. - -remote.<name>.receivepack:: - The default program to execute on the remote side when pushing. See - option --receive-pack of linkgit:git-push[1]. - -remote.<name>.uploadpack:: - The default program to execute on the remote side when fetching. See - option --upload-pack of linkgit:git-fetch-pack[1]. - -remote.<name>.tagOpt:: - Setting this value to --no-tags disables automatic tag following when - fetching from remote <name>. Setting it to --tags will fetch every - tag from remote <name>, even if they are not reachable from remote - branch heads. Passing these flags directly to linkgit:git-fetch[1] can - override this setting. See options --tags and --no-tags of - linkgit:git-fetch[1]. - -remote.<name>.vcs:: - Setting this to a value <vcs> will cause Git to interact with - the remote with the git-remote-<vcs> helper. - -remote.<name>.prune:: - When set to true, fetching from this remote by default will also - remove any remote-tracking references that no longer exist on the - remote (as if the `--prune` option was given on the command line). - Overrides `fetch.prune` settings, if any. - -remote.<name>.pruneTags:: - When set to true, fetching from this remote by default will also - remove any local tags that no longer exist on the remote if pruning - is activated in general via `remote.<name>.prune`, `fetch.prune` or - `--prune`. Overrides `fetch.pruneTags` settings, if any. -+ -See also `remote.<name>.prune` and the PRUNING section of -linkgit:git-fetch[1]. - -remotes.<group>:: - The list of remotes which are fetched by "git remote update - <group>". See linkgit:git-remote[1]. - -repack.useDeltaBaseOffset:: - By default, linkgit:git-repack[1] creates packs that use - delta-base offset. If you need to share your repository with - Git older than version 1.4.4, either directly or via a dumb - protocol such as http, then you need to set this option to - "false" and repack. Access from old Git versions over the - native protocol are unaffected by this option. - -repack.packKeptObjects:: - If set to true, makes `git repack` act as if - `--pack-kept-objects` was passed. See linkgit:git-repack[1] for - details. Defaults to `false` normally, but `true` if a bitmap - index is being written (either via `--write-bitmap-index` or - `repack.writeBitmaps`). - -repack.writeBitmaps:: - When true, git will write a bitmap index when packing all - objects to disk (e.g., when `git repack -a` is run). This - index can speed up the "counting objects" phase of subsequent - packs created for clones and fetches, at the cost of some disk - space and extra time spent on the initial repack. This has - no effect if multiple packfiles are created. - Defaults to false. - -rerere.autoUpdate:: - When set to true, `git-rerere` updates the index with the - resulting contents after it cleanly resolves conflicts using - previously recorded resolution. Defaults to false. 
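To show how the per-remote keys above combine, a sketch of a secondary remote plus a fetch group; the URL, remote name and group name are placeholders:

----
[remote "upstream"]
	url = https://example.com/project.git
	fetch = +refs/heads/*:refs/remotes/upstream/*
	tagOpt = --no-tags	; do not follow tags from this remote
	prune = true
	pruneTags = true
[remotes]
	mygroup = origin upstream	; fetched by 'git remote update mygroup'
----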
- -rerere.enabled:: - Activate recording of resolved conflicts, so that identical - conflict hunks can be resolved automatically, should they be - encountered again. By default, linkgit:git-rerere[1] is - enabled if there is an `rr-cache` directory under the - `$GIT_DIR`, e.g. if "rerere" was previously used in the - repository. - -sendemail.identity:: - A configuration identity. When given, causes values in the - 'sendemail.<identity>' subsection to take precedence over - values in the 'sendemail' section. The default identity is - the value of `sendemail.identity`. - -sendemail.smtpEncryption:: - See linkgit:git-send-email[1] for description. Note that this - setting is not subject to the 'identity' mechanism. - -sendemail.smtpssl (deprecated):: - Deprecated alias for 'sendemail.smtpEncryption = ssl'. - -sendemail.smtpsslcertpath:: - Path to ca-certificates (either a directory or a single file). - Set it to an empty string to disable certificate verification. - -sendemail.<identity>.*:: - Identity-specific versions of the 'sendemail.*' parameters - found below, taking precedence over those when this - identity is selected, through either the command-line or - `sendemail.identity`. - -sendemail.aliasesFile:: -sendemail.aliasFileType:: -sendemail.annotate:: -sendemail.bcc:: -sendemail.cc:: -sendemail.ccCmd:: -sendemail.chainReplyTo:: -sendemail.confirm:: -sendemail.envelopeSender:: -sendemail.from:: -sendemail.multiEdit:: -sendemail.signedoffbycc:: -sendemail.smtpPass:: -sendemail.suppresscc:: -sendemail.suppressFrom:: -sendemail.to:: -sendemail.tocmd:: -sendemail.smtpDomain:: -sendemail.smtpServer:: -sendemail.smtpServerPort:: -sendemail.smtpServerOption:: -sendemail.smtpUser:: -sendemail.thread:: -sendemail.transferEncoding:: -sendemail.validate:: -sendemail.xmailer:: - See linkgit:git-send-email[1] for description. - -sendemail.signedoffcc (deprecated):: - Deprecated alias for `sendemail.signedoffbycc`. - -sendemail.smtpBatchSize:: - Number of messages to be sent per connection, after that a relogin - will happen. If the value is 0 or undefined, send all messages in - one connection. - See also the `--batch-size` option of linkgit:git-send-email[1]. - -sendemail.smtpReloginDelay:: - Seconds wait before reconnecting to smtp server. - See also the `--relogin-delay` option of linkgit:git-send-email[1]. - -showbranch.default:: - The default set of branches for linkgit:git-show-branch[1]. - See linkgit:git-show-branch[1]. - -splitIndex.maxPercentChange:: - When the split index feature is used, this specifies the - percent of entries the split index can contain compared to the - total number of entries in both the split index and the shared - index before a new shared index is written. - The value should be between 0 and 100. If the value is 0 then - a new shared index is always written, if it is 100 a new - shared index is never written. - By default the value is 20, so a new shared index is written - if the number of entries in the split index would be greater - than 20 percent of the total number of entries. - See linkgit:git-update-index[1]. - -splitIndex.sharedIndexExpire:: - When the split index feature is used, shared index files that - were not modified since the time this variable specifies will - be removed when a new shared index file is created. The value - "now" expires all entries immediately, and "never" suppresses - expiration altogether. - The default value is "2.weeks.ago". 
- Note that a shared index file is considered modified (for the - purpose of expiration) each time a new split-index file is - either created based on it or read from it. - See linkgit:git-update-index[1]. - -status.relativePaths:: - By default, linkgit:git-status[1] shows paths relative to the - current directory. Setting this variable to `false` shows paths - relative to the repository root (this was the default for Git - prior to v1.5.4). - -status.short:: - Set to true to enable --short by default in linkgit:git-status[1]. - The option --no-short takes precedence over this variable. - -status.branch:: - Set to true to enable --branch by default in linkgit:git-status[1]. - The option --no-branch takes precedence over this variable. - -status.displayCommentPrefix:: - If set to true, linkgit:git-status[1] will insert a comment - prefix before each output line (starting with - `core.commentChar`, i.e. `#` by default). This was the - behavior of linkgit:git-status[1] in Git 1.8.4 and previous. - Defaults to false. - -status.renameLimit:: - The number of files to consider when performing rename detection - in linkgit:git-status[1] and linkgit:git-commit[1]. Defaults to - the value of diff.renameLimit. - -status.renames:: - Whether and how Git detects renames in linkgit:git-status[1] and - linkgit:git-commit[1] . If set to "false", rename detection is - disabled. If set to "true", basic rename detection is enabled. - If set to "copies" or "copy", Git will detect copies, as well. - Defaults to the value of diff.renames. - -status.showStash:: - If set to true, linkgit:git-status[1] will display the number of - entries currently stashed away. - Defaults to false. - -status.showUntrackedFiles:: - By default, linkgit:git-status[1] and linkgit:git-commit[1] show - files which are not currently tracked by Git. Directories which - contain only untracked files, are shown with the directory name - only. Showing untracked files means that Git needs to lstat() all - the files in the whole repository, which might be slow on some - systems. So, this variable controls how the commands displays - the untracked files. Possible values are: -+ --- -* `no` - Show no untracked files. -* `normal` - Show untracked files and directories. -* `all` - Show also individual files in untracked directories. --- -+ -If this variable is not specified, it defaults to 'normal'. -This variable can be overridden with the -u|--untracked-files option -of linkgit:git-status[1] and linkgit:git-commit[1]. - -status.submoduleSummary:: - Defaults to false. - If this is set to a non zero number or true (identical to -1 or an - unlimited number), the submodule summary will be enabled and a - summary of commits for modified submodules will be shown (see - --summary-limit option of linkgit:git-submodule[1]). Please note - that the summary output command will be suppressed for all - submodules when `diff.ignoreSubmodules` is set to 'all' or only - for those submodules where `submodule.<name>.ignore=all`. The only - exception to that rule is that status and commit will show staged - submodule changes. To - also view the summary for ignored submodules you can either use - the --ignore-submodules=dirty command-line option or the 'git - submodule summary' command, which shows a similar output but does - not honor these settings. - -stash.showPatch:: - If this is set to true, the `git stash show` command without an - option will show the stash entry in patch form. Defaults to false. - See description of 'show' command in linkgit:git-stash[1]. 
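Finally, a hypothetical excerpt combining the `status.*` and `stash.*` display options documented above; which of these are desirable is purely a matter of taste:

----
[status]
	short = true
	branch = true
	showStash = true
	showUntrackedFiles = all
	renames = copies	; detect renames and copies
	submoduleSummary = true
[stash]
	showPatch = true
----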
- -stash.showStat:: - If this is set to true, the `git stash show` command without an - option will show diffstat of the stash entry. Defaults to true. - See description of 'show' command in linkgit:git-stash[1]. - -submodule.<name>.url:: - The URL for a submodule. This variable is copied from the .gitmodules - file to the git config via 'git submodule init'. The user can change - the configured URL before obtaining the submodule via 'git submodule - update'. If neither submodule.<name>.active or submodule.active are - set, the presence of this variable is used as a fallback to indicate - whether the submodule is of interest to git commands. - See linkgit:git-submodule[1] and linkgit:gitmodules[5] for details. - -submodule.<name>.update:: - The method by which a submodule is updated by 'git submodule update', - which is the only affected command, others such as - 'git checkout --recurse-submodules' are unaffected. It exists for - historical reasons, when 'git submodule' was the only command to - interact with submodules; settings like `submodule.active` - and `pull.rebase` are more specific. It is populated by - `git submodule init` from the linkgit:gitmodules[5] file. - See description of 'update' command in linkgit:git-submodule[1]. - -submodule.<name>.branch:: - The remote branch name for a submodule, used by `git submodule - update --remote`. Set this option to override the value found in - the `.gitmodules` file. See linkgit:git-submodule[1] and - linkgit:gitmodules[5] for details. - -submodule.<name>.fetchRecurseSubmodules:: - This option can be used to control recursive fetching of this - submodule. It can be overridden by using the --[no-]recurse-submodules - command-line option to "git fetch" and "git pull". - This setting will override that from in the linkgit:gitmodules[5] - file. - -submodule.<name>.ignore:: - Defines under what circumstances "git status" and the diff family show - a submodule as modified. When set to "all", it will never be considered - modified (but it will nonetheless show up in the output of status and - commit when it has been staged), "dirty" will ignore all changes - to the submodules work tree and - takes only differences between the HEAD of the submodule and the commit - recorded in the superproject into account. "untracked" will additionally - let submodules with modified tracked files in their work tree show up. - Using "none" (the default when this option is not set) also shows - submodules that have untracked files in their work tree as changed. - This setting overrides any setting made in .gitmodules for this submodule, - both settings can be overridden on the command line by using the - "--ignore-submodules" option. The 'git submodule' commands are not - affected by this setting. - -submodule.<name>.active:: - Boolean value indicating if the submodule is of interest to git - commands. This config option takes precedence over the - submodule.active config option. See linkgit:gitsubmodules[7] for - details. - -submodule.active:: - A repeated field which contains a pathspec used to match against a - submodule's path to determine if the submodule is of interest to git - commands. See linkgit:gitsubmodules[7] for details. - -submodule.recurse:: - Specifies if commands recurse into submodules by default. This - applies to all commands that have a `--recurse-submodules` option, - except `clone`. - Defaults to false. - -submodule.fetchJobs:: - Specifies how many submodules are fetched/cloned at the same time. 
- A positive integer allows up to that number of submodules fetched - in parallel. A value of 0 will give some reasonable default. - If unset, it defaults to 1. - -submodule.alternateLocation:: - Specifies how the submodules obtain alternates when submodules are - cloned. Possible values are `no`, `superproject`. - By default `no` is assumed, which doesn't add references. When the - value is set to `superproject` the submodule to be cloned computes - its alternates location relative to the superprojects alternate. - -submodule.alternateErrorStrategy:: - Specifies how to treat errors with the alternates for a submodule - as computed via `submodule.alternateLocation`. Possible values are - `ignore`, `info`, `die`. Default is `die`. - -tag.forceSignAnnotated:: - A boolean to specify whether annotated tags created should be GPG signed. - If `--annotate` is specified on the command line, it takes - precedence over this option. - -tag.sort:: - This variable controls the sort ordering of tags when displayed by - linkgit:git-tag[1]. Without the "--sort=<value>" option provided, the - value of this variable will be used as the default. - -tar.umask:: - This variable can be used to restrict the permission bits of - tar archive entries. The default is 0002, which turns off the - world write bit. The special value "user" indicates that the - archiving user's umask will be used instead. See umask(2) and - linkgit:git-archive[1]. - -transfer.fsckObjects:: - When `fetch.fsckObjects` or `receive.fsckObjects` are - not set, the value of this variable is used instead. - Defaults to false. -+ -When set, the fetch or receive will abort in the case of a malformed -object or a link to a nonexistent object. In addition, various other -issues are checked for, including legacy issues (see `fsck.<msg-id>`), -and potential security issues like the existence of a `.GIT` directory -or a malicious `.gitmodules` file (see the release notes for v2.2.1 -and v2.17.1 for details). Other sanity and security checks may be -added in future releases. -+ -On the receiving side, failing fsckObjects will make those objects -unreachable, see "QUARANTINE ENVIRONMENT" in -linkgit:git-receive-pack[1]. On the fetch side, malformed objects will -instead be left unreferenced in the repository. -+ -Due to the non-quarantine nature of the `fetch.fsckObjects` -implementation it can not be relied upon to leave the object store -clean like `receive.fsckObjects` can. -+ -As objects are unpacked they're written to the object store, so there -can be cases where malicious objects get introduced even though the -"fetch" failed, only to have a subsequent "fetch" succeed because only -new incoming objects are checked, not those that have already been -written to the object store. That difference in behavior should not be -relied upon. In the future, such objects may be quarantined for -"fetch" as well. -+ -For now, the paranoid need to find some way to emulate the quarantine -environment if they'd like the same protection as "push". E.g. in the -case of an internal mirror do the mirroring in two steps, one to fetch -the untrusted objects, and then do a second "push" (which will use the -quarantine) to another internal repo, and have internal clients -consume this pushed-to repository, or embargo internal fetches and -only allow them once a full "fsck" has run (and no new fetches have -happened in the meantime). - -transfer.hideRefs:: - String(s) `receive-pack` and `upload-pack` use to decide which - refs to omit from their initial advertisements. 
Use more than - one definition to specify multiple prefix strings. A ref that is - under the hierarchies listed in the value of this variable is - excluded, and is hidden when responding to `git push` or `git - fetch`. See `receive.hideRefs` and `uploadpack.hideRefs` for - program-specific versions of this config. -+ -You may also include a `!` in front of the ref name to negate the entry, -explicitly exposing it, even if an earlier entry marked it as hidden. -If you have multiple hideRefs values, later entries override earlier ones -(and entries in more-specific config files override less-specific ones). -+ -If a namespace is in use, the namespace prefix is stripped from each -reference before it is matched against `transfer.hiderefs` patterns. -For example, if `refs/heads/master` is specified in `transfer.hideRefs` and -the current namespace is `foo`, then `refs/namespaces/foo/refs/heads/master` -is omitted from the advertisements but `refs/heads/master` and -`refs/namespaces/bar/refs/heads/master` are still advertised as so-called -"have" lines. In order to match refs before stripping, add a `^` in front of -the ref name. If you combine `!` and `^`, `!` must be specified first. -+ -Even if you hide refs, a client may still be able to steal the target -objects via the techniques described in the "SECURITY" section of the -linkgit:gitnamespaces[7] man page; it's best to keep private data in a -separate repository. - -transfer.unpackLimit:: - When `fetch.unpackLimit` or `receive.unpackLimit` are - not set, the value of this variable is used instead. - The default value is 100. - -uploadarchive.allowUnreachable:: - If true, allow clients to use `git archive --remote` to request - any tree, whether reachable from the ref tips or not. See the - discussion in the "SECURITY" section of - linkgit:git-upload-archive[1] for more details. Defaults to - `false`. - -uploadpack.hideRefs:: - This variable is the same as `transfer.hideRefs`, but applies - only to `upload-pack` (and so affects only fetches, not pushes). - An attempt to fetch a hidden ref by `git fetch` will fail. See - also `uploadpack.allowTipSHA1InWant`. - -uploadpack.allowTipSHA1InWant:: - When `uploadpack.hideRefs` is in effect, allow `upload-pack` - to accept a fetch request that asks for an object at the tip - of a hidden ref (by default, such a request is rejected). - See also `uploadpack.hideRefs`. Even if this is false, a client - may be able to steal objects via the techniques described in the - "SECURITY" section of the linkgit:gitnamespaces[7] man page; it's - best to keep private data in a separate repository. - -uploadpack.allowReachableSHA1InWant:: - Allow `upload-pack` to accept a fetch request that asks for an - object that is reachable from any ref tip. However, note that - calculating object reachability is computationally expensive. - Defaults to `false`. Even if this is false, a client may be able - to steal objects via the techniques described in the "SECURITY" - section of the linkgit:gitnamespaces[7] man page; it's best to - keep private data in a separate repository. - -uploadpack.allowAnySHA1InWant:: - Allow `upload-pack` to accept a fetch request that asks for any - object at all. - Defaults to `false`. - -uploadpack.keepAlive:: - When `upload-pack` has started `pack-objects`, there may be a - quiet period while `pack-objects` prepares the pack. Normally - it would output progress information, but if `--quiet` was used - for the fetch, `pack-objects` will output nothing at all until - the pack data begins. 
Some clients and networks may consider - the server to be hung and give up. Setting this option instructs - `upload-pack` to send an empty keepalive packet every - `uploadpack.keepAlive` seconds. Setting this option to 0 - disables keepalive packets entirely. The default is 5 seconds. - -uploadpack.packObjectsHook:: - If this option is set, when `upload-pack` would run - `git pack-objects` to create a packfile for a client, it will - run this shell command instead. The `pack-objects` command and - arguments it _would_ have run (including the `git pack-objects` - at the beginning) are appended to the shell command. The stdin - and stdout of the hook are treated as if `pack-objects` itself - was run. I.e., `upload-pack` will feed input intended for - `pack-objects` to the hook, and expects a completed packfile on - stdout. -+ -Note that this configuration variable is ignored if it is seen in the -repository-level config (this is a safety measure against fetching from -untrusted repositories). - -uploadpack.allowFilter:: - If this option is set, `upload-pack` will support partial - clone and partial fetch object filtering. - -uploadpack.allowRefInWant:: - If this option is set, `upload-pack` will support the `ref-in-want` - feature of the protocol version 2 `fetch` command. This feature - is intended for the benefit of load-balanced servers which may - not have the same view of what OIDs their refs point to due to - replication delay. - -url.<base>.insteadOf:: - Any URL that starts with this value will be rewritten to - start, instead, with <base>. In cases where some site serves a - large number of repositories, and serves them with multiple - access methods, and some users need to use different access - methods, this feature allows people to specify any of the - equivalent URLs and have Git automatically rewrite the URL to - the best alternative for the particular user, even for a - never-before-seen repository on the site. When more than one - insteadOf strings match a given URL, the longest match is used. -+ -Note that any protocol restrictions will be applied to the rewritten -URL. If the rewrite changes the URL to use a custom protocol or remote -helper, you may need to adjust the `protocol.*.allow` config to permit -the request. In particular, protocols you expect to use for submodules -must be set to `always` rather than the default of `user`. See the -description of `protocol.allow` above. - -url.<base>.pushInsteadOf:: - Any URL that starts with this value will not be pushed to; - instead, it will be rewritten to start with <base>, and the - resulting URL will be pushed to. In cases where some site serves - a large number of repositories, and serves them with multiple - access methods, some of which do not allow push, this feature - allows people to specify a pull-only URL and have Git - automatically use an appropriate URL to push, even for a - never-before-seen repository on the site. When more than one - pushInsteadOf strings match a given URL, the longest match is - used. If a remote has an explicit pushurl, Git will ignore this - setting for that remote. - -user.email:: - Your email address to be recorded in any newly created commits. - Can be overridden by the `GIT_AUTHOR_EMAIL`, `GIT_COMMITTER_EMAIL`, and - `EMAIL` environment variables. See linkgit:git-commit-tree[1]. - -user.name:: - Your full name to be recorded in any newly created commits. - Can be overridden by the `GIT_AUTHOR_NAME` and `GIT_COMMITTER_NAME` - environment variables. See linkgit:git-commit-tree[1]. 
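A hedged sketch of how the `url.<base>.*` rewrite rules and the identity settings fit together; `example.com` and the identity below are placeholders, not real endpoints:

----
[url "git@example.com:"]
	# pushes to any URL starting with https://example.com/ are rewritten
	# to start with git@example.com: ; fetches keep using the original URL
	pushInsteadOf = https://example.com/
[user]
	name = A U Thor
	email = author@example.com
----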
- -user.useConfigOnly:: - Instruct Git to avoid trying to guess defaults for `user.email` - and `user.name`, and instead retrieve the values only from the - configuration. For example, if you have multiple email addresses - and would like to use a different one for each repository, then - with this configuration option set to `true` in the global config - along with a name, Git will prompt you to set up an email before - making new commits in a newly cloned repository. - Defaults to `false`. - -user.signingKey:: - If linkgit:git-tag[1] or linkgit:git-commit[1] is not selecting the - key you want it to automatically when creating a signed tag or - commit, you can override the default selection with this variable. - This option is passed unchanged to gpg's --local-user parameter, - so you may specify a key using any method that gpg supports. - -versionsort.prereleaseSuffix (deprecated):: - Deprecated alias for `versionsort.suffix`. Ignored if - `versionsort.suffix` is set. - -versionsort.suffix:: - Even when version sort is used in linkgit:git-tag[1], tagnames - with the same base version but different suffixes are still sorted - lexicographically, resulting e.g. in prerelease tags appearing - after the main release (e.g. "1.0-rc1" after "1.0"). This - variable can be specified to determine the sorting order of tags - with different suffixes. -+ -By specifying a single suffix in this variable, any tagname containing -that suffix will appear before the corresponding main release. E.g. if -the variable is set to "-rc", then all "1.0-rcX" tags will appear before -"1.0". If specified multiple times, once per suffix, then the order of -suffixes in the configuration will determine the sorting order of tagnames -with those suffixes. E.g. if "-pre" appears before "-rc" in the -configuration, then all "1.0-preX" tags will be listed before any -"1.0-rcX" tags. The placement of the main release tag relative to tags -with various suffixes can be determined by specifying the empty suffix -among those other suffixes. E.g. if the suffixes "-rc", "", "-ck" and -"-bfs" appear in the configuration in this order, then all "v4.8-rcX" tags -are listed first, followed by "v4.8", then "v4.8-ckX" and finally -"v4.8-bfsX". -+ -If more than one suffixes match the same tagname, then that tagname will -be sorted according to the suffix which starts at the earliest position in -the tagname. If more than one different matching suffixes start at -that earliest position, then that tagname will be sorted according to the -longest of those suffixes. -The sorting order between different suffixes is undefined if they are -in multiple config files. - -web.browser:: - Specify a web browser that may be used by some commands. - Currently only linkgit:git-instaweb[1] and linkgit:git-help[1] - may use it. - -worktree.guessRemote:: - With `add`, if no branch argument, and neither of `-b` nor - `-B` nor `--detach` are given, the command defaults to - creating a new branch from HEAD. If `worktree.guessRemote` is - set to true, `worktree add` tries to find a remote-tracking - branch whose name uniquely matches the new branch name. If - such a branch exists, it is checked out and set as "upstream" - for the new branch. If no such match can be found, it falls - back to creating a new branch from the current HEAD. 
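A brief illustration of the suffix ordering described above, together with `worktree.guessRemote`; this is purely an example configuration under the documented semantics:

----
[versionsort]
	# with version sort active (e.g. `git tag --sort=version:refname`),
	# 1.0-preX tags list before 1.0-rcX tags, and both before 1.0
	suffix = -pre
	suffix = -rc
[worktree]
	guessRemote = true    # `git worktree add` tries a matching remote-tracking branch
----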
+include::config/gitweb.txt[] + +include::config/grep.txt[] + +include::config/gpg.txt[] + +include::config/gui.txt[] + +include::config/guitool.txt[] + +include::config/help.txt[] + +include::config/http.txt[] + +include::config/i18n.txt[] + +include::config/imap.txt[] + +include::config/index.txt[] + +include::config/init.txt[] + +include::config/instaweb.txt[] + +include::config/interactive.txt[] + +include::config/log.txt[] + +include::config/mailinfo.txt[] + +include::config/mailmap.txt[] + +include::config/man.txt[] + +include::config/merge.txt[] + +include::config/mergetool.txt[] + +include::config/notes.txt[] + +include::config/pack.txt[] + +include::config/pager.txt[] + +include::config/pretty.txt[] + +include::config/protocol.txt[] + +include::config/pull.txt[] + +include::config/push.txt[] + +include::config/rebase.txt[] + +include::config/receive.txt[] + +include::config/remote.txt[] + +include::config/remotes.txt[] + +include::config/repack.txt[] + +include::config/rerere.txt[] + +include::config/reset.txt[] + +include::config/sendemail.txt[] + +include::config/sequencer.txt[] + +include::config/showbranch.txt[] + +include::config/splitindex.txt[] + +include::config/ssh.txt[] + +include::config/status.txt[] + +include::config/stash.txt[] + +include::config/submodule.txt[] + +include::config/tag.txt[] + +include::config/transfer.txt[] + +include::config/uploadarchive.txt[] + +include::config/uploadpack.txt[] + +include::config/url.txt[] + +include::config/user.txt[] + +include::config/versionsort.txt[] + +include::config/web.txt[] + +include::config/worktree.txt[] diff --git a/Documentation/config/add.txt b/Documentation/config/add.txt new file mode 100644 index 0000000000..4d753f006e --- /dev/null +++ b/Documentation/config/add.txt @@ -0,0 +1,7 @@ +add.ignoreErrors:: +add.ignore-errors (deprecated):: + Tells 'git add' to continue adding files when some files cannot be + added due to indexing errors. Equivalent to the `--ignore-errors` + option of linkgit:git-add[1]. `add.ignore-errors` is deprecated, + as it does not follow the usual naming convention for configuration + variables. diff --git a/Documentation/config/advice.txt b/Documentation/config/advice.txt new file mode 100644 index 0000000000..57fcd4c862 --- /dev/null +++ b/Documentation/config/advice.txt @@ -0,0 +1,86 @@ +advice.*:: + These variables control various optional help messages designed to + aid new users. All 'advice.*' variables default to 'true', and you + can tell Git that you do not need help by setting these to 'false': ++ +-- + pushUpdateRejected:: + Set this variable to 'false' if you want to disable + 'pushNonFFCurrent', + 'pushNonFFMatching', 'pushAlreadyExists', + 'pushFetchFirst', and 'pushNeedsForce' + simultaneously. + pushNonFFCurrent:: + Advice shown when linkgit:git-push[1] fails due to a + non-fast-forward update to the current branch. + pushNonFFMatching:: + Advice shown when you ran linkgit:git-push[1] and pushed + 'matching refs' explicitly (i.e. you used ':', or + specified a refspec that isn't your current branch) and + it resulted in a non-fast-forward error. + pushAlreadyExists:: + Shown when linkgit:git-push[1] rejects an update that + does not qualify for fast-forwarding (e.g., a tag.) + pushFetchFirst:: + Shown when linkgit:git-push[1] rejects an update that + tries to overwrite a remote ref that points at an + object we do not have. 
+ pushNeedsForce:: + Shown when linkgit:git-push[1] rejects an update that + tries to overwrite a remote ref that points at an + object that is not a commit-ish, or make the remote + ref point at an object that is not a commit-ish. + statusHints:: + Show directions on how to proceed from the current + state in the output of linkgit:git-status[1], in + the template shown when writing commit messages in + linkgit:git-commit[1], and in the help message shown + by linkgit:git-checkout[1] when switching branch. + statusUoption:: + Advise to consider using the `-u` option to linkgit:git-status[1] + when the command takes more than 2 seconds to enumerate untracked + files. + commitBeforeMerge:: + Advice shown when linkgit:git-merge[1] refuses to + merge to avoid overwriting local changes. + resetQuiet:: + Advice to consider using the `--quiet` option to linkgit:git-reset[1] + when the command takes more than 2 seconds to enumerate unstaged + changes after reset. + resolveConflict:: + Advice shown by various commands when conflicts + prevent the operation from being performed. + implicitIdentity:: + Advice on how to set your identity configuration when + your information is guessed from the system username and + domain name. + detachedHead:: + Advice shown when you used linkgit:git-checkout[1] to + move to the detach HEAD state, to instruct how to create + a local branch after the fact. + checkoutAmbiguousRemoteBranchName:: + Advice shown when the argument to + linkgit:git-checkout[1] ambiguously resolves to a + remote tracking branch on more than one remote in + situations where an unambiguous argument would have + otherwise caused a remote-tracking branch to be + checked out. See the `checkout.defaultRemote` + configuration variable for how to set a given remote + to used by default in some situations where this + advice would be printed. + amWorkDir:: + Advice that shows the location of the patch file when + linkgit:git-am[1] fails to apply it. + rmHints:: + In case of failure in the output of linkgit:git-rm[1], + show directions on how to proceed from the current state. + addEmbeddedRepo:: + Advice on what to do when you've accidentally added one + git repo inside of another. + ignoredHook:: + Advice shown if a hook is ignored because the hook is not + set as executable. + waitingForEditor:: + Print a message to the terminal whenever Git is waiting for + editor input from the user. +-- diff --git a/Documentation/config/alias.txt b/Documentation/config/alias.txt new file mode 100644 index 0000000000..0b14178314 --- /dev/null +++ b/Documentation/config/alias.txt @@ -0,0 +1,18 @@ +alias.*:: + Command aliases for the linkgit:git[1] command wrapper - e.g. + after defining "alias.last = cat-file commit HEAD", the invocation + "git last" is equivalent to "git cat-file commit HEAD". To avoid + confusion and troubles with script usage, aliases that + hide existing Git commands are ignored. Arguments are split by + spaces, the usual shell quoting and escaping is supported. + A quote pair or a backslash can be used to quote them. ++ +If the alias expansion is prefixed with an exclamation point, +it will be treated as a shell command. For example, defining +"alias.new = !gitk --all --not ORIG_HEAD", the invocation +"git new" is equivalent to running the shell command +"gitk --all --not ORIG_HEAD". Note that shell commands will be +executed from the top-level directory of a repository, which may +not necessarily be the current directory. 
+`GIT_PREFIX` is set as returned by running 'git rev-parse --show-prefix' +from the original current directory. See linkgit:git-rev-parse[1]. diff --git a/Documentation/config/am.txt b/Documentation/config/am.txt new file mode 100644 index 0000000000..5bcad2efb1 --- /dev/null +++ b/Documentation/config/am.txt @@ -0,0 +1,14 @@ +am.keepcr:: + If true, git-am will call git-mailsplit for patches in mbox format + with parameter `--keep-cr`. In this case git-mailsplit will + not remove `\r` from lines ending with `\r\n`. Can be overridden + by giving `--no-keep-cr` from the command line. + See linkgit:git-am[1], linkgit:git-mailsplit[1]. + +am.threeWay:: + By default, `git am` will fail if the patch does not apply cleanly. When + set to true, this setting tells `git am` to fall back on 3-way merge if + the patch records the identity of blobs it is supposed to apply to and + we have those blobs available locally (equivalent to giving the `--3way` + option from the command line). Defaults to `false`. + See linkgit:git-am[1]. diff --git a/Documentation/config/apply.txt b/Documentation/config/apply.txt new file mode 100644 index 0000000000..8fb8ef763d --- /dev/null +++ b/Documentation/config/apply.txt @@ -0,0 +1,11 @@ +apply.ignoreWhitespace:: + When set to 'change', tells 'git apply' to ignore changes in + whitespace, in the same way as the `--ignore-space-change` + option. + When set to one of: no, none, never, false tells 'git apply' to + respect all whitespace differences. + See linkgit:git-apply[1]. + +apply.whitespace:: + Tells 'git apply' how to handle whitespaces, in the same way + as the `--whitespace` option. See linkgit:git-apply[1]. diff --git a/Documentation/config/blame.txt b/Documentation/config/blame.txt new file mode 100644 index 0000000000..67b5c1d1e0 --- /dev/null +++ b/Documentation/config/blame.txt @@ -0,0 +1,21 @@ +blame.blankBoundary:: + Show blank commit object name for boundary commits in + linkgit:git-blame[1]. This option defaults to false. + +blame.coloring:: + This determines the coloring scheme to be applied to blame + output. It can be 'repeatedLines', 'highlightRecent', + or 'none' which is the default. + +blame.date:: + Specifies the format used to output dates in linkgit:git-blame[1]. + If unset the iso format is used. For supported values, + see the discussion of the `--date` option at linkgit:git-log[1]. + +blame.showEmail:: + Show the author email instead of author name in linkgit:git-blame[1]. + This option defaults to false. + +blame.showRoot:: + Do not treat root commits as boundaries in linkgit:git-blame[1]. + This option defaults to false. diff --git a/Documentation/config/branch.txt b/Documentation/config/branch.txt new file mode 100644 index 0000000000..019d60ede2 --- /dev/null +++ b/Documentation/config/branch.txt @@ -0,0 +1,102 @@ +branch.autoSetupMerge:: + Tells 'git branch' and 'git checkout' to set up new branches + so that linkgit:git-pull[1] will appropriately merge from the + starting point branch. Note that even if this option is not set, + this behavior can be chosen per-branch using the `--track` + and `--no-track` options. The valid settings are: `false` -- no + automatic setup is done; `true` -- automatic setup is done when the + starting point is a remote-tracking branch; `always` -- + automatic setup is done when the starting point is either a + local branch or remote-tracking + branch. This option defaults to true. 
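For instance, a sketch of the `always` setting just described (whether this suits a given workflow is a judgment call):

----
[branch]
	# new branches created by `git branch`/`git checkout` from either a
	# local branch or a remote-tracking branch get upstream tracking set up
	autoSetupMerge = always
----

Per-branch `--track` and `--no-track` options still override this choice.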
+ +branch.autoSetupRebase:: + When a new branch is created with 'git branch' or 'git checkout' + that tracks another branch, this variable tells Git to set + up pull to rebase instead of merge (see "branch.<name>.rebase"). + When `never`, rebase is never automatically set to true. + When `local`, rebase is set to true for tracked branches of + other local branches. + When `remote`, rebase is set to true for tracked branches of + remote-tracking branches. + When `always`, rebase will be set to true for all tracking + branches. + See "branch.autoSetupMerge" for details on how to set up a + branch to track another branch. + This option defaults to never. + +branch.sort:: + This variable controls the sort ordering of branches when displayed by + linkgit:git-branch[1]. Without the "--sort=<value>" option provided, the + value of this variable will be used as the default. + See linkgit:git-for-each-ref[1] field names for valid values. + +branch.<name>.remote:: + When on branch <name>, it tells 'git fetch' and 'git push' + which remote to fetch from/push to. The remote to push to + may be overridden with `remote.pushDefault` (for all branches). + The remote to push to, for the current branch, may be further + overridden by `branch.<name>.pushRemote`. If no remote is + configured, or if you are not on any branch, it defaults to + `origin` for fetching and `remote.pushDefault` for pushing. + Additionally, `.` (a period) is the current local repository + (a dot-repository), see `branch.<name>.merge`'s final note below. + +branch.<name>.pushRemote:: + When on branch <name>, it overrides `branch.<name>.remote` for + pushing. It also overrides `remote.pushDefault` for pushing + from branch <name>. When you pull from one place (e.g. your + upstream) and push to another place (e.g. your own publishing + repository), you would want to set `remote.pushDefault` to + specify the remote to push to for all branches, and use this + option to override it for a specific branch. + +branch.<name>.merge:: + Defines, together with branch.<name>.remote, the upstream branch + for the given branch. It tells 'git fetch'/'git pull'/'git rebase' which + branch to merge and can also affect 'git push' (see push.default). + When in branch <name>, it tells 'git fetch' the default + refspec to be marked for merging in FETCH_HEAD. The value is + handled like the remote part of a refspec, and must match a + ref which is fetched from the remote given by + "branch.<name>.remote". + The merge information is used by 'git pull' (which at first calls + 'git fetch') to lookup the default branch for merging. Without + this option, 'git pull' defaults to merge the first refspec fetched. + Specify multiple values to get an octopus merge. + If you wish to setup 'git pull' so that it merges into <name> from + another branch in the local repository, you can point + branch.<name>.merge to the desired branch, and use the relative path + setting `.` (a period) for branch.<name>.remote. + +branch.<name>.mergeOptions:: + Sets default options for merging into branch <name>. The syntax and + supported options are the same as those of linkgit:git-merge[1], but + option values containing whitespace characters are currently not + supported. + +branch.<name>.rebase:: + When true, rebase the branch <name> on top of the fetched branch, + instead of merging the default branch from the default remote when + "git pull" is run. See "pull.rebase" for doing this in a non + branch-specific manner. 
++ +When `merges`, pass the `--rebase-merges` option to 'git rebase' +so that the local merge commits are included in the rebase (see +linkgit:git-rebase[1] for details). ++ +When preserve, also pass `--preserve-merges` along to 'git rebase' +so that locally committed merge commits will not be flattened +by running 'git pull'. ++ +When the value is `interactive`, the rebase is run in interactive mode. ++ +*NOTE*: this is a possibly dangerous operation; do *not* use +it unless you understand the implications (see linkgit:git-rebase[1] +for details). + +branch.<name>.description:: + Branch description, can be edited with + `git branch --edit-description`. Branch description is + automatically added in the format-patch cover letter or + request-pull summary. diff --git a/Documentation/config/browser.txt b/Documentation/config/browser.txt new file mode 100644 index 0000000000..195df207a6 --- /dev/null +++ b/Documentation/config/browser.txt @@ -0,0 +1,9 @@ +browser.<tool>.cmd:: + Specify the command to invoke the specified browser. The + specified command is evaluated in shell with the URLs passed + as arguments. (See linkgit:git-web{litdd}browse[1].) + +browser.<tool>.path:: + Override the path for the given tool that may be used to + browse HTML help (see `-w` option in linkgit:git-help[1]) or a + working repository in gitweb (see linkgit:git-instaweb[1]). diff --git a/Documentation/config/checkout.txt b/Documentation/config/checkout.txt new file mode 100644 index 0000000000..c4118fa196 --- /dev/null +++ b/Documentation/config/checkout.txt @@ -0,0 +1,23 @@ +checkout.defaultRemote:: + When you run 'git checkout <something>' and only have one + remote, it may implicitly fall back on checking out and + tracking e.g. 'origin/<something>'. This stops working as soon + as you have more than one remote with a '<something>' + reference. This setting allows for setting the name of a + preferred remote that should always win when it comes to + disambiguation. The typical use-case is to set this to + `origin`. ++ +Currently this is used by linkgit:git-checkout[1] when 'git checkout +<something>' will checkout the '<something>' branch on another remote, +and by linkgit:git-worktree[1] when 'git worktree add' refers to a +remote branch. This setting might be used for other checkout-like +commands or functionality in the future. + +checkout.optimizeNewBranch:: + Optimizes the performance of "git checkout -b <new_branch>" when + using sparse-checkout. When set to true, git will not update the + repo based on the current sparse-checkout settings. This means it + will not update the skip-worktree bit in the index nor add/remove + files in the working directory to reflect the current sparse checkout + settings nor will it show the local changes. diff --git a/Documentation/config/clean.txt b/Documentation/config/clean.txt new file mode 100644 index 0000000000..a807c925b9 --- /dev/null +++ b/Documentation/config/clean.txt @@ -0,0 +1,3 @@ +clean.requireForce:: + A boolean to make git-clean do nothing unless given -f, + -i or -n. Defaults to true. diff --git a/Documentation/config/color.txt b/Documentation/config/color.txt new file mode 100644 index 0000000000..8375596c44 --- /dev/null +++ b/Documentation/config/color.txt @@ -0,0 +1,201 @@ +color.advice:: + A boolean to enable/disable color in hints (e.g. when a push + failed, see `advice.*` for a list). May be set to `always`, + `false` (or `never`) or `auto` (or `true`), in which case colors + are used only when the error output goes to a terminal. 
If + unset, then the value of `color.ui` is used (`auto` by default). + +color.advice.hint:: + Use customized color for hints. + +color.blame.highlightRecent:: + This can be used to color the metadata of a blame line depending + on age of the line. ++ +This setting should be set to a comma-separated list of color and date settings, +starting and ending with a color, the dates should be set from oldest to newest. +The metadata will be colored given the colors if the the line was introduced +before the given timestamp, overwriting older timestamped colors. ++ +Instead of an absolute timestamp relative timestamps work as well, e.g. +2.weeks.ago is valid to address anything older than 2 weeks. ++ +It defaults to 'blue,12 month ago,white,1 month ago,red', which colors +everything older than one year blue, recent changes between one month and +one year old are kept white, and lines introduced within the last month are +colored red. + +color.blame.repeatedLines:: + Use the customized color for the part of git-blame output that + is repeated meta information per line (such as commit id, + author name, date and timezone). Defaults to cyan. + +color.branch:: + A boolean to enable/disable color in the output of + linkgit:git-branch[1]. May be set to `always`, + `false` (or `never`) or `auto` (or `true`), in which case colors are used + only when the output is to a terminal. If unset, then the + value of `color.ui` is used (`auto` by default). + +color.branch.<slot>:: + Use customized color for branch coloration. `<slot>` is one of + `current` (the current branch), `local` (a local branch), + `remote` (a remote-tracking branch in refs/remotes/), + `upstream` (upstream tracking branch), `plain` (other + refs). + +color.diff:: + Whether to use ANSI escape sequences to add color to patches. + If this is set to `always`, linkgit:git-diff[1], + linkgit:git-log[1], and linkgit:git-show[1] will use color + for all patches. If it is set to `true` or `auto`, those + commands will only use color when output is to the terminal. + If unset, then the value of `color.ui` is used (`auto` by + default). ++ +This does not affect linkgit:git-format-patch[1] or the +'git-diff-{asterisk}' plumbing commands. Can be overridden on the +command line with the `--color[=<when>]` option. + +color.diff.<slot>:: + Use customized color for diff colorization. `<slot>` specifies + which part of the patch to use the specified color, and is one + of `context` (context text - `plain` is a historical synonym), + `meta` (metainformation), `frag` + (hunk header), 'func' (function in hunk header), `old` (removed lines), + `new` (added lines), `commit` (commit headers), `whitespace` + (highlighting whitespace errors), `oldMoved` (deleted lines), + `newMoved` (added lines), `oldMovedDimmed`, `oldMovedAlternative`, + `oldMovedAlternativeDimmed`, `newMovedDimmed`, `newMovedAlternative` + `newMovedAlternativeDimmed` (See the '<mode>' + setting of '--color-moved' in linkgit:git-diff[1] for details), + `contextDimmed`, `oldDimmed`, `newDimmed`, `contextBold`, + `oldBold`, and `newBold` (see linkgit:git-range-diff[1] for details). + +color.decorate.<slot>:: + Use customized color for 'git log --decorate' output. `<slot>` is one + of `branch`, `remoteBranch`, `tag`, `stash` or `HEAD` for local + branches, remote-tracking branches, tags, stash and HEAD, respectively + and `grafted` for grafted commits. + +color.grep:: + When set to `always`, always highlight matches. When `false` (or + `never`), never. 
When set to `true` or `auto`, use color only + when the output is written to the terminal. If unset, then the + value of `color.ui` is used (`auto` by default). + +color.grep.<slot>:: + Use customized color for grep colorization. `<slot>` specifies which + part of the line to use the specified color, and is one of ++ +-- +`context`;; + non-matching text in context lines (when using `-A`, `-B`, or `-C`) +`filename`;; + filename prefix (when not using `-h`) +`function`;; + function name lines (when using `-p`) +`lineNumber`;; + line number prefix (when using `-n`) +`column`;; + column number prefix (when using `--column`) +`match`;; + matching text (same as setting `matchContext` and `matchSelected`) +`matchContext`;; + matching text in context lines +`matchSelected`;; + matching text in selected lines +`selected`;; + non-matching text in selected lines +`separator`;; + separators between fields on a line (`:`, `-`, and `=`) + and between hunks (`--`) +-- + +color.interactive:: + When set to `always`, always use colors for interactive prompts + and displays (such as those used by "git-add --interactive" and + "git-clean --interactive"). When false (or `never`), never. + When set to `true` or `auto`, use colors only when the output is + to the terminal. If unset, then the value of `color.ui` is + used (`auto` by default). + +color.interactive.<slot>:: + Use customized color for 'git add --interactive' and 'git clean + --interactive' output. `<slot>` may be `prompt`, `header`, `help` + or `error`, for four distinct types of normal output from + interactive commands. + +color.pager:: + A boolean to enable/disable colored output when the pager is in + use (default is true). + +color.push:: + A boolean to enable/disable color in push errors. May be set to + `always`, `false` (or `never`) or `auto` (or `true`), in which + case colors are used only when the error output goes to a terminal. + If unset, then the value of `color.ui` is used (`auto` by default). + +color.push.error:: + Use customized color for push errors. + +color.remote:: + If set, keywords at the start of the line are highlighted. The + keywords are "error", "warning", "hint" and "success", and are + matched case-insensitively. May be set to `always`, `false` (or + `never`) or `auto` (or `true`). If unset, then the value of + `color.ui` is used (`auto` by default). + +color.remote.<slot>:: + Use customized color for each remote keyword. `<slot>` may be + `hint`, `warning`, `success` or `error` which match the + corresponding keyword. + +color.showBranch:: + A boolean to enable/disable color in the output of + linkgit:git-show-branch[1]. May be set to `always`, + `false` (or `never`) or `auto` (or `true`), in which case colors are used + only when the output is to a terminal. If unset, then the + value of `color.ui` is used (`auto` by default). + +color.status:: + A boolean to enable/disable color in the output of + linkgit:git-status[1]. May be set to `always`, + `false` (or `never`) or `auto` (or `true`), in which case colors are used + only when the output is to a terminal. If unset, then the + value of `color.ui` is used (`auto` by default). + +color.status.<slot>:: + Use customized color for status colorization. 
`<slot>` is + one of `header` (the header text of the status message), + `added` or `updated` (files which are added but not committed), + `changed` (files which are changed but not added in the index), + `untracked` (files which are not tracked by Git), + `branch` (the current branch), + `nobranch` (the color the 'no branch' warning is shown in, defaulting + to red), + `localBranch` or `remoteBranch` (the local and remote branch names, + respectively, when branch and tracking information is displayed in the + status short-format), or + `unmerged` (files which have unmerged changes). + +color.transport:: + A boolean to enable/disable color when pushes are rejected. May be + set to `always`, `false` (or `never`) or `auto` (or `true`), in which + case colors are used only when the error output goes to a terminal. + If unset, then the value of `color.ui` is used (`auto` by default). + +color.transport.rejected:: + Use customized color when a push was rejected. + +color.ui:: + This variable determines the default value for variables such + as `color.diff` and `color.grep` that control the use of color + per command family. Its scope will expand as more commands learn + configuration to set a default for the `--color` option. Set it + to `false` or `never` if you prefer Git commands not to use + color unless enabled explicitly with some other configuration + or the `--color` option. Set it to `always` if you want all + output not intended for machine consumption to use color, to + `true` or `auto` (this is the default since Git 1.8.4) if you + want such output to use color when written to the terminal. diff --git a/Documentation/config/column.txt b/Documentation/config/column.txt new file mode 100644 index 0000000000..76aa2f29dc --- /dev/null +++ b/Documentation/config/column.txt @@ -0,0 +1,55 @@ +column.ui:: + Specify whether supported commands should output in columns. + This variable consists of a list of tokens separated by spaces + or commas: ++ +These options control when the feature should be enabled +(defaults to 'never'): ++ +-- +`always`;; + always show in columns +`never`;; + never show in columns +`auto`;; + show in columns if the output is to the terminal +-- ++ +These options control layout (defaults to 'column'). Setting any +of these implies 'always' if none of 'always', 'never', or 'auto' are +specified. ++ +-- +`column`;; + fill columns before rows +`row`;; + fill rows before columns +`plain`;; + show in one column +-- ++ +Finally, these options can be combined with a layout option (defaults +to 'nodense'): ++ +-- +`dense`;; + make unequal size columns to utilize more space +`nodense`;; + make equal size columns +-- + +column.branch:: + Specify whether to output branch listing in `git branch` in columns. + See `column.ui` for details. + +column.clean:: + Specify the layout when list items in `git clean -i`, which always + shows files and directories in columns. See `column.ui` for details. + +column.status:: + Specify whether to output untracked files in `git status` in columns. + See `column.ui` for details. + +column.tag:: + Specify whether to output tag listing in `git tag` in columns. + See `column.ui` for details. diff --git a/Documentation/config/commit.txt b/Documentation/config/commit.txt new file mode 100644 index 0000000000..2c95573930 --- /dev/null +++ b/Documentation/config/commit.txt @@ -0,0 +1,29 @@ +commit.cleanup:: + This setting overrides the default of the `--cleanup` option in + `git commit`. See linkgit:git-commit[1] for details. 
Changing the + default can be useful when you always want to keep lines that begin + with comment character `#` in your log message, in which case you + would do `git config commit.cleanup whitespace` (note that you will + have to remove the help lines that begin with `#` in the commit log + template yourself, if you do this). + +commit.gpgSign:: + + A boolean to specify whether all commits should be GPG signed. + Use of this option when doing operations such as rebase can + result in a large number of commits being signed. It may be + convenient to use an agent to avoid typing your GPG passphrase + several times. + +commit.status:: + A boolean to enable/disable inclusion of status information in the + commit message template when using an editor to prepare the commit + message. Defaults to true. + +commit.template:: + Specify the pathname of a file to use as the template for + new commit messages. + +commit.verbose:: + A boolean or int to specify the level of verbose with `git commit`. + See linkgit:git-commit[1]. diff --git a/Documentation/config/completion.txt b/Documentation/config/completion.txt new file mode 100644 index 0000000000..4d99bf33c9 --- /dev/null +++ b/Documentation/config/completion.txt @@ -0,0 +1,7 @@ +completion.commands:: + This is only used by git-completion.bash to add or remove + commands from the list of completed commands. Normally only + porcelain commands and a few select others are completed. You + can add more commands, separated by space, in this + variable. Prefixing the command with '-' will remove it from + the existing list. diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt new file mode 100644 index 0000000000..d0e6635fe0 --- /dev/null +++ b/Documentation/config/core.txt @@ -0,0 +1,600 @@ +core.fileMode:: + Tells Git if the executable bit of files in the working tree + is to be honored. ++ +Some filesystems lose the executable bit when a file that is +marked as executable is checked out, or checks out a +non-executable file with executable bit on. +linkgit:git-clone[1] or linkgit:git-init[1] probe the filesystem +to see if it handles the executable bit correctly +and this variable is automatically set as necessary. ++ +A repository, however, may be on a filesystem that handles +the filemode correctly, and this variable is set to 'true' +when created, but later may be made accessible from another +environment that loses the filemode (e.g. exporting ext4 via +CIFS mount, visiting a Cygwin created repository with +Git for Windows or Eclipse). +In such a case it may be necessary to set this variable to 'false'. +See linkgit:git-update-index[1]. ++ +The default is true (when core.filemode is not specified in the config file). + +core.hideDotFiles:: + (Windows-only) If true, mark newly-created directories and files whose + name starts with a dot as hidden. If 'dotGitOnly', only the `.git/` + directory is hidden, but no other files starting with a dot. The + default mode is 'dotGitOnly'. + +core.ignoreCase:: + Internal variable which enables various workarounds to enable + Git to work better on filesystems that are not case sensitive, + like APFS, HFS+, FAT, NTFS, etc. For example, if a directory listing + finds "makefile" when Git expects "Makefile", Git will assume + it is really the same file, and continue to remember it as + "Makefile". ++ +The default is false, except linkgit:git-clone[1] or linkgit:git-init[1] +will probe and set core.ignoreCase true if appropriate when the repository +is created. 
++ +Git relies on the proper configuration of this variable for your operating +and file system. Modifying this value may result in unexpected behavior. + +core.precomposeUnicode:: + This option is only used by Mac OS implementation of Git. + When core.precomposeUnicode=true, Git reverts the unicode decomposition + of filenames done by Mac OS. This is useful when sharing a repository + between Mac OS and Linux or Windows. + (Git for Windows 1.7.10 or higher is needed, or Git under cygwin 1.7). + When false, file names are handled fully transparent by Git, + which is backward compatible with older versions of Git. + +core.protectHFS:: + If set to true, do not allow checkout of paths that would + be considered equivalent to `.git` on an HFS+ filesystem. + Defaults to `true` on Mac OS, and `false` elsewhere. + +core.protectNTFS:: + If set to true, do not allow checkout of paths that would + cause problems with the NTFS filesystem, e.g. conflict with + 8.3 "short" names. + Defaults to `true` on Windows, and `false` elsewhere. + +core.fsmonitor:: + If set, the value of this variable is used as a command which + will identify all files that may have changed since the + requested date/time. This information is used to speed up git by + avoiding unnecessary processing of files that have not changed. + See the "fsmonitor-watchman" section of linkgit:githooks[5]. + +core.trustctime:: + If false, the ctime differences between the index and the + working tree are ignored; useful when the inode change time + is regularly modified by something outside Git (file system + crawlers and some backup systems). + See linkgit:git-update-index[1]. True by default. + +core.splitIndex:: + If true, the split-index feature of the index will be used. + See linkgit:git-update-index[1]. False by default. + +core.untrackedCache:: + Determines what to do about the untracked cache feature of the + index. It will be kept, if this variable is unset or set to + `keep`. It will automatically be added if set to `true`. And + it will automatically be removed, if set to `false`. Before + setting it to `true`, you should check that mtime is working + properly on your system. + See linkgit:git-update-index[1]. `keep` by default. + +core.checkStat:: + When missing or is set to `default`, many fields in the stat + structure are checked to detect if a file has been modified + since Git looked at it. When this configuration variable is + set to `minimal`, sub-second part of mtime and ctime, the + uid and gid of the owner of the file, the inode number (and + the device number, if Git was compiled to use it), are + excluded from the check among these fields, leaving only the + whole-second part of mtime (and ctime, if `core.trustCtime` + is set) and the filesize to be checked. ++ +There are implementations of Git that do not leave usable values in +some fields (e.g. JGit); by excluding these fields from the +comparison, the `minimal` mode may help interoperability when the +same repository is used by these other systems at the same time. + +core.quotePath:: + Commands that output paths (e.g. 'ls-files', 'diff'), will + quote "unusual" characters in the pathname by enclosing the + pathname in double-quotes and escaping those characters with + backslashes in the same way C escapes control characters (e.g. + `\t` for TAB, `\n` for LF, `\\` for backslash) or bytes with + values larger than 0x80 (e.g. octal `\302\265` for "micro" in + UTF-8). If this variable is set to false, bytes higher than + 0x80 are not considered "unusual" any more. 
Double-quotes, + backslash and control characters are always escaped regardless + of the setting of this variable. A simple space character is + not considered "unusual". Many commands can output pathnames + completely verbatim using the `-z` option. The default value + is true. + +core.eol:: + Sets the line ending type to use in the working directory for + files that have the `text` property set when core.autocrlf is false. + Alternatives are 'lf', 'crlf' and 'native', which uses the platform's + native line ending. The default value is `native`. See + linkgit:gitattributes[5] for more information on end-of-line + conversion. + +core.safecrlf:: + If true, makes Git check if converting `CRLF` is reversible when + end-of-line conversion is active. Git will verify if a command + modifies a file in the work tree either directly or indirectly. + For example, committing a file followed by checking out the + same file should yield the original file in the work tree. If + this is not the case for the current setting of + `core.autocrlf`, Git will reject the file. The variable can + be set to "warn", in which case Git will only warn about an + irreversible conversion but continue the operation. ++ +CRLF conversion bears a slight chance of corrupting data. +When it is enabled, Git will convert CRLF to LF during commit and LF to +CRLF during checkout. A file that contains a mixture of LF and +CRLF before the commit cannot be recreated by Git. For text +files this is the right thing to do: it corrects line endings +such that we have only LF line endings in the repository. +But for binary files that are accidentally classified as text the +conversion can corrupt data. ++ +If you recognize such corruption early you can easily fix it by +setting the conversion type explicitly in .gitattributes. Right +after committing you still have the original file in your work +tree and this file is not yet corrupted. You can explicitly tell +Git that this file is binary and Git will handle the file +appropriately. ++ +Unfortunately, the desired effect of cleaning up text files with +mixed line endings and the undesired effect of corrupting binary +files cannot be distinguished. In both cases CRLFs are removed +in an irreversible way. For text files this is the right thing +to do because CRLFs are line endings, while for binary files +converting CRLFs corrupts data. ++ +Note, this safety check does not mean that a checkout will generate a +file identical to the original file for a different setting of +`core.eol` and `core.autocrlf`, but only for the current one. For +example, a text file with `LF` would be accepted with `core.eol=lf` +and could later be checked out with `core.eol=crlf`, in which case the +resulting file would contain `CRLF`, although the original file +contained `LF`. However, in both work trees the line endings would be +consistent, that is either all `LF` or all `CRLF`, but never mixed. A +file with mixed line endings would be reported by the `core.safecrlf` +mechanism. + +core.autocrlf:: + Setting this variable to "true" is the same as setting + the `text` attribute to "auto" on all files and core.eol to "crlf". + Set to true if you want to have `CRLF` line endings in your + working directory and the repository has LF line endings. + This variable can be set to 'input', + in which case no output conversion is performed. 
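A common line-ending sketch for a Unix-like checkout, using only values documented above; adjust to the repository's needs:

----
[core]
	autocrlf = input    # normalize CRLF to LF on commit, no conversion on checkout
	safecrlf = warn     # warn (rather than refuse) when a conversion is irreversible
----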
+ +core.checkRoundtripEncoding:: + A comma and/or whitespace separated list of encodings that Git + performs UTF-8 round trip checks on if they are used in an + `working-tree-encoding` attribute (see linkgit:gitattributes[5]). + The default value is `SHIFT-JIS`. + +core.symlinks:: + If false, symbolic links are checked out as small plain files that + contain the link text. linkgit:git-update-index[1] and + linkgit:git-add[1] will not change the recorded type to regular + file. Useful on filesystems like FAT that do not support + symbolic links. ++ +The default is true, except linkgit:git-clone[1] or linkgit:git-init[1] +will probe and set core.symlinks false if appropriate when the repository +is created. + +core.gitProxy:: + A "proxy command" to execute (as 'command host port') instead + of establishing direct connection to the remote server when + using the Git protocol for fetching. If the variable value is + in the "COMMAND for DOMAIN" format, the command is applied only + on hostnames ending with the specified domain string. This variable + may be set multiple times and is matched in the given order; + the first match wins. ++ +Can be overridden by the `GIT_PROXY_COMMAND` environment variable +(which always applies universally, without the special "for" +handling). ++ +The special string `none` can be used as the proxy command to +specify that no proxy be used for a given domain pattern. +This is useful for excluding servers inside a firewall from +proxy use, while defaulting to a common proxy for external domains. + +core.sshCommand:: + If this variable is set, `git fetch` and `git push` will + use the specified command instead of `ssh` when they need to + connect to a remote system. The command is in the same form as + the `GIT_SSH_COMMAND` environment variable and is overridden + when the environment variable is set. + +core.ignoreStat:: + If true, Git will avoid using lstat() calls to detect if files have + changed by setting the "assume-unchanged" bit for those tracked files + which it has updated identically in both the index and working tree. ++ +When files are modified outside of Git, the user will need to stage +the modified files explicitly (e.g. see 'Examples' section in +linkgit:git-update-index[1]). +Git will not normally detect changes to those files. ++ +This is useful on systems where lstat() calls are very slow, such as +CIFS/Microsoft Windows. ++ +False by default. + +core.preferSymlinkRefs:: + Instead of the default "symref" format for HEAD + and other symbolic reference files, use symbolic links. + This is sometimes needed to work with old scripts that + expect HEAD to be a symbolic link. + +core.alternateRefsCommand:: + When advertising tips of available history from an alternate, use the shell to + execute the specified command instead of linkgit:git-for-each-ref[1]. The + first argument is the absolute path of the alternate. Output must contain one + hex object id per line (i.e., the same as produced by `git for-each-ref + --format='%(objectname)'`). ++ +Note that you cannot generally put `git for-each-ref` directly into the config +value, as it does not take a repository path as an argument (but you can wrap +the command above in a shell script). + +core.alternateRefsPrefixes:: + When listing references from an alternate, list only references that begin + with the given prefix. Prefixes match as if they were given as arguments to + linkgit:git-for-each-ref[1]. To list multiple prefixes, separate them with + whitespace. 
If `core.alternateRefsCommand` is set, setting + `core.alternateRefsPrefixes` has no effect. + +core.bare:: + If true this repository is assumed to be 'bare' and has no + working directory associated with it. If this is the case a + number of commands that require a working directory will be + disabled, such as linkgit:git-add[1] or linkgit:git-merge[1]. ++ +This setting is automatically guessed by linkgit:git-clone[1] or +linkgit:git-init[1] when the repository was created. By default a +repository that ends in "/.git" is assumed to be not bare (bare = +false), while all other repositories are assumed to be bare (bare += true). + +core.worktree:: + Set the path to the root of the working tree. + If `GIT_COMMON_DIR` environment variable is set, core.worktree + is ignored and not used for determining the root of working tree. + This can be overridden by the `GIT_WORK_TREE` environment + variable and the `--work-tree` command-line option. + The value can be an absolute path or relative to the path to + the .git directory, which is either specified by --git-dir + or GIT_DIR, or automatically discovered. + If --git-dir or GIT_DIR is specified but none of + --work-tree, GIT_WORK_TREE and core.worktree is specified, + the current working directory is regarded as the top level + of your working tree. ++ +Note that this variable is honored even when set in a configuration +file in a ".git" subdirectory of a directory and its value differs +from the latter directory (e.g. "/path/to/.git/config" has +core.worktree set to "/different/path"), which is most likely a +misconfiguration. Running Git commands in the "/path/to" directory will +still use "/different/path" as the root of the work tree and can cause +confusion unless you know what you are doing (e.g. you are creating a +read-only snapshot of the same index to a location different from the +repository's usual working tree). + +core.logAllRefUpdates:: + Enable the reflog. Updates to a ref <ref> is logged to the file + "`$GIT_DIR/logs/<ref>`", by appending the new and old + SHA-1, the date/time and the reason of the update, but + only when the file exists. If this configuration + variable is set to `true`, missing "`$GIT_DIR/logs/<ref>`" + file is automatically created for branch heads (i.e. under + `refs/heads/`), remote refs (i.e. under `refs/remotes/`), + note refs (i.e. under `refs/notes/`), and the symbolic ref `HEAD`. + If it is set to `always`, then a missing reflog is automatically + created for any ref under `refs/`. ++ +This information can be used to determine what commit +was the tip of a branch "2 days ago". ++ +This value is true by default in a repository that has +a working directory associated with it, and false by +default in a bare repository. + +core.repositoryFormatVersion:: + Internal variable identifying the repository format and layout + version. + +core.sharedRepository:: + When 'group' (or 'true'), the repository is made shareable between + several users in a group (making sure all the files and objects are + group-writable). When 'all' (or 'world' or 'everybody'), the + repository will be readable by all users, additionally to being + group-shareable. When 'umask' (or 'false'), Git will use permissions + reported by umask(2). When '0xxx', where '0xxx' is an octal number, + files in the repository will have this mode value. '0xxx' will override + user's umask value (whereas the other options will only override + requested parts of the user's umask value). 
Examples: '0660' will make + the repo read/write-able for the owner and group, but inaccessible to + others (equivalent to 'group' unless umask is e.g. '0022'). '0640' is a + repository that is group-readable but not group-writable. + See linkgit:git-init[1]. False by default. + +core.warnAmbiguousRefs:: + If true, Git will warn you if the ref name you passed it is ambiguous + and might match multiple refs in the repository. True by default. + +core.compression:: + An integer -1..9, indicating a default compression level. + -1 is the zlib default. 0 means no compression, + and 1..9 are various speed/size tradeoffs, 9 being slowest. + If set, this provides a default to other compression variables, + such as `core.looseCompression` and `pack.compression`. + +core.looseCompression:: + An integer -1..9, indicating the compression level for objects that + are not in a pack file. -1 is the zlib default. 0 means no + compression, and 1..9 are various speed/size tradeoffs, 9 being + slowest. If not set, defaults to core.compression. If that is + not set, defaults to 1 (best speed). + +core.packedGitWindowSize:: + Number of bytes of a pack file to map into memory in a + single mapping operation. Larger window sizes may allow + your system to process a smaller number of large pack files + more quickly. Smaller window sizes will negatively affect + performance due to increased calls to the operating system's + memory manager, but may improve performance when accessing + a large number of large pack files. ++ +Default is 1 MiB if NO_MMAP was set at compile time, otherwise 32 +MiB on 32 bit platforms and 1 GiB on 64 bit platforms. This should +be reasonable for all users/operating systems. You probably do +not need to adjust this value. ++ +Common unit suffixes of 'k', 'm', or 'g' are supported. + +core.packedGitLimit:: + Maximum number of bytes to map simultaneously into memory + from pack files. If Git needs to access more than this many + bytes at once to complete an operation it will unmap existing + regions to reclaim virtual address space within the process. ++ +Default is 256 MiB on 32 bit platforms and 32 TiB (effectively +unlimited) on 64 bit platforms. +This should be reasonable for all users/operating systems, except on +the largest projects. You probably do not need to adjust this value. ++ +Common unit suffixes of 'k', 'm', or 'g' are supported. + +core.deltaBaseCacheLimit:: + Maximum number of bytes to reserve for caching base objects + that may be referenced by multiple deltified objects. By storing the + entire decompressed base objects in a cache Git is able + to avoid unpacking and decompressing frequently used base + objects multiple times. ++ +Default is 96 MiB on all platforms. This should be reasonable +for all users/operating systems, except on the largest projects. +You probably do not need to adjust this value. ++ +Common unit suffixes of 'k', 'm', or 'g' are supported. + +core.bigFileThreshold:: + Files larger than this size are stored deflated, without + attempting delta compression. Storing large files without + delta compression avoids excessive memory usage, at the + slight expense of increased disk usage. Additionally files + larger than this size are always treated as binary. ++ +Default is 512 MiB on all platforms. This should be reasonable +for most projects as source code and other text files can still +be delta compressed, but larger binary media files won't be. ++ +Common unit suffixes of 'k', 'm', or 'g' are supported. 
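As a sketch of how the sizing knobs above combine, a repository that carries large media files might be tuned like this (the thresholds are arbitrary examples, not recommendations):

    # Treat anything over 100 MiB as binary and skip delta compression for it.
    git config core.bigFileThreshold 100m
    # Spend more CPU to shrink the remaining text-like objects.
    git config core.compression 9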
+ +core.excludesFile:: + Specifies the pathname to the file that contains patterns to + describe paths that are not meant to be tracked, in addition + to '.gitignore' (per-directory) and '.git/info/exclude'. + Defaults to `$XDG_CONFIG_HOME/git/ignore`. + If `$XDG_CONFIG_HOME` is either not set or empty, `$HOME/.config/git/ignore` + is used instead. See linkgit:gitignore[5]. + +core.askPass:: + Some commands (e.g. svn and http interfaces) that interactively + ask for a password can be told to use an external program given + via the value of this variable. Can be overridden by the `GIT_ASKPASS` + environment variable. If not set, fall back to the value of the + `SSH_ASKPASS` environment variable or, failing that, a simple password + prompt. The external program shall be given a suitable prompt as + command-line argument and write the password on its STDOUT. + +core.attributesFile:: + In addition to '.gitattributes' (per-directory) and + '.git/info/attributes', Git looks into this file for attributes + (see linkgit:gitattributes[5]). Path expansions are made the same + way as for `core.excludesFile`. Its default value is + `$XDG_CONFIG_HOME/git/attributes`. If `$XDG_CONFIG_HOME` is either not + set or empty, `$HOME/.config/git/attributes` is used instead. + +core.hooksPath:: + By default Git will look for your hooks in the + '$GIT_DIR/hooks' directory. Set this to different path, + e.g. '/etc/git/hooks', and Git will try to find your hooks in + that directory, e.g. '/etc/git/hooks/pre-receive' instead of + in '$GIT_DIR/hooks/pre-receive'. ++ +The path can be either absolute or relative. A relative path is +taken as relative to the directory where the hooks are run (see +the "DESCRIPTION" section of linkgit:githooks[5]). ++ +This configuration variable is useful in cases where you'd like to +centrally configure your Git hooks instead of configuring them on a +per-repository basis, or as a more flexible and centralized +alternative to having an `init.templateDir` where you've changed +default hooks. + +core.editor:: + Commands such as `commit` and `tag` that let you edit + messages by launching an editor use the value of this + variable when it is set, and the environment variable + `GIT_EDITOR` is not set. See linkgit:git-var[1]. + +core.commentChar:: + Commands such as `commit` and `tag` that let you edit + messages consider a line that begins with this character + commented, and removes them after the editor returns + (default '#'). ++ +If set to "auto", `git-commit` would select a character that is not +the beginning character of any line in existing commit messages. + +core.filesRefLockTimeout:: + The length of time, in milliseconds, to retry when trying to + lock an individual reference. Value 0 means not to retry at + all; -1 means to try indefinitely. Default is 100 (i.e., + retry for 100ms). + +core.packedRefsTimeout:: + The length of time, in milliseconds, to retry when trying to + lock the `packed-refs` file. Value 0 means not to retry at + all; -1 means to try indefinitely. Default is 1000 (i.e., + retry for 1 second). + +core.pager:: + Text viewer for use by Git commands (e.g., 'less'). The value + is meant to be interpreted by the shell. The order of preference + is the `$GIT_PAGER` environment variable, then `core.pager` + configuration, then `$PAGER`, and then the default chosen at + compile time (usually 'less'). ++ +When the `LESS` environment variable is unset, Git sets it to `FRX` +(if `LESS` environment variable is set, Git does not change it at +all). 
If you want to selectively override Git's default setting +for `LESS`, you can set `core.pager` to e.g. `less -S`. This will +be passed to the shell by Git, which will translate the final +command to `LESS=FRX less -S`. The environment does not set the +`S` option but the command line does, instructing less to truncate +long lines. Similarly, setting `core.pager` to `less -+F` will +deactivate the `F` option specified by the environment from the +command-line, deactivating the "quit if one screen" behavior of +`less`. One can specifically activate some flags for particular +commands: for example, setting `pager.blame` to `less -S` enables +line truncation only for `git blame`. ++ +Likewise, when the `LV` environment variable is unset, Git sets it +to `-c`. You can override this setting by exporting `LV` with +another value or setting `core.pager` to `lv +c`. + +core.whitespace:: + A comma separated list of common whitespace problems to + notice. 'git diff' will use `color.diff.whitespace` to + highlight them, and 'git apply --whitespace=error' will + consider them as errors. You can prefix `-` to disable + any of them (e.g. `-trailing-space`): ++ +* `blank-at-eol` treats trailing whitespaces at the end of the line + as an error (enabled by default). +* `space-before-tab` treats a space character that appears immediately + before a tab character in the initial indent part of the line as an + error (enabled by default). +* `indent-with-non-tab` treats a line that is indented with space + characters instead of the equivalent tabs as an error (not enabled by + default). +* `tab-in-indent` treats a tab character in the initial indent part of + the line as an error (not enabled by default). +* `blank-at-eof` treats blank lines added at the end of file as an error + (enabled by default). +* `trailing-space` is a short-hand to cover both `blank-at-eol` and + `blank-at-eof`. +* `cr-at-eol` treats a carriage-return at the end of line as + part of the line terminator, i.e. with it, `trailing-space` + does not trigger if the character before such a carriage-return + is not a whitespace (not enabled by default). +* `tabwidth=<n>` tells how many character positions a tab occupies; this + is relevant for `indent-with-non-tab` and when Git fixes `tab-in-indent` + errors. The default tab width is 8. Allowed values are 1 to 63. + +core.fsyncObjectFiles:: + This boolean will enable 'fsync()' when writing object files. ++ +This is a total waste of time and effort on a filesystem that orders +data writes properly, but can be useful for filesystems that do not use +journalling (traditional UNIX filesystems) or that only journal metadata +and not file contents (OS X's HFS+, or Linux ext3 with "data=writeback"). + +core.preloadIndex:: + Enable parallel index preload for operations like 'git diff' ++ +This can speed up operations like 'git diff' and 'git status' especially +on filesystems like NFS that have weak caching semantics and thus +relatively high IO latencies. When enabled, Git will do the +index comparison to the filesystem data in parallel, allowing +overlapping IO's. Defaults to true. + +core.unsetenvvars:: + Windows-only: comma-separated list of environment variables' + names that need to be unset before spawning any other process. + Defaults to `PERL5LIB` to account for the fact that Git for + Windows insists on using its own Perl interpreter. 
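For example, the pager and whitespace settings above could be combined as follows (illustrative values):

    # Truncate long lines in every pager invocation...
    git config core.pager 'less -S'
    # ...or only for blame output.
    git config pager.blame 'less -S'
    # Also flag space-indented lines in addition to the default checks.
    git config core.whitespace 'trailing-space,space-before-tab,indent-with-non-tab'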
+ +core.createObject:: + You can set this to 'link', in which case a hardlink followed by + a delete of the source are used to make sure that object creation + will not overwrite existing objects. ++ +On some file system/operating system combinations, this is unreliable. +Set this config setting to 'rename' there; however, this will remove the +check that makes sure that existing object files will not get overwritten. + +core.notesRef:: + When showing commit messages, also show notes which are stored in + the given ref. The ref must be fully qualified. If the given + ref does not exist, it is not an error but means that no + notes should be printed. ++ +This setting defaults to "refs/notes/commits", and it can be overridden by +the `GIT_NOTES_REF` environment variable. See linkgit:git-notes[1]. + +core.commitGraph:: + If true, then git will read the commit-graph file (if it exists) + to parse the graph structure of commits. Defaults to false. See + linkgit:git-commit-graph[1] for more information. + +core.useReplaceRefs:: + If set to `false`, behave as if the `--no-replace-objects` + option was given on the command line. See linkgit:git[1] and + linkgit:git-replace[1] for more information. + +core.multiPackIndex:: + Use the multi-pack-index file to track multiple packfiles using a + single index. See link:technical/multi-pack-index.html[the + multi-pack-index design document]. + +core.sparseCheckout:: + Enable "sparse checkout" feature. See section "Sparse checkout" in + linkgit:git-read-tree[1] for more information. + +core.abbrev:: + Set the length object names are abbreviated to. If + unspecified or set to "auto", an appropriate value is + computed based on the approximate number of packed objects + in your repository, which hopefully is enough for + abbreviated object names to stay unique for some time. + The minimum length is 4. diff --git a/Documentation/config/credential.txt b/Documentation/config/credential.txt new file mode 100644 index 0000000000..60fb3189e1 --- /dev/null +++ b/Documentation/config/credential.txt @@ -0,0 +1,26 @@ +credential.helper:: + Specify an external helper to be called when a username or + password credential is needed; the helper may consult external + storage to avoid prompting the user for the credentials. Note + that multiple helpers may be defined. See linkgit:gitcredentials[7] + for details. + +credential.useHttpPath:: + When acquiring credentials, consider the "path" component of an http + or https URL to be important. Defaults to false. See + linkgit:gitcredentials[7] for more information. + +credential.username:: + If no username is set for a network authentication, use this username + by default. See credential.<url>.* below, and + linkgit:gitcredentials[7]. + +credential.<url>.*:: + Any of the credential.* options above can be applied selectively to + some credentials. For example "credential.https://example.com.username" + would set the default username only for https connections to + example.com. See linkgit:gitcredentials[7] for details on how URLs are + matched. + +credentialCache.ignoreSIGHUP:: + Tell git-credential-cache--daemon to ignore SIGHUP, instead of quitting. diff --git a/Documentation/diff-config.txt b/Documentation/config/diff.txt index 85bca83c30..e48bb987d7 100644 --- a/Documentation/diff-config.txt +++ b/Documentation/config/diff.txt @@ -177,7 +177,15 @@ diff.tool:: Any other value is treated as a custom diff tool and requires that a corresponding difftool.<tool>.cmd variable is defined.
-include::mergetools-diff.txt[] +diff.guitool:: + Controls which diff tool is used by linkgit:git-difftool[1] when + the -g/--gui flag is specified. This variable overrides the value + configured in `merge.guitool`. The list below shows the valid + built-in values. Any other value is treated as a custom diff tool + and requires that a corresponding difftool.<guitool>.cmd variable + is defined. + +include::../mergetools-diff.txt[] diff.indentHeuristic:: Set this option to `true` to enable experimental heuristics diff --git a/Documentation/config/difftool.txt b/Documentation/config/difftool.txt new file mode 100644 index 0000000000..6762594480 --- /dev/null +++ b/Documentation/config/difftool.txt @@ -0,0 +1,14 @@ +difftool.<tool>.path:: + Override the path for the given tool. This is useful in case + your tool is not in the PATH. + +difftool.<tool>.cmd:: + Specify the command to invoke the specified diff tool. + The specified command is evaluated in shell with the following + variables available: 'LOCAL' is set to the name of the temporary + file containing the contents of the diff pre-image and 'REMOTE' + is set to the name of the temporary file containing the contents + of the diff post-image. + +difftool.prompt:: + Prompt before each invocation of the diff tool. diff --git a/Documentation/config/fastimport.txt b/Documentation/config/fastimport.txt new file mode 100644 index 0000000000..c1166e330d --- /dev/null +++ b/Documentation/config/fastimport.txt @@ -0,0 +1,8 @@ +fastimport.unpackLimit:: + If the number of objects imported by linkgit:git-fast-import[1] + is below this limit, then the objects will be unpacked into + loose object files. However if the number of imported objects + equals or exceeds this limit then the pack will be stored as a + pack. Storing the pack from a fast-import can make the import + operation complete faster, especially on slow filesystems. If + not set, the value of `transfer.unpackLimit` is used instead. diff --git a/Documentation/config/fetch.txt b/Documentation/config/fetch.txt new file mode 100644 index 0000000000..cbfad6cdbb --- /dev/null +++ b/Documentation/config/fetch.txt @@ -0,0 +1,65 @@ +fetch.recurseSubmodules:: + This option can be either set to a boolean value or to 'on-demand'. + Setting it to a boolean changes the behavior of fetch and pull to + unconditionally recurse into submodules when set to true or to not + recurse at all when set to false. When set to 'on-demand' (the default + value), fetch and pull will only recurse into a populated submodule + when its superproject retrieves a commit that updates the submodule's + reference. + +fetch.fsckObjects:: + If it is set to true, git-fetch-pack will check all fetched + objects. See `transfer.fsckObjects` for what's + checked. Defaults to false. If not set, the value of + `transfer.fsckObjects` is used instead. + +fetch.fsck.<msg-id>:: + Acts like `fsck.<msg-id>`, but is used by + linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See + the `fsck.<msg-id>` documentation for details. + +fetch.fsck.skipList:: + Acts like `fsck.skipList`, but is used by + linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See + the `fsck.skipList` documentation for details. + +fetch.unpackLimit:: + If the number of objects fetched over the Git native + transfer is below this + limit, then the objects will be unpacked into loose object + files. However if the number of received objects equals or + exceeds this limit then the received pack will be stored as + a pack, after adding any missing delta bases. 
Storing the + pack from a fetch can make the fetch operation complete faster, + especially on slow filesystems. If not set, the value of + `transfer.unpackLimit` is used instead. + +fetch.prune:: + If true, fetch will automatically behave as if the `--prune` + option was given on the command line. See also `remote.<name>.prune` + and the PRUNING section of linkgit:git-fetch[1]. + +fetch.pruneTags:: + If true, fetch will automatically behave as if the + `refs/tags/*:refs/tags/*` refspec was provided when pruning, + if not set already. This allows for setting both this option + and `fetch.prune` to maintain a 1=1 mapping to upstream + refs. See also `remote.<name>.pruneTags` and the PRUNING + section of linkgit:git-fetch[1]. + +fetch.output:: + Control how ref update status is printed. Valid values are + `full` and `compact`. Default value is `full`. See section + OUTPUT in linkgit:git-fetch[1] for details. + +fetch.negotiationAlgorithm:: + Control how information about the commits in the local repository is + sent when negotiating the contents of the packfile to be sent by the + server. Set to "skipping" to use an algorithm that skips commits in an + effort to converge faster, but may result in a larger-than-necessary + packfile; the default is "default", which instructs Git to use the default algorithm + that never skips commits (unless the server has acknowledged it or one + of its descendants). + Unknown values will cause 'git fetch' to error out. ++ +See also the `--negotiation-tip` option for linkgit:git-fetch[1]. diff --git a/Documentation/config/filter.txt b/Documentation/config/filter.txt new file mode 100644 index 0000000000..90dfe0ba5a --- /dev/null +++ b/Documentation/config/filter.txt @@ -0,0 +1,9 @@ +filter.<driver>.clean:: + The command which is used to convert the content of a worktree + file to a blob upon checkin. See linkgit:gitattributes[5] for + details. + +filter.<driver>.smudge:: + The command which is used to convert the content of a blob + object to a worktree file upon checkout. See + linkgit:gitattributes[5] for details. diff --git a/Documentation/fmt-merge-msg-config.txt b/Documentation/config/fmt-merge-msg.txt index c73cfa90b7..c73cfa90b7 100644 --- a/Documentation/fmt-merge-msg-config.txt +++ b/Documentation/config/fmt-merge-msg.txt diff --git a/Documentation/config/format.txt b/Documentation/config/format.txt new file mode 100644 index 0000000000..dc77941c48 --- /dev/null +++ b/Documentation/config/format.txt @@ -0,0 +1,87 @@ +format.attach:: + Enable multipart/mixed attachments as the default for + 'format-patch'. The value can also be a double quoted string + which will enable attachments as the default and set the + value as the boundary. See the --attach option in + linkgit:git-format-patch[1]. + +format.from:: + Provides the default value for the `--from` option to format-patch. + Accepts a boolean value, or a name and email address. If false, + format-patch defaults to `--no-from`, using commit authors directly in + the "From:" field of patch mails. If true, format-patch defaults to + `--from`, using your committer identity in the "From:" field of patch + mails and including a "From:" field in the body of the patch mail if + different. If set to a non-boolean value, format-patch uses that + value instead of your committer identity. Defaults to false. + +format.numbered:: + A boolean which can enable or disable sequence numbers in patch + subjects. It defaults to "auto" which enables it only if there + is more than one patch.
It can be enabled or disabled for all + messages by setting it to "true" or "false". See the --numbered + option in linkgit:git-format-patch[1]. + +format.headers:: + Additional email headers to include in a patch to be submitted + by mail. See linkgit:git-format-patch[1]. + +format.to:: +format.cc:: + Additional recipients to include in a patch to be submitted + by mail. See the --to and --cc options in + linkgit:git-format-patch[1]. + +format.subjectPrefix:: + The default for format-patch is to output files with the '[PATCH]' + subject prefix. Use this variable to change that prefix. + +format.signature:: + The default for format-patch is to output a signature containing + the Git version number. Use this variable to change that default. + Set this variable to the empty string ("") to suppress + signature generation. + +format.signatureFile:: + Works just like format.signature except the contents of the + file specified by this variable will be used as the signature. + +format.suffix:: + The default for format-patch is to output files with the suffix + `.patch`. Use this variable to change that suffix (make sure to + include the dot if you want it). + +format.pretty:: + The default pretty format for the log/show/whatchanged commands. + See linkgit:git-log[1], linkgit:git-show[1], + linkgit:git-whatchanged[1]. + +format.thread:: + The default threading style for 'git format-patch'. Can be + a boolean value, or `shallow` or `deep`. `shallow` threading + makes every mail a reply to the head of the series, + where the head is chosen from the cover letter, the + `--in-reply-to`, and the first patch mail, in this order. + `deep` threading makes every mail a reply to the previous one. + A true boolean value is the same as `shallow`, and a false + value disables threading. + +format.signOff:: + A boolean value which lets you enable the `-s/--signoff` option of + format-patch by default. *Note:* Adding the Signed-off-by: line to a + patch should be a conscious act and means that you certify you have + the rights to submit this work under the same open source license. + Please see the 'SubmittingPatches' document for further discussion. + +format.coverLetter:: + A boolean that controls whether to generate a cover-letter when + format-patch is invoked, but in addition can be set to "auto", to + generate a cover-letter only when there's more than one patch. + +format.outputDirectory:: + Set a custom directory to store the resulting files instead of the + current working directory. + +format.useAutoBase:: + A boolean value which lets you enable the `--base=auto` option of + format-patch by default. diff --git a/Documentation/config/fsck.txt b/Documentation/config/fsck.txt new file mode 100644 index 0000000000..879c5a29c4 --- /dev/null +++ b/Documentation/config/fsck.txt @@ -0,0 +1,67 @@ +fsck.<msg-id>:: + During fsck git may find issues with legacy data which + wouldn't be generated by current versions of git, and which + wouldn't be sent over the wire if `transfer.fsckObjects` was + set. This feature is intended to support working with legacy + repositories containing such data. ++ +Setting `fsck.<msg-id>` will be picked up by linkgit:git-fsck[1], but +to accept pushes of such data set `receive.fsck.<msg-id>` instead, or +to clone or fetch it set `fetch.fsck.<msg-id>`. ++ +The rest of the documentation discusses `fsck.*` for brevity, but the +same applies for the corresponding `receive.fsck.*` and +`fetch.fsck.*` variables.
++ +Unlike variables like `color.ui` and `core.editor` the +`receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>` variables will not +fall back on the `fsck.<msg-id>` configuration if they aren't set. To +uniformly configure the same fsck settings in different circumstances, +all three of them must be set to the same values. ++ +When `fsck.<msg-id>` is set, errors can be switched to warnings and +vice versa by configuring the `fsck.<msg-id>` setting where the +`<msg-id>` is the fsck message ID and the value is one of `error`, +`warn` or `ignore`. For convenience, fsck prefixes the error/warning +with the message ID, e.g. "missingEmail: invalid author/committer line +- missing email" means that setting `fsck.missingEmail = ignore` will +hide that issue. ++ +In general, it is better to enumerate existing objects with problems +with `fsck.skipList`, instead of listing the kind of breakages these +problematic objects share to be ignored, as doing the latter will +allow new instances of the same breakages to go unnoticed. ++ +Setting an unknown `fsck.<msg-id>` value will cause fsck to die, but +doing the same for `receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>` +will only cause git to warn. + +fsck.skipList:: + The path to a list of object names (i.e. one unabbreviated SHA-1 per + line) that are known to be broken in a non-fatal way and should + be ignored. On versions of Git 2.20 and later comments ('#'), empty + lines, and any leading and trailing whitespace are ignored. Everything + but a SHA-1 per line will error out on older versions. ++ +This feature is useful when an established project should be accepted +despite early commits containing errors that can be safely ignored +such as invalid committer email addresses. Note: corrupt objects +cannot be skipped with this setting. ++ +Like `fsck.<msg-id>` this variable has corresponding +`receive.fsck.skipList` and `fetch.fsck.skipList` variants. ++ +Unlike variables like `color.ui` and `core.editor` the +`receive.fsck.skipList` and `fetch.fsck.skipList` variables will not +fall back on the `fsck.skipList` configuration if they aren't set. To +uniformly configure the same fsck settings in different circumstances, +all three of them must be set to the same values. ++ +Older versions of Git (before 2.20) documented that the object names +list should be sorted. This was never a requirement; the object names +could appear in any order, but when reading the list we tracked whether +the list was sorted for the purposes of an internal binary search +implementation, which could save itself some work with an already sorted +list. Unless you had a humongous list there was no reason to go out of +your way to pre-sort the list. After Git version 2.20 a hash implementation +is used instead, so there's now no reason to pre-sort the list. diff --git a/Documentation/config/gc.txt b/Documentation/config/gc.txt new file mode 100644 index 0000000000..c6fbb8a96f --- /dev/null +++ b/Documentation/config/gc.txt @@ -0,0 +1,108 @@ +gc.aggressiveDepth:: + The depth parameter used in the delta compression + algorithm used by 'git gc --aggressive'. This defaults + to 50. + +gc.aggressiveWindow:: + The window size parameter used in the delta compression + algorithm used by 'git gc --aggressive'. This defaults + to 250. + +gc.auto:: + When there are approximately more than this many loose + objects in the repository, `git gc --auto` will pack them. + Some Porcelain commands use this command to perform a + light-weight garbage collection from time to time.
The + default value is 6700. Setting this to 0 disables it. + +gc.autoPackLimit:: + When there are more than this many packs that are not + marked with `*.keep` file in the repository, `git gc + --auto` consolidates them into one larger pack. The + default value is 50. Setting this to 0 disables it. + +gc.autoDetach:: + Make `git gc --auto` return immediately and run in background + if the system supports it. Default is true. + +gc.bigPackThreshold:: + If non-zero, all packs larger than this limit are kept when + `git gc` is run. This is very similar to `--keep-base-pack` + except that all packs that meet the threshold are kept, not + just the base pack. Defaults to zero. Common unit suffixes of + 'k', 'm', or 'g' are supported. ++ +Note that if the number of kept packs is more than gc.autoPackLimit, +this configuration variable is ignored, all packs except the base pack +will be repacked. After this the number of packs should go below +gc.autoPackLimit and gc.bigPackThreshold should be respected again. + +gc.writeCommitGraph:: + If true, then gc will rewrite the commit-graph file when + linkgit:git-gc[1] is run. When using linkgit:git-gc[1] + '--auto' the commit-graph will be updated if housekeeping is + required. Default is false. See linkgit:git-commit-graph[1] + for details. + +gc.logExpiry:: + If the file gc.log exists, then `git gc --auto` will print + its content and exit with status zero instead of running + unless that file is more than 'gc.logExpiry' old. Default is + "1.day". See `gc.pruneExpire` for more ways to specify its + value. + +gc.packRefs:: + Running `git pack-refs` in a repository renders it + unclonable by Git versions prior to 1.5.1.2 over dumb + transports such as HTTP. This variable determines whether + 'git gc' runs `git pack-refs`. This can be set to `notbare` + to enable it within all non-bare repos or it can be set to a + boolean value. The default is `true`. + +gc.pruneExpire:: + When 'git gc' is run, it will call 'prune --expire 2.weeks.ago'. + Override the grace period with this config variable. The value + "now" may be used to disable this grace period and always prune + unreachable objects immediately, or "never" may be used to + suppress pruning. This feature helps prevent corruption when + 'git gc' runs concurrently with another process writing to the + repository; see the "NOTES" section of linkgit:git-gc[1]. + +gc.worktreePruneExpire:: + When 'git gc' is run, it calls + 'git worktree prune --expire 3.months.ago'. + This config variable can be used to set a different grace + period. The value "now" may be used to disable the grace + period and prune `$GIT_DIR/worktrees` immediately, or "never" + may be used to suppress pruning. + +gc.reflogExpire:: +gc.<pattern>.reflogExpire:: + 'git reflog expire' removes reflog entries older than + this time; defaults to 90 days. The value "now" expires all + entries immediately, and "never" suppresses expiration + altogether. With "<pattern>" (e.g. + "refs/stash") in the middle the setting applies only to + the refs that match the <pattern>. + +gc.reflogExpireUnreachable:: +gc.<pattern>.reflogExpireUnreachable:: + 'git reflog expire' removes reflog entries older than + this time and are not reachable from the current tip; + defaults to 30 days. The value "now" expires all entries + immediately, and "never" suppresses expiration altogether. + With "<pattern>" (e.g. "refs/stash") + in the middle, the setting applies only to the refs that + match the <pattern>. 
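A possible tuning of the collection thresholds above, with made-up numbers, could look like this:

    # Pack loose objects sooner and keep any pack larger than 1 GiB untouched.
    git config gc.auto 2000
    git config gc.bigPackThreshold 1g
    # Expire unreachable stash reflog entries faster than the 30-day default.
    git config gc.refs/stash.reflogExpireUnreachable 7.days.ago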
+ +gc.rerereResolved:: + Records of conflicted merge you resolved earlier are + kept for this many days when 'git rerere gc' is run. + You can also use more human-readable "1.month.ago", etc. + The default is 60 days. See linkgit:git-rerere[1]. + +gc.rerereUnresolved:: + Records of conflicted merge you have not resolved are + kept for this many days when 'git rerere gc' is run. + You can also use more human-readable "1.month.ago", etc. + The default is 15 days. See linkgit:git-rerere[1]. diff --git a/Documentation/config/gitcvs.txt b/Documentation/config/gitcvs.txt new file mode 100644 index 0000000000..02da427fd9 --- /dev/null +++ b/Documentation/config/gitcvs.txt @@ -0,0 +1,67 @@ +gitcvs.commitMsgAnnotation:: + Append this string to each commit message. Set to empty string + to disable this feature. Defaults to "via git-CVS emulator". + +gitcvs.enabled:: + Whether the CVS server interface is enabled for this repository. + See linkgit:git-cvsserver[1]. + +gitcvs.logFile:: + Path to a log file where the CVS server interface well... logs + various stuff. See linkgit:git-cvsserver[1]. + +gitcvs.usecrlfattr:: + If true, the server will look up the end-of-line conversion + attributes for files to determine the `-k` modes to use. If + the attributes force Git to treat a file as text, + the `-k` mode will be left blank so CVS clients will + treat it as text. If they suppress text conversion, the file + will be set with '-kb' mode, which suppresses any newline munging + the client might otherwise do. If the attributes do not allow + the file type to be determined, then `gitcvs.allBinary` is + used. See linkgit:gitattributes[5]. + +gitcvs.allBinary:: + This is used if `gitcvs.usecrlfattr` does not resolve + the correct '-kb' mode to use. If true, all + unresolved files are sent to the client in + mode '-kb'. This causes the client to treat them + as binary files, which suppresses any newline munging it + otherwise might do. Alternatively, if it is set to "guess", + then the contents of the file are examined to decide if + it is binary, similar to `core.autocrlf`. + +gitcvs.dbName:: + Database used by git-cvsserver to cache revision information + derived from the Git repository. The exact meaning depends on the + used database driver, for SQLite (which is the default driver) this + is a filename. Supports variable substitution (see + linkgit:git-cvsserver[1] for details). May not contain semicolons (`;`). + Default: '%Ggitcvs.%m.sqlite' + +gitcvs.dbDriver:: + Used Perl DBI driver. You can specify any available driver + for this here, but it might not work. git-cvsserver is tested + with 'DBD::SQLite', reported to work with 'DBD::Pg', and + reported *not* to work with 'DBD::mysql'. Experimental feature. + May not contain double colons (`:`). Default: 'SQLite'. + See linkgit:git-cvsserver[1]. + +gitcvs.dbUser, gitcvs.dbPass:: + Database user and password. Only useful if setting `gitcvs.dbDriver`, + since SQLite has no concept of database users and/or passwords. + 'gitcvs.dbUser' supports variable substitution (see + linkgit:git-cvsserver[1] for details). + +gitcvs.dbTableNamePrefix:: + Database table name prefix. Prepended to the names of any + database tables used, allowing a single database to be used + for several repositories. Supports variable substitution (see + linkgit:git-cvsserver[1] for details). Any non-alphabetic + characters will be replaced with underscores. 
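To make the database settings above concrete, a repository served to CVS clients might carry something like this (paths and names are invented):

    # Enable the CVS emulator and keep its cache in a custom SQLite file.
    git config gitcvs.enabled true
    git config gitcvs.logFile /var/log/git-cvsserver.log
    git config gitcvs.dbName '%Ggitcvs.%m.sqlite'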
+ +All gitcvs variables except for `gitcvs.usecrlfattr` and +`gitcvs.allBinary` can also be specified as +'gitcvs.<access_method>.<varname>' (where 'access_method' +is one of "ext" and "pserver") to make them apply only for the given +access method. diff --git a/Documentation/config/gitweb.txt b/Documentation/config/gitweb.txt new file mode 100644 index 0000000000..1b51475108 --- /dev/null +++ b/Documentation/config/gitweb.txt @@ -0,0 +1,16 @@ +gitweb.category:: +gitweb.description:: +gitweb.owner:: +gitweb.url:: + See linkgit:gitweb[1] for description. + +gitweb.avatar:: +gitweb.blame:: +gitweb.grep:: +gitweb.highlight:: +gitweb.patches:: +gitweb.pickaxe:: +gitweb.remote_heads:: +gitweb.showSizes:: +gitweb.snapshot:: + See linkgit:gitweb.conf[5] for description. diff --git a/Documentation/config/gpg.txt b/Documentation/config/gpg.txt new file mode 100644 index 0000000000..590fe0d4ba --- /dev/null +++ b/Documentation/config/gpg.txt @@ -0,0 +1,20 @@ +gpg.program:: + Use this custom program instead of "`gpg`" found on `$PATH` when + making or verifying a PGP signature. The program must support the + same command-line interface as GPG, namely, to verify a detached + signature, "`gpg --verify $file - <$signature`" is run, and the + program is expected to signal a good signature by exiting with + code 0, and to generate an ASCII-armored detached signature, the + standard input of "`gpg -bsau $key`" is fed with the contents to be + signed, and the program is expected to send the result to its + standard output. + +gpg.format:: + Specifies which key format to use when signing with `--gpg-sign`. + Default is "openpgp" and another possible value is "x509". + +gpg.<format>.program:: + Use this to customize the program used for the signing format you + chose. (see `gpg.program` and `gpg.format`) `gpg.program` can still + be used as a legacy synonym for `gpg.openpgp.program`. The default + value for `gpg.x509.program` is "gpgsm". diff --git a/Documentation/config/grep.txt b/Documentation/config/grep.txt new file mode 100644 index 0000000000..44abe45a7c --- /dev/null +++ b/Documentation/config/grep.txt @@ -0,0 +1,24 @@ +grep.lineNumber:: + If set to true, enable `-n` option by default. + +grep.column:: + If set to true, enable the `--column` option by default. + +grep.patternType:: + Set the default matching behavior. Using a value of 'basic', 'extended', + 'fixed', or 'perl' will enable the `--basic-regexp`, `--extended-regexp`, + `--fixed-strings`, or `--perl-regexp` option accordingly, while the + value 'default' will return to the default matching behavior. + +grep.extendedRegexp:: + If set to true, enable `--extended-regexp` option by default. This + option is ignored when the `grep.patternType` option is set to a value + other than 'default'. + +grep.threads:: + Number of grep worker threads to use. + See `grep.threads` in linkgit:git-grep[1] for more information. + +grep.fallbackToNoIndex:: + If set to true, fall back to git grep --no-index if git grep + is executed outside of a git repository. Defaults to false. diff --git a/Documentation/config/gui.txt b/Documentation/config/gui.txt new file mode 100644 index 0000000000..d30831a130 --- /dev/null +++ b/Documentation/config/gui.txt @@ -0,0 +1,57 @@ +gui.commitMsgWidth:: + Defines how wide the commit message window is in the + linkgit:git-gui[1]. "75" is the default. + +gui.diffContext:: + Specifies how many context lines should be used in calls to diff + made by the linkgit:git-gui[1]. The default is "5". 
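As one concrete reading of the grep.* keys above, a setup that makes 'git grep' behave like 'grep -nE' by default might be (illustrative, not a recommendation):

    # Show line numbers and use POSIX extended regular expressions.
    git config grep.lineNumber true
    git config grep.patternType extended
    # Fall back to a plain recursive grep outside of a repository.
    git config grep.fallbackToNoIndex true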
+ +gui.displayUntracked:: + Determines if linkgit:git-gui[1] shows untracked files + in the file list. The default is "true". + +gui.encoding:: + Specifies the default encoding to use for displaying of + file contents in linkgit:git-gui[1] and linkgit:gitk[1]. + It can be overridden by setting the 'encoding' attribute + for relevant files (see linkgit:gitattributes[5]). + If this option is not set, the tools default to the + locale encoding. + +gui.matchTrackingBranch:: + Determines if new branches created with linkgit:git-gui[1] should + default to tracking remote branches with matching names or + not. Default: "false". + +gui.newBranchTemplate:: + Is used as suggested name when creating new branches using the + linkgit:git-gui[1]. + +gui.pruneDuringFetch:: + "true" if linkgit:git-gui[1] should prune remote-tracking branches when + performing a fetch. The default value is "false". + +gui.trustmtime:: + Determines if linkgit:git-gui[1] should trust the file modification + timestamp or not. By default the timestamps are not trusted. + +gui.spellingDictionary:: + Specifies the dictionary used for spell checking commit messages in + the linkgit:git-gui[1]. When set to "none" spell checking is turned + off. + +gui.fastCopyBlame:: + If true, 'git gui blame' uses `-C` instead of `-C -C` for original + location detection. It makes blame significantly faster on huge + repositories at the expense of less thorough copy detection. + +gui.copyBlameThreshold:: + Specifies the threshold to use in 'git gui blame' original location + detection, measured in alphanumeric characters. See the + linkgit:git-blame[1] manual for more information on copy detection. + +gui.blamehistoryctx:: + Specifies the radius of history context in days to show in + linkgit:gitk[1] for the selected commit, when the `Show History + Context` menu item is invoked from 'git gui blame'. If this + variable is set to zero, the whole history is shown. diff --git a/Documentation/config/guitool.txt b/Documentation/config/guitool.txt new file mode 100644 index 0000000000..43fb9466ff --- /dev/null +++ b/Documentation/config/guitool.txt @@ -0,0 +1,50 @@ +guitool.<name>.cmd:: + Specifies the shell command line to execute when the corresponding item + of the linkgit:git-gui[1] `Tools` menu is invoked. This option is + mandatory for every tool. The command is executed from the root of + the working directory, and in the environment it receives the name of + the tool as `GIT_GUITOOL`, the name of the currently selected file as + 'FILENAME', and the name of the current branch as 'CUR_BRANCH' (if + the head is detached, 'CUR_BRANCH' is empty). + +guitool.<name>.needsFile:: + Run the tool only if a diff is selected in the GUI. It guarantees + that 'FILENAME' is not empty. + +guitool.<name>.noConsole:: + Run the command silently, without creating a window to display its + output. + +guitool.<name>.noRescan:: + Don't rescan the working directory for changes after the tool + finishes execution. + +guitool.<name>.confirm:: + Show a confirmation dialog before actually running the tool. + +guitool.<name>.argPrompt:: + Request a string argument from the user, and pass it to the tool + through the `ARGS` environment variable. Since requesting an + argument implies confirmation, the 'confirm' option has no effect + if this is enabled. If the option is set to 'true', 'yes', or '1', + the dialog uses a built-in generic prompt; otherwise the exact + value of the variable is used. 
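Tying the guitool.* keys above together, a custom Tools-menu entry might be defined like this (the tool name and script are invented):

    # Add a "Cleanup" item to git gui's Tools menu that runs a helper script
    # on the file currently selected in the GUI.
    git config guitool.Cleanup.cmd './scripts/cleanup.sh "$FILENAME"'
    git config guitool.Cleanup.needsFile true
    git config guitool.Cleanup.confirm true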
+ +guitool.<name>.revPrompt:: + Request a single valid revision from the user, and set the + `REVISION` environment variable. In other aspects this option + is similar to 'argPrompt', and can be used together with it. + +guitool.<name>.revUnmerged:: + Show only unmerged branches in the 'revPrompt' subdialog. + This is useful for tools similar to merge or rebase, but not + for things like checkout or reset. + +guitool.<name>.title:: + Specifies the title to use for the prompt dialog. The default + is the tool name. + +guitool.<name>.prompt:: + Specifies the general prompt string to display at the top of + the dialog, before subsections for 'argPrompt' and 'revPrompt'. + The default value includes the actual command. diff --git a/Documentation/config/help.txt b/Documentation/config/help.txt new file mode 100644 index 0000000000..224bbf5a28 --- /dev/null +++ b/Documentation/config/help.txt @@ -0,0 +1,23 @@ +help.browser:: + Specify the browser that will be used to display help in the + 'web' format. See linkgit:git-help[1]. + +help.format:: + Override the default help format used by linkgit:git-help[1]. + Values 'man', 'info', 'web' and 'html' are supported. 'man' is + the default. 'web' and 'html' are the same. + +help.autoCorrect:: + Automatically correct and execute mistyped commands after + waiting for the given number of deciseconds (0.1 sec). If more + than one command can be deduced from the entered text, nothing + will be executed. If the value of this option is negative, + the corrected command will be executed immediately. If the + value is 0 - the command will be just shown but not executed. + This is the default. + +help.htmlPath:: + Specify the path where the HTML documentation resides. File system paths + and URLs are supported. HTML pages will be prefixed with this path when + help is displayed in the 'web' format. This defaults to the documentation + path of your Git installation. diff --git a/Documentation/config/http.txt b/Documentation/config/http.txt new file mode 100644 index 0000000000..a56d848bc0 --- /dev/null +++ b/Documentation/config/http.txt @@ -0,0 +1,271 @@ +http.proxy:: + Override the HTTP proxy, normally configured using the 'http_proxy', + 'https_proxy', and 'all_proxy' environment variables (see `curl(1)`). In + addition to the syntax understood by curl, it is possible to specify a + proxy string with a user name but no password, in which case git will + attempt to acquire one in the same way it does for other credentials. See + linkgit:gitcredentials[7] for more information. The syntax thus is + '[protocol://][user[:password]@]proxyhost[:port]'. This can be overridden + on a per-remote basis; see remote.<name>.proxy + +http.proxyAuthMethod:: + Set the method with which to authenticate against the HTTP proxy. This + only takes effect if the configured proxy string contains a user name part + (i.e. is of the form 'user@host' or 'user@host:port'). This can be + overridden on a per-remote basis; see `remote.<name>.proxyAuthMethod`. + Both can be overridden by the `GIT_HTTP_PROXY_AUTHMETHOD` environment + variable. Possible values are: ++ +-- +* `anyauth` - Automatically pick a suitable authentication method. It is + assumed that the proxy answers an unauthenticated request with a 407 + status code and one or more Proxy-authenticate headers with supported + authentication methods. This is the default. 
+* `basic` - HTTP Basic authentication +* `digest` - HTTP Digest authentication; this prevents the password from being + transmitted to the proxy in clear text +* `negotiate` - GSS-Negotiate authentication (compare the --negotiate option + of `curl(1)`) +* `ntlm` - NTLM authentication (compare the --ntlm option of `curl(1)`) +-- + +http.emptyAuth:: + Attempt authentication without seeking a username or password. This + can be used to attempt GSS-Negotiate authentication without specifying + a username in the URL, as libcurl normally requires a username for + authentication. + +http.delegation:: + Control GSSAPI credential delegation. The delegation is disabled + by default in libcurl since version 7.21.7. Set parameter to tell + the server what it is allowed to delegate when it comes to user + credentials. Used with GSS/kerberos. Possible values are: ++ +-- +* `none` - Don't allow any delegation. +* `policy` - Delegates if and only if the OK-AS-DELEGATE flag is set in the + Kerberos service ticket, which is a matter of realm policy. +* `always` - Unconditionally allow the server to delegate. +-- + + +http.extraHeader:: + Pass an additional HTTP header when communicating with a server. If + more than one such entry exists, all of them are added as extra + headers. To allow overriding the settings inherited from the system + config, an empty value will reset the extra headers to the empty list. + +http.cookieFile:: + The pathname of a file containing previously stored cookie lines, + which should be used + in the Git http session, if they match the server. The file format + of the file to read cookies from should be plain HTTP headers or + the Netscape/Mozilla cookie file format (see `curl(1)`). + NOTE that the file specified with http.cookieFile is used only as + input unless http.saveCookies is set. + +http.saveCookies:: + If set, store cookies received during requests to the file specified by + http.cookieFile. Has no effect if http.cookieFile is unset. + +http.sslVersion:: + The SSL version to use when negotiating an SSL connection, if you + want to force the default. The available and default version + depend on whether libcurl was built against NSS or OpenSSL and the + particular configuration of the crypto library in use. Internally + this sets the 'CURLOPT_SSL_VERSION' option; see the libcurl + documentation for more details on the format of this option and + for the ssl version supported. Actually the possible values of + this option are: + + - sslv2 + - sslv3 + - tlsv1 + - tlsv1.0 + - tlsv1.1 + - tlsv1.2 + - tlsv1.3 + ++ +Can be overridden by the `GIT_SSL_VERSION` environment variable. +To force git to use libcurl's default ssl version and ignore any +explicit http.sslversion option, set `GIT_SSL_VERSION` to the +empty string. + +http.sslCipherList:: + A list of SSL ciphers to use when negotiating an SSL connection. + The available ciphers depend on whether libcurl was built against + NSS or OpenSSL and the particular configuration of the crypto + library in use. Internally this sets the 'CURLOPT_SSL_CIPHER_LIST' + option; see the libcurl documentation for more details on the format + of this list. ++ +Can be overridden by the `GIT_SSL_CIPHER_LIST` environment variable. +To force git to use libcurl's default cipher list and ignore any +explicit http.sslCipherList option, set `GIT_SSL_CIPHER_LIST` to the +empty string. + +http.sslVerify:: + Whether to verify the SSL certificate when fetching or pushing + over HTTPS. Defaults to true. 
Can be overridden by the + `GIT_SSL_NO_VERIFY` environment variable. + +http.sslCert:: + File containing the SSL certificate when fetching or pushing + over HTTPS. Can be overridden by the `GIT_SSL_CERT` environment + variable. + +http.sslKey:: + File containing the SSL private key when fetching or pushing + over HTTPS. Can be overridden by the `GIT_SSL_KEY` environment + variable. + +http.sslCertPasswordProtected:: + Enable Git's password prompt for the SSL certificate. Otherwise + OpenSSL will prompt the user, possibly many times, if the + certificate or private key is encrypted. Can be overridden by the + `GIT_SSL_CERT_PASSWORD_PROTECTED` environment variable. + +http.sslCAInfo:: + File containing the certificates to verify the peer with when + fetching or pushing over HTTPS. Can be overridden by the + `GIT_SSL_CAINFO` environment variable. + +http.sslCAPath:: + Path containing files with the CA certificates to verify the peer + with when fetching or pushing over HTTPS. Can be overridden + by the `GIT_SSL_CAPATH` environment variable. + +http.sslBackend:: + Name of the SSL backend to use (e.g. "openssl" or "schannel"). + This option is ignored if cURL lacks support for choosing the SSL + backend at runtime. + +http.schannelCheckRevoke:: + Used to enforce or disable certificate revocation checks in cURL + when http.sslBackend is set to "schannel". Defaults to `true` if + unset. Only necessary to disable this if Git consistently errors + and the message is about checking the revocation status of a + certificate. This option is ignored if cURL lacks support for + setting the relevant SSL option at runtime. + +http.schannelUseSSLCAInfo:: + As of cURL v7.60.0, the Secure Channel backend can use the + certificate bundle provided via `http.sslCAInfo`, but that would + override the Windows Certificate Store. Since this is not desirable + by default, Git will tell cURL not to use that bundle by default + when the `schannel` backend was configured via `http.sslBackend`, + unless `http.schannelUseSSLCAInfo` overrides this behavior. + +http.pinnedpubkey:: + Public key of the https service. It may either be the filename of + a PEM or DER encoded public key file or a string starting with + 'sha256//' followed by the base64 encoded sha256 hash of the + public key. See also libcurl 'CURLOPT_PINNEDPUBLICKEY'. git will + exit with an error if this option is set but not supported by + cURL. + +http.sslTry:: + Attempt to use AUTH SSL/TLS and encrypted data transfers + when connecting via regular FTP protocol. This might be needed + if the FTP server requires it for security reasons or you wish + to connect securely whenever remote FTP server supports it. + Default is false since it might trigger certificate verification + errors on misconfigured servers. + +http.maxRequests:: + How many HTTP requests to launch in parallel. Can be overridden + by the `GIT_HTTP_MAX_REQUESTS` environment variable. Default is 5. + +http.minSessions:: + The number of curl sessions (counted across slots) to be kept across + requests. They will not be ended with curl_easy_cleanup() until + http_cleanup() is invoked. If USE_CURL_MULTI is not defined, this + value will be capped at 1. Defaults to 1. + +http.postBuffer:: + Maximum size in bytes of the buffer used by smart HTTP + transports when POSTing data to the remote system. + For requests larger than this buffer size, HTTP/1.1 and + Transfer-Encoding: chunked is used to avoid creating a + massive pack file locally. Default is 1 MiB, which is + sufficient for most requests. 
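As an illustration of the transport settings above, a host behind an authenticating proxy that also needs larger single POSTs might use the following (host and size are placeholders):

    # Route Git's HTTP traffic through a proxy; the password will be prompted for.
    git config http.proxy http://proxyuser@proxy.example.com:3128
    # Buffer up to 10 MiB per request before falling back to chunked transfer.
    git config http.postBuffer 10m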
+ +http.lowSpeedLimit, http.lowSpeedTime:: + If the HTTP transfer speed is less than 'http.lowSpeedLimit' + for longer than 'http.lowSpeedTime' seconds, the transfer is aborted. + Can be overridden by the `GIT_HTTP_LOW_SPEED_LIMIT` and + `GIT_HTTP_LOW_SPEED_TIME` environment variables. + +http.noEPSV:: + A boolean which disables the use of the EPSV ftp command by curl. + This can be helpful with some "poor" ftp servers which don't + support EPSV mode. Can be overridden by the `GIT_CURL_FTP_NO_EPSV` + environment variable. Default is false (curl will use EPSV). + +http.userAgent:: + The HTTP USER_AGENT string presented to an HTTP server. The default + value represents the version of the client Git such as git/1.7.1. + This option allows you to override this value to a more common value + such as Mozilla/4.0. This may be necessary, for instance, if + connecting through a firewall that restricts HTTP connections to a set + of common USER_AGENT strings (but not including those like git/1.7.1). + Can be overridden by the `GIT_HTTP_USER_AGENT` environment variable. + +http.followRedirects:: + Whether git should follow HTTP redirects. If set to `true`, git + will transparently follow any redirect issued by a server it + encounters. If set to `false`, git will treat all redirects as + errors. If set to `initial`, git will follow redirects only for + the initial request to a remote, but not for subsequent + follow-up HTTP requests. Since git uses the redirected URL as + the base for the follow-up requests, this is generally + sufficient. The default is `initial`. + +http.<url>.*:: + Any of the http.* options above can be applied selectively to some URLs. + For a config key to match a URL, each element of the config key is + compared to that of the URL, in the following order: ++ +-- +. Scheme (e.g., `https` in `https://example.com/`). This field + must match exactly between the config key and the URL. + +. Host/domain name (e.g., `example.com` in `https://example.com/`). + This field must match between the config key and the URL. It is + possible to specify a `*` as part of the host name to match all subdomains + at this level. `https://*.example.com/` for example would match + `https://foo.example.com/`, but not `https://foo.bar.example.com/`. + +. Port number (e.g., `8080` in `http://example.com:8080/`). + This field must match exactly between the config key and the URL. + Omitted port numbers are automatically converted to the correct + default for the scheme before matching. + +. Path (e.g., `repo.git` in `https://example.com/repo.git`). The + path field of the config key must match the path field of the URL + either exactly or as a prefix of slash-delimited path elements. This means + a config key with path `foo/` matches URL path `foo/bar`. A prefix can only + match on a slash (`/`) boundary. Longer matches take precedence (so a config + key with path `foo/bar` is a better match to URL path `foo/bar` than a config + key with just path `foo/`). + +. User name (e.g., `user` in `https://user@example.com/repo.git`). If + the config key has a user name it must match the user name in the + URL exactly. If the config key does not have a user name, that + config key will match a URL with any user name (including none), + but at a lower precedence than a config key with a user name. +-- ++ +The list above is ordered by decreasing precedence; a URL that matches +a config key's path is preferred to one that matches its user name.
For example, +if the URL is `https://user@example.com/foo/bar` a config key match of +`https://example.com/foo` will be preferred over a config key match of +`https://user@example.com`. ++ +All URLs are normalized before attempting any matching (the password part, +if embedded in the URL, is always ignored for matching purposes) so that +equivalent URLs that are simply spelled differently will match properly. +Environment variable settings always override any matches. The URLs that are +matched against are those given directly to Git commands. This means any URLs +visited as a result of a redirection do not participate in matching. diff --git a/Documentation/config/i18n.txt b/Documentation/config/i18n.txt new file mode 100644 index 0000000000..cc25621731 --- /dev/null +++ b/Documentation/config/i18n.txt @@ -0,0 +1,10 @@ +i18n.commitEncoding:: + Character encoding the commit messages are stored in; Git itself + does not care per se, but this information is necessary e.g. when + importing commits from emails or in the gitk graphical history + browser (and possibly at other places in the future or in other + porcelains). See e.g. linkgit:git-mailinfo[1]. Defaults to 'utf-8'. + +i18n.logOutputEncoding:: + Character encoding the commit messages are converted to when + running 'git log' and friends. diff --git a/Documentation/config/imap.txt b/Documentation/config/imap.txt new file mode 100644 index 0000000000..06166fb5c0 --- /dev/null +++ b/Documentation/config/imap.txt @@ -0,0 +1,44 @@ +imap.folder:: + The folder to drop the mails into, which is typically the Drafts + folder. For example: "INBOX.Drafts", "INBOX/Drafts" or + "[Gmail]/Drafts". Required. + +imap.tunnel:: + Command used to setup a tunnel to the IMAP server through which + commands will be piped instead of using a direct network connection + to the server. Required when imap.host is not set. + +imap.host:: + A URL identifying the server. Use an `imap://` prefix for non-secure + connections and an `imaps://` prefix for secure connections. + Ignored when imap.tunnel is set, but required otherwise. + +imap.user:: + The username to use when logging in to the server. + +imap.pass:: + The password to use when logging in to the server. + +imap.port:: + An integer port number to connect to on the server. + Defaults to 143 for imap:// hosts and 993 for imaps:// hosts. + Ignored when imap.tunnel is set. + +imap.sslverify:: + A boolean to enable/disable verification of the server certificate + used by the SSL/TLS connection. Default is `true`. Ignored when + imap.tunnel is set. + +imap.preformattedHTML:: + A boolean to enable/disable the use of html encoding when sending + a patch. An html encoded patch will be bracketed with <pre> + and have a content type of text/html. Ironically, enabling this + option causes Thunderbird to send the patch as a plain/text, + format=fixed email. Default is `false`. + +imap.authMethod:: + Specify authenticate method for authentication with IMAP server. + If Git was built with the NO_CURL option, or if your curl version is older + than 7.34.0, or if you're running git-imap-send with the `--no-curl` + option, the only supported method is 'CRAM-MD5'. If this is not set + then 'git imap-send' uses the basic IMAP plaintext LOGIN command. 
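As a sketch of how the variables above fit together (the host, user and folder values are placeholders, not defaults), `git imap-send` would read a section such as:

------------
[imap]
	folder = "INBOX/Drafts"
	host = imaps://imap.example.com
	user = me@example.com
	port = 993          ; the imaps:// default, spelled out here only for clarity
	sslverify = true
------------

Setting `imap.tunnel` instead of `imap.host` would switch to a piped connection, in which case the port and sslverify settings are ignored, as described above.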
diff --git a/Documentation/config/index.txt b/Documentation/config/index.txt new file mode 100644 index 0000000000..f181503041 --- /dev/null +++ b/Documentation/config/index.txt @@ -0,0 +1,26 @@ +index.recordEndOfIndexEntries:: + Specifies whether the index file should include an "End Of Index + Entry" section. This reduces index load time on multiprocessor + machines but produces a message "ignoring EOIE extension" when + reading the index using Git versions before 2.20. Defaults to + 'true' if index.threads has been explicitly enabled, 'false' + otherwise. + +index.recordOffsetTable:: + Specifies whether the index file should include an "Index Entry + Offset Table" section. This reduces index load time on + multiprocessor machines but produces a message "ignoring IEOT + extension" when reading the index using Git versions before 2.20. + Defaults to 'true' if index.threads has been explicitly enabled, + 'false' otherwise. + +index.threads:: + Specifies the number of threads to spawn when loading the index. + This is meant to reduce index load time on multiprocessor machines. + Specifying 0 or 'true' will cause Git to auto-detect the number of + CPU's and set the number of threads accordingly. Specifying 1 or + 'false' will disable multithreading. Defaults to 'true'. + +index.version:: + Specify the version with which new index files should be + initialized. This does not affect existing repositories. diff --git a/Documentation/config/init.txt b/Documentation/config/init.txt new file mode 100644 index 0000000000..46fa8c6a08 --- /dev/null +++ b/Documentation/config/init.txt @@ -0,0 +1,3 @@ +init.templateDir:: + Specify the directory from which templates will be copied. + (See the "TEMPLATE DIRECTORY" section of linkgit:git-init[1].) diff --git a/Documentation/config/instaweb.txt b/Documentation/config/instaweb.txt new file mode 100644 index 0000000000..50cb2f7d62 --- /dev/null +++ b/Documentation/config/instaweb.txt @@ -0,0 +1,20 @@ +instaweb.browser:: + Specify the program that will be used to browse your working + repository in gitweb. See linkgit:git-instaweb[1]. + +instaweb.httpd:: + The HTTP daemon command-line to start gitweb on your working + repository. See linkgit:git-instaweb[1]. + +instaweb.local:: + If true the web server started by linkgit:git-instaweb[1] will + be bound to the local IP (127.0.0.1). + +instaweb.modulePath:: + The default module path for linkgit:git-instaweb[1] to use + instead of /usr/lib/apache2/modules. Only used if httpd + is Apache. + +instaweb.port:: + The port number to bind the gitweb httpd to. See + linkgit:git-instaweb[1]. diff --git a/Documentation/config/interactive.txt b/Documentation/config/interactive.txt new file mode 100644 index 0000000000..ad846dd7c9 --- /dev/null +++ b/Documentation/config/interactive.txt @@ -0,0 +1,16 @@ +interactive.singleKey:: + In interactive commands, allow the user to provide one-letter + input with a single key (i.e., without hitting enter). + Currently this is used by the `--patch` mode of + linkgit:git-add[1], linkgit:git-checkout[1], linkgit:git-commit[1], + linkgit:git-reset[1], and linkgit:git-stash[1]. Note that this + setting is silently ignored if portable keystroke input + is not available; requires the Perl module Term::ReadKey. + +interactive.diffFilter:: + When an interactive command (such as `git add --patch`) shows + a colorized diff, git will pipe the diff through the shell + command defined by this configuration variable. 
The command may + mark up the diff further for human consumption, provided that it + retains a one-to-one correspondence with the lines in the + original diff. Defaults to disabled (no filtering). diff --git a/Documentation/config/log.txt b/Documentation/config/log.txt new file mode 100644 index 0000000000..78d9e4453a --- /dev/null +++ b/Documentation/config/log.txt @@ -0,0 +1,43 @@ +log.abbrevCommit:: + If true, makes linkgit:git-log[1], linkgit:git-show[1], and + linkgit:git-whatchanged[1] assume `--abbrev-commit`. You may + override this option with `--no-abbrev-commit`. + +log.date:: + Set the default date-time mode for the 'log' command. + Setting a value for log.date is similar to using 'git log''s + `--date` option. See linkgit:git-log[1] for details. + +log.decorate:: + Print out the ref names of any commits that are shown by the log + command. If 'short' is specified, the ref name prefixes 'refs/heads/', + 'refs/tags/' and 'refs/remotes/' will not be printed. If 'full' is + specified, the full ref name (including prefix) will be printed. + If 'auto' is specified, then if the output is going to a terminal, + the ref names are shown as if 'short' were given, otherwise no ref + names are shown. This is the same as the `--decorate` option + of `git log`. + +log.follow:: + If `true`, `git log` will act as if the `--follow` option was used when + a single <path> is given. This has the same limitations as `--follow`, + i.e. it cannot be used to follow multiple files and does not work well + on non-linear history. + +log.graphColors:: + A list of colors, separated by commas, that can be used to draw + history lines in `git log --graph`. + +log.showRoot:: + If true, the initial commit will be shown as a big creation event. + This is equivalent to a diff against an empty tree. + Tools like linkgit:git-log[1] or linkgit:git-whatchanged[1], which + normally hide the root commit, will now show it. True by default. + +log.showSignature:: + If true, makes linkgit:git-log[1], linkgit:git-show[1], and + linkgit:git-whatchanged[1] assume `--show-signature`. + +log.mailmap:: + If true, makes linkgit:git-log[1], linkgit:git-show[1], and + linkgit:git-whatchanged[1] assume `--use-mailmap`. diff --git a/Documentation/config/mailinfo.txt b/Documentation/config/mailinfo.txt new file mode 100644 index 0000000000..3854d4ae37 --- /dev/null +++ b/Documentation/config/mailinfo.txt @@ -0,0 +1,6 @@ +mailinfo.scissors:: + If true, makes linkgit:git-mailinfo[1] (and therefore + linkgit:git-am[1]) act by default as if the --scissors option + was provided on the command-line. When active, this feature + removes everything from the message body before a scissors + line (i.e. consisting mainly of ">8", "8<" and "-"). diff --git a/Documentation/config/mailmap.txt b/Documentation/config/mailmap.txt new file mode 100644 index 0000000000..48cbc30722 --- /dev/null +++ b/Documentation/config/mailmap.txt @@ -0,0 +1,15 @@ +mailmap.file:: + The location of an augmenting mailmap file. The default + mailmap, located in the root of the repository, is loaded + first, then the mailmap file pointed to by this variable. + The location of the mailmap file may be in a repository + subdirectory, or somewhere outside of the repository itself. + See linkgit:git-shortlog[1] and linkgit:git-blame[1]. + +mailmap.blob:: + Like `mailmap.file`, but consider the value as a reference to a + blob in the repository. If both `mailmap.file` and + `mailmap.blob` are given, both are parsed, with entries from + `mailmap.file` taking precedence.
In a bare repository, this + defaults to `HEAD:.mailmap`. In a non-bare repository, it + defaults to empty. diff --git a/Documentation/config/man.txt b/Documentation/config/man.txt new file mode 100644 index 0000000000..a727d987a8 --- /dev/null +++ b/Documentation/config/man.txt @@ -0,0 +1,12 @@ +man.viewer:: + Specify the programs that may be used to display help in the + 'man' format. See linkgit:git-help[1]. + +man.<tool>.cmd:: + Specify the command to invoke the specified man viewer. The + specified command is evaluated in shell with the man page + passed as argument. (See linkgit:git-help[1].) + +man.<tool>.path:: + Override the path for the given tool that may be used to + display help in the 'man' format. See linkgit:git-help[1]. diff --git a/Documentation/merge-config.txt b/Documentation/config/merge.txt index 662c2713ca..d389c73929 100644 --- a/Documentation/merge-config.txt +++ b/Documentation/config/merge.txt @@ -30,7 +30,7 @@ merge.verifySignatures:: If true, this is equivalent to the --verify-signatures command line option. See linkgit:git-merge[1] for details. -include::fmt-merge-msg-config.txt[] +include::fmt-merge-msg.txt[] merge.renameLimit:: The number of files to consider when performing rename detection @@ -63,7 +63,13 @@ merge.tool:: Any other value is treated as a custom merge tool and requires that a corresponding mergetool.<tool>.cmd variable is defined. -include::mergetools-merge.txt[] +merge.guitool:: + Controls which merge tool is used by linkgit:git-mergetool[1] when the + -g/--gui flag is specified. The list below shows the valid built-in values. + Any other value is treated as a custom merge tool and requires that a + corresponding mergetool.<guitool>.cmd variable is defined. + +include::../mergetools-merge.txt[] merge.verbosity:: Controls the amount of output shown by the recursive merge diff --git a/Documentation/config/mergetool.txt b/Documentation/config/mergetool.txt new file mode 100644 index 0000000000..09ed31dbfa --- /dev/null +++ b/Documentation/config/mergetool.txt @@ -0,0 +1,53 @@ +mergetool.<tool>.path:: + Override the path for the given tool. This is useful in case + your tool is not in the PATH. + +mergetool.<tool>.cmd:: + Specify the command to invoke the specified merge tool. The + specified command is evaluated in shell with the following + variables available: 'BASE' is the name of a temporary file + containing the common base of the files to be merged, if available; + 'LOCAL' is the name of a temporary file containing the contents of + the file on the current branch; 'REMOTE' is the name of a temporary + file containing the contents of the file from the branch being + merged; 'MERGED' contains the name of the file to which the merge + tool should write the results of a successful merge. + +mergetool.<tool>.trustExitCode:: + For a custom merge command, specify whether the exit code of + the merge command can be used to determine whether the merge was + successful. If this is not set to true then the merge target file + timestamp is checked and the merge assumed to have been successful + if the file has been updated, otherwise the user is prompted to + indicate the success of the merge. + +mergetool.meld.hasOutput:: + Older versions of `meld` do not support the `--output` option. + Git will attempt to detect whether `meld` supports `--output` + by inspecting the output of `meld --help`. Configuring + `mergetool.meld.hasOutput` will make Git skip these checks and + use the configured value instead. 
Setting `mergetool.meld.hasOutput` + to `true` tells Git to unconditionally use the `--output` option, + and `false` avoids using `--output`. + +mergetool.keepBackup:: + After performing a merge, the original file with conflict markers + can be saved as a file with a `.orig` extension. If this variable + is set to `false` then this file is not preserved. Defaults to + `true` (i.e. keep the backup files). + +mergetool.keepTemporaries:: + When invoking a custom merge tool, Git uses a set of temporary + files to pass to the tool. If the tool returns an error and this + variable is set to `true`, then these temporary files will be + preserved, otherwise they will be removed after the tool has + exited. Defaults to `false`. + +mergetool.writeToTemp:: + Git writes temporary 'BASE', 'LOCAL', and 'REMOTE' versions of + conflicting files in the worktree by default. Git will attempt + to use a temporary directory for these files when set `true`. + Defaults to `false`. + +mergetool.prompt:: + Prompt before each invocation of the merge resolution program. diff --git a/Documentation/config/notes.txt b/Documentation/config/notes.txt new file mode 100644 index 0000000000..aeef56d49a --- /dev/null +++ b/Documentation/config/notes.txt @@ -0,0 +1,59 @@ +notes.mergeStrategy:: + Which merge strategy to choose by default when resolving notes + conflicts. Must be one of `manual`, `ours`, `theirs`, `union`, or + `cat_sort_uniq`. Defaults to `manual`. See "NOTES MERGE STRATEGIES" + section of linkgit:git-notes[1] for more information on each strategy. + +notes.<name>.mergeStrategy:: + Which merge strategy to choose when doing a notes merge into + refs/notes/<name>. This overrides the more general + "notes.mergeStrategy". See the "NOTES MERGE STRATEGIES" section in + linkgit:git-notes[1] for more information on the available strategies. + +notes.displayRef:: + The (fully qualified) refname from which to show notes when + showing commit messages. The value of this variable can be set + to a glob, in which case notes from all matching refs will be + shown. You may also specify this configuration variable + several times. A warning will be issued for refs that do not + exist, but a glob that does not match any refs is silently + ignored. ++ +This setting can be overridden with the `GIT_NOTES_DISPLAY_REF` +environment variable, which must be a colon separated list of refs or +globs. ++ +The effective value of "core.notesRef" (possibly overridden by +GIT_NOTES_REF) is also implicitly added to the list of refs to be +displayed. + +notes.rewrite.<command>:: + When rewriting commits with <command> (currently `amend` or + `rebase`) and this variable is set to `true`, Git + automatically copies your notes from the original to the + rewritten commit. Defaults to `true`, but see + "notes.rewriteRef" below. + +notes.rewriteMode:: + When copying notes during a rewrite (see the + "notes.rewrite.<command>" option), determines what to do if + the target commit already has a note. Must be one of + `overwrite`, `concatenate`, `cat_sort_uniq`, or `ignore`. + Defaults to `concatenate`. ++ +This setting can be overridden with the `GIT_NOTES_REWRITE_MODE` +environment variable. + +notes.rewriteRef:: + When copying notes during a rewrite, specifies the (fully + qualified) ref whose notes should be copied. The ref may be a + glob, in which case notes in all matching refs will be copied. + You may also specify this configuration several times. ++ +Does not have a default value; you must configure this variable to +enable note rewriting. 
Set it to `refs/notes/commits` to enable +rewriting for the default commit notes. ++ +This setting can be overridden with the `GIT_NOTES_REWRITE_REF` +environment variable, which must be a colon separated list of refs or +globs. diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt new file mode 100644 index 0000000000..edac75c83f --- /dev/null +++ b/Documentation/config/pack.txt @@ -0,0 +1,120 @@ +pack.window:: + The size of the window used by linkgit:git-pack-objects[1] when no + window size is given on the command line. Defaults to 10. + +pack.depth:: + The maximum delta depth used by linkgit:git-pack-objects[1] when no + maximum depth is given on the command line. Defaults to 50. + Maximum value is 4095. + +pack.windowMemory:: + The maximum size of memory that is consumed by each thread + in linkgit:git-pack-objects[1] for pack window memory when + no limit is given on the command line. The value can be + suffixed with "k", "m", or "g". When left unconfigured (or + set explicitly to 0), there will be no limit. + +pack.compression:: + An integer -1..9, indicating the compression level for objects + in a pack file. -1 is the zlib default. 0 means no + compression, and 1..9 are various speed/size tradeoffs, 9 being + slowest. If not set, defaults to core.compression. If that is + not set, defaults to -1, the zlib default, which is "a default + compromise between speed and compression (currently equivalent + to level 6)." ++ +Note that changing the compression level will not automatically recompress +all existing objects. You can force recompression by passing the -F option +to linkgit:git-repack[1]. + +pack.island:: + An extended regular expression configuring a set of delta + islands. See "DELTA ISLANDS" in linkgit:git-pack-objects[1] + for details. + +pack.islandCore:: + Specify an island name which gets to have its objects be + packed first. This creates a kind of pseudo-pack at the front + of one pack, so that the objects from the specified island are + hopefully faster to copy into any pack that should be served + to a user requesting these objects. In practice this means + that the island specified should likely correspond to what is + the most commonly cloned in the repo. See also "DELTA ISLANDS" + in linkgit:git-pack-objects[1]. + +pack.deltaCacheSize:: + The maximum memory in bytes used for caching deltas in + linkgit:git-pack-objects[1] before writing them out to a pack. + This cache is used to speed up the writing object phase by not + having to recompute the final delta result once the best match + for all objects is found. Repacking large repositories on machines + which are tight with memory might be badly impacted by this though, + especially if this cache pushes the system into swapping. + A value of 0 means no limit. The smallest size of 1 byte may be + used to virtually disable this cache. Defaults to 256 MiB. + +pack.deltaCacheLimit:: + The maximum size of a delta, that is cached in + linkgit:git-pack-objects[1]. This cache is used to speed up the + writing object phase by not having to recompute the final delta + result once the best match for all objects is found. + Defaults to 1000. Maximum value is 65535. + +pack.threads:: + Specifies the number of threads to spawn when searching for best + delta matches. This requires that linkgit:git-pack-objects[1] + be compiled with pthreads otherwise this option is ignored with a + warning. This is meant to reduce packing time on multiprocessor + machines. 
The required amount of memory for the delta search window + is however multiplied by the number of threads. + Specifying 0 will cause Git to auto-detect the number of CPU's + and set the number of threads accordingly. + +pack.indexVersion:: + Specify the default pack index version. Valid values are 1 for + legacy pack index used by Git versions prior to 1.5.2, and 2 for + the new pack index with capabilities for packs larger than 4 GB + as well as proper protection against the repacking of corrupted + packs. Version 2 is the default. Note that version 2 is enforced + and this config option ignored whenever the corresponding pack is + larger than 2 GB. ++ +If you have an old Git that does not understand the version 2 `*.idx` file, +cloning or fetching over a non native protocol (e.g. "http") +that will copy both `*.pack` file and corresponding `*.idx` file from the +other side may give you a repository that cannot be accessed with your +older version of Git. If the `*.pack` file is smaller than 2 GB, however, +you can use linkgit:git-index-pack[1] on the *.pack file to regenerate +the `*.idx` file. + +pack.packSizeLimit:: + The maximum size of a pack. This setting only affects + packing to a file when repacking, i.e. the git:// protocol + is unaffected. It can be overridden by the `--max-pack-size` + option of linkgit:git-repack[1]. Reaching this limit results + in the creation of multiple packfiles; which in turn prevents + bitmaps from being created. + The minimum size allowed is limited to 1 MiB. + The default is unlimited. + Common unit suffixes of 'k', 'm', or 'g' are + supported. + +pack.useBitmaps:: + When true, git will use pack bitmaps (if available) when packing + to stdout (e.g., during the server side of a fetch). Defaults to + true. You should not generally need to turn this off unless + you are debugging pack bitmaps. + +pack.writeBitmaps (deprecated):: + This is a deprecated synonym for `repack.writeBitmaps`. + +pack.writeBitmapHashCache:: + When true, git will include a "hash cache" section in the bitmap + index (if one is written). This cache can be used to feed git's + delta heuristics, potentially leading to better deltas between + bitmapped and non-bitmapped objects (e.g., when serving a fetch + between an older, bitmapped pack and objects that have been + pushed since the last gc). The downside is that it consumes 4 + bytes per object of disk space, and that JGit's bitmap + implementation does not understand it, causing it to complain if + Git and JGit are used on the same repository. Defaults to false. diff --git a/Documentation/config/pager.txt b/Documentation/config/pager.txt new file mode 100644 index 0000000000..d3731cf66c --- /dev/null +++ b/Documentation/config/pager.txt @@ -0,0 +1,8 @@ +pager.<cmd>:: + If the value is boolean, turns on or off pagination of the + output of a particular Git subcommand when writing to a tty. + Otherwise, turns on pagination for the subcommand using the + pager specified by the value of `pager.<cmd>`. If `--paginate` + or `--no-pager` is specified on the command line, it takes + precedence over this option. To disable pagination for all + commands, set `core.pager` or `GIT_PAGER` to `cat`. diff --git a/Documentation/config/pretty.txt b/Documentation/config/pretty.txt new file mode 100644 index 0000000000..063c6b63d9 --- /dev/null +++ b/Documentation/config/pretty.txt @@ -0,0 +1,9 @@ +pretty.<name>:: + Alias for a --pretty= format string, as specified in + linkgit:git-log[1]. 
Any aliases defined here can be used just + as the built-in pretty formats could. For example, + running `git config pretty.changelog "format:* %H %s"` + would cause the invocation `git log --pretty=changelog` + to be equivalent to running `git log "--pretty=format:* %H %s"`. + Note that an alias with the same name as a built-in format + will be silently ignored. diff --git a/Documentation/config/protocol.txt b/Documentation/config/protocol.txt new file mode 100644 index 0000000000..bfccc07491 --- /dev/null +++ b/Documentation/config/protocol.txt @@ -0,0 +1,64 @@ +protocol.allow:: + If set, provide a user defined default policy for all protocols which + don't explicitly have a policy (`protocol.<name>.allow`). By default, + if unset, known-safe protocols (http, https, git, ssh, file) have a + default policy of `always`, known-dangerous protocols (ext) have a + default policy of `never`, and all other protocols have a default + policy of `user`. Supported policies: ++ +-- + +* `always` - protocol is always able to be used. + +* `never` - protocol is never able to be used. + +* `user` - protocol is only able to be used when `GIT_PROTOCOL_FROM_USER` is + either unset or has a value of 1. This policy should be used when you want a + protocol to be directly usable by the user but don't want it used by commands which + execute clone/fetch/push commands without user input, e.g. recursive + submodule initialization. + +-- + +protocol.<name>.allow:: + Set a policy to be used by protocol `<name>` with clone/fetch/push + commands. See `protocol.allow` above for the available policies. ++ +The protocol names currently used by git are: ++ +-- + - `file`: any local file-based path (including `file://` URLs, + or local paths) + + - `git`: the anonymous git protocol over a direct TCP + connection (or proxy, if configured) + + - `ssh`: git over ssh (including `host:path` syntax, + `ssh://`, etc). + + - `http`: git over http, both "smart http" and "dumb http". + Note that this does _not_ include `https`; if you want to configure + both, you must do so individually. + + - any external helpers are named by their protocol (e.g., use + `hg` to allow the `git-remote-hg` helper) +-- + +protocol.version:: + Experimental. If set, clients will attempt to communicate with a + server using the specified protocol version. If unset, no + attempt will be made by the client to communicate using a + particular protocol version, this results in protocol version 0 + being used. + Supported versions: ++ +-- + +* `0` - the original wire protocol. + +* `1` - the original wire protocol with the addition of a version string + in the initial response from the server. + +* `2` - link:technical/protocol-v2.html[wire protocol version 2]. + +-- diff --git a/Documentation/config/pull.txt b/Documentation/config/pull.txt new file mode 100644 index 0000000000..bb23a9947d --- /dev/null +++ b/Documentation/config/pull.txt @@ -0,0 +1,36 @@ +pull.ff:: + By default, Git does not create an extra merge commit when merging + a commit that is a descendant of the current commit. Instead, the + tip of the current branch is fast-forwarded. When set to `false`, + this variable tells Git to create an extra merge commit in such + a case (equivalent to giving the `--no-ff` option from the command + line). When set to `only`, only such fast-forward merges are + allowed (equivalent to giving the `--ff-only` option from the + command line). This setting overrides `merge.ff` when pulling. 
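A minimal sketch of the fast-forward-only behaviour just described, equivalent to always passing `--ff-only` to `git pull`:

------------
[pull]
	ff = only
------------

Because `pull.ff` overrides `merge.ff` when pulling, this restriction applies only to pulls and does not change how a standalone `git merge` behaves.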
+ +pull.rebase:: + When true, rebase branches on top of the fetched branch, instead + of merging the default branch from the default remote when "git + pull" is run. See "branch.<name>.rebase" for setting this on a + per-branch basis. ++ +When `merges`, pass the `--rebase-merges` option to 'git rebase' +so that the local merge commits are included in the rebase (see +linkgit:git-rebase[1] for details). ++ +When preserve, also pass `--preserve-merges` along to 'git rebase' +so that locally committed merge commits will not be flattened +by running 'git pull'. ++ +When the value is `interactive`, the rebase is run in interactive mode. ++ +*NOTE*: this is a possibly dangerous operation; do *not* use +it unless you understand the implications (see linkgit:git-rebase[1] +for details). + +pull.octopus:: + The default merge strategy to use when pulling multiple branches + at once. + +pull.twohead:: + The default merge strategy to use when pulling a single branch. diff --git a/Documentation/config/push.txt b/Documentation/config/push.txt new file mode 100644 index 0000000000..0a0e000569 --- /dev/null +++ b/Documentation/config/push.txt @@ -0,0 +1,113 @@ +push.default:: + Defines the action `git push` should take if no refspec is + explicitly given. Different values are well-suited for + specific workflows; for instance, in a purely central workflow + (i.e. the fetch source is equal to the push destination), + `upstream` is probably what you want. Possible values are: ++ +-- + +* `nothing` - do not push anything (error out) unless a refspec is + explicitly given. This is primarily meant for people who want to + avoid mistakes by always being explicit. + +* `current` - push the current branch to update a branch with the same + name on the receiving end. Works in both central and non-central + workflows. + +* `upstream` - push the current branch back to the branch whose + changes are usually integrated into the current branch (which is + called `@{upstream}`). This mode only makes sense if you are + pushing to the same repository you would normally pull from + (i.e. central workflow). + +* `tracking` - This is a deprecated synonym for `upstream`. + +* `simple` - in centralized workflow, work like `upstream` with an + added safety to refuse to push if the upstream branch's name is + different from the local one. ++ +When pushing to a remote that is different from the remote you normally +pull from, work as `current`. This is the safest option and is suited +for beginners. ++ +This mode has become the default in Git 2.0. + +* `matching` - push all branches having the same name on both ends. + This makes the repository you are pushing to remember the set of + branches that will be pushed out (e.g. if you always push 'maint' + and 'master' there and no other branches, the repository you push + to will have these two branches, and your local 'maint' and + 'master' will be pushed there). ++ +To use this mode effectively, you have to make sure _all_ the +branches you would push out are ready to be pushed out before +running 'git push', as the whole point of this mode is to allow you +to push all of the branches in one go. If you usually finish work +on only one branch and push out the result, while other branches are +unfinished, this mode is not for you. Also this mode is not +suitable for pushing into a shared central repository, as other +people may add new branches there, or update the tip of existing +branches outside your control. 
++ +This used to be the default, but not since Git 2.0 (`simple` is the +new default). + +-- + +push.followTags:: + If set to true, enable the `--follow-tags` option by default. You + may override this configuration at time of push by specifying + `--no-follow-tags`. + +push.gpgSign:: + May be set to a boolean value, or the string 'if-asked'. A true + value causes all pushes to be GPG signed, as if `--signed` is + passed to linkgit:git-push[1]. The string 'if-asked' causes + pushes to be signed if the server supports it, as if + `--signed=if-asked` is passed to 'git push'. A false value may + override a value from a lower-priority config file. An explicit + command-line flag always overrides this config option. + +push.pushOption:: + When no `--push-option=<option>` argument is given from the + command line, `git push` behaves as if each <value> of + this variable is given as `--push-option=<value>`. ++ +This is a multi-valued variable, and an empty value can be used in a +higher-priority configuration file (e.g. `.git/config` in a +repository) to clear the values inherited from lower-priority +configuration files (e.g. `$HOME/.gitconfig`). ++ +-- + +Example: + +/etc/gitconfig + push.pushoption = a + push.pushoption = b + +~/.gitconfig + push.pushoption = c + +repo/.git/config + push.pushoption = + push.pushoption = b + +This will result in only b (a and c are cleared). + +-- + +push.recurseSubmodules:: + Make sure all submodule commits used by the revisions to be pushed + are available on a remote-tracking branch. If the value is 'check' + then Git will verify that all submodule commits that changed in the + revisions to be pushed are available on at least one remote of the + submodule. If any commits are missing, the push will be aborted and + exit with non-zero status. If the value is 'on-demand' then all + submodules that changed in the revisions to be pushed will be + pushed. If on-demand was not able to push all necessary revisions + it will also be aborted and exit with non-zero status. If the value + is 'no' then the default behavior of ignoring submodules when pushing + is retained. You may override this configuration at time of push by + specifying '--recurse-submodules=check|on-demand|no'. diff --git a/Documentation/rebase-config.txt b/Documentation/config/rebase.txt index 42e1ba7575..f079bf6b7e 100644 --- a/Documentation/rebase-config.txt +++ b/Documentation/config/rebase.txt @@ -1,3 +1,17 @@ +rebase.useBuiltin:: + Set to `false` to use the legacy shellscript implementation of + linkgit:git-rebase[1]. It is `true` by default, which means use + the built-in rewrite of it in C. ++ +The C rewrite is first included with Git version 2.20. This option +serves as an escape hatch to re-enable the legacy version in case any +bugs are found in the rewrite. This option and the shellscript version +of linkgit:git-rebase[1] will be removed in some future release. ++ +If you find some reason to set this option to `false` other than +one-off testing, you should report the behavior difference as a bug in +git. + rebase.stat:: Whether to show a diffstat of what changed upstream since the last rebase. False by default. diff --git a/Documentation/config/receive.txt b/Documentation/config/receive.txt new file mode 100644 index 0000000000..65f78aac37 --- /dev/null +++ b/Documentation/config/receive.txt @@ -0,0 +1,123 @@ +receive.advertiseAtomic:: + By default, git-receive-pack will advertise the atomic push + capability to its clients.
If you don't want to advertise this + capability, set this variable to false. + +receive.advertisePushOptions:: + When set to true, git-receive-pack will advertise the push options + capability to its clients. False by default. + +receive.autogc:: + By default, git-receive-pack will run "git-gc --auto" after + receiving data from git-push and updating refs. You can stop + it by setting this variable to false. + +receive.certNonceSeed:: + By setting this variable to a string, `git receive-pack` + will accept a `git push --signed` and verifies it by using + a "nonce" protected by HMAC using this string as a secret + key. + +receive.certNonceSlop:: + When a `git push --signed` sent a push certificate with a + "nonce" that was issued by a receive-pack serving the same + repository within this many seconds, export the "nonce" + found in the certificate to `GIT_PUSH_CERT_NONCE` to the + hooks (instead of what the receive-pack asked the sending + side to include). This may allow writing checks in + `pre-receive` and `post-receive` a bit easier. Instead of + checking `GIT_PUSH_CERT_NONCE_SLOP` environment variable + that records by how many seconds the nonce is stale to + decide if they want to accept the certificate, they only + can check `GIT_PUSH_CERT_NONCE_STATUS` is `OK`. + +receive.fsckObjects:: + If it is set to true, git-receive-pack will check all received + objects. See `transfer.fsckObjects` for what's checked. + Defaults to false. If not set, the value of + `transfer.fsckObjects` is used instead. + +receive.fsck.<msg-id>:: + Acts like `fsck.<msg-id>`, but is used by + linkgit:git-receive-pack[1] instead of + linkgit:git-fsck[1]. See the `fsck.<msg-id>` documentation for + details. + +receive.fsck.skipList:: + Acts like `fsck.skipList`, but is used by + linkgit:git-receive-pack[1] instead of + linkgit:git-fsck[1]. See the `fsck.skipList` documentation for + details. + +receive.keepAlive:: + After receiving the pack from the client, `receive-pack` may + produce no output (if `--quiet` was specified) while processing + the pack, causing some networks to drop the TCP connection. + With this option set, if `receive-pack` does not transmit + any data in this phase for `receive.keepAlive` seconds, it will + send a short keepalive packet. The default is 5 seconds; set + to 0 to disable keepalives entirely. + +receive.unpackLimit:: + If the number of objects received in a push is below this + limit then the objects will be unpacked into loose object + files. However if the number of received objects equals or + exceeds this limit then the received pack will be stored as + a pack, after adding any missing delta bases. Storing the + pack from a push can make the push operation complete faster, + especially on slow filesystems. If not set, the value of + `transfer.unpackLimit` is used instead. + +receive.maxInputSize:: + If the size of the incoming pack stream is larger than this + limit, then git-receive-pack will error out, instead of + accepting the pack file. If not set or set to 0, then the size + is unlimited. + +receive.denyDeletes:: + If set to true, git-receive-pack will deny a ref update that deletes + the ref. Use this to prevent such a ref deletion via a push. + +receive.denyDeleteCurrent:: + If set to true, git-receive-pack will deny a ref update that + deletes the currently checked out branch of a non-bare repository. + +receive.denyCurrentBranch:: + If set to true or "refuse", git-receive-pack will deny a ref update + to the currently checked out branch of a non-bare repository. 
+ Such a push is potentially dangerous because it brings the HEAD + out of sync with the index and working tree. If set to "warn", + print a warning of such a push to stderr, but allow the push to + proceed. If set to false or "ignore", allow such pushes with no + message. Defaults to "refuse". ++ +Another option is "updateInstead" which will update the working +tree if pushing into the current branch. This option is +intended for synchronizing working directories when one side is not easily +accessible via interactive ssh (e.g. a live web site, hence the requirement +that the working directory be clean). This mode also comes in handy when +developing inside a VM to test and fix code on different Operating Systems. ++ +By default, "updateInstead" will refuse the push if the working tree or +the index have any difference from the HEAD, but the `push-to-checkout` +hook can be used to customize this. See linkgit:githooks[5]. + +receive.denyNonFastForwards:: + If set to true, git-receive-pack will deny a ref update which is + not a fast-forward. Use this to prevent such an update via a push, + even if that push is forced. This configuration variable is + set when initializing a shared repository. + +receive.hideRefs:: + This variable is the same as `transfer.hideRefs`, but applies + only to `receive-pack` (and so affects pushes, but not fetches). + An attempt to update or delete a hidden ref by `git push` is + rejected. + +receive.updateServerInfo:: + If set to true, git-receive-pack will run git-update-server-info + after receiving data from git-push and updating refs. + +receive.shallowUpdate:: + If set to true, .git/shallow can be updated when new refs + require new shallow roots. Otherwise those refs are rejected. diff --git a/Documentation/config/remote.txt b/Documentation/config/remote.txt new file mode 100644 index 0000000000..6c4cad83a2 --- /dev/null +++ b/Documentation/config/remote.txt @@ -0,0 +1,78 @@ +remote.pushDefault:: + The remote to push to by default. Overrides + `branch.<name>.remote` for all branches, and is overridden by + `branch.<name>.pushRemote` for specific branches. + +remote.<name>.url:: + The URL of a remote repository. See linkgit:git-fetch[1] or + linkgit:git-push[1]. + +remote.<name>.pushurl:: + The push URL of a remote repository. See linkgit:git-push[1]. + +remote.<name>.proxy:: + For remotes that require curl (http, https and ftp), the URL to + the proxy to use for that remote. Set to the empty string to + disable proxying for that remote. + +remote.<name>.proxyAuthMethod:: + For remotes that require curl (http, https and ftp), the method to use for + authenticating against the proxy in use (probably set in + `remote.<name>.proxy`). See `http.proxyAuthMethod`. + +remote.<name>.fetch:: + The default set of "refspec" for linkgit:git-fetch[1]. See + linkgit:git-fetch[1]. + +remote.<name>.push:: + The default set of "refspec" for linkgit:git-push[1]. See + linkgit:git-push[1]. + +remote.<name>.mirror:: + If true, pushing to this remote will automatically behave + as if the `--mirror` option was given on the command line. + +remote.<name>.skipDefaultUpdate:: + If true, this remote will be skipped by default when updating + using linkgit:git-fetch[1] or the `update` subcommand of + linkgit:git-remote[1]. + +remote.<name>.skipFetchAll:: + If true, this remote will be skipped by default when updating + using linkgit:git-fetch[1] or the `update` subcommand of + linkgit:git-remote[1]. 
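To make the `remote.<name>.*` keys above concrete, here is a sketch for a hypothetical remote named `backup`; the URLs and refspec are placeholders:

------------
[remote "backup"]
	url = https://git.example.com/project.git
	pushurl = ssh://git@git.example.com/project.git
	fetch = +refs/heads/*:refs/remotes/backup/*
	skipFetchAll = true   ; skip this remote by default when updating, as noted above
------------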
+ +remote.<name>.receivepack:: + The default program to execute on the remote side when pushing. See + option --receive-pack of linkgit:git-push[1]. + +remote.<name>.uploadpack:: + The default program to execute on the remote side when fetching. See + option --upload-pack of linkgit:git-fetch-pack[1]. + +remote.<name>.tagOpt:: + Setting this value to --no-tags disables automatic tag following when + fetching from remote <name>. Setting it to --tags will fetch every + tag from remote <name>, even if they are not reachable from remote + branch heads. Passing these flags directly to linkgit:git-fetch[1] can + override this setting. See options --tags and --no-tags of + linkgit:git-fetch[1]. + +remote.<name>.vcs:: + Setting this to a value <vcs> will cause Git to interact with + the remote with the git-remote-<vcs> helper. + +remote.<name>.prune:: + When set to true, fetching from this remote by default will also + remove any remote-tracking references that no longer exist on the + remote (as if the `--prune` option was given on the command line). + Overrides `fetch.prune` settings, if any. + +remote.<name>.pruneTags:: + When set to true, fetching from this remote by default will also + remove any local tags that no longer exist on the remote if pruning + is activated in general via `remote.<name>.prune`, `fetch.prune` or + `--prune`. Overrides `fetch.pruneTags` settings, if any. ++ +See also `remote.<name>.prune` and the PRUNING section of +linkgit:git-fetch[1]. diff --git a/Documentation/config/remotes.txt b/Documentation/config/remotes.txt new file mode 100644 index 0000000000..4cfe03221e --- /dev/null +++ b/Documentation/config/remotes.txt @@ -0,0 +1,3 @@ +remotes.<group>:: + The list of remotes which are fetched by "git remote update + <group>". See linkgit:git-remote[1]. diff --git a/Documentation/config/repack.txt b/Documentation/config/repack.txt new file mode 100644 index 0000000000..a5c37813fd --- /dev/null +++ b/Documentation/config/repack.txt @@ -0,0 +1,27 @@ +repack.useDeltaBaseOffset:: + By default, linkgit:git-repack[1] creates packs that use + delta-base offset. If you need to share your repository with + Git older than version 1.4.4, either directly or via a dumb + protocol such as http, then you need to set this option to + "false" and repack. Access from old Git versions over the + native protocol are unaffected by this option. + +repack.packKeptObjects:: + If set to true, makes `git repack` act as if + `--pack-kept-objects` was passed. See linkgit:git-repack[1] for + details. Defaults to `false` normally, but `true` if a bitmap + index is being written (either via `--write-bitmap-index` or + `repack.writeBitmaps`). + +repack.useDeltaIslands:: + If set to true, makes `git repack` act as if `--delta-islands` + was passed. Defaults to `false`. + +repack.writeBitmaps:: + When true, git will write a bitmap index when packing all + objects to disk (e.g., when `git repack -a` is run). This + index can speed up the "counting objects" phase of subsequent + packs created for clones and fetches, at the cost of some disk + space and extra time spent on the initial repack. This has + no effect if multiple packfiles are created. + Defaults to false. 
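As a sketch of the bitmap-related knobs described above (whether they pay off depends on how often the repository serves clones and fetches):

------------
[repack]
	writeBitmaps = true         ; build a bitmap index when repacking all objects
	useDeltaIslands = false     ; the default; same as not passing --delta-islands
[pack]
	writeBitmapHashCache = true ; better deltas, but JGit's bitmap code will complain
------------

Note that enabling `repack.writeBitmaps` also flips the effective default of `repack.packKeptObjects` to `true`, as described above.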
diff --git a/Documentation/config/rerere.txt b/Documentation/config/rerere.txt new file mode 100644 index 0000000000..40abdf6a6b --- /dev/null +++ b/Documentation/config/rerere.txt @@ -0,0 +1,12 @@ +rerere.autoUpdate:: + When set to true, `git-rerere` updates the index with the + resulting contents after it cleanly resolves conflicts using + previously recorded resolution. Defaults to false. + +rerere.enabled:: + Activate recording of resolved conflicts, so that identical + conflict hunks can be resolved automatically, should they be + encountered again. By default, linkgit:git-rerere[1] is + enabled if there is an `rr-cache` directory under the + `$GIT_DIR`, e.g. if "rerere" was previously used in the + repository. diff --git a/Documentation/config/reset.txt b/Documentation/config/reset.txt new file mode 100644 index 0000000000..63b7c45aac --- /dev/null +++ b/Documentation/config/reset.txt @@ -0,0 +1,2 @@ +reset.quiet:: + When set to true, 'git reset' will default to the '--quiet' option. diff --git a/Documentation/config/sendemail.txt b/Documentation/config/sendemail.txt new file mode 100644 index 0000000000..0006faf800 --- /dev/null +++ b/Documentation/config/sendemail.txt @@ -0,0 +1,63 @@ +sendemail.identity:: + A configuration identity. When given, causes values in the + 'sendemail.<identity>' subsection to take precedence over + values in the 'sendemail' section. The default identity is + the value of `sendemail.identity`. + +sendemail.smtpEncryption:: + See linkgit:git-send-email[1] for description. Note that this + setting is not subject to the 'identity' mechanism. + +sendemail.smtpssl (deprecated):: + Deprecated alias for 'sendemail.smtpEncryption = ssl'. + +sendemail.smtpsslcertpath:: + Path to ca-certificates (either a directory or a single file). + Set it to an empty string to disable certificate verification. + +sendemail.<identity>.*:: + Identity-specific versions of the 'sendemail.*' parameters + found below, taking precedence over those when this + identity is selected, through either the command-line or + `sendemail.identity`. + +sendemail.aliasesFile:: +sendemail.aliasFileType:: +sendemail.annotate:: +sendemail.bcc:: +sendemail.cc:: +sendemail.ccCmd:: +sendemail.chainReplyTo:: +sendemail.confirm:: +sendemail.envelopeSender:: +sendemail.from:: +sendemail.multiEdit:: +sendemail.signedoffbycc:: +sendemail.smtpPass:: +sendemail.suppresscc:: +sendemail.suppressFrom:: +sendemail.to:: +sendemail.tocmd:: +sendemail.smtpDomain:: +sendemail.smtpServer:: +sendemail.smtpServerPort:: +sendemail.smtpServerOption:: +sendemail.smtpUser:: +sendemail.thread:: +sendemail.transferEncoding:: +sendemail.validate:: +sendemail.xmailer:: + See linkgit:git-send-email[1] for description. + +sendemail.signedoffcc (deprecated):: + Deprecated alias for `sendemail.signedoffbycc`. + +sendemail.smtpBatchSize:: + Number of messages to be sent per connection, after that a relogin + will happen. If the value is 0 or undefined, send all messages in + one connection. + See also the `--batch-size` option of linkgit:git-send-email[1]. + +sendemail.smtpReloginDelay:: + Seconds wait before reconnecting to smtp server. + See also the `--relogin-delay` option of linkgit:git-send-email[1]. diff --git a/Documentation/config/sequencer.txt b/Documentation/config/sequencer.txt new file mode 100644 index 0000000000..b48d532a96 --- /dev/null +++ b/Documentation/config/sequencer.txt @@ -0,0 +1,5 @@ +sequence.editor:: + Text editor used by `git rebase -i` for editing the rebase instruction file. 
+ The value is meant to be interpreted by the shell when it is used. + It can be overridden by the `GIT_SEQUENCE_EDITOR` environment variable. + When not configured the default commit message editor is used instead. diff --git a/Documentation/config/showbranch.txt b/Documentation/config/showbranch.txt new file mode 100644 index 0000000000..e79ecd9ee9 --- /dev/null +++ b/Documentation/config/showbranch.txt @@ -0,0 +1,3 @@ +showBranch.default:: + The default set of branches for linkgit:git-show-branch[1]. + See linkgit:git-show-branch[1]. diff --git a/Documentation/config/splitindex.txt b/Documentation/config/splitindex.txt new file mode 100644 index 0000000000..afdb186df8 --- /dev/null +++ b/Documentation/config/splitindex.txt @@ -0,0 +1,24 @@ +splitIndex.maxPercentChange:: + When the split index feature is used, this specifies the + percent of entries the split index can contain compared to the + total number of entries in both the split index and the shared + index before a new shared index is written. + The value should be between 0 and 100. If the value is 0 then + a new shared index is always written, if it is 100 a new + shared index is never written. + By default the value is 20, so a new shared index is written + if the number of entries in the split index would be greater + than 20 percent of the total number of entries. + See linkgit:git-update-index[1]. + +splitIndex.sharedIndexExpire:: + When the split index feature is used, shared index files that + were not modified since the time this variable specifies will + be removed when a new shared index file is created. The value + "now" expires all entries immediately, and "never" suppresses + expiration altogether. + The default value is "2.weeks.ago". + Note that a shared index file is considered modified (for the + purpose of expiration) each time a new split-index file is + either created based on it or read from it. + See linkgit:git-update-index[1]. diff --git a/Documentation/config/ssh.txt b/Documentation/config/ssh.txt new file mode 100644 index 0000000000..2ca4bf93e1 --- /dev/null +++ b/Documentation/config/ssh.txt @@ -0,0 +1,35 @@ +ssh.variant:: + By default, Git determines the command line arguments to use + based on the basename of the configured SSH command (configured + using the environment variable `GIT_SSH` or `GIT_SSH_COMMAND` or + the config setting `core.sshCommand`). If the basename is + unrecognized, Git will attempt to detect support of OpenSSH + options by first invoking the configured SSH command with the + `-G` (print configuration) option and will subsequently use + OpenSSH options (if that is successful) or no options besides + the host and remote command (if it fails). ++ +The config variable `ssh.variant` can be set to override this detection. +Valid values are `ssh` (to use OpenSSH options), `plink`, `putty`, +`tortoiseplink`, `simple` (no options except the host and remote command). +The default auto-detection can be explicitly requested using the value +`auto`. Any other value is treated as `ssh`. This setting can also be +overridden via the environment variable `GIT_SSH_VARIANT`. 
++ +The current command-line parameters used for each variant are as +follows: ++ +-- + +* `ssh` - [-p port] [-4] [-6] [-o option] [username@]host command + +* `simple` - [username@]host command + +* `plink` or `putty` - [-P port] [-4] [-6] [username@]host command + +* `tortoiseplink` - [-P port] [-4] [-6] -batch [username@]host command + +-- ++ +Except for the `simple` variant, command-line parameters are likely to +change as git gains new features. diff --git a/Documentation/config/stash.txt b/Documentation/config/stash.txt new file mode 100644 index 0000000000..c583d46d6b --- /dev/null +++ b/Documentation/config/stash.txt @@ -0,0 +1,9 @@ +stash.showPatch:: + If this is set to true, the `git stash show` command without an + option will show the stash entry in patch form. Defaults to false. + See description of 'show' command in linkgit:git-stash[1]. + +stash.showStat:: + If this is set to true, the `git stash show` command without an + option will show diffstat of the stash entry. Defaults to true. + See description of 'show' command in linkgit:git-stash[1]. diff --git a/Documentation/config/status.txt b/Documentation/config/status.txt new file mode 100644 index 0000000000..ed72fa7dae --- /dev/null +++ b/Documentation/config/status.txt @@ -0,0 +1,72 @@ +status.relativePaths:: + By default, linkgit:git-status[1] shows paths relative to the + current directory. Setting this variable to `false` shows paths + relative to the repository root (this was the default for Git + prior to v1.5.4). + +status.short:: + Set to true to enable --short by default in linkgit:git-status[1]. + The option --no-short takes precedence over this variable. + +status.branch:: + Set to true to enable --branch by default in linkgit:git-status[1]. + The option --no-branch takes precedence over this variable. + +status.displayCommentPrefix:: + If set to true, linkgit:git-status[1] will insert a comment + prefix before each output line (starting with + `core.commentChar`, i.e. `#` by default). This was the + behavior of linkgit:git-status[1] in Git 1.8.4 and previous. + Defaults to false. + +status.renameLimit:: + The number of files to consider when performing rename detection + in linkgit:git-status[1] and linkgit:git-commit[1]. Defaults to + the value of diff.renameLimit. + +status.renames:: + Whether and how Git detects renames in linkgit:git-status[1] and + linkgit:git-commit[1] . If set to "false", rename detection is + disabled. If set to "true", basic rename detection is enabled. + If set to "copies" or "copy", Git will detect copies, as well. + Defaults to the value of diff.renames. + +status.showStash:: + If set to true, linkgit:git-status[1] will display the number of + entries currently stashed away. + Defaults to false. + +status.showUntrackedFiles:: + By default, linkgit:git-status[1] and linkgit:git-commit[1] show + files which are not currently tracked by Git. Directories which + contain only untracked files, are shown with the directory name + only. Showing untracked files means that Git needs to lstat() all + the files in the whole repository, which might be slow on some + systems. So, this variable controls how the commands displays + the untracked files. Possible values are: ++ +-- +* `no` - Show no untracked files. +* `normal` - Show untracked files and directories. +* `all` - Show also individual files in untracked directories. +-- ++ +If this variable is not specified, it defaults to 'normal'. 
+This variable can be overridden with the -u|--untracked-files option +of linkgit:git-status[1] and linkgit:git-commit[1]. + +status.submoduleSummary:: + Defaults to false. + If this is set to a non zero number or true (identical to -1 or an + unlimited number), the submodule summary will be enabled and a + summary of commits for modified submodules will be shown (see + --summary-limit option of linkgit:git-submodule[1]). Please note + that the summary output command will be suppressed for all + submodules when `diff.ignoreSubmodules` is set to 'all' or only + for those submodules where `submodule.<name>.ignore=all`. The only + exception to that rule is that status and commit will show staged + submodule changes. To + also view the summary for ignored submodules you can either use + the --ignore-submodules=dirty command-line option or the 'git + submodule summary' command, which shows a similar output but does + not honor these settings. diff --git a/Documentation/config/submodule.txt b/Documentation/config/submodule.txt new file mode 100644 index 0000000000..0a1293b051 --- /dev/null +++ b/Documentation/config/submodule.txt @@ -0,0 +1,82 @@ +submodule.<name>.url:: + The URL for a submodule. This variable is copied from the .gitmodules + file to the git config via 'git submodule init'. The user can change + the configured URL before obtaining the submodule via 'git submodule + update'. If neither submodule.<name>.active or submodule.active are + set, the presence of this variable is used as a fallback to indicate + whether the submodule is of interest to git commands. + See linkgit:git-submodule[1] and linkgit:gitmodules[5] for details. + +submodule.<name>.update:: + The method by which a submodule is updated by 'git submodule update', + which is the only affected command, others such as + 'git checkout --recurse-submodules' are unaffected. It exists for + historical reasons, when 'git submodule' was the only command to + interact with submodules; settings like `submodule.active` + and `pull.rebase` are more specific. It is populated by + `git submodule init` from the linkgit:gitmodules[5] file. + See description of 'update' command in linkgit:git-submodule[1]. + +submodule.<name>.branch:: + The remote branch name for a submodule, used by `git submodule + update --remote`. Set this option to override the value found in + the `.gitmodules` file. See linkgit:git-submodule[1] and + linkgit:gitmodules[5] for details. + +submodule.<name>.fetchRecurseSubmodules:: + This option can be used to control recursive fetching of this + submodule. It can be overridden by using the --[no-]recurse-submodules + command-line option to "git fetch" and "git pull". + This setting will override that from in the linkgit:gitmodules[5] + file. + +submodule.<name>.ignore:: + Defines under what circumstances "git status" and the diff family show + a submodule as modified. When set to "all", it will never be considered + modified (but it will nonetheless show up in the output of status and + commit when it has been staged), "dirty" will ignore all changes + to the submodules work tree and + takes only differences between the HEAD of the submodule and the commit + recorded in the superproject into account. "untracked" will additionally + let submodules with modified tracked files in their work tree show up. + Using "none" (the default when this option is not set) also shows + submodules that have untracked files in their work tree as changed. 
+ This setting overrides any setting made in .gitmodules for this submodule, + both settings can be overridden on the command line by using the + "--ignore-submodules" option. The 'git submodule' commands are not + affected by this setting. + +submodule.<name>.active:: + Boolean value indicating if the submodule is of interest to git + commands. This config option takes precedence over the + submodule.active config option. See linkgit:gitsubmodules[7] for + details. + +submodule.active:: + A repeated field which contains a pathspec used to match against a + submodule's path to determine if the submodule is of interest to git + commands. See linkgit:gitsubmodules[7] for details. + +submodule.recurse:: + Specifies if commands recurse into submodules by default. This + applies to all commands that have a `--recurse-submodules` option, + except `clone`. + Defaults to false. + +submodule.fetchJobs:: + Specifies how many submodules are fetched/cloned at the same time. + A positive integer allows up to that number of submodules fetched + in parallel. A value of 0 will give some reasonable default. + If unset, it defaults to 1. + +submodule.alternateLocation:: + Specifies how the submodules obtain alternates when submodules are + cloned. Possible values are `no`, `superproject`. + By default `no` is assumed, which doesn't add references. When the + value is set to `superproject` the submodule to be cloned computes + its alternates location relative to the superprojects alternate. + +submodule.alternateErrorStrategy:: + Specifies how to treat errors with the alternates for a submodule + as computed via `submodule.alternateLocation`. Possible values are + `ignore`, `info`, `die`. Default is `die`. diff --git a/Documentation/config/tag.txt b/Documentation/config/tag.txt new file mode 100644 index 0000000000..663663bdec --- /dev/null +++ b/Documentation/config/tag.txt @@ -0,0 +1,16 @@ +tag.forceSignAnnotated:: + A boolean to specify whether annotated tags created should be GPG signed. + If `--annotate` is specified on the command line, it takes + precedence over this option. + +tag.sort:: + This variable controls the sort ordering of tags when displayed by + linkgit:git-tag[1]. Without the "--sort=<value>" option provided, the + value of this variable will be used as the default. + +tar.umask:: + This variable can be used to restrict the permission bits of + tar archive entries. The default is 0002, which turns off the + world write bit. The special value "user" indicates that the + archiving user's umask will be used instead. See umask(2) and + linkgit:git-archive[1]. diff --git a/Documentation/config/transfer.txt b/Documentation/config/transfer.txt new file mode 100644 index 0000000000..4a5dfe2fc1 --- /dev/null +++ b/Documentation/config/transfer.txt @@ -0,0 +1,71 @@ +transfer.fsckObjects:: + When `fetch.fsckObjects` or `receive.fsckObjects` are + not set, the value of this variable is used instead. + Defaults to false. ++ +When set, the fetch or receive will abort in the case of a malformed +object or a link to a nonexistent object. In addition, various other +issues are checked for, including legacy issues (see `fsck.<msg-id>`), +and potential security issues like the existence of a `.GIT` directory +or a malicious `.gitmodules` file (see the release notes for v2.2.1 +and v2.17.1 for details). Other sanity and security checks may be +added in future releases. 
++ +On the receiving side, failing fsckObjects will make those objects +unreachable, see "QUARANTINE ENVIRONMENT" in +linkgit:git-receive-pack[1]. On the fetch side, malformed objects will +instead be left unreferenced in the repository. ++ +Due to the non-quarantine nature of the `fetch.fsckObjects` +implementation it can not be relied upon to leave the object store +clean like `receive.fsckObjects` can. ++ +As objects are unpacked they're written to the object store, so there +can be cases where malicious objects get introduced even though the +"fetch" failed, only to have a subsequent "fetch" succeed because only +new incoming objects are checked, not those that have already been +written to the object store. That difference in behavior should not be +relied upon. In the future, such objects may be quarantined for +"fetch" as well. ++ +For now, the paranoid need to find some way to emulate the quarantine +environment if they'd like the same protection as "push". E.g. in the +case of an internal mirror do the mirroring in two steps, one to fetch +the untrusted objects, and then do a second "push" (which will use the +quarantine) to another internal repo, and have internal clients +consume this pushed-to repository, or embargo internal fetches and +only allow them once a full "fsck" has run (and no new fetches have +happened in the meantime). + +transfer.hideRefs:: + String(s) `receive-pack` and `upload-pack` use to decide which + refs to omit from their initial advertisements. Use more than + one definition to specify multiple prefix strings. A ref that is + under the hierarchies listed in the value of this variable is + excluded, and is hidden when responding to `git push` or `git + fetch`. See `receive.hideRefs` and `uploadpack.hideRefs` for + program-specific versions of this config. ++ +You may also include a `!` in front of the ref name to negate the entry, +explicitly exposing it, even if an earlier entry marked it as hidden. +If you have multiple hideRefs values, later entries override earlier ones +(and entries in more-specific config files override less-specific ones). ++ +If a namespace is in use, the namespace prefix is stripped from each +reference before it is matched against `transfer.hiderefs` patterns. +For example, if `refs/heads/master` is specified in `transfer.hideRefs` and +the current namespace is `foo`, then `refs/namespaces/foo/refs/heads/master` +is omitted from the advertisements but `refs/heads/master` and +`refs/namespaces/bar/refs/heads/master` are still advertised as so-called +"have" lines. In order to match refs before stripping, add a `^` in front of +the ref name. If you combine `!` and `^`, `!` must be specified first. ++ +Even if you hide refs, a client may still be able to steal the target +objects via the techniques described in the "SECURITY" section of the +linkgit:gitnamespaces[7] man page; it's best to keep private data in a +separate repository. + +transfer.unpackLimit:: + When `fetch.unpackLimit` or `receive.unpackLimit` are + not set, the value of this variable is used instead. + The default value is 100. diff --git a/Documentation/config/uploadarchive.txt b/Documentation/config/uploadarchive.txt new file mode 100644 index 0000000000..e0698e8c1d --- /dev/null +++ b/Documentation/config/uploadarchive.txt @@ -0,0 +1,6 @@ +uploadarchive.allowUnreachable:: + If true, allow clients to use `git archive --remote` to request + any tree, whether reachable from the ref tips or not. 
See the + discussion in the "SECURITY" section of + linkgit:git-upload-archive[1] for more details. Defaults to + `false`. diff --git a/Documentation/config/uploadpack.txt b/Documentation/config/uploadpack.txt new file mode 100644 index 0000000000..ed1c835695 --- /dev/null +++ b/Documentation/config/uploadpack.txt @@ -0,0 +1,65 @@ +uploadpack.hideRefs:: + This variable is the same as `transfer.hideRefs`, but applies + only to `upload-pack` (and so affects only fetches, not pushes). + An attempt to fetch a hidden ref by `git fetch` will fail. See + also `uploadpack.allowTipSHA1InWant`. + +uploadpack.allowTipSHA1InWant:: + When `uploadpack.hideRefs` is in effect, allow `upload-pack` + to accept a fetch request that asks for an object at the tip + of a hidden ref (by default, such a request is rejected). + See also `uploadpack.hideRefs`. Even if this is false, a client + may be able to steal objects via the techniques described in the + "SECURITY" section of the linkgit:gitnamespaces[7] man page; it's + best to keep private data in a separate repository. + +uploadpack.allowReachableSHA1InWant:: + Allow `upload-pack` to accept a fetch request that asks for an + object that is reachable from any ref tip. However, note that + calculating object reachability is computationally expensive. + Defaults to `false`. Even if this is false, a client may be able + to steal objects via the techniques described in the "SECURITY" + section of the linkgit:gitnamespaces[7] man page; it's best to + keep private data in a separate repository. + +uploadpack.allowAnySHA1InWant:: + Allow `upload-pack` to accept a fetch request that asks for any + object at all. + Defaults to `false`. + +uploadpack.keepAlive:: + When `upload-pack` has started `pack-objects`, there may be a + quiet period while `pack-objects` prepares the pack. Normally + it would output progress information, but if `--quiet` was used + for the fetch, `pack-objects` will output nothing at all until + the pack data begins. Some clients and networks may consider + the server to be hung and give up. Setting this option instructs + `upload-pack` to send an empty keepalive packet every + `uploadpack.keepAlive` seconds. Setting this option to 0 + disables keepalive packets entirely. The default is 5 seconds. + +uploadpack.packObjectsHook:: + If this option is set, when `upload-pack` would run + `git pack-objects` to create a packfile for a client, it will + run this shell command instead. The `pack-objects` command and + arguments it _would_ have run (including the `git pack-objects` + at the beginning) are appended to the shell command. The stdin + and stdout of the hook are treated as if `pack-objects` itself + was run. I.e., `upload-pack` will feed input intended for + `pack-objects` to the hook, and expects a completed packfile on + stdout. ++ +Note that this configuration variable is ignored if it is seen in the +repository-level config (this is a safety measure against fetching from +untrusted repositories). + +uploadpack.allowFilter:: + If this option is set, `upload-pack` will support partial + clone and partial fetch object filtering. + +uploadpack.allowRefInWant:: + If this option is set, `upload-pack` will support the `ref-in-want` + feature of the protocol version 2 `fetch` command. This feature + is intended for the benefit of load-balanced servers which may + not have the same view of what OIDs their refs point to due to + replication delay. 
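Pulling together the ref-hiding and upload-pack settings described above, a server-side sketch could look like this (the `refs/internal` hierarchy is a made-up example):

------------
# Hide an internal hierarchy from fetch/push advertisements ...
$ git config --add transfer.hideRefs refs/internal
# ... but expose one part of it again (later entries override earlier ones).
$ git config --add transfer.hideRefs '!refs/internal/public'
# Allow partial clone / partial fetch filtering for this repository.
$ git config uploadpack.allowFilter true
------------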
diff --git a/Documentation/config/url.txt b/Documentation/config/url.txt new file mode 100644 index 0000000000..e5566c371d --- /dev/null +++ b/Documentation/config/url.txt @@ -0,0 +1,30 @@ +url.<base>.insteadOf:: + Any URL that starts with this value will be rewritten to + start, instead, with <base>. In cases where some site serves a + large number of repositories, and serves them with multiple + access methods, and some users need to use different access + methods, this feature allows people to specify any of the + equivalent URLs and have Git automatically rewrite the URL to + the best alternative for the particular user, even for a + never-before-seen repository on the site. When more than one + insteadOf strings match a given URL, the longest match is used. ++ +Note that any protocol restrictions will be applied to the rewritten +URL. If the rewrite changes the URL to use a custom protocol or remote +helper, you may need to adjust the `protocol.*.allow` config to permit +the request. In particular, protocols you expect to use for submodules +must be set to `always` rather than the default of `user`. See the +description of `protocol.allow` above. + +url.<base>.pushInsteadOf:: + Any URL that starts with this value will not be pushed to; + instead, it will be rewritten to start with <base>, and the + resulting URL will be pushed to. In cases where some site serves + a large number of repositories, and serves them with multiple + access methods, some of which do not allow push, this feature + allows people to specify a pull-only URL and have Git + automatically use an appropriate URL to push, even for a + never-before-seen repository on the site. When more than one + pushInsteadOf strings match a given URL, the longest match is + used. If a remote has an explicit pushurl, Git will ignore this + setting for that remote. diff --git a/Documentation/config/user.txt b/Documentation/config/user.txt new file mode 100644 index 0000000000..b5b2ba1199 --- /dev/null +++ b/Documentation/config/user.txt @@ -0,0 +1,26 @@ +user.email:: + Your email address to be recorded in any newly created commits. + Can be overridden by the `GIT_AUTHOR_EMAIL`, `GIT_COMMITTER_EMAIL`, and + `EMAIL` environment variables. See linkgit:git-commit-tree[1]. + +user.name:: + Your full name to be recorded in any newly created commits. + Can be overridden by the `GIT_AUTHOR_NAME` and `GIT_COMMITTER_NAME` + environment variables. See linkgit:git-commit-tree[1]. + +user.useConfigOnly:: + Instruct Git to avoid trying to guess defaults for `user.email` + and `user.name`, and instead retrieve the values only from the + configuration. For example, if you have multiple email addresses + and would like to use a different one for each repository, then + with this configuration option set to `true` in the global config + along with a name, Git will prompt you to set up an email before + making new commits in a newly cloned repository. + Defaults to `false`. + +user.signingKey:: + If linkgit:git-tag[1] or linkgit:git-commit[1] is not selecting the + key you want it to automatically when creating a signed tag or + commit, you can override the default selection with this variable. + This option is passed unchanged to gpg's --local-user parameter, + so you may specify a key using any method that gpg supports. 
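For instance, the user settings above can be combined so that Git refuses to guess an identity (the names and addresses are placeholders):

------------
$ git config --global user.useConfigOnly true
$ git config --global user.name "A U Thor"
# Set the email per repository instead of globally.
$ git config user.email author@example.com
------------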
diff --git a/Documentation/config/versionsort.txt b/Documentation/config/versionsort.txt new file mode 100644 index 0000000000..6c7cc054fa --- /dev/null +++ b/Documentation/config/versionsort.txt @@ -0,0 +1,33 @@ +versionsort.prereleaseSuffix (deprecated):: + Deprecated alias for `versionsort.suffix`. Ignored if + `versionsort.suffix` is set. + +versionsort.suffix:: + Even when version sort is used in linkgit:git-tag[1], tagnames + with the same base version but different suffixes are still sorted + lexicographically, resulting e.g. in prerelease tags appearing + after the main release (e.g. "1.0-rc1" after "1.0"). This + variable can be specified to determine the sorting order of tags + with different suffixes. ++ +By specifying a single suffix in this variable, any tagname containing +that suffix will appear before the corresponding main release. E.g. if +the variable is set to "-rc", then all "1.0-rcX" tags will appear before +"1.0". If specified multiple times, once per suffix, then the order of +suffixes in the configuration will determine the sorting order of tagnames +with those suffixes. E.g. if "-pre" appears before "-rc" in the +configuration, then all "1.0-preX" tags will be listed before any +"1.0-rcX" tags. The placement of the main release tag relative to tags +with various suffixes can be determined by specifying the empty suffix +among those other suffixes. E.g. if the suffixes "-rc", "", "-ck" and +"-bfs" appear in the configuration in this order, then all "v4.8-rcX" tags +are listed first, followed by "v4.8", then "v4.8-ckX" and finally +"v4.8-bfsX". ++ +If more than one suffixes match the same tagname, then that tagname will +be sorted according to the suffix which starts at the earliest position in +the tagname. If more than one different matching suffixes start at +that earliest position, then that tagname will be sorted according to the +longest of those suffixes. +The sorting order between different suffixes is undefined if they are +in multiple config files. diff --git a/Documentation/config/web.txt b/Documentation/config/web.txt new file mode 100644 index 0000000000..beec8d1303 --- /dev/null +++ b/Documentation/config/web.txt @@ -0,0 +1,4 @@ +web.browser:: + Specify a web browser that may be used by some commands. + Currently only linkgit:git-instaweb[1] and linkgit:git-help[1] + may use it. diff --git a/Documentation/config/worktree.txt b/Documentation/config/worktree.txt new file mode 100644 index 0000000000..b853798fc2 --- /dev/null +++ b/Documentation/config/worktree.txt @@ -0,0 +1,9 @@ +worktree.guessRemote:: + With `add`, if no branch argument, and neither of `-b` nor + `-B` nor `--detach` are given, the command defaults to + creating a new branch from HEAD. If `worktree.guessRemote` is + set to true, `worktree add` tries to find a remote-tracking + branch whose name uniquely matches the new branch name. If + such a branch exists, it is checked out and set as "upstream" + for the new branch. If no such match can be found, it falls + back to creating a new branch from the current HEAD. diff --git a/Documentation/doc-diff b/Documentation/doc-diff index f483fe427c..dfd9418778 100755 --- a/Documentation/doc-diff +++ b/Documentation/doc-diff @@ -1,21 +1,34 @@ #!/bin/sh +# +# Build two documentation trees and diff the resulting formatted output. +# Compared to a source diff, this can reveal mistakes in the formatting. +# For example: +# +# ./doc-diff origin/master HEAD +# +# would show the differences introduced by a branch based on master. 
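Building on the usage shown in the script header above, typical invocations might look like this (revisions are arbitrary; the `--` pass-through matches the usage string that follows):

------------
# Compare the rendered manpages of two revisions.
$ ./doc-diff v2.19.0 v2.20.0
# Pass extra diff options through after `--`.
$ ./doc-diff origin/master HEAD -- --stat
------------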
OPTIONS_SPEC="\ doc-diff [options] <from> <to> [-- <diff-options>] +doc-diff (-c|--clean) -- j=n parallel argument to pass to make f force rebuild; do not rely on cached results +c,clean cleanup temporary working files " SUBDIRECTORY_OK=1 . "$(git --exec-path)/git-sh-setup" parallel= force= +clean= while test $# -gt 0 do case "$1" in -j) parallel=$2; shift ;; + -c|--clean) + clean=t ;; -f) force=t ;; --) @@ -26,6 +39,17 @@ do shift done +cd_to_toplevel +tmp=Documentation/tmp-doc-diff + +if test -n "$clean" +then + test $# -eq 0 || usage + git worktree remove --force "$tmp/worktree" 2>/dev/null + rm -rf "$tmp" + exit 0 +fi + if test -z "$parallel" then parallel=$(getconf _NPROCESSORS_ONLN 2>/dev/null) @@ -42,9 +66,6 @@ to=$1; shift from_oid=$(git rev-parse --verify "$from") || exit 1 to_oid=$(git rev-parse --verify "$to") || exit 1 -cd_to_toplevel -tmp=Documentation/tmp-doc-diff - if test -n "$force" then rm -rf "$tmp" @@ -54,7 +75,7 @@ fi # results that don't differ between the two trees. if ! test -d "$tmp/worktree" then - git worktree add --detach "$tmp/worktree" "$from" && + git worktree add -f --detach "$tmp/worktree" "$from" && dots=$(echo "$tmp/worktree" | sed 's#[^/]*#..#g') && ln -s "$dots/config.mak" "$tmp/worktree/config.mak" fi @@ -69,12 +90,12 @@ generate_render_makefile () { printf '%s: %s\n' "$dst" "$src" printf '\t@echo >&2 " RENDER $(notdir $@)" && \\\n' printf '\tmkdir -p $(dir $@) && \\\n' - printf '\tMANWIDTH=80 man -l $< >$@+ && \\\n' + printf '\tMANWIDTH=80 man $< >$@+ && \\\n' printf '\tmv $@+ $@\n' done } -# render_tree <dirname> <committish> +# render_tree <committish_oid> render_tree () { # Skip install-man entirely if we already have an installed directory. # We can't rely on make here, since "install-man" unconditionally @@ -84,7 +105,7 @@ render_tree () { # through. if ! test -d "$tmp/installed/$1" then - git -C "$tmp/worktree" checkout "$2" && + git -C "$tmp/worktree" checkout --detach "$1" && make -j$parallel -C "$tmp/worktree" \ GIT_VERSION=omitted \ SOURCE_DATE_EPOCH=0 \ @@ -104,6 +125,6 @@ render_tree () { fi } -render_tree $from_oid "$from" && -render_tree $to_oid "$to" && +render_tree $from_oid && +render_tree $to_oid && git -C $tmp/rendered diff --no-index "$@" $from_oid $to_oid diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt index 8bc36af4b1..fa0a3151b3 100644 --- a/Documentation/fetch-options.txt +++ b/Documentation/fetch-options.txt @@ -68,11 +68,16 @@ endif::git-pull[] -f:: --force:: - When 'git fetch' is used with `<rbranch>:<lbranch>` - refspec, it refuses to update the local branch - `<lbranch>` unless the remote branch `<rbranch>` it - fetches is a descendant of `<lbranch>`. This option - overrides that check. + When 'git fetch' is used with `<src>:<dst>` refspec it may + refuse to update the local branch as discussed +ifdef::git-pull[] + in the `<refspec>` part of the linkgit:git-fetch[1] + documentation. +endif::git-pull[] +ifndef::git-pull[] + in the `<refspec>` part below. +endif::git-pull[] + This option overrides that check. -k:: --keep:: diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt index 9767b2b483..bf5316ffa9 100644 --- a/Documentation/git-branch.txt +++ b/Documentation/git-branch.txt @@ -14,7 +14,7 @@ SYNOPSIS [(--merged | --no-merged) [<commit>]] [--contains [<commit]] [--no-contains [<commit>]] [--points-at <object>] [--format=<format>] [<pattern>...] 
-'git branch' [--track | --no-track] [-l] [-f] <branchname> [<start-point>] +'git branch' [--track | --no-track] [-f] <branchname> [<start-point>] 'git branch' (--set-upstream-to=<upstream> | -u <upstream>) [<branchname>] 'git branch' --unset-upstream [<branchname>] 'git branch' (-m | -M) [<oldbranch>] <newbranch> @@ -100,8 +100,6 @@ OPTIONS The negated form `--no-create-reflog` only overrides an earlier `--create-reflog`, but currently does not negate the setting of `core.logAllRefUpdates`. -+ -The `-l` option is a deprecated synonym for `--create-reflog`. -f:: --force:: @@ -156,14 +154,11 @@ This option is only applicable in non-verbose mode. --all:: List both remote-tracking branches and local branches. +-l:: --list:: List branches. With optional `<pattern>...`, e.g. `git branch --list 'maint-*'`, list only the branches that match the pattern(s). -+ -This should not be confused with `git branch -l <branchname>`, -which creates a branch named `<branchname>` with a reflog. -See `--create-reflog` above for details. -v:: -vv:: diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt index 8e240435be..1bfe9f56a7 100644 --- a/Documentation/git-config.txt +++ b/Documentation/git-config.txt @@ -45,13 +45,15 @@ unset an existing `--type` specifier with `--no-type`. When reading, the values are read from the system, global and repository local configuration files by default, and options -`--system`, `--global`, `--local` and `--file <filename>` can be -used to tell the command to read from only that location (see <<FILES>>). +`--system`, `--global`, `--local`, `--worktree` and +`--file <filename>` can be used to tell the command to read from only +that location (see <<FILES>>). When writing, the new value is written to the repository local configuration file by default, and options `--system`, `--global`, -`--file <filename>` can be used to tell the command to write to -that location (you can say `--local` but that is the default). +`--worktree`, `--file <filename>` can be used to tell the command to +write to that location (you can say `--local` but that is the +default). This command will fail with non-zero status upon error. Some exit codes are: @@ -131,6 +133,11 @@ from all available files. + See also <<FILES>>. +--worktree:: + Similar to `--local` except that `.git/config.worktree` is + read from or written to if `extensions.worktreeConfig` is + present. If not it's the same as `--local`. + -f config-file:: --file config-file:: Use the given config file instead of the one specified by GIT_CONFIG. @@ -188,8 +195,8 @@ Valid `<type>`'s include: --bool-or-int:: --path:: --expiry-date:: - Historical options for selecting a type specifier. Prefer instead `--type`, - (see: above). + Historical options for selecting a type specifier. Prefer instead `--type` + (see above). --no-type:: Un-sets the previously set type specifier (if one was previously set). This @@ -281,6 +288,10 @@ $XDG_CONFIG_HOME/git/config:: $GIT_DIR/config:: Repository specific configuration file. +$GIT_DIR/config.worktree:: + This is optional and is only searched when + `extensions.worktreeConfig` is present in $GIT_DIR/config. + If no further options are given, all reading options will read all of these files that are available. If the global or the system-wide configuration file are not available they will be ignored. If the repository configuration @@ -299,9 +310,10 @@ configuration file. Note that this also affects options like `--replace-all` and `--unset`. 
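Putting the new `--worktree` scope together with the extension it depends on, a minimal sketch (assuming a repository that already uses multiple working trees):

------------
# Opt in once per repository ...
$ git config extensions.worktreeConfig true
# ... then write settings that apply only to the current working tree.
$ git config --worktree core.sparseCheckout true
$ git config --worktree --list
------------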
*'git config' will only ever change one file at a time*. You can override these rules either by command-line options or by environment -variables. The `--global` and the `--system` options will limit the file used -to the global or system-wide file respectively. The `GIT_CONFIG` environment -variable has a similar effect, but you can specify any filename you want. +variables. The `--global`, `--system` and `--worktree` options will limit +the file used to the global, system-wide or per-worktree file respectively. +The `GIT_CONFIG` environment variable has a similar effect, but you +can specify any filename you want. ENVIRONMENT @@ -442,9 +454,9 @@ For URLs in `https://weak.example.com`, `http.sslVerify` is set to false, while it is set to `true` for all others: ------------ -% git config --bool --get-urlmatch http.sslverify https://good.example.com +% git config --type=bool --get-urlmatch http.sslverify https://good.example.com true -% git config --bool --get-urlmatch http.sslverify https://weak.example.com +% git config --type=bool --get-urlmatch http.sslverify https://weak.example.com false % git config --get-urlmatch http https://weak.example.com http.cookieFile /tmp/cookie.txt diff --git a/Documentation/git-fmt-merge-msg.txt b/Documentation/git-fmt-merge-msg.txt index 423b6e033b..6793d8fc05 100644 --- a/Documentation/git-fmt-merge-msg.txt +++ b/Documentation/git-fmt-merge-msg.txt @@ -51,7 +51,7 @@ OPTIONS CONFIGURATION ------------- -include::fmt-merge-msg-config.txt[] +include::config/fmt-merge-msg.txt[] merge.summary:: Synonym to `merge.log`; this is deprecated and will be removed in diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt index b41e1329a7..aba4c5febe 100644 --- a/Documentation/git-format-patch.txt +++ b/Documentation/git-format-patch.txt @@ -23,6 +23,8 @@ SYNOPSIS [(--reroll-count|-v) <n>] [--to=<email>] [--cc=<email>] [--[no-]cover-letter] [--quiet] [--notes[=<ref>]] + [--interdiff=<previous>] + [--range-diff=<previous> [--creation-factor=<percent>]] [--progress] [<common diff options>] [ <since> | <revision range> ] @@ -228,6 +230,33 @@ feeding the result to `git send-email`. containing the branch description, shortlog and the overall diffstat. You can fill in a description in the file before sending it out. +--interdiff=<previous>:: + As a reviewer aid, insert an interdiff into the cover letter, + or as commentary of the lone patch of a 1-patch series, showing + the differences between the previous version of the patch series and + the series currently being formatted. `previous` is a single revision + naming the tip of the previous series which shares a common base with + the series being formatted (for example `git format-patch + --cover-letter --interdiff=feature/v1 -3 feature/v2`). + +--range-diff=<previous>:: + As a reviewer aid, insert a range-diff (see linkgit:git-range-diff[1]) + into the cover letter, or as commentary of the lone patch of a + 1-patch series, showing the differences between the previous + version of the patch series and the series currently being formatted. + `previous` can be a single revision naming the tip of the previous + series if it shares a common base with the series being formatted (for + example `git format-patch --cover-letter --range-diff=feature/v1 -3 + feature/v2`), or a revision range if the two versions of the series are + disjoint (for example `git format-patch --cover-letter + --range-diff=feature/v1~3..feature/v1 -3 feature/v2`). 
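The examples embedded in the option descriptions above amount to command lines like these (the `feature/v1`/`feature/v2` branch names are from the illustration, not required):

------------
# Cover letter with an interdiff against the previous round of the series.
$ git format-patch --cover-letter --interdiff=feature/v1 -3 feature/v2
# Or with a range-diff, when both rounds share a common base.
$ git format-patch --cover-letter --range-diff=feature/v1 -3 feature/v2
------------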
+ +--creation-factor=<percent>:: + Used with `--range-diff`, tweak the heuristic which matches up commits + between the previous and current series of patches by adjusting the + creation/deletion cost fudge factor. See linkgit:git-range-diff[1]) + for details. + --notes[=<ref>]:: Append the notes (see linkgit:git-notes[1]) for the commit after the three-dash line. diff --git a/Documentation/git-gc.txt b/Documentation/git-gc.txt index f5bc98ccb3..c20ee6c789 100644 --- a/Documentation/git-gc.txt +++ b/Documentation/git-gc.txt @@ -17,7 +17,8 @@ Runs a number of housekeeping tasks within the current repository, such as compressing file revisions (to reduce disk space and increase performance), removing unreachable objects which may have been created from prior invocations of 'git add', packing refs, pruning -reflog, rerere metadata or stale working trees. +reflog, rerere metadata or stale working trees. May also update ancillary +indexes such as the commit-graph. Users are encouraged to run this task on a regular basis within each repository to maintain good disk space utilization and good diff --git a/Documentation/git-grep.txt b/Documentation/git-grep.txt index a3049af1a3..84fe236a8e 100644 --- a/Documentation/git-grep.txt +++ b/Documentation/git-grep.txt @@ -18,7 +18,7 @@ SYNOPSIS [(-O | --open-files-in-pager) [<pager>]] [-z | --null] [ -o | --only-matching ] [-c | --count] [--all-match] [-q | --quiet] - [--max-depth <depth>] + [--max-depth <depth>] [--[no-]recursive] [--color[=<when>] | --no-color] [--break] [--heading] [-p | --show-function] [-A <post-context>] [-B <pre-context>] [-C <context>] @@ -119,11 +119,18 @@ OPTIONS --max-depth <depth>:: For each <pathspec> given on command line, descend at most <depth> - levels of directories. A negative value means no limit. + levels of directories. A value of -1 means no limit. This option is ignored if <pathspec> contains active wildcards. In other words if "a*" matches a directory named "a*", "*" is matched literally so --max-depth is still effective. +-r:: +--recursive:: + Same as `--max-depth=-1`; this is the default. + +--no-recursive:: + Same as `--max-depth=0`. + -w:: --word-regexp:: Match the pattern only at word boundary (either begin at the diff --git a/Documentation/git-help.txt b/Documentation/git-help.txt index 83d25d825a..aab5453bbb 100644 --- a/Documentation/git-help.txt +++ b/Documentation/git-help.txt @@ -8,7 +8,7 @@ git-help - Display help information about Git SYNOPSIS -------- [verse] -'git help' [-a|--all [--verbose]] [-g|--guide] +'git help' [-a|--all [--[no-]verbose]] [-g|--guide] [-i|--info|-m|--man|-w|--web] [COMMAND|GUIDE] DESCRIPTION @@ -29,6 +29,10 @@ guide is brought up. The 'man' program is used by default for this purpose, but this can be overridden by other options or configuration variables. +If an alias is given, git shows the definition of the alias on +standard output. To get the manual page for the aliased command, use +`git COMMAND --help`. + Note that `git --help ...` is identical to `git help ...` because the former is internally converted into the latter. @@ -42,8 +46,10 @@ OPTIONS --all:: Prints all the available commands on the standard output. This option overrides any given command or guide name. - When used with `--verbose` print description for all recognized - commands. + +--verbose:: + When used with `--all` print description for all recognized + commands. This is the default. 
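Stepping back to the grep recursion switches described above, a short sketch (the pattern is arbitrary):

------------
# Search only the current directory level, i.e. the same as --max-depth=0.
$ git grep --no-recursive TODO
# The default recursive behavior, spelled explicitly.
$ git grep --recursive TODO
------------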
-c:: --config:: diff --git a/Documentation/git-imap-send.txt b/Documentation/git-imap-send.txt index 7b157441eb..65b53fcc47 100644 --- a/Documentation/git-imap-send.txt +++ b/Documentation/git-imap-send.txt @@ -57,50 +57,7 @@ to appropriate values. Variables ~~~~~~~~~ -imap.folder:: - The folder to drop the mails into, which is typically the Drafts - folder. For example: "INBOX.Drafts", "INBOX/Drafts" or - "[Gmail]/Drafts". Required. - -imap.tunnel:: - Command used to setup a tunnel to the IMAP server through which - commands will be piped instead of using a direct network connection - to the server. Required when imap.host is not set. - -imap.host:: - A URL identifying the server. Use an `imap://` prefix for non-secure - connections and an `imaps://` prefix for secure connections. - Ignored when imap.tunnel is set, but required otherwise. - -imap.user:: - The username to use when logging in to the server. - -imap.pass:: - The password to use when logging in to the server. - -imap.port:: - An integer port number to connect to on the server. - Defaults to 143 for imap:// hosts and 993 for imaps:// hosts. - Ignored when imap.tunnel is set. - -imap.sslverify:: - A boolean to enable/disable verification of the server certificate - used by the SSL/TLS connection. Default is `true`. Ignored when - imap.tunnel is set. - -imap.preformattedHTML:: - A boolean to enable/disable the use of html encoding when sending - a patch. An html encoded patch will be bracketed with <pre> - and have a content type of text/html. Ironically, enabling this - option causes Thunderbird to send the patch as a plain/text, - format=fixed email. Default is `false`. - -imap.authMethod:: - Specify authenticate method for authentication with IMAP server. - If Git was built with the NO_CURL option, or if your curl version is older - than 7.34.0, or if you're running git-imap-send with the `--no-curl` - option, the only supported method is 'CRAM-MD5'. If this is not set - then 'git imap-send' uses the basic IMAP plaintext LOGIN command. +include::config/imap.txt[] Examples ~~~~~~~~ diff --git a/Documentation/git-merge.txt b/Documentation/git-merge.txt index eb36837f86..4cc86469f3 100644 --- a/Documentation/git-merge.txt +++ b/Documentation/git-merge.txt @@ -342,7 +342,7 @@ include::merge-strategies.txt[] CONFIGURATION ------------- -include::merge-config.txt[] +include::config/merge.txt[] branch.<name>.mergeOptions:: Sets default options for merging into branch <name>. The syntax and diff --git a/Documentation/git-mergetool.txt b/Documentation/git-mergetool.txt index 3622d66488..0c7975a050 100644 --- a/Documentation/git-mergetool.txt +++ b/Documentation/git-mergetool.txt @@ -79,6 +79,17 @@ success of the resolution after the custom tool has exited. Prompt before each invocation of the merge resolution program to give the user a chance to skip the path. +-g:: +--gui:: + When 'git-mergetool' is invoked with the `-g` or `--gui` option + the default merge tool will be read from the configured + `merge.guitool` variable instead of `merge.tool`. + +--no-gui:: + This overrides a previous `-g` or `--gui` setting and reads the + default merge tool from the configured `merge.tool` + variable. + -O<orderfile>:: Process files in the order specified in the <orderfile>, which has one shell glob pattern per line.
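For example, with a GUI tool configured as `merge.guitool`, the new options select between the two tools (`meld` here is only an example of such a tool, not a requirement):

------------
$ git config merge.guitool meld
# Resolve conflicts with the configured GUI tool ...
$ git mergetool --gui
# ... or ignore a previous --gui and fall back to merge.tool.
$ git mergetool --no-gui
------------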
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt new file mode 100644 index 0000000000..f7778a2c85 --- /dev/null +++ b/Documentation/git-multi-pack-index.txt @@ -0,0 +1,66 @@ +git-multi-pack-index(1) +======================= + +NAME +---- +git-multi-pack-index - Write and verify multi-pack-indexes + + +SYNOPSIS +-------- +[verse] +'git multi-pack-index' [--object-dir=<dir>] <verb> + +DESCRIPTION +----------- +Write or verify a multi-pack-index (MIDX) file. + +OPTIONS +------- + +--object-dir=<dir>:: + Use given directory for the location of Git objects. We check + `<dir>/packs/multi-pack-index` for the current MIDX file, and + `<dir>/packs` for the pack-files to index. + +write:: + When given as the verb, write a new MIDX file to + `<dir>/packs/multi-pack-index`. + +verify:: + When given as the verb, verify the contents of the MIDX file + at `<dir>/packs/multi-pack-index`. + + +EXAMPLES +-------- + +* Write a MIDX file for the packfiles in the current .git folder. ++ +----------------------------------------------- +$ git multi-pack-index write +----------------------------------------------- + +* Write a MIDX file for the packfiles in an alternate object store. ++ +----------------------------------------------- +$ git multi-pack-index --object-dir <alt> write +----------------------------------------------- + +* Verify the MIDX file for the packfiles in the current .git folder. ++ +----------------------------------------------- +$ git multi-pack-index verify +----------------------------------------------- + + +SEE ALSO +-------- +See link:technical/multi-pack-index.html[The Multi-Pack-Index Design +Document] and link:technical/pack-format.html[The Multi-Pack-Index +Format] for more information on the multi-pack-index feature. + + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/git-p4.txt b/Documentation/git-p4.txt index 41780a5aa9..f0a0280954 100644 --- a/Documentation/git-p4.txt +++ b/Documentation/git-p4.txt @@ -174,21 +174,21 @@ $ git p4 submit --update-shelve 1234 --update-shelve 2345 Unshelve ~~~~~~~~ Unshelving will take a shelved P4 changelist, and produce the equivalent git commit -in the branch refs/remotes/p4/unshelved/<changelist>. +in the branch refs/remotes/p4-unshelved/<changelist>. The git commit is created relative to the current origin revision (HEAD by default). -If the shelved changelist's parent revisions differ, git-p4 will refuse to unshelve; -you need to be unshelving onto an equivalent tree. +A parent commit is created based on the origin, and then the unshelve commit is +created based on that. The origin revision can be changed with the "--origin" option. -If the target branch in refs/remotes/p4/unshelved already exists, the old one will +If the target branch in refs/remotes/p4-unshelved already exists, the old one will be renamed. ---- $ git p4 sync $ git p4 unshelve 12345 -$ git show refs/remotes/p4/unshelved/12345 +$ git show p4-unshelved/12345 <submit more changes via p4 to the same files> $ git p4 unshelve 12345 <refuses to unshelve until git is in sync with p4 again> diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt index d95b472d16..40c825c381 100644 --- a/Documentation/git-pack-objects.txt +++ b/Documentation/git-pack-objects.txt @@ -289,6 +289,103 @@ Unexpected missing object will raise an error. --unpack-unreachable:: Keep unreachable objects in loose form. This implies `--revs`. +--delta-islands:: + Restrict delta matches based on "islands". 
See DELTA ISLANDS + below. + + +DELTA ISLANDS +------------- + +When possible, `pack-objects` tries to reuse existing on-disk deltas to +avoid having to search for new ones on the fly. This is an important +optimization for serving fetches, because it means the server can avoid +inflating most objects at all and just send the bytes directly from +disk. This optimization can't work when an object is stored as a delta +against a base which the receiver does not have (and which we are not +already sending). In that case the server "breaks" the delta and has to +find a new one, which has a high CPU cost. Therefore it's important for +performance that the set of objects in on-disk delta relationships match +what a client would fetch. + +In a normal repository, this tends to work automatically. The objects +are mostly reachable from the branches and tags, and that's what clients +fetch. Any deltas we find on the server are likely to be between objects +the client has or will have. + +But in some repository setups, you may have several related but separate +groups of ref tips, with clients tending to fetch those groups +independently. For example, imagine that you are hosting several "forks" +of a repository in a single shared object store, and letting clients +view them as separate repositories through `GIT_NAMESPACE` or separate +repos using the alternates mechanism. A naive repack may find that the +optimal delta for an object is against a base that is only found in +another fork. But when a client fetches, they will not have the base +object, and we'll have to find a new delta on the fly. + +A similar situation may exist if you have many refs outside of +`refs/heads/` and `refs/tags/` that point to related objects (e.g., +`refs/pull` or `refs/changes` used by some hosting providers). By +default, clients fetch only heads and tags, and deltas against objects +found only in those other groups cannot be sent as-is. + +Delta islands solve this problem by allowing you to group your refs into +distinct "islands". Pack-objects computes which objects are reachable +from which islands, and refuses to make a delta from an object `A` +against a base which is not present in all of `A`'s islands. This +results in slightly larger packs (because we miss some delta +opportunities), but guarantees that a fetch of one island will not have +to recompute deltas on the fly due to crossing island boundaries. + +When repacking with delta islands the delta window tends to get +clogged with candidates that are forbidden by the config. Repacking +with a big --window helps (and doesn't take as long as it otherwise +might because we can reject some object pairs based on islands before +doing any computation on the content). + +Islands are configured via the `pack.island` option, which can be +specified multiple times. Each value is a left-anchored regular +expressions matching refnames. For example: + +------------------------------------------- +[pack] +island = refs/heads/ +island = refs/tags/ +------------------------------------------- + +puts heads and tags into an island (whose name is the empty string; see +below for more on naming). Any refs which do not match those regular +expressions (e.g., `refs/pull/123`) is not in any island. Any object +which is reachable only from `refs/pull/` (but not heads or tags) is +therefore not a candidate to be used as a base for `refs/heads/`. 
+ +Refs are grouped into islands based on their "names", and two regexes +that produce the same name are considered to be in the same +island. The names are computed from the regexes by concatenating any +capture groups from the regex, with a '-' dash in between. (And if +there are no capture groups, then the name is the empty string, as in +the above example.) This allows you to create arbitrary numbers of +islands. Only up to 14 such capture groups are supported though. + +For example, imagine you store the refs for each fork in +`refs/virtual/ID`, where `ID` is a numeric identifier. You might then +configure: + +------------------------------------------- +[pack] +island = refs/virtual/([0-9]+)/heads/ +island = refs/virtual/([0-9]+)/tags/ +island = refs/virtual/([0-9]+)/(pull)/ +------------------------------------------- + +That puts the heads and tags for each fork in their own island (named +"1234" or similar), and the pull refs for each go into their own +"1234-pull". + +Note that we pick a single island for each regex to go into, using "last +one wins" ordering (which allows repo-specific config to take precedence +over user-wide config, and so forth). + SEE ALSO -------- linkgit:git-rev-list[1] diff --git a/Documentation/git-push.txt b/Documentation/git-push.txt index 55277a9781..a5fc54aeab 100644 --- a/Documentation/git-push.txt +++ b/Documentation/git-push.txt @@ -74,22 +74,57 @@ without any `<refspec>` on the command line. Otherwise, missing `:<dst>` means to update the same ref as the `<src>`. + The object referenced by <src> is used to update the <dst> reference -on the remote side. By default this is only allowed if <dst> is not -a tag (annotated or lightweight), and then only if it can fast-forward -<dst>. By having the optional leading `+`, you can tell Git to update -the <dst> ref even if it is not allowed by default (e.g., it is not a -fast-forward.) This does *not* attempt to merge <src> into <dst>. See -EXAMPLES below for details. -+ -`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`. -+ -Pushing an empty <src> allows you to delete the <dst> ref from -the remote repository. +on the remote side. Whether this is allowed depends on where in +`refs/*` the <dst> reference lives as described in detail below, in +those sections "update" means any modifications except deletes, which +as noted after the next few sections are treated differently. ++ +The `refs/heads/*` namespace will only accept commit objects, and +updates only if they can be fast-forwarded. ++ +The `refs/tags/*` namespace will accept any kind of object (as +commits, trees and blobs can be tagged), and any updates to them will +be rejected. ++ +It's possible to push any type of object to any namespace outside of +`refs/{tags,heads}/*`. In the case of tags and commits, these will be +treated as if they were the commits inside `refs/heads/*` for the +purposes of whether the update is allowed. ++ +I.e. a fast-forward of commits and tags outside `refs/{tags,heads}/*` +is allowed, even in cases where what's being fast-forwarded is not a +commit, but a tag object which happens to point to a new commit which +is a fast-forward of the commit the last tag (or commit) it's +replacing. Replacing a tag with an entirely different tag is also +allowed, if it points to the same commit, as well as pushing a peeled +tag, i.e. pushing the commit that existing tag object points to, or a +new tag object which an existing commit points to. 
++ +Tree and blob objects outside of `refs/{tags,heads}/*` will be treated +the same way as if they were inside `refs/tags/*`; any update of them +will be rejected. ++ +All of the rules described above about what's not allowed as an update +can be overridden by adding the optional leading `+` to a refspec +(or using the `--force` command line option). The only exception to this +is that no amount of forcing will make the `refs/heads/*` namespace +accept a non-commit object. Hooks and configuration can also override +or amend these rules; see e.g. `receive.denyNonFastForwards` in +linkgit:git-config[1] and `pre-receive` and `update` in +linkgit:githooks[5]. ++ +Pushing an empty <src> allows you to delete the <dst> ref from the +remote repository. Deletions are always accepted without a leading `+` +in the refspec (or `--force`), except when forbidden by configuration +or hooks. See `receive.denyDeletes` in linkgit:git-config[1] and +`pre-receive` and `update` in linkgit:githooks[5]. + The special refspec `:` (or `+:` to allow non-fast-forward updates) directs Git to push "matching" branches: for every branch that exists on the local side, the remote side is updated if a branch of the same name already exists on the remote side. ++ +`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`. --all:: Push all branches (i.e. refs under `refs/heads/`); cannot be diff --git a/Documentation/git-range-diff.txt b/Documentation/git-range-diff.txt index f693930fdb..8a6ea2c6c5 100644 --- a/Documentation/git-range-diff.txt +++ b/Documentation/git-range-diff.txt @@ -78,6 +78,23 @@ between patches", i.e. to compare the author, commit message and diff of corresponding old/new commits. There is currently no means to tweak the diff options passed to `git log` when generating those patches. +OUTPUT STABILITY +---------------- + +The output of the `range-diff` command is subject to change. It is +intended to be human-readable porcelain output, not something that can +be used across versions of Git to get a textually stable `range-diff` +(as opposed to something like the `--stable` option to +linkgit:git-patch-id[1]). There's also no equivalent of +linkgit:git-apply[1] for `range-diff`; the output is not intended to +be machine-readable. + +This is particularly true when passing in diff options. Currently some +options like `--stat` can, as an emergent effect, produce output +that's quite useless in the context of `range-diff`. Future versions +of `range-diff` may learn to interpret such options in a manner +specific to `range-diff` (e.g. for `--stat` producing human-readable +output which summarizes how the diffstat changed). CONFIGURATION ------------- diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index 432baabe33..80793bad8d 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -203,7 +203,7 @@ Alternatively, you can undo the 'git rebase' with CONFIGURATION ------------- -include::rebase-config.txt[] +include::config/rebase.txt[] OPTIONS ------- @@ -441,7 +441,8 @@ See also INCOMPATIBLE OPTIONS below. --exec <cmd>:: Append "exec <cmd>" after each line creating a commit in the final history. <cmd> will be interpreted as one or more shell - commands. + commands. Any command that fails will interrupt the rebase, + with exit code 1.
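As a concrete sketch of the `--exec` behavior just described (the build command and the range are placeholders):

------------
# Run the test suite after every picked commit; a failing command
# interrupts the rebase with exit code 1 so it can be fixed up.
$ git rebase --interactive --exec "make test" HEAD~3
------------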
+ You may execute several commands by either using one instance of `--exec` with several commands: @@ -641,6 +642,9 @@ By replacing the command "pick" with the command "edit", you can tell the files and/or the commit message, amend the commit, and continue rebasing. +To interrupt the rebase (just like an "edit" command would do, but without +cherry-picking any commit first), use the "break" command. + If you just want to edit the commit message for a commit, replace the command "pick" with the command "reword". diff --git a/Documentation/git-reflog.txt b/Documentation/git-reflog.txt index 472a6808cd..ff487ff77d 100644 --- a/Documentation/git-reflog.txt +++ b/Documentation/git-reflog.txt @@ -20,7 +20,7 @@ depending on the subcommand: 'git reflog' ['show'] [log-options] [<ref>] 'git reflog expire' [--expire=<time>] [--expire-unreachable=<time>] [--rewrite] [--updateref] [--stale-fix] - [--dry-run | -n] [--verbose] [--all | <refs>...] + [--dry-run | -n] [--verbose] [--all [--single-worktree] | <refs>...] 'git reflog delete' [--rewrite] [--updateref] [--dry-run | -n] [--verbose] ref@\{specifier\}... 'git reflog exists' <ref> @@ -72,6 +72,11 @@ Options for `expire` --all:: Process the reflogs of all references. +--single-worktree:: + By default when `--all` is specified, reflogs from all working + trees are processed. This option limits the processing to reflogs + from the current working tree only. + --expire=<time>:: Prune entries older than the specified time. If this option is not specified, the expiration time is taken from the diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt index d056250968..aa0cc8bd44 100644 --- a/Documentation/git-repack.txt +++ b/Documentation/git-repack.txt @@ -160,6 +160,11 @@ depth is 4095. being removed. In addition, any unreachable loose objects will be packed (and their loose counterparts removed). +-i:: +--delta-islands:: + Pass the `--delta-islands` option to `git-pack-objects`, see + linkgit:git-pack-objects[1]. + Configuration ------------- diff --git a/Documentation/git-rerere.txt b/Documentation/git-rerere.txt index 031f31fa47..df310d2a58 100644 --- a/Documentation/git-rerere.txt +++ b/Documentation/git-rerere.txt @@ -211,6 +211,12 @@ would conflict the same way as the test merge you resolved earlier. 'git rerere' will be run by 'git rebase' to help you resolve this conflict. +[NOTE] 'git rerere' relies on the conflict markers in the file to +detect the conflict. If the file already contains lines that look the +same as lines with conflict markers, 'git rerere' may fail to record a +conflict resolution. To work around this, the `conflict-marker-size` +setting in linkgit:gitattributes[5] can be used. + GIT --- Part of the linkgit:git[1] suite diff --git a/Documentation/git-reset.txt b/Documentation/git-reset.txt index 1d697d9962..2dac95c71a 100644 --- a/Documentation/git-reset.txt +++ b/Documentation/git-reset.txt @@ -95,7 +95,10 @@ OPTIONS -q:: --quiet:: - Be quiet, only report errors. +--no-quiet:: + Be quiet, only report errors. The default behavior is set by the + `reset.quiet` config option. `--quiet` and `--no-quiet` will + override the default behavior. EXAMPLES diff --git a/Documentation/git-send-email.txt b/Documentation/git-send-email.txt index 465a4ecbed..62c6c76f27 100644 --- a/Documentation/git-send-email.txt +++ b/Documentation/git-send-email.txt @@ -190,7 +190,9 @@ $ git send-email --smtp-auth="PLAIN LOGIN GSSAPI" ... 
If at least one of the specified mechanisms matches the ones advertised by the SMTP server and if it is supported by the utilized SASL library, the mechanism is used for authentication. If neither 'sendemail.smtpAuth' nor `--smtp-auth` -is specified, all mechanisms supported by the SASL library can be used. +is specified, all mechanisms supported by the SASL library can be used. The +special value 'none' maybe specified to completely disable authentication +independently of `--smtp-user` --smtp-pass[=<password>]:: Password for SMTP-AUTH. The argument is optional: If no @@ -204,6 +206,9 @@ or on the command line. If a username has been specified (with specified (with `--smtp-pass` or `sendemail.smtpPass`), then a password is obtained using 'git-credential'. +--no-smtp-auth:: + Disable SMTP authentication. Short hand for `--smtp-auth=none` + --smtp-server=<host>:: If set, specifies the outgoing SMTP server to use (e.g. `smtp.example.com` or a raw IP address). Alternatively it can @@ -321,16 +326,19 @@ Automating auto-cc of: + -- -- 'author' will avoid including the patch author -- 'self' will avoid including the sender +- 'author' will avoid including the patch author. +- 'self' will avoid including the sender. - 'cc' will avoid including anyone mentioned in Cc lines in the patch header except for self (use 'self' for that). - 'bodycc' will avoid including anyone mentioned in Cc lines in the patch body (commit message) except for self (use 'self' for that). - 'sob' will avoid including anyone mentioned in Signed-off-by lines except - for self (use 'self' for that). + for self (use 'self' for that). +- 'misc-by' will avoid including anyone mentioned in Acked-by, + Reviewed-by, Tested-by and other "-by" lines in the patch body, + except Signed-off-by (use 'sob' for that). - 'cccmd' will avoid running the --cc-cmd. -- 'body' is equivalent to 'sob' + 'bodycc' +- 'body' is equivalent to 'sob' + 'bodycc' + 'misc-by'. - 'all' will suppress all auto cc values. -- + diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt index d707e61198..cb86318f3e 100644 --- a/Documentation/git-worktree.txt +++ b/Documentation/git-worktree.txt @@ -120,8 +120,16 @@ OPTIONS --force:: By default, `add` refuses to create a new working tree when `<commit-ish>` is a branch name and is already checked out by - another working tree and `remove` refuses to remove an unclean - working tree. This option overrides these safeguards. + another working tree, or if `<path>` is already assigned to some + working tree but is missing (for instance, if `<path>` was deleted + manually). This option overrides these safeguards. To add a missing but + locked working tree path, specify `--force` twice. ++ +`move` refuses to move a locked working tree unless `--force` is specified +twice. ++ +`remove` refuses to remove an unclean working tree unless `--force` is used. +To remove a locked working tree, specify `--force` twice. -b <new-branch>:: -B <new-branch>:: @@ -196,6 +204,65 @@ working trees, it can be used to identify worktrees. For example if you only have two working trees, at "/abc/def/ghi" and "/abc/def/ggg", then "ghi" or "def/ghi" is enough to point to the former working tree. +REFS +---- +In multiple working trees, some refs may be shared between all working +trees, some refs are local. One example is HEAD is different for all +working trees. This section is about the sharing rules and how to access +refs of one working tree from another. 
+ +In general, all pseudo refs are per working tree and all refs starting +with "refs/" are shared. Pseudo refs are ones like HEAD which are +directly under GIT_DIR instead of inside GIT_DIR/refs. There is one +exception to this: refs inside refs/bisect and refs/worktree are not +shared. + +Refs that are per working tree can still be accessed from another +working tree via two special paths, main-worktree and worktrees. The +former gives access to per-worktree refs of the main working tree, +while the latter gives access to those of all linked working trees. + +For example, main-worktree/HEAD or main-worktree/refs/bisect/good +resolve to the same value as the main working tree's HEAD and +refs/bisect/good respectively. Similarly, worktrees/foo/HEAD or +worktrees/bar/refs/bisect/bad are the same as +GIT_COMMON_DIR/worktrees/foo/HEAD and +GIT_COMMON_DIR/worktrees/bar/refs/bisect/bad. + +To access refs, it's best not to look inside GIT_DIR directly. Instead +use commands such as linkgit:git-rev-parse[1] or linkgit:git-update-ref[1] +which will handle refs correctly. + +CONFIGURATION FILE +------------------ +By default, the repository "config" file is shared across all working +trees. If the config variables `core.bare` or `core.worktree` are +already present in the config file, they will be applied to the main +working tree only. + +In order to have configuration specific to working trees, you can turn +on the "worktreeConfig" extension, e.g.: + +------------ +$ git config extensions.worktreeConfig true +------------ + +In this mode, specific configuration stays in the path pointed to by `git +rev-parse --git-path config.worktree`. You can add or update +configuration in this file with `git config --worktree`. Older Git +versions will refuse to access repositories with this extension. + +Note that in this file, the exception for `core.bare` and `core.worktree` +is gone. If you have them in $GIT_DIR/config already, you must move +them to the `config.worktree` of the main working tree. You may also +take this opportunity to review and move other configuration that you +do not want to share across all working trees: + + - `core.worktree` and `core.bare` should never be shared + + - `core.sparseCheckout` is recommended per working tree, unless you + are sure you always use sparse checkout for all working trees. + DETAILS ------- Each linked working tree has a private sub-directory in the repository's @@ -220,7 +287,8 @@ linked working tree `git rev-parse --git-path HEAD` returns `/path/other/test-next/.git/HEAD` or `/path/main/.git/HEAD`) while `git rev-parse --git-path refs/heads/master` uses $GIT_COMMON_DIR and returns `/path/main/.git/refs/heads/master`, -since refs are shared across all working trees. +since refs are shared across all working trees, except refs/bisect and +refs/worktree. See linkgit:gitrepository-layout[5] for more information. The rule of thumb is do not make any assumption about whether a path belongs to @@ -245,6 +313,9 @@ to `/path/main/.git/worktrees/test-next` then a file named `test-next` entry from being pruned. See linkgit:gitrepository-layout[5] for details. +When extensions.worktreeConfig is enabled, the config file +`.git/worktrees/<id>/config.worktree` is read after `.git/config` is. + LIST OUTPUT FORMAT ------------------ The worktree list command has two output formats.
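Going back to the per-worktree refs described above, they can be inspected from any working tree with the usual plumbing (the linked worktree name `hotfix` is hypothetical):

------------
# HEAD of the main working tree, seen from a linked one.
$ git rev-parse main-worktree/HEAD
# A per-worktree bisect ref of a linked working tree named "hotfix".
$ git rev-parse worktrees/hotfix/refs/bisect/good
------------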
The default format shows the diff --git a/Documentation/git.txt b/Documentation/git.txt index 254f063132..00156d64aa 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -76,7 +76,7 @@ Note that omitting the `=` in `git -c foo.bar ...` is allowed and sets `foo.bar` to the boolean true value (just like `[foo]bar` would in a config file). Including the equals but with an empty value (like `git -c foo.bar= ...`) sets `foo.bar` to the empty string which `git config ---bool` will convert to `false`. +--type=bool` will convert to `false`. --exec-path[=<path>]:: Path to wherever your core Git programs are installed. diff --git a/Documentation/gitignore.txt b/Documentation/gitignore.txt index d107daaffd..1c94f08ff4 100644 --- a/Documentation/gitignore.txt +++ b/Documentation/gitignore.txt @@ -129,7 +129,8 @@ full pathname may have special meaning: matches zero or more directories. For example, "`a/**/b`" matches "`a/b`", "`a/x/b`", "`a/x/y/b`" and so on. - - Other consecutive asterisks are considered invalid. + - Other consecutive asterisks are considered regular asterisks and + will match according to the previous rules. NOTES ----- diff --git a/Documentation/gitrepository-layout.txt b/Documentation/gitrepository-layout.txt index e85148f05e..366dee238c 100644 --- a/Documentation/gitrepository-layout.txt +++ b/Documentation/gitrepository-layout.txt @@ -95,8 +95,10 @@ refs:: References are stored in subdirectories of this directory. The 'git prune' command knows to preserve objects reachable from refs found in this directory and - its subdirectories. This directory is ignored if $GIT_COMMON_DIR - is set and "$GIT_COMMON_DIR/refs" will be used instead. + its subdirectories. + This directory is ignored (except refs/bisect and + refs/worktree) if $GIT_COMMON_DIR is set and + "$GIT_COMMON_DIR/refs" will be used instead. refs/heads/`name`:: records tip-of-the-tree commit objects of branch `name` @@ -143,6 +145,11 @@ config:: if $GIT_COMMON_DIR is set and "$GIT_COMMON_DIR/config" will be used instead. +config.worktree:: + Working directory specific configuration file for the main + working directory in multiple working directory setup (see + linkgit:git-worktree[1]). + branches:: A slightly deprecated way to store shorthands to be used to specify a URL to 'git fetch', 'git pull' and 'git push'. @@ -165,6 +172,11 @@ hooks:: each hook. This directory is ignored if $GIT_COMMON_DIR is set and "$GIT_COMMON_DIR/hooks" will be used instead. +common:: + When multiple working trees are used, most of files in + $GIT_DIR are per-worktree with a few known exceptions. All + files under 'common' however will be shared between all + working trees. index:: The current index file for the repository. It is @@ -275,6 +287,11 @@ worktrees/<id>/locked:: or manually by `git worktree prune`. The file may contain a string explaining why the repository is locked. +worktrees/<id>/config.worktree:: + Working directory specific configuration file. + +include::technical/repository-version.txt[] + SEE ALSO -------- linkgit:git-init[1], diff --git a/Documentation/gitrevisions.txt b/Documentation/gitrevisions.txt index 1f6cceaefb..d407b7dee1 100644 --- a/Documentation/gitrevisions.txt +++ b/Documentation/gitrevisions.txt @@ -19,9 +19,10 @@ walk the revision graph (such as linkgit:git-log[1]), all commits which are reachable from that commit. For commands that walk the revision graph one can also specify a range of revisions explicitly. 
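The `git -c` wording change above is easy to check interactively; a small demo, run inside any repository:

------------
# Omitting '=' yields boolean true; an explicit empty value reads back
# as false once converted with --type=bool.
git -c foo.bar  config --type=bool foo.bar   # prints "true"
git -c foo.bar= config --type=bool foo.bar   # prints "false"
------------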
-In addition, some Git commands (such as linkgit:git-show[1]) also take -revision parameters which denote other objects than commits, e.g. blobs -("files") or trees ("directories of files"). +In addition, some Git commands (such as linkgit:git-show[1] and +linkgit:git-push[1]) can also take revision parameters which denote +other objects than commits, e.g. blobs ("files") or trees +("directories of files"). include::revisions.txt[] diff --git a/Documentation/pretty-formats.txt b/Documentation/pretty-formats.txt index 6109ef09aa..417b638cd8 100644 --- a/Documentation/pretty-formats.txt +++ b/Documentation/pretty-formats.txt @@ -153,6 +153,9 @@ endif::git-rev-list[] and "N" for no signature - '%GS': show the name of the signer for a signed commit - '%GK': show the key used to sign a signed commit +- '%GF': show the fingerprint of the key used to sign a signed commit +- '%GP': show the fingerprint of the primary key whose subkey was used + to sign a signed commit - '%gD': reflog selector, e.g., `refs/stash@{1}` or `refs/stash@{2 minutes ago`}; the format follows the rules described for the `-g` option. The portion before the `@` is the refname as diff --git a/Documentation/pull-fetch-param.txt b/Documentation/pull-fetch-param.txt index f1fb08dc68..7d3a60f5b9 100644 --- a/Documentation/pull-fetch-param.txt +++ b/Documentation/pull-fetch-param.txt @@ -33,11 +33,40 @@ name. it requests fetching everything up to the given tag. + The remote ref that matches <src> -is fetched, and if <dst> is not an empty string, the local -ref that matches it is fast-forwarded using <src>. -If the optional plus `+` is used, the local ref -is updated even if it does not result in a fast-forward -update. +is fetched, and if <dst> is not an empty string, an attempt +is made to update the local ref that matches it. ++ +Whether that update is allowed without `--force` depends on the ref +namespace it's being fetched to, the type of object being fetched, and +whether the update is considered to be a fast-forward. Generally, the +same rules apply for fetching as when pushing, see the `<refspec>...` +section of linkgit:git-push[1] for what those are. Exceptions to those +rules particular to 'git fetch' are noted below. ++ +Until Git version 2.20, and unlike when pushing with +linkgit:git-push[1], any updates to `refs/tags/*` would be accepted +without `+` in the refspec (or `--force`). When fetching, we promiscuously +considered all tag updates from a remote to be forced fetches. Since +Git version 2.20, fetching to update `refs/tags/*` works the same way +as when pushing. I.e. any updates will be rejected without `+` in the +refspec (or `--force`). ++ +Unlike when pushing with linkgit:git-push[1], any updates outside of +`refs/{tags,heads}/*` will be accepted without `+` in the refspec (or +`--force`), whether that's swapping e.g. a tree object for a blob, or +a commit for another commit that's doesn't have the previous commit as +an ancestor etc. ++ +Unlike when pushing with linkgit:git-push[1], there is no +configuration which'll amend these rules, and nothing like a +`pre-fetch` hook analogous to the `pre-receive` hook. ++ +As with pushing with linkgit:git-push[1], all of the rules described +above about what's not allowed as an update can be overridden by +adding an the optional leading `+` to a refspec (or using `--force` +command line option). The only exception to this is that no amount of +forcing will make the `refs/heads/*` namespace accept a non-commit +object. 
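A hedged illustration of the fetch rules described above; `origin` and the tag name are placeholders, and the rejected/accepted outcomes assume the local tag already exists and the remote tag has moved:

------------
# Since 2.20, replacing an existing tag on fetch needs forcing, just as
# it does on push.
git fetch origin 'refs/tags/v1.0:refs/tags/v1.0'          # rejected if v1.0 changed
git fetch origin '+refs/tags/v1.0:refs/tags/v1.0'         # leading '+' forces the update
git fetch --force origin 'refs/tags/v1.0:refs/tags/v1.0'  # same effect as '+'
------------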
+ [NOTE] When the remote branch you want to fetch is known to diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt index 21978ebbac..bab5f50b17 100644 --- a/Documentation/rev-list-options.txt +++ b/Documentation/rev-list-options.txt @@ -731,6 +731,11 @@ the requested refs. + The form '--filter=sparse:path=<path>' similarly uses a sparse-checkout specification contained in <path>. ++ +The form '--filter=tree:<depth>' omits all blobs and trees whose depth +from the root tree is >= <depth> (minimum depth if an object is located +at multiple depths in the commits traversed). Currently, only <depth>=0 +is supported, which omits all blobs and trees. --no-filter:: Turn off any previous `--filter=` argument. diff --git a/Documentation/technical/api-diff.txt b/Documentation/technical/api-diff.txt index 8b001de0db..30fc0e9c93 100644 --- a/Documentation/technical/api-diff.txt +++ b/Documentation/technical/api-diff.txt @@ -18,8 +18,8 @@ Calling sequence ---------------- * Prepare `struct diff_options` to record the set of diff options, and - then call `diff_setup()` to initialize this structure. This sets up - the vanilla default. + then call `repo_diff_setup()` to initialize this structure. This + sets up the vanilla default. * Fill in the options structure to specify desired output format, rename detection, etc. `diff_opt_parse()` can be used to parse options given diff --git a/Documentation/technical/api-parse-options.txt b/Documentation/technical/api-parse-options.txt index 829b558110..2b036d7838 100644 --- a/Documentation/technical/api-parse-options.txt +++ b/Documentation/technical/api-parse-options.txt @@ -183,10 +183,6 @@ There are some macros to easily define options: scale the provided value by 1024, 1024^2 or 1024^3 respectively. The scaled value is put into `unsigned_long_var`. -`OPT_DATE(short, long, ×tamp_t_var, description)`:: - Introduce an option with date argument, see `approxidate()`. - The timestamp is put into `timestamp_t_var`. - `OPT_EXPIRY_DATE(short, long, ×tamp_t_var, description)`:: Introduce an option with expiry date argument, see `parse_expiry_date()`. The timestamp is put into `timestamp_t_var`. diff --git a/Documentation/technical/api-revision-walking.txt b/Documentation/technical/api-revision-walking.txt index 55b878ade8..03f9ea6ac4 100644 --- a/Documentation/technical/api-revision-walking.txt +++ b/Documentation/technical/api-revision-walking.txt @@ -15,9 +15,9 @@ revision list. Functions --------- -`init_revisions`:: +`repo_init_revisions`:: - Initialize a rev_info structure with default values. The second + Initialize a rev_info structure with default values. The third parameter may be NULL or can be prefix path, and then the `.prefix` variable will be set to it. This is typically the first function you want to call when you want to deal with a revision list. After calling diff --git a/Documentation/technical/index-format.txt b/Documentation/technical/index-format.txt index db3572626b..7c4d67aa6a 100644 --- a/Documentation/technical/index-format.txt +++ b/Documentation/technical/index-format.txt @@ -314,3 +314,44 @@ The remaining data of each directory block is grouped by type: - An ewah bitmap, the n-th bit indicates whether the n-th index entry is not CE_FSMONITOR_VALID. + +== End of Index Entry + + The End of Index Entry (EOIE) is used to locate the end of the variable + length index entries and the begining of the extensions. 
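The `tree:<depth>` filter added above currently accepts only depth 0; a quick way to see its effect on object listing (any existing repository will do):

------------
# With tree:0 all trees and blobs are omitted, so only commits remain
# in the object listing.
git rev-list --objects --filter=tree:0 HEAD | head
------------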
Code can take + advantage of this to quickly locate the index extensions without having + to parse through all of the index entries. + + Because it must be able to be loaded before the variable length cache + entries and other index extensions, this extension must be written last. + The signature for this extension is { 'E', 'O', 'I', 'E' }. + + The extension consists of: + + - 32-bit offset to the end of the index entries + + - 160-bit SHA-1 over the extension types and their sizes (but not + their contents). E.g. if we have "TREE" extension that is N-bytes + long, "REUC" extension that is M-bytes long, followed by "EOIE", + then the hash would be: + + SHA-1("TREE" + <binary representation of N> + + "REUC" + <binary representation of M>) + +== Index Entry Offset Table + + The Index Entry Offset Table (IEOT) is used to help address the CPU + cost of loading the index by enabling multi-threading the process of + converting cache entries from the on-disk format to the in-memory format. + The signature for this extension is { 'I', 'E', 'O', 'T' }. + + The extension consists of: + + - 32-bit version (currently 1) + + - A number of index offset entries each consisting of: + + - 32-bit offset from the begining of the file to the first cache entry + in this block of entries. + + - 32-bit count of cache entries in this block diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt new file mode 100644 index 0000000000..d7e57639f7 --- /dev/null +++ b/Documentation/technical/multi-pack-index.txt @@ -0,0 +1,109 @@ +Multi-Pack-Index (MIDX) Design Notes +==================================== + +The Git object directory contains a 'pack' directory containing +packfiles (with suffix ".pack") and pack-indexes (with suffix +".idx"). The pack-indexes provide a way to lookup objects and +navigate to their offset within the pack, but these must come +in pairs with the packfiles. This pairing depends on the file +names, as the pack-index differs only in suffix with its pack- +file. While the pack-indexes provide fast lookup per packfile, +this performance degrades as the number of packfiles increases, +because abbreviations need to inspect every packfile and we are +more likely to have a miss on our most-recently-used packfile. +For some large repositories, repacking into a single packfile +is not feasible due to storage space or excessive repack times. + +The multi-pack-index (MIDX for short) stores a list of objects +and their offsets into multiple packfiles. It contains: + +- A list of packfile names. +- A sorted list of object IDs. +- A list of metadata for the ith object ID including: + - A value j referring to the jth packfile. + - An offset within the jth packfile for the object. +- If large offsets are required, we use another list of large + offsets similar to version 2 pack-indexes. + +Thus, we can provide O(log N) lookup time for any number +of packfiles. + +Design Details +-------------- + +- The MIDX is stored in a file named 'multi-pack-index' in the + .git/objects/pack directory. This could be stored in the pack + directory of an alternate. It refers only to packfiles in that + same directory. + +- The pack.multiIndex config setting must be on to consume MIDX files. + +- The file format includes parameters for the object ID hash + function, so a future change of hash algorithm does not require + a change in format. + +- The MIDX keeps only one record per object ID. 
If an object appears + in multiple packfiles, then the MIDX selects the copy in the most- + recently modified packfile. + +- If there exist packfiles in the pack directory not registered in + the MIDX, then those packfiles are loaded into the `packed_git` + list and `packed_git_mru` cache. + +- The pack-indexes (.idx files) remain in the pack directory so we + can delete the MIDX file, set core.midx to false, or downgrade + without any loss of information. + +- The MIDX file format uses a chunk-based approach (similar to the + commit-graph file) that allows optional data to be added. + +Future Work +----------- + +- Add a 'verify' subcommand to the 'git midx' builtin to verify the + contents of the multi-pack-index file match the offsets listed in + the corresponding pack-indexes. + +- The multi-pack-index allows many packfiles, especially in a context + where repacking is expensive (such as a very large repo), or + unexpected maintenance time is unacceptable (such as a high-demand + build machine). However, the multi-pack-index needs to be rewritten + in full every time. We can extend the format to be incremental, so + writes are fast. By storing a small "tip" multi-pack-index that + points to large "base" MIDX files, we can keep writes fast while + still reducing the number of binary searches required for object + lookups. + +- The reachability bitmap is currently paired directly with a single + packfile, using the pack-order as the object order to hopefully + compress the bitmaps well using run-length encoding. This could be + extended to pair a reachability bitmap with a multi-pack-index. If + the multi-pack-index is extended to store a "stable object order" + (a function Order(hash) = integer that is constant for a given hash, + even as the multi-pack-index is updated) then a reachability bitmap + could point to a multi-pack-index and be updated independently. + +- Packfiles can be marked as "special" using empty files that share + the initial name but replace ".pack" with ".keep" or ".promisor". + We can add an optional chunk of data to the multi-pack-index that + records flags of information about the packfiles. This allows new + states, such as 'repacked' or 'redeltified', that can help with + pack maintenance in a multi-pack environment. It may also be + helpful to organize packfiles by object type (commit, tree, blob, + etc.) and use this metadata to help that maintenance. + +- The partial clone feature records special "promisor" packs that + may point to objects that are not stored locally, but available + on request to a server. The multi-pack-index does not currently + track these promisor packs. + +Related Links +------------- +[0] https://bugs.chromium.org/p/git/issues/detail?id=6 + Chromium work item for: Multi-Pack Index (MIDX) + +[1] https://public-inbox.org/git/20180107181459.222909-1-dstolee@microsoft.com/ + An earlier RFC for the multi-pack-index feature + +[2] https://public-inbox.org/git/alpine.DEB.2.20.1803091557510.23109@alexmv-linux/ + Git Merge 2018 Contributor's summit notes (includes discussion of MIDX) diff --git a/Documentation/technical/pack-format.txt b/Documentation/technical/pack-format.txt index 70a99fd142..cab5bdd2ff 100644 --- a/Documentation/technical/pack-format.txt +++ b/Documentation/technical/pack-format.txt @@ -252,3 +252,80 @@ Pack file entry: <+ corresponding packfile. 20-byte SHA-1-checksum of all of the above. 
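Given the `git multi-pack-index` builtin added in this release, the design above can be exercised roughly as follows; the `write` subcommand name is an assumption here rather than something spelled out in the notes above, and the resulting file is described by the format that follows:

------------
# Write a multi-pack-index covering the packs in the local pack
# directory, then confirm the file exists.
git multi-pack-index write
ls -l .git/objects/pack/multi-pack-index
------------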
+ +== multi-pack-index (MIDX) files have the following format: + +The multi-pack-index files refer to multiple pack-files and loose objects. + +In order to allow extensions that add extra data to the MIDX, we organize +the body into "chunks" and provide a lookup table at the beginning of the +body. The header includes certain length values, such as the number of packs, +the number of base MIDX files, hash lengths and types. + +All 4-byte numbers are in network order. + +HEADER: + + 4-byte signature: + The signature is: {'M', 'I', 'D', 'X'} + + 1-byte version number: + Git only writes or recognizes version 1. + + 1-byte Object Id Version + Git only writes or recognizes version 1 (SHA1). + + 1-byte number of "chunks" + + 1-byte number of base multi-pack-index files: + This value is currently always zero. + + 4-byte number of pack files + +CHUNK LOOKUP: + + (C + 1) * 12 bytes providing the chunk offsets: + First 4 bytes describe chunk id. Value 0 is a terminating label. + Other 8 bytes provide offset in current file for chunk to start. + (Chunks are provided in file-order, so you can infer the length + using the next chunk position if necessary.) + + The remaining data in the body is described one chunk at a time, and + these chunks may be given in any order. Chunks are required unless + otherwise specified. + +CHUNK DATA: + + Packfile Names (ID: {'P', 'N', 'A', 'M'}) + Stores the packfile names as concatenated, null-terminated strings. + Packfiles must be listed in lexicographic order for fast lookups by + name. This is the only chunk not guaranteed to be a multiple of four + bytes in length, so should be the last chunk for alignment reasons. + + OID Fanout (ID: {'O', 'I', 'D', 'F'}) + The ith entry, F[i], stores the number of OIDs with first + byte at most i. Thus F[255] stores the total + number of objects. + + OID Lookup (ID: {'O', 'I', 'D', 'L'}) + The OIDs for all objects in the MIDX are stored in lexicographic + order in this chunk. + + Object Offsets (ID: {'O', 'O', 'F', 'F'}) + Stores two 4-byte values for every object. + 1: The pack-int-id for the pack storing this object. + 2: The offset within the pack. + If all offsets are less than 2^31, then the large offset chunk + will not exist and offsets are stored as in IDX v1. + If there is at least one offset value larger than 2^32-1, then + the large offset chunk must exist. If the large offset chunk + exists and the 31st bit is on, then removing that bit reveals + the row in the large offsets containing the 8-byte offset of + this object. + + [Optional] Object Large Offsets (ID: {'L', 'O', 'F', 'F'}) + 8-byte offsets into large packfiles. + +TRAILER: + + 20-byte SHA1-checksum of the above contents. diff --git a/Documentation/technical/repository-version.txt b/Documentation/technical/repository-version.txt index e03eaccebc..7844ef30ff 100644 --- a/Documentation/technical/repository-version.txt +++ b/Documentation/technical/repository-version.txt @@ -1,5 +1,4 @@ -Git Repository Format Versions -============================== +== Git Repository Format Versions Every git repository is marked with a numeric version in the `core.repositoryformatversion` key of its `config` file. This version @@ -40,16 +39,14 @@ format by default. The currently defined format versions are: -Version `0` ------------ +=== Version `0` This is the format defined by the initial version of git, including but not limited to the format of the repository directory, the repository configuration file, and the object and ref storage. 
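Returning to the multi-pack-index header above: its fixed part is small enough to inspect by hand. A rough sketch using GNU `od` (the `--endian` option is a GNU extension), assuming a multi-pack-index has already been written:

------------
m=.git/objects/pack/multi-pack-index
od -A d -t c  -N 4 "$m"                   # signature: 'M' 'I' 'D' 'X'
od -A d -t u1 -j 4 -N 4 "$m"              # version, OID version, chunk count, base files
od -A d -t u4 -j 8 -N 4 --endian=big "$m" # number of pack files (network order)
------------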
Specifying the complete behavior of git is beyond the scope of this document. -Version `1` ------------ +=== Version `1` This format is identical to version `0`, with the following exceptions: @@ -74,21 +71,18 @@ it here, in order to claim the name. The defined extensions are: -`noop` -~~~~~~ +==== `noop` This extension does not change git's behavior at all. It is useful only for testing format-1 compatibility. -`preciousObjects` -~~~~~~~~~~~~~~~~~ +==== `preciousObjects` When the config key `extensions.preciousObjects` is set to `true`, objects in the repository MUST NOT be deleted (e.g., by `git-prune` or `git repack -d`). -`partialclone` -~~~~~~~~~~~~~~ +==== `partialclone` When the config key `extensions.partialclone` is set, it indicates that the repo was created with a partial clone (or later performed @@ -98,3 +92,11 @@ and it promises that all such omitted objects can be fetched from it in the future. The value of this key is the name of the promisor remote. + +==== `worktreeConfig` + +If set, by default "git config" reads from both "config" and +"config.worktree" file from GIT_DIR in that order. In +multiple working directory mode, "config" file is shared while +"config.worktree" is per-working directory (i.e., it's in +GIT_COMMON_DIR/worktrees/<id>/config.worktree) diff --git a/Documentation/technical/rerere.txt b/Documentation/technical/rerere.txt new file mode 100644 index 0000000000..aa22d7ace8 --- /dev/null +++ b/Documentation/technical/rerere.txt @@ -0,0 +1,186 @@ +Rerere +====== + +This document describes the rerere logic. + +Conflict normalization +---------------------- + +To ensure recorded conflict resolutions can be looked up in the rerere +database, even when branches are merged in a different order, +different branches are merged that result in the same conflict, or +when different conflict style settings are used, rerere normalizes the +conflicts before writing them to the rerere database. + +Different conflict styles and branch names are normalized by stripping +the labels from the conflict markers, and removing the common ancestor +version from the `diff3` conflict style. Branches that are merged +in different order are normalized by sorting the conflict hunks. More +on each of those steps in the following sections. + +Once these two normalization operations are applied, a conflict ID is +calculated based on the normalized conflict, which is later used by +rerere to look up the conflict in the rerere database. + +Removing the common ancestor version +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Say we have three branches AB, AC and AC2. The common ancestor of +these branches has a file with a line containing the string "A" (for +brevity this is called "line A" in the rest of the document). In +branch AB this line is changed to "B", in AC, this line is changed to +"C", and branch AC2 is forked off of AC, after the line was changed to +"C". 
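For concreteness, the branch layout described above can be reproduced with a few commands; the repository name, file name and commit messages are placeholders, and the conflicts that follow arise from merging these branches:

------------
# One file whose single line "A" becomes "B" on branch AB and "C" on
# branch AC; AC2 is forked from AC after that change.
git init rerere-demo && cd rerere-demo
echo A >file && git add file && git commit -m 'line A'
git branch AB && git branch AC
git checkout AB && echo B >file && git commit -am 'change to B'
git checkout AC && echo C >file && git commit -am 'change to C'
git branch AC2
------------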
+ +Forking a branch ABAC off of branch AB and then merging AC into it, we +get a conflict like the following: + + <<<<<<< HEAD + B + ======= + C + >>>>>>> AC + +Doing the analogous with AC2 (forking a branch ABAC2 off of branch AB +and then merging branch AC2 into it), using the diff3 conflict style, +we get a conflict like the following: + + <<<<<<< HEAD + B + ||||||| merged common ancestors + A + ======= + C + >>>>>>> AC2 + +By resolving this conflict, to leave line D, the user declares: + + After examining what branches AB and AC did, I believe that making + line A into line D is the best thing to do that is compatible with + what AB and AC wanted to do. + +As branch AC2 refers to the same commit as AC, the above implies that +this is also compatible what AB and AC2 wanted to do. + +By extension, this means that rerere should recognize that the above +conflicts are the same. To do this, the labels on the conflict +markers are stripped, and the common ancestor version is removed. The above +examples would both result in the following normalized conflict: + + <<<<<<< + B + ======= + C + >>>>>>> + +Sorting hunks +~~~~~~~~~~~~~ + +As before, lets imagine that a common ancestor had a file with line A +its early part, and line X in its late part. And then four branches +are forked that do these things: + + - AB: changes A to B + - AC: changes A to C + - XY: changes X to Y + - XZ: changes X to Z + +Now, forking a branch ABAC off of branch AB and then merging AC into +it, and forking a branch ACAB off of branch AC and then merging AB +into it, would yield the conflict in a different order. The former +would say "A became B or C, what now?" while the latter would say "A +became C or B, what now?" + +As a reminder, the act of merging AC into ABAC and resolving the +conflict to leave line D means that the user declares: + + After examining what branches AB and AC did, I believe that + making line A into line D is the best thing to do that is + compatible with what AB and AC wanted to do. + +So the conflict we would see when merging AB into ACAB should be +resolved the same way---it is the resolution that is in line with that +declaration. + +Imagine that similarly previously a branch XYXZ was forked from XY, +and XZ was merged into it, and resolved "X became Y or Z" into "X +became W". + +Now, if a branch ABXY was forked from AB and then merged XY, then ABXY +would have line B in its early part and line Y in its later part. +Such a merge would be quite clean. We can construct 4 combinations +using these four branches ((AB, AC) x (XY, XZ)). + +Merging ABXY and ACXZ would make "an early A became B or C, a late X +became Y or Z" conflict, while merging ACXY and ABXZ would make "an +early A became C or B, a late X became Y or Z". We can see there are +4 combinations of ("B or C", "C or B") x ("X or Y", "Y or X"). + +By sorting, the conflict is given its canonical name, namely, "an +early part became B or C, a late part becames X or Y", and whenever +any of these four patterns appear, and we can get to the same conflict +and resolution that we saw earlier. + +Without the sorting, we'd have to somehow find a previous resolution +from combinatorial explosion. + +Conflict ID calculation +~~~~~~~~~~~~~~~~~~~~~~~ + +Once the conflict normalization is done, the conflict ID is calculated +as the sha1 hash of the conflict hunks appended to each other, +separated by <NUL> characters. The conflict markers are stripped out +before the sha1 is calculated. 
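The conflict ID for the normalized hunks can also be recomputed outside of rerere; a sketch, assuming `sha1sum` is available:

------------
# Normalized hunks "B" and "C", each terminated by a NUL byte, hashed
# with SHA-1; this is the SHA1('B<NUL>C<NUL>') value spelled out in the
# next paragraph.
printf 'B\0C\0' | sha1sum
------------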
So in the example above, where we +merge branch AC which changes line A to line C, into branch AB, which +changes line A to line C, the conflict ID would be +SHA1('B<NUL>C<NUL>'). + +If there are multiple conflicts in one file, the sha1 is calculated +the same way with all hunks appended to each other, in the order in +which they appear in the file, separated by a <NUL> character. + +Nested conflicts +~~~~~~~~~~~~~~~~ + +Nested conflicts are handled very similarly to "simple" conflicts. +Similar to simple conflicts, the conflict is first normalized by +stripping the labels from conflict markers, stripping the common ancestor +version, and the sorting the conflict hunks, both for the outer and the +inner conflict. This is done recursively, so any number of nested +conflicts can be handled. + +Note that this only works for conflict markers that "cleanly nest". If +there are any unmatched conflict markers, rerere will fail to handle +the conflict and record a conflict resolution. + +The only difference is in how the conflict ID is calculated. For the +inner conflict, the conflict markers themselves are not stripped out +before calculating the sha1. + +Say we have the following conflict for example: + + <<<<<<< HEAD + 1 + ======= + <<<<<<< HEAD + 3 + ======= + 2 + >>>>>>> branch-2 + >>>>>>> branch-3~ + +After stripping out the labels of the conflict markers, and sorting +the hunks, the conflict would look as follows: + + <<<<<<< + 1 + ======= + <<<<<<< + 2 + ======= + 3 + >>>>>>> + >>>>>>> + +and finally the conflict ID would be calculated as: +`sha1('1<NUL><<<<<<<\n3\n=======\n2\n>>>>>>><NUL>')` diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index 1b9ba0a6b1..5aafb91946 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=GIT-VERSION-FILE -DEF_VER=v2.19.2 +DEF_VER=v2.20.0-rc0 LF=' ' @@ -59,6 +59,13 @@ all:: # Define CURL_CONFIG to curl's configuration program that prints information # about the library (e.g., its version number). The default is 'curl-config'. # +# Define CURL_LDFLAGS to specify flags that you need to link when using libcurl, +# if you do not want to rely on the libraries provided by CURL_CONFIG. The +# default value is a result of `curl-config --libs`. An example value for +# CURL_LDFLAGS is as follows: +# +# CURL_LDFLAGS=-lcurl +# # Define NO_EXPAT if you do not have expat installed. git-http-push is # not built, and you cannot push using http:// and https:// transports (dumb). # @@ -183,10 +190,6 @@ all:: # # Define NEEDS_SSL_WITH_CRYPTO if you need -lssl when using -lcrypto (Darwin). # -# Define NEEDS_SSL_WITH_CURL if you need -lssl with -lcurl (Minix). -# -# Define NEEDS_IDN_WITH_CURL if you need -lidn when using -lcurl (Minix). -# # Define NEEDS_LIBICONV if linking with libc is not enough (Darwin). # # Define NEEDS_LIBINTL_BEFORE_LIBICONV if you need libintl before libiconv. @@ -207,10 +210,12 @@ all:: # Define MMAP_PREVENTS_DELETE if a file that is currently mmapped cannot be # deleted or cannot be replaced using rename(). # +# Define NO_POLL_H if you don't have poll.h. +# # Define NO_SYS_POLL_H if you don't have sys/poll.h. # # Define NO_POLL if you do not have or don't want to use poll(). -# This also implies NO_SYS_POLL_H. +# This also implies NO_POLL_H and NO_SYS_POLL_H. # # Define NEEDS_SYS_PARAM_H if you need to include sys/param.h to compile, # *PLEASE* REPORT to git@vger.kernel.org if your platform needs this; @@ -362,11 +367,6 @@ all:: # Define HAVE_DEV_TTY if your system can open /dev/tty to interact with the # user. 
# -# Define GETTEXT_POISON if you are debugging the choice of strings marked -# for translation. In a GETTEXT_POISON build, you can turn all strings marked -# for translation into gibberish by setting the GIT_GETTEXT_POISON variable -# (to any value) in your environment. -# # Define JSMIN to point to JavaScript minifier that functions as # a filter to have gitweb.js minified. # @@ -400,7 +400,7 @@ all:: # (defaults to "man") if you want to have a different default when # "git help" is called without a parameter specifying the format. # -# Define TEST_GIT_INDEX_VERSION to 2, 3 or 4 to run the test suite +# Define GIT_TEST_INDEX_VERSION to 2, 3 or 4 to run the test suite # with a different indexfile format version. If it isn't set the index # file format used is index-v[23]. # @@ -590,6 +590,8 @@ XDIFF_OBJS = VCSSVN_OBJS = GENERATED_H = EXTRA_CPPFLAGS = +FUZZ_OBJS = +FUZZ_PROGRAMS = LIB_OBJS = PROGRAM_OBJS = PROGRAMS = @@ -614,7 +616,7 @@ SCRIPT_SH += git-merge-one-file.sh SCRIPT_SH += git-merge-resolve.sh SCRIPT_SH += git-mergetool.sh SCRIPT_SH += git-quiltimport.sh -SCRIPT_SH += git-rebase.sh +SCRIPT_SH += git-legacy-rebase.sh SCRIPT_SH += git-remote-testgit.sh SCRIPT_SH += git-request-pull.sh SCRIPT_SH += git-stash.sh @@ -624,7 +626,7 @@ SCRIPT_SH += git-web--browse.sh SCRIPT_LIB += git-mergetool--lib SCRIPT_LIB += git-parse-remote SCRIPT_LIB += git-rebase--am -SCRIPT_LIB += git-rebase--interactive +SCRIPT_LIB += git-rebase--common SCRIPT_LIB += git-rebase--preserve-merges SCRIPT_LIB += git-rebase--merge SCRIPT_LIB += git-sh-setup @@ -682,6 +684,14 @@ SCRIPTS = $(SCRIPT_SH_INS) \ ETAGS_TARGET = TAGS +FUZZ_OBJS += fuzz-pack-headers.o +FUZZ_OBJS += fuzz-pack-idx.o + +# Always build fuzz objects even if not testing, to prevent bit-rot. +all:: $(FUZZ_OBJS) + +FUZZ_PROGRAMS += $(patsubst %.o,%,$(FUZZ_OBJS)) + # Empty... 
EXTRA_PROGRAMS = @@ -709,7 +719,9 @@ TEST_BUILTINS_OBJS += test-date.o TEST_BUILTINS_OBJS += test-delta.o TEST_BUILTINS_OBJS += test-drop-caches.o TEST_BUILTINS_OBJS += test-dump-cache-tree.o +TEST_BUILTINS_OBJS += test-dump-fsmonitor.o TEST_BUILTINS_OBJS += test-dump-split-index.o +TEST_BUILTINS_OBJS += test-dump-untracked-cache.o TEST_BUILTINS_OBJS += test-example-decorate.o TEST_BUILTINS_OBJS += test-genrandom.o TEST_BUILTINS_OBJS += test-hashmap.o @@ -720,33 +732,36 @@ TEST_BUILTINS_OBJS += test-match-trees.o TEST_BUILTINS_OBJS += test-mergesort.o TEST_BUILTINS_OBJS += test-mktemp.o TEST_BUILTINS_OBJS += test-online-cpus.o +TEST_BUILTINS_OBJS += test-parse-options.o TEST_BUILTINS_OBJS += test-path-utils.o +TEST_BUILTINS_OBJS += test-pkt-line.o TEST_BUILTINS_OBJS += test-prio-queue.o +TEST_BUILTINS_OBJS += test-reach.o TEST_BUILTINS_OBJS += test-read-cache.o +TEST_BUILTINS_OBJS += test-read-midx.o TEST_BUILTINS_OBJS += test-ref-store.o TEST_BUILTINS_OBJS += test-regex.o TEST_BUILTINS_OBJS += test-repository.o TEST_BUILTINS_OBJS += test-revision-walking.o TEST_BUILTINS_OBJS += test-run-command.o TEST_BUILTINS_OBJS += test-scrap-cache-tree.o -TEST_BUILTINS_OBJS += test-sha1-array.o TEST_BUILTINS_OBJS += test-sha1.o +TEST_BUILTINS_OBJS += test-sha1-array.o TEST_BUILTINS_OBJS += test-sigchain.o TEST_BUILTINS_OBJS += test-strcmp-offset.o TEST_BUILTINS_OBJS += test-string-list.o TEST_BUILTINS_OBJS += test-submodule-config.o +TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o TEST_BUILTINS_OBJS += test-subprocess.o TEST_BUILTINS_OBJS += test-urlmatch-normalization.o TEST_BUILTINS_OBJS += test-wildmatch.o TEST_BUILTINS_OBJS += test-windows-named-pipe.o TEST_BUILTINS_OBJS += test-write-cache.o -TEST_PROGRAMS_NEED_X += test-dump-fsmonitor -TEST_PROGRAMS_NEED_X += test-dump-untracked-cache +# Do not add more tests here unless they have extra dependencies. Add +# them in TEST_BUILTINS_OBJS above. 
TEST_PROGRAMS_NEED_X += test-fake-ssh TEST_PROGRAMS_NEED_X += test-line-buffer -TEST_PROGRAMS_NEED_X += test-parse-options -TEST_PROGRAMS_NEED_X += test-pkt-line TEST_PROGRAMS_NEED_X += test-svn-fe TEST_PROGRAMS_NEED_X += test-tool @@ -836,6 +851,7 @@ LIB_OBJS += column.o LIB_OBJS += combine-diff.o LIB_OBJS += commit.o LIB_OBJS += commit-graph.o +LIB_OBJS += commit-reach.o LIB_OBJS += compat/obstack.o LIB_OBJS += compat/terminal.o LIB_OBJS += config.o @@ -848,6 +864,7 @@ LIB_OBJS += csum-file.o LIB_OBJS += ctype.o LIB_OBJS += date.o LIB_OBJS += decorate.o +LIB_OBJS += delta-islands.o LIB_OBJS += diffcore-break.o LIB_OBJS += diffcore-delta.o LIB_OBJS += diffcore-order.o @@ -881,6 +898,7 @@ LIB_OBJS += linear-assignment.o LIB_OBJS += help.o LIB_OBJS += hex.o LIB_OBJS += ident.o +LIB_OBJS += interdiff.o LIB_OBJS += json-writer.o LIB_OBJS += kwset.o LIB_OBJS += levenshtein.o @@ -901,6 +919,7 @@ LIB_OBJS += merge.o LIB_OBJS += merge-blobs.o LIB_OBJS += merge-recursive.o LIB_OBJS += mergesort.o +LIB_OBJS += midx.o LIB_OBJS += name-hash.o LIB_OBJS += negotiator/default.o LIB_OBJS += negotiator/skipping.o @@ -936,6 +955,7 @@ LIB_OBJS += quote.o LIB_OBJS += range-diff.o LIB_OBJS += reachable.o LIB_OBJS += read-cache.o +LIB_OBJS += rebase-interactive.o LIB_OBJS += reflog-walk.o LIB_OBJS += refs.o LIB_OBJS += refs/files-backend.o @@ -973,6 +993,7 @@ LIB_OBJS += sub-process.o LIB_OBJS += symlinks.o LIB_OBJS += tag.o LIB_OBJS += tempfile.o +LIB_OBJS += thread-utils.o LIB_OBJS += tmp-objdir.o LIB_OBJS += trace.o LIB_OBJS += trailer.o @@ -1061,6 +1082,7 @@ BUILTIN_OBJS += builtin/merge-recursive.o BUILTIN_OBJS += builtin/merge-tree.o BUILTIN_OBJS += builtin/mktag.o BUILTIN_OBJS += builtin/mktree.o +BUILTIN_OBJS += builtin/multi-pack-index.o BUILTIN_OBJS += builtin/mv.o BUILTIN_OBJS += builtin/name-rev.o BUILTIN_OBJS += builtin/notes.o @@ -1074,7 +1096,8 @@ BUILTIN_OBJS += builtin/pull.o BUILTIN_OBJS += builtin/push.o BUILTIN_OBJS += builtin/range-diff.o BUILTIN_OBJS += builtin/read-tree.o -BUILTIN_OBJS += builtin/rebase--helper.o +BUILTIN_OBJS += builtin/rebase.o +BUILTIN_OBJS += builtin/rebase--interactive.o BUILTIN_OBJS += builtin/receive-pack.o BUILTIN_OBJS += builtin/reflog.o BUILTIN_OBJS += builtin/remote.o @@ -1286,20 +1309,17 @@ else ifdef CURLDIR # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case. BASIC_CFLAGS += -I$(CURLDIR)/include - CURL_LIBCURL = -L$(CURLDIR)/$(lib) $(CC_LD_DYNPATH)$(CURLDIR)/$(lib) -lcurl + CURL_LIBCURL = -L$(CURLDIR)/$(lib) $(CC_LD_DYNPATH)$(CURLDIR)/$(lib) else - CURL_LIBCURL = -lcurl - endif - ifdef NEEDS_SSL_WITH_CURL - CURL_LIBCURL += -lssl - ifdef NEEDS_CRYPTO_WITH_SSL - CURL_LIBCURL += -lcrypto - endif - endif - ifdef NEEDS_IDN_WITH_CURL - CURL_LIBCURL += -lidn + CURL_LIBCURL = endif +ifdef CURL_LDFLAGS + CURL_LIBCURL += $(CURL_LDFLAGS) +else + CURL_LIBCURL += $(shell $(CURL_CONFIG) --libs) +endif + REMOTE_CURL_PRIMARY = git-remote-http$X REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES) @@ -1431,13 +1451,14 @@ ifdef NO_SYMLINK_HEAD BASIC_CFLAGS += -DNO_SYMLINK_HEAD endif ifdef GETTEXT_POISON - BASIC_CFLAGS += -DGETTEXT_POISON +$(warning The GETTEXT_POISON option has been removed in favor of runtime GIT_TEST_GETTEXT_POISON. See t/README!) 
endif ifdef NO_GETTEXT BASIC_CFLAGS += -DNO_GETTEXT USE_GETTEXT_SCHEME ?= fallthrough endif ifdef NO_POLL + NO_POLL_H = YesPlease NO_SYS_POLL_H = YesPlease COMPAT_CFLAGS += -DNO_POLL -Icompat/poll COMPAT_OBJS += compat/poll/poll.o @@ -1476,6 +1497,9 @@ endif ifdef NO_SYS_SELECT_H BASIC_CFLAGS += -DNO_SYS_SELECT_H endif +ifdef NO_POLL_H + BASIC_CFLAGS += -DNO_POLL_H +endif ifdef NO_SYS_POLL_H BASIC_CFLAGS += -DNO_SYS_POLL_H endif @@ -1655,7 +1679,6 @@ ifdef NO_PTHREADS else BASIC_CFLAGS += $(PTHREAD_CFLAGS) EXTLIBS += $(PTHREAD_LIBS) - LIB_OBJS += thread-utils.o endif ifdef HAVE_PATHS_H @@ -1787,6 +1810,7 @@ ifndef V QUIET_MSGFMT = @echo ' ' MSGFMT $@; QUIET_GCOV = @echo ' ' GCOV $@; QUIET_SP = @echo ' ' SP $<; + QUIET_HDR = @echo ' ' HDR $<; QUIET_RC = @echo ' ' RC $@; QUIET_SUBDIR0 = +@subdir= QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ @@ -2048,7 +2072,7 @@ $(BUILT_INS): git$X command-list.h: generate-cmdlist.sh command-list.txt -command-list.h: $(wildcard Documentation/git*.txt) Documentation/*config.txt +command-list.h: $(wildcard Documentation/git*.txt) Documentation/*config.txt Documentation/config/*.txt $(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh command-list.txt >$@+ && mv $@+ $@ SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\ @@ -2088,7 +2112,7 @@ $(SCRIPT_LIB) : % : %.sh GIT-SCRIPT-DEFINES $(QUIET_GEN)$(cmd_munge_script) && \ mv $@+ $@ -git.res: git.rc GIT-VERSION-FILE +git.res: git.rc GIT-VERSION-FILE GIT-PREFIX $(QUIET_RC)$(RC) \ $(join -DMAJOR= -DMINOR= -DMICRO= -DPATCHLEVEL=, $(wordlist 1, 4, \ $(shell echo $(GIT_VERSION) 0 0 0 0 | tr '.a-zA-Z-' ' '))) \ @@ -2243,6 +2267,7 @@ TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS)) $(patsubst %,t/helper/%,$(TEST OBJECTS := $(LIB_OBJS) $(BUILTIN_OBJS) $(PROGRAM_OBJS) $(TEST_OBJS) \ $(XDIFF_OBJS) \ $(VCSSVN_OBJS) \ + $(FUZZ_OBJS) \ common-main.o \ git.o ifndef NO_CURL @@ -2413,7 +2438,6 @@ XGETTEXT_FLAGS_PERL = $(XGETTEXT_FLAGS) --language=Perl \ LOCALIZED_C = $(C_OBJ:o=c) $(LIB_H) $(GENERATED_H) LOCALIZED_SH = $(SCRIPT_SH) LOCALIZED_SH += git-parse-remote.sh -LOCALIZED_SH += git-rebase--interactive.sh LOCALIZED_SH += git-rebase--preserve-merges.sh LOCALIZED_SH += git-sh-setup.sh LOCALIZED_PERL = $(SCRIPT_PERL) @@ -2568,6 +2592,7 @@ GIT-BUILD-OPTIONS: FORCE @echo NO_UNIX_SOCKETS=\''$(subst ','\'',$(subst ','\'',$(NO_UNIX_SOCKETS)))'\' >>$@+ @echo PAGER_ENV=\''$(subst ','\'',$(subst ','\'',$(PAGER_ENV)))'\' >>$@+ @echo DC_SHA1=\''$(subst ','\'',$(subst ','\'',$(DC_SHA1)))'\' >>$@+ + @echo X=\'$(X)\' >>$@+ ifdef TEST_OUTPUT_DIRECTORY @echo TEST_OUTPUT_DIRECTORY=\''$(subst ','\'',$(subst ','\'',$(TEST_OUTPUT_DIRECTORY)))'\' >>$@+ endif @@ -2581,7 +2606,6 @@ ifdef GIT_TEST_CMP_USE_COPIED_CONTEXT @echo GIT_TEST_CMP_USE_COPIED_CONTEXT=YesPlease >>$@+ endif @echo NO_GETTEXT=\''$(subst ','\'',$(subst ','\'',$(NO_GETTEXT)))'\' >>$@+ - @echo GETTEXT_POISON=\''$(subst ','\'',$(subst ','\'',$(GETTEXT_POISON)))'\' >>$@+ ifdef GIT_PERF_REPEAT_COUNT @echo GIT_PERF_REPEAT_COUNT=\''$(subst ','\'',$(subst ','\'',$(GIT_PERF_REPEAT_COUNT)))'\' >>$@+ endif @@ -2600,8 +2624,8 @@ endif ifdef GIT_INTEROP_MAKE_OPTS @echo GIT_INTEROP_MAKE_OPTS=\''$(subst ','\'',$(subst ','\'',$(GIT_INTEROP_MAKE_OPTS)))'\' >>$@+ endif -ifdef TEST_GIT_INDEX_VERSION - @echo TEST_GIT_INDEX_VERSION=\''$(subst ','\'',$(subst ','\'',$(TEST_GIT_INDEX_VERSION)))'\' >>$@+ +ifdef GIT_TEST_INDEX_VERSION + @echo GIT_TEST_INDEX_VERSION=\''$(subst ','\'',$(subst ','\'',$(GIT_TEST_INDEX_VERSION)))'\' >>$@+ endif @if cmp $@+ $@ >/dev/null 2>&1; then $(RM) $@+; 
else mv $@+ $@; fi @@ -2669,6 +2693,17 @@ $(SP_OBJ): %.sp: %.c GIT-CFLAGS FORCE .PHONY: sparse $(SP_OBJ) sparse: $(SP_OBJ) +GEN_HDRS := command-list.h unicode-width.h +EXCEPT_HDRS := $(GEN_HDRS) compat% xdiff% +CHK_HDRS = $(filter-out $(EXCEPT_HDRS),$(patsubst ./%,%,$(LIB_H))) +HCO = $(patsubst %.h,%.hco,$(CHK_HDRS)) + +$(HCO): %.hco: %.h FORCE + $(QUIET_HDR)$(CC) -include git-compat-util.h -I. -o /dev/null -c -xc $< + +.PHONY: hdr-check $(HCO) +hdr-check: $(HCO) + .PHONY: style style: git clang-format --style file --diff --extensions c,h @@ -2707,9 +2742,12 @@ endif then \ echo ' ' SPATCH result: $@; \ fi -coccicheck: $(addsuffix .patch,$(wildcard contrib/coccinelle/*.cocci)) +coccicheck: $(addsuffix .patch,$(filter-out %.pending.cocci,$(wildcard contrib/coccinelle/*.cocci))) -.PHONY: coccicheck +# See contrib/coccinelle/README +coccicheck-pending: $(addsuffix .patch,$(wildcard contrib/coccinelle/*.pending.cocci)) + +.PHONY: coccicheck coccicheck-pending ### Installation rules @@ -2930,6 +2968,7 @@ clean: profile-clean coverage-clean cocciclean $(RM) $(LIB_FILE) $(XDIFF_LIB) $(VCSSVN_LIB) $(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) git$X $(RM) $(TEST_PROGRAMS) $(NO_INSTALL) + $(RM) $(FUZZ_PROGRAMS) $(RM) -r bin-wrappers $(dep_dirs) $(RM) -r po/build/ $(RM) *.pyc *.pyo */*.pyc */*.pyo command-list.h $(ETAGS_TARGET) tags cscope* @@ -3054,3 +3093,26 @@ cover_db: coverage-report cover_db_html: cover_db cover -report html -outputdir cover_db_html cover_db + +### Fuzz testing +# +# Building fuzz targets generally requires a special set of compiler flags that +# are not necessarily appropriate for general builds, and that vary greatly +# depending on the compiler version used. +# +# An example command to build against libFuzzer from LLVM 4.0.0: +# +# make CC=clang CXX=clang++ \ +# FUZZ_CXXFLAGS="-fsanitize-coverage=trace-pc-guard -fsanitize=address" \ +# LIB_FUZZING_ENGINE=/usr/lib/llvm-4.0/lib/libFuzzer.a \ +# fuzz-all +# +FUZZ_CXXFLAGS ?= $(CFLAGS) + +.PHONY: fuzz-all + +$(FUZZ_PROGRAMS): all + $(QUIET_LINK)$(CXX) $(FUZZ_CXXFLAGS) $(LIB_OBJS) $(BUILTIN_OBJS) \ + $(XDIFF_OBJS) $(EXTLIBS) git.o $@.o $(LIB_FUZZING_ENGINE) -o $@ + +fuzz-all: $(FUZZ_PROGRAMS) @@ -1 +1 @@ -Documentation/RelNotes/2.19.2.txt
\ No newline at end of file +Documentation/RelNotes/2.20.0.txt
\ No newline at end of file @@ -12,6 +12,7 @@ int advice_push_needs_force = 1; int advice_status_hints = 1; int advice_status_u_option = 1; int advice_commit_before_merge = 1; +int advice_reset_quiet_warning = 1; int advice_resolve_conflict = 1; int advice_implicit_identity = 1; int advice_detached_head = 1; @@ -65,6 +66,7 @@ static struct { { "statusHints", &advice_status_hints }, { "statusUoption", &advice_status_u_option }, { "commitBeforeMerge", &advice_commit_before_merge }, + { "resetQuiet", &advice_reset_quiet_warning }, { "resolveConflict", &advice_resolve_conflict }, { "implicitIdentity", &advice_implicit_identity }, { "detachedHead", &advice_detached_head }, @@ -12,6 +12,7 @@ extern int advice_push_needs_force; extern int advice_status_hints; extern int advice_status_u_option; extern int advice_commit_before_merge; +extern int advice_reset_quiet_warning; extern int advice_resolve_conflict; extern int advice_implicit_identity; extern int advice_detached_head; @@ -1,5 +1,5 @@ -#ifndef __ALIAS_H__ -#define __ALIAS_H__ +#ifndef ALIAS_H +#define ALIAS_H struct string_list; @@ -223,8 +223,8 @@ struct patch { struct fragment *fragments; char *result; size_t resultsize; - char old_sha1_prefix[41]; - char new_sha1_prefix[41]; + char old_oid_prefix[GIT_MAX_HEXSZ + 1]; + char new_oid_prefix[GIT_MAX_HEXSZ + 1]; struct patch *next; /* three-way fallback result */ @@ -1093,13 +1093,14 @@ static int gitdiff_index(struct apply_state *state, */ const char *ptr, *eol; int len; + const unsigned hexsz = the_hash_algo->hexsz; ptr = strchr(line, '.'); - if (!ptr || ptr[1] != '.' || 40 < ptr - line) + if (!ptr || ptr[1] != '.' || hexsz < ptr - line) return 0; len = ptr - line; - memcpy(patch->old_sha1_prefix, line, len); - patch->old_sha1_prefix[len] = 0; + memcpy(patch->old_oid_prefix, line, len); + patch->old_oid_prefix[len] = 0; line = ptr + 2; ptr = strchr(line, ' '); @@ -1109,10 +1110,10 @@ static int gitdiff_index(struct apply_state *state, ptr = eol; len = ptr - line; - if (40 < len) + if (hexsz < len) return 0; - memcpy(patch->new_sha1_prefix, line, len); - patch->new_sha1_prefix[len] = 0; + memcpy(patch->new_oid_prefix, line, len); + patch->new_oid_prefix[len] = 0; if (*ptr == ' ') return gitdiff_oldmode(state, ptr + 1, patch); return 0; @@ -1747,7 +1748,7 @@ static int parse_fragment(struct apply_state *state, } if (oldlines || newlines) return -1; - if (!deleted && !added) + if (!patch->recount && !deleted && !added) return -1; fragment->leading = leading; @@ -2131,10 +2132,12 @@ static int parse_chunk(struct apply_state *state, char *buffer, unsigned long si if (!use_patch(state, patch)) patch->ws_rule = 0; + else if (patch->new_name) + patch->ws_rule = whitespace_rule(state->repo->index, + patch->new_name); else - patch->ws_rule = whitespace_rule(patch->new_name - ? patch->new_name - : patch->old_name); + patch->ws_rule = whitespace_rule(state->repo->index, + patch->old_name); patchsize = parse_single_patch(state, buffer + offset + hdrsize, @@ -2204,7 +2207,7 @@ static void reverse_patches(struct patch *p) SWAP(p->new_mode, p->old_mode); SWAP(p->is_new, p->is_delete); SWAP(p->lines_added, p->lines_deleted); - SWAP(p->old_sha1_prefix, p->new_sha1_prefix); + SWAP(p->old_oid_prefix, p->new_oid_prefix); for (; frag; frag = frag->next) { SWAP(frag->newpos, frag->oldpos); @@ -3142,15 +3145,16 @@ static int apply_binary(struct apply_state *state, { const char *name = patch->old_name ? 
patch->old_name : patch->new_name; struct object_id oid; + const unsigned hexsz = the_hash_algo->hexsz; /* * For safety, we require patch index line to contain - * full 40-byte textual SHA1 for old and new, at least for now. + * full hex textual object ID for old and new, at least for now. */ - if (strlen(patch->old_sha1_prefix) != 40 || - strlen(patch->new_sha1_prefix) != 40 || - get_oid_hex(patch->old_sha1_prefix, &oid) || - get_oid_hex(patch->new_sha1_prefix, &oid)) + if (strlen(patch->old_oid_prefix) != hexsz || + strlen(patch->new_oid_prefix) != hexsz || + get_oid_hex(patch->old_oid_prefix, &oid) || + get_oid_hex(patch->new_oid_prefix, &oid)) return error(_("cannot apply binary patch to '%s' " "without full index line"), name); @@ -3160,7 +3164,7 @@ static int apply_binary(struct apply_state *state, * applies to. */ hash_object_file(img->buf, img->len, blob_type, &oid); - if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix)) + if (strcmp(oid_to_hex(&oid), patch->old_oid_prefix)) return error(_("the patch applies to '%s' (%s), " "which does not match the " "current contents."), @@ -3173,7 +3177,7 @@ static int apply_binary(struct apply_state *state, "'%s' but it is not empty"), name); } - get_oid_hex(patch->new_sha1_prefix, &oid); + get_oid_hex(patch->new_oid_prefix, &oid); if (is_null_oid(&oid)) { clear_image(img); return 0; /* deletion patch */ @@ -3189,7 +3193,7 @@ static int apply_binary(struct apply_state *state, if (!result) return error(_("the necessary postimage %s for " "'%s' cannot be read"), - patch->new_sha1_prefix, name); + patch->new_oid_prefix, name); clear_image(img); img->buf = result; img->len = size; @@ -3205,9 +3209,9 @@ static int apply_binary(struct apply_state *state, /* verify that the result matches */ hash_object_file(img->buf, img->len, blob_type, &oid); - if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix)) + if (strcmp(oid_to_hex(&oid), patch->new_oid_prefix)) return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"), - name, patch->new_sha1_prefix, oid_to_hex(&oid)); + name, patch->new_oid_prefix, oid_to_hex(&oid)); } return 0; @@ -3467,7 +3471,8 @@ static int load_preimage(struct apply_state *state, return 0; } -static int three_way_merge(struct image *image, +static int three_way_merge(struct apply_state *state, + struct image *image, char *path, const struct object_id *base, const struct object_id *ours, @@ -3483,7 +3488,9 @@ static int three_way_merge(struct image *image, status = ll_merge(&result, path, &base_file, "base", &our_file, "ours", - &their_file, "theirs", NULL); + &their_file, "theirs", + state->repo->index, + NULL); free(base_file.ptr); free(our_file.ptr); free(their_file.ptr); @@ -3563,7 +3570,7 @@ static int try_threeway(struct apply_state *state, /* Preimage the patch was prepared for */ if (patch->is_new) write_object_file("", 0, blob_type, &pre_oid); - else if (get_oid(patch->old_sha1_prefix, &pre_oid) || + else if (get_oid(patch->old_oid_prefix, &pre_oid) || read_blob_object(&buf, &pre_oid, patch->old_mode)) return error(_("repository lacks the necessary blob to fall back on 3-way merge.")); @@ -3595,7 +3602,7 @@ static int try_threeway(struct apply_state *state, clear_image(&tmp_image); /* in-core three-way merge between post and our using pre as base */ - status = three_way_merge(image, patch->new_name, + status = three_way_merge(state, image, patch->new_name, &pre_oid, &our_oid, &post_oid); if (status < 0) { if (state->apply_verbosity > verbosity_silent) @@ -4055,13 +4062,13 @@ static int 
preimage_oid_in_gitlink_patch(struct patch *p, struct object_id *oid) starts_with(++preimage, heading) && /* does it record full SHA-1? */ !get_oid_hex(preimage + sizeof(heading) - 1, oid) && - preimage[sizeof(heading) + GIT_SHA1_HEXSZ - 1] == '\n' && + preimage[sizeof(heading) + the_hash_algo->hexsz - 1] == '\n' && /* does the abbreviated name on the index line agree with it? */ - starts_with(preimage + sizeof(heading) - 1, p->old_sha1_prefix)) + starts_with(preimage + sizeof(heading) - 1, p->old_oid_prefix)) return 0; /* it all looks fine */ /* we may have full object name on the index line */ - return get_oid_hex(p->old_sha1_prefix, oid); + return get_oid_hex(p->old_oid_prefix, oid); } /* Build an index that contains just the files needed for a 3way merge */ @@ -4090,7 +4097,7 @@ static int build_fake_ancestor(struct apply_state *state, struct patch *list) else return error(_("sha1 information is lacking or " "useless for submodule %s"), name); - } else if (!get_oid_blob(patch->old_sha1_prefix, &oid)) { + } else if (!get_oid_blob(patch->old_oid_prefix, &oid)) { ; /* ok */ } else if (!patch->lines_added && !patch->lines_deleted) { /* mode-only change: update the current */ @@ -4627,7 +4634,7 @@ static int write_out_results(struct apply_state *state, struct patch *list) } string_list_clear(&cpath, 0); - rerere(0); + repo_rerere(state->repo, 0); } return errs; @@ -4765,6 +4772,9 @@ static int apply_option_parse_exclude(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + add_name_limit(state, arg, 1); return 0; } @@ -4773,6 +4783,9 @@ static int apply_option_parse_include(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + add_name_limit(state, arg, 0); state->has_include = 1; return 0; @@ -4783,6 +4796,9 @@ static int apply_option_parse_p(const struct option *opt, int unset) { struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + state->p_value = atoi(arg); state->p_value_known = 1; return 0; @@ -4792,6 +4808,9 @@ static int apply_option_parse_space_change(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; + + BUG_ON_OPT_ARG(arg); + if (unset) state->ws_ignore_action = ignore_ws_none; else @@ -4803,9 +4822,12 @@ static int apply_option_parse_whitespace(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + state->whitespace_option = arg; if (parse_whitespace_option(state, arg)) - exit(1); + return -1; return 0; } @@ -4813,6 +4835,9 @@ static int apply_option_parse_directory(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + strbuf_reset(&state->root); strbuf_addstr(&state->root, arg); strbuf_complete(&state->root, '/'); @@ -4932,10 +4957,10 @@ int apply_parse_options(int argc, const char **argv, struct option builtin_apply_options[] = { { OPTION_CALLBACK, 0, "exclude", state, N_("path"), N_("don't apply changes matching the given path"), - 0, apply_option_parse_exclude }, + PARSE_OPT_NONEG, apply_option_parse_exclude }, { OPTION_CALLBACK, 0, "include", state, N_("path"), N_("apply changes matching the given path"), - 0, apply_option_parse_include }, + PARSE_OPT_NONEG, apply_option_parse_include }, { OPTION_CALLBACK, 'p', NULL, state, N_("num"), N_("remove <num> leading slashes from traditional diff paths"), 0, apply_option_parse_p }, diff --git 
a/archive-tar.c b/archive-tar.c index 7a535cba24..a58e1a8ebf 100644 --- a/archive-tar.c +++ b/archive-tar.c @@ -202,7 +202,7 @@ static void prepare_header(struct archiver_args *args, unsigned int mode, unsigned long size) { xsnprintf(header->mode, sizeof(header->mode), "%07o", mode & 07777); - xsnprintf(header->size, sizeof(header->size), "%011lo", S_ISREG(mode) ? size : 0); + xsnprintf(header->size, sizeof(header->size), "%011"PRIoMAX , S_ISREG(mode) ? (uintmax_t)size : (uintmax_t)0); xsnprintf(header->mtime, sizeof(header->mtime), "%011lo", (unsigned long) args->time); xsnprintf(header->uid, sizeof(header->uid), "%07o", 0); diff --git a/archive-zip.c b/archive-zip.c index 5a62351ab1..155ee4a779 100644 --- a/archive-zip.c +++ b/archive-zip.c @@ -264,9 +264,10 @@ static int has_only_ascii(const char *s) } } -static int entry_is_binary(const char *path, const void *buffer, size_t size) +static int entry_is_binary(struct index_state *istate, const char *path, + const void *buffer, size_t size) { - struct userdiff_driver *driver = userdiff_find_by_path(path); + struct userdiff_driver *driver = userdiff_find_by_path(istate, path); if (!driver) driver = userdiff_find_by_name("default"); if (driver->binary != -1) @@ -352,7 +353,8 @@ static int write_zip_entry(struct archiver_args *args, return error(_("cannot read %s"), oid_to_hex(oid)); crc = crc32(crc, buffer, size); - is_binary = entry_is_binary(path_without_prefix, + is_binary = entry_is_binary(args->repo->index, + path_without_prefix, buffer, size); out = buffer; } @@ -428,7 +430,8 @@ static int write_zip_entry(struct archiver_args *args, break; crc = crc32(crc, buf, readlen); if (is_binary == -1) - is_binary = entry_is_binary(path_without_prefix, + is_binary = entry_is_binary(args->repo->index, + path_without_prefix, buf, readlen); write_or_die(1, buf, readlen); } @@ -460,7 +463,8 @@ static int write_zip_entry(struct archiver_args *args, break; crc = crc32(crc, buf, readlen); if (is_binary == -1) - is_binary = entry_is_binary(path_without_prefix, + is_binary = entry_is_binary(args->repo->index, + path_without_prefix, buf, readlen); zstream.next_in = buf; @@ -29,6 +29,12 @@ void register_archiver(struct archiver *ar) archivers[nr_archivers++] = ar; } +void init_archivers(void) +{ + init_tar_archiver(); + init_zip_archiver(); +} + static void format_subst(const struct commit *commit, const char *src, size_t len, struct strbuf *buf) @@ -392,7 +398,7 @@ static void parse_treeish_arg(const char **argv, if (get_oid(name, &oid)) die("Not a valid object name"); - commit = lookup_commit_reference_gently(the_repository, &oid, 1); + commit = lookup_commit_reference_gently(ar_args->repo, &oid, 1); if (commit) { commit_sha1 = commit->object.oid.hash; archive_time = commit->date; @@ -531,9 +537,6 @@ int write_archive(int argc, const char **argv, const char *prefix, git_config_get_bool("uploadarchive.allowunreachable", &remote_allow_unreachable); git_config(git_default_config, NULL); - init_tar_archiver(); - init_zip_archiver(); - args.repo = repo; argc = parse_archive_args(argc, argv, &ar, &args, name_hint, remote); if (!startup_info->have_repository) { @@ -43,6 +43,7 @@ extern void register_archiver(struct archiver *); extern void init_tar_archiver(void); extern void init_zip_archiver(void); +extern void init_archivers(void); typedef int (*write_archive_entry_fn_t)(struct archiver_args *args, const struct object_id *oid, @@ -41,23 +41,17 @@ const char *git_attr_name(const struct git_attr *attr) struct attr_hashmap { struct hashmap map; -#ifndef 
NO_PTHREADS pthread_mutex_t mutex; -#endif }; static inline void hashmap_lock(struct attr_hashmap *map) { -#ifndef NO_PTHREADS pthread_mutex_lock(&map->mutex); -#endif } static inline void hashmap_unlock(struct attr_hashmap *map) { -#ifndef NO_PTHREADS pthread_mutex_unlock(&map->mutex); -#endif } /* @@ -498,23 +492,17 @@ static struct check_vector { size_t nr; size_t alloc; struct attr_check **checks; -#ifndef NO_PTHREADS pthread_mutex_t mutex; -#endif } check_vector; static inline void vector_lock(void) { -#ifndef NO_PTHREADS pthread_mutex_lock(&check_vector.mutex); -#endif } static inline void vector_unlock(void) { -#ifndef NO_PTHREADS pthread_mutex_unlock(&check_vector.mutex); -#endif } static void check_vector_add(struct attr_check *c) @@ -1181,8 +1169,6 @@ void git_all_attrs(const struct index_state *istate, void attr_start(void) { -#ifndef NO_PTHREADS pthread_mutex_init(&g_attr_hashmap.mutex, NULL); pthread_mutex_init(&check_vector.mutex, NULL); -#endif } @@ -13,6 +13,8 @@ #include "sha1-array.h" #include "argv-array.h" #include "commit-slab.h" +#include "commit-reach.h" +#include "object-store.h" static struct oid_array good_revs; static struct oid_array skipped_revs; @@ -120,14 +122,14 @@ static inline int halfway(struct commit_list *p, int nr) } } -#if !DEBUG_BISECT -#define show_list(a,b,c,d) do { ; } while (0) -#else static void show_list(const char *debug, int counted, int nr, struct commit_list *list) { struct commit_list *p; + if (!DEBUG_BISECT) + return; + fprintf(stderr, "%s (%d/%d)\n", debug, counted, nr); for (p = list; p; p = p->next) { @@ -145,7 +147,7 @@ static void show_list(const char *debug, int counted, int nr, (flags & TREESAME) ? ' ' : 'T', (flags & UNINTERESTING) ? 'U' : ' ', (flags & COUNTED) ? 'C' : ' '); - if (commit->util) + if (*commit_weight_at(&commit_weight, p->item)) fprintf(stderr, "%3d", weight(p)); else fprintf(stderr, "---"); @@ -160,7 +162,6 @@ static void show_list(const char *debug, int counted, int nr, fprintf(stderr, "\n"); } } -#endif /* DEBUG_BISECT */ static struct commit_list *best_bisection(struct commit_list *list, int nr) { @@ -595,7 +596,7 @@ static struct commit_list *skip_away(struct commit_list *list, int count) for (i = 0; cur; cur = cur->next, i++) { if (i == index) { - if (oidcmp(&cur->item->object.oid, current_bad_oid)) + if (!oideq(&cur->item->object.oid, current_bad_oid)) return cur; if (previous) return previous; @@ -632,7 +633,7 @@ static void bisect_rev_setup(struct rev_info *revs, const char *prefix, struct argv_array rev_argv = ARGV_ARRAY_INIT; int i; - init_revisions(revs, prefix); + repo_init_revisions(the_repository, revs, prefix); revs->abbrev = 0; revs->commit_format = CMIT_FMT_UNSPECIFIED; @@ -807,7 +808,7 @@ static void check_merge_bases(int rev_nr, struct commit **rev, int no_checkout) for (; result; result = result->next) { const struct object_id *mb = &result->item->object.oid; - if (!oidcmp(mb, current_bad_oid)) { + if (oideq(mb, current_bad_oid)) { handle_bad_merge_base(); } else if (0 <= oid_array_lookup(&good_revs, mb)) { continue; @@ -889,7 +890,7 @@ static void show_diff_tree(const char *prefix, struct commit *commit) struct rev_info opt; /* diff-tree init */ - init_revisions(&opt, prefix); + repo_init_revisions(the_repository, &opt, prefix); git_config(git_diff_basic_config, NULL); /* no "diff" UI options */ opt.abbrev = 0; opt.diff = 1; @@ -988,7 +989,7 @@ int bisect_next_all(const char *prefix, int no_checkout) bisect_rev = &revs.commits->item->object.oid; - if (!oidcmp(bisect_rev, current_bad_oid)) { + 
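The bisect.c change at this point is one instance of a mechanical conversion that runs through the whole diff: "!oidcmp(a, b)" becomes "oideq(a, b)", an oidcmp() used as a plain truth value becomes "!oideq(a, b)", and the raw-hash variants hashcmp()/hasheq() follow the same rule. A minimal sketch of the equivalence, assuming oideq() as it is used in these hunks (the helper name is invented):

static int is_current_bad(const struct object_id *candidate,
			  const struct object_id *current_bad)
{
	/* before this series: return !oidcmp(candidate, current_bad); */
	return oideq(candidate, current_bad);
}

The equality-only helpers state the intent directly and free the implementation from the three-way ordering contract that the *cmp() functions carry.
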
if (oideq(bisect_rev, current_bad_oid)) { exit_if_skipped_commits(tried, current_bad_oid); printf("%s is the first %s commit\n", oid_to_hex(bisect_rev), term_bad); @@ -90,7 +90,7 @@ static struct blame_origin *get_origin(struct commit *commit, const char *path) -static void verify_working_tree_path(struct repository *repo, +static void verify_working_tree_path(struct repository *r, struct commit *work_tree, const char *path) { struct commit_list *parents; @@ -102,15 +102,15 @@ static void verify_working_tree_path(struct repository *repo, unsigned mode; if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) && - oid_object_info(repo, &blob_oid, NULL) == OBJ_BLOB) + oid_object_info(r, &blob_oid, NULL) == OBJ_BLOB) return; } - pos = index_name_pos(repo->index, path, strlen(path)); + pos = index_name_pos(r->index, path, strlen(path)); if (pos >= 0) ; /* path is in the index */ - else if (-1 - pos < repo->index->cache_nr && - !strcmp(repo->index->cache[-1 - pos]->name, path)) + else if (-1 - pos < r->index->cache_nr && + !strcmp(r->index->cache[-1 - pos]->name, path)) ; /* path is in the index, unmerged */ else die("no such path '%s' in HEAD", path); @@ -166,7 +166,7 @@ static void set_commit_buffer_from_strbuf(struct commit *c, struct strbuf *sb) * Prepare a dummy commit that represents the work tree (or staged) item. * Note that annotating work tree item never works in the reverse. */ -static struct commit *fake_working_tree_commit(struct repository *repo, +static struct commit *fake_working_tree_commit(struct repository *r, struct diff_options *opt, const char *path, const char *contents_from) @@ -183,7 +183,7 @@ static struct commit *fake_working_tree_commit(struct repository *repo, unsigned mode; struct strbuf msg = STRBUF_INIT; - read_index(repo->index); + read_index(r->index); time(&now); commit = alloc_commit_node(the_repository); commit->object.parsed = 1; @@ -195,7 +195,7 @@ static struct commit *fake_working_tree_commit(struct repository *repo, parent_tail = append_parent(parent_tail, &head_oid); append_merge_parents(parent_tail); - verify_working_tree_path(repo, commit, path); + verify_working_tree_path(r, commit, path); origin = make_origin(commit, path); @@ -234,7 +234,7 @@ static struct commit *fake_working_tree_commit(struct repository *repo, switch (st.st_mode & S_IFMT) { case S_IFREG: if (opt->flags.allow_textconv && - textconv_object(read_from, mode, &null_oid, 0, &buf_ptr, &buf_len)) + textconv_object(r, read_from, mode, &null_oid, 0, &buf_ptr, &buf_len)) strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1); else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size) die_errno("cannot open or read '%s'", read_from); @@ -253,7 +253,7 @@ static struct commit *fake_working_tree_commit(struct repository *repo, if (strbuf_read(&buf, 0, 0) < 0) die_errno("failed to read from stdin"); } - convert_to_git(repo->index, path, buf.buf, buf.len, &buf, 0); + convert_to_git(r->index, path, buf.buf, buf.len, &buf, 0); origin->file.ptr = buf.buf; origin->file.size = buf.len; pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid); @@ -264,28 +264,28 @@ static struct commit *fake_working_tree_commit(struct repository *repo, * bits; we are not going to write this index out -- we just * want to run "diff-index --cached". 
*/ - discard_index(repo->index); - read_index(repo->index); + discard_index(r->index); + read_index(r->index); len = strlen(path); if (!mode) { - int pos = index_name_pos(repo->index, path, len); + int pos = index_name_pos(r->index, path, len); if (0 <= pos) - mode = repo->index->cache[pos]->ce_mode; + mode = r->index->cache[pos]->ce_mode; else /* Let's not bother reading from HEAD tree */ mode = S_IFREG | 0644; } - ce = make_empty_cache_entry(repo->index, len); + ce = make_empty_cache_entry(r->index, len); oidcpy(&ce->oid, &origin->blob_oid); memcpy(ce->name, path, len); ce->ce_flags = create_ce_flags(0); ce->ce_namelen = len; ce->ce_mode = create_ce_mode(mode); - add_index_entry(repo->index, ce, + add_index_entry(r->index, ce, ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); - cache_tree_invalidate_path(repo->index, path); + cache_tree_invalidate_path(r->index, path); return commit; } @@ -318,7 +318,8 @@ static void fill_origin_blob(struct diff_options *opt, (*num_read_blob)++; if (opt->flags.allow_textconv && - textconv_object(o->path, o->mode, &o->blob_oid, 1, &file->ptr, &file_size)) + textconv_object(opt->repo, o->path, o->mode, + &o->blob_oid, 1, &file->ptr, &file_size)) ; else file->ptr = read_object_file(&o->blob_oid, &type, @@ -520,14 +521,14 @@ static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porig * * This also fills origin->mode for corresponding tree path. */ -static int fill_blob_sha1_and_mode(struct repository *repo, +static int fill_blob_sha1_and_mode(struct repository *r, struct blame_origin *origin) { if (!is_null_oid(&origin->blob_oid)) return 0; if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode)) goto error_out; - if (oid_object_info(repo, &origin->blob_oid, NULL) != OBJ_BLOB) + if (oid_object_info(r, &origin->blob_oid, NULL) != OBJ_BLOB) goto error_out; return 0; error_out: @@ -540,8 +541,9 @@ static int fill_blob_sha1_and_mode(struct repository *repo, * We have an origin -- check if the same path exists in the * parent and return an origin structure to represent it. */ -static struct blame_origin *find_origin(struct commit *parent, - struct blame_origin *origin) +static struct blame_origin *find_origin(struct repository *r, + struct commit *parent, + struct blame_origin *origin) { struct blame_origin *porigin; struct diff_options diff_opts; @@ -561,7 +563,7 @@ static struct blame_origin *find_origin(struct commit *parent, * and origin first. Most of the time they are the * same and diff-tree is fairly efficient about this. */ - diff_setup(&diff_opts); + repo_diff_setup(r, &diff_opts); diff_opts.flags.recursive = 1; diff_opts.detect_rename = 0; diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; @@ -628,14 +630,15 @@ static struct blame_origin *find_origin(struct commit *parent, * We have an origin -- find the path that corresponds to it in its * parent and return an origin structure to represent it. 
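The blame.c hunks above thread an explicit "struct repository *" through code that previously reached for the_repository and the_index implicitly, and the setup calls switch to the repo_*() spellings to match. A minimal sketch of the calling convention adopted here, assuming repo_init_revisions() and repo_diff_setup() as used in these hunks (the wrapper function itself is invented for illustration):

static void setup_blame_walk(struct repository *r, const char *prefix,
			     struct rev_info *revs,
			     struct diff_options *diff_opts)
{
	/* was: init_revisions(revs, prefix); diff_setup(diff_opts); */
	repo_init_revisions(r, revs, prefix);
	repo_diff_setup(r, diff_opts);
	diff_setup_done(diff_opts);
}

Taking the repository as a parameter is what later allows the same code to operate on repositories other than the main one, for example submodules opened in-process.
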
*/ -static struct blame_origin *find_rename(struct commit *parent, - struct blame_origin *origin) +static struct blame_origin *find_rename(struct repository *r, + struct commit *parent, + struct blame_origin *origin) { struct blame_origin *porigin = NULL; struct diff_options diff_opts; int i; - diff_setup(&diff_opts); + repo_diff_setup(r, &diff_opts); diff_opts.flags.recursive = 1; diff_opts.detect_rename = DIFF_DETECT_RENAME; diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; @@ -1259,7 +1262,7 @@ static void find_copy_in_parent(struct blame_scoreboard *sb, if (!unblamed) return; /* nothing remains for this target */ - diff_setup(&diff_opts); + repo_diff_setup(sb->repo, &diff_opts); diff_opts.flags.recursive = 1; diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; @@ -1441,7 +1444,7 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, * common cases, then we look for renames in the second pass. */ for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) { - struct blame_origin *(*find)(struct commit *, struct blame_origin *); + struct blame_origin *(*find)(struct repository *, struct commit *, struct blame_origin *); find = pass ? find_rename : find_origin; for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); @@ -1454,17 +1457,17 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, continue; if (parse_commit(p)) continue; - porigin = find(p, origin); + porigin = find(sb->repo, p, origin); if (!porigin) continue; - if (!oidcmp(&porigin->blob_oid, &origin->blob_oid)) { + if (oideq(&porigin->blob_oid, &origin->blob_oid)) { pass_whole_blame(sb, origin, porigin); blame_origin_decref(porigin); goto finish; } for (j = same = 0; j < i; j++) if (sg_origin[j] && - !oidcmp(&sg_origin[j]->blob_oid, &porigin->blob_oid)) { + oideq(&sg_origin[j]->blob_oid, &porigin->blob_oid)) { same = 1; break; } @@ -1832,7 +1835,7 @@ void setup_scoreboard(struct blame_scoreboard *sb, sb->revs->children.name = "children"; while (c->parents && - oidcmp(&c->object.oid, &sb->final->object.oid)) { + !oideq(&c->object.oid, &sb->final->object.oid)) { struct commit_list *l = xcalloc(1, sizeof(*l)); l->item = c; @@ -1842,7 +1845,7 @@ void setup_scoreboard(struct blame_scoreboard *sb, c = c->parents->item; } - if (oidcmp(&c->object.oid, &sb->final->object.oid)) + if (!oideq(&c->object.oid, &sb->final->object.oid)) die(_("--reverse --first-parent together require range along first-parent chain")); } @@ -1857,7 +1860,7 @@ void setup_scoreboard(struct blame_scoreboard *sb, die(_("no such path %s in %s"), path, final_commit_name); if (sb->revs->diffopt.flags.allow_textconv && - textconv_object(path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf, + textconv_object(sb->repo, path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf, &sb->final_buf_size)) ; else @@ -191,6 +191,7 @@ extern int cmd_merge_recursive(int argc, const char **argv, const char *prefix); extern int cmd_merge_tree(int argc, const char **argv, const char *prefix); extern int cmd_mktag(int argc, const char **argv, const char *prefix); extern int cmd_mktree(int argc, const char **argv, const char *prefix); +extern int cmd_multi_pack_index(int argc, const char **argv, const char *prefix); extern int cmd_mv(int argc, const char **argv, const char *prefix); extern int cmd_name_rev(int argc, const char **argv, const char *prefix); extern int cmd_notes(int argc, const char **argv, const char *prefix); @@ -203,7 +204,8 @@ extern int cmd_pull(int argc, const char **argv, const char *prefix); extern 
int cmd_push(int argc, const char **argv, const char *prefix); extern int cmd_range_diff(int argc, const char **argv, const char *prefix); extern int cmd_read_tree(int argc, const char **argv, const char *prefix); -extern int cmd_rebase__helper(int argc, const char **argv, const char *prefix); +extern int cmd_rebase(int argc, const char **argv, const char *prefix); +extern int cmd_rebase__interactive(int argc, const char **argv, const char *prefix); extern int cmd_receive_pack(int argc, const char **argv, const char *prefix); extern int cmd_reflog(int argc, const char **argv, const char *prefix); extern int cmd_remote(int argc, const char **argv, const char *prefix); diff --git a/builtin/add.c b/builtin/add.c index 0b64bcdebe..f65c172299 100644 --- a/builtin/add.c +++ b/builtin/add.c @@ -110,7 +110,7 @@ int add_files_to_cache(const char *prefix, memset(&data, 0, sizeof(data)); data.flags = flags; - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); setup_revisions(0, NULL, &rev, NULL); if (pathspec) copy_pathspec(&rev.prune_data, pathspec); @@ -232,7 +232,7 @@ static int edit_patch(int argc, const char **argv, const char *prefix) if (read_cache() < 0) die(_("Could not read the index")); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.diffopt.context = 7; argc = setup_revisions(argc, argv, &rev, NULL); @@ -445,11 +445,6 @@ int cmd_add(int argc, const char **argv, const char *prefix) return 0; } - if (read_cache() < 0) - die(_("index file corrupt")); - - die_in_unpopulated_submodule(&the_index, prefix); - /* * Check the "pathspec '%s' did not match any files" block * below before enabling new magic. @@ -459,6 +454,10 @@ int cmd_add(int argc, const char **argv, const char *prefix) PATHSPEC_SYMLINK_LEADING_PATH, prefix, argv); + if (read_cache_preload(&pathspec) < 0) + die(_("index file corrupt")); + + die_in_unpopulated_submodule(&the_index, prefix); die_path_inside_submodule(&the_index, &pathspec); if (add_new_files) { diff --git a/builtin/am.c b/builtin/am.c index 5e866d17c7..8f27f3375b 100644 --- a/builtin/am.c +++ b/builtin/am.c @@ -261,32 +261,6 @@ static int read_state_file(struct strbuf *sb, const struct am_state *state, } /** - * Take a series of KEY='VALUE' lines where VALUE part is - * sq-quoted, and append <KEY, VALUE> at the end of the string list - */ -static int parse_key_value_squoted(char *buf, struct string_list *list) -{ - while (*buf) { - struct string_list_item *item; - char *np; - char *cp = strchr(buf, '='); - if (!cp) - return -1; - np = strchrnul(cp, '\n'); - *cp++ = '\0'; - item = string_list_append(list, buf); - - buf = np + (*np == '\n'); - *np = '\0'; - cp = sq_dequote(cp); - if (!cp) - return -1; - item->util = xstrdup(cp); - } - return 0; -} - -/** * Reads and parses the state directory's "author-script" file, and sets * state->author_name, state->author_email and state->author_date accordingly. * Returns 0 on success, -1 if the file could not be parsed. @@ -302,42 +276,16 @@ static int parse_key_value_squoted(char *buf, struct string_list *list) * script, and thus if the file differs from what this function expects, it is * better to bail out than to do something that the user does not expect. 
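In the hunk that follows, am.c stops hand-parsing this file and delegates to the shared read_author_script() helper; the parse_key_value_squoted() code removed above documents the format being parsed. For reference, an author-script file is a series of KEY='VALUE' lines with the value sq-quoted, for example (the values are invented):

GIT_AUTHOR_NAME='A U Thor'
GIT_AUTHOR_EMAIL='author@example.com'
GIT_AUTHOR_DATE='@1234567890 -0500'

The trailing "1" passed to read_author_script() below appears to keep the old behaviour of treating a missing file as "nothing to load" rather than an error, matching the ENOENT branch deleted above.
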
*/ -static int read_author_script(struct am_state *state) +static int read_am_author_script(struct am_state *state) { const char *filename = am_path(state, "author-script"); - struct strbuf buf = STRBUF_INIT; - struct string_list kv = STRING_LIST_INIT_DUP; - int retval = -1; /* assume failure */ - int fd; assert(!state->author_name); assert(!state->author_email); assert(!state->author_date); - fd = open(filename, O_RDONLY); - if (fd < 0) { - if (errno == ENOENT) - return 0; - die_errno(_("could not open '%s' for reading"), filename); - } - strbuf_read(&buf, fd, 0); - close(fd); - if (parse_key_value_squoted(buf.buf, &kv)) - goto finish; - - if (kv.nr != 3 || - strcmp(kv.items[0].string, "GIT_AUTHOR_NAME") || - strcmp(kv.items[1].string, "GIT_AUTHOR_EMAIL") || - strcmp(kv.items[2].string, "GIT_AUTHOR_DATE")) - goto finish; - state->author_name = kv.items[0].util; - state->author_email = kv.items[1].util; - state->author_date = kv.items[2].util; - retval = 0; -finish: - string_list_clear(&kv, !!retval); - strbuf_release(&buf); - return retval; + return read_author_script(filename, &state->author_name, + &state->author_email, &state->author_date, 1); } /** @@ -411,7 +359,7 @@ static void am_load(struct am_state *state) BUG("state file 'last' does not exist"); state->last = strtol(sb.buf, NULL, 10); - if (read_author_script(state) < 0) + if (read_am_author_script(state) < 0) die(_("could not parse author script")); read_commit_msg(state); @@ -1244,6 +1192,10 @@ static int parse_mail(struct am_state *state, const char *mail) fclose(mi.input); fclose(mi.output); + if (mi.format_flowed) + warning(_("Patch sent with format=flowed; " + "space at the end of lines might be lost.")); + /* Extract message and author information */ fp = xfopen(am_path(state, "info"), "r"); while (!strbuf_getline_lf(&sb, fp)) { @@ -1372,7 +1324,7 @@ static void write_commit_patch(const struct am_state *state, struct commit *comm FILE *fp; fp = xfopen(am_path(state, "patch"), "w"); - init_revisions(&rev_info, NULL); + repo_init_revisions(the_repository, &rev_info, NULL); rev_info.diff = 1; rev_info.abbrev = 0; rev_info.disable_stdin = 1; @@ -1407,7 +1359,7 @@ static void write_index_patch(const struct am_state *state) the_repository->hash_algo->empty_tree); fp = xfopen(am_path(state, "patch"), "w"); - init_revisions(&rev_info, NULL); + repo_init_revisions(the_repository, &rev_info, NULL); rev_info.diff = 1; rev_info.disable_stdin = 1; rev_info.no_commit_id = 1; @@ -1565,7 +1517,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa struct rev_info rev_info; const char *diff_filter_str = "--diff-filter=AM"; - init_revisions(&rev_info, NULL); + repo_init_revisions(the_repository, &rev_info, NULL); rev_info.diffopt.output_format = DIFF_FORMAT_NAME_STATUS; diff_opt_parse(&rev_info.diffopt, &diff_filter_str, 1, rev_info.prefix); add_pending_oid(&rev_info, "HEAD", &our_tree, 0); @@ -1604,7 +1556,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa o.verbosity = 0; if (merge_recursive_generic(&o, &our_tree, &their_tree, 1, bases, &result)) { - rerere(state->allow_rerere_autoupdate); + repo_rerere(the_repository, state->allow_rerere_autoupdate); free(their_tree_name); return error(_("Failed to merge in the changes.")); } @@ -1899,7 +1851,7 @@ static void am_resolve(struct am_state *state) goto next; } - rerere(0); + repo_rerere(the_repository, 0); do_commit(state); @@ -2078,7 +2030,7 @@ static int safe_to_abort(const struct am_state *state) if (get_oid("HEAD", 
&head)) oidclr(&head); - if (!oidcmp(&head, &abort_safety)) + if (oideq(&head, &abort_safety)) return 1; warning(_("You seem to have moved HEAD since the last 'am' failure.\n" @@ -2161,7 +2113,9 @@ static int parse_opt_patchformat(const struct option *opt, const char *arg, int { int *opt_value = opt->value; - if (!strcmp(arg, "mbox")) + if (unset) + *opt_value = PATCH_FORMAT_UNKNOWN; + else if (!strcmp(arg, "mbox")) *opt_value = PATCH_FORMAT_MBOX; else if (!strcmp(arg, "stgit")) *opt_value = PATCH_FORMAT_STGIT; @@ -2324,7 +2278,7 @@ int cmd_am(int argc, const char **argv, const char *prefix) /* Ensure a valid committer ident can be constructed */ git_committer_info(IDENT_STRICT); - if (read_index_preload(&the_index, NULL) < 0) + if (read_index_preload(&the_index, NULL, 0) < 0) die(_("failed to read the index")); if (in_progress) { diff --git a/builtin/archive.c b/builtin/archive.c index e74f675390..d2455237ce 100644 --- a/builtin/archive.c +++ b/builtin/archive.c @@ -97,6 +97,8 @@ int cmd_archive(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, local_opts, NULL, PARSE_OPT_KEEP_ALL); + init_archivers(); + if (output) create_output_file(output); diff --git a/builtin/blame.c b/builtin/blame.c index c2da673ac8..06a7163ffe 100644 --- a/builtin/blame.c +++ b/builtin/blame.c @@ -732,6 +732,8 @@ static int blame_copy_callback(const struct option *option, const char *arg, int { int *opt = option->value; + BUG_ON_OPT_NEG(unset); + /* * -C enables copy from removed files; * -C -C enables copy from existing files, but only @@ -754,6 +756,8 @@ static int blame_move_callback(const struct option *option, const char *arg, int { int *opt = option->value; + BUG_ON_OPT_NEG(unset); + *opt |= PICKAXE_BLAME_MOVE; if (arg) @@ -830,7 +834,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix) setup_default_color_by_age(); git_config(git_blame_config, &output_option); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); revs.date_mode = blame_date_mode; revs.diffopt.flags.allow_textconv = 1; revs.diffopt.flags.follow_renames = 1; @@ -1001,7 +1005,7 @@ parse_done: long bottom, top; if (parse_range_arg(range_list.items[range_i].string, nth_line_cb, &sb, lno, anchor, - &bottom, &top, sb.path)) + &bottom, &top, sb.path, &the_index)) usage(blame_usage); if ((!lno && (top || bottom)) || lno < bottom) die(Q_("file %s has only %lu line", diff --git a/builtin/branch.c b/builtin/branch.c index ca9ed0abe3..0c55f7f065 100644 --- a/builtin/branch.c +++ b/builtin/branch.c @@ -23,6 +23,7 @@ #include "ref-filter.h" #include "worktree.h" #include "help.h" +#include "commit-reach.h" static const char * const builtin_branch_usage[] = { N_("git branch [<options>] [-r | -a] [--merged | --no-merged]"), @@ -37,7 +38,6 @@ static const char * const builtin_branch_usage[] = { static const char *head; static struct object_id head_oid; -static int used_deprecated_reflog_option; static int branch_use_color = -1; static char branch_colors[][COLOR_MAXLEN] = { @@ -578,14 +578,6 @@ static int edit_branch_description(const char *branch_name) return 0; } -static int deprecated_reflog_option_cb(const struct option *opt, - const char *arg, int unset) -{ - used_deprecated_reflog_option = 1; - *(int *)opt->value = !unset; - return 0; -} - int cmd_branch(int argc, const char **argv, const char *prefix) { int delete = 0, rename = 0, copy = 0, force = 0, list = 0; @@ -627,14 +619,8 @@ int cmd_branch(int argc, const char **argv, const char *prefix) OPT_BIT('M', NULL, 
&rename, N_("move/rename a branch, even if target exists"), 2), OPT_BIT('c', "copy", ©, N_("copy a branch and its reflog"), 1), OPT_BIT('C', NULL, ©, N_("copy a branch, even if target exists"), 2), - OPT_BOOL(0, "list", &list, N_("list branch names")), + OPT_BOOL('l', "list", &list, N_("list branch names")), OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")), - { - OPTION_CALLBACK, 'l', NULL, &reflog, NULL, - N_("deprecated synonym for --create-reflog"), - PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, - deprecated_reflog_option_cb - }, OPT_BOOL(0, "edit-description", &edit_description, N_("edit the description for the branch")), OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE), @@ -707,11 +693,6 @@ int cmd_branch(int argc, const char **argv, const char *prefix) if (list) setup_auto_pager("branch", 1); - if (used_deprecated_reflog_option && !list) { - warning("the '-l' alias for '--create-reflog' is deprecated;"); - warning("it will be removed in a future version of Git"); - } - if (delete) { if (!argc) die(_("branch name required")); diff --git a/builtin/cat-file.c b/builtin/cat-file.c index 0520cecc9a..2ca56fd086 100644 --- a/builtin/cat-file.c +++ b/builtin/cat-file.c @@ -99,7 +99,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, oi.sizep = &size; if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0) die("git cat-file: could not get object info"); - printf("%lu\n", size); + printf("%"PRIuMAX"\n", (uintmax_t)size); return 0; case 'e': @@ -120,7 +120,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, die("git cat-file --textconv %s: <object> must be <sha1:path>", obj_name); - if (textconv_object(path, obj_context.mode, &oid, 1, &buf, &size)) + if (textconv_object(the_repository, path, obj_context.mode, + &oid, 1, &buf, &size)) break; /* else fallthrough */ @@ -244,7 +245,7 @@ static void expand_atom(struct strbuf *sb, const char *atom, int len, if (data->mark_query) data->info.sizep = &data->size; else - strbuf_addf(sb, "%lu", data->size); + strbuf_addf(sb, "%"PRIuMAX , (uintmax_t)data->size); } else if (is_atom("objectsize:disk", atom, len)) { if (data->mark_query) data->info.disk_sizep = &data->disk_size; @@ -312,7 +313,8 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d oid_to_hex(oid), data->rest); } else if (opt->cmdmode == 'c') { enum object_type type; - if (!textconv_object(data->rest, 0100644, oid, + if (!textconv_object(the_repository, + data->rest, 0100644, oid, 1, &contents, &size)) contents = read_object_file(oid, &type, @@ -601,8 +603,10 @@ static int batch_option_callback(const struct option *opt, { struct batch_options *bo = opt->value; + BUG_ON_OPT_NEG(unset); + if (bo->enabled) { - return 1; + return error(_("only one batch option may be specified")); } bo->enabled = 1; @@ -637,10 +641,12 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) OPT_BOOL(0, "buffer", &batch.buffer_output, N_("buffer --batch output")), { OPTION_CALLBACK, 0, "batch", &batch, "format", N_("show info and content of objects fed from the standard input"), - PARSE_OPT_OPTARG, batch_option_callback }, + PARSE_OPT_OPTARG | PARSE_OPT_NONEG, + batch_option_callback }, { OPTION_CALLBACK, 0, "batch-check", &batch, "format", N_("show info about objects fed from the standard input"), - PARSE_OPT_OPTARG, batch_option_callback }, + PARSE_OPT_OPTARG | PARSE_OPT_NONEG, + batch_option_callback }, OPT_BOOL(0, "follow-symlinks", 
&batch.follow_symlinks, N_("follow in-tree symlinks (used with --batch or --batch-check)")), OPT_BOOL(0, "batch-all-objects", &batch.all_objects, diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c index 88b86c8d9f..eb74774cbc 100644 --- a/builtin/checkout-index.c +++ b/builtin/checkout-index.c @@ -132,6 +132,8 @@ static const char * const builtin_checkout_index_usage[] = { static int option_parse_stage(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); + if (!strcmp(arg, "all")) { to_tempfile = 1; checkout_stage = CHECKOUT_ALL; diff --git a/builtin/checkout.c b/builtin/checkout.c index 29ef50013d..acdafc6e4c 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -25,6 +25,8 @@ #include "submodule.h" #include "advice.h" +static int checkout_optimize_new_branch; + static const char * const checkout_usage[] = { N_("git checkout [<options>] <branch>"), N_("git checkout [<options>] [<branch>] -- <file>..."), @@ -42,6 +44,10 @@ struct checkout_opts { int ignore_skipworktree; int ignore_other_worktrees; int show_progress; + /* + * If new checkout options are added, skip_merge_working_tree + * should be updated accordingly. + */ const char *new_branch; const char *new_branch_force; @@ -96,7 +102,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base, if (pos >= 0) { struct cache_entry *old = active_cache[pos]; if (ce->ce_mode == old->ce_mode && - !oidcmp(&ce->oid, &old->oid)) { + oideq(&ce->oid, &old->oid)) { old->ce_flags |= CE_UPDATE; discard_cache_entry(ce); return 0; @@ -208,7 +214,8 @@ static int checkout_merged(int pos, const struct checkout *state) * merge.renormalize set, too */ status = ll_merge(&result_buf, path, &ancestor, "base", - &ours, "ours", &theirs, "theirs", NULL); + &ours, "ours", &theirs, "theirs", + state->istate, NULL); free(ancestor.ptr); free(ours.ptr); free(theirs.ptr); @@ -391,7 +398,7 @@ static void show_local_changes(struct object *head, { struct rev_info rev; /* I think we want full paths, even if we're in a subdirectory. */ - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); rev.diffopt.flags = opts->flags; rev.diffopt.output_format |= DIFF_FORMAT_NAME_STATUS; diff_setup_done(&rev.diffopt); @@ -472,6 +479,99 @@ static void setup_branch_path(struct branch_info *branch) branch->path = strbuf_detach(&buf, NULL); } +/* + * Skip merging the trees, updating the index and working directory if and + * only if we are creating a new branch via "git checkout -b <new_branch>." + */ +static int skip_merge_working_tree(const struct checkout_opts *opts, + const struct branch_info *old_branch_info, + const struct branch_info *new_branch_info) +{ + /* + * Do the merge if sparse checkout is on and the user has not opted in + * to the optimized behavior + */ + if (core_apply_sparse_checkout && !checkout_optimize_new_branch) + return 0; + + /* + * We must do the merge if we are actually moving to a new commit. 
+ */ + if (!old_branch_info->commit || !new_branch_info->commit || + !oideq(&old_branch_info->commit->object.oid, + &new_branch_info->commit->object.oid)) + return 0; + + /* + * opts->patch_mode cannot be used with switching branches so is + * not tested here + */ + + /* + * opts->quiet only impacts output so doesn't require a merge + */ + + /* + * Honor the explicit request for a three-way merge or to throw away + * local changes + */ + if (opts->merge || opts->force) + return 0; + + /* + * --detach is documented as "updating the index and the files in the + * working tree" but this optimization skips those steps so fall through + * to the regular code path. + */ + if (opts->force_detach) + return 0; + + /* + * opts->writeout_stage cannot be used with switching branches so is + * not tested here + */ + + /* + * Honor the explicit ignore requests + */ + if (!opts->overwrite_ignore || opts->ignore_skipworktree || + opts->ignore_other_worktrees) + return 0; + + /* + * opts->show_progress only impacts output so doesn't require a merge + */ + + /* + * If we aren't creating a new branch any changes or updates will + * happen in the existing branch. Since that could only be updating + * the index and working directory, we don't want to skip those steps + * or we've defeated any purpose in running the command. + */ + if (!opts->new_branch) + return 0; + + /* + * new_branch_force is defined to "create/reset and checkout a branch" + * so needs to go through the merge to do the reset + */ + if (opts->new_branch_force) + return 0; + + /* + * A new orphaned branch requrires the index and the working tree to be + * adjusted to <start_point> + */ + if (opts->new_orphan_branch) + return 0; + + /* + * Remaining variables are not checkout options but used to track state + */ + + return 1; +} + static int merge_working_tree(const struct checkout_opts *opts, struct branch_info *old_branch_info, struct branch_info *new_branch_info, @@ -800,7 +900,7 @@ static void orphaned_commit_warning(struct commit *old_commit, struct commit *ne struct rev_info revs; struct object *object = &old_commit->object; - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); setup_revisions(0, NULL, &revs, NULL); object->flags &= ~UNINTERESTING; @@ -846,10 +946,19 @@ static int switch_branches(const struct checkout_opts *opts, parse_commit_or_die(new_branch_info->commit); } - ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error); - if (ret) { - free(path_to_free); - return ret; + /* optimize the "checkout -b <new_branch> path */ + if (skip_merge_working_tree(opts, &old_branch_info, new_branch_info)) { + if (!checkout_optimize_new_branch && !opts->quiet) { + if (read_cache_preload(NULL) < 0) + return error(_("index file corrupt")); + show_local_changes(&new_branch_info->commit->object, &opts->diff_options); + } + } else { + ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error); + if (ret) { + free(path_to_free); + return ret; + } } if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit) @@ -864,6 +973,11 @@ static int switch_branches(const struct checkout_opts *opts, static int git_checkout_config(const char *var, const char *value, void *cb) { + if (!strcmp(var, "checkout.optimizenewbranch")) { + checkout_optimize_new_branch = git_config_bool(var, value); + return 0; + } + if (!strcmp(var, "diff.ignoresubmodules")) { struct checkout_opts *opts = cb; 
handle_ignore_submodules_arg(&opts->diff_options, value); diff --git a/builtin/clean.c b/builtin/clean.c index 8d9a7dc206..bbcdeb2d9e 100644 --- a/builtin/clean.c +++ b/builtin/clean.c @@ -140,6 +140,7 @@ static void clean_print_color(enum color_clean ix) static int exclude_cb(const struct option *opt, const char *arg, int unset) { struct string_list *exclude_list = opt->value; + BUG_ON_OPT_NEG(unset); string_list_append(exclude_list, arg); return 0; } diff --git a/builtin/clone.c b/builtin/clone.c index fd2c3ef090..15b142d646 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -748,6 +748,7 @@ static int checkout(int submodule_progress) memset(&opts, 0, sizeof opts); opts.update = 1; opts.merge = 1; + opts.clone = 1; opts.fn = oneway_merge; opts.verbose_update = (option_verbosity >= 0); opts.src_index = &the_index; diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c index da737df321..c02a3f1221 100644 --- a/builtin/commit-graph.c +++ b/builtin/commit-graph.c @@ -64,6 +64,7 @@ static int graph_verify(int argc, const char **argv) if (!graph) return 0; + UNLEAK(graph); return verify_commit_graph(the_repository, graph); } @@ -89,10 +90,8 @@ static int graph_read(int argc, const char **argv) graph_name = get_commit_graph_filename(opts.obj_dir); graph = load_commit_graph_one(graph_name); - if (!graph) { - UNLEAK(graph_name); + if (!graph) die("graph file %s does not exist", graph_name); - } FREE_AND_NULL(graph_name); @@ -115,7 +114,7 @@ static int graph_read(int argc, const char **argv) printf(" large_edges"); printf("\n"); - free_commit_graph(graph); + UNLEAK(graph); return 0; } @@ -155,7 +154,7 @@ static int graph_write(int argc, const char **argv) read_replace_refs = 0; if (opts.reachable) { - write_commit_graph_reachable(opts.obj_dir, opts.append); + write_commit_graph_reachable(opts.obj_dir, opts.append, 1); return 0; } @@ -170,14 +169,17 @@ static int graph_write(int argc, const char **argv) pack_indexes = &lines; if (opts.stdin_commits) commit_hex = &lines; + + UNLEAK(buf); } write_commit_graph(opts.obj_dir, pack_indexes, commit_hex, - opts.append); + opts.append, + 1); - string_list_clear(&lines, 0); + UNLEAK(lines); return 0; } diff --git a/builtin/commit.c b/builtin/commit.c index 83233ca1a5..c021b119bb 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -33,6 +33,8 @@ #include "sequencer.h" #include "mailmap.h" #include "help.h" +#include "commit-reach.h" +#include "commit-graph.h" static const char * const builtin_commit_usage[] = { N_("git commit [<options>] [--] <pathspec>..."), @@ -159,6 +161,9 @@ static int opt_parse_m(const struct option *opt, const char *arg, int unset) static int opt_parse_rename_score(const struct option *opt, const char *arg, int unset) { const char **value = opt->value; + + BUG_ON_OPT_NEG(unset); + if (arg != NULL && *arg == '=') arg = arg + 1; @@ -506,8 +511,9 @@ static int run_status(FILE *fp, const char *index_file, const char *prefix, int wt_status_collect(s); wt_status_print(s); + wt_status_collect_free_buffers(s); - return s->commitable; + return s->committable; } static int is_a_merge(const struct commit *current_head) @@ -653,7 +659,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, { struct stat statbuf; struct strbuf committer_ident = STRBUF_INIT; - int commitable; + int committable; struct strbuf sb = STRBUF_INIT; const char *hook_arg1 = NULL; const char *hook_arg2 = NULL; @@ -870,7 +876,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, saved_color_setting = s->use_color; 
s->use_color = 0; - commitable = run_status(s->fp, index_file, prefix, 1, s); + committable = run_status(s->fp, index_file, prefix, 1, s); s->use_color = saved_color_setting; string_list_clear(&s->change, 1); } else { @@ -889,7 +895,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, for (i = 0; i < active_nr; i++) if (ce_intent_to_add(active_cache[i])) ita_nr++; - commitable = active_nr - ita_nr > 0; + committable = active_nr - ita_nr > 0; } else { /* * Unless the user did explicitly request a submodule @@ -905,7 +911,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, if (ignore_submodule_arg && !strcmp(ignore_submodule_arg, "all")) flags.ignore_submodules = 1; - commitable = index_differs_from(parent, &flags, 1); + committable = index_differs_from(parent, &flags, 1); } } strbuf_release(&committer_ident); @@ -917,7 +923,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, * explicit --allow-empty. In the cherry-pick case, it may be * empty due to conflict resolution, which the user should okay. */ - if (!commitable && whence != FROM_MERGE && !allow_empty && + if (!committable && whence != FROM_MERGE && !allow_empty && !(amend && is_a_merge(current_head))) { s->display_comment_prefix = old_display_comment_prefix; run_status(stdout, index_file, prefix, 0, s); @@ -981,7 +987,7 @@ static const char *find_author_by_nickname(const char *name) const char *av[20]; int ac = 0; - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); strbuf_addf(&buf, "--author=%s", name); av[++ac] = "--all"; av[++ac] = "-i"; @@ -1187,14 +1193,14 @@ static int parse_and_validate_options(int argc, const char *argv[], static int dry_run_commit(int argc, const char **argv, const char *prefix, const struct commit *current_head, struct wt_status *s) { - int commitable; + int committable; const char *index_file; index_file = prepare_index(argc, argv, prefix, current_head, 1); - commitable = run_status(stdout, index_file, prefix, 0, s); + committable = run_status(stdout, index_file, prefix, 0, s); rollback_index_files(); - return commitable ? 0 : 1; + return committable ? 
0 : 1; } define_list_config_array_extra(color_status_slots, {"added"}); @@ -1296,6 +1302,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) static int no_renames = -1; static const char *rename_score_arg = (const char *)-1; static struct wt_status s; + unsigned int progress_flag = 0; int fd; struct object_id oid; static struct option builtin_status_options[] = { @@ -1331,7 +1338,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) OPT_BOOL(0, "no-renames", &no_renames, N_("do not detect renames")), { OPTION_CALLBACK, 'M', "find-renames", &rename_score_arg, N_("n"), N_("detect renames, optionally set similarity index"), - PARSE_OPT_OPTARG, opt_parse_rename_score }, + PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_rename_score }, OPT_END(), }; @@ -1356,8 +1363,13 @@ int cmd_status(int argc, const char **argv, const char *prefix) PATHSPEC_PREFER_FULL, prefix, argv); - read_cache_preload(&s.pathspec); - refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL); + if (status_format != STATUS_FORMAT_PORCELAIN && + status_format != STATUS_FORMAT_PORCELAIN_V2) + progress_flag = REFRESH_PROGRESS; + read_index(&the_index); + refresh_index(&the_index, + REFRESH_QUIET|REFRESH_UNMERGED|progress_flag, + &s.pathspec, NULL, NULL); if (use_optional_locks()) fd = hold_locked_index(&index_lock, 0); @@ -1389,6 +1401,8 @@ int cmd_status(int argc, const char **argv, const char *prefix) s.prefix = prefix; wt_status_print(&s); + wt_status_collect_free_buffers(&s); + return 0; } @@ -1652,7 +1666,10 @@ int cmd_commit(int argc, const char **argv, const char *prefix) "new_index file. Check that disk is not full and quota is\n" "not exceeded, and then \"git reset HEAD\" to recover.")); - rerere(0); + if (git_env_bool(GIT_TEST_COMMIT_GRAPH, 0)) + write_commit_graph_reachable(get_object_directory(), 0, 0); + + repo_rerere(the_repository, 0); run_command_v_opt(argv_gc_auto, RUN_GIT_CMD); run_commit_hook(use_editor, get_index_file(), "post-commit", NULL); if (amend && !no_post_rewrite) { diff --git a/builtin/config.c b/builtin/config.c index 97b58c4aea..84385ef165 100644 --- a/builtin/config.c +++ b/builtin/config.c @@ -5,6 +5,7 @@ #include "parse-options.h" #include "urlmatch.h" #include "quote.h" +#include "worktree.h" static const char *const builtin_config_usage[] = { N_("git config [<options>]"), @@ -24,6 +25,7 @@ static char key_delim = ' '; static char term = '\n'; static int use_global_config, use_system_config, use_local_config; +static int use_worktree_config; static struct git_config_source given_config_source; static int actions, type; static char *default_value; @@ -123,6 +125,7 @@ static struct option builtin_config_options[] = { OPT_BOOL(0, "global", &use_global_config, N_("use global config file")), OPT_BOOL(0, "system", &use_system_config, N_("use system config file")), OPT_BOOL(0, "local", &use_local_config, N_("use repository config file")), + OPT_BOOL(0, "worktree", &use_worktree_config, N_("use per-worktree config file")), OPT_STRING('f', "file", &given_config_source.file, N_("file"), N_("use given config file")), OPT_STRING(0, "blob", &given_config_source.blob, N_("blob-id"), N_("read config from given blob object")), OPT_GROUP(N_("Action")), @@ -602,6 +605,7 @@ int cmd_config(int argc, const char **argv, const char *prefix) PARSE_OPT_STOP_AT_NON_OPTION); if (use_global_config + use_system_config + use_local_config + + use_worktree_config + !!given_config_source.file + !!given_config_source.blob > 1) { error(_("only one config file at a 
time")); usage_builtin_config(); @@ -645,7 +649,20 @@ int cmd_config(int argc, const char **argv, const char *prefix) given_config_source.file = git_etc_gitconfig(); else if (use_local_config) given_config_source.file = git_pathdup("config"); - else if (given_config_source.file) { + else if (use_worktree_config) { + struct worktree **worktrees = get_worktrees(0); + if (repository_format_worktree_config) + given_config_source.file = git_pathdup("config.worktree"); + else if (worktrees[0] && worktrees[1]) + die(_("--worktree cannot be used with multiple " + "working trees unless the config\n" + "extension worktreeConfig is enabled. " + "Please read \"CONFIGURATION FILE\"\n" + "section in \"git help worktree\" for details")); + else + given_config_source.file = git_pathdup("config"); + free_worktrees(worktrees); + } else if (given_config_source.file) { if (!is_absolute_path(given_config_source.file) && prefix) given_config_source.file = prefix_filename(prefix, given_config_source.file); diff --git a/builtin/count-objects.c b/builtin/count-objects.c index d51e2ce1ec..a7cad052c6 100644 --- a/builtin/count-objects.c +++ b/builtin/count-objects.c @@ -123,7 +123,7 @@ int cmd_count_objects(int argc, const char **argv, const char *prefix) struct strbuf pack_buf = STRBUF_INIT; struct strbuf garbage_buf = STRBUF_INIT; - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_local) continue; if (open_pack_index(p)) diff --git a/builtin/describe.c b/builtin/describe.c index 41606c8a90..cc118448ee 100644 --- a/builtin/describe.c +++ b/builtin/describe.c @@ -62,7 +62,7 @@ static const char *prio_names[] = { N_("head"), N_("lightweight"), N_("annotated"), }; -static int commit_name_cmp(const void *unused_cmp_data, +static int commit_name_neq(const void *unused_cmp_data, const void *entry, const void *entry_or_key, const void *peeled) @@ -70,7 +70,7 @@ static int commit_name_cmp(const void *unused_cmp_data, const struct commit_name *cn1 = entry; const struct commit_name *cn2 = entry_or_key; - return oidcmp(&cn1->peeled, peeled ? peeled : &cn2->peeled); + return !oideq(&cn1->peeled, peeled ? peeled : &cn2->peeled); } static inline struct commit_name *find_commit_name(const struct object_id *peeled) @@ -190,7 +190,7 @@ static int get_name(const char *path, const struct object_id *oid, int flag, voi /* Is it annotated? 
*/ if (!peel_ref(path, &peeled)) { - is_annotated = !!oidcmp(oid, &peeled); + is_annotated = !oideq(oid, &peeled); } else { oidcpy(&peeled, oid); is_annotated = 0; @@ -469,7 +469,7 @@ static void process_object(struct object *obj, const char *path, void *data) { struct process_commit_data *pcd = data; - if (!oidcmp(&pcd->looking_for, &obj->oid) && !pcd->dst->len) { + if (oideq(&pcd->looking_for, &obj->oid) && !pcd->dst->len) { reset_revision_walk(); describe_commit(&pcd->current_commit, pcd->dst); strbuf_addf(pcd->dst, ":%s", path); @@ -488,7 +488,7 @@ static void describe_blob(struct object_id oid, struct strbuf *dst) "--objects", "--in-commit-order", "--reverse", "HEAD", NULL); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); if (setup_revisions(args.argc, args.argv, &revs, NULL) > 1) BUG("setup_revisions could not handle all args?"); @@ -596,7 +596,7 @@ int cmd_describe(int argc, const char **argv, const char *prefix) return cmd_name_rev(args.argc, args.argv, prefix); } - hashmap_init(&names, commit_name_cmp, NULL, 0); + hashmap_init(&names, commit_name_neq, NULL, 0); for_each_rawref(get_name, NULL); if (!hashmap_get_size(&names) && !always) die(_("No names found, cannot describe anything.")); @@ -629,14 +629,14 @@ int cmd_describe(int argc, const char **argv, const char *prefix) struct argv_array args = ARGV_ARRAY_INIT; int fd, result; - read_cache_preload(NULL); + read_cache(); refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL); fd = hold_locked_index(&index_lock, 0); if (0 <= fd) update_index_if_able(&the_index, &index_lock); - init_revisions(&revs, prefix); + repo_init_revisions(the_repository, &revs, prefix); argv_array_pushv(&args, diff_index_args); if (setup_revisions(args.argc, args.argv, &revs, NULL) != 1) BUG("malformed internal diff-index command line"); diff --git a/builtin/diff-files.c b/builtin/diff-files.c index e88493ffe5..48cfcb935d 100644 --- a/builtin/diff-files.c +++ b/builtin/diff-files.c @@ -25,7 +25,7 @@ int cmd_diff_files(int argc, const char **argv, const char *prefix) usage(diff_files_usage); git_config(git_diff_basic_config, NULL); /* no "diff" UI options */ - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.abbrev = 0; precompose_argv(argc, argv); diff --git a/builtin/diff-index.c b/builtin/diff-index.c index 522f4fdffd..fcccd1f10d 100644 --- a/builtin/diff-index.c +++ b/builtin/diff-index.c @@ -22,7 +22,7 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix) usage(diff_cache_usage); git_config(git_diff_basic_config, NULL); /* no "diff" UI options */ - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.abbrev = 0; precompose_argv(argc, argv); diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c index d07bf2e4c4..ef996126d7 100644 --- a/builtin/diff-tree.c +++ b/builtin/diff-tree.c @@ -110,7 +110,7 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix) usage(diff_tree_usage); git_config(git_diff_basic_config, NULL); /* no "diff" UI options */ - init_revisions(opt, prefix); + repo_init_revisions(the_repository, opt, prefix); if (read_cache() < 0) die(_("index file corrupt")); opt->abbrev = 0; diff --git a/builtin/diff.c b/builtin/diff.c index 361a3c3ed3..f0393bba23 100644 --- a/builtin/diff.c +++ b/builtin/diff.c @@ -41,7 +41,7 @@ static void stuff_change(struct diff_options *opt, struct diff_filespec *one, *two; if (!is_null_oid(old_oid) && !is_null_oid(new_oid) && - !oidcmp(old_oid, new_oid) && 
(old_mode == new_mode)) + oideq(old_oid, new_oid) && (old_mode == new_mode)) return; if (opt->flags.reverse_diff) { @@ -318,7 +318,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix) git_config(git_diff_ui_config, NULL); precompose_argv(argc, argv); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); if (no_index && argc != i + 2) { if (no_index == DIFF_NO_INDEX_IMPLICIT) { @@ -339,7 +339,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix) } if (no_index) /* If this is a no-index diff, just run it and exit there. */ - diff_no_index(&rev, argc, argv); + diff_no_index(the_repository, &rev, argc, argv); /* Otherwise, we are doing the usual "git" diff */ rev.diffopt.skip_stat_unmatch = !!diff_auto_refresh_index; diff --git a/builtin/difftool.c b/builtin/difftool.c index cdd585ca76..544b0e8639 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -112,11 +112,11 @@ static int use_wt_file(const char *workdir, const char *name, int fd = open(buf.buf, O_RDONLY); if (fd >= 0 && - !index_fd(&wt_oid, fd, &st, OBJ_BLOB, name, 0)) { + !index_fd(&the_index, &wt_oid, fd, &st, OBJ_BLOB, name, 0)) { if (is_null_oid(oid)) { oidcpy(oid, &wt_oid); use = 1; - } else if (!oidcmp(oid, &wt_oid)) + } else if (oideq(oid, &wt_oid)) use = 1; } } @@ -438,7 +438,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, strbuf_reset(&buf); strbuf_addf(&buf, "Subproject commit %s", oid_to_hex(&roid)); - if (!oidcmp(&loid, &roid)) + if (oideq(&loid, &roid)) strbuf_addstr(&buf, "-dirty"); add_left_or_right(&submodules, dst_path, buf.buf, 1); continue; diff --git a/builtin/fast-export.c b/builtin/fast-export.c index 9bd8a14b57..5790f0d554 100644 --- a/builtin/fast-export.c +++ b/builtin/fast-export.c @@ -253,7 +253,7 @@ static void export_blob(const struct object_id *oid) mark_next_object(object); - printf("blob\nmark :%"PRIu32"\ndata %lu\n", last_idnum, size); + printf("blob\nmark :%"PRIu32"\ndata %"PRIuMAX"\n", last_idnum, (uintmax_t)size); if (size && fwrite(buf, size, 1, stdout) != 1) die_errno("could not write blob '%s'", oid_to_hex(oid)); printf("\n"); @@ -384,7 +384,7 @@ static void show_filemodify(struct diff_queue_struct *q, string_list_insert(changed, spec->path); putchar('\n'); - if (!oidcmp(&ospec->oid, &spec->oid) && + if (oideq(&ospec->oid, &spec->oid) && ospec->mode == spec->mode) break; } @@ -1033,7 +1033,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix) /* we handle encodings */ git_config(git_default_config, NULL); - init_revisions(&revs, prefix); + repo_init_revisions(the_repository, &revs, prefix); init_revision_sources(&revision_sources); revs.topo_order = 1; revs.sources = &revision_sources; diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c index 1a1bc63566..63e69a5801 100644 --- a/builtin/fetch-pack.c +++ b/builtin/fetch-pack.c @@ -16,13 +16,14 @@ static void add_sought_entry(struct ref ***sought, int *nr, int *alloc, { struct ref *ref; struct object_id oid; + const char *p; - if (!get_oid_hex(name, &oid)) { - if (name[GIT_SHA1_HEXSZ] == ' ') { - /* <sha1> <ref>, find refname */ - name += GIT_SHA1_HEXSZ + 1; - } else if (name[GIT_SHA1_HEXSZ] == '\0') { - ; /* <sha1>, leave sha1 as name */ + if (!parse_oid_hex(name, &oid, &p)) { + if (*p == ' ') { + /* <oid> <ref>, find refname */ + name = p + 1; + } else if (*p == '\0') { + ; /* <oid>, leave oid as name */ } else { /* <ref>, clear cruft from oid */ oidclr(&oid); diff --git a/builtin/fetch.c b/builtin/fetch.c index 
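The fetch-pack.c hunk just above replaces hard-coded GIT_SHA1_HEXSZ offsets with parse_oid_hex(), the same move the apply.c hunk at the very start of this chunk makes with the_hash_algo->hexsz: the length of a hex object name is no longer assumed to be SHA-1's. A minimal sketch of the parse_oid_hex() calling convention as used above (the wrapper name is invented):

static int starts_with_full_oid(const char *line, struct object_id *oid)
{
	const char *end;

	/*
	 * parse_oid_hex() returns 0 on success and leaves "end" just past
	 * the hex digits, however long the current hash algorithm's hex
	 * form is, so callers no longer index by GIT_SHA1_HEXSZ.
	 */
	return !parse_oid_hex(line, oid, &end) &&
	       (*end == ' ' || *end == '\0');
}
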
61bec5d213..e0140327aa 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -22,6 +22,7 @@ #include "utf8.h" #include "packfile.h" #include "list-objects-filter-options.h" +#include "commit-reach.h" static const char * const builtin_fetch_usage[] = { N_("git fetch [<options>] [<repository> [<refspec>...]]"), @@ -97,6 +98,8 @@ static int git_fetch_config(const char *k, const char *v, void *cb) static int parse_refmap_arg(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); + /* * "git fetch --refmap='' origin foo" * can be used to tell the command not to store anywhere @@ -114,7 +117,7 @@ static struct option builtin_fetch_options[] = { N_("append to .git/FETCH_HEAD instead of overwriting")), OPT_STRING(0, "upload-pack", &upload_pack, N_("path"), N_("path to upload pack on remote end")), - OPT__FORCE(&force, N_("force overwrite of local branch"), 0), + OPT__FORCE(&force, N_("force overwrite of local reference"), 0), OPT_BOOL('m', "multiple", &multiple, N_("fetch from multiple remotes")), OPT_SET_INT('t', "tags", &tags, @@ -222,39 +225,83 @@ static void add_merge_config(struct ref **head, } } -static int add_existing(const char *refname, const struct object_id *oid, - int flag, void *cbdata) -{ - struct string_list *list = (struct string_list *)cbdata; - struct string_list_item *item = string_list_insert(list, refname); - struct object_id *old_oid = xmalloc(sizeof(*old_oid)); - - oidcpy(old_oid, oid); - item->util = old_oid; - return 0; -} - static int will_fetch(struct ref **head, const unsigned char *sha1) { struct ref *rm = *head; while (rm) { - if (!hashcmp(rm->old_oid.hash, sha1)) + if (hasheq(rm->old_oid.hash, sha1)) return 1; rm = rm->next; } return 0; } +struct refname_hash_entry { + struct hashmap_entry ent; /* must be the first member */ + struct object_id oid; + char refname[FLEX_ARRAY]; +}; + +static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data, + const void *e1_, + const void *e2_, + const void *keydata) +{ + const struct refname_hash_entry *e1 = e1_; + const struct refname_hash_entry *e2 = e2_; + + return strcmp(e1->refname, keydata ? 
keydata : e2->refname); +} + +static struct refname_hash_entry *refname_hash_add(struct hashmap *map, + const char *refname, + const struct object_id *oid) +{ + struct refname_hash_entry *ent; + size_t len = strlen(refname); + + FLEX_ALLOC_MEM(ent, refname, refname, len); + hashmap_entry_init(ent, strhash(refname)); + oidcpy(&ent->oid, oid); + hashmap_add(map, ent); + return ent; +} + +static int add_one_refname(const char *refname, + const struct object_id *oid, + int flag, void *cbdata) +{ + struct hashmap *refname_map = cbdata; + + (void) refname_hash_add(refname_map, refname, oid); + return 0; +} + +static void refname_hash_init(struct hashmap *map) +{ + hashmap_init(map, refname_hash_entry_cmp, NULL, 0); +} + +static int refname_hash_exists(struct hashmap *map, const char *refname) +{ + return !!hashmap_get_from_hash(map, strhash(refname), refname); +} + static void find_non_local_tags(const struct ref *refs, struct ref **head, struct ref ***tail) { - struct string_list existing_refs = STRING_LIST_INIT_DUP; - struct string_list remote_refs = STRING_LIST_INIT_NODUP; + struct hashmap existing_refs; + struct hashmap remote_refs; + struct string_list remote_refs_list = STRING_LIST_INIT_NODUP; + struct string_list_item *remote_ref_item; const struct ref *ref; - struct string_list_item *item = NULL; + struct refname_hash_entry *item = NULL; - for_each_ref(add_existing, &existing_refs); + refname_hash_init(&existing_refs); + refname_hash_init(&remote_refs); + + for_each_ref(add_one_refname, &existing_refs); for (ref = refs; ref; ref = ref->next) { if (!starts_with(ref->name, "refs/tags/")) continue; @@ -270,10 +317,10 @@ static void find_non_local_tags(const struct ref *refs, !has_object_file_with_flags(&ref->old_oid, OBJECT_INFO_QUICK) && !will_fetch(head, ref->old_oid.hash) && - !has_sha1_file_with_flags(item->util, + !has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) && - !will_fetch(head, item->util)) - item->util = NULL; + !will_fetch(head, item->oid.hash)) + oidclr(&item->oid); item = NULL; continue; } @@ -285,48 +332,53 @@ static void find_non_local_tags(const struct ref *refs, * fetch. */ if (item && - !has_sha1_file_with_flags(item->util, OBJECT_INFO_QUICK) && - !will_fetch(head, item->util)) - item->util = NULL; + !has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) && + !will_fetch(head, item->oid.hash)) + oidclr(&item->oid); item = NULL; /* skip duplicates and refs that we already have */ - if (string_list_has_string(&remote_refs, ref->name) || - string_list_has_string(&existing_refs, ref->name)) + if (refname_hash_exists(&remote_refs, ref->name) || + refname_hash_exists(&existing_refs, ref->name)) continue; - item = string_list_insert(&remote_refs, ref->name); - item->util = (void *)&ref->old_oid; + item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid); + string_list_insert(&remote_refs_list, ref->name); } - string_list_clear(&existing_refs, 1); + hashmap_free(&existing_refs, 1); /* * We may have a final lightweight tag that needs to be * checked to see if it needs fetching. 
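The find_non_local_tags() rewrite above and below replaces a sorted string_list, with the object id hanging off each item's util pointer, by a hashmap keyed on the refname, so membership tests become constant-time; a plain string_list is kept only to keep the iteration order the old code had. Two details are worth noting: the cmp callback follows strcmp() conventions and returns 0 for "equal" (the same convention behind the commit_name_neq rename in describe.c earlier in this chunk), and because the object id is now embedded in the entry, "ignore this one" is expressed with oidclr() instead of NULLing a util pointer. A minimal caller sketch, assuming refname_hash_add() and the hashmap helpers exactly as used in these hunks (the function name is invented):

static void remember_ref(struct hashmap *refname_map, const struct ref *ref)
{
	/* add the ref only if this refname has not been seen yet */
	if (!hashmap_get_from_hash(refname_map, strhash(ref->name), ref->name))
		refname_hash_add(refname_map, ref->name, &ref->old_oid);
}
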
*/ if (item && - !has_sha1_file_with_flags(item->util, OBJECT_INFO_QUICK) && - !will_fetch(head, item->util)) - item->util = NULL; + !has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) && + !will_fetch(head, item->oid.hash)) + oidclr(&item->oid); /* - * For all the tags in the remote_refs string list, + * For all the tags in the remote_refs_list, * add them to the list of refs to be fetched */ - for_each_string_list_item(item, &remote_refs) { + for_each_string_list_item(remote_ref_item, &remote_refs_list) { + const char *refname = remote_ref_item->string; + + item = hashmap_get_from_hash(&remote_refs, strhash(refname), refname); + if (!item) + BUG("unseen remote ref?"); + /* Unless we have already decided to ignore this item... */ - if (item->util) - { - struct ref *rm = alloc_ref(item->string); - rm->peer_ref = alloc_ref(item->string); - oidcpy(&rm->old_oid, item->util); + if (!is_null_oid(&item->oid)) { + struct ref *rm = alloc_ref(item->refname); + rm->peer_ref = alloc_ref(item->refname); + oidcpy(&rm->old_oid, &item->oid); **tail = rm; *tail = &rm->next; } } - - string_list_clear(&remote_refs, 0); + hashmap_free(&remote_refs, 1); + string_list_clear(&remote_refs_list, 0); } static struct ref *get_ref_map(struct remote *remote, @@ -342,7 +394,7 @@ static struct ref *get_ref_map(struct remote *remote, /* opportunistically-updated references: */ struct ref *orefs = NULL, **oref_tail = &orefs; - struct string_list existing_refs = STRING_LIST_INIT_DUP; + struct hashmap existing_refs; if (rs->nr) { struct refspec *fetch_refspec; @@ -436,19 +488,24 @@ static struct ref *get_ref_map(struct remote *remote, ref_map = ref_remove_duplicates(ref_map); - for_each_ref(add_existing, &existing_refs); + refname_hash_init(&existing_refs); + for_each_ref(add_one_refname, &existing_refs); + for (rm = ref_map; rm; rm = rm->next) { if (rm->peer_ref) { - struct string_list_item *peer_item = - string_list_lookup(&existing_refs, - rm->peer_ref->name); + const char *refname = rm->peer_ref->name; + struct refname_hash_entry *peer_item; + + peer_item = hashmap_get_from_hash(&existing_refs, + strhash(refname), + refname); if (peer_item) { - struct object_id *old_oid = peer_item->util; + struct object_id *old_oid = &peer_item->oid; oidcpy(&rm->peer_ref->old_oid, old_oid); } } } - string_list_clear(&existing_refs, 1); + hashmap_free(&existing_refs, 1); return ref_map; } @@ -507,7 +564,7 @@ static void adjust_refcol_width(const struct ref *ref) int max, rlen, llen, len; /* uptodate lines are only shown on high verbosity level */ - if (!verbosity && !oidcmp(&ref->peer_ref->old_oid, &ref->old_oid)) + if (!verbosity && oideq(&ref->peer_ref->old_oid, &ref->old_oid)) return; max = term_columns(); @@ -644,7 +701,7 @@ static int update_local_ref(struct ref *ref, if (type < 0) die(_("object %s not found"), oid_to_hex(&ref->new_oid)); - if (!oidcmp(&ref->old_oid, &ref->new_oid)) { + if (oideq(&ref->old_oid, &ref->new_oid)) { if (verbosity > 0) format_display(display, '=', _("[up to date]"), NULL, remote, pretty_ref, summary_width); @@ -667,12 +724,18 @@ static int update_local_ref(struct ref *ref, if (!is_null_oid(&ref->old_oid) && starts_with(ref->name, "refs/tags/")) { - int r; - r = s_update_ref("updating tag", ref, 0); - format_display(display, r ? '!' : 't', _("[tag update]"), - r ? _("unable to update local ref") : NULL, - remote, pretty_ref, summary_width); - return r; + if (force || ref->force) { + int r; + r = s_update_ref("updating tag", ref, 0); + format_display(display, r ? '!' 
: 't', _("[tag update]"), + r ? _("unable to update local ref") : NULL, + remote, pretty_ref, summary_width); + return r; + } else { + format_display(display, '!', _("[rejected]"), _("would clobber existing tag"), + remote, pretty_ref, summary_width); + return 1; + } } current = lookup_commit_reference_gently(the_repository, @@ -924,10 +987,11 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, * everything we are going to fetch already exists and is connected * locally. */ -static int quickfetch(struct ref *ref_map) +static int check_exist_and_connected(struct ref *ref_map) { struct ref *rm = ref_map; struct check_connected_options opt = CHECK_CONNECTED_INIT; + struct ref *r; /* * If we are deepening a shallow clone we already have these @@ -938,13 +1002,23 @@ static int quickfetch(struct ref *ref_map) */ if (deepen) return -1; + + /* + * check_connected() allows objects to merely be promised, but + * we need all direct targets to exist. + */ + for (r = rm; r; r = r->next) { + if (!has_object_file(&r->old_oid)) + return -1; + } + opt.quiet = 1; return check_connected(iterate_ref_map, &rm, &opt); } static int fetch_refs(struct transport *transport, struct ref *ref_map) { - int ret = quickfetch(ref_map); + int ret = check_exist_and_connected(ref_map); if (ret) ret = transport_fetch_refs(transport, ref_map); if (!ret) @@ -1168,6 +1242,7 @@ static int do_fetch(struct transport *transport, int retcode = 0; const struct ref *remote_refs; struct argv_array ref_prefixes = ARGV_ARRAY_INIT; + int must_list_refs = 1; if (tags == TAGS_DEFAULT) { if (transport->remote->fetch_tags == 2) @@ -1183,17 +1258,36 @@ static int do_fetch(struct transport *transport, goto cleanup; } - if (rs->nr) + if (rs->nr) { + int i; + refspec_ref_prefixes(rs, &ref_prefixes); - else if (transport->remote && transport->remote->fetch.nr) + + /* + * We can avoid listing refs if all of them are exact + * OIDs + */ + must_list_refs = 0; + for (i = 0; i < rs->nr; i++) { + if (!rs->items[i].exact_sha1) { + must_list_refs = 1; + break; + } + } + } else if (transport->remote && transport->remote->fetch.nr) refspec_ref_prefixes(&transport->remote->fetch, &ref_prefixes); - if (ref_prefixes.argc && - (tags == TAGS_SET || (tags == TAGS_DEFAULT))) { - argv_array_push(&ref_prefixes, "refs/tags/"); + if (tags == TAGS_SET || tags == TAGS_DEFAULT) { + must_list_refs = 1; + if (ref_prefixes.argc) + argv_array_push(&ref_prefixes, "refs/tags/"); } - remote_refs = transport_get_remote_refs(transport, &ref_prefixes); + if (must_list_refs) + remote_refs = transport_get_remote_refs(transport, &ref_prefixes); + else + remote_refs = NULL; + argv_array_clear(&ref_prefixes); ref_map = get_ref_map(transport->remote, remote_refs, rs, diff --git a/builtin/fmt-merge-msg.c b/builtin/fmt-merge-msg.c index f35ff1612b..a4615587fd 100644 --- a/builtin/fmt-merge-msg.c +++ b/builtin/fmt-merge-msg.c @@ -12,6 +12,7 @@ #include "fmt-merge-msg.h" #include "gpg-interface.h" #include "repository.h" +#include "commit-reach.h" static const char * const fmt_merge_msg_usage[] = { N_("git fmt-merge-msg [-m <message>] [--log[=<n>] | --no-log] [--file <file>]"), @@ -78,9 +79,9 @@ static struct merge_parent *find_merge_parent(struct merge_parents *table, { int i; for (i = 0; i < table->nr; i++) { - if (given && oidcmp(&table->item[i].given, given)) + if (given && !oideq(&table->item[i].given, given)) continue; - if (commit && oidcmp(&table->item[i].commit, commit)) + if (commit && !oideq(&table->item[i].commit, commit)) continue; return 
&table->item[i]; } @@ -582,7 +583,7 @@ static void find_merge_parents(struct merge_parents *result, while (parents) { struct commit *cmit = pop_commit(&parents); for (i = 0; i < result->nr; i++) - if (!oidcmp(&result->item[i].commit, &cmit->object.oid)) + if (oideq(&result->item[i].commit, &cmit->object.oid)) result->item[i].used = 1; } @@ -642,7 +643,7 @@ int fmt_merge_msg(struct strbuf *in, struct strbuf *out, struct rev_info rev; head = lookup_commit_or_die(&head_oid, "HEAD"); - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); rev.commit_format = CMIT_FMT_ONELINE; rev.ignore_merges = 1; rev.limited = 1; diff --git a/builtin/fsck.c b/builtin/fsck.c index 250f5af118..3c3e0f06e7 100644 --- a/builtin/fsck.c +++ b/builtin/fsck.c @@ -19,6 +19,7 @@ #include "packfile.h" #include "object-store.h" #include "run-command.h" +#include "worktree.h" #define REACHABLE 0x0001 #define SEEN 0x0002 @@ -36,8 +37,6 @@ static int check_strict; static int keep_cache_objects; static struct fsck_options fsck_walk_options = FSCK_OPTIONS_DEFAULT; static struct fsck_options fsck_obj_options = FSCK_OPTIONS_DEFAULT; -static struct object_id head_oid; -static const char *head_points_at; static int errors_found; static int write_lost_and_found; static int verbose; @@ -446,7 +445,11 @@ static int fsck_handle_reflog_ent(struct object_id *ooid, struct object_id *noid static int fsck_handle_reflog(const char *logname, const struct object_id *oid, int flag, void *cb_data) { - for_each_reflog_ent(logname, fsck_handle_reflog_ent, (void *)logname); + struct strbuf refname = STRBUF_INIT; + + strbuf_worktree_ref(cb_data, &refname, logname); + for_each_reflog_ent(refname.buf, fsck_handle_reflog_ent, refname.buf); + strbuf_release(&refname); return 0; } @@ -484,13 +487,34 @@ static int fsck_handle_ref(const char *refname, const struct object_id *oid, return 0; } +static int fsck_head_link(const char *head_ref_name, + const char **head_points_at, + struct object_id *head_oid); + static void get_default_heads(void) { - if (head_points_at && !is_null_oid(&head_oid)) - fsck_handle_ref("HEAD", &head_oid, 0, NULL); + struct worktree **worktrees, **p; + const char *head_points_at; + struct object_id head_oid; + for_each_rawref(fsck_handle_ref, NULL); - if (include_reflogs) - for_each_reflog(fsck_handle_reflog, NULL); + + worktrees = get_worktrees(0); + for (p = worktrees; *p; p++) { + struct worktree *wt = *p; + struct strbuf ref = STRBUF_INIT; + + strbuf_worktree_ref(wt, &ref, "HEAD"); + fsck_head_link(ref.buf, &head_points_at, &head_oid); + if (head_points_at && !is_null_oid(&head_oid)) + fsck_handle_ref(ref.buf, &head_oid, 0, NULL); + strbuf_release(&ref); + + if (include_reflogs) + refs_for_each_reflog(get_worktree_ref_store(wt), + fsck_handle_reflog, wt); + } + free_worktrees(worktrees); /* * Not having any default heads isn't really fatal, but @@ -579,33 +603,36 @@ static void fsck_object_dir(const char *path) stop_progress(&progress); } -static int fsck_head_link(void) +static int fsck_head_link(const char *head_ref_name, + const char **head_points_at, + struct object_id *head_oid) { int null_is_error = 0; if (verbose) - fprintf(stderr, "Checking HEAD link\n"); + fprintf(stderr, "Checking %s link\n", head_ref_name); - head_points_at = resolve_ref_unsafe("HEAD", 0, &head_oid, NULL); - if (!head_points_at) { + *head_points_at = resolve_ref_unsafe(head_ref_name, 0, head_oid, NULL); + if (!*head_points_at) { errors_found |= ERROR_REFS; - return error("Invalid HEAD"); + return error("Invalid %s", 
head_ref_name); } - if (!strcmp(head_points_at, "HEAD")) + if (!strcmp(*head_points_at, head_ref_name)) /* detached HEAD */ null_is_error = 1; - else if (!starts_with(head_points_at, "refs/heads/")) { + else if (!starts_with(*head_points_at, "refs/heads/")) { errors_found |= ERROR_REFS; - return error("HEAD points to something strange (%s)", - head_points_at); + return error("%s points to something strange (%s)", + head_ref_name, *head_points_at); } - if (is_null_oid(&head_oid)) { + if (is_null_oid(head_oid)) { if (null_is_error) { errors_found |= ERROR_REFS; - return error("HEAD: detached HEAD points at nothing"); + return error("%s: detached HEAD points at nothing", + head_ref_name); } - fprintf(stderr, "notice: HEAD points to an unborn branch (%s)\n", - head_points_at + 11); + fprintf(stderr, "notice: %s points to an unborn branch (%s)\n", + head_ref_name, *head_points_at + 11); } return 0; } @@ -720,7 +747,6 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) git_config(fsck_config, NULL); - fsck_head_link(); if (connectivity_only) { for_each_loose_object(mark_loose_for_connectivity, NULL, 0); for_each_packed_object(mark_packed_for_connectivity, NULL, 0); @@ -740,7 +766,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) struct progress *progress = NULL; if (show_progress) { - for (p = get_packed_git(the_repository); p; + for (p = get_all_packs(the_repository); p; p = p->next) { if (open_pack_index(p)) continue; @@ -749,7 +775,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) progress = start_progress(_("Checking objects"), total); } - for (p = get_packed_git(the_repository); p; + for (p = get_all_packs(the_repository); p; p = p->next) { /* verify gives error messages itself */ if (verify_pack(p, fsck_obj_buffer, @@ -848,5 +874,23 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) } } + if (!git_config_get_bool("core.multipackindex", &i) && i) { + struct child_process midx_verify = CHILD_PROCESS_INIT; + const char *midx_argv[] = { "multi-pack-index", "verify", NULL, NULL, NULL }; + + midx_verify.argv = midx_argv; + midx_verify.git_cmd = 1; + if (run_command(&midx_verify)) + errors_found |= ERROR_COMMIT_GRAPH; + + prepare_alt_odb(the_repository); + for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) { + midx_argv[2] = "--object-dir"; + midx_argv[3] = alt->path; + if (run_command(&midx_verify)) + errors_found |= ERROR_COMMIT_GRAPH; + } + } + return errors_found; } diff --git a/builtin/gc.c b/builtin/gc.c index 57069442b0..871a56f1c5 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -183,7 +183,7 @@ static struct packed_git *find_base_packs(struct string_list *packs, { struct packed_git *p, *base = NULL; - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_local) continue; if (limit) { @@ -208,7 +208,7 @@ static int too_many_packs(void) if (gc_auto_pack_limit <= 0) return 0; - for (cnt = 0, p = get_packed_git(the_repository); p; p = p->next) { + for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_local) continue; if (p->pack_keep) @@ -441,10 +441,16 @@ static const char *lock_repo_for_gc(int force, pid_t* ret_pid) return NULL; } +/* + * Returns 0 if there was no previous error and gc can proceed, 1 if + * gc should not proceed due to an error in the last run. 
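/*
 * [Editor's note -- not part of the patch] The rewritten get_default_heads()
 * above is an instance of the usual per-worktree iteration shape.  A minimal
 * sketch with a hypothetical report() callback, using only calls that appear
 * in the fsck.c hunk (get_worktrees, strbuf_worktree_ref, free_worktrees):
 */
static void report_each_worktree_head(void (*report)(const char *refname))
{
	struct worktree **worktrees, **p;

	worktrees = get_worktrees(0);
	for (p = worktrees; *p; p++) {
		struct strbuf ref = STRBUF_INIT;

		/* "HEAD" is qualified per worktree, e.g. "worktrees/<id>/HEAD" */
		strbuf_worktree_ref(*p, &ref, "HEAD");
		report(ref.buf);
		strbuf_release(&ref);
	}
	free_worktrees(worktrees);
}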
Prints a + * message and returns -1 if an error occured while reading gc.log + */ static int report_last_gc_error(void) { struct strbuf sb = STRBUF_INIT; int ret = 0; + ssize_t len; struct stat st; char *gc_log_path = git_pathdup("gc.log"); @@ -452,39 +458,47 @@ static int report_last_gc_error(void) if (errno == ENOENT) goto done; - ret = error_errno(_("Can't stat %s"), gc_log_path); + ret = error_errno(_("cannot stat '%s'"), gc_log_path); goto done; } if (st.st_mtime < gc_log_expire_time) goto done; - ret = strbuf_read_file(&sb, gc_log_path, 0); - if (ret > 0) - ret = error(_("The last gc run reported the following. " + len = strbuf_read_file(&sb, gc_log_path, 0); + if (len < 0) + ret = error_errno(_("cannot read '%s'"), gc_log_path); + else if (len > 0) { + /* + * A previous gc failed. Report the error, and don't + * bother with an automatic gc run since it is likely + * to fail in the same way. + */ + warning(_("The last gc run reported the following. " "Please correct the root cause\n" "and remove %s.\n" "Automatic cleanup will not be performed " "until the file is removed.\n\n" "%s"), gc_log_path, sb.buf); + ret = 1; + } strbuf_release(&sb); done: free(gc_log_path); return ret; } -static int gc_before_repack(void) +static void gc_before_repack(void) { if (pack_refs && run_command_v_opt(pack_refs_cmd.argv, RUN_GIT_CMD)) - return error(FAILED_RUN, pack_refs_cmd.argv[0]); + die(FAILED_RUN, pack_refs_cmd.argv[0]); if (prune_reflogs && run_command_v_opt(reflog.argv, RUN_GIT_CMD)) - return error(FAILED_RUN, reflog.argv[0]); + die(FAILED_RUN, reflog.argv[0]); pack_refs = 0; prune_reflogs = 0; - return 0; } int cmd_gc(int argc, const char **argv, const char *prefix) @@ -565,13 +579,17 @@ int cmd_gc(int argc, const char **argv, const char *prefix) fprintf(stderr, _("See \"git help gc\" for manual housekeeping.\n")); } if (detach_auto) { - if (report_last_gc_error()) - return -1; + int ret = report_last_gc_error(); + if (ret < 0) + /* an I/O error occured, already reported */ + exit(128); + if (ret == 1) + /* Last gc --auto failed. Skip this one. 
*/ + return 0; if (lock_repo_for_gc(force, &pid)) return 0; - if (gc_before_repack()) - return -1; + gc_before_repack(); /* dies on failure */ delete_tempfile(&pidfile); /* @@ -611,13 +629,12 @@ int cmd_gc(int argc, const char **argv, const char *prefix) atexit(process_log_file_at_exit); } - if (gc_before_repack()) - return -1; + gc_before_repack(); if (!repository_format_precious_objects) { close_all_packs(the_repository->objects); if (run_command_v_opt(repack.argv, RUN_GIT_CMD)) - return error(FAILED_RUN, repack.argv[0]); + die(FAILED_RUN, repack.argv[0]); if (prune_expire) { argv_array_push(&prune, prune_expire); @@ -627,18 +644,18 @@ int cmd_gc(int argc, const char **argv, const char *prefix) argv_array_push(&prune, "--exclude-promisor-objects"); if (run_command_v_opt(prune.argv, RUN_GIT_CMD)) - return error(FAILED_RUN, prune.argv[0]); + die(FAILED_RUN, prune.argv[0]); } } if (prune_worktrees_expire) { argv_array_push(&prune_worktrees, prune_worktrees_expire); if (run_command_v_opt(prune_worktrees.argv, RUN_GIT_CMD)) - return error(FAILED_RUN, prune_worktrees.argv[0]); + die(FAILED_RUN, prune_worktrees.argv[0]); } if (run_command_v_opt(rerere.argv, RUN_GIT_CMD)) - return error(FAILED_RUN, rerere.argv[0]); + die(FAILED_RUN, rerere.argv[0]); report_garbage = report_pack_garbage; reprepare_packed_git(the_repository); @@ -646,7 +663,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix) clean_pack_garbage(); if (gc_write_commit_graph) - write_commit_graph_reachable(get_object_directory(), 0); + write_commit_graph_reachable(get_object_directory(), 0, + !quiet && !daemonized); if (auto_gc && too_many_loose_objects()) warning(_("There are too many unreachable loose objects; " diff --git a/builtin/grep.c b/builtin/grep.c index 601f801158..71df52a333 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -34,7 +34,6 @@ static int recurse_submodules; #define GREP_NUM_THREADS_DEFAULT 8 static int num_threads; -#ifndef NO_PTHREADS static pthread_t *threads; /* We use one producer thread and THREADS consumer @@ -70,13 +69,11 @@ static pthread_mutex_t grep_mutex; static inline void grep_lock(void) { - assert(num_threads); pthread_mutex_lock(&grep_mutex); } static inline void grep_unlock(void) { - assert(num_threads); pthread_mutex_unlock(&grep_mutex); } @@ -103,7 +100,8 @@ static void add_work(struct grep_opt *opt, const struct grep_source *gs) todo[todo_end].source = *gs; if (opt->binary != GREP_BINARY_TEXT) - grep_source_load_driver(&todo[todo_end].source); + grep_source_load_driver(&todo[todo_end].source, + opt->repo->index); todo[todo_end].done = 0; strbuf_reset(&todo[todo_end].out); todo_end = (todo_end + 1) % ARRAY_SIZE(todo); @@ -233,6 +231,9 @@ static int wait_all(void) int hit = 0; int i; + if (!HAVE_THREADS) + BUG("Never call this function unless you have started threads"); + grep_lock(); all_work_added = 1; @@ -264,13 +265,6 @@ static int wait_all(void) return hit; } -#else /* !NO_PTHREADS */ - -static int wait_all(void) -{ - return 0; -} -#endif static int grep_cmd_config(const char *var, const char *value, void *cb) { @@ -283,17 +277,15 @@ static int grep_cmd_config(const char *var, const char *value, void *cb) if (num_threads < 0) die(_("invalid number of threads specified (%d) for %s"), num_threads, var); -#ifdef NO_PTHREADS - else if (num_threads && num_threads != 1) { + else if (!HAVE_THREADS && num_threads > 1) { /* * TRANSLATORS: %s is the configuration * variable for tweaking threads, currently * grep.threads */ warning(_("no threads support, ignoring %s"), var); - 
num_threads = 0; + num_threads = 1; } -#endif } if (!strcmp(var, "submodule.recurse")) @@ -329,17 +321,14 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid, grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid); strbuf_release(&pathbuf); -#ifndef NO_PTHREADS - if (num_threads) { + if (num_threads > 1) { /* * add_work() copies gs and thus assumes ownership of * its fields, so do not call grep_source_clear() */ add_work(opt, &gs); return 0; - } else -#endif - { + } else { int hit; hit = grep_source(opt, &gs); @@ -362,17 +351,14 @@ static int grep_file(struct grep_opt *opt, const char *filename) grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename); strbuf_release(&buf); -#ifndef NO_PTHREADS - if (num_threads) { + if (num_threads > 1) { /* * add_work() copies gs and thus assumes ownership of * its fields, so do not call grep_source_clear() */ add_work(opt, &gs); return 0; - } else -#endif - { + } else { int hit; hit = grep_source(opt, &gs); @@ -421,11 +407,23 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject, struct repository submodule; int hit; - if (!is_submodule_active(superproject, path)) + /* + * NEEDSWORK: submodules functions need to be protected because they + * access the object store via config_from_gitmodules(): the latter + * uses get_oid() which, for now, relies on the global the_repository + * object. + */ + grep_read_lock(); + + if (!is_submodule_active(superproject, path)) { + grep_read_unlock(); return 0; + } - if (repo_submodule_init(&submodule, superproject, path)) + if (repo_submodule_init(&submodule, superproject, path)) { + grep_read_unlock(); return 0; + } repo_read_gitmodules(&submodule); @@ -439,7 +437,6 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject, * store is no longer global and instead is a member of the repository * object. */ - grep_read_lock(); add_to_alternates_memory(submodule.objects->objectdir); grep_read_unlock(); @@ -711,11 +708,14 @@ static int context_callback(const struct option *opt, const char *arg, static int file_callback(const struct option *opt, const char *arg, int unset) { struct grep_opt *grep_opt = opt->value; - int from_stdin = !strcmp(arg, "-"); + int from_stdin; FILE *patterns; int lno = 0; struct strbuf sb = STRBUF_INIT; + BUG_ON_OPT_NEG(unset); + + from_stdin = !strcmp(arg, "-"); patterns = from_stdin ? 
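/*
 * [Editor's note -- not part of the patch] The BUG_ON_OPT_NEG()/BUG_ON_OPT_ARG()
 * calls being added throughout these parse-options callbacks document that the
 * matching option table entries carry PARSE_OPT_NONEG and/or PARSE_OPT_NOARG;
 * if the table and the callback ever drift apart, the callback now trips a
 * BUG() instead of silently ignoring "--no-foo" or a stray argument.  A minimal
 * sketch with hypothetical names, using the same struct option layout seen in
 * this series:
 */
static int frobnicate;

static int frobnicate_cb(const struct option *opt, const char *arg, int unset)
{
	BUG_ON_OPT_NEG(unset);	/* table says PARSE_OPT_NONEG */
	BUG_ON_OPT_ARG(arg);	/* table says PARSE_OPT_NOARG */
	*(int *)opt->value = 1;
	return 0;
}

static const struct option frob_options[] = {
	{ OPTION_CALLBACK, 0, "frobnicate", &frobnicate, NULL,
	  N_("enable frobnication"),
	  PARSE_OPT_NOARG | PARSE_OPT_NONEG, frobnicate_cb },
	OPT_END()
};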
stdin : fopen(arg, "r"); if (!patterns) die_errno(_("cannot open '%s'"), arg); @@ -736,6 +736,8 @@ static int file_callback(const struct option *opt, const char *arg, int unset) static int not_callback(const struct option *opt, const char *arg, int unset) { struct grep_opt *grep_opt = opt->value; + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); append_grep_pattern(grep_opt, "--not", "command line", 0, GREP_NOT); return 0; } @@ -743,6 +745,8 @@ static int not_callback(const struct option *opt, const char *arg, int unset) static int and_callback(const struct option *opt, const char *arg, int unset) { struct grep_opt *grep_opt = opt->value; + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); append_grep_pattern(grep_opt, "--and", "command line", 0, GREP_AND); return 0; } @@ -750,6 +754,8 @@ static int and_callback(const struct option *opt, const char *arg, int unset) static int open_callback(const struct option *opt, const char *arg, int unset) { struct grep_opt *grep_opt = opt->value; + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); append_grep_pattern(grep_opt, "(", "command line", 0, GREP_OPEN_PAREN); return 0; } @@ -757,6 +763,8 @@ static int open_callback(const struct option *opt, const char *arg, int unset) static int close_callback(const struct option *opt, const char *arg, int unset) { struct grep_opt *grep_opt = opt->value; + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); append_grep_pattern(grep_opt, ")", "command line", 0, GREP_CLOSE_PAREN); return 0; } @@ -765,6 +773,7 @@ static int pattern_callback(const struct option *opt, const char *arg, int unset) { struct grep_opt *grep_opt = opt->value; + BUG_ON_OPT_NEG(unset); append_grep_pattern(grep_opt, arg, "-e option", 0, GREP_PATTERN); return 0; } @@ -811,6 +820,8 @@ int cmd_grep(int argc, const char **argv, const char *prefix) GREP_BINARY_NOMATCH), OPT_BOOL(0, "textconv", &opt.allow_textconv, N_("process binary files with textconv filters")), + OPT_SET_INT('r', "recursive", &opt.max_depth, + N_("search in subdirectories (default)"), -1), { OPTION_INTEGER, 0, "max-depth", &opt.max_depth, N_("depth"), N_("descend at most <depth> levels"), PARSE_OPT_NONEG, NULL, 1 }, @@ -904,9 +915,9 @@ int cmd_grep(int argc, const char **argv, const char *prefix) OPT_END() }; - init_grep_defaults(); + init_grep_defaults(the_repository); git_config(grep_cmd_config, NULL); - grep_init(&opt, prefix); + grep_init(&opt, the_repository, prefix); /* * If there is no -- then the paths must exist in the working @@ -1035,39 +1046,35 @@ int cmd_grep(int argc, const char **argv, const char *prefix) pathspec.recursive = 1; pathspec.recurse_submodules = !!recurse_submodules; -#ifndef NO_PTHREADS - if (list.nr || cached || show_in_pager) - num_threads = 0; - else if (num_threads == 0) - num_threads = GREP_NUM_THREADS_DEFAULT; - else if (num_threads < 0) - die(_("invalid number of threads specified (%d)"), num_threads); - if (num_threads == 1) - num_threads = 0; -#else - if (num_threads) + if (list.nr || cached || show_in_pager) { + if (num_threads > 1) + warning(_("invalid option combination, ignoring --threads")); + num_threads = 1; + } else if (!HAVE_THREADS && num_threads > 1) { warning(_("no threads support, ignoring --threads")); - num_threads = 0; -#endif + num_threads = 1; + } else if (num_threads < 0) + die(_("invalid number of threads specified (%d)"), num_threads); + else if (num_threads == 0) + num_threads = HAVE_THREADS ? 
GREP_NUM_THREADS_DEFAULT : 1; - if (!num_threads) + if (num_threads > 1) { + if (!HAVE_THREADS) + BUG("Somebody got num_threads calculation wrong!"); + if (!(opt.name_only || opt.unmatch_name_only || opt.count) + && (opt.pre_context || opt.post_context || + opt.file_break || opt.funcbody)) + skip_first_line = 1; + start_threads(&opt); + } else { /* * The compiled patterns on the main path are only * used when not using threading. Otherwise - * start_threads() below calls compile_grep_patterns() + * start_threads() above calls compile_grep_patterns() * for each thread. */ compile_grep_patterns(&opt); - -#ifndef NO_PTHREADS - if (num_threads) { - if (!(opt.name_only || opt.unmatch_name_only || opt.count) - && (opt.pre_context || opt.post_context || - opt.file_break || opt.funcbody)) - skip_first_line = 1; - start_threads(&opt); } -#endif if (show_in_pager && (cached || list.nr)) die(_("--open-files-in-pager only works on the worktree")); @@ -1118,7 +1125,7 @@ int cmd_grep(int argc, const char **argv, const char *prefix) hit = grep_objects(&opt, &pathspec, &list); } - if (num_threads) + if (num_threads > 1) hit |= wait_all(); if (hit && show_in_pager) run_pager(&opt, prefix); diff --git a/builtin/hash-object.c b/builtin/hash-object.c index 9ada4f4dfd..d6f06ea32f 100644 --- a/builtin/hash-object.c +++ b/builtin/hash-object.c @@ -40,7 +40,7 @@ static void hash_fd(int fd, const char *type, const char *path, unsigned flags, if (fstat(fd, &st) < 0 || (literally ? hash_literally(&oid, fd, type, flags) - : index_fd(&oid, fd, &st, type_from_string(type), path, flags))) + : index_fd(&the_index, &oid, fd, &st, type_from_string(type), path, flags))) die((flags & HASH_WRITE_OBJECT) ? "Unable to add %s to database" : "Unable to hash %s", path); diff --git a/builtin/help.c b/builtin/help.c index 8d4f6dd301..7739a5c155 100644 --- a/builtin/help.c +++ b/builtin/help.c @@ -38,7 +38,7 @@ static const char *html_path; static int show_all = 0; static int show_guides = 0; static int show_config; -static int verbose; +static int verbose = 1; static unsigned int colopts; static enum help_format help_format = HELP_FORMAT_NONE; static int exclude_guides; @@ -415,9 +415,37 @@ static const char *check_git_cmd(const char* cmd) alias = alias_lookup(cmd); if (alias) { - printf_ln(_("'%s' is aliased to '%s'"), cmd, alias); - free(alias); - exit(0); + const char **argv; + int count; + + /* + * handle_builtin() in git.c rewrites "git cmd --help" + * to "git help --exclude-guides cmd", so we can use + * exclude_guides to distinguish "git cmd --help" from + * "git help cmd". In the latter case, or if cmd is an + * alias for a shell command, just print the alias + * definition. + */ + if (!exclude_guides || alias[0] == '!') { + printf_ln(_("'%s' is aliased to '%s'"), cmd, alias); + free(alias); + exit(0); + } + /* + * Otherwise, we pretend that the command was "git + * word0 --help". We use split_cmdline() to get the + * first word of the alias, to ensure that we use the + * same rules as when the alias is actually + * used. split_cmdline() modifies alias in-place. 
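/*
 * [Editor's note -- not part of the patch] Net effect of the help.c change
 * above: for an alias of a git command, "git <alias> --help" now shows the
 * manual page of the underlying command instead of only printing the alias
 * definition, e.g. with a hypothetical alias.co = checkout:
 *
 *     $ git co --help        # prints "'co' is aliased to 'checkout'" on
 *                            # stderr, then opens the git-checkout docs
 *     $ git help co          # still just prints the alias definition
 *
 * Aliases that shell out (those starting with "!") keep the old behaviour.
 */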
+ */ + fprintf_ln(stderr, _("'%s' is aliased to '%s'"), cmd, alias); + count = split_cmdline(alias, &argv); + if (count < 0) + die(_("bad alias.%s string: %s"), cmd, + split_cmdline_strerror(count)); + free(argv); + UNLEAK(alias); + return alias; } if (exclude_guides) diff --git a/builtin/index-pack.c b/builtin/index-pack.c index 9582ead950..ac1f4ea9a7 100644 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -42,9 +42,7 @@ struct base_data { }; struct thread_local { -#ifndef NO_PTHREADS pthread_t thread; -#endif struct base_data *base_cache; size_t base_cache_used; int pack_fd; @@ -98,8 +96,6 @@ static uint32_t input_crc32; static int input_fd, output_fd; static const char *curr_pack; -#ifndef NO_PTHREADS - static struct thread_local *thread_data; static int nr_dispatched; static int threads_active; @@ -179,26 +175,6 @@ static void cleanup_thread(void) free(thread_data); } -#else - -#define read_lock() -#define read_unlock() - -#define counter_lock() -#define counter_unlock() - -#define work_lock() -#define work_unlock() - -#define deepest_delta_lock() -#define deepest_delta_unlock() - -#define type_cas_lock() -#define type_cas_unlock() - -#endif - - static int mark_link(struct object *obj, int type, void *data, struct fsck_options *options) { if (!obj) @@ -364,22 +340,20 @@ static NORETURN void bad_object(off_t offset, const char *format, ...) static inline struct thread_local *get_thread_data(void) { -#ifndef NO_PTHREADS - if (threads_active) - return pthread_getspecific(key); - assert(!threads_active && - "This should only be reached when all threads are gone"); -#endif + if (HAVE_THREADS) { + if (threads_active) + return pthread_getspecific(key); + assert(!threads_active && + "This should only be reached when all threads are gone"); + } return ¬hread_data; } -#ifndef NO_PTHREADS static void set_thread_data(struct thread_local *data) { if (threads_active) pthread_setspecific(key, data); } -#endif static struct base_data *alloc_base_data(void) { @@ -450,7 +424,8 @@ static void *unpack_entry_data(off_t offset, unsigned long size, int hdrlen; if (!is_delta_type(type)) { - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1; + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX, + type_name(type),(uintmax_t)size) + 1; the_hash_algo->init_fn(&c); the_hash_algo->update_fn(&c, hdr, hdrlen); } else @@ -719,9 +694,9 @@ static void find_ref_delta_children(const struct object_id *oid, *last_index = -1; return; } - while (first > 0 && !oidcmp(&ref_deltas[first - 1].oid, oid)) + while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid)) --first; - while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid)) + while (last < end && oideq(&ref_deltas[last + 1].oid, oid)) ++last; *first_index = first; *last_index = last; @@ -1092,7 +1067,6 @@ static void resolve_base(struct object_entry *obj) find_unresolved_deltas(base_obj); } -#ifndef NO_PTHREADS static void *threaded_second_pass(void *data) { set_thread_data(data); @@ -1116,7 +1090,6 @@ static void *threaded_second_pass(void *data) } return NULL; } -#endif /* * First pass: @@ -1166,7 +1139,7 @@ static void parse_pack_objects(unsigned char *hash) /* Check pack integrity */ flush(); the_hash_algo->final_fn(hash, &input_ctx); - if (hashcmp(fill(the_hash_algo->rawsz), hash)) + if (!hasheq(fill(the_hash_algo->rawsz), hash)) die(_("pack is corrupted (SHA1 mismatch)")); use(the_hash_algo->rawsz); @@ -1213,7 +1186,6 @@ static void resolve_deltas(void) progress = start_progress(_("Resolving deltas"), nr_ref_deltas + 
nr_ofs_deltas); -#ifndef NO_PTHREADS nr_dispatched = 0; if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) { init_thread(); @@ -1229,7 +1201,6 @@ static void resolve_deltas(void) cleanup_thread(); return; } -#endif for (i = 0; i < nr_objects; i++) { struct object_entry *obj = &objects[i]; @@ -1280,7 +1251,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha fixup_pack_header_footer(output_fd, pack_hash, curr_pack, nr_objects, read_hash, consumed_bytes-the_hash_algo->rawsz); - if (hashcmp(read_hash, tail_hash) != 0) + if (!hasheq(read_hash, tail_hash)) die(_("Unexpected tail checksum for %s " "(disk corruption?)"), curr_pack); } @@ -1531,11 +1502,10 @@ static int git_index_pack_config(const char *k, const char *v, void *cb) if (nr_threads < 0) die(_("invalid number of threads specified (%d)"), nr_threads); -#ifdef NO_PTHREADS - if (nr_threads != 1) + if (!HAVE_THREADS && nr_threads != 1) { warning(_("no threads support, ignoring %s"), k); - nr_threads = 1; -#endif + nr_threads = 1; + } return 0; } return git_default_config(k, v, cb); @@ -1628,10 +1598,10 @@ static void show_pack_info(int stat_only) chain_histogram[obj_stat[i].delta_depth - 1]++; if (stat_only) continue; - printf("%s %-6s %lu %lu %"PRIuMAX, + printf("%s %-6s %"PRIuMAX" %"PRIuMAX" %"PRIuMAX, oid_to_hex(&obj->idx.oid), - type_name(obj->real_type), obj->size, - (unsigned long)(obj[1].idx.offset - obj->idx.offset), + type_name(obj->real_type), (uintmax_t)obj->size, + (uintmax_t)(obj[1].idx.offset - obj->idx.offset), (uintmax_t)obj->idx.offset); if (is_delta_type(obj->type)) { struct object_entry *bobj = &objects[obj_stat[i].base_object_no]; @@ -1723,12 +1693,10 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) nr_threads = strtoul(arg+10, &end, 0); if (!arg[10] || *end || nr_threads < 0) usage(index_pack_usage); -#ifdef NO_PTHREADS - if (nr_threads != 1) - warning(_("no threads support, " - "ignoring %s"), arg); - nr_threads = 1; -#endif + if (!HAVE_THREADS && nr_threads != 1) { + warning(_("no threads support, ignoring %s"), arg); + nr_threads = 1; + } } else if (starts_with(arg, "--pack_header=")) { struct pack_header *hdr; char *c; @@ -1791,14 +1759,12 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) if (strict) opts.flags |= WRITE_IDX_STRICT; -#ifndef NO_PTHREADS - if (!nr_threads) { + if (HAVE_THREADS && !nr_threads) { nr_threads = online_cpus(); /* An experiment showed that more threads does not mean faster */ if (nr_threads > 3) nr_threads = 3; } -#endif curr_pack = open_pack_file(pack_name); parse_pack_header(); diff --git a/builtin/init-db.c b/builtin/init-db.c index 12ddda7e7b..41faffd28d 100644 --- a/builtin/init-db.c +++ b/builtin/init-db.c @@ -451,6 +451,7 @@ static int guess_repository_type(const char *git_dir) static int shared_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); *((int *) opt->value) = (arg) ? 
git_config_perm("arg", arg) : PERM_GROUP; return 0; } diff --git a/builtin/interpret-trailers.c b/builtin/interpret-trailers.c index 4b87e0dd2e..8ae40dec47 100644 --- a/builtin/interpret-trailers.c +++ b/builtin/interpret-trailers.c @@ -80,6 +80,8 @@ static int parse_opt_parse(const struct option *opt, const char *arg, v->only_trailers = 1; v->only_input = 1; v->unfold = 1; + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); return 0; } diff --git a/builtin/log.c b/builtin/log.c index e094560d9a..0fe6f9ba1e 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -31,6 +31,9 @@ #include "progress.h" #include "commit-slab.h" #include "repository.h" +#include "commit-reach.h" +#include "interdiff.h" +#include "range-diff.h" #define MAIL_DEFAULT_WRAP 72 @@ -104,6 +107,8 @@ static int log_line_range_callback(const struct option *option, const char *arg, { struct line_opt_callback_data *data = option->value; + BUG_ON_OPT_NEG(unset); + if (!arg) return -1; @@ -115,7 +120,7 @@ static int log_line_range_callback(const struct option *option, const char *arg, static void init_log_defaults(void) { - init_grep_defaults(); + init_grep_defaults(the_repository); init_diff_ui_defaults(); decoration_style = auto_decoration_style(); @@ -467,7 +472,7 @@ int cmd_whatchanged(int argc, const char **argv, const char *prefix) init_log_defaults(); git_config(git_log_config, NULL); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.diff = 1; rev.simplify_history = 0; memset(&opt, 0, sizeof(opt)); @@ -507,7 +512,8 @@ static int show_blob_object(const struct object_id *oid, struct rev_info *rev, c &oidc, &obj_context)) die(_("Not a valid object name %s"), obj_name); if (!obj_context.path || - !textconv_object(obj_context.path, obj_context.mode, &oidc, 1, &buf, &size)) { + !textconv_object(the_repository, obj_context.path, + obj_context.mode, &oidc, 1, &buf, &size)) { free(obj_context.path); return stream_blob_to_fd(1, oid, NULL, 0); } @@ -584,7 +590,7 @@ int cmd_show(int argc, const char **argv, const char *prefix) git_config(git_log_config, NULL); memset(&match_all, 0, sizeof(match_all)); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.diff = 1; rev.always_show_header = 1; rev.no_walk = REVISION_WALK_NO_WALK_SORTED; @@ -664,7 +670,7 @@ int cmd_log_reflog(int argc, const char **argv, const char *prefix) init_log_defaults(); git_config(git_log_config, NULL); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); init_reflog_walk(&rev.reflog_info); rev.verbose_header = 1; memset(&opt, 0, sizeof(opt)); @@ -703,7 +709,7 @@ int cmd_log(int argc, const char **argv, const char *prefix) init_log_defaults(); git_config(git_log_config, NULL); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.always_show_header = 1; memset(&opt, 0, sizeof(opt)); opt.def = "HEAD"; @@ -913,10 +919,10 @@ static void get_patch_ids(struct rev_info *rev, struct patch_ids *ids) if ((flags1 & UNINTERESTING) == (flags2 & UNINTERESTING)) die(_("Not a range.")); - init_patch_ids(ids); + init_patch_ids(the_repository, ids); /* given a range a..b get all patch ids for b..a */ - init_revisions(&check_rev, rev->prefix); + repo_init_revisions(the_repository, &check_rev, rev->prefix); check_rev.max_parents = 1; o1->flags ^= UNINTERESTING; o2->flags ^= UNINTERESTING; @@ -992,12 +998,30 @@ static char *find_branch_name(struct rev_info *rev) tip_oid = &rev->cmdline.rev[positive].item->oid; if (dwim_ref(ref, strlen(ref), 
&branch_oid, &full_ref) && skip_prefix(full_ref, "refs/heads/", &v) && - !oidcmp(tip_oid, &branch_oid)) + oideq(tip_oid, &branch_oid)) branch = xstrdup(v); free(full_ref); return branch; } +static void show_diffstat(struct rev_info *rev, + struct commit *origin, struct commit *head) +{ + struct diff_options opts; + + memcpy(&opts, &rev->diffopt, sizeof(opts)); + opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT; + diff_setup_done(&opts); + + diff_tree_oid(get_commit_tree_oid(origin), + get_commit_tree_oid(head), + "", &opts); + diffcore_std(&opts); + diff_flush(&opts); + + fprintf(rev->diffopt.file, "\n"); +} + static void make_cover_letter(struct rev_info *rev, int use_stdout, struct commit *origin, int nr, struct commit **list, @@ -1011,7 +1035,6 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout, struct strbuf sb = STRBUF_INIT; int i; const char *encoding = "UTF-8"; - struct diff_options opts; int need_8bit_cte = 0; struct pretty_print_context pp = {0}; struct commit *head = list[0]; @@ -1061,25 +1084,20 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout, shortlog_output(&log); - /* - * We can only do diffstat with a unique reference point - */ - if (!origin) - return; - - memcpy(&opts, &rev->diffopt, sizeof(opts)); - opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT; - opts.stat_width = MAIL_DEFAULT_WRAP; - - diff_setup_done(&opts); + /* We can only do diffstat with a unique reference point */ + if (origin) + show_diffstat(rev, origin, head); - diff_tree_oid(get_commit_tree_oid(origin), - get_commit_tree_oid(head), - "", &opts); - diffcore_std(&opts); - diff_flush(&opts); + if (rev->idiff_oid1) { + fprintf_ln(rev->diffopt.file, "%s", rev->idiff_title); + show_interdiff(rev, 0); + } - fprintf(rev->diffopt.file, "\n"); + if (rev->rdiff1) { + fprintf_ln(rev->diffopt.file, "%s", rev->rdiff_title); + show_range_diff(rev->rdiff1, rev->rdiff2, + rev->creation_factor, 1, &rev->diffopt); + } } static const char *clean_message_id(const char *msg_id) @@ -1133,6 +1151,8 @@ static int keep_subject = 0; static int keep_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); ((struct rev_info *)opt->value)->total = -1; keep_subject = 1; return 0; @@ -1143,6 +1163,7 @@ static int subject_prefix = 0; static int subject_prefix_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); subject_prefix = 1; ((struct rev_info *)opt->value)->subject_prefix = arg; return 0; @@ -1150,6 +1171,8 @@ static int subject_prefix_callback(const struct option *opt, const char *arg, static int rfc_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); return subject_prefix_callback(opt, "RFC PATCH", unset); } @@ -1158,6 +1181,7 @@ static int numbered_cmdline_opt = 0; static int numbered_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_ARG(arg); *(int *)opt->value = numbered_cmdline_opt = unset ? 
0 : 1; if (unset) auto_number = 0; @@ -1167,6 +1191,7 @@ static int numbered_callback(const struct option *opt, const char *arg, static int no_numbered_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); return numbered_callback(opt, arg, 1); } @@ -1174,6 +1199,7 @@ static int output_directory_callback(const struct option *opt, const char *arg, int unset) { const char **dir = (const char **)opt->value; + BUG_ON_OPT_NEG(unset); if (*dir) die(_("Two output directories?")); *dir = arg; @@ -1360,13 +1386,13 @@ static void prepare_bases(struct base_tree_info *bases, return; init_commit_base(&commit_base); - diff_setup(&diffopt); + repo_diff_setup(the_repository, &diffopt); diffopt.flags.recursive = 1; diff_setup_done(&diffopt); oidcpy(&bases->base_commit, &base->object.oid); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); revs.max_parents = 1; revs.topo_order = 1; for (i = 0; i < total; i++) { @@ -1419,6 +1445,36 @@ static void print_bases(struct base_tree_info *bases, FILE *file) oidclr(&bases->base_commit); } +static const char *diff_title(struct strbuf *sb, int reroll_count, + const char *generic, const char *rerolled) +{ + if (reroll_count <= 0) + strbuf_addstr(sb, generic); + else /* RFC may be v0, so allow -v1 to diff against v0 */ + strbuf_addf(sb, rerolled, reroll_count - 1); + return sb->buf; +} + +static void infer_range_diff_ranges(struct strbuf *r1, + struct strbuf *r2, + const char *prev, + struct commit *origin, + struct commit *head) +{ + const char *head_oid = oid_to_hex(&head->object.oid); + + if (!strstr(prev, "..")) { + strbuf_addf(r1, "%s..%s", head_oid, prev); + strbuf_addf(r2, "%s..%s", prev, head_oid); + } else if (!origin) { + die(_("failed to infer range-diff ranges")); + } else { + strbuf_addstr(r1, prev); + strbuf_addf(r2, "%s..%s", + oid_to_hex(&origin->object.oid), head_oid); + } +} + int cmd_format_patch(int argc, const char **argv, const char *prefix) { struct commit *commit; @@ -1446,6 +1502,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) struct base_tree_info bases; int show_progress = 0; struct progress *progress = NULL; + struct oid_array idiff_prev = OID_ARRAY_INIT; + struct strbuf idiff_title = STRBUF_INIT; + const char *rdiff_prev = NULL; + struct strbuf rdiff1 = STRBUF_INIT; + struct strbuf rdiff2 = STRBUF_INIT; + struct strbuf rdiff_title = STRBUF_INIT; + int creation_factor = -1; const struct option builtin_format_patch_options[] = { { OPTION_CALLBACK, 'n', "numbered", &numbered, NULL, @@ -1453,7 +1516,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) PARSE_OPT_NOARG, numbered_callback }, { OPTION_CALLBACK, 'N', "no-numbered", &numbered, NULL, N_("use [PATCH] even with multiple patches"), - PARSE_OPT_NOARG, no_numbered_callback }, + PARSE_OPT_NOARG | PARSE_OPT_NONEG, no_numbered_callback }, OPT_BOOL('s', "signoff", &do_signoff, N_("add Signed-off-by:")), OPT_BOOL(0, "stdout", &use_stdout, N_("print patches to standard out")), @@ -1519,6 +1582,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) OPT__QUIET(&quiet, N_("don't print the patch filenames")), OPT_BOOL(0, "progress", &show_progress, N_("show progress while generating patches")), + OPT_CALLBACK(0, "interdiff", &idiff_prev, N_("rev"), + N_("show changes against <rev> in cover letter or single patch"), + parse_opt_object_name), + OPT_STRING(0, "range-diff", &rdiff_prev, N_("refspec"), + N_("show changes against <refspec> in cover letter or single 
patch")), + OPT_INTEGER(0, "creation-factor", &creation_factor, + N_("percentage by which creation is weighted")), OPT_END() }; @@ -1527,7 +1597,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) extra_cc.strdup_strings = 1; init_log_defaults(); git_config(git_format_config, NULL); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.commit_format = CMIT_FMT_EMAIL; rev.expand_tabs_in_log_default = 0; rev.verbose_header = 1; @@ -1703,8 +1773,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) /* Don't say anything if head and upstream are the same. */ if (rev.pending.nr == 2) { struct object_array_entry *o = rev.pending.objects; - if (oidcmp(&o[0].item->oid, &o[1].item->oid) == 0) - return 0; + if (oideq(&o[0].item->oid, &o[1].item->oid)) + goto done; } get_patch_ids(&rev, &ids); } @@ -1728,7 +1798,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) } if (nr == 0) /* nothing to do */ - return 0; + goto done; total = nr; if (cover_letter == -1) { if (config_cover_letter == COVER_AUTO) @@ -1741,6 +1811,35 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) if (numbered) rev.total = total + start_number - 1; + if (idiff_prev.nr) { + if (!cover_letter && total != 1) + die(_("--interdiff requires --cover-letter or single patch")); + rev.idiff_oid1 = &idiff_prev.oid[idiff_prev.nr - 1]; + rev.idiff_oid2 = get_commit_tree_oid(list[0]); + rev.idiff_title = diff_title(&idiff_title, reroll_count, + _("Interdiff:"), + _("Interdiff against v%d:")); + } + + if (creation_factor < 0) + creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT; + else if (!rdiff_prev) + die(_("--creation-factor requires --range-diff")); + + if (rdiff_prev) { + if (!cover_letter && total != 1) + die(_("--range-diff requires --cover-letter or single patch")); + + infer_range_diff_ranges(&rdiff1, &rdiff2, rdiff_prev, + origin, list[0]); + rev.rdiff1 = rdiff1.buf; + rev.rdiff2 = rdiff2.buf; + rev.creation_factor = creation_factor; + rev.rdiff_title = diff_title(&rdiff_title, reroll_count, + _("Range-diff:"), + _("Range-diff against v%d:")); + } + if (!signature) { ; /* --no-signature inhibits all signatures */ } else if (signature && signature != git_version_string) { @@ -1778,6 +1877,9 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) print_signature(rev.diffopt.file); total++; start_number--; + /* interdiff/range-diff in cover-letter; omit from patches */ + rev.idiff_oid1 = NULL; + rev.rdiff1 = NULL; } rev.add_signoff = do_signoff; @@ -1858,6 +1960,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) string_list_clear(&extra_hdr, 0); if (ignore_if_in_upstream) free_patch_ids(&ids); + +done: + oid_array_clear(&idiff_prev); + strbuf_release(&idiff_title); + strbuf_release(&rdiff1); + strbuf_release(&rdiff2); + strbuf_release(&rdiff_title); return 0; } @@ -1938,7 +2047,7 @@ int cmd_cherry(int argc, const char **argv, const char *prefix) } } - init_revisions(&revs, prefix); + repo_init_revisions(the_repository, &revs, prefix); revs.max_parents = 1; if (add_pending_commit(head, &revs, 0)) @@ -1949,7 +2058,7 @@ int cmd_cherry(int argc, const char **argv, const char *prefix) /* Don't say anything if head and upstream are the same. 
*/ if (revs.pending.nr == 2) { struct object_array_entry *o = revs.pending.objects; - if (oidcmp(&o[0].item->oid, &o[1].item->oid) == 0) + if (oideq(&o[0].item->oid, &o[1].item->oid)) return 0; } diff --git a/builtin/ls-files.c b/builtin/ls-files.c index 7f9919a362..c70a9c7158 100644 --- a/builtin/ls-files.c +++ b/builtin/ls-files.c @@ -475,6 +475,8 @@ static int option_parse_exclude(const struct option *opt, { struct string_list *exclude_list = opt->value; + BUG_ON_OPT_NEG(unset); + exc_given = 1; string_list_append(exclude_list, arg); @@ -486,6 +488,8 @@ static int option_parse_exclude_from(const struct option *opt, { struct dir_struct *dir = opt->value; + BUG_ON_OPT_NEG(unset); + exc_given = 1; add_excludes_from_file(dir, arg); @@ -497,6 +501,9 @@ static int option_parse_exclude_standard(const struct option *opt, { struct dir_struct *dir = opt->value; + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); + exc_given = 1; setup_standard_excludes(dir); @@ -548,15 +555,16 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix) N_("show resolve-undo information")), { OPTION_CALLBACK, 'x', "exclude", &exclude_list, N_("pattern"), N_("skip files matching pattern"), - 0, option_parse_exclude }, + PARSE_OPT_NONEG, option_parse_exclude }, { OPTION_CALLBACK, 'X', "exclude-from", &dir, N_("file"), N_("exclude patterns are read from <file>"), - 0, option_parse_exclude_from }, + PARSE_OPT_NONEG, option_parse_exclude_from }, OPT_STRING(0, "exclude-per-directory", &dir.exclude_per_dir, N_("file"), N_("read additional per-directory exclude patterns in <file>")), { OPTION_CALLBACK, 0, "exclude-standard", &dir, NULL, N_("add the standard git exclusions"), - PARSE_OPT_NOARG, option_parse_exclude_standard }, + PARSE_OPT_NOARG | PARSE_OPT_NONEG, + option_parse_exclude_standard }, OPT_SET_INT_F(0, "full-name", &prefix_len, N_("make the output relative to the project top directory"), 0, PARSE_OPT_NONEG), diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c index 1a25df7ee1..1d7f1f5ce2 100644 --- a/builtin/ls-remote.c +++ b/builtin/ls-remote.c @@ -88,18 +88,15 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix) int i; pattern = xcalloc(argc, sizeof(const char *)); for (i = 1; i < argc; i++) { - const char *glob; pattern[i - 1] = xstrfmt("*/%s", argv[i]); - - glob = strchr(argv[i], '*'); - if (glob) - argv_array_pushf(&ref_prefixes, "%.*s", - (int)(glob - argv[i]), argv[i]); - else - expand_ref_prefix(&ref_prefixes, argv[i]); } } + if (flags & REF_TAGS) + argv_array_push(&ref_prefixes, "refs/tags/"); + if (flags & REF_HEADS) + argv_array_push(&ref_prefixes, "refs/heads/"); + remote = remote_get(dest); if (!remote) { if (dest) @@ -151,6 +148,6 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix) } UNLEAK(sorting); - UNLEAK(ref_array); + ref_array_clear(&ref_array); return status; } diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c index fe3b952cb3..7d581d6463 100644 --- a/builtin/ls-tree.c +++ b/builtin/ls-tree.c @@ -100,7 +100,7 @@ static int show_tree(const struct object_id *oid, struct strbuf *base, "BAD"); else xsnprintf(size_text, sizeof(size_text), - "%lu", size); + "%"PRIuMAX, (uintmax_t)size); } else xsnprintf(size_text, sizeof(size_text), "-"); printf("%06o %s %s %7s\t", mode, type, diff --git a/builtin/merge-base.c b/builtin/merge-base.c index 08d91b1f0c..e3f8da13b6 100644 --- a/builtin/merge-base.c +++ b/builtin/merge-base.c @@ -7,6 +7,7 @@ #include "revision.h" #include "parse-options.h" #include "repository.h" +#include "commit-reach.h" static int 
show_merge_base(struct commit **rev, int rev_nr, int show_all) { @@ -110,54 +111,12 @@ static int handle_is_ancestor(int argc, const char **argv) return 1; } -struct rev_collect { - struct commit **commit; - int nr; - int alloc; - unsigned int initial : 1; -}; - -static void add_one_commit(struct object_id *oid, struct rev_collect *revs) -{ - struct commit *commit; - - if (is_null_oid(oid)) - return; - - commit = lookup_commit(the_repository, oid); - if (!commit || - (commit->object.flags & TMP_MARK) || - parse_commit(commit)) - return; - - ALLOC_GROW(revs->commit, revs->nr + 1, revs->alloc); - revs->commit[revs->nr++] = commit; - commit->object.flags |= TMP_MARK; -} - -static int collect_one_reflog_ent(struct object_id *ooid, struct object_id *noid, - const char *ident, timestamp_t timestamp, - int tz, const char *message, void *cbdata) -{ - struct rev_collect *revs = cbdata; - - if (revs->initial) { - revs->initial = 0; - add_one_commit(ooid, revs); - } - add_one_commit(noid, revs); - return 0; -} - static int handle_fork_point(int argc, const char **argv) { struct object_id oid; char *refname; + struct commit *derived, *fork_point; const char *commitname; - struct rev_collect revs; - struct commit *derived; - struct commit_list *bases; - int i, ret = 0; switch (dwim_ref(argv[0], strlen(argv[0]), &oid, &refname)) { case 0: @@ -173,41 +132,14 @@ static int handle_fork_point(int argc, const char **argv) die("Not a valid object name: '%s'", commitname); derived = lookup_commit_reference(the_repository, &oid); - memset(&revs, 0, sizeof(revs)); - revs.initial = 1; - for_each_reflog_ent(refname, collect_one_reflog_ent, &revs); - if (!revs.nr && !get_oid(refname, &oid)) - add_one_commit(&oid, &revs); + fork_point = get_fork_point(refname, derived); - for (i = 0; i < revs.nr; i++) - revs.commit[i]->object.flags &= ~TMP_MARK; - - bases = get_merge_bases_many_dirty(derived, revs.nr, revs.commit); - - /* - * There should be one and only one merge base, when we found - * a common ancestor among reflog entries. 
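/*
 * [Editor's note -- not part of the patch] The reflog-walking code removed
 * below is not lost: handle_fork_point() now calls get_fork_point(), pulled in
 * via the new commit-reach.h include, so the user-facing behaviour of
 *
 *     $ git merge-base --fork-point refs/heads/base topic
 *
 * is meant to stay the same while the implementation becomes reusable by
 * other callers.
 */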
- */ - if (!bases || bases->next) { - ret = 1; - goto cleanup_return; - } - - /* And the found one must be one of the reflog entries */ - for (i = 0; i < revs.nr; i++) - if (&bases->item->object == &revs.commit[i]->object) - break; /* found */ - if (revs.nr <= i) { - ret = 1; /* not found */ - goto cleanup_return; - } - - printf("%s\n", oid_to_hex(&bases->item->object.oid)); + if (!fork_point) + return 1; -cleanup_return: - free_commit_list(bases); - return ret; + printf("%s\n", oid_to_hex(&fork_point->object.oid)); + return 0; } int cmd_merge_base(int argc, const char **argv, const char *prefix) diff --git a/builtin/merge-file.c b/builtin/merge-file.c index b08803e611..06a2f90c48 100644 --- a/builtin/merge-file.c +++ b/builtin/merge-file.c @@ -15,6 +15,8 @@ static int label_cb(const struct option *opt, const char *arg, int unset) static int label_count = 0; const char **names = (const char **)opt->value; + BUG_ON_OPT_NEG(unset); + if (label_count >= 3) return error("too many labels on the command line"); names[label_count++] = arg; diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c index f8023bae1e..70f6fc9167 100644 --- a/builtin/merge-tree.c +++ b/builtin/merge-tree.c @@ -76,7 +76,7 @@ static void *result(struct merge_list *entry, unsigned long *size) their = NULL; if (entry) their = entry->blob; - return merge_blobs(path, base, our, their, size); + return merge_blobs(&the_index, path, base, our, their, size); } static void *origin(struct merge_list *entry, unsigned long *size) @@ -110,7 +110,8 @@ static void show_diff(struct merge_list *entry) xpp.flags = 0; memset(&xecfg, 0, sizeof(xecfg)); xecfg.ctxlen = 3; - ecb.outf = show_outf; + ecb.out_hunk = NULL; + ecb.out_line = show_outf; ecb.priv = NULL; src.ptr = origin(entry, &size); @@ -155,7 +156,7 @@ static int same_entry(struct name_entry *a, struct name_entry *b) { return a->oid && b->oid && - !oidcmp(a->oid, b->oid) && + oideq(a->oid, b->oid) && a->mode == b->mode; } diff --git a/builtin/merge.c b/builtin/merge.c index 8f4a5065c2..c3c976d471 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -36,6 +36,7 @@ #include "packfile.h" #include "tag.h" #include "alias.h" +#include "commit-reach.h" #define DEFAULT_TWOHEAD (1<<0) #define DEFAULT_OCTOPUS (1<<1) @@ -223,6 +224,7 @@ static int option_parse_x(const struct option *opt, static int option_parse_n(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_ARG(arg); show_diffstat = unset; return 0; } @@ -389,7 +391,7 @@ static void squash_message(struct commit *commit, struct commit_list *remotehead printf(_("Squash commit -- not updating HEAD\n")); - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); rev.ignore_merges = 1; rev.commit_format = CMIT_FMT_MEDIUM; @@ -452,7 +454,7 @@ static void finish(struct commit *head_commit, } if (new_head && show_diffstat) { struct diff_options opts; - diff_setup(&opts); + repo_diff_setup(the_repository, &opts); opts.stat_width = -1; /* use full terminal width */ opts.stat_graph_width = -1; /* respect statGraphWidth config */ opts.output_format |= @@ -728,8 +730,9 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common, die(_("unable to write %s"), get_index_file()); return clean ? 
0 : 1; } else { - return try_merge_command(strategy, xopts_nr, xopts, - common, head_arg, remoteheads); + return try_merge_command(the_repository, + strategy, xopts_nr, xopts, + common, head_arg, remoteheads); } } @@ -898,7 +901,7 @@ static int suggest_conflicts(void) fputs(msgbuf.buf, fp); strbuf_release(&msgbuf); fclose(fp); - rerere(allow_rerere_auto); + repo_rerere(the_repository, allow_rerere_auto); printf(_("Automatic merge failed; " "fix conflicts and then commit the result.\n")); return 1; @@ -910,7 +913,7 @@ static int evaluate_result(void) struct rev_info rev; /* Check how many files differ. */ - init_revisions(&rev, ""); + repo_init_revisions(the_repository, &rev, ""); setup_revisions(0, NULL, &rev, NULL); rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK; @@ -1189,7 +1192,7 @@ static int merging_a_throwaway_tag(struct commit *commit) tag_ref = xstrfmt("refs/tags/%s", ((struct tag *)merge_remote_util(commit)->obj)->tag); if (!read_ref(tag_ref, &oid) && - !oidcmp(&oid, &merge_remote_util(commit)->obj->oid)) + oideq(&oid, &merge_remote_util(commit)->obj->oid)) is_throwaway_tag = 0; else is_throwaway_tag = 1; @@ -1334,6 +1337,10 @@ int cmd_merge(int argc, const char **argv, const char *prefix) die(_("%s - not something we can merge"), argv[0]); if (remoteheads->next) die(_("Can merge only exactly one commit into empty head")); + + if (verify_signatures) + verify_merge_signature(remoteheads->item, verbosity); + remote_head_oid = &remoteheads->item->object.oid; read_empty(remote_head_oid, 0); update_ref("initial pull", "HEAD", remote_head_oid, NULL, 0, @@ -1355,31 +1362,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix) if (verify_signatures) { for (p = remoteheads; p; p = p->next) { - struct commit *commit = p->item; - char hex[GIT_MAX_HEXSZ + 1]; - struct signature_check signature_check; - memset(&signature_check, 0, sizeof(signature_check)); - - check_commit_signature(commit, &signature_check); - - find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV); - switch (signature_check.result) { - case 'G': - break; - case 'U': - die(_("Commit %s has an untrusted GPG signature, " - "allegedly by %s."), hex, signature_check.signer); - case 'B': - die(_("Commit %s has a bad GPG signature " - "allegedly by %s."), hex, signature_check.signer); - default: /* 'N' */ - die(_("Commit %s does not have a GPG signature."), hex); - } - if (verbosity >= 0 && signature_check.result == 'G') - printf(_("Commit %s has a good GPG signature by %s\n"), - hex, signature_check.signer); - - signature_check_clear(&signature_check); + verify_merge_signature(p->item, verbosity); } } @@ -1448,7 +1431,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix) goto done; } else if (fast_forward != FF_NO && !remoteheads->next && !common->next && - !oidcmp(&common->item->object.oid, &head_commit->object.oid)) { + oideq(&common->item->object.oid, &head_commit->object.oid)) { /* Again the most common case of merging one remote. */ struct strbuf msg = STRBUF_INIT; struct commit *commit; @@ -1470,7 +1453,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix) goto done; } - if (checkout_fast_forward(&head_commit->object.oid, + if (checkout_fast_forward(the_repository, + &head_commit->object.oid, &commit->object.oid, overwrite_ignore)) { ret = 1; @@ -1521,7 +1505,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix) * HEAD^^" would be missed. 
*/ common_one = get_merge_bases(head_commit, j->item); - if (oidcmp(&common_one->item->object.oid, &j->item->object.oid)) { + if (!oideq(&common_one->item->object.oid, &j->item->object.oid)) { up_to_date = 0; break; } diff --git a/builtin/mktree.c b/builtin/mktree.c index 2dc4ad6ba8..94e82b8504 100644 --- a/builtin/mktree.c +++ b/builtin/mktree.c @@ -98,7 +98,7 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss *ntr++ = 0; /* now at the beginning of SHA1 */ - path = ntr + 41; /* at the beginning of name */ + path = (char *)p + 1; /* at the beginning of name */ if (!nul_term_line && path[0] == '"') { struct strbuf p_uq = STRBUF_INIT; if (unquote_c_style(&p_uq, path, NULL)) diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c new file mode 100644 index 0000000000..fca70f8e4f --- /dev/null +++ b/builtin/multi-pack-index.c @@ -0,0 +1,49 @@ +#include "builtin.h" +#include "cache.h" +#include "config.h" +#include "parse-options.h" +#include "midx.h" + +static char const * const builtin_multi_pack_index_usage[] = { + N_("git multi-pack-index [--object-dir=<dir>] (write|verify)"), + NULL +}; + +static struct opts_multi_pack_index { + const char *object_dir; +} opts; + +int cmd_multi_pack_index(int argc, const char **argv, + const char *prefix) +{ + static struct option builtin_multi_pack_index_options[] = { + OPT_FILENAME(0, "object-dir", &opts.object_dir, + N_("object directory containing set of packfile and pack-index pairs")), + OPT_END(), + }; + + git_config(git_default_config, NULL); + + argc = parse_options(argc, argv, prefix, + builtin_multi_pack_index_options, + builtin_multi_pack_index_usage, 0); + + if (!opts.object_dir) + opts.object_dir = get_object_directory(); + + if (argc == 0) + usage_with_options(builtin_multi_pack_index_usage, + builtin_multi_pack_index_options); + + if (argc > 1) { + die(_("too many arguments")); + return 1; + } + + if (!strcmp(argv[0], "write")) + return write_midx_file(opts.object_dir); + if (!strcmp(argv[0], "verify")) + return verify_midx_file(opts.object_dir); + + die(_("unrecognized verb: %s"), argv[0]); +} diff --git a/builtin/notes.c b/builtin/notes.c index c05cd004ab..c78b7a0c5b 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -215,6 +215,8 @@ static int parse_msg_arg(const struct option *opt, const char *arg, int unset) { struct note_data *d = opt->value; + BUG_ON_OPT_NEG(unset); + strbuf_grow(&d->buf, strlen(arg) + 2); if (d->buf.len) strbuf_addch(&d->buf, '\n'); @@ -229,6 +231,8 @@ static int parse_file_arg(const struct option *opt, const char *arg, int unset) { struct note_data *d = opt->value; + BUG_ON_OPT_NEG(unset); + if (d->buf.len) strbuf_addch(&d->buf, '\n'); if (!strcmp(arg, "-")) { @@ -250,15 +254,15 @@ static int parse_reuse_arg(const struct option *opt, const char *arg, int unset) enum object_type type; unsigned long len; + BUG_ON_OPT_NEG(unset); + if (d->buf.len) strbuf_addch(&d->buf, '\n'); if (get_oid(arg, &object)) die(_("failed to resolve '%s' as a valid ref."), arg); - if (!(buf = read_object_file(&object, &type, &len))) { - free(buf); + if (!(buf = read_object_file(&object, &type, &len))) die(_("failed to read object '%s'."), arg); - } if (type != OBJ_BLOB) { free(buf); die(_("cannot read note data from non-blob object '%s'."), arg); @@ -273,6 +277,7 @@ static int parse_reuse_arg(const struct option *opt, const char *arg, int unset) static int parse_reedit_arg(const struct option *opt, const char *arg, int unset) { struct note_data *d = opt->value; + BUG_ON_OPT_NEG(unset); 
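A note on the BUG_ON_OPT_NEG(unset) assertions this patch adds to option callbacks such as the one just above: they belong to options that never accept a negated form (compare the PARSE_OPT_NONEG flag given to pack-objects' --index-version further down), so the callback can safely assert that it is never invoked with unset set. A minimal sketch of the pattern, assuming git's parse-options.h; the --frotz option and its variables are purely illustrative and not part of this patch:

static const char *frotz_arg;

static int parse_frotz_cb(const struct option *opt, const char *arg, int unset)
{
	BUG_ON_OPT_NEG(unset);			/* the parser already refused --no-frotz */
	*(const char **)opt->value = arg;	/* record the (hypothetical) argument */
	return 0;
}

static struct option frotz_options[] = {
	{ OPTION_CALLBACK, 0, "frotz", &frotz_arg, N_("value"),
	  N_("illustrative option, not part of this patch"),
	  PARSE_OPT_NONEG, parse_frotz_cb },
	OPT_END()
};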
d->use_editor = 1; return parse_reuse_arg(opt, arg, unset); } diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 2eba0a695b..411aefd687 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -24,6 +24,7 @@ #include "streaming.h" #include "thread-utils.h" #include "pack-bitmap.h" +#include "delta-islands.h" #include "reachable.h" #include "sha1-array.h" #include "argv-array.h" @@ -31,6 +32,7 @@ #include "packfile.h" #include "object-store.h" #include "dir.h" +#include "midx.h" #define IN_PACK(obj) oe_in_pack(&to_pack, obj) #define SIZE(obj) oe_size(&to_pack, obj) @@ -40,6 +42,7 @@ #define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) #define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) #define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) +#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid) #define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val) #define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) #define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) @@ -59,6 +62,8 @@ static struct packing_data to_pack; static struct pack_idx_entry **written_list; static uint32_t nr_result, nr_written, nr_seen; +static struct bitmap_index *bitmap_git; +static uint32_t write_layer; static int non_empty; static int reuse_delta = 1, reuse_object = 1; @@ -79,6 +84,7 @@ static unsigned long pack_size_limit; static int depth = 50; static int delta_search_threads; static int pack_to_stdout; +static int thin; static int num_preferred_base; static struct progress *progress_state; @@ -93,6 +99,8 @@ static uint16_t write_bitmap_options; static int exclude_promisor_objects; +static int use_delta_islands; + static unsigned long delta_cache_size = 0; static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE; static unsigned long cache_max_small_delta_size = 1000; @@ -612,7 +620,7 @@ static inline void add_to_write_order(struct object_entry **wo, unsigned int *endp, struct object_entry *e) { - if (e->filled) + if (e->filled || oe_layer(&to_pack, e) != write_layer) return; wo[(*endp)++] = e; e->filled = 1; @@ -672,48 +680,15 @@ static void add_family_to_write_order(struct object_entry **wo, add_descendants_to_write_order(wo, endp, root); } -static struct object_entry **compute_write_order(void) +static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end) { - unsigned int i, wo_end, last_untagged; - - struct object_entry **wo; + unsigned int i, last_untagged; struct object_entry *objects = to_pack.objects; for (i = 0; i < to_pack.nr_objects; i++) { - objects[i].tagged = 0; - objects[i].filled = 0; - SET_DELTA_CHILD(&objects[i], NULL); - SET_DELTA_SIBLING(&objects[i], NULL); - } - - /* - * Fully connect delta_child/delta_sibling network. - * Make sure delta_sibling is sorted in the original - * recency order. - */ - for (i = to_pack.nr_objects; i > 0;) { - struct object_entry *e = &objects[--i]; - if (!DELTA(e)) - continue; - /* Mark me as the first child */ - e->delta_sibling_idx = DELTA(e)->delta_child_idx; - SET_DELTA_CHILD(DELTA(e), e); - } - - /* - * Mark objects that are at the tip of tags. - */ - for_each_tag_ref(mark_tagged, NULL); - - /* - * Give the objects in the original recency order until - * we see a tagged tip. 
- */ - ALLOC_ARRAY(wo, to_pack.nr_objects); - for (i = wo_end = 0; i < to_pack.nr_objects; i++) { if (objects[i].tagged) break; - add_to_write_order(wo, &wo_end, &objects[i]); + add_to_write_order(wo, wo_end, &objects[i]); } last_untagged = i; @@ -722,7 +697,7 @@ static struct object_entry **compute_write_order(void) */ for (; i < to_pack.nr_objects; i++) { if (objects[i].tagged) - add_to_write_order(wo, &wo_end, &objects[i]); + add_to_write_order(wo, wo_end, &objects[i]); } /* @@ -732,7 +707,7 @@ static struct object_entry **compute_write_order(void) if (oe_type(&objects[i]) != OBJ_COMMIT && oe_type(&objects[i]) != OBJ_TAG) continue; - add_to_write_order(wo, &wo_end, &objects[i]); + add_to_write_order(wo, wo_end, &objects[i]); } /* @@ -741,16 +716,60 @@ static struct object_entry **compute_write_order(void) for (i = last_untagged; i < to_pack.nr_objects; i++) { if (oe_type(&objects[i]) != OBJ_TREE) continue; - add_to_write_order(wo, &wo_end, &objects[i]); + add_to_write_order(wo, wo_end, &objects[i]); } /* * Finally all the rest in really tight order */ for (i = last_untagged; i < to_pack.nr_objects; i++) { - if (!objects[i].filled) - add_family_to_write_order(wo, &wo_end, &objects[i]); + if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer) + add_family_to_write_order(wo, wo_end, &objects[i]); } +} + +static struct object_entry **compute_write_order(void) +{ + uint32_t max_layers = 1; + unsigned int i, wo_end; + + struct object_entry **wo; + struct object_entry *objects = to_pack.objects; + + for (i = 0; i < to_pack.nr_objects; i++) { + objects[i].tagged = 0; + objects[i].filled = 0; + SET_DELTA_CHILD(&objects[i], NULL); + SET_DELTA_SIBLING(&objects[i], NULL); + } + + /* + * Fully connect delta_child/delta_sibling network. + * Make sure delta_sibling is sorted in the original + * recency order. + */ + for (i = to_pack.nr_objects; i > 0;) { + struct object_entry *e = &objects[--i]; + if (!DELTA(e)) + continue; + /* Mark me as the first child */ + e->delta_sibling_idx = DELTA(e)->delta_child_idx; + SET_DELTA_CHILD(DELTA(e), e); + } + + /* + * Mark objects that are at the tip of tags. 
+ */ + for_each_tag_ref(mark_tagged, NULL); + + if (use_delta_islands) + max_layers = compute_pack_layers(&to_pack); + + ALLOC_ARRAY(wo, to_pack.nr_objects); + wo_end = 0; + + for (; write_layer < max_layers; ++write_layer) + compute_layer_order(wo, &wo_end); if (wo_end != to_pack.nr_objects) die(_("ordered %u objects, expected %"PRIu32), @@ -1039,6 +1058,7 @@ static int want_object_in_pack(const struct object_id *oid, { int want; struct list_head *pos; + struct multi_pack_index *m; if (!exclude && local && has_loose_object_nonlocal(oid)) return 0; @@ -1053,6 +1073,32 @@ static int want_object_in_pack(const struct object_id *oid, if (want != -1) return want; } + + for (m = get_multi_pack_index(the_repository); m; m = m->next) { + struct pack_entry e; + if (fill_midx_entry(oid, &e, m)) { + struct packed_git *p = e.p; + off_t offset; + + if (p == *found_pack) + offset = *found_offset; + else + offset = find_pack_entry_one(oid->hash, p); + + if (offset) { + if (!*found_pack) { + if (!is_pack_valid(p)) + continue; + *found_offset = offset; + *found_pack = p; + } + want = want_found_object(exclude, p); + if (want != -1) + return want; + } + } + } + list_for_each(pos, get_packed_git_mru(the_repository)) { struct packed_git *p = list_entry(pos, struct packed_git, mru); off_t offset; @@ -1201,7 +1247,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid) */ for (neigh = 0; neigh < 8; neigh++) { ent = pbase_tree_cache[my_ix]; - if (ent && !oidcmp(&ent->oid, oid)) { + if (ent && oideq(&ent->oid, oid)) { ent->ref++; return ent; } @@ -1383,7 +1429,7 @@ static void add_preferred_base(struct object_id *oid) return; for (it = pbase_tree; it; it = it->next) { - if (!oidcmp(&it->pcache.oid, &tree_oid)) { + if (oideq(&it->pcache.oid, &tree_oid)) { free(data); return; } @@ -1423,6 +1469,57 @@ static void cleanup_preferred_base(void) done_pbase_paths_num = done_pbase_paths_alloc = 0; } +/* + * Return 1 iff the object specified by "delta" can be sent + * literally as a delta against the base in "base_sha1". If + * so, then *base_out will point to the entry in our packing + * list, or NULL if we must use the external-base list. + * + * Depth value does not matter - find_deltas() will + * never consider reused delta as the base object to + * deltify other objects against, in order to avoid + * circular deltas. + */ +static int can_reuse_delta(const unsigned char *base_sha1, + struct object_entry *delta, + struct object_entry **base_out) +{ + struct object_entry *base; + + if (!base_sha1) + return 0; + + /* + * First see if we're already sending the base (or it's explicitly in + * our "excluded" list). + */ + base = packlist_find(&to_pack, base_sha1, NULL); + if (base) { + if (!in_same_island(&delta->idx.oid, &base->idx.oid)) + return 0; + *base_out = base; + return 1; + } + + /* + * Otherwise, reachability bitmaps may tell us if the receiver has it, + * even if it was buried too deep in history to make it into the + * packing list. 
+ */ + if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) { + if (use_delta_islands) { + struct object_id base_oid; + hashcpy(base_oid.hash, base_sha1); + if (!in_same_island(&delta->idx.oid, &base_oid)) + return 0; + } + *base_out = NULL; + return 1; + } + + return 0; +} + static void check_object(struct object_entry *entry) { unsigned long canonical_size; @@ -1509,23 +1606,19 @@ static void check_object(struct object_entry *entry) break; } - if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) { - /* - * If base_ref was set above that means we wish to - * reuse delta data, and we even found that base - * in the list of objects we want to pack. Goodie! - * - * Depth value does not matter - find_deltas() will - * never consider reused delta as the base object to - * deltify other objects against, in order to avoid - * circular deltas. - */ + if (can_reuse_delta(base_ref, entry, &base_entry)) { oe_set_type(entry, entry->in_pack_type); SET_SIZE(entry, in_pack_size); /* delta size */ - SET_DELTA(entry, base_entry); SET_DELTA_SIZE(entry, in_pack_size); - entry->delta_sibling_idx = base_entry->delta_child_idx; - SET_DELTA_CHILD(base_entry, entry); + + if (base_entry) { + SET_DELTA(entry, base_entry); + entry->delta_sibling_idx = base_entry->delta_child_idx; + SET_DELTA_CHILD(base_entry, entry); + } else { + SET_DELTA_EXT(entry, base_ref); + } + unuse_pack(&w_curs); return; } @@ -1825,6 +1918,11 @@ static int type_size_sort(const void *_a, const void *_b) return -1; if (a->preferred_base < b->preferred_base) return 1; + if (use_delta_islands) { + int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid); + if (island_cmp) + return island_cmp; + } if (a_size > b_size) return -1; if (a_size < b_size) @@ -1855,8 +1953,6 @@ static int delta_cacheable(unsigned long src_size, unsigned long trg_size, return 0; } -#ifndef NO_PTHREADS - /* Protect access to object database */ static pthread_mutex_t read_mutex; #define read_lock() pthread_mutex_lock(&read_mutex) @@ -1881,16 +1977,6 @@ static pthread_mutex_t progress_mutex; * ahead in the list because they can be stolen and would need * progress_mutex for protection. 
*/ -#else - -#define read_lock() (void)0 -#define read_unlock() (void)0 -#define cache_lock() (void)0 -#define cache_unlock() (void)0 -#define progress_lock() (void)0 -#define progress_unlock() (void)0 - -#endif /* * Return the size of the object without doing any delta @@ -1985,6 +2071,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, if (trg_size < src_size / 32) return 0; + if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid)) + return 0; + /* Load data if not already done */ if (!trg->data) { read_lock(); @@ -1994,9 +2083,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, die(_("object %s cannot be read"), oid_to_hex(&trg_entry->idx.oid)); if (sz != trg_size) - die(_("object %s inconsistent object length (%lu vs %lu)"), - oid_to_hex(&trg_entry->idx.oid), sz, - trg_size); + die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"), + oid_to_hex(&trg_entry->idx.oid), (uintmax_t)sz, + (uintmax_t)trg_size); *mem_usage += sz; } if (!src->data) { @@ -2021,9 +2110,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, oid_to_hex(&src_entry->idx.oid)); } if (sz != src_size) - die(_("object %s inconsistent object length (%lu vs %lu)"), - oid_to_hex(&src_entry->idx.oid), sz, - src_size); + die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"), + oid_to_hex(&src_entry->idx.oid), (uintmax_t)sz, + (uintmax_t)src_size); *mem_usage += sz; } if (!src->index) { @@ -2246,8 +2335,6 @@ static void find_deltas(struct object_entry **list, unsigned *list_size, free(array); } -#ifndef NO_PTHREADS - static void try_to_free_from_threads(size_t size) { read_lock(); @@ -2476,10 +2563,6 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size, free(p); } -#else -#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p) -#endif - static void add_tag_chain(const struct object_id *oid) { struct tag *tag; @@ -2526,6 +2609,9 @@ static void prepare_pack(int window, int depth) uint32_t i, nr_deltas; unsigned n; + if (use_delta_islands) + resolve_tree_islands(progress, &to_pack); + get_object_details(); /* @@ -2629,12 +2715,10 @@ static int git_pack_config(const char *k, const char *v, void *cb) if (delta_search_threads < 0) die(_("invalid number of threads specified (%d)"), delta_search_threads); -#ifdef NO_PTHREADS - if (delta_search_threads != 1) { + if (!HAVE_THREADS && delta_search_threads != 1) { warning(_("no threads support, ignoring %s"), k); delta_search_threads = 0; } -#endif return 0; } if (!strcmp(k, "pack.indexversion")) { @@ -2689,6 +2773,9 @@ static void show_commit(struct commit *commit, void *data) if (write_bitmap_index) index_commit_for_bitmap(commit); + + if (use_delta_islands) + propagate_island_marks(commit); } static void show_object(struct object *obj, const char *name, void *data) @@ -2696,6 +2783,21 @@ static void show_object(struct object *obj, const char *name, void *data) add_preferred_base_object(name); add_object_entry(&obj->oid, obj->type, name, 0); obj->flags |= OBJECT_ADDED; + + if (use_delta_islands) { + const char *p; + unsigned depth; + struct object_entry *ent; + + /* the empty string is a root tree, which is depth 0 */ + depth = *name ? 
1 : 0; + for (p = strchr(name, '/'); p; p = strchr(p + 1, '/')) + depth++; + + ent = packlist_find(&to_pack, obj->oid.hash, NULL); + if (ent && depth > oe_tree_depth(&to_pack, ent)) + oe_set_tree_depth(&to_pack, ent, depth); + } } static void show_object__ma_allow_any(struct object *obj, const char *name, void *data) @@ -2804,7 +2906,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs) memset(&in_pack, 0, sizeof(in_pack)); - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { struct object_id oid; struct object *o; @@ -2868,7 +2970,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid) struct packed_git *p; p = (last_found != (void *)1) ? last_found : - get_packed_git(the_repository); + get_all_packs(the_repository); while (p) { if ((!p->pack_local || p->pack_keep || @@ -2878,7 +2980,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid) return 1; } if (p == last_found) - p = get_packed_git(the_repository); + p = get_all_packs(the_repository); else p = p->next; if (p == last_found) @@ -2914,7 +3016,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs) uint32_t i; struct object_id oid; - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_local || p->pack_keep || p->pack_keep_in_core) continue; @@ -2949,7 +3051,6 @@ static int pack_options_allow_reuse(void) static int get_object_list_from_bitmap(struct rev_info *revs) { - struct bitmap_index *bitmap_git; if (!(bitmap_git = prepare_bitmap_walk(revs))) return -1; @@ -2965,7 +3066,6 @@ static int get_object_list_from_bitmap(struct rev_info *revs) } traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap); - free_bitmap_index(bitmap_git); return 0; } @@ -2986,8 +3086,9 @@ static void get_object_list(int ac, const char **av) struct rev_info revs; char line[1000]; int flags = 0; + int save_warning; - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); save_commit_buffer = 0; revs.allow_exclude_promisor_objects_opt = 1; setup_revisions(ac, av, &revs, NULL); @@ -2995,6 +3096,9 @@ static void get_object_list(int ac, const char **av) /* make sure shallows are read */ is_repository_shallow(the_repository); + save_warning = warn_on_object_refname_ambiguity; + warn_on_object_refname_ambiguity = 0; + while (fgets(line, sizeof(line), stdin) != NULL) { int len = strlen(line); if (len && line[len - 1] == '\n') @@ -3021,9 +3125,14 @@ static void get_object_list(int ac, const char **av) die(_("bad revision '%s'"), line); } + warn_on_object_refname_ambiguity = save_warning; + if (use_bitmap_index && !get_object_list_from_bitmap(&revs)) return; + if (use_delta_islands) + load_delta_islands(); + if (prepare_revision_walk(&revs)) die(_("revision walk setup failed")); mark_edges_uninteresting(&revs, show_edge); @@ -3062,7 +3171,7 @@ static void add_extra_kept_packs(const struct string_list *names) if (!names->nr) return; - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { const char *name = basename(p->pack_name); int i; @@ -3086,6 +3195,9 @@ static int option_parse_index_version(const struct option *opt, { char *c; const char *val = arg; + + BUG_ON_OPT_NEG(unset); + pack_idx_opts.version = strtoul(val, &c, 10); if (pack_idx_opts.version > 2) die(_("unsupported index version %s"), val); @@ -3114,7 +3226,6 @@ static int 
option_parse_unpack_unreachable(const struct option *opt, int cmd_pack_objects(int argc, const char **argv, const char *prefix) { int use_internal_rev_list = 0; - int thin = 0; int shallow = 0; int all_progress_implied = 0; struct argv_array rp = ARGV_ARRAY_INIT; @@ -3133,7 +3244,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) N_("similar to --all-progress when progress meter is shown")), { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"), N_("write the pack index file in the specified idx format version"), - 0, option_parse_index_version }, + PARSE_OPT_NONEG, option_parse_index_version }, OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit, N_("maximum size of each output pack file")), OPT_BOOL(0, "local", &local, @@ -3203,6 +3314,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) option_parse_missing_action }, OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects, N_("do not pack objects in promisor packfiles")), + OPT_BOOL(0, "delta-islands", &use_delta_islands, + N_("respect islands during delta compression")), OPT_END(), }; @@ -3280,10 +3393,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) if (!delta_search_threads) /* --threads=0 means autodetect */ delta_search_threads = online_cpus(); -#ifdef NO_PTHREADS - if (delta_search_threads != 1) + if (!HAVE_THREADS && delta_search_threads != 1) warning(_("no threads support, ignoring --threads")); -#endif if (!pack_to_stdout && !pack_size_limit) pack_size_limit = pack_size_limit_cfg; if (pack_to_stdout && pack_size_limit) @@ -3329,13 +3440,16 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) if (pack_to_stdout || !rev_list_all) write_bitmap_index = 0; + if (use_delta_islands) + argv_array_push(&rp, "--topo-order"); + if (progress && all_progress_implied) progress = 2; add_extra_kept_packs(&keep_pack_list); if (ignore_packed_keep_on_disk) { struct packed_git *p; - for (p = get_packed_git(the_repository); p; p = p->next) + for (p = get_all_packs(the_repository); p; p = p->next) if (p->pack_local && p->pack_keep) break; if (!p) /* no keep-able packs found */ @@ -3348,7 +3462,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) * it also covers non-local objects */ struct packed_git *p; - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_local) { have_non_local_packs = 1; break; diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c index 0494dceff7..cf9a9aabd4 100644 --- a/builtin/pack-redundant.c +++ b/builtin/pack-redundant.c @@ -577,7 +577,7 @@ static struct pack_list * add_pack(struct packed_git *p) static struct pack_list * add_pack_file(const char *filename) { - struct packed_git *p = get_packed_git(the_repository); + struct packed_git *p = get_all_packs(the_repository); if (strlen(filename) < 40) die("Bad pack filename: %s", filename); @@ -592,7 +592,7 @@ static struct pack_list * add_pack_file(const char *filename) static void load_all(void) { - struct packed_git *p = get_packed_git(the_repository); + struct packed_git *p = get_all_packs(the_repository); while (p) { add_pack(p); diff --git a/builtin/prune.c b/builtin/prune.c index d356ad70f6..e42653b99c 100644 --- a/builtin/prune.c +++ b/builtin/prune.c @@ -121,7 +121,7 @@ int cmd_prune(int argc, const char **argv, const char *prefix) read_replace_refs = 0; ref_paranoia = 1; revs.allow_exclude_promisor_objects_opt = 1; - init_revisions(&revs, 
prefix); + repo_init_revisions(the_repository, &revs, prefix); argc = parse_options(argc, argv, prefix, options, prune_usage, 0); diff --git a/builtin/pull.c b/builtin/pull.c index 681c127a07..1b90622b13 100644 --- a/builtin/pull.c +++ b/builtin/pull.c @@ -22,6 +22,7 @@ #include "tempfile.h" #include "lockfile.h" #include "wt-status.h" +#include "commit-reach.h" enum rebase_type { REBASE_INVALID = -1, @@ -556,13 +557,26 @@ static int run_fetch(const char *repo, const char **refspecs) static int pull_into_void(const struct object_id *merge_head, const struct object_id *curr_head) { + if (opt_verify_signatures) { + struct commit *commit; + + commit = lookup_commit(the_repository, merge_head); + if (!commit) + die(_("unable to access commit %s"), + oid_to_hex(merge_head)); + + verify_merge_signature(commit, opt_verbosity); + } + /* * Two-way merge: we treat the index as based on an empty tree, * and try to fast-forward to HEAD. This ensures we will not lose * index/worktree changes that the user already made on the unborn * branch. */ - if (checkout_fast_forward(the_hash_algo->empty_tree, merge_head, 0)) + if (checkout_fast_forward(the_repository, + the_hash_algo->empty_tree, + merge_head, 0)) return 1; if (update_ref("initial pull", "HEAD", merge_head, curr_head, 0, UPDATE_REFS_DIE_ON_ERR)) @@ -799,7 +813,7 @@ static int run_rebase(const struct object_id *curr_head, struct argv_array args = ARGV_ARRAY_INIT; if (!get_octopus_merge_base(&oct_merge_base, curr_head, merge_head, fork_point)) - if (!is_null_oid(fork_point) && !oidcmp(&oct_merge_base, fork_point)) + if (!is_null_oid(fork_point) && oideq(&oct_merge_base, fork_point)) fork_point = NULL; argv_array_push(&args, "rebase"); @@ -902,7 +916,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix) oidclr(&curr_head); if (!is_null_oid(&orig_head) && !is_null_oid(&curr_head) && - oidcmp(&orig_head, &curr_head)) { + !oideq(&orig_head, &curr_head)) { /* * The fetch involved updating the current branch. 
* @@ -915,7 +929,8 @@ int cmd_pull(int argc, const char **argv, const char *prefix) "fast-forwarding your working tree from\n" "commit %s."), oid_to_hex(&orig_head)); - if (checkout_fast_forward(&orig_head, &curr_head, 0)) + if (checkout_fast_forward(the_repository, &orig_head, + &curr_head, 0)) die(_("Cannot fast-forward your working tree.\n" "After making sure that you saved anything precious from\n" "$ git diff %s\n" @@ -941,7 +956,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix) int ret = 0; if ((recurse_submodules == RECURSE_SUBMODULES_ON || recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND) && - submodule_touches_in_range(&rebase_fork_point, &curr_head)) + submodule_touches_in_range(the_repository, &rebase_fork_point, &curr_head)) die(_("cannot rebase with locally recorded submodule modifications")); if (!autostash) { struct commit_list *list = NULL; diff --git a/builtin/range-diff.c b/builtin/range-diff.c index 0aa9bed41f..f01a0be851 100644 --- a/builtin/range-diff.c +++ b/builtin/range-diff.c @@ -11,14 +11,9 @@ N_("git range-diff [<options>] <base> <old-tip> <new-tip>"), NULL }; -static struct strbuf *output_prefix_cb(struct diff_options *opt, void *data) -{ - return data; -} - int cmd_range_diff(int argc, const char **argv, const char *prefix) { - int creation_factor = 60; + int creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT; struct diff_options diffopt = { NULL }; int simple_color = -1; struct option options[] = { @@ -29,17 +24,11 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix) OPT_END() }; int i, j, res = 0; - struct strbuf four_spaces = STRBUF_INIT; struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT; git_config(git_diff_ui_config, NULL); - diff_setup(&diffopt); - diffopt.output_format = DIFF_FORMAT_PATCH; - diffopt.flags.suppress_diff_headers = 1; - diffopt.output_prefix = output_prefix_cb; - strbuf_addstr(&four_spaces, " "); - diffopt.output_prefix_data = &four_spaces; + repo_diff_setup(the_repository, &diffopt); argc = parse_options(argc, argv, NULL, options, builtin_range_diff_usage, PARSE_OPT_KEEP_UNKNOWN | @@ -63,12 +52,9 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix) options + ARRAY_SIZE(options) - 1, /* OPT_END */ builtin_range_diff_usage, 0); - if (simple_color < 1) { - if (!simple_color) - /* force color when --dual-color was used */ - diffopt.use_color = 1; - diffopt.flags.dual_color_diffed_diffs = 1; - } + /* force color when --dual-color was used */ + if (!simple_color) + diffopt.use_color = 1; if (argc == 2) { if (!strstr(argv[0], "..")) @@ -106,11 +92,10 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix) } res = show_range_diff(range1.buf, range2.buf, creation_factor, - &diffopt); + simple_color < 1, &diffopt); strbuf_release(&range1); strbuf_release(&range2); - strbuf_release(&four_spaces); return res; } diff --git a/builtin/read-tree.c b/builtin/read-tree.c index fbbc98e516..183ee8c1e5 100644 --- a/builtin/read-tree.c +++ b/builtin/read-tree.c @@ -44,6 +44,7 @@ static const char * const read_tree_usage[] = { static int index_output_cb(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); set_alternate_index_output(arg); return 0; } @@ -54,6 +55,8 @@ static int exclude_per_directory_cb(const struct option *opt, const char *arg, struct dir_struct *dir; struct unpack_trees_options *opts; + BUG_ON_OPT_NEG(unset); + opts = (struct unpack_trees_options *)opt->value; if (opts->dir) diff --git a/builtin/rebase--helper.c b/builtin/rebase--helper.c 
deleted file mode 100644 index f7c2a5fdc8..0000000000 --- a/builtin/rebase--helper.c +++ /dev/null @@ -1,88 +0,0 @@ -#include "builtin.h" -#include "cache.h" -#include "config.h" -#include "parse-options.h" -#include "sequencer.h" - -static const char * const builtin_rebase_helper_usage[] = { - N_("git rebase--helper [<options>]"), - NULL -}; - -int cmd_rebase__helper(int argc, const char **argv, const char *prefix) -{ - struct replay_opts opts = REPLAY_OPTS_INIT; - unsigned flags = 0, keep_empty = 0, rebase_merges = 0; - int abbreviate_commands = 0, rebase_cousins = -1; - enum { - CONTINUE = 1, ABORT, MAKE_SCRIPT, SHORTEN_OIDS, EXPAND_OIDS, - CHECK_TODO_LIST, SKIP_UNNECESSARY_PICKS, REARRANGE_SQUASH, - ADD_EXEC - } command = 0; - struct option options[] = { - OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")), - OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")), - OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message, - N_("allow commits with empty messages")), - OPT_BOOL(0, "rebase-merges", &rebase_merges, N_("rebase merge commits")), - OPT_BOOL(0, "rebase-cousins", &rebase_cousins, - N_("keep original branch points of cousins")), - OPT_CMDMODE(0, "continue", &command, N_("continue rebase"), - CONTINUE), - OPT_CMDMODE(0, "abort", &command, N_("abort rebase"), - ABORT), - OPT_CMDMODE(0, "make-script", &command, - N_("make rebase script"), MAKE_SCRIPT), - OPT_CMDMODE(0, "shorten-ids", &command, - N_("shorten commit ids in the todo list"), SHORTEN_OIDS), - OPT_CMDMODE(0, "expand-ids", &command, - N_("expand commit ids in the todo list"), EXPAND_OIDS), - OPT_CMDMODE(0, "check-todo-list", &command, - N_("check the todo list"), CHECK_TODO_LIST), - OPT_CMDMODE(0, "skip-unnecessary-picks", &command, - N_("skip unnecessary picks"), SKIP_UNNECESSARY_PICKS), - OPT_CMDMODE(0, "rearrange-squash", &command, - N_("rearrange fixup/squash lines"), REARRANGE_SQUASH), - OPT_CMDMODE(0, "add-exec-commands", &command, - N_("insert exec commands in todo list"), ADD_EXEC), - OPT_END() - }; - - sequencer_init_config(&opts); - git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands); - - opts.action = REPLAY_INTERACTIVE_REBASE; - opts.allow_ff = 1; - opts.allow_empty = 1; - - argc = parse_options(argc, argv, NULL, options, - builtin_rebase_helper_usage, PARSE_OPT_KEEP_ARGV0); - - flags |= keep_empty ? TODO_LIST_KEEP_EMPTY : 0; - flags |= abbreviate_commands ? TODO_LIST_ABBREVIATE_CMDS : 0; - flags |= rebase_merges ? TODO_LIST_REBASE_MERGES : 0; - flags |= rebase_cousins > 0 ? TODO_LIST_REBASE_COUSINS : 0; - flags |= command == SHORTEN_OIDS ? 
TODO_LIST_SHORTEN_IDS : 0; - - if (rebase_cousins >= 0 && !rebase_merges) - warning(_("--[no-]rebase-cousins has no effect without " - "--rebase-merges")); - - if (command == CONTINUE && argc == 1) - return !!sequencer_continue(&opts); - if (command == ABORT && argc == 1) - return !!sequencer_remove_state(&opts); - if (command == MAKE_SCRIPT && argc > 1) - return !!sequencer_make_script(stdout, argc, argv, flags); - if ((command == SHORTEN_OIDS || command == EXPAND_OIDS) && argc == 1) - return !!transform_todos(flags); - if (command == CHECK_TODO_LIST && argc == 1) - return !!check_todo_list(); - if (command == SKIP_UNNECESSARY_PICKS && argc == 1) - return !!skip_unnecessary_picks(); - if (command == REARRANGE_SQUASH && argc == 1) - return !!rearrange_squash(); - if (command == ADD_EXEC && argc == 2) - return !!sequencer_add_exec_commands(argv[1]); - usage_with_options(builtin_rebase_helper_usage, options); -} diff --git a/builtin/rebase--interactive.c b/builtin/rebase--interactive.c new file mode 100644 index 0000000000..a2ab68ed06 --- /dev/null +++ b/builtin/rebase--interactive.c @@ -0,0 +1,271 @@ +#include "builtin.h" +#include "cache.h" +#include "config.h" +#include "parse-options.h" +#include "sequencer.h" +#include "rebase-interactive.h" +#include "argv-array.h" +#include "refs.h" +#include "rerere.h" +#include "run-command.h" + +static GIT_PATH_FUNC(path_state_dir, "rebase-merge/") +static GIT_PATH_FUNC(path_squash_onto, "rebase-merge/squash-onto") +static GIT_PATH_FUNC(path_interactive, "rebase-merge/interactive") + +static int get_revision_ranges(const char *upstream, const char *onto, + const char **head_hash, + char **revisions, char **shortrevisions) +{ + const char *base_rev = upstream ? upstream : onto, *shorthead; + struct object_id orig_head; + + if (get_oid("HEAD", &orig_head)) + return error(_("no HEAD?")); + + *head_hash = find_unique_abbrev(&orig_head, GIT_MAX_HEXSZ); + *revisions = xstrfmt("%s...%s", base_rev, *head_hash); + + shorthead = find_unique_abbrev(&orig_head, DEFAULT_ABBREV); + + if (upstream) { + const char *shortrev; + struct object_id rev_oid; + + get_oid(base_rev, &rev_oid); + shortrev = find_unique_abbrev(&rev_oid, DEFAULT_ABBREV); + + *shortrevisions = xstrfmt("%s..%s", shortrev, shorthead); + } else + *shortrevisions = xstrdup(shorthead); + + return 0; +} + +static int init_basic_state(struct replay_opts *opts, const char *head_name, + const char *onto, const char *orig_head) +{ + FILE *interactive; + + if (!is_directory(path_state_dir()) && mkdir_in_gitdir(path_state_dir())) + return error_errno(_("could not create temporary %s"), path_state_dir()); + + delete_reflog("REBASE_HEAD"); + + interactive = fopen(path_interactive(), "w"); + if (!interactive) + return error_errno(_("could not mark as interactive")); + fclose(interactive); + + return write_basic_state(opts, head_name, onto, orig_head); +} + +static int do_interactive_rebase(struct replay_opts *opts, unsigned flags, + const char *switch_to, const char *upstream, + const char *onto, const char *onto_name, + const char *squash_onto, const char *head_name, + const char *restrict_revision, char *raw_strategies, + const char *cmd, unsigned autosquash) +{ + int ret; + const char *head_hash = NULL; + char *revisions = NULL, *shortrevisions = NULL; + struct argv_array make_script_args = ARGV_ARRAY_INIT; + FILE *todo_list; + + if (prepare_branch_to_be_rebased(opts, switch_to)) + return -1; + + if (get_revision_ranges(upstream, onto, &head_hash, + &revisions, &shortrevisions)) + return -1; + + if 
(raw_strategies) + parse_strategy_opts(opts, raw_strategies); + + if (init_basic_state(opts, head_name, onto, head_hash)) { + free(revisions); + free(shortrevisions); + + return -1; + } + + if (!upstream && squash_onto) + write_file(path_squash_onto(), "%s\n", squash_onto); + + todo_list = fopen(rebase_path_todo(), "w"); + if (!todo_list) { + free(revisions); + free(shortrevisions); + + return error_errno(_("could not open %s"), rebase_path_todo()); + } + + argv_array_pushl(&make_script_args, "", revisions, NULL); + if (restrict_revision) + argv_array_push(&make_script_args, restrict_revision); + + ret = sequencer_make_script(todo_list, + make_script_args.argc, make_script_args.argv, + flags); + fclose(todo_list); + + if (ret) + error(_("could not generate todo list")); + else { + discard_cache(); + ret = complete_action(opts, flags, shortrevisions, onto_name, onto, + head_hash, cmd, autosquash); + } + + free(revisions); + free(shortrevisions); + argv_array_clear(&make_script_args); + + return ret; +} + +static const char * const builtin_rebase_interactive_usage[] = { + N_("git rebase--interactive [<options>]"), + NULL +}; + +int cmd_rebase__interactive(int argc, const char **argv, const char *prefix) +{ + struct replay_opts opts = REPLAY_OPTS_INIT; + unsigned flags = 0, keep_empty = 0, rebase_merges = 0, autosquash = 0; + int abbreviate_commands = 0, rebase_cousins = -1, ret = 0; + const char *onto = NULL, *onto_name = NULL, *restrict_revision = NULL, + *squash_onto = NULL, *upstream = NULL, *head_name = NULL, + *switch_to = NULL, *cmd = NULL; + char *raw_strategies = NULL; + enum { + NONE = 0, CONTINUE, SKIP, EDIT_TODO, SHOW_CURRENT_PATCH, + SHORTEN_OIDS, EXPAND_OIDS, CHECK_TODO_LIST, REARRANGE_SQUASH, ADD_EXEC + } command = 0; + struct option options[] = { + OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")), + OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")), + OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message, + N_("allow commits with empty messages")), + OPT_BOOL(0, "rebase-merges", &rebase_merges, N_("rebase merge commits")), + OPT_BOOL(0, "rebase-cousins", &rebase_cousins, + N_("keep original branch points of cousins")), + OPT_BOOL(0, "autosquash", &autosquash, + N_("move commits that begin with squash!/fixup!")), + OPT_BOOL(0, "signoff", &opts.signoff, N_("sign commits")), + OPT__VERBOSE(&opts.verbose, N_("be verbose")), + OPT_CMDMODE(0, "continue", &command, N_("continue rebase"), + CONTINUE), + OPT_CMDMODE(0, "skip", &command, N_("skip commit"), SKIP), + OPT_CMDMODE(0, "edit-todo", &command, N_("edit the todo list"), + EDIT_TODO), + OPT_CMDMODE(0, "show-current-patch", &command, N_("show the current patch"), + SHOW_CURRENT_PATCH), + OPT_CMDMODE(0, "shorten-ids", &command, + N_("shorten commit ids in the todo list"), SHORTEN_OIDS), + OPT_CMDMODE(0, "expand-ids", &command, + N_("expand commit ids in the todo list"), EXPAND_OIDS), + OPT_CMDMODE(0, "check-todo-list", &command, + N_("check the todo list"), CHECK_TODO_LIST), + OPT_CMDMODE(0, "rearrange-squash", &command, + N_("rearrange fixup/squash lines"), REARRANGE_SQUASH), + OPT_CMDMODE(0, "add-exec-commands", &command, + N_("insert exec commands in todo list"), ADD_EXEC), + OPT_STRING(0, "onto", &onto, N_("onto"), N_("onto")), + OPT_STRING(0, "restrict-revision", &restrict_revision, + N_("restrict-revision"), N_("restrict revision")), + OPT_STRING(0, "squash-onto", &squash_onto, N_("squash-onto"), + N_("squash onto")), + OPT_STRING(0, "upstream", &upstream, N_("upstream"), + N_("the upstream 
commit")), + OPT_STRING(0, "head-name", &head_name, N_("head-name"), N_("head name")), + { OPTION_STRING, 'S', "gpg-sign", &opts.gpg_sign, N_("key-id"), + N_("GPG-sign commits"), + PARSE_OPT_OPTARG, NULL, (intptr_t) "" }, + OPT_STRING(0, "strategy", &opts.strategy, N_("strategy"), + N_("rebase strategy")), + OPT_STRING(0, "strategy-opts", &raw_strategies, N_("strategy-opts"), + N_("strategy options")), + OPT_STRING(0, "switch-to", &switch_to, N_("switch-to"), + N_("the branch or commit to checkout")), + OPT_STRING(0, "onto-name", &onto_name, N_("onto-name"), N_("onto name")), + OPT_STRING(0, "cmd", &cmd, N_("cmd"), N_("the command to run")), + OPT_RERERE_AUTOUPDATE(&opts.allow_rerere_auto), + OPT_END() + }; + + sequencer_init_config(&opts); + git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands); + + opts.action = REPLAY_INTERACTIVE_REBASE; + opts.allow_ff = 1; + opts.allow_empty = 1; + + if (argc == 1) + usage_with_options(builtin_rebase_interactive_usage, options); + + argc = parse_options(argc, argv, NULL, options, + builtin_rebase_interactive_usage, PARSE_OPT_KEEP_ARGV0); + + opts.gpg_sign = xstrdup_or_null(opts.gpg_sign); + + flags |= keep_empty ? TODO_LIST_KEEP_EMPTY : 0; + flags |= abbreviate_commands ? TODO_LIST_ABBREVIATE_CMDS : 0; + flags |= rebase_merges ? TODO_LIST_REBASE_MERGES : 0; + flags |= rebase_cousins > 0 ? TODO_LIST_REBASE_COUSINS : 0; + flags |= command == SHORTEN_OIDS ? TODO_LIST_SHORTEN_IDS : 0; + + if (rebase_cousins >= 0 && !rebase_merges) + warning(_("--[no-]rebase-cousins has no effect without " + "--rebase-merges")); + + switch (command) { + case NONE: + if (!onto && !upstream) + die(_("a base commit must be provided with --upstream or --onto")); + + ret = do_interactive_rebase(&opts, flags, switch_to, upstream, onto, + onto_name, squash_onto, head_name, restrict_revision, + raw_strategies, cmd, autosquash); + break; + case SKIP: { + struct string_list merge_rr = STRING_LIST_INIT_DUP; + + rerere_clear(&merge_rr); + /* fallthrough */ + case CONTINUE: + ret = sequencer_continue(&opts); + break; + } + case EDIT_TODO: + ret = edit_todo_list(flags); + break; + case SHOW_CURRENT_PATCH: { + struct child_process cmd = CHILD_PROCESS_INIT; + + cmd.git_cmd = 1; + argv_array_pushl(&cmd.args, "show", "REBASE_HEAD", "--", NULL); + ret = run_command(&cmd); + + break; + } + case SHORTEN_OIDS: + case EXPAND_OIDS: + ret = transform_todos(flags); + break; + case CHECK_TODO_LIST: + ret = check_todo_list(); + break; + case REARRANGE_SQUASH: + ret = rearrange_squash(); + break; + case ADD_EXEC: + ret = sequencer_add_exec_commands(cmd); + break; + default: + BUG("invalid command '%d'", command); + } + + return !!ret; +} diff --git a/builtin/rebase.c b/builtin/rebase.c new file mode 100644 index 0000000000..5b3e5baec8 --- /dev/null +++ b/builtin/rebase.c @@ -0,0 +1,1554 @@ +/* + * "git rebase" builtin command + * + * Copyright (c) 2018 Pratik Karki + */ + +#include "builtin.h" +#include "run-command.h" +#include "exec-cmd.h" +#include "argv-array.h" +#include "dir.h" +#include "packfile.h" +#include "refs.h" +#include "quote.h" +#include "config.h" +#include "cache-tree.h" +#include "unpack-trees.h" +#include "lockfile.h" +#include "parse-options.h" +#include "commit.h" +#include "diff.h" +#include "wt-status.h" +#include "revision.h" +#include "commit-reach.h" +#include "rerere.h" +#include "branch.h" + +static char const * const builtin_rebase_usage[] = { + N_("git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] " + "[<upstream>] [<branch>]"), + N_("git 
rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] " + "--root [<branch>]"), + N_("git rebase --continue | --abort | --skip | --edit-todo"), + NULL +}; + +static GIT_PATH_FUNC(apply_dir, "rebase-apply") +static GIT_PATH_FUNC(merge_dir, "rebase-merge") + +enum rebase_type { + REBASE_UNSPECIFIED = -1, + REBASE_AM, + REBASE_MERGE, + REBASE_INTERACTIVE, + REBASE_PRESERVE_MERGES +}; + +static int use_builtin_rebase(void) +{ + struct child_process cp = CHILD_PROCESS_INIT; + struct strbuf out = STRBUF_INIT; + int ret, env = git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1); + + if (env != -1) + return env; + + argv_array_pushl(&cp.args, + "config", "--bool", "rebase.usebuiltin", NULL); + cp.git_cmd = 1; + if (capture_command(&cp, &out, 6)) { + strbuf_release(&out); + return 1; + } + + strbuf_trim(&out); + ret = !strcmp("true", out.buf); + strbuf_release(&out); + return ret; +} + +struct rebase_options { + enum rebase_type type; + const char *state_dir; + struct commit *upstream; + const char *upstream_name; + const char *upstream_arg; + char *head_name; + struct object_id orig_head; + struct commit *onto; + const char *onto_name; + const char *revisions; + const char *switch_to; + int root; + struct object_id *squash_onto; + struct commit *restrict_revision; + int dont_finish_rebase; + enum { + REBASE_NO_QUIET = 1<<0, + REBASE_VERBOSE = 1<<1, + REBASE_DIFFSTAT = 1<<2, + REBASE_FORCE = 1<<3, + REBASE_INTERACTIVE_EXPLICIT = 1<<4, + } flags; + struct argv_array git_am_opts; + const char *action; + int signoff; + int allow_rerere_autoupdate; + int keep_empty; + int autosquash; + char *gpg_sign_opt; + int autostash; + char *cmd; + int allow_empty_message; + int rebase_merges, rebase_cousins; + char *strategy, *strategy_opts; + struct strbuf git_format_patch_opt; +}; + +static int is_interactive(struct rebase_options *opts) +{ + return opts->type == REBASE_INTERACTIVE || + opts->type == REBASE_PRESERVE_MERGES; +} + +static void imply_interactive(struct rebase_options *opts, const char *option) +{ + switch (opts->type) { + case REBASE_AM: + die(_("%s requires an interactive rebase"), option); + break; + case REBASE_INTERACTIVE: + case REBASE_PRESERVE_MERGES: + break; + case REBASE_MERGE: + /* we silently *upgrade* --merge to --interactive if needed */ + default: + opts->type = REBASE_INTERACTIVE; /* implied */ + break; + } +} + +/* Returns the filename prefixed by the state_dir */ +static const char *state_dir_path(const char *filename, struct rebase_options *opts) +{ + static struct strbuf path = STRBUF_INIT; + static size_t prefix_len; + + if (!prefix_len) { + strbuf_addf(&path, "%s/", opts->state_dir); + prefix_len = path.len; + } + + strbuf_setlen(&path, prefix_len); + strbuf_addstr(&path, filename); + return path.buf; +} + +/* Read one file, then strip line endings */ +static int read_one(const char *path, struct strbuf *buf) +{ + if (strbuf_read_file(buf, path, 0) < 0) + return error_errno(_("could not read '%s'"), path); + strbuf_trim_trailing_newline(buf); + return 0; +} + +/* Initialize the rebase options from the state directory. */ +static int read_basic_state(struct rebase_options *opts) +{ + struct strbuf head_name = STRBUF_INIT; + struct strbuf buf = STRBUF_INIT; + struct object_id oid; + + if (read_one(state_dir_path("head-name", opts), &head_name) || + read_one(state_dir_path("onto", opts), &buf)) + return -1; + opts->head_name = starts_with(head_name.buf, "refs/") ? 
+ xstrdup(head_name.buf) : NULL; + strbuf_release(&head_name); + if (get_oid(buf.buf, &oid)) + return error(_("could not get 'onto': '%s'"), buf.buf); + opts->onto = lookup_commit_or_die(&oid, buf.buf); + + /* + * We always write to orig-head, but interactive rebase used to write to + * head. Fall back to reading from head to cover for the case that the + * user upgraded git with an ongoing interactive rebase. + */ + strbuf_reset(&buf); + if (file_exists(state_dir_path("orig-head", opts))) { + if (read_one(state_dir_path("orig-head", opts), &buf)) + return -1; + } else if (read_one(state_dir_path("head", opts), &buf)) + return -1; + if (get_oid(buf.buf, &opts->orig_head)) + return error(_("invalid orig-head: '%s'"), buf.buf); + + strbuf_reset(&buf); + if (read_one(state_dir_path("quiet", opts), &buf)) + return -1; + if (buf.len) + opts->flags &= ~REBASE_NO_QUIET; + else + opts->flags |= REBASE_NO_QUIET; + + if (file_exists(state_dir_path("verbose", opts))) + opts->flags |= REBASE_VERBOSE; + + if (file_exists(state_dir_path("signoff", opts))) { + opts->signoff = 1; + opts->flags |= REBASE_FORCE; + } + + if (file_exists(state_dir_path("allow_rerere_autoupdate", opts))) { + strbuf_reset(&buf); + if (read_one(state_dir_path("allow_rerere_autoupdate", opts), + &buf)) + return -1; + if (!strcmp(buf.buf, "--rerere-autoupdate")) + opts->allow_rerere_autoupdate = 1; + else if (!strcmp(buf.buf, "--no-rerere-autoupdate")) + opts->allow_rerere_autoupdate = 0; + else + warning(_("ignoring invalid allow_rerere_autoupdate: " + "'%s'"), buf.buf); + } else + opts->allow_rerere_autoupdate = -1; + + if (file_exists(state_dir_path("gpg_sign_opt", opts))) { + strbuf_reset(&buf); + if (read_one(state_dir_path("gpg_sign_opt", opts), + &buf)) + return -1; + free(opts->gpg_sign_opt); + opts->gpg_sign_opt = xstrdup(buf.buf); + } + + if (file_exists(state_dir_path("strategy", opts))) { + strbuf_reset(&buf); + if (read_one(state_dir_path("strategy", opts), &buf)) + return -1; + free(opts->strategy); + opts->strategy = xstrdup(buf.buf); + } + + if (file_exists(state_dir_path("strategy_opts", opts))) { + strbuf_reset(&buf); + if (read_one(state_dir_path("strategy_opts", opts), &buf)) + return -1; + free(opts->strategy_opts); + opts->strategy_opts = xstrdup(buf.buf); + } + + strbuf_release(&buf); + + return 0; +} + +static int apply_autostash(struct rebase_options *opts) +{ + const char *path = state_dir_path("autostash", opts); + struct strbuf autostash = STRBUF_INIT; + struct child_process stash_apply = CHILD_PROCESS_INIT; + + if (!file_exists(path)) + return 0; + + if (read_one(path, &autostash)) + return error(_("Could not read '%s'"), path); + /* Ensure that the hash is not mistaken for a number */ + strbuf_addstr(&autostash, "^0"); + argv_array_pushl(&stash_apply.args, + "stash", "apply", autostash.buf, NULL); + stash_apply.git_cmd = 1; + stash_apply.no_stderr = stash_apply.no_stdout = + stash_apply.no_stdin = 1; + if (!run_command(&stash_apply)) + printf(_("Applied autostash.\n")); + else { + struct argv_array args = ARGV_ARRAY_INIT; + int res = 0; + + argv_array_pushl(&args, + "stash", "store", "-m", "autostash", "-q", + autostash.buf, NULL); + if (run_command_v_opt(args.argv, RUN_GIT_CMD)) + res = error(_("Cannot store %s"), autostash.buf); + argv_array_clear(&args); + strbuf_release(&autostash); + if (res) + return res; + + fprintf(stderr, + _("Applying autostash resulted in conflicts.\n" + "Your changes are safe in the stash.\n" + "You can run \"git stash pop\" or \"git stash drop\" " + "at any time.\n")); + 
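read_basic_state() above follows one recurring convention: every piece of saved rebase state lives in its own file under the state directory, and a missing file simply leaves the built-in default in place. A hedged sketch of that convention as a helper, reusing state_dir_path(), read_one() and file_exists() from the surrounding code; the helper itself and the "xyzzy" file name are illustrative and not part of the patch:

static int read_optional_state(struct rebase_options *opts,
			       const char *name, struct strbuf *out)
{
	const char *path = state_dir_path(name, opts);

	if (!file_exists(path))
		return 0;	/* absent file: the caller keeps its default */
	if (read_one(path, out))
		return -1;	/* read_one() has already reported the error */
	return 1;		/* file existed and was read into 'out' */
}

/* usage: struct strbuf buf = STRBUF_INIT; if (read_optional_state(opts, "xyzzy", &buf) > 0) ... */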
} + + strbuf_release(&autostash); + return 0; +} + +static int finish_rebase(struct rebase_options *opts) +{ + struct strbuf dir = STRBUF_INIT; + const char *argv_gc_auto[] = { "gc", "--auto", NULL }; + + delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); + apply_autostash(opts); + close_all_packs(the_repository->objects); + /* + * We ignore errors in 'gc --auto', since the + * user should see them. + */ + run_command_v_opt(argv_gc_auto, RUN_GIT_CMD); + strbuf_addstr(&dir, opts->state_dir); + remove_dir_recursively(&dir, 0); + strbuf_release(&dir); + + return 0; +} + +static struct commit *peel_committish(const char *name) +{ + struct object *obj; + struct object_id oid; + + if (get_oid(name, &oid)) + return NULL; + obj = parse_object(the_repository, &oid); + return (struct commit *)peel_to_type(name, 0, obj, OBJ_COMMIT); +} + +static void add_var(struct strbuf *buf, const char *name, const char *value) +{ + if (!value) + strbuf_addf(buf, "unset %s; ", name); + else { + strbuf_addf(buf, "%s=", name); + sq_quote_buf(buf, value); + strbuf_addstr(buf, "; "); + } +} + +static const char *resolvemsg = +N_("Resolve all conflicts manually, mark them as resolved with\n" +"\"git add/rm <conflicted_files>\", then run \"git rebase --continue\".\n" +"You can instead skip this commit: run \"git rebase --skip\".\n" +"To abort and get back to the state before \"git rebase\", run " +"\"git rebase --abort\"."); + +static int run_specific_rebase(struct rebase_options *opts) +{ + const char *argv[] = { NULL, NULL }; + struct strbuf script_snippet = STRBUF_INIT, buf = STRBUF_INIT; + int status; + const char *backend, *backend_func; + + if (opts->type == REBASE_INTERACTIVE) { + /* Run builtin interactive rebase */ + struct child_process child = CHILD_PROCESS_INIT; + + argv_array_pushf(&child.env_array, "GIT_CHERRY_PICK_HELP=%s", + resolvemsg); + if (!(opts->flags & REBASE_INTERACTIVE_EXPLICIT)) { + argv_array_push(&child.env_array, "GIT_EDITOR=:"); + opts->autosquash = 0; + } + + child.git_cmd = 1; + argv_array_push(&child.args, "rebase--interactive"); + + if (opts->action) + argv_array_pushf(&child.args, "--%s", opts->action); + if (opts->keep_empty) + argv_array_push(&child.args, "--keep-empty"); + if (opts->rebase_merges) + argv_array_push(&child.args, "--rebase-merges"); + if (opts->rebase_cousins) + argv_array_push(&child.args, "--rebase-cousins"); + if (opts->autosquash) + argv_array_push(&child.args, "--autosquash"); + if (opts->flags & REBASE_VERBOSE) + argv_array_push(&child.args, "--verbose"); + if (opts->flags & REBASE_FORCE) + argv_array_push(&child.args, "--no-ff"); + if (opts->restrict_revision) + argv_array_pushf(&child.args, + "--restrict-revision=^%s", + oid_to_hex(&opts->restrict_revision->object.oid)); + if (opts->upstream) + argv_array_pushf(&child.args, "--upstream=%s", + oid_to_hex(&opts->upstream->object.oid)); + if (opts->onto) + argv_array_pushf(&child.args, "--onto=%s", + oid_to_hex(&opts->onto->object.oid)); + if (opts->squash_onto) + argv_array_pushf(&child.args, "--squash-onto=%s", + oid_to_hex(opts->squash_onto)); + if (opts->onto_name) + argv_array_pushf(&child.args, "--onto-name=%s", + opts->onto_name); + argv_array_pushf(&child.args, "--head-name=%s", + opts->head_name ? 
+ opts->head_name : "detached HEAD"); + if (opts->strategy) + argv_array_pushf(&child.args, "--strategy=%s", + opts->strategy); + if (opts->strategy_opts) + argv_array_pushf(&child.args, "--strategy-opts=%s", + opts->strategy_opts); + if (opts->switch_to) + argv_array_pushf(&child.args, "--switch-to=%s", + opts->switch_to); + if (opts->cmd) + argv_array_pushf(&child.args, "--cmd=%s", opts->cmd); + if (opts->allow_empty_message) + argv_array_push(&child.args, "--allow-empty-message"); + if (opts->allow_rerere_autoupdate > 0) + argv_array_push(&child.args, "--rerere-autoupdate"); + else if (opts->allow_rerere_autoupdate == 0) + argv_array_push(&child.args, "--no-rerere-autoupdate"); + if (opts->gpg_sign_opt) + argv_array_push(&child.args, opts->gpg_sign_opt); + if (opts->signoff) + argv_array_push(&child.args, "--signoff"); + + status = run_command(&child); + goto finished_rebase; + } + + add_var(&script_snippet, "GIT_DIR", absolute_path(get_git_dir())); + add_var(&script_snippet, "state_dir", opts->state_dir); + + add_var(&script_snippet, "upstream_name", opts->upstream_name); + add_var(&script_snippet, "upstream", opts->upstream ? + oid_to_hex(&opts->upstream->object.oid) : NULL); + add_var(&script_snippet, "head_name", + opts->head_name ? opts->head_name : "detached HEAD"); + add_var(&script_snippet, "orig_head", oid_to_hex(&opts->orig_head)); + add_var(&script_snippet, "onto", opts->onto ? + oid_to_hex(&opts->onto->object.oid) : NULL); + add_var(&script_snippet, "onto_name", opts->onto_name); + add_var(&script_snippet, "revisions", opts->revisions); + add_var(&script_snippet, "restrict_revision", opts->restrict_revision ? + oid_to_hex(&opts->restrict_revision->object.oid) : NULL); + add_var(&script_snippet, "GIT_QUIET", + opts->flags & REBASE_NO_QUIET ? "" : "t"); + sq_quote_argv_pretty(&buf, opts->git_am_opts.argv); + add_var(&script_snippet, "git_am_opt", buf.buf); + strbuf_release(&buf); + add_var(&script_snippet, "verbose", + opts->flags & REBASE_VERBOSE ? "t" : ""); + add_var(&script_snippet, "diffstat", + opts->flags & REBASE_DIFFSTAT ? "t" : ""); + add_var(&script_snippet, "force_rebase", + opts->flags & REBASE_FORCE ? "t" : ""); + if (opts->switch_to) + add_var(&script_snippet, "switch_to", opts->switch_to); + add_var(&script_snippet, "action", opts->action ? opts->action : ""); + add_var(&script_snippet, "signoff", opts->signoff ? "--signoff" : ""); + add_var(&script_snippet, "allow_rerere_autoupdate", + opts->allow_rerere_autoupdate < 0 ? "" : + opts->allow_rerere_autoupdate ? + "--rerere-autoupdate" : "--no-rerere-autoupdate"); + add_var(&script_snippet, "keep_empty", opts->keep_empty ? "yes" : ""); + add_var(&script_snippet, "autosquash", opts->autosquash ? "t" : ""); + add_var(&script_snippet, "gpg_sign_opt", opts->gpg_sign_opt); + add_var(&script_snippet, "cmd", opts->cmd); + add_var(&script_snippet, "allow_empty_message", + opts->allow_empty_message ? "--allow-empty-message" : ""); + add_var(&script_snippet, "rebase_merges", + opts->rebase_merges ? "t" : ""); + add_var(&script_snippet, "rebase_cousins", + opts->rebase_cousins ? "t" : ""); + add_var(&script_snippet, "strategy", opts->strategy); + add_var(&script_snippet, "strategy_opts", opts->strategy_opts); + add_var(&script_snippet, "rebase_root", opts->root ? "t" : ""); + add_var(&script_snippet, "squash_onto", + opts->squash_onto ? 
oid_to_hex(opts->squash_onto) : ""); + add_var(&script_snippet, "git_format_patch_opt", + opts->git_format_patch_opt.buf); + + if (is_interactive(opts) && + !(opts->flags & REBASE_INTERACTIVE_EXPLICIT)) { + strbuf_addstr(&script_snippet, + "GIT_EDITOR=:; export GIT_EDITOR; "); + opts->autosquash = 0; + } + + switch (opts->type) { + case REBASE_AM: + backend = "git-rebase--am"; + backend_func = "git_rebase__am"; + break; + case REBASE_MERGE: + backend = "git-rebase--merge"; + backend_func = "git_rebase__merge"; + break; + case REBASE_PRESERVE_MERGES: + backend = "git-rebase--preserve-merges"; + backend_func = "git_rebase__preserve_merges"; + break; + default: + BUG("Unhandled rebase type %d", opts->type); + break; + } + + strbuf_addf(&script_snippet, + ". git-sh-setup && . git-rebase--common &&" + " . %s && %s", backend, backend_func); + argv[0] = script_snippet.buf; + + status = run_command_v_opt(argv, RUN_USING_SHELL); +finished_rebase: + if (opts->dont_finish_rebase) + ; /* do nothing */ + else if (opts->type == REBASE_INTERACTIVE) + ; /* interactive rebase cleans up after itself */ + else if (status == 0) { + if (!file_exists(state_dir_path("stopped-sha", opts))) + finish_rebase(opts); + } else if (status == 2) { + struct strbuf dir = STRBUF_INIT; + + apply_autostash(opts); + strbuf_addstr(&dir, opts->state_dir); + remove_dir_recursively(&dir, 0); + strbuf_release(&dir); + die("Nothing to do"); + } + + strbuf_release(&script_snippet); + + return status ? -1 : 0; +} + +#define GIT_REFLOG_ACTION_ENVIRONMENT "GIT_REFLOG_ACTION" + +#define RESET_HEAD_DETACH (1<<0) +#define RESET_HEAD_HARD (1<<1) + +static int reset_head(struct object_id *oid, const char *action, + const char *switch_to_branch, unsigned flags, + const char *reflog_orig_head, const char *reflog_head) +{ + unsigned detach_head = flags & RESET_HEAD_DETACH; + unsigned reset_hard = flags & RESET_HEAD_HARD; + struct object_id head_oid; + struct tree_desc desc[2] = { { NULL }, { NULL } }; + struct lock_file lock = LOCK_INIT; + struct unpack_trees_options unpack_tree_opts; + struct tree *tree; + const char *reflog_action; + struct strbuf msg = STRBUF_INIT; + size_t prefix_len; + struct object_id *orig = NULL, oid_orig, + *old_orig = NULL, oid_old_orig; + int ret = 0, nr = 0; + + if (switch_to_branch && !starts_with(switch_to_branch, "refs/")) + BUG("Not a fully qualified branch: '%s'", switch_to_branch); + + if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) { + ret = -1; + goto leave_reset_head; + } + + if ((!oid || !reset_hard) && get_oid("HEAD", &head_oid)) { + ret = error(_("could not determine HEAD revision")); + goto leave_reset_head; + } + + if (!oid) + oid = &head_oid; + + memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts)); + setup_unpack_trees_porcelain(&unpack_tree_opts, action); + unpack_tree_opts.head_idx = 1; + unpack_tree_opts.src_index = the_repository->index; + unpack_tree_opts.dst_index = the_repository->index; + unpack_tree_opts.fn = reset_hard ? 
oneway_merge : twoway_merge; + unpack_tree_opts.update = 1; + unpack_tree_opts.merge = 1; + if (!detach_head) + unpack_tree_opts.reset = 1; + + if (read_index_unmerged(the_repository->index) < 0) { + ret = error(_("could not read index")); + goto leave_reset_head; + } + + if (!reset_hard && !fill_tree_descriptor(&desc[nr++], &head_oid)) { + ret = error(_("failed to find tree of %s"), + oid_to_hex(&head_oid)); + goto leave_reset_head; + } + + if (!fill_tree_descriptor(&desc[nr++], oid)) { + ret = error(_("failed to find tree of %s"), oid_to_hex(oid)); + goto leave_reset_head; + } + + if (unpack_trees(nr, desc, &unpack_tree_opts)) { + ret = -1; + goto leave_reset_head; + } + + tree = parse_tree_indirect(oid); + prime_cache_tree(the_repository->index, tree); + + if (write_locked_index(the_repository->index, &lock, COMMIT_LOCK) < 0) { + ret = error(_("could not write index")); + goto leave_reset_head; + } + + reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT); + strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : "rebase"); + prefix_len = msg.len; + + if (!get_oid("ORIG_HEAD", &oid_old_orig)) + old_orig = &oid_old_orig; + if (!get_oid("HEAD", &oid_orig)) { + orig = &oid_orig; + if (!reflog_orig_head) { + strbuf_addstr(&msg, "updating ORIG_HEAD"); + reflog_orig_head = msg.buf; + } + update_ref(reflog_orig_head, "ORIG_HEAD", orig, old_orig, 0, + UPDATE_REFS_MSG_ON_ERR); + } else if (old_orig) + delete_ref(NULL, "ORIG_HEAD", old_orig, 0); + if (!reflog_head) { + strbuf_setlen(&msg, prefix_len); + strbuf_addstr(&msg, "updating HEAD"); + reflog_head = msg.buf; + } + if (!switch_to_branch) + ret = update_ref(reflog_head, "HEAD", oid, orig, + detach_head ? REF_NO_DEREF : 0, + UPDATE_REFS_MSG_ON_ERR); + else { + ret = create_symref("HEAD", switch_to_branch, msg.buf); + if (!ret) + ret = update_ref(reflog_head, "HEAD", oid, NULL, 0, + UPDATE_REFS_MSG_ON_ERR); + } + +leave_reset_head: + strbuf_release(&msg); + rollback_lock_file(&lock); + while (nr) + free((void *)desc[--nr].buffer); + return ret; +} + +static int rebase_config(const char *var, const char *value, void *data) +{ + struct rebase_options *opts = data; + + if (!strcmp(var, "rebase.stat")) { + if (git_config_bool(var, value)) + opts->flags |= REBASE_DIFFSTAT; + else + opts->flags &= !REBASE_DIFFSTAT; + return 0; + } + + if (!strcmp(var, "rebase.autosquash")) { + opts->autosquash = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "commit.gpgsign")) { + free(opts->gpg_sign_opt); + opts->gpg_sign_opt = git_config_bool(var, value) ? + xstrdup("-S") : NULL; + return 0; + } + + if (!strcmp(var, "rebase.autostash")) { + opts->autostash = git_config_bool(var, value); + return 0; + } + + return git_default_config(var, value, data); +} + +/* + * Determines whether the commits in from..to are linear, i.e. contain + * no merge commits. This function *expects* `from` to be an ancestor of + * `to`. 
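One hunk above deserves a close look: in rebase_config(), the line `opts->flags &= !REBASE_DIFFSTAT` uses logical rather than bitwise negation, so it zeroes every flag instead of clearing only the diffstat bit; the usual idiom is `opts->flags &= ~REBASE_DIFFSTAT`. A standalone demonstration of the difference (the flag values below are illustrative, not necessarily the ones used in rebase.c):

#include <stdio.h>

#define REBASE_NO_QUIET  (1 << 0)
#define REBASE_VERBOSE   (1 << 1)
#define REBASE_DIFFSTAT  (1 << 2)

int main(void)
{
	unsigned logical = REBASE_NO_QUIET | REBASE_DIFFSTAT;
	unsigned bitwise = REBASE_NO_QUIET | REBASE_DIFFSTAT;

	logical &= !REBASE_DIFFSTAT;   /* !4 == 0: every flag is cleared */
	bitwise &= ~REBASE_DIFFSTAT;   /* only the diffstat bit is cleared */

	printf("logical: %#x, bitwise: %#x\n", logical, bitwise);
	return 0;
}

With bitwise negation only the named bit changes, which is what the surrounding flag handling (REBASE_NO_QUIET, REBASE_VERBOSE, REBASE_FORCE) relies on.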
+ */ +static int is_linear_history(struct commit *from, struct commit *to) +{ + while (to && to != from) { + parse_commit(to); + if (!to->parents) + return 1; + if (to->parents->next) + return 0; + to = to->parents->item; + } + return 1; +} + +static int can_fast_forward(struct commit *onto, struct object_id *head_oid, + struct object_id *merge_base) +{ + struct commit *head = lookup_commit(the_repository, head_oid); + struct commit_list *merge_bases; + int res; + + if (!head) + return 0; + + merge_bases = get_merge_bases(onto, head); + if (merge_bases && !merge_bases->next) { + oidcpy(merge_base, &merge_bases->item->object.oid); + res = oideq(merge_base, &onto->object.oid); + } else { + oidcpy(merge_base, &null_oid); + res = 0; + } + free_commit_list(merge_bases); + return res && is_linear_history(onto, head); +} + +/* -i followed by -m is still -i */ +static int parse_opt_merge(const struct option *opt, const char *arg, int unset) +{ + struct rebase_options *opts = opt->value; + + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); + + if (!is_interactive(opts)) + opts->type = REBASE_MERGE; + + return 0; +} + +/* -i followed by -p is still explicitly interactive, but -p alone is not */ +static int parse_opt_interactive(const struct option *opt, const char *arg, + int unset) +{ + struct rebase_options *opts = opt->value; + + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); + + opts->type = REBASE_INTERACTIVE; + opts->flags |= REBASE_INTERACTIVE_EXPLICIT; + + return 0; +} + +static void NORETURN error_on_missing_default_upstream(void) +{ + struct branch *current_branch = branch_get(NULL); + + printf(_("%s\n" + "Please specify which branch you want to rebase against.\n" + "See git-rebase(1) for details.\n" + "\n" + " git rebase '<branch>'\n" + "\n"), + current_branch ? _("There is no tracking information for " + "the current branch.") : + _("You are not currently on a branch.")); + + if (current_branch) { + const char *remote = current_branch->remote_name; + + if (!remote) + remote = _("<remote>"); + + printf(_("If you wish to set tracking information for this " + "branch you can do so with:\n" + "\n" + " git branch --set-upstream-to=%s/<branch> %s\n" + "\n"), + remote, current_branch->name); + } + exit(1); +} + +int cmd_rebase(int argc, const char **argv, const char *prefix) +{ + struct rebase_options options = { + .type = REBASE_UNSPECIFIED, + .flags = REBASE_NO_QUIET, + .git_am_opts = ARGV_ARRAY_INIT, + .allow_rerere_autoupdate = -1, + .allow_empty_message = 1, + .git_format_patch_opt = STRBUF_INIT, + }; + const char *branch_name; + int ret, flags, total_argc, in_progress = 0; + int ok_to_skip_pre_rebase = 0; + struct strbuf msg = STRBUF_INIT; + struct strbuf revisions = STRBUF_INIT; + struct strbuf buf = STRBUF_INIT; + struct object_id merge_base; + enum { + NO_ACTION, + ACTION_CONTINUE, + ACTION_SKIP, + ACTION_ABORT, + ACTION_QUIT, + ACTION_EDIT_TODO, + ACTION_SHOW_CURRENT_PATCH, + } action = NO_ACTION; + const char *gpg_sign = NULL; + struct string_list exec = STRING_LIST_INIT_NODUP; + const char *rebase_merges = NULL; + int fork_point = -1; + struct string_list strategy_options = STRING_LIST_INIT_NODUP; + struct object_id squash_onto; + char *squash_onto_name = NULL; + struct option builtin_rebase_options[] = { + OPT_STRING(0, "onto", &options.onto_name, + N_("revision"), + N_("rebase onto given branch instead of upstream")), + OPT_BOOL(0, "no-verify", &ok_to_skip_pre_rebase, + N_("allow pre-rebase hook to run")), + OPT_NEGBIT('q', "quiet", &options.flags, + N_("be quiet. 
implies --no-stat"), + REBASE_NO_QUIET| REBASE_VERBOSE | REBASE_DIFFSTAT), + OPT_BIT('v', "verbose", &options.flags, + N_("display a diffstat of what changed upstream"), + REBASE_NO_QUIET | REBASE_VERBOSE | REBASE_DIFFSTAT), + {OPTION_NEGBIT, 'n', "no-stat", &options.flags, NULL, + N_("do not show diffstat of what changed upstream"), + PARSE_OPT_NOARG, NULL, REBASE_DIFFSTAT }, + OPT_BOOL(0, "signoff", &options.signoff, + N_("add a Signed-off-by: line to each commit")), + OPT_PASSTHRU_ARGV(0, "ignore-whitespace", &options.git_am_opts, + NULL, N_("passed to 'git am'"), + PARSE_OPT_NOARG), + OPT_PASSTHRU_ARGV(0, "committer-date-is-author-date", + &options.git_am_opts, NULL, + N_("passed to 'git am'"), PARSE_OPT_NOARG), + OPT_PASSTHRU_ARGV(0, "ignore-date", &options.git_am_opts, NULL, + N_("passed to 'git am'"), PARSE_OPT_NOARG), + OPT_PASSTHRU_ARGV('C', NULL, &options.git_am_opts, N_("n"), + N_("passed to 'git apply'"), 0), + OPT_PASSTHRU_ARGV(0, "whitespace", &options.git_am_opts, + N_("action"), N_("passed to 'git apply'"), 0), + OPT_BIT('f', "force-rebase", &options.flags, + N_("cherry-pick all commits, even if unchanged"), + REBASE_FORCE), + OPT_BIT(0, "no-ff", &options.flags, + N_("cherry-pick all commits, even if unchanged"), + REBASE_FORCE), + OPT_CMDMODE(0, "continue", &action, N_("continue"), + ACTION_CONTINUE), + OPT_CMDMODE(0, "skip", &action, + N_("skip current patch and continue"), ACTION_SKIP), + OPT_CMDMODE(0, "abort", &action, + N_("abort and check out the original branch"), + ACTION_ABORT), + OPT_CMDMODE(0, "quit", &action, + N_("abort but keep HEAD where it is"), ACTION_QUIT), + OPT_CMDMODE(0, "edit-todo", &action, N_("edit the todo list " + "during an interactive rebase"), ACTION_EDIT_TODO), + OPT_CMDMODE(0, "show-current-patch", &action, + N_("show the patch file being applied or merged"), + ACTION_SHOW_CURRENT_PATCH), + { OPTION_CALLBACK, 'm', "merge", &options, NULL, + N_("use merging strategies to rebase"), + PARSE_OPT_NOARG | PARSE_OPT_NONEG, + parse_opt_merge }, + { OPTION_CALLBACK, 'i', "interactive", &options, NULL, + N_("let the user edit the list of commits to rebase"), + PARSE_OPT_NOARG | PARSE_OPT_NONEG, + parse_opt_interactive }, + OPT_SET_INT('p', "preserve-merges", &options.type, + N_("try to recreate merges instead of ignoring " + "them"), REBASE_PRESERVE_MERGES), + OPT_BOOL(0, "rerere-autoupdate", + &options.allow_rerere_autoupdate, + N_("allow rerere to update index with resolved " + "conflict")), + OPT_BOOL('k', "keep-empty", &options.keep_empty, + N_("preserve empty commits during rebase")), + OPT_BOOL(0, "autosquash", &options.autosquash, + N_("move commits that begin with " + "squash!/fixup! 
under -i")), + { OPTION_STRING, 'S', "gpg-sign", &gpg_sign, N_("key-id"), + N_("GPG-sign commits"), + PARSE_OPT_OPTARG, NULL, (intptr_t) "" }, + OPT_BOOL(0, "autostash", &options.autostash, + N_("automatically stash/stash pop before and after")), + OPT_STRING_LIST('x', "exec", &exec, N_("exec"), + N_("add exec lines after each commit of the " + "editable list")), + OPT_BOOL(0, "allow-empty-message", + &options.allow_empty_message, + N_("allow rebasing commits with empty messages")), + {OPTION_STRING, 'r', "rebase-merges", &rebase_merges, + N_("mode"), + N_("try to rebase merges instead of skipping them"), + PARSE_OPT_OPTARG, NULL, (intptr_t)""}, + OPT_BOOL(0, "fork-point", &fork_point, + N_("use 'merge-base --fork-point' to refine upstream")), + OPT_STRING('s', "strategy", &options.strategy, + N_("strategy"), N_("use the given merge strategy")), + OPT_STRING_LIST('X', "strategy-option", &strategy_options, + N_("option"), + N_("pass the argument through to the merge " + "strategy")), + OPT_BOOL(0, "root", &options.root, + N_("rebase all reachable commits up to the root(s)")), + OPT_END(), + }; + int i; + + /* + * NEEDSWORK: Once the builtin rebase has been tested enough + * and git-legacy-rebase.sh is retired to contrib/, this preamble + * can be removed. + */ + + if (!use_builtin_rebase()) { + const char *path = mkpath("%s/git-legacy-rebase", + git_exec_path()); + + if (sane_execvp(path, (char **)argv) < 0) + die_errno(_("could not exec %s"), path); + else + BUG("sane_execvp() returned???"); + } + + if (argc == 2 && !strcmp(argv[1], "-h")) + usage_with_options(builtin_rebase_usage, + builtin_rebase_options); + + prefix = setup_git_directory(); + trace_repo_setup(prefix); + setup_work_tree(); + + git_config(rebase_config, &options); + + strbuf_reset(&buf); + strbuf_addf(&buf, "%s/applying", apply_dir()); + if(file_exists(buf.buf)) + die(_("It looks like 'git am' is in progress. 
Cannot rebase.")); + + if (is_directory(apply_dir())) { + options.type = REBASE_AM; + options.state_dir = apply_dir(); + } else if (is_directory(merge_dir())) { + strbuf_reset(&buf); + strbuf_addf(&buf, "%s/rewritten", merge_dir()); + if (is_directory(buf.buf)) { + options.type = REBASE_PRESERVE_MERGES; + options.flags |= REBASE_INTERACTIVE_EXPLICIT; + } else { + strbuf_reset(&buf); + strbuf_addf(&buf, "%s/interactive", merge_dir()); + if(file_exists(buf.buf)) { + options.type = REBASE_INTERACTIVE; + options.flags |= REBASE_INTERACTIVE_EXPLICIT; + } else + options.type = REBASE_MERGE; + } + options.state_dir = merge_dir(); + } + + if (options.type != REBASE_UNSPECIFIED) + in_progress = 1; + + total_argc = argc; + argc = parse_options(argc, argv, prefix, + builtin_rebase_options, + builtin_rebase_usage, 0); + + if (action != NO_ACTION && total_argc != 2) { + usage_with_options(builtin_rebase_usage, + builtin_rebase_options); + } + + if (argc > 2) + usage_with_options(builtin_rebase_usage, + builtin_rebase_options); + + if (action != NO_ACTION && !in_progress) + die(_("No rebase in progress?")); + + if (action == ACTION_EDIT_TODO && !is_interactive(&options)) + die(_("The --edit-todo action can only be used during " + "interactive rebase.")); + + switch (action) { + case ACTION_CONTINUE: { + struct object_id head; + struct lock_file lock_file = LOCK_INIT; + int fd; + + options.action = "continue"; + + /* Sanity check */ + if (get_oid("HEAD", &head)) + die(_("Cannot read HEAD")); + + fd = hold_locked_index(&lock_file, 0); + if (read_index(the_repository->index) < 0) + die(_("could not read index")); + refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, + NULL); + if (0 <= fd) + update_index_if_able(the_repository->index, + &lock_file); + rollback_lock_file(&lock_file); + + if (has_unstaged_changes(1)) { + puts(_("You must edit all merge conflicts and then\n" + "mark them as resolved using git add")); + exit(1); + } + if (read_basic_state(&options)) + exit(1); + goto run_rebase; + } + case ACTION_SKIP: { + struct string_list merge_rr = STRING_LIST_INIT_DUP; + + options.action = "skip"; + + rerere_clear(&merge_rr); + string_list_clear(&merge_rr, 1); + + if (reset_head(NULL, "reset", NULL, RESET_HEAD_HARD, + NULL, NULL) < 0) + die(_("could not discard worktree changes")); + remove_branch_state(); + if (read_basic_state(&options)) + exit(1); + goto run_rebase; + } + case ACTION_ABORT: { + struct string_list merge_rr = STRING_LIST_INIT_DUP; + options.action = "abort"; + + rerere_clear(&merge_rr); + string_list_clear(&merge_rr, 1); + + if (read_basic_state(&options)) + exit(1); + if (reset_head(&options.orig_head, "reset", + options.head_name, RESET_HEAD_HARD, + NULL, NULL) < 0) + die(_("could not move back to %s"), + oid_to_hex(&options.orig_head)); + remove_branch_state(); + ret = finish_rebase(&options); + goto cleanup; + } + case ACTION_QUIT: { + strbuf_reset(&buf); + strbuf_addstr(&buf, options.state_dir); + ret = !!remove_dir_recursively(&buf, 0); + if (ret) + die(_("could not remove '%s'"), options.state_dir); + goto cleanup; + } + case ACTION_EDIT_TODO: + options.action = "edit-todo"; + options.dont_finish_rebase = 1; + goto run_rebase; + case ACTION_SHOW_CURRENT_PATCH: + options.action = "show-current-patch"; + options.dont_finish_rebase = 1; + goto run_rebase; + case NO_ACTION: + break; + default: + BUG("action: %d", action); + } + + /* Make sure no rebase is in progress */ + if (in_progress) { + const char *last_slash = strrchr(options.state_dir, '/'); + const char 
*state_dir_base = + last_slash ? last_slash + 1 : options.state_dir; + const char *cmd_live_rebase = + "git rebase (--continue | --abort | --skip)"; + strbuf_reset(&buf); + strbuf_addf(&buf, "rm -fr \"%s\"", options.state_dir); + die(_("It seems that there is already a %s directory, and\n" + "I wonder if you are in the middle of another rebase. " + "If that is the\n" + "case, please try\n\t%s\n" + "If that is not the case, please\n\t%s\n" + "and run me again. I am stopping in case you still " + "have something\n" + "valuable there.\n"), + state_dir_base, cmd_live_rebase, buf.buf); + } + + for (i = 0; i < options.git_am_opts.argc; i++) { + const char *option = options.git_am_opts.argv[i], *p; + if (!strcmp(option, "--committer-date-is-author-date") || + !strcmp(option, "--ignore-date") || + !strcmp(option, "--whitespace=fix") || + !strcmp(option, "--whitespace=strip")) + options.flags |= REBASE_FORCE; + else if (skip_prefix(option, "-C", &p)) { + while (*p) + if (!isdigit(*(p++))) + die(_("switch `C' expects a " + "numerical value")); + } else if (skip_prefix(option, "--whitespace=", &p)) { + if (*p && strcmp(p, "warn") && strcmp(p, "nowarn") && + strcmp(p, "error") && strcmp(p, "error-all")) + die("Invalid whitespace option: '%s'", p); + } + } + + if (!(options.flags & REBASE_NO_QUIET)) + argv_array_push(&options.git_am_opts, "-q"); + + if (options.keep_empty) + imply_interactive(&options, "--keep-empty"); + + if (gpg_sign) { + free(options.gpg_sign_opt); + options.gpg_sign_opt = xstrfmt("-S%s", gpg_sign); + } + + if (exec.nr) { + int i; + + imply_interactive(&options, "--exec"); + + strbuf_reset(&buf); + for (i = 0; i < exec.nr; i++) + strbuf_addf(&buf, "exec %s\n", exec.items[i].string); + options.cmd = xstrdup(buf.buf); + } + + if (rebase_merges) { + if (!*rebase_merges) + ; /* default mode; do nothing */ + else if (!strcmp("rebase-cousins", rebase_merges)) + options.rebase_cousins = 1; + else if (strcmp("no-rebase-cousins", rebase_merges)) + die(_("Unknown mode: %s"), rebase_merges); + options.rebase_merges = 1; + imply_interactive(&options, "--rebase-merges"); + } + + if (strategy_options.nr) { + int i; + + if (!options.strategy) + options.strategy = "recursive"; + + strbuf_reset(&buf); + for (i = 0; i < strategy_options.nr; i++) + strbuf_addf(&buf, " --%s", + strategy_options.items[i].string); + options.strategy_opts = xstrdup(buf.buf); + } + + if (options.strategy) { + options.strategy = xstrdup(options.strategy); + switch (options.type) { + case REBASE_AM: + die(_("--strategy requires --merge or --interactive")); + case REBASE_MERGE: + case REBASE_INTERACTIVE: + case REBASE_PRESERVE_MERGES: + /* compatible */ + break; + case REBASE_UNSPECIFIED: + options.type = REBASE_MERGE; + break; + default: + BUG("unhandled rebase type (%d)", options.type); + } + } + + if (options.root && !options.onto_name) + imply_interactive(&options, "--root without --onto"); + + if (isatty(2) && options.flags & REBASE_NO_QUIET) + strbuf_addstr(&options.git_format_patch_opt, " --progress"); + + switch (options.type) { + case REBASE_MERGE: + case REBASE_INTERACTIVE: + case REBASE_PRESERVE_MERGES: + options.state_dir = merge_dir(); + break; + case REBASE_AM: + options.state_dir = apply_dir(); + break; + default: + /* the default rebase backend is `--am` */ + options.type = REBASE_AM; + options.state_dir = apply_dir(); + break; + } + + if (options.git_am_opts.argc) { + /* all am options except -q are compatible only with --am */ + for (i = options.git_am_opts.argc - 1; i >= 0; i--) + if 
(strcmp(options.git_am_opts.argv[i], "-q")) + break; + + if (is_interactive(&options) && i >= 0) + die(_("error: cannot combine interactive options " + "(--interactive, --exec, --rebase-merges, " + "--preserve-merges, --keep-empty, --root + " + "--onto) with am options (%s)"), buf.buf); + if (options.type == REBASE_MERGE && i >= 0) + die(_("error: cannot combine merge options (--merge, " + "--strategy, --strategy-option) with am options " + "(%s)"), buf.buf); + } + + if (options.signoff) { + if (options.type == REBASE_PRESERVE_MERGES) + die("cannot combine '--signoff' with " + "'--preserve-merges'"); + argv_array_push(&options.git_am_opts, "--signoff"); + options.flags |= REBASE_FORCE; + } + + if (options.type == REBASE_PRESERVE_MERGES) + /* + * Note: incompatibility with --signoff handled in signoff block above + * Note: incompatibility with --interactive is just a strong warning; + * git-rebase.txt caveats with "unless you know what you are doing" + */ + if (options.rebase_merges) + die(_("error: cannot combine '--preserve-merges' with " + "'--rebase-merges'")); + + if (options.rebase_merges) { + if (strategy_options.nr) + die(_("error: cannot combine '--rebase-merges' with " + "'--strategy-option'")); + if (options.strategy) + die(_("error: cannot combine '--rebase-merges' with " + "'--strategy'")); + } + + if (!options.root) { + if (argc < 1) { + struct branch *branch; + + branch = branch_get(NULL); + options.upstream_name = branch_get_upstream(branch, + NULL); + if (!options.upstream_name) + error_on_missing_default_upstream(); + if (fork_point < 0) + fork_point = 1; + } else { + options.upstream_name = argv[0]; + argc--; + argv++; + if (!strcmp(options.upstream_name, "-")) + options.upstream_name = "@{-1}"; + } + options.upstream = peel_committish(options.upstream_name); + if (!options.upstream) + die(_("invalid upstream '%s'"), options.upstream_name); + options.upstream_arg = options.upstream_name; + } else { + if (!options.onto_name) { + if (commit_tree("", 0, the_hash_algo->empty_tree, NULL, + &squash_onto, NULL, NULL) < 0) + die(_("Could not create new root commit")); + options.squash_onto = &squash_onto; + options.onto_name = squash_onto_name = + xstrdup(oid_to_hex(&squash_onto)); + } + options.upstream_name = NULL; + options.upstream = NULL; + if (argc > 1) + usage_with_options(builtin_rebase_usage, + builtin_rebase_options); + options.upstream_arg = "--root"; + } + + /* Make sure the branch to rebase onto is valid. */ + if (!options.onto_name) + options.onto_name = options.upstream_name; + if (strstr(options.onto_name, "...")) { + if (get_oid_mb(options.onto_name, &merge_base) < 0) + die(_("'%s': need exactly one merge base"), + options.onto_name); + options.onto = lookup_commit_or_die(&merge_base, + options.onto_name); + } else { + options.onto = peel_committish(options.onto_name); + if (!options.onto) + die(_("Does not point to a valid commit '%s'"), + options.onto_name); + } + + /* + * If the branch to rebase is given, that is the branch we will rebase + * branch_name -- branch/commit being rebased, or + * HEAD (already detached) + * orig_head -- commit object name of tip of the branch before rebasing + * head_name -- refs/heads/<that-branch> or NULL (detached HEAD) + */ + if (argc == 1) { + /* Is it "rebase other branchname" or "rebase other commit"? */ + branch_name = argv[0]; + options.switch_to = argv[0]; + + /* Is it a local branch? 
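peel_committish() above resolves whatever name the user gave for the upstream or for --onto down to the commit it points at, peeling annotated tags on the way, and names that do not lead to a commit are rejected. The same peeling can be illustrated from the outside with the command-line `^{commit}` syntax; a standalone sketch, with deliberately naive quoting of the name:

#include <stdio.h>
#include <string.h>

/* Resolve 'name' to a commit ID by asking git to peel it. */
static int peel_committish(const char *name, char *oid, int len)
{
	char cmd[256];
	FILE *fp;

	snprintf(cmd, sizeof(cmd),
		 "git rev-parse --verify --quiet '%s^{commit}'", name);
	fp = popen(cmd, "r");
	if (!fp)
		return -1;
	if (!fgets(oid, len, fp)) {
		pclose(fp);
		return -1;
	}
	pclose(fp);
	oid[strcspn(oid, "\n")] = '\0';
	return 0;
}

int main(int argc, char **argv)
{
	char oid[80];
	const char *name = argc > 1 ? argv[1] : "HEAD";

	if (peel_committish(name, oid, sizeof(oid)))
		fprintf(stderr, "'%s' does not point at a commit\n", name);
	else
		printf("%s -> %s\n", name, oid);
	return 0;
}

The separate "A...B" spelling handled above takes a different path, get_oid_mb(), which uses the merge base of the two ends as the onto commit.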
*/ + strbuf_reset(&buf); + strbuf_addf(&buf, "refs/heads/%s", branch_name); + if (!read_ref(buf.buf, &options.orig_head)) + options.head_name = xstrdup(buf.buf); + /* If not is it a valid ref (branch or commit)? */ + else if (!get_oid(branch_name, &options.orig_head)) + options.head_name = NULL; + else + die(_("fatal: no such branch/commit '%s'"), + branch_name); + } else if (argc == 0) { + /* Do not need to switch branches, we are already on it. */ + options.head_name = + xstrdup_or_null(resolve_ref_unsafe("HEAD", 0, NULL, + &flags)); + if (!options.head_name) + die(_("No such ref: %s"), "HEAD"); + if (flags & REF_ISSYMREF) { + if (!skip_prefix(options.head_name, + "refs/heads/", &branch_name)) + branch_name = options.head_name; + + } else { + free(options.head_name); + options.head_name = NULL; + branch_name = "HEAD"; + } + if (get_oid("HEAD", &options.orig_head)) + die(_("Could not resolve HEAD to a revision")); + } else + BUG("unexpected number of arguments left to parse"); + + if (fork_point > 0) { + struct commit *head = + lookup_commit_reference(the_repository, + &options.orig_head); + options.restrict_revision = + get_fork_point(options.upstream_name, head); + } + + if (read_index(the_repository->index) < 0) + die(_("could not read index")); + + if (options.autostash) { + struct lock_file lock_file = LOCK_INIT; + int fd; + + fd = hold_locked_index(&lock_file, 0); + refresh_cache(REFRESH_QUIET); + if (0 <= fd) + update_index_if_able(&the_index, &lock_file); + rollback_lock_file(&lock_file); + + if (has_unstaged_changes(1) || has_uncommitted_changes(1)) { + const char *autostash = + state_dir_path("autostash", &options); + struct child_process stash = CHILD_PROCESS_INIT; + struct object_id oid; + struct commit *head = + lookup_commit_reference(the_repository, + &options.orig_head); + + argv_array_pushl(&stash.args, + "stash", "create", "autostash", NULL); + stash.git_cmd = 1; + stash.no_stdin = 1; + strbuf_reset(&buf); + if (capture_command(&stash, &buf, GIT_MAX_HEXSZ)) + die(_("Cannot autostash")); + strbuf_trim_trailing_newline(&buf); + if (get_oid(buf.buf, &oid)) + die(_("Unexpected stash response: '%s'"), + buf.buf); + strbuf_reset(&buf); + strbuf_add_unique_abbrev(&buf, &oid, DEFAULT_ABBREV); + + if (safe_create_leading_directories_const(autostash)) + die(_("Could not create directory for '%s'"), + options.state_dir); + write_file(autostash, "%s", oid_to_hex(&oid)); + printf(_("Created autostash: %s\n"), buf.buf); + if (reset_head(&head->object.oid, "reset --hard", + NULL, RESET_HEAD_HARD, NULL, NULL) < 0) + die(_("could not reset --hard")); + printf(_("HEAD is now at %s"), + find_unique_abbrev(&head->object.oid, + DEFAULT_ABBREV)); + strbuf_reset(&buf); + pp_commit_easy(CMIT_FMT_ONELINE, head, &buf); + if (buf.len > 0) + printf(" %s", buf.buf); + putchar('\n'); + + if (discard_index(the_repository->index) < 0 || + read_index(the_repository->index) < 0) + die(_("could not read index")); + } + } + + if (require_clean_work_tree("rebase", + _("Please commit or stash them."), 1, 1)) { + ret = 1; + goto cleanup; + } + + /* + * Now we are rebasing commits upstream..orig_head (or with --root, + * everything leading up to orig_head) on top of onto. + */ + + /* + * Check if we are already based on onto with linear history, + * but this should be done only when upstream and onto are the same + * and if this is not an interactive rebase. 
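The check described in the comment above boils down to a handful of booleans; condensed into a standalone predicate (the parameter names are ad hoc, and the lazy branch switch and user-facing messages of the real code are omitted):

#include <stdio.h>

/* The rebase becomes a no-op only when HEAD is already a linear
 * descendant of onto, upstream and onto are the same commit, nothing
 * restricts the revision range, the rebase is not interactive, and
 * --force-rebase/--no-ff was not given. */
static int rebase_is_noop(int based_on_onto, int interactive,
			  int restricted, int upstream_is_onto, int forced)
{
	return based_on_onto && !interactive && !restricted &&
	       upstream_is_onto && !forced;
}

int main(void)
{
	printf("up to date, plain rebase: %d\n",
	       rebase_is_noop(1, 0, 0, 1, 0));   /* 1: report and finish */
	printf("up to date, --no-ff:      %d\n",
	       rebase_is_noop(1, 0, 0, 1, 1));   /* 0: rebase anyway */
	return 0;
}

When the predicate is false, or force is in effect, the code that follows detaches HEAD and replays the commits as usual.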
+ */ + if (can_fast_forward(options.onto, &options.orig_head, &merge_base) && + !is_interactive(&options) && !options.restrict_revision && + options.upstream && + !oidcmp(&options.upstream->object.oid, &options.onto->object.oid)) { + int flag; + + if (!(options.flags & REBASE_FORCE)) { + /* Lazily switch to the target branch if needed... */ + if (options.switch_to) { + struct object_id oid; + + if (get_oid(options.switch_to, &oid) < 0) { + ret = !!error(_("could not parse '%s'"), + options.switch_to); + goto cleanup; + } + + strbuf_reset(&buf); + strbuf_addf(&buf, "rebase: checkout %s", + options.switch_to); + if (reset_head(&oid, "checkout", + options.head_name, 0, + NULL, NULL) < 0) { + ret = !!error(_("could not switch to " + "%s"), + options.switch_to); + goto cleanup; + } + } + + if (!(options.flags & REBASE_NO_QUIET)) + ; /* be quiet */ + else if (!strcmp(branch_name, "HEAD") && + resolve_ref_unsafe("HEAD", 0, NULL, &flag)) + puts(_("HEAD is up to date.")); + else + printf(_("Current branch %s is up to date.\n"), + branch_name); + ret = !!finish_rebase(&options); + goto cleanup; + } else if (!(options.flags & REBASE_NO_QUIET)) + ; /* be quiet */ + else if (!strcmp(branch_name, "HEAD") && + resolve_ref_unsafe("HEAD", 0, NULL, &flag)) + puts(_("HEAD is up to date, rebase forced.")); + else + printf(_("Current branch %s is up to date, rebase " + "forced.\n"), branch_name); + } + + /* If a hook exists, give it a chance to interrupt*/ + if (!ok_to_skip_pre_rebase && + run_hook_le(NULL, "pre-rebase", options.upstream_arg, + argc ? argv[0] : NULL, NULL)) + die(_("The pre-rebase hook refused to rebase.")); + + if (options.flags & REBASE_DIFFSTAT) { + struct diff_options opts; + + if (options.flags & REBASE_VERBOSE) + printf(_("Changes from %s to %s:\n"), + oid_to_hex(&merge_base), + oid_to_hex(&options.onto->object.oid)); + + /* We want color (if set), but no pager */ + diff_setup(&opts); + opts.stat_width = -1; /* use full terminal width */ + opts.stat_graph_width = -1; /* respect statGraphWidth config */ + opts.output_format |= + DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT; + opts.detect_rename = DIFF_DETECT_RENAME; + diff_setup_done(&opts); + diff_tree_oid(&merge_base, &options.onto->object.oid, + "", &opts); + diffcore_std(&opts); + diff_flush(&opts); + } + + if (is_interactive(&options)) + goto run_rebase; + + /* Detach HEAD and reset the tree */ + if (options.flags & REBASE_NO_QUIET) + printf(_("First, rewinding head to replay your work on top of " + "it...\n")); + + strbuf_addf(&msg, "rebase: checkout %s", options.onto_name); + if (reset_head(&options.onto->object.oid, "checkout", NULL, + RESET_HEAD_DETACH, NULL, msg.buf)) + die(_("Could not detach HEAD")); + strbuf_release(&msg); + + /* + * If the onto is a proper descendant of the tip of the branch, then + * we just fast-forwarded. + */ + strbuf_reset(&msg); + if (!oidcmp(&merge_base, &options.orig_head)) { + printf(_("Fast-forwarded %s to %s. \n"), + branch_name, options.onto_name); + strbuf_addf(&msg, "rebase finished: %s onto %s", + options.head_name ? options.head_name : "detached HEAD", + oid_to_hex(&options.onto->object.oid)); + reset_head(NULL, "Fast-forwarded", options.head_name, 0, + "HEAD", msg.buf); + strbuf_release(&msg); + ret = !!finish_rebase(&options); + goto cleanup; + } + + strbuf_addf(&revisions, "%s..%s", + options.root ? oid_to_hex(&options.onto->object.oid) : + (options.restrict_revision ? 
+ oid_to_hex(&options.restrict_revision->object.oid) : + oid_to_hex(&options.upstream->object.oid)), + oid_to_hex(&options.orig_head)); + + options.revisions = revisions.buf; + +run_rebase: + ret = !!run_specific_rebase(&options); + +cleanup: + strbuf_release(&revisions); + free(options.head_name); + free(options.gpg_sign_opt); + free(options.cmd); + free(squash_onto_name); + return ret; +} diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c index 95eb7002ee..33187bd8e9 100644 --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@ -27,6 +27,7 @@ #include "packfile.h" #include "object-store.h" #include "protocol.h" +#include "commit-reach.h" static const char * const receive_pack_usage[] = { N_("git receive-pack <git-dir>"), @@ -280,8 +281,7 @@ static int show_ref_cb(const char *path_full, const struct object_id *oid, return 0; } -static void show_one_alternate_ref(const char *refname, - const struct object_id *oid, +static void show_one_alternate_ref(const struct object_id *oid, void *data) { struct oidset *seen = data; @@ -1228,8 +1228,8 @@ static void check_aliased_update(struct command *cmd, struct string_list *list) dst_cmd = (struct command *) item->util; - if (!oidcmp(&cmd->old_oid, &dst_cmd->old_oid) && - !oidcmp(&cmd->new_oid, &dst_cmd->new_oid)) + if (oideq(&cmd->old_oid, &dst_cmd->old_oid) && + oideq(&cmd->new_oid, &dst_cmd->new_oid)) return; dst_cmd->skip_update = 1; diff --git a/builtin/reflog.c b/builtin/reflog.c index 3acef5a0ab..7a85e4b164 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -10,6 +10,7 @@ #include "diff.h" #include "revision.h" #include "reachable.h" +#include "worktree.h" /* NEEDSWORK: switch to using parse_options */ static const char reflog_expire_usage[] = @@ -52,6 +53,7 @@ struct collect_reflog_cb { struct collected_reflog **e; int alloc; int nr; + struct worktree *wt; }; /* Remember to update object flag allocation in object.h */ @@ -330,13 +332,27 @@ static int push_tip_to_list(const char *refname, const struct object_id *oid, return 0; } +static int is_head(const char *refname) +{ + switch (ref_type(refname)) { + case REF_TYPE_OTHER_PSEUDOREF: + case REF_TYPE_MAIN_PSEUDOREF: + if (parse_worktree_ref(refname, NULL, NULL, &refname)) + BUG("not a worktree ref: %s", refname); + break; + default: + break; + } + return !strcmp(refname, "HEAD"); +} + static void reflog_expiry_prepare(const char *refname, const struct object_id *oid, void *cb_data) { struct expire_reflog_policy_cb *cb = cb_data; - if (!cb->cmd.expire_unreachable || !strcmp(refname, "HEAD")) { + if (!cb->cmd.expire_unreachable || is_head(refname)) { cb->tip_commit = NULL; cb->unreachable_expire_kind = UE_HEAD; } else { @@ -388,8 +404,19 @@ static int collect_reflog(const char *ref, const struct object_id *oid, int unus { struct collected_reflog *e; struct collect_reflog_cb *cb = cb_data; + struct strbuf newref = STRBUF_INIT; + + /* + * Avoid collecting the same shared ref multiple times because + * they are available via all worktrees. 
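The receive-pack hunk above is one of many in this series that rewrite `!oidcmp(a, b)` as `oideq(a, b)` (and `!hashcmp()` as `hasheq()`): the equality-only helper states the intent directly and gives the compiler a simpler comparison to optimize. A standalone model of the pair over a fixed-width hash, showing they agree:

#include <stdio.h>
#include <string.h>

#define RAWSZ 20   /* SHA-1; 32 for SHA-256 */

struct object_id { unsigned char hash[RAWSZ]; };

/* Ordering comparison, like memcmp. */
static int oidcmp(const struct object_id *a, const struct object_id *b)
{
	return memcmp(a->hash, b->hash, RAWSZ);
}

/* Equality only. */
static int oideq(const struct object_id *a, const struct object_id *b)
{
	return !memcmp(a->hash, b->hash, RAWSZ);
}

int main(void)
{
	struct object_id x = { { 0 } }, y = { { 0 } };

	y.hash[0] = 1;
	printf("equal:     oideq=%d  !oidcmp=%d\n", oideq(&x, &x), !oidcmp(&x, &x));
	printf("different: oideq=%d  !oidcmp=%d\n", oideq(&x, &y), !oidcmp(&x, &y));
	return 0;
}

The same mechanical conversion shows up below in remote.c, replace.c, rm.c and show-branch.c.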
+ */ + if (!cb->wt->is_current && ref_type(ref) == REF_TYPE_NORMAL) + return 0; + + strbuf_worktree_ref(cb->wt, &newref, ref); + FLEX_ALLOC_STR(e, reflog, newref.buf); + strbuf_release(&newref); - FLEX_ALLOC_STR(e, reflog, ref); oidcpy(&e->oid, oid); ALLOC_GROW(cb->e, cb->nr + 1, cb->alloc); cb->e[cb->nr++] = e; @@ -512,7 +539,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) { struct expire_reflog_policy_cb cb; timestamp_t now = time(NULL); - int i, status, do_all; + int i, status, do_all, all_worktrees = 1; int explicit_expiry = 0; unsigned int flags = 0; @@ -549,6 +576,8 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) flags |= EXPIRE_REFLOGS_UPDATE_REF; else if (!strcmp(arg, "--all")) do_all = 1; + else if (!strcmp(arg, "--single-worktree")) + all_worktrees = 0; else if (!strcmp(arg, "--verbose")) flags |= EXPIRE_REFLOGS_VERBOSE; else if (!strcmp(arg, "--")) { @@ -567,7 +596,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) * from reflog if the repository was pruned with older git. */ if (cb.cmd.stalefix) { - init_revisions(&cb.cmd.revs, prefix); + repo_init_revisions(the_repository, &cb.cmd.revs, prefix); if (flags & EXPIRE_REFLOGS_VERBOSE) printf("Marking reachable objects..."); mark_reachable_objects(&cb.cmd.revs, 0, 0, NULL); @@ -577,10 +606,19 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) if (do_all) { struct collect_reflog_cb collected; + struct worktree **worktrees, **p; int i; memset(&collected, 0, sizeof(collected)); - for_each_reflog(collect_reflog, &collected); + worktrees = get_worktrees(0); + for (p = worktrees; *p; p++) { + if (!all_worktrees && !(*p)->is_current) + continue; + collected.wt = *p; + refs_for_each_reflog(get_worktree_ref_store(*p), + collect_reflog, &collected); + } + free_worktrees(worktrees); for (i = 0; i < collected.nr; i++) { struct collected_reflog *e = collected.e[i]; set_reflog_expiry_param(&cb.cmd, explicit_expiry, e->reflog); diff --git a/builtin/remote.c b/builtin/remote.c index 5fd1012faa..f7edf7f2cb 100644 --- a/builtin/remote.c +++ b/builtin/remote.c @@ -10,6 +10,7 @@ #include "refspec.h" #include "object-store.h" #include "argv-array.h" +#include "commit-reach.h" static const char * const builtin_remote_usage[] = { N_("git remote [-v | --verbose]"), @@ -412,7 +413,7 @@ static int get_push_ref_states(const struct ref *remote_refs, if (is_null_oid(&ref->new_oid)) { info->status = PUSH_STATUS_DELETE; - } else if (!oidcmp(&ref->old_oid, &ref->new_oid)) + } else if (oideq(&ref->old_oid, &ref->new_oid)) info->status = PUSH_STATUS_UPTODATE; else if (is_null_oid(&ref->old_oid)) info->status = PUSH_STATUS_CREATE; diff --git a/builtin/repack.c b/builtin/repack.c index 3c30e16ec5..45583683ee 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -8,12 +8,14 @@ #include "strbuf.h" #include "string-list.h" #include "argv-array.h" +#include "midx.h" #include "packfile.h" #include "object-store.h" static int delta_base_offset = 1; static int pack_kept_objects = -1; static int write_bitmaps; +static int use_delta_islands; static char *packdir, *packtmp; static const char *const git_repack_usage[] = { @@ -42,6 +44,10 @@ static int repack_config(const char *var, const char *value, void *cb) write_bitmaps = git_config_bool(var, value); return 0; } + if (!strcmp(var, "repack.usedeltaislands")) { + use_delta_islands = git_config_bool(var, value); + return 0; + } return git_default_config(var, value, cb); } @@ -229,8 +235,8 @@ 
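The is_head() helper above has to cope with reflog names that reach another worktree's HEAD, which arrive with a worktree prefix rather than as plain "HEAD". A standalone sketch of the stripping that parse_worktree_ref() performs for the two prefix forms (simplified; the real parser also validates and reports the worktree name):

#include <stdio.h>
#include <string.h>

/* Strip a "main-worktree/" or "worktrees/<id>/" prefix, if present. */
static const char *strip_worktree_prefix(const char *refname)
{
	const char *p;

	if (!strncmp(refname, "main-worktree/", 14))
		return refname + 14;
	if (!strncmp(refname, "worktrees/", 10) &&
	    (p = strchr(refname + 10, '/')))
		return p + 1;
	return refname;
}

static int is_head(const char *refname)
{
	return !strcmp(strip_worktree_prefix(refname), "HEAD");
}

int main(void)
{
	printf("%d %d %d\n",
	       is_head("HEAD"),
	       is_head("worktrees/hotfix/HEAD"),
	       is_head("refs/heads/master"));   /* 1 1 0 */
	return 0;
}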
static void repack_promisor_objects(const struct pack_objects_args *args, while (strbuf_getline_lf(&line, out) != EOF) { char *promisor_name; int fd; - if (line.len != 40) - die("repack: Expecting 40 character sha1 lines only from pack-objects."); + if (line.len != the_hash_algo->hexsz) + die("repack: Expecting full hex object ID lines only from pack-objects."); string_list_append(names, line.buf); /* @@ -280,6 +286,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix) int keep_unreachable = 0; struct string_list keep_pack_list = STRING_LIST_INIT_NODUP; int no_update_server_info = 0; + int midx_cleared = 0; struct pack_objects_args po_args = {NULL}; struct option builtin_repack_options[] = { @@ -301,6 +308,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix) N_("pass --local to git-pack-objects")), OPT_BOOL('b', "write-bitmap-index", &write_bitmaps, N_("write bitmap index")), + OPT_BOOL('i', "delta-islands", &use_delta_islands, + N_("pass --delta-islands to git-pack-objects")), OPT_STRING(0, "unpack-unreachable", &unpack_unreachable, N_("approxidate"), N_("with -A, do not loosen objects older than this")), OPT_BOOL('k', "keep-unreachable", &keep_unreachable, @@ -361,6 +370,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix) argv_array_push(&cmd.args, "--exclude-promisor-objects"); if (write_bitmaps) argv_array_push(&cmd.args, "--write-bitmap-index"); + if (use_delta_islands) + argv_array_push(&cmd.args, "--delta-islands"); if (pack_everything & ALL_INTO_ONE) { get_non_kept_pack_filenames(&existing_packs, &keep_pack_list); @@ -396,8 +407,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix) out = xfdopen(cmd.out, "r"); while (strbuf_getline_lf(&line, out) != EOF) { - if (line.len != 40) - die("repack: Expecting 40 character sha1 lines only from pack-objects."); + if (line.len != the_hash_algo->hexsz) + die("repack: Expecting full hex object ID lines only from pack-objects."); string_list_append(&names, line.buf); } fclose(out); @@ -418,6 +429,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix) for_each_string_list_item(item, &names) { for (ext = 0; ext < ARRAY_SIZE(exts); ext++) { char *fname, *fname_old; + + if (!midx_cleared) { + clear_midx_file(the_repository); + midx_cleared = 1; + } + fname = mkpathdup("%s/pack-%s%s", packdir, item->string, exts[ext].name); if (!file_exists(fname)) { @@ -517,14 +534,15 @@ int cmd_repack(int argc, const char **argv, const char *prefix) reprepare_packed_git(the_repository); if (delete_redundant) { + const int hexsz = the_hash_algo->hexsz; int opts = 0; string_list_sort(&names); for_each_string_list_item(item, &existing_packs) { char *sha1; size_t len = strlen(item->string); - if (len < 40) + if (len < hexsz) continue; - sha1 = item->string + len - 40; + sha1 = item->string + len - hexsz; if (!string_list_has_string(&names, sha1)) remove_redundant_pack(packdir, item->string); } @@ -542,6 +560,10 @@ int cmd_repack(int argc, const char **argv, const char *prefix) if (!no_update_server_info) update_server_info(0); remove_temporary_files(); + + if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) + write_midx_file(get_object_directory()); + string_list_clear(&names, 0); string_list_clear(&rollback, 0); string_list_clear(&existing_packs, 0); diff --git a/builtin/replace.c b/builtin/replace.c index 17868a92dc..a58b9c6d13 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -295,7 +295,7 @@ static int import_object(struct object_id *oid, enum object_type type, close(fd); return -1; 
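Several hunks above replace the hard-coded 40 with `the_hash_algo->hexsz`, so the same code keeps working when the object format is SHA-256 (64 hex digits) instead of SHA-1 (40). A standalone validator for a "full hex object ID" line in the same spirit, with the hex length passed as a parameter rather than taken from a hash-algorithm vtable:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Is 'line' exactly one full hex object ID for a hash whose hex length
 * is 'hexsz' (40 for SHA-1, 64 for SHA-256)? */
static int is_full_hex_oid(const char *line, size_t hexsz)
{
	size_t i, len = strlen(line);

	if (len != hexsz)
		return 0;
	for (i = 0; i < len; i++)
		if (!isxdigit((unsigned char)line[i]))
			return 0;
	return 1;
}

int main(void)
{
	const char *sha1 = "2ae75a8b5b7c7a9a2b1f5e3d4c6b7a8990123456";

	printf("%d %d\n",
	       is_full_hex_oid(sha1, 40),    /* 1 */
	       is_full_hex_oid(sha1, 64));   /* 0 */
	return 0;
}

The repack code above applies this kind of check to every line it reads back from pack-objects before trusting it as a pack name.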
} - if (index_fd(oid, fd, &st, type, NULL, flags) < 0) + if (index_fd(&the_index, oid, fd, &st, type, NULL, flags) < 0) return error(_("unable to write object to database")); /* index_fd close()s fd for us */ } @@ -343,7 +343,7 @@ static int edit_and_replace(const char *object_ref, int force, int raw) } free(tmpfile); - if (!oidcmp(&old_oid, &new_oid)) + if (oideq(&old_oid, &new_oid)) return error(_("new object is the same as the old one: '%s'"), oid_to_hex(&old_oid)); return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force); @@ -414,7 +414,7 @@ static int check_one_mergetag(struct commit *commit, if (get_oid(mergetag_data->argv[i], &oid) < 0) return error(_("not a valid object name: '%s'"), mergetag_data->argv[i]); - if (!oidcmp(&tag->tagged->oid, &oid)) + if (oideq(&tag->tagged->oid, &oid)) return 0; /* found */ } @@ -474,7 +474,7 @@ static int create_graft(int argc, const char **argv, int force, int gentle) strbuf_release(&buf); - if (!oidcmp(&old_oid, &new_oid)) { + if (oideq(&old_oid, &new_oid)) { if (gentle) { warning(_("graft for '%s' unnecessary"), oid_to_hex(&old_oid)); return 0; diff --git a/builtin/rerere.c b/builtin/rerere.c index 0bc40298c2..d78eeaed32 100644 --- a/builtin/rerere.c +++ b/builtin/rerere.c @@ -41,7 +41,8 @@ static int diff_two(const char *file1, const char *label1, xpp.flags = 0; memset(&xecfg, 0, sizeof(xecfg)); xecfg.ctxlen = 3; - ecb.outf = outf; + ecb.out_hunk = NULL; + ecb.out_line = outf; ret = xdi_diff(&minus, &plus, &xpp, &xecfg, &ecb); free(minus.ptr); @@ -70,15 +71,15 @@ int cmd_rerere(int argc, const char **argv, const char *prefix) flags = RERERE_NOAUTOUPDATE; if (argc < 1) - return rerere(flags); + return repo_rerere(the_repository, flags); if (!strcmp(argv[0], "forget")) { struct pathspec pathspec; if (argc < 2) - warning("'git rerere forget' without paths is deprecated"); + warning(_("'git rerere forget' without paths is deprecated")); parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_CWD, prefix, argv + 1); - return rerere_forget(&pathspec); + return rerere_forget(the_repository, &pathspec); } if (!strcmp(argv[0], "clear")) { @@ -91,7 +92,7 @@ int cmd_rerere(int argc, const char **argv, const char *prefix) for (i = 0; i < merge_rr.nr; i++) printf("%s\n", merge_rr.items[i].string); } else if (!strcmp(argv[0], "remaining")) { - rerere_remaining(&merge_rr); + rerere_remaining(the_repository, &merge_rr); for (i = 0; i < merge_rr.nr; i++) { if (merge_rr.items[i].util != RERERE_RESOLVED) printf("%s\n", merge_rr.items[i].string); @@ -107,7 +108,7 @@ int cmd_rerere(int argc, const char **argv, const char *prefix) const char *path = merge_rr.items[i].string; const struct rerere_id *id = merge_rr.items[i].util; if (diff_two(rerere_path(id, "preimage"), path, path, path)) - die("unable to generate diff for %s", rerere_path(id, NULL)); + die(_("unable to generate diff for '%s'"), rerere_path(id, NULL)); } } else usage_with_options(rerere_usage, options); diff --git a/builtin/reset.c b/builtin/reset.c index 11cd0dcb8c..58166964f8 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -25,6 +25,8 @@ #include "submodule.h" #include "submodule-config.h" +#define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000) + static const char * const git_reset_usage[] = { N_("git reset [--mixed | --soft | --hard | --merge | --keep] [-q] [<commit>]"), N_("git reset [-q] [<tree-ish>] [--] <paths>..."), @@ -159,6 +161,7 @@ static int read_from_tree(const struct pathspec *pathspec, opt.format_callback = update_index_from_diff; opt.format_callback_data = 
&intent_to_add; opt.flags.override_submodule_config = 1; + opt.repo = the_repository; if (do_diff_cache(tree_oid, &opt)) return 1; @@ -306,6 +309,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix) }; git_config(git_reset_config, NULL); + git_config_get_bool("reset.quiet", &quiet); argc = parse_options(argc, argv, prefix, options, git_reset_usage, PARSE_OPT_KEEP_DASHDASH); @@ -375,9 +379,19 @@ int cmd_reset(int argc, const char **argv, const char *prefix) int flags = quiet ? REFRESH_QUIET : REFRESH_IN_PORCELAIN; if (read_from_tree(&pathspec, &oid, intent_to_add)) return 1; - if (get_git_work_tree()) + if (!quiet && get_git_work_tree()) { + uint64_t t_begin, t_delta_in_ms; + + t_begin = getnanotime(); refresh_index(&the_index, flags, NULL, NULL, _("Unstaged changes after reset:")); + t_delta_in_ms = (getnanotime() - t_begin) / 1000000; + if (advice_reset_quiet_warning && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) { + printf(_("\nIt took %.2f seconds to enumerate unstaged changes after reset. You can\n" + "use '--quiet' to avoid this. Set the config setting reset.quiet to true\n" + "to make this the default.\n"), t_delta_in_ms / 1000.0); + } + } } else { int err = reset_index(&oid, reset_type, quiet); if (reset_type == KEEP && !err) diff --git a/builtin/rev-list.c b/builtin/rev-list.c index 41dd33c6a7..2880ed37e3 100644 --- a/builtin/rev-list.c +++ b/builtin/rev-list.c @@ -6,6 +6,7 @@ #include "list-objects.h" #include "list-objects-filter.h" #include "list-objects-filter-options.h" +#include "object.h" #include "object-store.h" #include "pack.h" #include "pack-bitmap.h" @@ -209,7 +210,8 @@ static inline void finish_object__ma(struct object *obj) */ switch (arg_missing_action) { case MA_ERROR: - die("missing blob object '%s'", oid_to_hex(&obj->oid)); + die("missing %s object '%s'", + type_name(obj->type), oid_to_hex(&obj->oid)); return; case MA_ALLOW_ANY: @@ -222,8 +224,8 @@ static inline void finish_object__ma(struct object *obj) case MA_ALLOW_PROMISOR: if (is_promisor_object(&obj->oid)) return; - die("unexpected missing blob object '%s'", - oid_to_hex(&obj->oid)); + die("unexpected missing %s object '%s'", + type_name(obj->type), oid_to_hex(&obj->oid)); return; default: @@ -235,7 +237,7 @@ static inline void finish_object__ma(struct object *obj) static int finish_object(struct object *obj, const char *name, void *cb_data) { struct rev_list_info *info = cb_data; - if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) { + if (!has_object_file(&obj->oid)) { finish_object__ma(obj); return 1; } @@ -370,10 +372,11 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) usage(rev_list_usage); git_config(git_default_config, NULL); - init_revisions(&revs, prefix); + repo_init_revisions(the_repository, &revs, prefix); revs.abbrev = DEFAULT_ABBREV; revs.allow_exclude_promisor_objects_opt = 1; revs.commit_format = CMIT_FMT_UNSPECIFIED; + revs.do_not_die_on_missing_tree = 1; /* * Scan the argument list before invoking setup_revisions(), so that we @@ -494,7 +497,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) if ((!revs.commits && reflog_walk_empty(revs.reflog_info) && (!(revs.tag_objects || revs.tree_objects || revs.blob_objects) && !revs.pending.nr) && - !revs.rev_input_given) || + !revs.rev_input_given && !revs.read_from_stdin) || revs.diff) usage(rev_list_usage); diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c index 0f09bbbf65..10d4dab894 100644 --- a/builtin/rev-parse.c +++ b/builtin/rev-parse.c @@ -14,6 +14,7 @@ #include 
"revision.h" #include "split-index.h" #include "submodule.h" +#include "commit-reach.h" #define DO_REVS 1 #define DO_NOREV 2 @@ -764,6 +765,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix) } if (!strcmp(arg, "--all")) { for_each_ref(show_reference, NULL); + clear_ref_exclusion(&ref_excludes); continue; } if (skip_prefix(arg, "--disambiguate=", &arg)) { diff --git a/builtin/revert.c b/builtin/revert.c index 9a66720cfc..c93393c89b 100644 --- a/builtin/revert.c +++ b/builtin/revert.c @@ -174,7 +174,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts) } else { struct setup_revision_opt s_r_opt; opts->revs = xmalloc(sizeof(*opts->revs)); - init_revisions(opts->revs, NULL); + repo_init_revisions(the_repository, opts->revs, NULL); opts->revs->no_walk = REVISION_WALK_NO_WALK_UNSORTED; if (argc < 2) usage_with_options(usage_str, options); diff --git a/builtin/rm.c b/builtin/rm.c index 2cbe89e0ae..17086d3d97 100644 --- a/builtin/rm.c +++ b/builtin/rm.c @@ -180,7 +180,7 @@ static int check_local_mod(struct object_id *head, int index_only) if (no_head || get_tree_entry(head, name, &oid, &mode) || ce->ce_mode != create_ce_mode(mode) - || oidcmp(&ce->oid, &oid)) + || !oideq(&ce->oid, &oid)) staged_changes = 1; /* diff --git a/builtin/shortlog.c b/builtin/shortlog.c index 3898a2c9c4..88f88e97b2 100644 --- a/builtin/shortlog.c +++ b/builtin/shortlog.c @@ -278,7 +278,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix) git_config(git_default_config, NULL); shortlog_init(&log); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); parse_options_start(&ctx, argc, argv, prefix, options, PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0); diff --git a/builtin/show-branch.c b/builtin/show-branch.c index 363cf8509a..934e514944 100644 --- a/builtin/show-branch.c +++ b/builtin/show-branch.c @@ -412,7 +412,7 @@ static int append_head_ref(const char *refname, const struct object_id *oid, /* If both heads/foo and tags/foo exists, get_sha1 would * get confused. */ - if (get_oid(refname + ofs, &tmp) || oidcmp(&tmp, oid)) + if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid)) ofs = 5; return append_ref(refname + ofs, oid, 0); } @@ -427,7 +427,7 @@ static int append_remote_ref(const char *refname, const struct object_id *oid, /* If both heads/foo and tags/foo exists, get_sha1 would * get confused. 
*/ - if (get_oid(refname + ofs, &tmp) || oidcmp(&tmp, oid)) + if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid)) ofs = 5; return append_ref(refname + ofs, oid, 0); } @@ -485,7 +485,7 @@ static void snarf_refs(int head, int remotes) static int rev_is_head(const char *head, const char *name, unsigned char *head_sha1, unsigned char *sha1) { - if (!head || (head_sha1 && sha1 && hashcmp(head_sha1, sha1))) + if (!head || (head_sha1 && sha1 && !hasheq(head_sha1, sha1))) return 0; skip_prefix(head, "refs/heads/", &head); if (!skip_prefix(name, "refs/heads/", &name)) @@ -604,6 +604,7 @@ static int parse_reflog_param(const struct option *opt, const char *arg, { char *ep; const char **base = (const char **)opt->value; + BUG_ON_OPT_NEG(unset); if (!arg) arg = ""; reflog = strtoul(arg, &ep, 10); @@ -674,7 +675,7 @@ int cmd_show_branch(int ac, const char **av, const char *prefix) { OPTION_CALLBACK, 'g', "reflog", &reflog_base, N_("<n>[,<base>]"), N_("show <n> most recent ref-log entries starting at " "base"), - PARSE_OPT_OPTARG, + PARSE_OPT_OPTARG | PARSE_OPT_NONEG, parse_reflog_param }, OPT_END() }; diff --git a/builtin/show-ref.c b/builtin/show-ref.c index 2f13f1316f..ed888ffa48 100644 --- a/builtin/show-ref.c +++ b/builtin/show-ref.c @@ -151,6 +151,7 @@ static int hash_callback(const struct option *opt, const char *arg, int unset) static int exclude_existing_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); exclude_arg = 1; *(const char **)opt->value = arg; return 0; diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index 5e6f2db4cd..d38113a31a 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -584,6 +584,26 @@ static int module_foreach(int argc, const char **argv, const char *prefix) return 0; } +static char *compute_submodule_clone_url(const char *rel_url) +{ + char *remoteurl, *relurl; + char *remote = get_default_remote(); + struct strbuf remotesb = STRBUF_INIT; + + strbuf_addf(&remotesb, "remote.%s.url", remote); + if (git_config_get_string(remotesb.buf, &remoteurl)) { + warning(_("could not look up configuration '%s'. Assuming this repository is its own authoritative upstream."), remotesb.buf); + remoteurl = xgetcwd(); + } + relurl = relative_url(remoteurl, rel_url, NULL); + + free(remote); + free(remoteurl); + strbuf_release(&remotesb); + + return relurl; +} + struct init_cb { const char *prefix; unsigned int flags; @@ -634,21 +654,9 @@ static void init_submodule(const char *path, const char *prefix, /* Possibly a url relative to parent */ if (starts_with_dot_dot_slash(url) || starts_with_dot_slash(url)) { - char *remoteurl, *relurl; - char *remote = get_default_remote(); - struct strbuf remotesb = STRBUF_INIT; - strbuf_addf(&remotesb, "remote.%s.url", remote); - free(remote); - - if (git_config_get_string(remotesb.buf, &remoteurl)) { - warning(_("could not lookup configuration '%s'. 
Assuming this repository is its own authoritative upstream."), remotesb.buf); - remoteurl = xgetcwd(); - } - relurl = relative_url(remoteurl, url, NULL); - strbuf_release(&remotesb); - free(remoteurl); - free(url); - url = relurl; + char *oldurl = url; + url = compute_submodule_clone_url(oldurl); + free(oldurl); } if (git_config_set_gently(sb.buf, url)) @@ -792,7 +800,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid, path, NULL); git_config(git_diff_basic_config, NULL); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); rev.abbrev = 0; diff_files_args.argc = setup_revisions(diff_files_args.argc, diff_files_args.argv, @@ -1444,6 +1452,70 @@ static int module_clone(int argc, const char **argv, const char *prefix) return 0; } +static void determine_submodule_update_strategy(struct repository *r, + int just_cloned, + const char *path, + const char *update, + struct submodule_update_strategy *out) +{ + const struct submodule *sub = submodule_from_path(r, &null_oid, path); + char *key; + const char *val; + + key = xstrfmt("submodule.%s.update", sub->name); + + if (update) { + if (parse_submodule_update_strategy(update, out) < 0) + die(_("Invalid update mode '%s' for submodule path '%s'"), + update, path); + } else if (!repo_config_get_string_const(r, key, &val)) { + if (parse_submodule_update_strategy(val, out) < 0) + die(_("Invalid update mode '%s' configured for submodule path '%s'"), + val, path); + } else if (sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) { + out->type = sub->update_strategy.type; + out->command = sub->update_strategy.command; + } else + out->type = SM_UPDATE_CHECKOUT; + + if (just_cloned && + (out->type == SM_UPDATE_MERGE || + out->type == SM_UPDATE_REBASE || + out->type == SM_UPDATE_NONE)) + out->type = SM_UPDATE_CHECKOUT; + + free(key); +} + +static int module_update_module_mode(int argc, const char **argv, const char *prefix) +{ + const char *path, *update = NULL; + int just_cloned; + struct submodule_update_strategy update_strategy = { .type = SM_UPDATE_CHECKOUT }; + + if (argc < 3 || argc > 4) + die("submodule--helper update-module-clone expects <just-cloned> <path> [<update>]"); + + just_cloned = git_config_int("just_cloned", argv[1]); + path = argv[2]; + + if (argc == 4) + update = argv[3]; + + determine_submodule_update_strategy(the_repository, + just_cloned, path, update, + &update_strategy); + fputs(submodule_strategy_to_string(&update_strategy), stdout); + + return 0; +} + +struct update_clone_data { + const struct submodule *sub; + struct object_id oid; + unsigned just_cloned; +}; + struct submodule_update_clone { /* index into 'list', the list of submodules to look into for cloning */ int current; @@ -1463,8 +1535,9 @@ struct submodule_update_clone { const char *recursive_prefix; const char *prefix; - /* Machine-readable status lines to be consumed by git-submodule.sh */ - struct string_list projectlines; + /* to be consumed by git-submodule.sh */ + struct update_clone_data *update_clone; + int update_clone_nr; int update_clone_alloc; /* If we want to stop as fast as possible and return an error */ unsigned quickstop : 1; @@ -1472,11 +1545,13 @@ struct submodule_update_clone { /* failed clones to be retried again */ const struct cache_entry **failed_clones; int failed_clones_nr, failed_clones_alloc; + + int max_jobs; }; #define SUBMODULE_UPDATE_CLONE_INIT {0, MODULE_LIST_INIT, 0, \ SUBMODULE_UPDATE_STRATEGY_INIT, 0, 0, -1, STRING_LIST_INIT_DUP, 0, \ NULL, NULL, NULL, \ - 
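
The submodule--helper hunks above extract the remote.<name>.url lookup and the relative_url() call into compute_submodule_clone_url(), keeping the existing fallback (warn and use the current working directory when the remote URL is not configured) so that update-clone below can reuse it when submodule.<name>.url is unset and .gitmodules records a relative URL. A rough sketch of that fallback shape only; lookup_remote_url() and resolve_relative() are hypothetical stand-ins and do not reproduce Git's relative_url() semantics:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	/* Hypothetical stand-in: returns NULL when remote.<name>.url is unset. */
	static char *lookup_remote_url(void)
	{
		return NULL;	/* pretend the configuration key is missing */
	}

	/* Hypothetical stand-in for relative_url(): just join the two parts. */
	static char *resolve_relative(const char *base, const char *rel)
	{
		size_t n = strlen(base) + strlen(rel) + 2;
		char *out = malloc(n);

		snprintf(out, n, "%s/%s", base, rel);
		return out;
	}

	static char *compute_clone_url(const char *rel_url)
	{
		char *remoteurl = lookup_remote_url();
		char *resolved;

		if (!remoteurl) {
			fprintf(stderr, "warning: no remote URL; using cwd as upstream\n");
			remoteurl = getcwd(NULL, 0);	/* xgetcwd() in the real code */
		}
		resolved = resolve_relative(remoteurl, rel_url);
		free(remoteurl);
		return resolved;
	}

	int main(void)
	{
		char *url = compute_clone_url("../sub.git");

		printf("%s\n", url);
		free(url);
		return 0;
	}
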
STRING_LIST_INIT_DUP, 0, NULL, 0, 0} + NULL, 0, 0, 0, NULL, 0, 0, 0} static void next_submodule_warn_missing(struct submodule_update_clone *suc, @@ -1515,6 +1590,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, struct strbuf sb = STRBUF_INIT; const char *displaypath = NULL; int needs_cloning = 0; + int need_free_url = 0; if (ce_stage(ce)) { if (suc->recursive_prefix) @@ -1563,18 +1639,25 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, strbuf_reset(&sb); strbuf_addf(&sb, "submodule.%s.url", sub->name); - if (repo_config_get_string_const(the_repository, sb.buf, &url)) - url = sub->url; + if (repo_config_get_string_const(the_repository, sb.buf, &url)) { + if (starts_with_dot_slash(sub->url) || + starts_with_dot_dot_slash(sub->url)) { + url = compute_submodule_clone_url(sub->url); + need_free_url = 1; + } else + url = sub->url; + } strbuf_reset(&sb); strbuf_addf(&sb, "%s/.git", ce->name); needs_cloning = !file_exists(sb.buf); - strbuf_reset(&sb); - strbuf_addf(&sb, "%06o %s %d %d\t%s\n", ce->ce_mode, - oid_to_hex(&ce->oid), ce_stage(ce), - needs_cloning, ce->name); - string_list_append(&suc->projectlines, sb.buf); + ALLOC_GROW(suc->update_clone, suc->update_clone_nr + 1, + suc->update_clone_alloc); + oidcpy(&suc->update_clone[suc->update_clone_nr].oid, &ce->oid); + suc->update_clone[suc->update_clone_nr].just_cloned = needs_cloning; + suc->update_clone[suc->update_clone_nr].sub = sub; + suc->update_clone_nr++; if (!needs_cloning) goto cleanup; @@ -1609,6 +1692,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, cleanup: strbuf_reset(&displaypath_sb); strbuf_reset(&sb); + if (need_free_url) + free((void*)url); return needs_cloning; } @@ -1715,11 +1800,44 @@ static int git_update_clone_config(const char *var, const char *value, return 0; } +static void update_submodule(struct update_clone_data *ucd) +{ + fprintf(stdout, "dummy %s %d\t%s\n", + oid_to_hex(&ucd->oid), + ucd->just_cloned, + ucd->sub->path); +} + +static int update_submodules(struct submodule_update_clone *suc) +{ + int i; + + run_processes_parallel(suc->max_jobs, + update_clone_get_next_task, + update_clone_start_failure, + update_clone_task_finished, + suc); + + /* + * We saved the output and put it out all at once now. + * That means: + * - the listener does not have to interleave their (checkout) + * work with our fetching. The writes involved in a + * checkout involve more straightforward sequential I/O. + * - the listener can avoid doing any work if fetching failed. 
+ */ + if (suc->quickstop) + return 1; + + for (i = 0; i < suc->update_clone_nr; i++) + update_submodule(&suc->update_clone[i]); + + return 0; +} + static int update_clone(int argc, const char **argv, const char *prefix) { const char *update = NULL; - int max_jobs = 1; - struct string_list_item *item; struct pathspec pathspec; struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT; @@ -1741,7 +1859,7 @@ static int update_clone(int argc, const char **argv, const char *prefix) OPT_STRING(0, "depth", &suc.depth, "<depth>", N_("Create a shallow clone truncated to the " "specified number of revisions")), - OPT_INTEGER('j', "jobs", &max_jobs, + OPT_INTEGER('j', "jobs", &suc.max_jobs, N_("parallel jobs")), OPT_BOOL(0, "recommend-shallow", &suc.recommend_shallow, N_("whether the initial clone should follow the shallow recommendation")), @@ -1757,8 +1875,8 @@ static int update_clone(int argc, const char **argv, const char *prefix) }; suc.prefix = prefix; - update_clone_config_from_gitmodules(&max_jobs); - git_config(git_update_clone_config, &max_jobs); + update_clone_config_from_gitmodules(&suc.max_jobs); + git_config(git_update_clone_config, &suc.max_jobs); argc = parse_options(argc, argv, prefix, module_update_clone_options, git_submodule_helper_usage, 0); @@ -1773,27 +1891,7 @@ static int update_clone(int argc, const char **argv, const char *prefix) if (pathspec.nr) suc.warn_if_uninitialized = 1; - run_processes_parallel(max_jobs, - update_clone_get_next_task, - update_clone_start_failure, - update_clone_task_finished, - &suc); - - /* - * We saved the output and put it out all at once now. - * That means: - * - the listener does not have to interleave their (checkout) - * work with our fetching. The writes involved in a - * checkout involve more straightforward sequential I/O. - * - the listener can avoid doing any work if fetching failed. 
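
With the change above, update-clone no longer formats one status line per submodule into a string_list; it records a struct update_clone_data (OID, just-cloned flag, submodule) in a growable array and lets update_submodules() walk that array once every parallel clone has finished, for the reasons given in the comment it carries along. A minimal sketch of that collect-then-emit idiom using a plain realloc-grown array in place of ALLOC_GROW(); struct result and push_result() are made up for the example, and error handling is omitted:

	#include <stdio.h>
	#include <stdlib.h>

	struct result {
		int just_cloned;
		const char *path;
	};

	struct result_list {
		struct result *items;
		size_t nr, alloc;
	};

	/* Grow-on-demand append, the job ALLOC_GROW() does in git. */
	static void push_result(struct result_list *list, struct result r)
	{
		if (list->nr == list->alloc) {
			list->alloc = list->alloc ? list->alloc * 2 : 8;
			list->items = realloc(list->items,
					      list->alloc * sizeof(*list->items));
		}
		list->items[list->nr++] = r;
	}

	int main(void)
	{
		struct result_list list = { NULL, 0, 0 };
		struct result a = { 1, "sub/one" }, b = { 0, "sub/two" };
		size_t i;

		/* ... collected while the parallel workers run ... */
		push_result(&list, a);
		push_result(&list, b);

		/* ... emitted in one go once every worker has finished. */
		for (i = 0; i < list.nr; i++)
			printf("%d\t%s\n", list.items[i].just_cloned,
			       list.items[i].path);
		free(list.items);
		return 0;
	}
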
- */ - if (suc.quickstop) - return 1; - - for_each_string_list_item(item, &suc.projectlines) - fprintf(stdout, "%s", item->string); - - return 0; + return update_submodules(&suc); } static int resolve_relative_path(int argc, const char **argv, const char *prefix) @@ -1939,6 +2037,45 @@ static int push_check(int argc, const char **argv, const char *prefix) return 0; } +static int ensure_core_worktree(int argc, const char **argv, const char *prefix) +{ + const struct submodule *sub; + const char *path; + char *cw; + struct repository subrepo; + + if (argc != 2) + BUG("submodule--helper connect-gitdir-workingtree <name> <path>"); + + path = argv[1]; + + sub = submodule_from_path(the_repository, &null_oid, path); + if (!sub) + BUG("We could get the submodule handle before?"); + + if (repo_submodule_init(&subrepo, the_repository, path)) + die(_("could not get a repository handle for submodule '%s'"), path); + + if (!repo_config_get_string(&subrepo, "core.worktree", &cw)) { + char *cfg_file, *abs_path; + const char *rel_path; + struct strbuf sb = STRBUF_INIT; + + cfg_file = repo_git_path(&subrepo, "config"); + + abs_path = absolute_pathdup(path); + rel_path = relative_path(abs_path, subrepo.gitdir, &sb); + + git_config_set_in_file(cfg_file, "core.worktree", rel_path); + + free(cfg_file); + free(abs_path); + strbuf_release(&sb); + } + + return 0; +} + static int absorb_git_dirs(int argc, const char **argv, const char *prefix) { int i; @@ -2004,6 +2141,45 @@ static int check_name(int argc, const char **argv, const char *prefix) return 0; } +static int module_config(int argc, const char **argv, const char *prefix) +{ + enum { + CHECK_WRITEABLE = 1 + } command = 0; + + struct option module_config_options[] = { + OPT_CMDMODE(0, "check-writeable", &command, + N_("check if it is safe to write to the .gitmodules file"), + CHECK_WRITEABLE), + OPT_END() + }; + const char *const git_submodule_helper_usage[] = { + N_("git submodule--helper config name [value]"), + N_("git submodule--helper config --check-writeable"), + NULL + }; + + argc = parse_options(argc, argv, prefix, module_config_options, + git_submodule_helper_usage, PARSE_OPT_KEEP_ARGV0); + + if (argc == 1 && command == CHECK_WRITEABLE) + return is_writing_gitmodules_ok() ? 
0 : -1; + + /* Equivalent to ACTION_GET in builtin/config.c */ + if (argc == 2) + return print_config_from_gitmodules(the_repository, argv[1]); + + /* Equivalent to ACTION_SET in builtin/config.c */ + if (argc == 3) { + if (!is_writing_gitmodules_ok()) + die(_("please make sure that the .gitmodules file is in the working tree")); + + return config_set_in_gitmodules_file_gently(argv[1], argv[2]); + } + + usage_with_options(git_submodule_helper_usage, module_config_options); +} + #define SUPPORT_SUPER_PREFIX (1<<0) struct cmd_struct { @@ -2016,7 +2192,9 @@ static struct cmd_struct commands[] = { {"list", module_list, 0}, {"name", module_name, 0}, {"clone", module_clone, 0}, + {"update-module-mode", module_update_module_mode, 0}, {"update-clone", update_clone, 0}, + {"ensure-core-worktree", ensure_core_worktree, 0}, {"relative-path", resolve_relative_path, 0}, {"resolve-relative-url", resolve_relative_url, 0}, {"resolve-relative-url-test", resolve_relative_url_test, 0}, @@ -2031,6 +2209,7 @@ static struct cmd_struct commands[] = { {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX}, {"is-active", is_active, 0}, {"check-name", check_name, 0}, + {"config", module_config, 0}, }; int cmd_submodule__helper(int argc, const char **argv, const char *prefix) diff --git a/builtin/tag.c b/builtin/tag.c index 9a19ffb49f..02f6bd1279 100644 --- a/builtin/tag.c +++ b/builtin/tag.c @@ -338,6 +338,8 @@ static int parse_msg_arg(const struct option *opt, const char *arg, int unset) { struct msg_arg *msg = opt->value; + BUG_ON_OPT_NEG(unset); + if (!arg) return -1; if (msg->buf.len) @@ -390,8 +392,8 @@ int cmd_tag(int argc, const char **argv, const char *prefix) OPT_GROUP(N_("Tag creation options")), OPT_BOOL('a', "annotate", &annotate, N_("annotated tag, needs a message")), - OPT_CALLBACK('m', "message", &msg, N_("message"), - N_("tag message"), parse_msg_arg), + { OPTION_CALLBACK, 'm', "message", &msg, N_("message"), + N_("tag message"), PARSE_OPT_NONEG, parse_msg_arg }, OPT_FILENAME('F', "file", &msgfile, N_("read message from file")), OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")), OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")), @@ -559,7 +561,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix) ref_transaction_commit(transaction, &err)) die("%s", err.buf); ref_transaction_free(transaction); - if (force && !is_null_oid(&prev) && oidcmp(&prev, &object)) + if (force && !is_null_oid(&prev) && !oideq(&prev, &object)) printf(_("Updated tag '%s' (was %s)\n"), tag, find_unique_abbrev(&prev, DEFAULT_ABBREV)); diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c index 30d9413b4b..80478808b3 100644 --- a/builtin/unpack-objects.c +++ b/builtin/unpack-objects.c @@ -303,7 +303,7 @@ static void added_object(unsigned nr, enum object_type type, struct delta_info *info; while ((info = *p) != NULL) { - if (!oidcmp(&info->base_oid, &obj_list[nr].oid) || + if (oideq(&info->base_oid, &obj_list[nr].oid) || info->base_offset == obj_list[nr].offset) { *p = info->next; p = &delta_list; @@ -579,7 +579,7 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix) if (fsck_finish(&fsck_options)) die(_("fsck error in pack objects")); } - if (hashcmp(fill(the_hash_algo->rawsz), oid.hash)) + if (!hasheq(fill(the_hash_algo->rawsz), oid.hash)) die("final sha1 did not match"); use(the_hash_algo->rawsz); diff --git a/builtin/update-index.c b/builtin/update-index.c index fe84003b4f..31e7cce301 100644 --- a/builtin/update-index.c +++ 
b/builtin/update-index.c @@ -282,7 +282,7 @@ static int add_one_path(const struct cache_entry *old, const char *path, int len fill_stat_cache_info(ce, st); ce->ce_mode = ce_mode_from_stat(old, st->st_mode); - if (index_path(&ce->oid, path, st, + if (index_path(&the_index, &ce->oid, path, st, info_only ? 0 : HASH_WRITE_OBJECT)) { discard_cache_entry(ce); return -1; @@ -669,7 +669,7 @@ static int unresolve_one(const char *path) ret = -1; goto free_return; } - if (!oidcmp(&ce_2->oid, &ce_3->oid) && + if (oideq(&ce_2->oid, &ce_3->oid) && ce_2->ce_mode == ce_3->ce_mode) { fprintf(stderr, "%s: identical in both, skipping.\n", path); @@ -754,7 +754,7 @@ static int do_reupdate(int ac, const char **av, old = read_one_ent(NULL, &head_oid, ce->name, ce_namelen(ce), 0); if (old && ce->ce_mode == old->ce_mode && - !oidcmp(&ce->oid, &old->oid)) { + oideq(&ce->oid, &old->oid)) { discard_cache_entry(old); continue; /* unchanged */ } @@ -782,7 +782,7 @@ struct refresh_params { static int refresh(struct refresh_params *o, unsigned int flag) { setup_work_tree(); - read_cache_preload(NULL); + read_cache(); *o->has_errors |= refresh_cache(o->flags | flag); return 0; } @@ -790,12 +790,16 @@ static int refresh(struct refresh_params *o, unsigned int flag) static int refresh_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); return refresh(opt->value, 0); } static int really_refresh_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); return refresh(opt->value, REFRESH_REALLY); } @@ -803,6 +807,7 @@ static int chmod_callback(const struct option *opt, const char *arg, int unset) { char *flip = opt->value; + BUG_ON_OPT_NEG(unset); if ((arg[0] != '-' && arg[0] != '+') || arg[1] != 'x' || arg[2]) return error("option 'chmod' expects \"+x\" or \"-x\""); *flip = arg[0]; @@ -812,6 +817,8 @@ static int chmod_callback(const struct option *opt, static int resolve_undo_clear_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); + BUG_ON_OPT_ARG(arg); resolve_undo_clear(); return 0; } @@ -847,6 +854,8 @@ static int cacheinfo_callback(struct parse_opt_ctx_t *ctx, unsigned int mode; const char *path; + BUG_ON_OPT_NEG(unset); + if (!parse_new_style_cacheinfo(ctx->argv[1], &mode, &oid, &path)) { if (add_cacheinfo(mode, &oid, path, 0)) die("git update-index: --cacheinfo cannot add %s", path); @@ -869,6 +878,8 @@ static int stdin_cacheinfo_callback(struct parse_opt_ctx_t *ctx, { int *nul_term_line = opt->value; + BUG_ON_OPT_NEG(unset); + if (ctx->argc != 1) return error("option '%s' must be the last argument", opt->long_name); allow_add = allow_replace = allow_remove = 1; @@ -881,6 +892,8 @@ static int stdin_callback(struct parse_opt_ctx_t *ctx, { int *read_from_stdin = opt->value; + BUG_ON_OPT_NEG(unset); + if (ctx->argc != 1) return error("option '%s' must be the last argument", opt->long_name); *read_from_stdin = 1; @@ -888,11 +901,13 @@ static int stdin_callback(struct parse_opt_ctx_t *ctx, } static int unresolve_callback(struct parse_opt_ctx_t *ctx, - const struct option *opt, int flags) + const struct option *opt, int unset) { int *has_errors = opt->value; const char *prefix = startup_info->prefix; + BUG_ON_OPT_NEG(unset); + /* consume remaining arguments. */ *has_errors = do_unresolve(ctx->argc, ctx->argv, prefix, prefix ? 
strlen(prefix) : 0); @@ -905,11 +920,13 @@ static int unresolve_callback(struct parse_opt_ctx_t *ctx, } static int reupdate_callback(struct parse_opt_ctx_t *ctx, - const struct option *opt, int flags) + const struct option *opt, int unset) { int *has_errors = opt->value; const char *prefix = startup_info->prefix; + BUG_ON_OPT_NEG(unset); + /* consume remaining arguments. */ setup_work_tree(); *has_errors = do_reupdate(ctx->argc, ctx->argv, diff --git a/builtin/upload-archive.c b/builtin/upload-archive.c index 25d9116356..018879737a 100644 --- a/builtin/upload-archive.c +++ b/builtin/upload-archive.c @@ -28,6 +28,8 @@ int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix) if (!enter_repo(argv[1], 0)) die("'%s' does not appear to be a git repository", argv[1]); + init_archivers(); + /* put received options in sent_argv[] */ argv_array_push(&sent_argv, "git-upload-archive"); for (;;) { diff --git a/builtin/worktree.c b/builtin/worktree.c index 41e7714396..5e84026177 100644 --- a/builtin/worktree.c +++ b/builtin/worktree.c @@ -47,6 +47,26 @@ static int git_worktree_config(const char *var, const char *value, void *cb) return git_default_config(var, value, cb); } +static int delete_git_dir(const char *id) +{ + struct strbuf sb = STRBUF_INIT; + int ret; + + strbuf_addstr(&sb, git_common_path("worktrees/%s", id)); + ret = remove_dir_recursively(&sb, 0); + if (ret < 0 && errno == ENOTDIR) + ret = unlink(sb.buf); + if (ret) + error_errno(_("failed to delete '%s'"), sb.buf); + strbuf_release(&sb); + return ret; +} + +static void delete_worktrees_dir_if_empty(void) +{ + rmdir(git_path("worktrees")); /* ignore failed removal */ +} + static int prune_worktree(const char *id, struct strbuf *reason) { struct stat st; @@ -116,10 +136,8 @@ static int prune_worktree(const char *id, struct strbuf *reason) static void prune_worktrees(void) { struct strbuf reason = STRBUF_INIT; - struct strbuf path = STRBUF_INIT; DIR *dir = opendir(git_path("worktrees")); struct dirent *d; - int ret; if (!dir) return; while ((d = readdir(dir)) != NULL) { @@ -132,18 +150,12 @@ static void prune_worktrees(void) printf("%s\n", reason.buf); if (show_only) continue; - git_path_buf(&path, "worktrees/%s", d->d_name); - ret = remove_dir_recursively(&path, 0); - if (ret < 0 && errno == ENOTDIR) - ret = unlink(path.buf); - if (ret) - error_errno(_("failed to remove '%s'"), path.buf); + delete_git_dir(d->d_name); } closedir(dir); if (!show_only) - rmdir(git_path("worktrees")); + delete_worktrees_dir_if_empty(); strbuf_release(&reason); - strbuf_release(&path); } static int prune(int ac, const char **av, const char *prefix) @@ -212,6 +224,43 @@ static const char *worktree_basename(const char *path, int *olen) return name; } +static void validate_worktree_add(const char *path, const struct add_opts *opts) +{ + struct worktree **worktrees; + struct worktree *wt; + int locked; + + if (file_exists(path) && !is_empty_dir(path)) + die(_("'%s' already exists"), path); + + worktrees = get_worktrees(0); + /* + * find_worktree()'s suffix matching may undesirably find the main + * rather than a linked worktree (for instance, when the basenames + * of the main worktree and the one being created are the same). + * We're only interested in linked worktrees, so skip the main + * worktree with +1. 
+ */ + wt = find_worktree(worktrees + 1, NULL, path); + if (!wt) + goto done; + + locked = !!worktree_lock_reason(wt); + if ((!locked && opts->force) || (locked && opts->force > 1)) { + if (delete_git_dir(wt->id)) + die(_("unable to re-add worktree '%s'"), path); + goto done; + } + + if (locked) + die(_("'%s' is a missing but locked worktree;\nuse 'add -f -f' to override, or 'unlock' and 'prune' or 'remove' to clear"), path); + else + die(_("'%s' is a missing but already registered worktree;\nuse 'add -f' to override, or 'prune' or 'remove' to clear"), path); + +done: + free_worktrees(worktrees); +} + static int add_worktree(const char *path, const char *refname, const struct add_opts *opts) { @@ -226,8 +275,7 @@ static int add_worktree(const char *path, const char *refname, struct commit *commit = NULL; int is_branch = 0; - if (file_exists(path) && !is_empty_dir(path)) - die(_("'%s' already exists"), path); + validate_worktree_add(path, opts); /* is 'refname' a branch or commit? */ if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) && @@ -634,7 +682,7 @@ static int lock_worktree(int ac, const char **av, const char *prefix) if (is_main_worktree(wt)) die(_("The main working tree cannot be locked or unlocked")); - old_reason = is_worktree_locked(wt); + old_reason = worktree_lock_reason(wt); if (old_reason) { if (*old_reason) die(_("'%s' is already locked, reason: %s"), @@ -666,7 +714,7 @@ static int unlock_worktree(int ac, const char **av, const char *prefix) die(_("'%s' is not a working tree"), av[0]); if (is_main_worktree(wt)) die(_("The main working tree cannot be locked or unlocked")); - if (!is_worktree_locked(wt)) + if (!worktree_lock_reason(wt)) die(_("'%s' is not locked"), av[0]); ret = unlink_or_warn(git_common_path("worktrees/%s/locked", wt->id)); free_worktrees(worktrees); @@ -697,13 +745,17 @@ static void validate_no_submodules(const struct worktree *wt) static int move_worktree(int ac, const char **av, const char *prefix) { + int force = 0; struct option options[] = { + OPT__FORCE(&force, + N_("force move even if worktree is dirty or locked"), + PARSE_OPT_NOCOMPLETE), OPT_END() }; struct worktree **worktrees, *wt; struct strbuf dst = STRBUF_INIT; struct strbuf errmsg = STRBUF_INIT; - const char *reason; + const char *reason = NULL; char *path; ac = parse_options(ac, av, prefix, options, worktree_usage, 0); @@ -734,12 +786,13 @@ static int move_worktree(int ac, const char **av, const char *prefix) validate_no_submodules(wt); - reason = is_worktree_locked(wt); + if (force < 2) + reason = worktree_lock_reason(wt); if (reason) { if (*reason) - die(_("cannot move a locked working tree, lock reason: %s"), + die(_("cannot move a locked working tree, lock reason: %s\nuse 'move -f -f' to override or unlock first"), reason); - die(_("cannot move a locked working tree")); + die(_("cannot move a locked working tree;\nuse 'move -f -f' to override or unlock first")); } if (validate_worktree(wt, &errmsg, 0)) die(_("validation failed, cannot move working tree: %s"), @@ -822,32 +875,18 @@ static int delete_git_work_tree(struct worktree *wt) return ret; } -static int delete_git_dir(struct worktree *wt) -{ - struct strbuf sb = STRBUF_INIT; - int ret = 0; - - strbuf_addstr(&sb, git_common_path("worktrees/%s", wt->id)); - if (remove_dir_recursively(&sb, 0)) { - error_errno(_("failed to delete '%s'"), sb.buf); - ret = -1; - } - strbuf_release(&sb); - return ret; -} - static int remove_worktree(int ac, const char **av, const char *prefix) { int force = 0; struct option options[] = { 
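
git worktree move and git worktree remove above gain OPT__FORCE and only consult worktree_lock_reason() when fewer than two -f flags were given, so a single -f still refuses to touch a locked worktree while -f -f overrides the lock; validate_worktree_add() applies the same two-level rule when re-adding a path whose worktree has gone missing. A small sketch of that gate; lock_reason() and try_remove() are stubs invented for the example:

	#include <stdio.h>

	/* Stub: pretend the worktree is locked with this reason. */
	static const char *lock_reason(void)
	{
		return "on a USB drive that is not mounted";
	}

	static int try_remove(int force)
	{
		const char *reason = NULL;

		if (force < 2)			/* one -f is not enough */
			reason = lock_reason();
		if (reason) {
			fprintf(stderr,
				"cannot remove a locked working tree, reason: %s\n"
				"use '-f -f' to override or unlock first\n", reason);
			return -1;
		}
		printf("removing (force=%d)\n", force);
		return 0;
	}

	int main(void)
	{
		try_remove(1);	/* refused: the lock reason is still checked */
		try_remove(2);	/* -f -f: the lock check is skipped */
		return 0;
	}
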
OPT__FORCE(&force, - N_("force removing even if the worktree is dirty"), + N_("force removal even if worktree is dirty or locked"), PARSE_OPT_NOCOMPLETE), OPT_END() }; struct worktree **worktrees, *wt; struct strbuf errmsg = STRBUF_INIT; - const char *reason; + const char *reason = NULL; int ret = 0; ac = parse_options(ac, av, prefix, options, worktree_usage, 0); @@ -860,12 +899,13 @@ static int remove_worktree(int ac, const char **av, const char *prefix) die(_("'%s' is not a working tree"), av[0]); if (is_main_worktree(wt)) die(_("'%s' is a main working tree"), av[0]); - reason = is_worktree_locked(wt); + if (force < 2) + reason = worktree_lock_reason(wt); if (reason) { if (*reason) - die(_("cannot remove a locked working tree, lock reason: %s"), + die(_("cannot remove a locked working tree, lock reason: %s\nuse 'remove -f -f' to override or unlock first"), reason); - die(_("cannot remove a locked working tree")); + die(_("cannot remove a locked working tree;\nuse 'remove -f -f' to override or unlock first")); } if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK)) die(_("validation failed, cannot remove working tree: %s"), @@ -882,7 +922,8 @@ static int remove_worktree(int ac, const char **av, const char *prefix) * continue on even if ret is non-zero, there's no going back * from here. */ - ret |= delete_git_dir(wt); + ret |= delete_git_dir(wt->id); + delete_worktrees_dir_if_empty(); free_worktrees(worktrees); return ret; diff --git a/bulk-checkin.c b/bulk-checkin.c index 9f3b644811..409ecb566b 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -72,7 +72,7 @@ static int already_written(struct bulk_checkin_state *state, struct object_id *o /* Might want to keep the list sorted */ for (i = 0; i < state->nr_written; i++) - if (!oidcmp(&state->written[i]->oid, oid)) + if (oideq(&state->written[i]->oid, oid)) return 1; /* This is a new object we need to keep */ @@ -140,7 +140,7 @@ int verify_bundle(struct bundle_header *header, int verbose) int i, ret = 0, req_nr; const char *message = _("Repository lacks these prerequisite commits:"); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); for (i = 0; i < p->nr; i++) { struct ref_list_entry *e = p->list + i; struct object *o = parse_object(the_repository, &e->oid); @@ -243,7 +243,7 @@ out: } -/* Write the pack data to bundle_fd, then close it if it is > 1. */ +/* Write the pack data to bundle_fd */ static int write_pack_data(int bundle_fd, struct rev_info *revs) { struct child_process pack_objects = CHILD_PROCESS_INIT; @@ -256,6 +256,20 @@ static int write_pack_data(int bundle_fd, struct rev_info *revs) pack_objects.in = -1; pack_objects.out = bundle_fd; pack_objects.git_cmd = 1; + + /* + * start_command() will close our descriptor if it's >1. Duplicate it + * to avoid surprising the caller. + */ + if (pack_objects.out > 1) { + pack_objects.out = dup(pack_objects.out); + if (pack_objects.out < 0) { + error_errno(_("unable to dup bundle descriptor")); + child_process_clear(&pack_objects); + return -1; + } + } + if (start_command(&pack_objects)) return error(_("Could not spawn pack-objects")); @@ -369,7 +383,7 @@ static int write_bundle_refs(int bundle_fd, struct rev_info *revs) * commit that is referenced by the tag, and not the tag * itself. */ - if (oidcmp(&oid, &e->item->oid)) { + if (!oideq(&oid, &e->item->oid)) { /* * Is this the positive end of a range expressed * in terms of a tag (e.g. 
v2.0 from the range @@ -421,27 +435,16 @@ int create_bundle(struct bundle_header *header, const char *path, bundle_to_stdout = !strcmp(path, "-"); if (bundle_to_stdout) bundle_fd = 1; - else { + else bundle_fd = hold_lock_file_for_update(&lock, path, LOCK_DIE_ON_ERROR); - /* - * write_pack_data() will close the fd passed to it, - * but commit_lock_file() will also try to close the - * lockfile's fd. So make a copy of the file - * descriptor to avoid trying to close it twice. - */ - bundle_fd = dup(bundle_fd); - if (bundle_fd < 0) - die_errno("unable to dup file descriptor"); - } - /* write signature */ write_or_die(bundle_fd, bundle_signature, strlen(bundle_signature)); /* init revs to list objects for pack-objects later */ save_commit_buffer = 0; - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); /* write prerequisites */ if (compute_and_write_prerequisites(bundle_fd, &revs, argc, argv)) @@ -463,10 +466,8 @@ int create_bundle(struct bundle_header *header, const char *path, goto err; /* write pack */ - if (write_pack_data(bundle_fd, &revs)) { - bundle_fd = -1; /* already closed by the above call */ + if (write_pack_data(bundle_fd, &revs)) goto err; - } if (!bundle_to_stdout) { if (commit_lock_file(&lock)) @@ -474,11 +475,7 @@ int create_bundle(struct bundle_header *header, const char *path, } return 0; err: - if (!bundle_to_stdout) { - if (0 <= bundle_fd) - close(bundle_fd); - rollback_lock_file(&lock); - } + rollback_lock_file(&lock); return -1; } diff --git a/cache-tree.c b/cache-tree.c index 16ea022c46..9d454d24bc 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -4,6 +4,7 @@ #include "tree-walk.h" #include "cache-tree.h" #include "object-store.h" +#include "replace-object.h" #ifndef DEBUG #define DEBUG 0 @@ -325,6 +326,7 @@ static int update_one(struct cache_tree *it, unsigned mode; int expected_missing = 0; int contains_ita = 0; + int ce_missing_ok; path = ce->name; pathlen = ce_namelen(ce); @@ -354,8 +356,11 @@ static int update_one(struct cache_tree *it, i++; } + ce_missing_ok = mode == S_IFGITLINK || missing_ok || + (repository_format_partial_clone && + ce_skip_worktree(ce)); if (is_null_oid(oid) || - (mode != S_IFGITLINK && !missing_ok && !has_object_file(oid))) { + (!ce_missing_ok && !has_object_file(oid))) { strbuf_release(&buffer); if (expected_missing) return -1; @@ -433,7 +438,9 @@ int cache_tree_update(struct index_state *istate, int flags) if (i) return i; + trace_performance_enter(); i = update_one(it, cache, entries, "", 0, &skip, flags); + trace_performance_leave("cache_tree_update"); if (i < 0) return i; istate->cache_changed |= CACHE_TREE_CHANGED; @@ -714,7 +721,84 @@ int cache_tree_matches_traversal(struct cache_tree *root, it = find_cache_tree_from_traversal(root, info); it = cache_tree_find(it, ent->path); - if (it && it->entry_count > 0 && !oidcmp(ent->oid, &it->oid)) + if (it && it->entry_count > 0 && oideq(ent->oid, &it->oid)) return it->entry_count; return 0; } + +static void verify_one(struct index_state *istate, + struct cache_tree *it, + struct strbuf *path) +{ + int i, pos, len = path->len; + struct strbuf tree_buf = STRBUF_INIT; + struct object_id new_oid; + + for (i = 0; i < it->subtree_nr; i++) { + strbuf_addf(path, "%s/", it->down[i]->name); + verify_one(istate, it->down[i]->cache_tree, path); + strbuf_setlen(path, len); + } + + if (it->entry_count < 0 || + /* no verification on tests (t7003) that replace trees */ + lookup_replace_object(the_repository, &it->oid) != &it->oid) + return; + + if (path->len) { + pos = 
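
The bundle.c hunks above move the descriptor juggling into write_pack_data(): since start_command() closes a child output descriptor greater than 1, the function now dup()s the bundle descriptor before wiring it to pack-objects, and create_bundle() no longer has to pre-duplicate the lockfile descriptor and remember whether it was already closed on the error path. A stripped-down sketch of why the dup() matters when the caller wants to keep using the descriptor afterwards; hand_to_child() is a stand-in for start_command() taking ownership of the fd:

	#include <stdio.h>
	#include <unistd.h>

	/* Stand-in for start_command(): the child side takes ownership and,
	 * like run-command, closes the descriptor when it is greater than 1. */
	static void hand_to_child(int fd)
	{
		if (write(fd, "child output\n", 13) < 0)
			perror("write");
		if (fd > 1)
			close(fd);
	}

	int main(void)
	{
		int out = 1;		/* caller's descriptor, e.g. a lockfile fd */
		int copy = out;

		if (out > 1) {		/* stdout itself would not be closed anyway */
			copy = dup(out);
			if (copy < 0) {
				perror("dup");
				return 1;
			}
		}
		hand_to_child(copy);

		/* 'out' stays valid for the caller's own writes. */
		if (write(out, "caller output\n", 14) < 0)
			perror("write");
		return 0;
	}
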
index_name_pos(istate, path->buf, path->len); + pos = -pos - 1; + } else { + pos = 0; + } + + i = 0; + while (i < it->entry_count) { + struct cache_entry *ce = istate->cache[pos + i]; + const char *slash; + struct cache_tree_sub *sub = NULL; + const struct object_id *oid; + const char *name; + unsigned mode; + int entlen; + + if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE)) + BUG("%s with flags 0x%x should not be in cache-tree", + ce->name, ce->ce_flags); + name = ce->name + path->len; + slash = strchr(name, '/'); + if (slash) { + entlen = slash - name; + sub = find_subtree(it, ce->name + path->len, entlen, 0); + if (!sub || sub->cache_tree->entry_count < 0) + BUG("bad subtree '%.*s'", entlen, name); + oid = &sub->cache_tree->oid; + mode = S_IFDIR; + i += sub->cache_tree->entry_count; + } else { + oid = &ce->oid; + mode = ce->ce_mode; + entlen = ce_namelen(ce) - path->len; + i++; + } + strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0'); + strbuf_add(&tree_buf, oid->hash, the_hash_algo->rawsz); + } + hash_object_file(tree_buf.buf, tree_buf.len, tree_type, &new_oid); + if (!oideq(&new_oid, &it->oid)) + BUG("cache-tree for path %.*s does not match. " + "Expected %s got %s", len, path->buf, + oid_to_hex(&new_oid), oid_to_hex(&it->oid)); + strbuf_setlen(path, len); + strbuf_release(&tree_buf); +} + +void cache_tree_verify(struct index_state *istate) +{ + struct strbuf path = STRBUF_INIT; + + if (!istate->cache_tree) + return; + verify_one(istate, istate->cache_tree, &path); + strbuf_release(&path); +} diff --git a/cache-tree.h b/cache-tree.h index fc0c842e77..0ab6784ffe 100644 --- a/cache-tree.h +++ b/cache-tree.h @@ -32,6 +32,7 @@ struct cache_tree *cache_tree_read(const char *buffer, unsigned long size); int cache_tree_fully_valid(struct cache_tree *); int cache_tree_update(struct index_state *, int); +void cache_tree_verify(struct index_state *); /* bitmasks to write_cache_as_tree flags */ #define WRITE_TREE_MISSING_OK 1 @@ -410,7 +410,7 @@ void validate_cache_entries(const struct index_state *istate); #define read_cache() read_index(&the_index) #define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir())) -#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec)) +#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec), 0) #define is_cache_unborn() is_index_unborn(&the_index) #define read_cache_unmerged() read_index_unmerged(&the_index) #define discard_cache() discard_index(&the_index) @@ -486,6 +486,8 @@ static inline enum object_type object_type(unsigned int mode) #define INFOATTRIBUTES_FILE "info/attributes" #define ATTRIBUTE_MACRO_PREFIX "[attr]" #define GITMODULES_FILE ".gitmodules" +#define GITMODULES_INDEX ":.gitmodules" +#define GITMODULES_HEAD "HEAD:.gitmodules" #define GIT_NOTES_REF_ENVIRONMENT "GIT_NOTES_REF" #define GIT_NOTES_DEFAULT_REF "refs/notes/commits" #define GIT_NOTES_DISPLAY_REF_ENVIRONMENT "GIT_NOTES_DISPLAY_REF" @@ -659,7 +661,12 @@ extern int daemonize(void); /* Initialize and use the cache information */ struct lock_file; extern int read_index(struct index_state *); -extern int read_index_preload(struct index_state *, const struct pathspec *pathspec); +extern void preload_index(struct index_state *index, + const struct pathspec *pathspec, + unsigned int refresh_flags); +extern int read_index_preload(struct index_state *, + const struct pathspec *pathspec, + unsigned int refresh_flags); extern int do_read_index(struct index_state *istate, const char *path, int must_exist); 
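
The new verify_one() in cache-tree.c recomputes each cached subtree by concatenating canonical tree entries, that is the octal mode, a space, the entry name, a NUL byte, then the raw hash bytes, and it BUG()s when hashing that buffer does not reproduce the oid recorded in the cache-tree. The sketch below only assembles such entries into a flat buffer; add_tree_entry() is a made-up helper, the hash bytes are fake, and the real code passes the finished buffer to hash_object_file() with type "tree":

	#include <stdio.h>
	#include <string.h>

	#define RAWSZ 20

	/* Append one tree entry: "<octal mode> <name>\0<raw hash bytes>". */
	static size_t add_tree_entry(unsigned char *buf, size_t len,
				     unsigned mode, const char *name,
				     const unsigned char *hash)
	{
		len += sprintf((char *)buf + len, "%o %s", mode, name);
		buf[len++] = '\0';
		memcpy(buf + len, hash, RAWSZ);
		return len + RAWSZ;
	}

	int main(void)
	{
		unsigned char buf[256];
		unsigned char fake_hash[RAWSZ] = { 0xab, 0xcd };
		size_t len = 0;

		len = add_tree_entry(buf, len, 0100644, "README", fake_hash);
		len = add_tree_entry(buf, len, 040000, "docs", fake_hash);

		/* hash_object_file() would now hash "tree <len>\0" + buf[0..len) */
		printf("tree payload is %zu bytes\n", len);
		return 0;
	}
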
/* for testting only! */ extern int read_index_from(struct index_state *, const char *path, @@ -703,7 +710,7 @@ extern int unmerged_index(const struct index_state *); * provided, the space-separated list of files that differ will be appended * to it. */ -extern int index_has_changes(const struct index_state *istate, +extern int index_has_changes(struct index_state *istate, struct tree *tree, struct strbuf *sb); @@ -789,8 +796,8 @@ extern int ie_modified(struct index_state *, const struct cache_entry *, struct #define HASH_WRITE_OBJECT 1 #define HASH_FORMAT_CHECK 2 #define HASH_RENORMALIZE 4 -extern int index_fd(struct object_id *oid, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags); -extern int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags); +extern int index_fd(struct index_state *istate, struct object_id *oid, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags); +extern int index_path(struct index_state *istate, struct object_id *oid, const char *path, struct stat *st, unsigned flags); /* * Record to sd the data from st that we use to check whether a file @@ -816,6 +823,7 @@ extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st); #define REFRESH_IGNORE_MISSING 0x0008 /* ignore non-existent */ #define REFRESH_IGNORE_SUBMODULES 0x0010 /* ignore submodules */ #define REFRESH_IN_PORCELAIN 0x0020 /* user friendly output, not "needs update" */ +#define REFRESH_PROGRESS 0x0040 /* show progress bar if stderr is tty */ extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg); extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int); @@ -903,14 +911,6 @@ int use_optional_locks(void); extern char comment_line_char; extern int auto_comment_line_char; -/* Windows only */ -enum hide_dotfiles_type { - HIDE_DOTFILES_FALSE = 0, - HIDE_DOTFILES_TRUE, - HIDE_DOTFILES_DOTGITONLY -}; -extern enum hide_dotfiles_type hide_dotfiles; - enum log_refs_config { LOG_REFS_UNSET = -1, LOG_REFS_NONE = 0, @@ -959,11 +959,13 @@ extern int grafts_replace_parents; extern int repository_format_precious_objects; extern char *repository_format_partial_clone; extern const char *core_partial_clone_filter_default; +extern int repository_format_worktree_config; struct repository_format { int version; int precious_objects; char *partial_clone; /* value of extensions.partialclone */ + int worktree_config; int is_bare; int hash_algo; char *work_tree; @@ -1043,14 +1045,24 @@ static inline int oidcmp(const struct object_id *oid1, const struct object_id *o return hashcmp(oid1->hash, oid2->hash); } +static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2) +{ + return !hashcmp(sha1, sha2); +} + +static inline int oideq(const struct object_id *oid1, const struct object_id *oid2) +{ + return hasheq(oid1->hash, oid2->hash); +} + static inline int is_null_sha1(const unsigned char *sha1) { - return !hashcmp(sha1, null_sha1); + return hasheq(sha1, null_sha1); } static inline int is_null_oid(const struct object_id *oid) { - return !hashcmp(oid->hash, null_sha1); + return hasheq(oid->hash, null_sha1); } static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src) @@ -1087,22 +1099,22 @@ static inline void oidread(struct object_id *oid, const unsigned char *hash) static inline int is_empty_blob_sha1(const unsigned char *sha1) { - return !hashcmp(sha1, 
the_hash_algo->empty_blob->hash); + return hasheq(sha1, the_hash_algo->empty_blob->hash); } static inline int is_empty_blob_oid(const struct object_id *oid) { - return !oidcmp(oid, the_hash_algo->empty_blob); + return oideq(oid, the_hash_algo->empty_blob); } static inline int is_empty_tree_sha1(const unsigned char *sha1) { - return !hashcmp(sha1, the_hash_algo->empty_tree->hash); + return hasheq(sha1, the_hash_algo->empty_tree->hash); } static inline int is_empty_tree_oid(const struct object_id *oid) { - return !oidcmp(oid, the_hash_algo->empty_tree); + return oideq(oid, the_hash_algo->empty_tree); } const char *empty_tree_oid_hex(void); @@ -1474,6 +1486,7 @@ extern const char *fmt_name(const char *name, const char *email); extern const char *ident_default_name(void); extern const char *ident_default_email(void); extern const char *git_editor(void); +extern const char *git_sequence_editor(void); extern const char *git_pager(int stdout_is_tty); extern int is_terminal_dumb(void); extern int git_ident_config(const char *, const char *, void *); @@ -1520,6 +1533,7 @@ struct checkout { unsigned force:1, quiet:1, not_new:1, + clone:1, refresh_cache:1; }; #define CHECKOUT_INIT { NULL, "" } @@ -1696,7 +1710,7 @@ void shift_tree_by(const struct object_id *, const struct object_id *, struct ob /* All WS_* -- when extended, adapt diff.c emit_symbol */ #define WS_RULE_MASK 07777 extern unsigned whitespace_rule_cfg; -extern unsigned whitespace_rule(const char *); +extern unsigned whitespace_rule(struct index_state *, const char *); extern unsigned parse_whitespace_rule(const char *); extern unsigned ws_check(const char *line, int len, unsigned ws_rule); extern void ws_check_emit(const char *line, int len, unsigned ws_rule, FILE *stream, const char *set, const char *reset, const char *ws); @@ -1718,10 +1732,12 @@ extern struct startup_info *startup_info; /* merge.c */ struct commit_list; -int try_merge_command(const char *strategy, size_t xopts_nr, +int try_merge_command(struct repository *r, + const char *strategy, size_t xopts_nr, const char **xopts, struct commit_list *common, const char *head_arg, struct commit_list *remotes); -int checkout_fast_forward(const struct object_id *from, +int checkout_fast_forward(struct repository *r, + const struct object_id *from, const struct object_id *to, int overwrite_ignore); diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh index 75a9fd2475..06c3546e1e 100755 --- a/ci/install-dependencies.sh +++ b/ci/install-dependencies.sh @@ -10,6 +10,15 @@ LFSWHENCE=https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VE case "$jobname" in linux-clang|linux-gcc) + sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test" + sudo apt-get -q update + sudo apt-get -q -y install language-pack-is git-svn apache2 + case "$jobname" in + linux-gcc) + sudo apt-get -q -y install gcc-8 + ;; + esac + mkdir --parents "$P4_PATH" pushd "$P4_PATH" wget --quiet "$P4WHENCE/bin.linux26x86_64/p4d" @@ -32,11 +41,25 @@ osx-clang|osx-gcc) brew link --force gettext brew install caskroom/cask/perforce ;; +StaticAnalysis) + sudo apt-get -q update + sudo apt-get -q -y install coccinelle + ;; +Documentation) + sudo apt-get -q update + sudo apt-get -q -y install asciidoc xmlto + ;; esac -echo "$(tput setaf 6)Perforce Server Version$(tput sgr0)" -p4d -V | grep Rev. -echo "$(tput setaf 6)Perforce Client Version$(tput sgr0)" -p4 -V | grep Rev. 
-echo "$(tput setaf 6)Git-LFS Version$(tput sgr0)" -git-lfs version +if type p4d >/dev/null && type p4 >/dev/null +then + echo "$(tput setaf 6)Perforce Server Version$(tput sgr0)" + p4d -V | grep Rev. + echo "$(tput setaf 6)Perforce Client Version$(tput sgr0)" + p4 -V | grep Rev. +fi +if type git-lfs >/dev/null +then + echo "$(tput setaf 6)Git-LFS Version$(tput sgr0)" + git-lfs version +fi diff --git a/ci/lib-travisci.sh b/ci/lib-travisci.sh index 06970f7213..69dff4d1ec 100755 --- a/ci/lib-travisci.sh +++ b/ci/lib-travisci.sh @@ -123,7 +123,7 @@ osx-clang|osx-gcc) # Travis CI OS X export GIT_SKIP_TESTS="t9810 t9816" ;; -GETTEXT_POISON) - export GETTEXT_POISON=YesPlease +GIT_TEST_GETTEXT_POISON) + export GIT_TEST_GETTEXT_POISON=YesPlease ;; esac diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh index 2a5bff4a1c..cda170d5c2 100755 --- a/ci/run-build-and-tests.sh +++ b/ci/run-build-and-tests.sh @@ -15,6 +15,8 @@ then export GIT_TEST_FULL_IN_PACK_ARRAY=true export GIT_TEST_OE_SIZE=10 export GIT_TEST_OE_DELTA_SIZE=5 + export GIT_TEST_COMMIT_GRAPH=1 + export GIT_TEST_MULTI_PACK_INDEX=1 make --quiet test fi diff --git a/combine-diff.c b/combine-diff.c index de7695e728..ad7752ea6b 100644 --- a/combine-diff.c +++ b/combine-diff.c @@ -285,7 +285,8 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase, return base; } -static char *grab_blob(const struct object_id *oid, unsigned int mode, +static char *grab_blob(struct repository *r, + const struct object_id *oid, unsigned int mode, unsigned long *size, struct userdiff_driver *textconv, const char *path) { @@ -304,7 +305,7 @@ static char *grab_blob(const struct object_id *oid, unsigned int mode, } else if (textconv) { struct diff_filespec *df = alloc_filespec(path); fill_filespec(df, oid, 1, mode); - *size = fill_textconv(textconv, df, &blob); + *size = fill_textconv(r, textconv, df, &blob); free_filespec(df); } else { blob = read_object_file(oid, &type, size); @@ -344,38 +345,43 @@ struct combine_diff_state { struct sline *lost_bucket; }; -static void consume_line(void *state_, char *line, unsigned long len) +static void consume_hunk(void *state_, + long ob, long on, + long nb, long nn, + const char *funcline, long funclen) { struct combine_diff_state *state = state_; - if (5 < len && !memcmp("@@ -", line, 4)) { - if (parse_hunk_header(line, len, - &state->ob, &state->on, - &state->nb, &state->nn)) - return; - state->lno = state->nb; - if (state->nn == 0) { - /* @@ -X,Y +N,0 @@ removed Y lines - * that would have come *after* line N - * in the result. Our lost buckets hang - * to the line after the removed lines, - * - * Note that this is correct even when N == 0, - * in which case the hunk removes the first - * line in the file. - */ - state->lost_bucket = &state->sline[state->nb]; - if (!state->nb) - state->nb = 1; - } else { - state->lost_bucket = &state->sline[state->nb-1]; - } - if (!state->sline[state->nb-1].p_lno) - state->sline[state->nb-1].p_lno = - xcalloc(state->num_parent, - sizeof(unsigned long)); - state->sline[state->nb-1].p_lno[state->n] = state->ob; - return; + + state->ob = ob; + state->on = on; + state->nb = nb; + state->nn = nn; + state->lno = state->nb; + if (state->nn == 0) { + /* @@ -X,Y +N,0 @@ removed Y lines + * that would have come *after* line N + * in the result. Our lost buckets hang + * to the line after the removed lines, + * + * Note that this is correct even when N == 0, + * in which case the hunk removes the first + * line in the file. 
+ */ + state->lost_bucket = &state->sline[state->nb]; + if (!state->nb) + state->nb = 1; + } else { + state->lost_bucket = &state->sline[state->nb-1]; } + if (!state->sline[state->nb-1].p_lno) + state->sline[state->nb-1].p_lno = + xcalloc(state->num_parent, sizeof(unsigned long)); + state->sline[state->nb-1].p_lno[state->n] = state->ob; +} + +static void consume_line(void *state_, char *line, unsigned long len) +{ + struct combine_diff_state *state = state_; if (!state->lost_bucket) return; /* not in any hunk yet */ switch (line[0]) { @@ -389,7 +395,8 @@ static void consume_line(void *state_, char *line, unsigned long len) } } -static void combine_diff(const struct object_id *parent, unsigned int mode, +static void combine_diff(struct repository *r, + const struct object_id *parent, unsigned int mode, mmfile_t *result_file, struct sline *sline, unsigned int cnt, int n, int num_parent, int result_deleted, @@ -407,7 +414,7 @@ static void combine_diff(const struct object_id *parent, unsigned int mode, if (result_deleted) return; /* result deleted */ - parent_file.ptr = grab_blob(parent, mode, &sz, textconv, path); + parent_file.ptr = grab_blob(r, parent, mode, &sz, textconv, path); parent_file.size = sz; memset(&xpp, 0, sizeof(xpp)); xpp.flags = flags; @@ -419,8 +426,8 @@ static void combine_diff(const struct object_id *parent, unsigned int mode, state.num_parent = num_parent; state.n = n; - if (xdi_diff_outf(&parent_file, result_file, consume_line, &state, - &xpp, &xecfg)) + if (xdi_diff_outf(&parent_file, result_file, consume_hunk, + consume_line, &state, &xpp, &xecfg)) die("unable to generate combined diff for %s", oid_to_hex(parent)); free(parent_file.ptr); @@ -985,7 +992,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, const char *line_prefix = diff_line_prefix(opt); context = opt->context; - userdiff = userdiff_find_by_path(elem->path); + userdiff = userdiff_find_by_path(opt->repo->index, elem->path); if (!userdiff) userdiff = userdiff_find_by_name("default"); if (opt->flags.allow_textconv) @@ -993,7 +1000,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, /* Read the result of merge first */ if (!working_tree_file) - result = grab_blob(&elem->oid, elem->mode, &result_size, + result = grab_blob(opt->repo, &elem->oid, elem->mode, &result_size, textconv, elem->path); else { /* Used by diff-tree to read from the working tree */ @@ -1016,15 +1023,16 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, } else if (S_ISDIR(st.st_mode)) { struct object_id oid; if (resolve_gitlink_ref(elem->path, "HEAD", &oid) < 0) - result = grab_blob(&elem->oid, elem->mode, - &result_size, NULL, NULL); + result = grab_blob(opt->repo, &elem->oid, + elem->mode, &result_size, + NULL, NULL); else - result = grab_blob(&oid, elem->mode, + result = grab_blob(opt->repo, &oid, elem->mode, &result_size, NULL, NULL); } else if (textconv) { struct diff_filespec *df = alloc_filespec(elem->path); fill_filespec(df, &null_oid, 0, st.st_mode); - result_size = fill_textconv(textconv, df, &result); + result_size = fill_textconv(opt->repo, textconv, df, &result); free_filespec(df); } else if (0 <= (fd = open(elem->path, O_RDONLY))) { size_t len = xsize_t(st.st_size); @@ -1054,7 +1062,8 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, if (is_file) { struct strbuf buf = STRBUF_INIT; - if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) { + if 
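
In combine-diff.c above, xdi_diff_outf() now takes a dedicated hunk callback, so consume_hunk() is handed the old/new start and count values directly and consume_line() no longer needs to recognize and re-parse "@@ -<ob>,<on> +<nb>,<nn> @@" header lines. For contrast, a toy version of the header parsing the line callback used to need; unlike real hunk headers it assumes both counts are always present:

	#include <stdio.h>

	/* Parse "@@ -<ob>,<on> +<nb>,<nn> @@ ..." into its four numbers.
	 * Toy version only: counts may be omitted in real hunk headers. */
	static int parse_header(const char *line,
				long *ob, long *on, long *nb, long *nn)
	{
		return sscanf(line, "@@ -%ld,%ld +%ld,%ld @@",
			      ob, on, nb, nn) == 4 ? 0 : -1;
	}

	int main(void)
	{
		long ob, on, nb, nn;
		const char *line = "@@ -12,3 +14,0 @@ removed three lines";

		if (!parse_header(line, &ob, &on, &nb, &nn))
			printf("old %ld,%ld new %ld,%ld\n", ob, on, nb, nn);
		return 0;
	}
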
(convert_to_git(rev->diffopt.repo->index, + elem->path, result, len, &buf, global_conv_flags_eol)) { free(result); result = strbuf_detach(&buf, &len); result_size = len; @@ -1089,7 +1098,8 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, for (i = 0; !is_binary && i < num_parent; i++) { char *buf; unsigned long size; - buf = grab_blob(&elem->parent[i].oid, + buf = grab_blob(opt->repo, + &elem->parent[i].oid, elem->parent[i].mode, &size, NULL, NULL); if (buffer_is_binary(buf, size)) @@ -1138,14 +1148,15 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, for (i = 0; i < num_parent; i++) { int j; for (j = 0; j < i; j++) { - if (!oidcmp(&elem->parent[i].oid, - &elem->parent[j].oid)) { + if (oideq(&elem->parent[i].oid, + &elem->parent[j].oid)) { reuse_combine_diff(sline, cnt, i, j); break; } } if (i <= j) - combine_diff(&elem->parent[i].oid, + combine_diff(opt->repo, + &elem->parent[i].oid, elem->parent[i].mode, &result_file, sline, cnt, i, num_parent, result_deleted, diff --git a/command-list.txt b/command-list.txt index 93f3076e31..3a9af104b5 100644 --- a/command-list.txt +++ b/command-list.txt @@ -123,6 +123,7 @@ git-merge-index plumbingmanipulators git-merge-one-file purehelpers git-mergetool ancillarymanipulators complete git-merge-tree ancillaryinterrogators +git-multi-pack-index plumbingmanipulators git-mktag plumbingmanipulators git-mktree plumbingmanipulators git-mv mainporcelain worktree diff --git a/commit-graph.c b/commit-graph.c index 7cfa779dcb..40c855f185 100644 --- a/commit-graph.c +++ b/commit-graph.c @@ -15,6 +15,7 @@ #include "alloc.h" #include "hashmap.h" #include "replace-object.h" +#include "progress.h" #define GRAPH_SIGNATURE 0x43475048 /* "CGPH" */ #define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */ @@ -237,8 +238,9 @@ static int prepare_commit_graph(struct repository *r) return !!r->objects->commit_graph; r->objects->commit_graph_attempted = 1; - if (repo_config_get_bool(r, "core.commitgraph", &config_value) || - !config_value) + if (!git_env_bool(GIT_TEST_COMMIT_GRAPH, 0) && + (repo_config_get_bool(r, "core.commitgraph", &config_value) || + !config_value)) /* * This repository is not configured to use commit graphs, so * do not load one. 
(But report commit_graph_attempted anyway @@ -260,6 +262,24 @@ static int prepare_commit_graph(struct repository *r) return !!r->objects->commit_graph; } +int generation_numbers_enabled(struct repository *r) +{ + uint32_t first_generation; + struct commit_graph *g; + if (!prepare_commit_graph(r)) + return 0; + + g = r->objects->commit_graph; + + if (!g->num_commits) + return 0; + + first_generation = get_be32(g->chunk_commit_data + + g->hash_len + 8) >> 2; + + return !!first_generation; +} + void close_commit_graph(struct repository *r) { free_commit_graph(r->objects->commit_graph); @@ -575,6 +595,8 @@ struct packed_oid_list { struct object_id *list; int nr; int alloc; + struct progress *progress; + int progress_done; }; static int add_packed_commits(const struct object_id *oid, @@ -587,6 +609,9 @@ static int add_packed_commits(const struct object_id *oid, off_t offset = nth_packed_object_offset(pack, pos); struct object_info oi = OBJECT_INFO_INIT; + if (list->progress) + display_progress(list->progress, ++list->progress_done); + oi.typep = &type; if (packed_object_info(the_repository, pack, offset, &oi) < 0) die(_("unable to get type of object %s"), oid_to_hex(oid)); @@ -614,12 +639,18 @@ static void add_missing_parents(struct packed_oid_list *oids, struct commit *com } } -static void close_reachable(struct packed_oid_list *oids) +static void close_reachable(struct packed_oid_list *oids, int report_progress) { int i; struct commit *commit; + struct progress *progress = NULL; + int j = 0; + if (report_progress) + progress = start_delayed_progress( + _("Annotating commits in commit graph"), 0); for (i = 0; i < oids->nr; i++) { + display_progress(progress, ++j); commit = lookup_commit(the_repository, &oids->list[i]); if (commit) commit->object.flags |= UNINTERESTING; @@ -631,6 +662,7 @@ static void close_reachable(struct packed_oid_list *oids) * closure. 
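
Two small things happen in the commit-graph.c hunks around here: prepare_commit_graph() can be forced on through the GIT_TEST_COMMIT_GRAPH environment variable regardless of core.commitgraph, and the new generation_numbers_enabled() inspects the first commit's entry in the commit-data chunk, where the generation number sits in the upper 30 bits of the 32-bit word following the root tree hash and the two parent positions, hence the get_be32(...) >> 2. A standalone sketch of that big-endian extraction; the byte values are invented:

	#include <stdint.h>
	#include <stdio.h>

	/* Big-endian read, the job git's get_be32() does. */
	static uint32_t get_be32(const unsigned char *p)
	{
		return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
	}

	int main(void)
	{
		/* Hypothetical word from a commit-data entry: generation 5 in
		 * the upper 30 bits; the two low bits belong to the 34-bit
		 * commit timestamp. */
		unsigned char word[4] = { 0x00, 0x00, 0x00, 0x17 };
		uint32_t generation = get_be32(word) >> 2;

		printf("generation %u (enabled: %s)\n", generation,
		       generation ? "yes" : "no");
		return 0;
	}
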
*/ for (i = 0; i < oids->nr; i++) { + display_progress(progress, ++j); commit = lookup_commit(the_repository, &oids->list[i]); if (commit && !parse_commit(commit)) @@ -638,19 +670,28 @@ static void close_reachable(struct packed_oid_list *oids) } for (i = 0; i < oids->nr; i++) { + display_progress(progress, ++j); commit = lookup_commit(the_repository, &oids->list[i]); if (commit) commit->object.flags &= ~UNINTERESTING; } + stop_progress(&progress); } -static void compute_generation_numbers(struct packed_commit_list* commits) +static void compute_generation_numbers(struct packed_commit_list* commits, + int report_progress) { int i; struct commit_list *list = NULL; + struct progress *progress = NULL; + if (report_progress) + progress = start_progress( + _("Computing commit graph generation numbers"), + commits->nr); for (i = 0; i < commits->nr; i++) { + display_progress(progress, i + 1); if (commits->list[i]->generation != GENERATION_NUMBER_INFINITY && commits->list[i]->generation != GENERATION_NUMBER_ZERO) continue; @@ -682,6 +723,7 @@ static void compute_generation_numbers(struct packed_commit_list* commits) } } } + stop_progress(&progress); } static int add_ref_to_list(const char *refname, @@ -694,19 +736,21 @@ static int add_ref_to_list(const char *refname, return 0; } -void write_commit_graph_reachable(const char *obj_dir, int append) +void write_commit_graph_reachable(const char *obj_dir, int append, + int report_progress) { - struct string_list list; + struct string_list list = STRING_LIST_INIT_DUP; - string_list_init(&list, 1); for_each_ref(add_ref_to_list, &list); - write_commit_graph(obj_dir, NULL, &list, append); + write_commit_graph(obj_dir, NULL, &list, append, report_progress); + + string_list_clear(&list, 0); } void write_commit_graph(const char *obj_dir, struct string_list *pack_indexes, struct string_list *commit_hex, - int append) + int append, int report_progress) { struct packed_oid_list oids; struct packed_commit_list commits; @@ -719,12 +763,15 @@ void write_commit_graph(const char *obj_dir, int num_chunks; int num_extra_edges; struct commit_list *parent; + struct progress *progress = NULL; if (!commit_graph_compatible(the_repository)) return; oids.nr = 0; - oids.alloc = approximate_object_count() / 4; + oids.alloc = approximate_object_count() / 32; + oids.progress = NULL; + oids.progress_done = 0; if (append) { prepare_commit_graph_one(the_repository, obj_dir); @@ -751,6 +798,11 @@ void write_commit_graph(const char *obj_dir, int dirlen; strbuf_addf(&packname, "%s/pack/", obj_dir); dirlen = packname.len; + if (report_progress) { + oids.progress = start_delayed_progress( + _("Finding commits for commit graph"), 0); + oids.progress_done = 0; + } for (i = 0; i < pack_indexes->nr; i++) { struct packed_git *p; strbuf_setlen(&packname, dirlen); @@ -762,16 +814,23 @@ void write_commit_graph(const char *obj_dir, die(_("error opening index for %s"), packname.buf); for_each_object_in_pack(p, add_packed_commits, &oids, 0); close_pack(p); + free(p); } + stop_progress(&oids.progress); strbuf_release(&packname); } if (commit_hex) { + if (report_progress) + progress = start_delayed_progress( + _("Finding commits for commit graph"), + commit_hex->nr); for (i = 0; i < commit_hex->nr; i++) { const char *end; struct object_id oid; struct commit *result; + display_progress(progress, i + 1); if (commit_hex->items[i].string && parse_oid_hex(commit_hex->items[i].string, &oid, &end)) continue; @@ -784,18 +843,24 @@ void write_commit_graph(const char *obj_dir, oids.nr++; } } + 
stop_progress(&progress); } - if (!pack_indexes && !commit_hex) + if (!pack_indexes && !commit_hex) { + if (report_progress) + oids.progress = start_delayed_progress( + _("Finding commits for commit graph"), 0); for_each_packed_object(add_packed_commits, &oids, 0); + stop_progress(&oids.progress); + } - close_reachable(&oids); + close_reachable(&oids, report_progress); QSORT(oids.list, oids.nr, commit_compare); count_distinct = 1; for (i = 1; i < oids.nr; i++) { - if (oidcmp(&oids.list[i-1], &oids.list[i])) + if (!oideq(&oids.list[i - 1], &oids.list[i])) count_distinct++; } @@ -809,7 +874,7 @@ void write_commit_graph(const char *obj_dir, num_extra_edges = 0; for (i = 0; i < oids.nr; i++) { int num_parents = 0; - if (i > 0 && !oidcmp(&oids.list[i-1], &oids.list[i])) + if (i > 0 && oideq(&oids.list[i - 1], &oids.list[i])) continue; commits.list[commits.nr] = lookup_commit(the_repository, &oids.list[i]); @@ -829,12 +894,14 @@ void write_commit_graph(const char *obj_dir, if (commits.nr >= GRAPH_PARENT_MISSING) die(_("too many commits to write graph")); - compute_generation_numbers(&commits); + compute_generation_numbers(&commits, report_progress); graph_name = get_commit_graph_filename(obj_dir); - if (safe_create_leading_directories(graph_name)) + if (safe_create_leading_directories(graph_name)) { + UNLEAK(graph_name); die_errno(_("unable to create leading directories of %s"), graph_name); + } hold_lock_file_for_update(&lk, graph_name, LOCK_DIE_ON_ERROR); f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf); @@ -879,9 +946,9 @@ void write_commit_graph(const char *obj_dir, finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC); commit_lock_file(&lk); + free(graph_name); + free(commits.list); free(oids.list); - oids.alloc = 0; - oids.nr = 0; } #define VERIFY_COMMIT_GRAPH_ERROR_HASH 2 @@ -908,6 +975,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g) int generation_zero = 0; struct hashfile *f; int devnull; + struct progress *progress = NULL; if (!g) { graph_report("no commit-graph file loaded"); @@ -930,7 +998,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g) f = hashfd(devnull, NULL); hashwrite(f, g->data, g->data_len - g->hash_len); finalize_hashfile(f, checksum.hash, CSUM_CLOSE); - if (hashcmp(checksum.hash, g->data + g->data_len - g->hash_len)) { + if (!hasheq(checksum.hash, g->data + g->data_len - g->hash_len)) { graph_report(_("the commit-graph file has incorrect checksum and is likely corrupt")); verify_commit_graph_error = VERIFY_COMMIT_GRAPH_ERROR_HASH; } @@ -975,11 +1043,14 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g) if (verify_commit_graph_error & ~VERIFY_COMMIT_GRAPH_ERROR_HASH) return verify_commit_graph_error; + progress = start_progress(_("Verifying commits in commit graph"), + g->num_commits); for (i = 0; i < g->num_commits; i++) { struct commit *graph_commit, *odb_commit; struct commit_list *graph_parents, *odb_parents; uint32_t max_generation = 0; + display_progress(progress, i + 1); hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i); graph_commit = lookup_commit(r, &cur_oid); @@ -990,7 +1061,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g) continue; } - if (oidcmp(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid, + if (!oideq(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid, get_commit_tree_oid(odb_commit))) graph_report("root tree OID for commit %s in commit-graph is %s != %s", oid_to_hex(&cur_oid), @@ -1007,7 +1078,7 @@ int 
verify_commit_graph(struct repository *r, struct commit_graph *g) break; } - if (oidcmp(&graph_parents->item->object.oid, &odb_parents->item->object.oid)) + if (!oideq(&graph_parents->item->object.oid, &odb_parents->item->object.oid)) graph_report("commit-graph parent for %s is %s != %s", oid_to_hex(&cur_oid), oid_to_hex(&graph_parents->item->object.oid), @@ -1056,6 +1127,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g) graph_commit->date, odb_commit->date); } + stop_progress(&progress); return verify_commit_graph_error; } diff --git a/commit-graph.h b/commit-graph.h index b62d1b0837..9db40b4d3a 100644 --- a/commit-graph.h +++ b/commit-graph.h @@ -6,6 +6,8 @@ #include "string-list.h" #include "cache.h" +#define GIT_TEST_COMMIT_GRAPH "GIT_TEST_COMMIT_GRAPH" + struct commit; char *get_commit_graph_filename(const char *obj_dir); @@ -52,11 +54,18 @@ struct commit_graph { struct commit_graph *load_commit_graph_one(const char *graph_file); -void write_commit_graph_reachable(const char *obj_dir, int append); +/* + * Return 1 if and only if the repository has a commit-graph + * file and generation numbers are computed in that file. + */ +int generation_numbers_enabled(struct repository *r); + +void write_commit_graph_reachable(const char *obj_dir, int append, + int report_progress); void write_commit_graph(const char *obj_dir, struct string_list *pack_indexes, struct string_list *commit_hex, - int append); + int append, int report_progress); int verify_commit_graph(struct repository *r, struct commit_graph *g); diff --git a/commit-reach.c b/commit-reach.c new file mode 100644 index 0000000000..d5a39defd3 --- /dev/null +++ b/commit-reach.c @@ -0,0 +1,761 @@ +#include "cache.h" +#include "commit.h" +#include "commit-graph.h" +#include "decorate.h" +#include "prio-queue.h" +#include "tree.h" +#include "ref-filter.h" +#include "revision.h" +#include "tag.h" +#include "commit-reach.h" + +/* Remember to update object flag allocation in object.h */ +#define REACHABLE (1u<<15) +#define PARENT1 (1u<<16) +#define PARENT2 (1u<<17) +#define STALE (1u<<18) +#define RESULT (1u<<19) + +static const unsigned all_flags = (PARENT1 | PARENT2 | STALE | RESULT); + +static int queue_has_nonstale(struct prio_queue *queue) +{ + int i; + for (i = 0; i < queue->nr; i++) { + struct commit *commit = queue->array[i].data; + if (!(commit->object.flags & STALE)) + return 1; + } + return 0; +} + +/* all input commits in one and twos[] must have been parsed! 
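/*
 * Illustrative sketch, not part of the patch: the progress-API pattern that
 * the commit-graph changes above thread through their loops.  The helper
 * show_items() and its item count are hypothetical; start_delayed_progress(),
 * display_progress() and stop_progress() are the progress.h routines the
 * patch itself calls, and all of them accept a NULL progress handle.
 */
#include "cache.h"
#include "progress.h"

static void show_items(int nr_items, int report_progress)
{
        struct progress *progress = NULL;
        int i;

        if (report_progress)
                progress = start_delayed_progress(_("Processing items"),
                                                  nr_items);

        for (i = 0; i < nr_items; i++) {
                display_progress(progress, i + 1);
                /* per-item work would go here */
        }

        stop_progress(&progress);
}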
*/ +static struct commit_list *paint_down_to_common(struct commit *one, int n, + struct commit **twos, + int min_generation) +{ + struct prio_queue queue = { compare_commits_by_gen_then_commit_date }; + struct commit_list *result = NULL; + int i; + uint32_t last_gen = GENERATION_NUMBER_INFINITY; + + if (!min_generation) + queue.compare = compare_commits_by_commit_date; + + one->object.flags |= PARENT1; + if (!n) { + commit_list_append(one, &result); + return result; + } + prio_queue_put(&queue, one); + + for (i = 0; i < n; i++) { + twos[i]->object.flags |= PARENT2; + prio_queue_put(&queue, twos[i]); + } + + while (queue_has_nonstale(&queue)) { + struct commit *commit = prio_queue_get(&queue); + struct commit_list *parents; + int flags; + + if (min_generation && commit->generation > last_gen) + BUG("bad generation skip %8x > %8x at %s", + commit->generation, last_gen, + oid_to_hex(&commit->object.oid)); + last_gen = commit->generation; + + if (commit->generation < min_generation) + break; + + flags = commit->object.flags & (PARENT1 | PARENT2 | STALE); + if (flags == (PARENT1 | PARENT2)) { + if (!(commit->object.flags & RESULT)) { + commit->object.flags |= RESULT; + commit_list_insert_by_date(commit, &result); + } + /* Mark parents of a found merge stale */ + flags |= STALE; + } + parents = commit->parents; + while (parents) { + struct commit *p = parents->item; + parents = parents->next; + if ((p->object.flags & flags) == flags) + continue; + if (parse_commit(p)) + return NULL; + p->object.flags |= flags; + prio_queue_put(&queue, p); + } + } + + clear_prio_queue(&queue); + return result; +} + +static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos) +{ + struct commit_list *list = NULL; + struct commit_list *result = NULL; + int i; + + for (i = 0; i < n; i++) { + if (one == twos[i]) + /* + * We do not mark this even with RESULT so we do not + * have to clean it up. + */ + return commit_list_insert(one, &result); + } + + if (parse_commit(one)) + return NULL; + for (i = 0; i < n; i++) { + if (parse_commit(twos[i])) + return NULL; + } + + list = paint_down_to_common(one, n, twos, 0); + + while (list) { + struct commit *commit = pop_commit(&list); + if (!(commit->object.flags & STALE)) + commit_list_insert_by_date(commit, &result); + } + return result; +} + +struct commit_list *get_octopus_merge_bases(struct commit_list *in) +{ + struct commit_list *i, *j, *k, *ret = NULL; + + if (!in) + return ret; + + commit_list_insert(in->item, &ret); + + for (i = in->next; i; i = i->next) { + struct commit_list *new_commits = NULL, *end = NULL; + + for (j = ret; j; j = j->next) { + struct commit_list *bases; + bases = get_merge_bases(i->item, j->item); + if (!new_commits) + new_commits = bases; + else + end->next = bases; + for (k = bases; k; k = k->next) + end = k; + } + ret = new_commits; + } + return ret; +} + +static int remove_redundant(struct commit **array, int cnt) +{ + /* + * Some commit in the array may be an ancestor of + * another commit. Move such commit to the end of + * the array, and return the number of commits that + * are independent from each other. 
+ */ + struct commit **work; + unsigned char *redundant; + int *filled_index; + int i, j, filled; + + work = xcalloc(cnt, sizeof(*work)); + redundant = xcalloc(cnt, 1); + ALLOC_ARRAY(filled_index, cnt - 1); + + for (i = 0; i < cnt; i++) + parse_commit(array[i]); + for (i = 0; i < cnt; i++) { + struct commit_list *common; + uint32_t min_generation = array[i]->generation; + + if (redundant[i]) + continue; + for (j = filled = 0; j < cnt; j++) { + if (i == j || redundant[j]) + continue; + filled_index[filled] = j; + work[filled++] = array[j]; + + if (array[j]->generation < min_generation) + min_generation = array[j]->generation; + } + common = paint_down_to_common(array[i], filled, work, + min_generation); + if (array[i]->object.flags & PARENT2) + redundant[i] = 1; + for (j = 0; j < filled; j++) + if (work[j]->object.flags & PARENT1) + redundant[filled_index[j]] = 1; + clear_commit_marks(array[i], all_flags); + clear_commit_marks_many(filled, work, all_flags); + free_commit_list(common); + } + + /* Now collect the result */ + COPY_ARRAY(work, array, cnt); + for (i = filled = 0; i < cnt; i++) + if (!redundant[i]) + array[filled++] = work[i]; + for (j = filled, i = 0; i < cnt; i++) + if (redundant[i]) + array[j++] = work[i]; + free(work); + free(redundant); + free(filled_index); + return filled; +} + +static struct commit_list *get_merge_bases_many_0(struct commit *one, + int n, + struct commit **twos, + int cleanup) +{ + struct commit_list *list; + struct commit **rslt; + struct commit_list *result; + int cnt, i; + + result = merge_bases_many(one, n, twos); + for (i = 0; i < n; i++) { + if (one == twos[i]) + return result; + } + if (!result || !result->next) { + if (cleanup) { + clear_commit_marks(one, all_flags); + clear_commit_marks_many(n, twos, all_flags); + } + return result; + } + + /* There are more than one */ + cnt = commit_list_count(result); + rslt = xcalloc(cnt, sizeof(*rslt)); + for (list = result, i = 0; list; list = list->next) + rslt[i++] = list->item; + free_commit_list(result); + + clear_commit_marks(one, all_flags); + clear_commit_marks_many(n, twos, all_flags); + + cnt = remove_redundant(rslt, cnt); + result = NULL; + for (i = 0; i < cnt; i++) + commit_list_insert_by_date(rslt[i], &result); + free(rslt); + return result; +} + +struct commit_list *get_merge_bases_many(struct commit *one, + int n, + struct commit **twos) +{ + return get_merge_bases_many_0(one, n, twos, 1); +} + +struct commit_list *get_merge_bases_many_dirty(struct commit *one, + int n, + struct commit **twos) +{ + return get_merge_bases_many_0(one, n, twos, 0); +} + +struct commit_list *get_merge_bases(struct commit *one, struct commit *two) +{ + return get_merge_bases_many_0(one, 1, &two, 1); +} + +/* + * Is "commit" a descendant of one of the elements on the "with_commit" list? + */ +int is_descendant_of(struct commit *commit, struct commit_list *with_commit) +{ + if (!with_commit) + return 1; + + if (generation_numbers_enabled(the_repository)) { + struct commit_list *from_list = NULL; + int result; + commit_list_insert(commit, &from_list); + result = can_all_from_reach(from_list, with_commit, 0); + free_commit_list(from_list); + return result; + } else { + while (with_commit) { + struct commit *other; + + other = with_commit->item; + with_commit = with_commit->next; + if (in_merge_bases(other, commit)) + return 1; + } + return 0; + } +} + +/* + * Is "commit" an ancestor of one of the "references"? 
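/*
 * Illustrative sketch, not part of the patch: a minimal caller of the
 * merge-base API defined above.  print_merge_bases() is hypothetical;
 * get_merge_bases(), parse_commit(), oid_to_hex() and free_commit_list()
 * are the existing functions shown in this file.
 */
#include "cache.h"
#include "commit.h"
#include "commit-reach.h"

static void print_merge_bases(struct commit *a, struct commit *b)
{
        struct commit_list *bases, *p;

        /*
         * get_merge_bases() parses its inputs itself; parsing here only
         * lets us bail out early on unreadable commits.
         */
        if (parse_commit(a) || parse_commit(b))
                return;

        bases = get_merge_bases(a, b);
        for (p = bases; p; p = p->next)
                printf("%s\n", oid_to_hex(&p->item->object.oid));
        free_commit_list(bases);
}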
+ */ +int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference) +{ + struct commit_list *bases; + int ret = 0, i; + uint32_t min_generation = GENERATION_NUMBER_INFINITY; + + if (parse_commit(commit)) + return ret; + for (i = 0; i < nr_reference; i++) { + if (parse_commit(reference[i])) + return ret; + if (reference[i]->generation < min_generation) + min_generation = reference[i]->generation; + } + + if (commit->generation > min_generation) + return ret; + + bases = paint_down_to_common(commit, nr_reference, reference, commit->generation); + if (commit->object.flags & PARENT2) + ret = 1; + clear_commit_marks(commit, all_flags); + clear_commit_marks_many(nr_reference, reference, all_flags); + free_commit_list(bases); + return ret; +} + +/* + * Is "commit" an ancestor of (i.e. reachable from) the "reference"? + */ +int in_merge_bases(struct commit *commit, struct commit *reference) +{ + return in_merge_bases_many(commit, 1, &reference); +} + +struct commit_list *reduce_heads(struct commit_list *heads) +{ + struct commit_list *p; + struct commit_list *result = NULL, **tail = &result; + struct commit **array; + int num_head, i; + + if (!heads) + return NULL; + + /* Uniquify */ + for (p = heads; p; p = p->next) + p->item->object.flags &= ~STALE; + for (p = heads, num_head = 0; p; p = p->next) { + if (p->item->object.flags & STALE) + continue; + p->item->object.flags |= STALE; + num_head++; + } + array = xcalloc(num_head, sizeof(*array)); + for (p = heads, i = 0; p; p = p->next) { + if (p->item->object.flags & STALE) { + array[i++] = p->item; + p->item->object.flags &= ~STALE; + } + } + num_head = remove_redundant(array, num_head); + for (i = 0; i < num_head; i++) + tail = &commit_list_insert(array[i], tail)->next; + free(array); + return result; +} + +void reduce_heads_replace(struct commit_list **heads) +{ + struct commit_list *result = reduce_heads(*heads); + free_commit_list(*heads); + *heads = result; +} + +int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid) +{ + struct object *o; + struct commit *old_commit, *new_commit; + struct commit_list *old_commit_list = NULL; + + /* + * Both new_commit and old_commit must be commit-ish and new_commit is descendant of + * old_commit. Otherwise we require --force. + */ + o = deref_tag(the_repository, parse_object(the_repository, old_oid), + NULL, 0); + if (!o || o->type != OBJ_COMMIT) + return 0; + old_commit = (struct commit *) o; + + o = deref_tag(the_repository, parse_object(the_repository, new_oid), + NULL, 0); + if (!o || o->type != OBJ_COMMIT) + return 0; + new_commit = (struct commit *) o; + + if (parse_commit(new_commit) < 0) + return 0; + + commit_list_insert(old_commit, &old_commit_list); + return is_descendant_of(new_commit, old_commit_list); +} + +/* + * Mimicking the real stack, this stack lives on the heap, avoiding stack + * overflows. + * + * At each recursion step, the stack items points to the commits whose + * ancestors are to be inspected. + */ +struct contains_stack { + int nr, alloc; + struct contains_stack_entry { + struct commit *commit; + struct commit_list *parents; + } *contains_stack; +}; + +static int in_commit_list(const struct commit_list *want, struct commit *c) +{ + for (; want; want = want->next) + if (oideq(&want->item->object.oid, &c->object.oid)) + return 1; + return 0; +} + +/* + * Test whether the candidate is contained in the list. + * Do not recurse to find out, though, but return -1 if inconclusive. 
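/*
 * Illustrative sketch, not part of the patch, of the generation-number
 * cutoff that in_merge_bases_many() above relies on: an ancestor always has
 * a strictly smaller generation number than its descendants, so a commit
 * whose generation exceeds that of every reference cannot be reachable from
 * any of them.  cannot_be_ancestor() is a hypothetical helper.
 */
static int cannot_be_ancestor(struct commit *commit, struct commit *reference)
{
        if (parse_commit(commit) || parse_commit(reference))
                return 0;       /* unknown; fall back to walking */

        /*
         * Commits outside the commit-graph report GENERATION_NUMBER_INFINITY.
         * Because the graph is closed under reachability (see close_reachable()
         * earlier in this patch), such a commit cannot be an ancestor of a
         * commit that does have a finite generation number, so the comparison
         * below stays safe in that case too.
         */
        return commit->generation > reference->generation;
}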
+ */ +static enum contains_result contains_test(struct commit *candidate, + const struct commit_list *want, + struct contains_cache *cache, + uint32_t cutoff) +{ + enum contains_result *cached = contains_cache_at(cache, candidate); + + /* If we already have the answer cached, return that. */ + if (*cached) + return *cached; + + /* or are we it? */ + if (in_commit_list(want, candidate)) { + *cached = CONTAINS_YES; + return CONTAINS_YES; + } + + /* Otherwise, we don't know; prepare to recurse */ + parse_commit_or_die(candidate); + + if (candidate->generation < cutoff) + return CONTAINS_NO; + + return CONTAINS_UNKNOWN; +} + +static void push_to_contains_stack(struct commit *candidate, struct contains_stack *contains_stack) +{ + ALLOC_GROW(contains_stack->contains_stack, contains_stack->nr + 1, contains_stack->alloc); + contains_stack->contains_stack[contains_stack->nr].commit = candidate; + contains_stack->contains_stack[contains_stack->nr++].parents = candidate->parents; +} + +static enum contains_result contains_tag_algo(struct commit *candidate, + const struct commit_list *want, + struct contains_cache *cache) +{ + struct contains_stack contains_stack = { 0, 0, NULL }; + enum contains_result result; + uint32_t cutoff = GENERATION_NUMBER_INFINITY; + const struct commit_list *p; + + for (p = want; p; p = p->next) { + struct commit *c = p->item; + load_commit_graph_info(the_repository, c); + if (c->generation < cutoff) + cutoff = c->generation; + } + + result = contains_test(candidate, want, cache, cutoff); + if (result != CONTAINS_UNKNOWN) + return result; + + push_to_contains_stack(candidate, &contains_stack); + while (contains_stack.nr) { + struct contains_stack_entry *entry = &contains_stack.contains_stack[contains_stack.nr - 1]; + struct commit *commit = entry->commit; + struct commit_list *parents = entry->parents; + + if (!parents) { + *contains_cache_at(cache, commit) = CONTAINS_NO; + contains_stack.nr--; + } + /* + * If we just popped the stack, parents->item has been marked, + * therefore contains_test will return a meaningful yes/no. 
+ */ + else switch (contains_test(parents->item, want, cache, cutoff)) { + case CONTAINS_YES: + *contains_cache_at(cache, commit) = CONTAINS_YES; + contains_stack.nr--; + break; + case CONTAINS_NO: + entry->parents = parents->next; + break; + case CONTAINS_UNKNOWN: + push_to_contains_stack(parents->item, &contains_stack); + break; + } + } + free(contains_stack.contains_stack); + return contains_test(candidate, want, cache, cutoff); +} + +int commit_contains(struct ref_filter *filter, struct commit *commit, + struct commit_list *list, struct contains_cache *cache) +{ + if (filter->with_commit_tag_algo) + return contains_tag_algo(commit, list, cache) == CONTAINS_YES; + return is_descendant_of(commit, list); +} + +static int compare_commits_by_gen(const void *_a, const void *_b) +{ + const struct commit *a = *(const struct commit * const *)_a; + const struct commit *b = *(const struct commit * const *)_b; + + if (a->generation < b->generation) + return -1; + if (a->generation > b->generation) + return 1; + return 0; +} + +int can_all_from_reach_with_flag(struct object_array *from, + unsigned int with_flag, + unsigned int assign_flag, + time_t min_commit_date, + uint32_t min_generation) +{ + struct commit **list = NULL; + int i; + int nr_commits; + int result = 1; + + ALLOC_ARRAY(list, from->nr); + nr_commits = 0; + for (i = 0; i < from->nr; i++) { + struct object *from_one = from->objects[i].item; + + if (!from_one || from_one->flags & assign_flag) + continue; + + from_one = deref_tag(the_repository, from_one, + "a from object", 0); + if (!from_one || from_one->type != OBJ_COMMIT) { + /* + * no way to tell if this is reachable by + * looking at the ancestry chain alone, so + * leave a note to ourselves not to worry about + * this object anymore. + */ + from->objects[i].item->flags |= assign_flag; + continue; + } + + list[nr_commits] = (struct commit *)from_one; + if (parse_commit(list[nr_commits]) || + list[nr_commits]->generation < min_generation) { + result = 0; + goto cleanup; + } + + nr_commits++; + } + + QSORT(list, nr_commits, compare_commits_by_gen); + + for (i = 0; i < nr_commits; i++) { + /* DFS from list[i] */ + struct commit_list *stack = NULL; + + list[i]->object.flags |= assign_flag; + commit_list_insert(list[i], &stack); + + while (stack) { + struct commit_list *parent; + + if (stack->item->object.flags & (with_flag | RESULT)) { + pop_commit(&stack); + if (stack) + stack->item->object.flags |= RESULT; + continue; + } + + for (parent = stack->item->parents; parent; parent = parent->next) { + if (parent->item->object.flags & (with_flag | RESULT)) + stack->item->object.flags |= RESULT; + + if (!(parent->item->object.flags & assign_flag)) { + parent->item->object.flags |= assign_flag; + + if (parse_commit(parent->item) || + parent->item->date < min_commit_date || + parent->item->generation < min_generation) + continue; + + commit_list_insert(parent->item, &stack); + break; + } + } + + if (!parent) + pop_commit(&stack); + } + + if (!(list[i]->object.flags & (with_flag | RESULT))) { + result = 0; + goto cleanup; + } + } + +cleanup: + clear_commit_marks_many(nr_commits, list, RESULT | assign_flag); + free(list); + + for (i = 0; i < from->nr; i++) + from->objects[i].item->flags &= ~assign_flag; + + return result; +} + +int can_all_from_reach(struct commit_list *from, struct commit_list *to, + int cutoff_by_min_date) +{ + struct object_array from_objs = OBJECT_ARRAY_INIT; + time_t min_commit_date = cutoff_by_min_date ? 
from->item->date : 0; + struct commit_list *from_iter = from, *to_iter = to; + int result; + uint32_t min_generation = GENERATION_NUMBER_INFINITY; + + while (from_iter) { + add_object_array(&from_iter->item->object, NULL, &from_objs); + + if (!parse_commit(from_iter->item)) { + if (from_iter->item->date < min_commit_date) + min_commit_date = from_iter->item->date; + + if (from_iter->item->generation < min_generation) + min_generation = from_iter->item->generation; + } + + from_iter = from_iter->next; + } + + while (to_iter) { + if (!parse_commit(to_iter->item)) { + if (to_iter->item->date < min_commit_date) + min_commit_date = to_iter->item->date; + + if (to_iter->item->generation < min_generation) + min_generation = to_iter->item->generation; + } + + to_iter->item->object.flags |= PARENT2; + + to_iter = to_iter->next; + } + + result = can_all_from_reach_with_flag(&from_objs, PARENT2, PARENT1, + min_commit_date, min_generation); + + while (from) { + clear_commit_marks(from->item, PARENT1); + from = from->next; + } + + while (to) { + clear_commit_marks(to->item, PARENT2); + to = to->next; + } + + object_array_clear(&from_objs); + return result; +} + +struct commit_list *get_reachable_subset(struct commit **from, int nr_from, + struct commit **to, int nr_to, + unsigned int reachable_flag) +{ + struct commit **item; + struct commit *current; + struct commit_list *found_commits = NULL; + struct commit **to_last = to + nr_to; + struct commit **from_last = from + nr_from; + uint32_t min_generation = GENERATION_NUMBER_INFINITY; + int num_to_find = 0; + + struct prio_queue queue = { compare_commits_by_gen_then_commit_date }; + + for (item = to; item < to_last; item++) { + struct commit *c = *item; + + parse_commit(c); + if (c->generation < min_generation) + min_generation = c->generation; + + if (!(c->object.flags & PARENT1)) { + c->object.flags |= PARENT1; + num_to_find++; + } + } + + for (item = from; item < from_last; item++) { + struct commit *c = *item; + if (!(c->object.flags & PARENT2)) { + c->object.flags |= PARENT2; + parse_commit(c); + + prio_queue_put(&queue, *item); + } + } + + while (num_to_find && (current = prio_queue_get(&queue)) != NULL) { + struct commit_list *parents; + + if (current->object.flags & PARENT1) { + current->object.flags &= ~PARENT1; + current->object.flags |= reachable_flag; + commit_list_insert(current, &found_commits); + num_to_find--; + } + + for (parents = current->parents; parents; parents = parents->next) { + struct commit *p = parents->item; + + parse_commit(p); + + if (p->generation < min_generation) + continue; + + if (p->object.flags & PARENT2) + continue; + + p->object.flags |= PARENT2; + prio_queue_put(&queue, p); + } + } + + clear_commit_marks_many(nr_to, to, PARENT1); + clear_commit_marks_many(nr_from, from, PARENT2); + + return found_commits; +} diff --git a/commit-reach.h b/commit-reach.h new file mode 100644 index 0000000000..fb8082a2ec --- /dev/null +++ b/commit-reach.h @@ -0,0 +1,91 @@ +#ifndef COMMIT_REACH_H +#define COMMIT_REACH_H + +#include "commit.h" +#include "commit-slab.h" + +struct commit_list; +struct ref_filter; +struct object_id; +struct object_array; + +struct commit_list *get_merge_bases_many(struct commit *one, + int n, + struct commit **twos); +struct commit_list *get_merge_bases_many_dirty(struct commit *one, + int n, + struct commit **twos); +struct commit_list *get_merge_bases(struct commit *one, struct commit *two); +struct commit_list *get_octopus_merge_bases(struct commit_list *in); + +/* To be used only when object flags 
after this call no longer matter */ +struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos); + +int is_descendant_of(struct commit *commit, struct commit_list *with_commit); +int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference); +int in_merge_bases(struct commit *commit, struct commit *reference); + +/* + * Takes a list of commits and returns a new list where those + * have been removed that can be reached from other commits in + * the list. It is useful for, e.g., reducing the commits + * randomly thrown at the git-merge command and removing + * redundant commits that the user shouldn't have given to it. + * + * This function destroys the STALE bit of the commit objects' + * flags. + */ +struct commit_list *reduce_heads(struct commit_list *heads); + +/* + * Like `reduce_heads()`, except it replaces the list. Use this + * instead of `foo = reduce_heads(foo);` to avoid memory leaks. + */ +void reduce_heads_replace(struct commit_list **heads); + +int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid); + +/* + * Unknown has to be "0" here, because that's the default value for + * contains_cache slab entries that have not yet been assigned. + */ +enum contains_result { + CONTAINS_UNKNOWN = 0, + CONTAINS_NO, + CONTAINS_YES +}; + +define_commit_slab(contains_cache, enum contains_result); + +int commit_contains(struct ref_filter *filter, struct commit *commit, + struct commit_list *list, struct contains_cache *cache); + +/* + * Determine if every commit in 'from' can reach at least one commit + * that is marked with 'with_flag'. As we traverse, use 'assign_flag' + * as a marker for commits that are already visited. Do not walk + * commits with date below 'min_commit_date' or generation below + * 'min_generation'. + */ +int can_all_from_reach_with_flag(struct object_array *from, + unsigned int with_flag, + unsigned int assign_flag, + time_t min_commit_date, + uint32_t min_generation); +int can_all_from_reach(struct commit_list *from, struct commit_list *to, + int commit_date_cutoff); + + +/* + * Return a list of commits containing the commits in the 'to' array + * that are reachable from at least one commit in the 'from' array. + * Also add the given 'flag' to each of the commits in the returned list. + * + * This method uses the PARENT1 and PARENT2 flags during its operation, + * so be sure these flags are not set before calling the method. 
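/*
 * Illustrative sketch, not part of the patch: a minimal caller of
 * get_reachable_subset() declared above.  filter_reachable() and the REACHED
 * flag value are hypothetical -- a real caller has to pick a bit from the
 * object flag allocation in object.h that does not clash with PARENT1/PARENT2
 * or with its own walk.
 */
#include "commit.h"
#include "commit-reach.h"

#define REACHED (1u<<23)        /* hypothetical flag bit; see object.h */

static struct commit_list *filter_reachable(struct commit **from, int nr_from,
                                            struct commit **to, int nr_to)
{
        /*
         * PARENT1 and PARENT2 must be clear on the input commits;
         * get_reachable_subset() clears them again before returning.
         */
        return get_reachable_subset(from, nr_from, to, nr_to, REACHED);
}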
+ */ +struct commit_list *get_reachable_subset(struct commit **from, int nr_from, + struct commit **to, int nr_to, + unsigned int reachable_flag); + +#endif diff --git a/commit-slab-impl.h b/commit-slab-impl.h index ac1e6d409a..5c0eb91a5d 100644 --- a/commit-slab-impl.h +++ b/commit-slab-impl.h @@ -1,10 +1,10 @@ #ifndef COMMIT_SLAB_IMPL_H #define COMMIT_SLAB_IMPL_H -#define MAYBE_UNUSED __attribute__((__unused__)) +#include "git-compat-util.h" #define implement_static_commit_slab(slabname, elemtype) \ - implement_commit_slab(slabname, elemtype, static MAYBE_UNUSED) + implement_commit_slab(slabname, elemtype, MAYBE_UNUSED static) #define implement_shared_commit_slab(slabname, elemtype) \ implement_commit_slab(slabname, elemtype, ) @@ -17,6 +17,8 @@ #include "sha1-lookup.h" #include "wt-status.h" #include "advice.h" +#include "refs.h" +#include "commit-reach.h" static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **); @@ -46,7 +48,7 @@ struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref struct commit *c = lookup_commit_reference(the_repository, oid); if (!c) die(_("could not parse %s"), ref_name); - if (oidcmp(oid, &c->object.oid)) { + if (!oideq(oid, &c->object.oid)) { warning(_("%s %s is not a commit!"), ref_name, oid_to_hex(oid)); } @@ -655,11 +657,10 @@ struct commit *pop_commit(struct commit_list **stack) /* count number of children that have not been emitted */ define_commit_slab(indegree_slab, int); -/* record author-date for each commit object */ define_commit_slab(author_date_slab, timestamp_t); -static void record_author_date(struct author_date_slab *author_date, - struct commit *commit) +void record_author_date(struct author_date_slab *author_date, + struct commit *commit) { const char *buffer = get_commit_buffer(commit, NULL); struct ident_split ident; @@ -684,8 +685,8 @@ fail_exit: unuse_commit_buffer(commit, buffer); } -static int compare_commits_by_author_date(const void *a_, const void *b_, - void *cb_data) +int compare_commits_by_author_date(const void *a_, const void *b_, + void *cb_data) { const struct commit *a = a_, *b = b_; struct author_date_slab *author_date = cb_data; @@ -843,367 +844,86 @@ void sort_in_topological_order(struct commit_list **list, enum rev_sort_order so clear_author_date_slab(&author_date); } -/* merge-base stuff */ - -/* Remember to update object flag allocation in object.h */ -#define PARENT1 (1u<<16) -#define PARENT2 (1u<<17) -#define STALE (1u<<18) -#define RESULT (1u<<19) - -static const unsigned all_flags = (PARENT1 | PARENT2 | STALE | RESULT); - -static int queue_has_nonstale(struct prio_queue *queue) -{ - int i; - for (i = 0; i < queue->nr; i++) { - struct commit *commit = queue->array[i].data; - if (!(commit->object.flags & STALE)) - return 1; - } - return 0; -} +struct rev_collect { + struct commit **commit; + int nr; + int alloc; + unsigned int initial : 1; +}; -/* all input commits in one and twos[] must have been parsed! 
*/ -static struct commit_list *paint_down_to_common(struct commit *one, int n, - struct commit **twos, - int min_generation) +static void add_one_commit(struct object_id *oid, struct rev_collect *revs) { - struct prio_queue queue = { compare_commits_by_gen_then_commit_date }; - struct commit_list *result = NULL; - int i; - uint32_t last_gen = GENERATION_NUMBER_INFINITY; - - if (!min_generation) - queue.compare = compare_commits_by_commit_date; - - one->object.flags |= PARENT1; - if (!n) { - commit_list_append(one, &result); - return result; - } - prio_queue_put(&queue, one); - - for (i = 0; i < n; i++) { - twos[i]->object.flags |= PARENT2; - prio_queue_put(&queue, twos[i]); - } - - while (queue_has_nonstale(&queue)) { - struct commit *commit = prio_queue_get(&queue); - struct commit_list *parents; - int flags; - - if (min_generation && commit->generation > last_gen) - BUG("bad generation skip %8x > %8x at %s", - commit->generation, last_gen, - oid_to_hex(&commit->object.oid)); - last_gen = commit->generation; + struct commit *commit; - if (commit->generation < min_generation) - break; + if (is_null_oid(oid)) + return; - flags = commit->object.flags & (PARENT1 | PARENT2 | STALE); - if (flags == (PARENT1 | PARENT2)) { - if (!(commit->object.flags & RESULT)) { - commit->object.flags |= RESULT; - commit_list_insert_by_date(commit, &result); - } - /* Mark parents of a found merge stale */ - flags |= STALE; - } - parents = commit->parents; - while (parents) { - struct commit *p = parents->item; - parents = parents->next; - if ((p->object.flags & flags) == flags) - continue; - if (parse_commit(p)) - return NULL; - p->object.flags |= flags; - prio_queue_put(&queue, p); - } - } + commit = lookup_commit(the_repository, oid); + if (!commit || + (commit->object.flags & TMP_MARK) || + parse_commit(commit)) + return; - clear_prio_queue(&queue); - return result; + ALLOC_GROW(revs->commit, revs->nr + 1, revs->alloc); + revs->commit[revs->nr++] = commit; + commit->object.flags |= TMP_MARK; } -static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos) +static int collect_one_reflog_ent(struct object_id *ooid, struct object_id *noid, + const char *ident, timestamp_t timestamp, + int tz, const char *message, void *cbdata) { - struct commit_list *list = NULL; - struct commit_list *result = NULL; - int i; - - for (i = 0; i < n; i++) { - if (one == twos[i]) - /* - * We do not mark this even with RESULT so we do not - * have to clean it up. 
- */ - return commit_list_insert(one, &result); - } - - if (parse_commit(one)) - return NULL; - for (i = 0; i < n; i++) { - if (parse_commit(twos[i])) - return NULL; - } - - list = paint_down_to_common(one, n, twos, 0); + struct rev_collect *revs = cbdata; - while (list) { - struct commit *commit = pop_commit(&list); - if (!(commit->object.flags & STALE)) - commit_list_insert_by_date(commit, &result); + if (revs->initial) { + revs->initial = 0; + add_one_commit(ooid, revs); } - return result; + add_one_commit(noid, revs); + return 0; } -struct commit_list *get_octopus_merge_bases(struct commit_list *in) +struct commit *get_fork_point(const char *refname, struct commit *commit) { - struct commit_list *i, *j, *k, *ret = NULL; + struct object_id oid; + struct rev_collect revs; + struct commit_list *bases; + int i; + struct commit *ret = NULL; - if (!in) - return ret; + memset(&revs, 0, sizeof(revs)); + revs.initial = 1; + for_each_reflog_ent(refname, collect_one_reflog_ent, &revs); - commit_list_insert(in->item, &ret); + if (!revs.nr && !get_oid(refname, &oid)) + add_one_commit(&oid, &revs); - for (i = in->next; i; i = i->next) { - struct commit_list *new_commits = NULL, *end = NULL; + for (i = 0; i < revs.nr; i++) + revs.commit[i]->object.flags &= ~TMP_MARK; - for (j = ret; j; j = j->next) { - struct commit_list *bases; - bases = get_merge_bases(i->item, j->item); - if (!new_commits) - new_commits = bases; - else - end->next = bases; - for (k = bases; k; k = k->next) - end = k; - } - ret = new_commits; - } - return ret; -} + bases = get_merge_bases_many(commit, revs.nr, revs.commit); -static int remove_redundant(struct commit **array, int cnt) -{ /* - * Some commit in the array may be an ancestor of - * another commit. Move such commit to the end of - * the array, and return the number of commits that - * are independent from each other. + * There should be one and only one merge base, when we found + * a common ancestor among reflog entries. 
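/*
 * Illustrative sketch, not part of the patch: how a caller that wants the
 * "--fork-point" behaviour might use get_fork_point() above.  The helper
 * report_fork_point() and the refname "refs/remotes/origin/master" are only
 * examples.
 */
static void report_fork_point(struct commit *topic)
{
        struct commit *fork_point =
                get_fork_point("refs/remotes/origin/master", topic);

        if (fork_point)
                printf("forked at %s\n",
                       oid_to_hex(&fork_point->object.oid));
        else
                printf("no fork point found\n");
}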
*/ - struct commit **work; - unsigned char *redundant; - int *filled_index; - int i, j, filled; - - work = xcalloc(cnt, sizeof(*work)); - redundant = xcalloc(cnt, 1); - ALLOC_ARRAY(filled_index, cnt - 1); - - for (i = 0; i < cnt; i++) - parse_commit(array[i]); - for (i = 0; i < cnt; i++) { - struct commit_list *common; - uint32_t min_generation = array[i]->generation; - - if (redundant[i]) - continue; - for (j = filled = 0; j < cnt; j++) { - if (i == j || redundant[j]) - continue; - filled_index[filled] = j; - work[filled++] = array[j]; - - if (array[j]->generation < min_generation) - min_generation = array[j]->generation; - } - common = paint_down_to_common(array[i], filled, work, - min_generation); - if (array[i]->object.flags & PARENT2) - redundant[i] = 1; - for (j = 0; j < filled; j++) - if (work[j]->object.flags & PARENT1) - redundant[filled_index[j]] = 1; - clear_commit_marks(array[i], all_flags); - clear_commit_marks_many(filled, work, all_flags); - free_commit_list(common); - } - - /* Now collect the result */ - COPY_ARRAY(work, array, cnt); - for (i = filled = 0; i < cnt; i++) - if (!redundant[i]) - array[filled++] = work[i]; - for (j = filled, i = 0; i < cnt; i++) - if (redundant[i]) - array[j++] = work[i]; - free(work); - free(redundant); - free(filled_index); - return filled; -} - -static struct commit_list *get_merge_bases_many_0(struct commit *one, - int n, - struct commit **twos, - int cleanup) -{ - struct commit_list *list; - struct commit **rslt; - struct commit_list *result; - int cnt, i; - - result = merge_bases_many(one, n, twos); - for (i = 0; i < n; i++) { - if (one == twos[i]) - return result; - } - if (!result || !result->next) { - if (cleanup) { - clear_commit_marks(one, all_flags); - clear_commit_marks_many(n, twos, all_flags); - } - return result; - } - - /* There are more than one */ - cnt = commit_list_count(result); - rslt = xcalloc(cnt, sizeof(*rslt)); - for (list = result, i = 0; list; list = list->next) - rslt[i++] = list->item; - free_commit_list(result); - - clear_commit_marks(one, all_flags); - clear_commit_marks_many(n, twos, all_flags); - - cnt = remove_redundant(rslt, cnt); - result = NULL; - for (i = 0; i < cnt; i++) - commit_list_insert_by_date(rslt[i], &result); - free(rslt); - return result; -} - -struct commit_list *get_merge_bases_many(struct commit *one, - int n, - struct commit **twos) -{ - return get_merge_bases_many_0(one, n, twos, 1); -} - -struct commit_list *get_merge_bases_many_dirty(struct commit *one, - int n, - struct commit **twos) -{ - return get_merge_bases_many_0(one, n, twos, 0); -} - -struct commit_list *get_merge_bases(struct commit *one, struct commit *two) -{ - return get_merge_bases_many_0(one, 1, &two, 1); -} - -/* - * Is "commit" a descendant of one of the elements on the "with_commit" list? - */ -int is_descendant_of(struct commit *commit, struct commit_list *with_commit) -{ - if (!with_commit) - return 1; - while (with_commit) { - struct commit *other; - - other = with_commit->item; - with_commit = with_commit->next; - if (in_merge_bases(other, commit)) - return 1; - } - return 0; -} - -/* - * Is "commit" an ancestor of one of the "references"? 
- */ -int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference) -{ - struct commit_list *bases; - int ret = 0, i; - uint32_t min_generation = GENERATION_NUMBER_INFINITY; + if (!bases || bases->next) + goto cleanup_return; - if (parse_commit(commit)) - return ret; - for (i = 0; i < nr_reference; i++) { - if (parse_commit(reference[i])) - return ret; - if (reference[i]->generation < min_generation) - min_generation = reference[i]->generation; - } + /* And the found one must be one of the reflog entries */ + for (i = 0; i < revs.nr; i++) + if (&bases->item->object == &revs.commit[i]->object) + break; /* found */ + if (revs.nr <= i) + goto cleanup_return; - if (commit->generation > min_generation) - return ret; + ret = bases->item; - bases = paint_down_to_common(commit, nr_reference, reference, commit->generation); - if (commit->object.flags & PARENT2) - ret = 1; - clear_commit_marks(commit, all_flags); - clear_commit_marks_many(nr_reference, reference, all_flags); +cleanup_return: free_commit_list(bases); return ret; } -/* - * Is "commit" an ancestor of (i.e. reachable from) the "reference"? - */ -int in_merge_bases(struct commit *commit, struct commit *reference) -{ - return in_merge_bases_many(commit, 1, &reference); -} - -struct commit_list *reduce_heads(struct commit_list *heads) -{ - struct commit_list *p; - struct commit_list *result = NULL, **tail = &result; - struct commit **array; - int num_head, i; - - if (!heads) - return NULL; - - /* Uniquify */ - for (p = heads; p; p = p->next) - p->item->object.flags &= ~STALE; - for (p = heads, num_head = 0; p; p = p->next) { - if (p->item->object.flags & STALE) - continue; - p->item->object.flags |= STALE; - num_head++; - } - array = xcalloc(num_head, sizeof(*array)); - for (p = heads, i = 0; p; p = p->next) { - if (p->item->object.flags & STALE) { - array[i++] = p->item; - p->item->object.flags &= ~STALE; - } - } - num_head = remove_redundant(array, num_head); - for (i = 0; i < num_head; i++) - tail = &commit_list_insert(array[i], tail)->next; - free(array); - return result; -} - -void reduce_heads_replace(struct commit_list **heads) -{ - struct commit_list *result = reduce_heads(*heads); - free_commit_list(*heads); - *heads = result; -} - static const char gpg_sig_header[] = "gpgsig"; static const int gpg_sig_header_len = sizeof(gpg_sig_header) - 1; @@ -1379,7 +1099,33 @@ int check_commit_signature(const struct commit *commit, struct signature_check * return ret; } +void verify_merge_signature(struct commit *commit, int verbosity) +{ + char hex[GIT_MAX_HEXSZ + 1]; + struct signature_check signature_check; + memset(&signature_check, 0, sizeof(signature_check)); + + check_commit_signature(commit, &signature_check); + find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV); + switch (signature_check.result) { + case 'G': + break; + case 'U': + die(_("Commit %s has an untrusted GPG signature, " + "allegedly by %s."), hex, signature_check.signer); + case 'B': + die(_("Commit %s has a bad GPG signature " + "allegedly by %s."), hex, signature_check.signer); + default: /* 'N' */ + die(_("Commit %s does not have a GPG signature."), hex); + } + if (verbosity >= 0 && signature_check.result == 'G') + printf(_("Commit %s has a good GPG signature by %s\n"), + hex, signature_check.signer); + + signature_check_clear(&signature_check); +} void append_merge_tag_headers(struct commit_list *parents, struct commit_extra_header ***tail) @@ -8,6 +8,7 @@ #include "gpg-interface.h" #include "string-list.h" #include "pretty.h" 
+#include "commit-slab.h" #define COMMIT_NOT_FROM_GRAPH 0xFFFFFFFF #define GENERATION_NUMBER_INFINITY 0xFFFFFFFF @@ -205,12 +206,7 @@ int register_commit_graft(struct repository *r, struct commit_graft *, int); void prepare_commit_graft(struct repository *r); struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid); -extern struct commit_list *get_merge_bases(struct commit *rev1, struct commit *rev2); -extern struct commit_list *get_merge_bases_many(struct commit *one, int n, struct commit **twos); -extern struct commit_list *get_octopus_merge_bases(struct commit_list *in); - -/* To be used only when object flags after this call no longer matter */ -extern struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos); +struct commit *get_fork_point(const char *refname, struct commit *commit); /* largest positive number a signed 32-bit integer can contain */ #define INFINITE_DEPTH 0x7fffffff @@ -261,32 +257,10 @@ extern int delayed_reachability_test(struct shallow_info *si, int c); extern void prune_shallow(unsigned options); extern struct trace_key trace_shallow; -int is_descendant_of(struct commit *, struct commit_list *); -int in_merge_bases(struct commit *, struct commit *); -int in_merge_bases_many(struct commit *, int, struct commit **); - extern int interactive_add(int argc, const char **argv, const char *prefix, int patch); extern int run_add_interactive(const char *revision, const char *patch_mode, const struct pathspec *pathspec); -/* - * Takes a list of commits and returns a new list where those - * have been removed that can be reached from other commits in - * the list. It is useful for, e.g., reducing the commits - * randomly thrown at the git-merge command and removing - * redundant commits that the user shouldn't have given to it. - * - * This function destroys the STALE bit of the commit objects' - * flags. - */ -extern struct commit_list *reduce_heads(struct commit_list *heads); - -/* - * Like `reduce_heads()`, except it replaces the list. Use this - * instead of `foo = reduce_heads(foo);` to avoid memory leaks. - */ -extern void reduce_heads_replace(struct commit_list **heads); - struct commit_extra_header { struct commit_extra_header *next; char *key; @@ -360,6 +334,20 @@ extern int remove_signature(struct strbuf *buf); */ extern int check_commit_signature(const struct commit *commit, struct signature_check *sigc); +/* record author-date for each commit object */ +struct author_date_slab; +void record_author_date(struct author_date_slab *author_date, + struct commit *commit); + +int compare_commits_by_author_date(const void *a_, const void *b_, void *unused); + +/* + * Verify a single commit with check_commit_signature() and die() if it is not + * a good signature. This isn't really suitable for general use, but is a + * helper to implement consistent logic for pull/merge --verify-signatures. 
+ */ +void verify_merge_signature(struct commit *commit, int verbose); + int compare_commits_by_commit_date(const void *a_, const void *b_, void *unused); int compare_commits_by_gen_then_commit_date(const void *a_, const void *b_, void *unused); diff --git a/compat/mingw.c b/compat/mingw.c index 18caf21969..34b3880b29 100644 --- a/compat/mingw.c +++ b/compat/mingw.c @@ -5,6 +5,8 @@ #include "../strbuf.h" #include "../run-command.h" #include "../cache.h" +#include "win32/lazyload.h" +#include "../config.h" #define HCAST(type, handle) ((type)(intptr_t)handle) @@ -202,6 +204,60 @@ static int ask_yes_no_if_possible(const char *format, ...) } } +/* Windows only */ +enum hide_dotfiles_type { + HIDE_DOTFILES_FALSE = 0, + HIDE_DOTFILES_TRUE, + HIDE_DOTFILES_DOTGITONLY +}; + +static enum hide_dotfiles_type hide_dotfiles = HIDE_DOTFILES_DOTGITONLY; +static char *unset_environment_variables; + +int mingw_core_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "core.hidedotfiles")) { + if (value && !strcasecmp(value, "dotgitonly")) + hide_dotfiles = HIDE_DOTFILES_DOTGITONLY; + else + hide_dotfiles = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.unsetenvvars")) { + free(unset_environment_variables); + unset_environment_variables = xstrdup(value); + return 0; + } + + return 0; +} + +/* Normalizes NT paths as returned by some low-level APIs. */ +static wchar_t *normalize_ntpath(wchar_t *wbuf) +{ + int i; + /* fix absolute path prefixes */ + if (wbuf[0] == '\\') { + /* strip NT namespace prefixes */ + if (!wcsncmp(wbuf, L"\\??\\", 4) || + !wcsncmp(wbuf, L"\\\\?\\", 4)) + wbuf += 4; + else if (!wcsnicmp(wbuf, L"\\DosDevices\\", 12)) + wbuf += 12; + /* replace remaining '...UNC\' with '\\' */ + if (!wcsnicmp(wbuf, L"UNC\\", 4)) { + wbuf += 2; + *wbuf = '\\'; + } + } + /* convert backslashes to slashes */ + for (i = 0; wbuf[i]; i++) + if (wbuf[i] == '\\') + wbuf[i] = '/'; + return wbuf; +} + int mingw_unlink(const char *pathname) { int ret, tries = 0; @@ -592,9 +648,11 @@ static inline long long filetime_to_hnsec(const FILETIME *ft) return winTime - 116444736000000000LL; } -static inline time_t filetime_to_time_t(const FILETIME *ft) +static inline void filetime_to_timespec(const FILETIME *ft, struct timespec *ts) { - return (time_t)(filetime_to_hnsec(ft) / 10000000); + long long hnsec = filetime_to_hnsec(ft); + ts->tv_sec = (time_t)(hnsec / 10000000); + ts->tv_nsec = (hnsec % 10000000) * 100; } /** @@ -653,9 +711,9 @@ static int do_lstat(int follow, const char *file_name, struct stat *buf) buf->st_size = fdata.nFileSizeLow | (((off_t)fdata.nFileSizeHigh)<<32); buf->st_dev = buf->st_rdev = 0; /* not used by Git */ - buf->st_atime = filetime_to_time_t(&(fdata.ftLastAccessTime)); - buf->st_mtime = filetime_to_time_t(&(fdata.ftLastWriteTime)); - buf->st_ctime = filetime_to_time_t(&(fdata.ftCreationTime)); + filetime_to_timespec(&(fdata.ftLastAccessTime), &(buf->st_atim)); + filetime_to_timespec(&(fdata.ftLastWriteTime), &(buf->st_mtim)); + filetime_to_timespec(&(fdata.ftCreationTime), &(buf->st_ctim)); if (fdata.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) { WIN32_FIND_DATAW findbuf; HANDLE handle = FindFirstFileW(wfilename, &findbuf); @@ -736,6 +794,29 @@ static int do_stat_internal(int follow, const char *file_name, struct stat *buf) return do_lstat(follow, alt_name, buf); } +static int get_file_info_by_handle(HANDLE hnd, struct stat *buf) +{ + BY_HANDLE_FILE_INFORMATION fdata; + + if (!GetFileInformationByHandle(hnd, &fdata)) { + errno = 
err_win_to_posix(GetLastError()); + return -1; + } + + buf->st_ino = 0; + buf->st_gid = 0; + buf->st_uid = 0; + buf->st_nlink = 1; + buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes); + buf->st_size = fdata.nFileSizeLow | + (((off_t)fdata.nFileSizeHigh)<<32); + buf->st_dev = buf->st_rdev = 0; /* not used by Git */ + filetime_to_timespec(&(fdata.ftLastAccessTime), &(buf->st_atim)); + filetime_to_timespec(&(fdata.ftLastWriteTime), &(buf->st_mtim)); + filetime_to_timespec(&(fdata.ftCreationTime), &(buf->st_ctim)); + return 0; +} + int mingw_lstat(const char *file_name, struct stat *buf) { return do_stat_internal(0, file_name, buf); @@ -748,32 +829,31 @@ int mingw_stat(const char *file_name, struct stat *buf) int mingw_fstat(int fd, struct stat *buf) { HANDLE fh = (HANDLE)_get_osfhandle(fd); - BY_HANDLE_FILE_INFORMATION fdata; + DWORD avail, type = GetFileType(fh) & ~FILE_TYPE_REMOTE; - if (fh == INVALID_HANDLE_VALUE) { - errno = EBADF; - return -1; - } - /* direct non-file handles to MS's fstat() */ - if (GetFileType(fh) != FILE_TYPE_DISK) - return _fstati64(fd, buf); + switch (type) { + case FILE_TYPE_DISK: + return get_file_info_by_handle(fh, buf); - if (GetFileInformationByHandle(fh, &fdata)) { - buf->st_ino = 0; - buf->st_gid = 0; - buf->st_uid = 0; + case FILE_TYPE_CHAR: + case FILE_TYPE_PIPE: + /* initialize stat fields */ + memset(buf, 0, sizeof(*buf)); buf->st_nlink = 1; - buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes); - buf->st_size = fdata.nFileSizeLow | - (((off_t)fdata.nFileSizeHigh)<<32); - buf->st_dev = buf->st_rdev = 0; /* not used by Git */ - buf->st_atime = filetime_to_time_t(&(fdata.ftLastAccessTime)); - buf->st_mtime = filetime_to_time_t(&(fdata.ftLastWriteTime)); - buf->st_ctime = filetime_to_time_t(&(fdata.ftCreationTime)); + + if (type == FILE_TYPE_CHAR) { + buf->st_mode = _S_IFCHR; + } else { + buf->st_mode = _S_IFIFO; + if (PeekNamedPipe(fh, NULL, 0, NULL, &avail, NULL)) + buf->st_size = avail; + } return 0; + + default: + errno = EBADF; + return -1; } - errno = EBADF; - return -1; } static inline void time_t_to_filetime(time_t t, FILETIME *ft) @@ -917,8 +997,29 @@ struct tm *localtime_r(const time_t *timep, struct tm *result) char *mingw_getcwd(char *pointer, int len) { - wchar_t wpointer[MAX_PATH]; - if (!_wgetcwd(wpointer, ARRAY_SIZE(wpointer))) + wchar_t cwd[MAX_PATH], wpointer[MAX_PATH]; + DWORD ret = GetCurrentDirectoryW(ARRAY_SIZE(cwd), cwd); + + if (!ret || ret >= ARRAY_SIZE(cwd)) { + errno = ret ? 
ENAMETOOLONG : err_win_to_posix(GetLastError()); + return NULL; + } + ret = GetLongPathNameW(cwd, wpointer, ARRAY_SIZE(wpointer)); + if (!ret && GetLastError() == ERROR_ACCESS_DENIED) { + HANDLE hnd = CreateFileW(cwd, 0, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + if (hnd == INVALID_HANDLE_VALUE) + return NULL; + ret = GetFinalPathNameByHandleW(hnd, wpointer, ARRAY_SIZE(wpointer), 0); + CloseHandle(hnd); + if (!ret || ret >= ARRAY_SIZE(wpointer)) + return NULL; + if (xwcstoutf(pointer, normalize_ntpath(wpointer), len) < 0) + return NULL; + return pointer; + } + if (!ret || ret >= ARRAY_SIZE(wpointer)) return NULL; if (xwcstoutf(pointer, wpointer, len) < 0) return NULL; @@ -927,8 +1028,8 @@ char *mingw_getcwd(char *pointer, int len) } /* - * See http://msdn2.microsoft.com/en-us/library/17w5ykft(vs.71).aspx - * (Parsing C++ Command-Line Arguments) + * See "Parsing C++ Command-Line Arguments" at Microsoft's Docs: + * https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments */ static const char *quote_arg(const char *arg) { @@ -1070,44 +1171,142 @@ static char *path_lookup(const char *cmd, int exe_only) return prog; } -static int do_putenv(char **env, const char *name, int size, int free_old); +static const wchar_t *wcschrnul(const wchar_t *s, wchar_t c) +{ + while (*s && *s != c) + s++; + return s; +} -/* used number of elements of environ array, including terminating NULL */ -static int environ_size = 0; -/* allocated size of environ array, in bytes */ -static int environ_alloc = 0; +/* Compare only keys */ +static int wenvcmp(const void *a, const void *b) +{ + wchar_t *p = *(wchar_t **)a, *q = *(wchar_t **)b; + size_t p_len, q_len; + + /* Find the keys */ + p_len = wcschrnul(p, L'=') - p; + q_len = wcschrnul(q, L'=') - q; + + /* If the length differs, include the shorter key's NUL */ + if (p_len < q_len) + p_len++; + else if (p_len > q_len) + p_len = q_len + 1; + + return _wcsnicmp(p, q, p_len); +} + +/* We need a stable sort to convert the environment between UTF-16 <-> UTF-8 */ +#ifndef INTERNAL_QSORT +#include "qsort.c" +#endif /* - * Create environment block suitable for CreateProcess. Merges current - * process environment and the supplied environment changes. + * Build an environment block combining the inherited environment + * merged with the given list of settings. + * + * Values of the form "KEY=VALUE" in deltaenv override inherited values. + * Values of the form "KEY" in deltaenv delete inherited values. + * + * Multiple entries in deltaenv for the same key are explicitly allowed. + * + * We return a contiguous block of UNICODE strings with a final trailing + * zero word. */ static wchar_t *make_environment_block(char **deltaenv) { - wchar_t *wenvblk = NULL; - char **tmpenv; - int i = 0, size = environ_size, wenvsz = 0, wenvpos = 0; + wchar_t *wenv = GetEnvironmentStringsW(), *wdeltaenv, *result, *p; + size_t wlen, s, delta_size, size; + + wchar_t **array = NULL; + size_t alloc = 0, nr = 0, i; + + size = 1; /* for extra NUL at the end */ + + /* If there is no deltaenv to apply, simply return a copy. 
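/*
 * Illustrative sketch, not part of the patch: a narrow-character model of the
 * key-only comparison wenvcmp() above performs on wide strings.  Keys are
 * compared case-insensitively up to the '=' (or end of string), so "PATH=C:"
 * and "Path" compare equal; that is what lets the sorted-array pass below keep
 * only the last setting for a key and drop "KEY"-style deletion entries.
 * envcmp() is hypothetical; the real code uses wcschrnul() and _wcsnicmp().
 */
#include "git-compat-util.h"

static int envcmp(const char *p, const char *q)
{
        size_t p_len = strcspn(p, "=");
        size_t q_len = strcspn(q, "=");

        /* if the key lengths differ, include the shorter key's terminator */
        if (p_len < q_len)
                p_len++;
        else if (p_len > q_len)
                p_len = q_len + 1;

        return strncasecmp(p, q, p_len);
}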
*/ + if (!deltaenv || !*deltaenv) { + for (p = wenv; p && *p; ) { + size_t s = wcslen(p) + 1; + size += s; + p += s; + } + + ALLOC_ARRAY(result, size); + memcpy(result, wenv, size * sizeof(*wenv)); + FreeEnvironmentStringsW(wenv); + return result; + } + + /* + * If there is a deltaenv, let's accumulate all keys into `array`, + * sort them using the stable git_qsort() and then copy, skipping + * duplicate keys + */ + for (p = wenv; p && *p; ) { + ALLOC_GROW(array, nr + 1, alloc); + s = wcslen(p) + 1; + array[nr++] = p; + p += s; + size += s; + } + + /* (over-)assess size needed for wchar version of deltaenv */ + for (delta_size = 0, i = 0; deltaenv[i]; i++) + delta_size += strlen(deltaenv[i]) * 2 + 1; + ALLOC_ARRAY(wdeltaenv, delta_size); + + /* convert the deltaenv, appending to array */ + for (i = 0, p = wdeltaenv; deltaenv[i]; i++) { + ALLOC_GROW(array, nr + 1, alloc); + wlen = xutftowcs(p, deltaenv[i], wdeltaenv + delta_size - p); + array[nr++] = p; + p += wlen + 1; + } + + git_qsort(array, nr, sizeof(*array), wenvcmp); + ALLOC_ARRAY(result, size + delta_size); - while (deltaenv && deltaenv[i]) - i++; + for (p = result, i = 0; i < nr; i++) { + /* Skip any duplicate keys; last one wins */ + while (i + 1 < nr && !wenvcmp(array + i, array + i + 1)) + i++; - /* copy the environment, leaving space for changes */ - ALLOC_ARRAY(tmpenv, size + i); - memcpy(tmpenv, environ, size * sizeof(char*)); + /* Skip "to delete" entry */ + if (!wcschr(array[i], L'=')) + continue; + + size = wcslen(array[i]) + 1; + memcpy(p, array[i], size * sizeof(*p)); + p += size; + } + *p = L'\0'; + + free(array); + free(wdeltaenv); + FreeEnvironmentStringsW(wenv); + return result; +} - /* merge supplied environment changes into the temporary environment */ - for (i = 0; deltaenv && deltaenv[i]; i++) - size = do_putenv(tmpenv, deltaenv[i], size, 0); +static void do_unset_environment_variables(void) +{ + static int done; + char *p = unset_environment_variables; - /* create environment block from temporary environment */ - for (i = 0; tmpenv[i]; i++) { - size = 2 * strlen(tmpenv[i]) + 2; /* +2 for final \0 */ - ALLOC_GROW(wenvblk, (wenvpos + size) * sizeof(wchar_t), wenvsz); - wenvpos += xutftowcs(&wenvblk[wenvpos], tmpenv[i], size) + 1; + if (done || !p) + return; + done = 1; + + for (;;) { + char *comma = strchr(p, ','); + + if (comma) + *comma = '\0'; + unsetenv(p); + if (!comma) + break; + p = comma + 1; } - /* add final \0 terminator */ - wenvblk[wenvpos] = 0; - free(tmpenv); - return wenvblk; } struct pinfo_t { @@ -1128,9 +1327,12 @@ static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaen wchar_t wcmd[MAX_PATH], wdir[MAX_PATH], *wargs, *wenvblk = NULL; unsigned flags = CREATE_UNICODE_ENVIRONMENT; BOOL ret; + HANDLE cons; + + do_unset_environment_variables(); /* Determine whether or not we are associated to a console */ - HANDLE cons = CreateFile("CONOUT$", GENERIC_WRITE, + cons = CreateFile("CONOUT$", GENERIC_WRITE, FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (cons == INVALID_HANDLE_VALUE) { @@ -1349,87 +1551,83 @@ int mingw_kill(pid_t pid, int sig) } /* - * Compare environment entries by key (i.e. stopping at '=' or '\0'). + * UTF-8 versions of getenv(), putenv() and unsetenv(). + * Internally, they use the CRT's stock UNICODE routines + * to avoid data loss. 
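/*
 * Illustrative sketch, not part of the patch: the UTF-16 -> UTF-8 round trip
 * that the wrappers below depend on.  get_windows_path_utf8() is hypothetical;
 * GetEnvironmentVariableW() is the Win32 API and xwcstoutf() is the existing
 * conversion helper from this file.  The 32768-character buffer mirrors what
 * mingw_getenv() below uses.
 */
static char *get_windows_path_utf8(void)
{
        wchar_t wbuf[32768];
        DWORD wlen = GetEnvironmentVariableW(L"PATH", wbuf, ARRAY_SIZE(wbuf));
        char *utf8;

        if (!wlen || wlen >= ARRAY_SIZE(wbuf))
                return NULL;

        /* UTF-8 needs at most three bytes per UTF-16 code unit, plus NUL */
        utf8 = xmalloc(wlen * 3 + 1);
        if (xwcstoutf(utf8, wbuf, wlen * 3 + 1) < 0) {
                free(utf8);
                return NULL;
        }
        return utf8;
}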
*/ -static int compareenv(const void *v1, const void *v2) +char *mingw_getenv(const char *name) { - const char *e1 = *(const char**)v1; - const char *e2 = *(const char**)v2; +#define GETENV_MAX_RETAIN 30 + static char *values[GETENV_MAX_RETAIN]; + static int value_counter; + int len_key, len_value; + wchar_t *w_key; + char *value; + wchar_t w_value[32768]; - for (;;) { - int c1 = *e1++; - int c2 = *e2++; - c1 = (c1 == '=') ? 0 : tolower(c1); - c2 = (c2 == '=') ? 0 : tolower(c2); - if (c1 > c2) - return 1; - if (c1 < c2) - return -1; - if (c1 == 0) - return 0; - } -} + if (!name || !*name) + return NULL; -static int bsearchenv(char **env, const char *name, size_t size) -{ - unsigned low = 0, high = size; - while (low < high) { - unsigned mid = low + ((high - low) >> 1); - int cmp = compareenv(&env[mid], &name); - if (cmp < 0) - low = mid + 1; - else if (cmp > 0) - high = mid; - else - return mid; + len_key = strlen(name) + 1; + /* We cannot use xcalloc() here because that uses getenv() itself */ + w_key = calloc(len_key, sizeof(wchar_t)); + if (!w_key) + die("Out of memory, (tried to allocate %u wchar_t's)", len_key); + xutftowcs(w_key, name, len_key); + len_value = GetEnvironmentVariableW(w_key, w_value, ARRAY_SIZE(w_value)); + if (!len_value && GetLastError() == ERROR_ENVVAR_NOT_FOUND) { + free(w_key); + return NULL; } - return ~low; /* not found, return 1's complement of insert position */ + free(w_key); + + len_value = len_value * 3 + 1; + /* We cannot use xcalloc() here because that uses getenv() itself */ + value = calloc(len_value, sizeof(char)); + if (!value) + die("Out of memory, (tried to allocate %u bytes)", len_value); + xwcstoutf(value, w_value, len_value); + + /* + * We return `value` which is an allocated value and the caller is NOT + * expecting to have to free it, so we keep a round-robin array, + * invalidating the buffer after GETENV_MAX_RETAIN getenv() calls. + */ + free(values[value_counter]); + values[value_counter++] = value; + if (value_counter >= ARRAY_SIZE(values)) + value_counter = 0; + + return value; } -/* - * If name contains '=', then sets the variable, otherwise it unsets it - * Size includes the terminating NULL. Env must have room for size + 1 entries - * (in case of insert). Returns the new size. Optionally frees removed entries. 
- */ -static int do_putenv(char **env, const char *name, int size, int free_old) +int mingw_putenv(const char *namevalue) { - int i = bsearchenv(env, name, size - 1); + int size; + wchar_t *wide, *equal; + BOOL result; - /* optionally free removed / replaced entry */ - if (i >= 0 && free_old) - free(env[i]); + if (!namevalue || !*namevalue) + return 0; - if (strchr(name, '=')) { - /* if new value ('key=value') is specified, insert or replace entry */ - if (i < 0) { - i = ~i; - memmove(&env[i + 1], &env[i], (size - i) * sizeof(char*)); - size++; - } - env[i] = (char*) name; - } else if (i >= 0) { - /* otherwise ('key') remove existing entry */ - size--; - memmove(&env[i], &env[i + 1], (size - i) * sizeof(char*)); + size = strlen(namevalue) * 2 + 1; + wide = calloc(size, sizeof(wchar_t)); + if (!wide) + die("Out of memory, (tried to allocate %u wchar_t's)", size); + xutftowcs(wide, namevalue, size); + equal = wcschr(wide, L'='); + if (!equal) + result = SetEnvironmentVariableW(wide, NULL); + else { + *equal = L'\0'; + result = SetEnvironmentVariableW(wide, equal + 1); } - return size; -} + free(wide); -char *mingw_getenv(const char *name) -{ - char *value; - int pos = bsearchenv(environ, name, environ_size - 1); - if (pos < 0) - return NULL; - value = strchr(environ[pos], '='); - return value ? &value[1] : NULL; -} + if (!result) + errno = err_win_to_posix(GetLastError()); -int mingw_putenv(const char *namevalue) -{ - ALLOC_GROW(environ, (environ_size + 1) * sizeof(char*), environ_alloc); - environ_size = do_putenv(environ, namevalue, environ_size, 1); - return 0; + return result ? 0 : -1; } /* @@ -1577,7 +1775,8 @@ static void ensure_socket_initialization(void) WSAGetLastError()); for (name = libraries; *name; name++) { - ipv6_dll = LoadLibrary(*name); + ipv6_dll = LoadLibraryExA(*name, NULL, + LOAD_LIBRARY_SEARCH_SYSTEM32); if (!ipv6_dll) continue; @@ -1798,18 +1997,63 @@ int mingw_getpagesize(void) return si.dwAllocationGranularity; } +/* See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724435.aspx */ +enum EXTENDED_NAME_FORMAT { + NameDisplay = 3, + NameUserPrincipal = 8 +}; + +static char *get_extended_user_info(enum EXTENDED_NAME_FORMAT type) +{ + DECLARE_PROC_ADDR(secur32.dll, BOOL, GetUserNameExW, + enum EXTENDED_NAME_FORMAT, LPCWSTR, PULONG); + static wchar_t wbuffer[1024]; + DWORD len; + + if (!INIT_PROC_ADDR(GetUserNameExW)) + return NULL; + + len = ARRAY_SIZE(wbuffer); + if (GetUserNameExW(type, wbuffer, &len)) { + char *converted = xmalloc((len *= 3)); + if (xwcstoutf(converted, wbuffer, len) >= 0) + return converted; + free(converted); + } + + return NULL; +} + +char *mingw_query_user_email(void) +{ + return get_extended_user_info(NameUserPrincipal); +} + struct passwd *getpwuid(int uid) { + static unsigned initialized; static char user_name[100]; - static struct passwd p; + static struct passwd *p; + DWORD len; - DWORD len = sizeof(user_name); - if (!GetUserName(user_name, &len)) + if (initialized) + return p; + + len = sizeof(user_name); + if (!GetUserName(user_name, &len)) { + initialized = 1; return NULL; - p.pw_name = user_name; - p.pw_gecos = "unknown"; - p.pw_dir = NULL; - return &p; + } + + p = xmalloc(sizeof(*p)); + p->pw_name = user_name; + p->pw_gecos = get_extended_user_info(NameDisplay); + if (!p->pw_gecos) + p->pw_gecos = "unknown"; + p->pw_dir = NULL; + + initialized = 1; + return p; } static HANDLE timer_event; @@ -1967,24 +2211,12 @@ int mingw_raise(int sig) int link(const char *oldpath, const char *newpath) { - typedef BOOL (WINAPI *T)(LPCWSTR, 
LPCWSTR, LPSECURITY_ATTRIBUTES); - static T create_hard_link = NULL; wchar_t woldpath[MAX_PATH], wnewpath[MAX_PATH]; if (xutftowcs_path(woldpath, oldpath) < 0 || xutftowcs_path(wnewpath, newpath) < 0) return -1; - if (!create_hard_link) { - create_hard_link = (T) GetProcAddress( - GetModuleHandle("kernel32.dll"), "CreateHardLinkW"); - if (!create_hard_link) - create_hard_link = (T)-1; - } - if (create_hard_link == (T)-1) { - errno = ENOSYS; - return -1; - } - if (!create_hard_link(wnewpath, woldpath, NULL)) { + if (!CreateHardLinkW(wnewpath, woldpath, NULL)) { errno = err_win_to_posix(GetLastError()); return -1; } @@ -2291,17 +2523,6 @@ void mingw_startup(void) maxlen = wcslen(wargv[0]); for (i = 1; i < argc; i++) maxlen = max(maxlen, wcslen(wargv[i])); - for (i = 0; wenv[i]; i++) - maxlen = max(maxlen, wcslen(wenv[i])); - - /* - * nedmalloc can't free CRT memory, allocate resizable environment - * list. Note that xmalloc / xmemdupz etc. call getenv, so we cannot - * use it while initializing the environment itself. - */ - environ_size = i + 1; - environ_alloc = alloc_nr(environ_size * sizeof(char*)); - environ = malloc_startup(environ_alloc); /* allocate buffer (wchar_t encodes to max 3 UTF-8 bytes) */ maxlen = 3 * maxlen + 1; @@ -2310,17 +2531,13 @@ void mingw_startup(void) /* convert command line arguments and environment to UTF-8 */ for (i = 0; i < argc; i++) __argv[i] = wcstoutfdup_startup(buffer, wargv[i], maxlen); - for (i = 0; wenv[i]; i++) - environ[i] = wcstoutfdup_startup(buffer, wenv[i], maxlen); - environ[i] = NULL; free(buffer); - /* sort environment for O(log n) getenv / putenv */ - qsort(environ, i, sizeof(char*), compareenv); - /* fix Windows specific environment settings */ setup_windows_environment(); + unset_environment_variables = xstrdup("PERL5LIB"); + /* initialize critical section for waitpid pinfo_t list */ InitializeCriticalSection(&pinfo_cs); diff --git a/compat/mingw.h b/compat/mingw.h index 571019d0bd..8c24ddaa3e 100644 --- a/compat/mingw.h +++ b/compat/mingw.h @@ -11,6 +11,9 @@ typedef _sigset_t sigset_t; #undef _POSIX_THREAD_SAFE_FUNCTIONS #endif +extern int mingw_core_config(const char *var, const char *value, void *cb); +#define platform_core_config mingw_core_config + /* * things that are not available in header files */ @@ -257,11 +260,35 @@ char *mingw_mktemp(char *template); char *mingw_getcwd(char *pointer, int len); #define getcwd mingw_getcwd +#ifdef NO_UNSETENV +#error "NO_UNSETENV is incompatible with the Windows-specific startup code!" +#endif + +/* + * We bind *env() routines (even the mingw_ ones) to private mingw_ versions. + * These talk to the CRT using UNICODE/wchar_t, but maintain the original + * narrow-char API. + * + * Note that the MSCRT maintains both ANSI (getenv()) and UNICODE (_wgetenv()) + * routines and stores both versions of each environment variable in parallel + * (and secretly updates both when you set one or the other), but it uses CP_ACP + * to do the conversion rather than CP_UTF8. + * + * Since everything in the git code base is UTF8, we define the mingw_ routines + * to access the CRT using the UNICODE routines and manually convert them to + * UTF8. This also avoids round-trip problems. + * + * This also helps with our linkage, since "_wenviron" is publicly exported + * from the CRT. But to access "_environ" we would have to statically link + * to the CRT (/MT). + * + * We require NO_SETENV (and let gitsetenv() call our mingw_putenv). 
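A minimal sketch of the contract callers get from the mingw_putenv() shown earlier (toggle_trace() is made up): a string containing '=' sets the variable via SetEnvironmentVariableW(), while a bare name unsets it, which is why unsetenv is routed to the same function:

	static void toggle_trace(int on)
	{
		if (on)
			putenv("GIT_TRACE=1");	/* set */
		else
			unsetenv("GIT_TRACE");	/* bare name: unset */
	}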
+ */ +#define getenv mingw_getenv +#define putenv mingw_putenv +#define unsetenv mingw_putenv char *mingw_getenv(const char *name); -#define getenv mingw_getenv -int mingw_putenv(const char *namevalue); -#define putenv mingw_putenv -#define unsetenv mingw_putenv +int mingw_putenv(const char *name); int mingw_gethostname(char *host, int namelen); #define gethostname mingw_gethostname @@ -327,18 +354,41 @@ static inline int getrlimit(int resource, struct rlimit *rlp) } /* - * Use mingw specific stat()/lstat()/fstat() implementations on Windows. + * Use mingw specific stat()/lstat()/fstat() implementations on Windows, + * including our own struct stat with 64 bit st_size and nanosecond-precision + * file times. */ #ifndef __MINGW64_VERSION_MAJOR #define off_t off64_t #define lseek _lseeki64 +struct timespec { + time_t tv_sec; + long tv_nsec; +}; #endif -/* use struct stat with 64 bit st_size */ +struct mingw_stat { + _dev_t st_dev; + _ino_t st_ino; + _mode_t st_mode; + short st_nlink; + short st_uid; + short st_gid; + _dev_t st_rdev; + off64_t st_size; + struct timespec st_atim; + struct timespec st_mtim; + struct timespec st_ctim; +}; + +#define st_atime st_atim.tv_sec +#define st_mtime st_mtim.tv_sec +#define st_ctime st_ctim.tv_sec + #ifdef stat #undef stat #endif -#define stat _stati64 +#define stat mingw_stat int mingw_lstat(const char *file_name, struct stat *buf); int mingw_stat(const char *file_name, struct stat *buf); int mingw_fstat(int fd, struct stat *buf); @@ -351,13 +401,6 @@ int mingw_fstat(int fd, struct stat *buf); #endif #define lstat mingw_lstat -#ifndef _stati64 -# define _stati64(x,y) mingw_stat(x,y) -#elif defined (_USE_32BIT_TIME_T) -# define _stat32i64(x,y) mingw_stat(x,y) -#else -# define _stat64(x,y) mingw_stat(x,y) -#endif int mingw_utime(const char *file_name, const struct utimbuf *times); #define utime mingw_utime @@ -390,6 +433,9 @@ int mingw_raise(int sig); int winansi_isatty(int fd); #define isatty winansi_isatty +int winansi_dup2(int oldfd, int newfd); +#define dup2 winansi_dup2 + void winansi_init(void); HANDLE winansi_get_osfhandle(int fd); @@ -424,6 +470,8 @@ static inline void convert_slashes(char *path) int mingw_offset_1st_component(const char *path); #define offset_1st_component mingw_offset_1st_component #define PATH_SEP ';' +extern char *mingw_query_user_email(void); +#define query_user_email mingw_query_user_email #if !defined(__MINGW64_VERSION_MAJOR) && (!defined(_MSC_VER) || _MSC_VER < 1800) #define PRIuMAX "I64u" #define PRId64 "I64d" diff --git a/compat/mmap.c b/compat/mmap.c index 7f662fef7b..14d31010df 100644 --- a/compat/mmap.c +++ b/compat/mmap.c @@ -4,7 +4,7 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t of { size_t n = 0; - if (start != NULL || !(flags & MAP_PRIVATE)) + if (start != NULL || flags != MAP_PRIVATE || prot != PROT_READ) die("Invalid usage of mmap when built with NO_MMAP"); start = xmalloc(length); diff --git a/compat/poll/poll.c b/compat/poll/poll.c index 7ed3fbbea1..4459408c7d 100644 --- a/compat/poll/poll.c +++ b/compat/poll/poll.c @@ -18,6 +18,9 @@ You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. */ +/* To bump the minimum Windows version to Windows Vista */ +#include "git-compat-util.h" + /* Tell gcc not to warn about the (nfd < 0) tests, below. 
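Going back to the mingw.h hunk above: with stat mapped to struct mingw_stat, nanosecond file times are read the same way as POSIX st_mtim. A small sketch, assuming the usual git-compat-util.h environment (print_mtime() and the path argument are made up):

	static void print_mtime(const char *path)
	{
		struct stat st;	/* really struct mingw_stat here */

		if (!lstat(path, &st))
			fprintf(stderr, "%"PRIuMAX".%09ld\n",
				(uintmax_t)st.st_mtime, st.st_mtim.tv_nsec);
	}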
*/ #if (__GNUC__ == 4 && 3 <= __GNUC_MINOR__) || 4 < __GNUC__ # pragma GCC diagnostic ignored "-Wtype-limits" @@ -29,9 +32,6 @@ #include <sys/types.h> -/* Specification. */ -#include <poll.h> - #include <errno.h> #include <limits.h> #include <assert.h> @@ -55,6 +55,9 @@ # include <unistd.h> #endif +/* Specification. */ +#include "poll.h" + #ifdef HAVE_SYS_IOCTL_H # include <sys/ioctl.h> #endif @@ -449,7 +452,8 @@ poll (struct pollfd *pfd, nfds_t nfd, int timeout) static HANDLE hEvent; WSANETWORKEVENTS ev; HANDLE h, handle_array[FD_SETSIZE + 2]; - DWORD ret, wait_timeout, nhandles, start = 0, elapsed, orig_timeout = 0; + DWORD ret, wait_timeout, nhandles, orig_timeout = 0; + ULONGLONG start = 0; fd_set rfds, wfds, xfds; BOOL poll_again; MSG msg; @@ -465,7 +469,7 @@ poll (struct pollfd *pfd, nfds_t nfd, int timeout) if (timeout != INFTIM) { orig_timeout = timeout; - start = GetTickCount(); + start = GetTickCount64(); } if (!hEvent) @@ -614,8 +618,8 @@ restart: if (!rc && orig_timeout && timeout != INFTIM) { - elapsed = GetTickCount() - start; - timeout = elapsed >= orig_timeout ? 0 : orig_timeout - elapsed; + ULONGLONG elapsed = GetTickCount64() - start; + timeout = elapsed >= orig_timeout ? 0 : (int)(orig_timeout - elapsed); } if (!rc && timeout) diff --git a/compat/poll/poll.h b/compat/poll/poll.h index cd1995292a..1e1597360f 100644 --- a/compat/poll/poll.h +++ b/compat/poll/poll.h @@ -21,6 +21,21 @@ #ifndef _GL_POLL_H #define _GL_POLL_H +#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600 +/* Vista has its own, socket-only poll() */ +#undef POLLIN +#undef POLLPRI +#undef POLLOUT +#undef POLLERR +#undef POLLHUP +#undef POLLNVAL +#undef POLLRDNORM +#undef POLLRDBAND +#undef POLLWRNORM +#undef POLLWRBAND +#define pollfd compat_pollfd +#endif + /* fake a poll(2) environment */ #define POLLIN 0x0001 /* any readable data available */ #define POLLPRI 0x0002 /* OOB/Urgent readable data */ diff --git a/compat/win32/pthread.c b/compat/win32/pthread.c index e18f5c6e2e..2e7eead42c 100644 --- a/compat/win32/pthread.c +++ b/compat/win32/pthread.c @@ -56,141 +56,3 @@ pthread_t pthread_self(void) t.tid = GetCurrentThreadId(); return t; } - -int pthread_cond_init(pthread_cond_t *cond, const void *unused) -{ - cond->waiters = 0; - cond->was_broadcast = 0; - InitializeCriticalSection(&cond->waiters_lock); - - cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL); - if (!cond->sema) - die("CreateSemaphore() failed"); - - cond->continue_broadcast = CreateEvent(NULL, /* security */ - FALSE, /* auto-reset */ - FALSE, /* not signaled */ - NULL); /* name */ - if (!cond->continue_broadcast) - die("CreateEvent() failed"); - - return 0; -} - -int pthread_cond_destroy(pthread_cond_t *cond) -{ - CloseHandle(cond->sema); - CloseHandle(cond->continue_broadcast); - DeleteCriticalSection(&cond->waiters_lock); - return 0; -} - -int pthread_cond_wait(pthread_cond_t *cond, CRITICAL_SECTION *mutex) -{ - int last_waiter; - - EnterCriticalSection(&cond->waiters_lock); - cond->waiters++; - LeaveCriticalSection(&cond->waiters_lock); - - /* - * Unlock external mutex and wait for signal. - * NOTE: we've held mutex locked long enough to increment - * waiters count above, so there's no problem with - * leaving mutex unlocked before we wait on semaphore. - */ - LeaveCriticalSection(mutex); - - /* let's wait - ignore return value */ - WaitForSingleObject(cond->sema, INFINITE); - - /* - * Decrease waiters count. If we are the last waiter, then we must - * notify the broadcasting thread that it can continue. 
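A note on the GetTickCount64() switch in the poll() shim above: 2^32 milliseconds is roughly 49.7 days, so the old 32-bit GetTickCount() value can wrap while a long-lived process is polling and throw off the elapsed-time subtraction; with 64-bit ticks, GetTickCount64() - start remains a plain subtraction that will not wrap in practice.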
- * But if we continued due to cond_signal, we do not have to do that - * because the signaling thread knows that only one waiter continued. - */ - EnterCriticalSection(&cond->waiters_lock); - cond->waiters--; - last_waiter = cond->was_broadcast && cond->waiters == 0; - LeaveCriticalSection(&cond->waiters_lock); - - if (last_waiter) { - /* - * cond_broadcast was issued while mutex was held. This means - * that all other waiters have continued, but are contending - * for the mutex at the end of this function because the - * broadcasting thread did not leave cond_broadcast, yet. - * (This is so that it can be sure that each waiter has - * consumed exactly one slice of the semaphor.) - * The last waiter must tell the broadcasting thread that it - * can go on. - */ - SetEvent(cond->continue_broadcast); - /* - * Now we go on to contend with all other waiters for - * the mutex. Auf in den Kampf! - */ - } - /* lock external mutex again */ - EnterCriticalSection(mutex); - - return 0; -} - -/* - * IMPORTANT: This implementation requires that pthread_cond_signal - * is called while the mutex is held that is used in the corresponding - * pthread_cond_wait calls! - */ -int pthread_cond_signal(pthread_cond_t *cond) -{ - int have_waiters; - - EnterCriticalSection(&cond->waiters_lock); - have_waiters = cond->waiters > 0; - LeaveCriticalSection(&cond->waiters_lock); - - /* - * Signal only when there are waiters - */ - if (have_waiters) - return ReleaseSemaphore(cond->sema, 1, NULL) ? - 0 : err_win_to_posix(GetLastError()); - else - return 0; -} - -/* - * DOUBLY IMPORTANT: This implementation requires that pthread_cond_broadcast - * is called while the mutex is held that is used in the corresponding - * pthread_cond_wait calls! - */ -int pthread_cond_broadcast(pthread_cond_t *cond) -{ - EnterCriticalSection(&cond->waiters_lock); - - if ((cond->was_broadcast = cond->waiters > 0)) { - /* wake up all waiters */ - ReleaseSemaphore(cond->sema, cond->waiters, NULL); - LeaveCriticalSection(&cond->waiters_lock); - /* - * At this point all waiters continue. Each one takes its - * slice of the semaphor. Now it's our turn to wait: Since - * the external mutex is held, no thread can leave cond_wait, - * yet. For this reason, we can be sure that no thread gets - * a chance to eat *more* than one slice. OTOH, it means - * that the last waiter must send us a wake-up. - */ - WaitForSingleObject(cond->continue_broadcast, INFINITE); - /* - * Since the external mutex is held, no thread can enter - * cond_wait, and, hence, it is safe to reset this flag - * without cond->waiters_lock held. - */ - cond->was_broadcast = 0; - } else { - LeaveCriticalSection(&cond->waiters_lock); - } - return 0; -} diff --git a/compat/win32/pthread.h b/compat/win32/pthread.h index 1c164088fb..c6cb8dd219 100644 --- a/compat/win32/pthread.h +++ b/compat/win32/pthread.h @@ -32,27 +32,13 @@ typedef int pthread_mutexattr_t; #define pthread_mutexattr_settype(a, t) 0 #define PTHREAD_MUTEX_RECURSIVE 0 -/* - * Implement simple condition variable for Windows threads, based on ACE - * implementation. 
- * - * See original implementation: http://bit.ly/1vkDjo - * ACE homepage: http://www.cse.wustl.edu/~schmidt/ACE.html - * See also: http://www.cse.wustl.edu/~schmidt/win32-cv-1.html - */ -typedef struct { - LONG waiters; - int was_broadcast; - CRITICAL_SECTION waiters_lock; - HANDLE sema; - HANDLE continue_broadcast; -} pthread_cond_t; - -extern int pthread_cond_init(pthread_cond_t *cond, const void *unused); -extern int pthread_cond_destroy(pthread_cond_t *cond); -extern int pthread_cond_wait(pthread_cond_t *cond, CRITICAL_SECTION *mutex); -extern int pthread_cond_signal(pthread_cond_t *cond); -extern int pthread_cond_broadcast(pthread_cond_t *cond); +#define pthread_cond_t CONDITION_VARIABLE + +#define pthread_cond_init(a,b) InitializeConditionVariable((a)) +#define pthread_cond_destroy(a) do {} while (0) +#define pthread_cond_wait(a,b) return_0(SleepConditionVariableCS((a), (b), INFINITE)) +#define pthread_cond_signal WakeConditionVariable +#define pthread_cond_broadcast WakeAllConditionVariable /* * Simple thread creation implementation using pthread API diff --git a/compat/winansi.c b/compat/winansi.c index a11a0f16d2..f4f08237f9 100644 --- a/compat/winansi.c +++ b/compat/winansi.c @@ -474,6 +474,18 @@ static void die_lasterr(const char *fmt, ...) va_end(params); } +#undef dup2 +int winansi_dup2(int oldfd, int newfd) +{ + int ret = dup2(oldfd, newfd); + + if (!ret && newfd >= 0 && newfd <= 2) + fd_is_interactive[newfd] = oldfd < 0 || oldfd > 2 ? + 0 : fd_is_interactive[oldfd]; + + return ret; +} + static HANDLE duplicate_handle(HANDLE hnd) { HANDLE hresult, hproc = GetCurrentProcess(); @@ -1093,7 +1093,7 @@ int git_config_color(char *dest, const char *var, const char *value) return 0; } -static int git_default_core_config(const char *var, const char *value) +static int git_default_core_config(const char *var, const char *value, void *cb) { /* This needs a better name */ if (!strcmp(var, "core.filemode")) { @@ -1344,14 +1344,6 @@ static int git_default_core_config(const char *var, const char *value) return 0; } - if (!strcmp(var, "core.hidedotfiles")) { - if (value && !strcasecmp(value, "dotgitonly")) - hide_dotfiles = HIDE_DOTFILES_DOTGITONLY; - else - hide_dotfiles = git_config_bool(var, value); - return 0; - } - if (!strcmp(var, "core.partialclonefilter")) { return git_config_string(&core_partial_clone_filter_default, var, value); @@ -1363,7 +1355,7 @@ static int git_default_core_config(const char *var, const char *value) } /* Add other config variables here and to Documentation/config.txt. 
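With the mapping above, pthread_cond_t becomes Windows' native CONDITION_VARIABLE and the usual pthread-style wait loop keeps working unchanged. A minimal sketch (the lock/cond/ready globals are made up, and pthread_mutex_init()/pthread_cond_init() are assumed to have run already):

	static pthread_mutex_t lock;
	static pthread_cond_t cond;
	static int ready;

	static void wait_for_ready(void)
	{
		pthread_mutex_lock(&lock);
		while (!ready)
			/* ends up in SleepConditionVariableCS(&cond, &lock, INFINITE) */
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}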
*/ - return 0; + return platform_core_config(var, value, cb); } static int git_default_i18n_config(const char *var, const char *value) @@ -1448,13 +1440,13 @@ static int git_default_mailmap_config(const char *var, const char *value) return 0; } -int git_default_config(const char *var, const char *value, void *dummy) +int git_default_config(const char *var, const char *value, void *cb) { if (starts_with(var, "core.")) - return git_default_core_config(var, value); + return git_default_core_config(var, value, cb); if (starts_with(var, "user.")) - return git_ident_config(var, value, dummy); + return git_ident_config(var, value, cb); if (starts_with(var, "i18n.")) return git_default_i18n_config(var, value); @@ -1676,6 +1668,8 @@ static int do_git_config_sequence(const struct config_options *opts, if (opts->commondir) repo_config = mkpathdup("%s/config", opts->commondir); + else if (opts->git_dir) + BUG("git_dir without commondir"); else repo_config = NULL; @@ -1695,6 +1689,17 @@ static int do_git_config_sequence(const struct config_options *opts, if (repo_config && !access_or_die(repo_config, R_OK, 0)) ret += git_config_from_file(fn, repo_config, data); + /* + * Note: this should have a new scope, CONFIG_SCOPE_WORKTREE. + * But let's not complicate things before it's actually needed. + */ + if (repository_format_worktree_config) { + char *path = git_pathdup("config.worktree"); + if (!access_or_die(path, R_OK, 0)) + ret += git_config_from_file(fn, path, data); + free(path); + } + current_parsing_scope = CONFIG_SCOPE_CMDLINE; if (git_config_from_parameters(fn, data) < 0) die(_("unable to parse command-line config")); @@ -2278,7 +2283,7 @@ int git_config_get_max_percent_split_change(void) int git_config_get_fsmonitor(void) { if (git_config_get_pathname("core.fsmonitor", &core_fsmonitor)) - core_fsmonitor = getenv("GIT_FSMONITOR_TEST"); + core_fsmonitor = getenv("GIT_TEST_FSMONITOR"); if (core_fsmonitor && !*core_fsmonitor) core_fsmonitor = NULL; @@ -2289,6 +2294,27 @@ int git_config_get_fsmonitor(void) return 0; } +int git_config_get_index_threads(int *dest) +{ + int is_bool, val; + + val = git_env_ulong("GIT_TEST_INDEX_THREADS", 0); + if (val) { + *dest = val; + return 0; + } + + if (!git_config_get_bool_or_int("index.threads", &is_bool, &val)) { + if (is_bool) + *dest = val ? 
0 : 1; + else + *dest = val; + return 0; + } + + return 1; +} + NORETURN void git_die_config_linenr(const char *key, const char *filename, int linenr) { @@ -246,6 +246,7 @@ extern int git_config_get_bool(const char *key, int *dest); extern int git_config_get_bool_or_int(const char *key, int *is_bool, int *dest); extern int git_config_get_maybe_bool(const char *key, int *dest); extern int git_config_get_pathname(const char *key, const char **dest); +extern int git_config_get_index_threads(int *dest); extern int git_config_get_untracked_cache(void); extern int git_config_get_split_index(void); extern int git_config_get_max_percent_split_change(void); diff --git a/config.mak.dev b/config.mak.dev index 92d268137f..bbeeff44fe 100644 --- a/config.mak.dev +++ b/config.mak.dev @@ -34,7 +34,6 @@ ifeq ($(filter extra-all,$(DEVOPTS)),) CFLAGS += -Wno-empty-body CFLAGS += -Wno-missing-field-initializers CFLAGS += -Wno-sign-compare -CFLAGS += -Wno-unused-function CFLAGS += -Wno-unused-parameter endif endif diff --git a/config.mak.uname b/config.mak.uname index e47af72e01..3ee7da0e23 100644 --- a/config.mak.uname +++ b/config.mak.uname @@ -370,7 +370,6 @@ ifeq ($(uname_S),Windows) RUNTIME_PREFIX = YesPlease HAVE_WPGMPTR = YesWeDo NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease - NO_NSEC = YesPlease USE_WIN32_MMAP = YesPlease MMAP_PREVENTS_DELETE = UnfortunatelyYes # USE_NED_ALLOCATOR = YesPlease @@ -381,8 +380,6 @@ ifeq ($(uname_S),Windows) NO_PYTHON = YesPlease BLK_SHA1 = YesPlease ETAGS_TARGET = ETAGS - NO_INET_PTON = YesPlease - NO_INET_NTOP = YesPlease NO_POSIX_GOODIES = UnfortunatelyYes NATIVE_CRLF = YesPlease DEFAULT_HELP_FORMAT = html @@ -433,8 +430,6 @@ ifeq ($(uname_S),Minix) NO_NSEC = YesPlease NEEDS_LIBGEN = NEEDS_CRYPTO_WITH_SSL = YesPlease - NEEDS_IDN_WITH_CURL = YesPlease - NEEDS_SSL_WITH_CURL = YesPlease NEEDS_RESOLV = NO_HSTRERROR = YesPlease NO_MMAP = YesPlease @@ -460,7 +455,6 @@ ifeq ($(uname_S),NONSTOP_KERNEL) # Missdetected, hence commented out, see below. #NO_CURL = YesPlease # Added manually, see above. 
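A sketch of how a caller is meant to use the new git_config_get_index_threads() helper above (configured_index_threads() is made up): the helper fills *dest and returns 0 when index.threads or the GIT_TEST_INDEX_THREADS override is set, mapping a boolean true to 0 and false to 1, and returns non-zero when nothing is configured.

	static int configured_index_threads(void)
	{
		int nr;

		if (git_config_get_index_threads(&nr))
			return 0;	/* not configured; caller picks its own default */
		return nr;
	}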
- NEEDS_SSL_WITH_CURL = YesPlease HAVE_LIBCHARSET_H = YesPlease HAVE_STRINGS_H = YesPlease NEEDS_LIBICONV = YesPlease @@ -520,7 +514,6 @@ ifneq (,$(findstring MINGW,$(uname_S))) RUNTIME_PREFIX = YesPlease HAVE_WPGMPTR = YesWeDo NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease - NO_NSEC = YesPlease USE_WIN32_MMAP = YesPlease MMAP_PREVENTS_DELETE = UnfortunatelyYes USE_NED_ALLOCATOR = YesPlease @@ -529,8 +522,6 @@ ifneq (,$(findstring MINGW,$(uname_S))) NO_REGEX = YesPlease NO_PYTHON = YesPlease ETAGS_TARGET = ETAGS - NO_INET_PTON = YesPlease - NO_INET_NTOP = YesPlease NO_POSIX_GOODIES = UnfortunatelyYes DEFAULT_HELP_FORMAT = html COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32 diff --git a/configure.ac b/configure.ac index e11b7976ab..e0d0da3c0c 100644 --- a/configure.ac +++ b/configure.ac @@ -600,17 +600,14 @@ AC_CHECK_PROG([CURL_CONFIG], [curl-config], if test $CURL_CONFIG != no; then GIT_CONF_SUBST([CURL_CONFIG]) - if test -z "${NO_OPENSSL}"; then - AC_MSG_CHECKING([if Curl supports SSL]) - if test $(curl-config --features|grep SSL) = SSL; then - NEEDS_SSL_WITH_CURL=YesPlease - AC_MSG_RESULT([yes]) - else - NEEDS_SSL_WITH_CURL= - AC_MSG_RESULT([no]) - fi - GIT_CONF_SUBST([NEEDS_SSL_WITH_CURL]) + + if test -z "$CURL_CONFIG_OPTS"; then + CURL_CONFIG_OPTS="--libs" fi + + CURL_LDFLAGS=$($CURL_CONFIG $CURL_CONFIG_OPTS) + AC_MSG_NOTICE([Setting CURL_LDFLAGS to '$CURL_LDFLAGS']) + GIT_CONF_SUBST([CURL_LDFLAGS], [$CURL_LDFLAGS]) fi fi @@ -792,6 +789,12 @@ AC_CHECK_HEADER([sys/select.h], [NO_SYS_SELECT_H=UnfortunatelyYes]) GIT_CONF_SUBST([NO_SYS_SELECT_H]) # +# Define NO_POLL_H if you don't have poll.h +AC_CHECK_HEADER([poll.h], +[NO_POLL_H=], +[NO_POLL_H=UnfortunatelyYes]) +GIT_CONF_SUBST([NO_POLL_H]) +# # Define NO_SYS_POLL_H if you don't have sys/poll.h AC_CHECK_HEADER([sys/poll.h], [NO_SYS_POLL_H=], @@ -224,7 +224,7 @@ static int process_dummy_ref(const char *line) return 0; name++; - return !oidcmp(&null_oid, &oid) && !strcmp(name, "capabilities^{}"); + return oideq(&null_oid, &oid) && !strcmp(name, "capabilities^{}"); } static void check_no_capabilities(const char *line, int len) diff --git a/connected.h b/connected.h index e4c961817d..8d5a6b3ad6 100644 --- a/connected.h +++ b/connected.h @@ -51,9 +51,9 @@ struct check_connected_options { #define CHECK_CONNECTED_INIT { 0 } /* - * Make sure that our object store has all the commits necessary to - * connect the ancestry chain to some of our existing refs, and all - * the trees and blobs that these commits use. + * Make sure that all given objects and all objects reachable from them + * either exist in our object store or (if the repository is a partial + * clone) are promised to be available. * * Return 0 if Ok, non zero otherwise (i.e. some missing objects) * diff --git a/contrib/coccinelle/README b/contrib/coccinelle/README index 9c2f8879c2..f0e80bd7f0 100644 --- a/contrib/coccinelle/README +++ b/contrib/coccinelle/README @@ -1,2 +1,43 @@ This directory provides examples of Coccinelle (http://coccinelle.lip6.fr/) semantic patches that might be useful to developers. + +There are two types of semantic patches: + + * Using the semantic transformation to check for bad patterns in the code; + The target 'make coccicheck' is designed to check for these patterns and + it is expected that any resulting patch indicates a regression. + The patches resulting from 'make coccicheck' are small and infrequent, + so once they are found, they can be sent to the mailing list as per usual. 
+ + Example for introducing new patterns: + 67947c34ae (convert "hashcmp() != 0" to "!hasheq()", 2018-08-28) + b84c783882 (fsck: s/++i > 1/i++/, 2018-10-24) + + Example of fixes using this approach: + 248f66ed8e (run-command: use strbuf_addstr() for adding a string to + a strbuf, 2018-03-25) + f919ffebed (Use MOVE_ARRAY, 2018-01-22) + + These types of semantic patches are usually part of testing, c.f. + 0860a7641b (travis-ci: fail if Coccinelle static analysis found something + to transform, 2018-07-23) + + * Using semantic transformations in large scale refactorings throughout + the code base. + + When applying the semantic patch into a real patch, sending it to the + mailing list in the usual way, such a patch would be expected to have a + lot of textual and semantic conflicts as such large scale refactorings + change function signatures that are used widely in the code base. + A textual conflict would arise if surrounding code near any call of such + function changes. A semantic conflict arises when other patch series in + flight introduce calls to such functions. + + So to aid these large scale refactorings, semantic patches can be used. + However we do not want to store them in the same place as the checks for + bad patterns, as then automated builds would fail. + That is why semantic patches 'contrib/coccinelle/*.pending.cocci' + are ignored for checks, and can be applied using 'make coccicheck-pending'. + + This allows to expose plans of pending large scale refactorings without + impacting the bad pattern checks. diff --git a/contrib/coccinelle/commit.cocci b/contrib/coccinelle/commit.cocci index aec3345adb..c49aa558f0 100644 --- a/contrib/coccinelle/commit.cocci +++ b/contrib/coccinelle/commit.cocci @@ -15,10 +15,10 @@ expression c; identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph_one|load_tree_for_commit)$"; expression c; @@ - f(...) {... + f(...) {<... - c->maybe_tree + get_commit_tree(c) - ...} + ...>} @@ expression c; diff --git a/contrib/coccinelle/object_id.cocci b/contrib/coccinelle/object_id.cocci index 09afdbf994..6a7cf3e02d 100644 --- a/contrib/coccinelle/object_id.cocci +++ b/contrib/coccinelle/object_id.cocci @@ -1,110 +1,149 @@ @@ -expression E1; +struct object_id OID; @@ -- is_null_sha1(E1.hash) -+ is_null_oid(&E1) +- is_null_sha1(OID.hash) ++ is_null_oid(&OID) @@ -expression E1; +struct object_id *OIDPTR; @@ -- is_null_sha1(E1->hash) -+ is_null_oid(E1) +- is_null_sha1(OIDPTR->hash) ++ is_null_oid(OIDPTR) @@ -expression E1; +struct object_id OID; @@ -- sha1_to_hex(E1.hash) -+ oid_to_hex(&E1) +- sha1_to_hex(OID.hash) ++ oid_to_hex(&OID) @@ identifier f != oid_to_hex; -expression E1; +struct object_id *OIDPTR; @@ - f(...) {... -- sha1_to_hex(E1->hash) -+ oid_to_hex(E1) - ...} + f(...) {<... +- sha1_to_hex(OIDPTR->hash) ++ oid_to_hex(OIDPTR) + ...>} @@ -expression E1, E2; +expression E; +struct object_id OID; @@ -- sha1_to_hex_r(E1, E2.hash) -+ oid_to_hex_r(E1, &E2) +- sha1_to_hex_r(E, OID.hash) ++ oid_to_hex_r(E, &OID) @@ identifier f != oid_to_hex_r; -expression E1, E2; +expression E; +struct object_id *OIDPTR; @@ - f(...) {... -- sha1_to_hex_r(E1, E2->hash) -+ oid_to_hex_r(E1, E2) - ...} + f(...) {<... +- sha1_to_hex_r(E, OIDPTR->hash) ++ oid_to_hex_r(E, OIDPTR) + ...>} @@ -expression E1; +struct object_id OID; @@ -- hashclr(E1.hash) -+ oidclr(&E1) +- hashclr(OID.hash) ++ oidclr(&OID) @@ identifier f != oidclr; -expression E1; +struct object_id *OIDPTR; @@ - f(...) {... -- hashclr(E1->hash) -+ oidclr(E1) - ...} + f(...) {<... 
+- hashclr(OIDPTR->hash) ++ oidclr(OIDPTR) + ...>} @@ -expression E1, E2; +struct object_id OID1, OID2; @@ -- hashcmp(E1.hash, E2.hash) -+ oidcmp(&E1, &E2) +- hashcmp(OID1.hash, OID2.hash) ++ oidcmp(&OID1, &OID2) @@ identifier f != oidcmp; -expression E1, E2; +struct object_id *OIDPTR1, OIDPTR2; @@ - f(...) {... -- hashcmp(E1->hash, E2->hash) -+ oidcmp(E1, E2) - ...} + f(...) {<... +- hashcmp(OIDPTR1->hash, OIDPTR2->hash) ++ oidcmp(OIDPTR1, OIDPTR2) + ...>} @@ -expression E1, E2; +struct object_id *OIDPTR; +struct object_id OID; @@ -- hashcmp(E1->hash, E2.hash) -+ oidcmp(E1, &E2) +- hashcmp(OIDPTR->hash, OID.hash) ++ oidcmp(OIDPTR, &OID) @@ -expression E1, E2; +struct object_id *OIDPTR; +struct object_id OID; @@ -- hashcmp(E1.hash, E2->hash) -+ oidcmp(&E1, E2) +- hashcmp(OID.hash, OIDPTR->hash) ++ oidcmp(&OID, OIDPTR) @@ -expression E1, E2; +struct object_id OID1, OID2; @@ -- hashcpy(E1.hash, E2.hash) -+ oidcpy(&E1, &E2) +- hashcpy(OID1.hash, OID2.hash) ++ oidcpy(&OID1, &OID2) @@ identifier f != oidcpy; -expression E1, E2; +struct object_id *OIDPTR1; +struct object_id *OIDPTR2; +@@ + f(...) {<... +- hashcpy(OIDPTR1->hash, OIDPTR2->hash) ++ oidcpy(OIDPTR1, OIDPTR2) + ...>} + @@ - f(...) {... -- hashcpy(E1->hash, E2->hash) -+ oidcpy(E1, E2) - ...} +struct object_id *OIDPTR; +struct object_id OID; +@@ +- hashcpy(OIDPTR->hash, OID.hash) ++ oidcpy(OIDPTR, &OID) + +@@ +struct object_id *OIDPTR; +struct object_id OID; +@@ +- hashcpy(OID.hash, OIDPTR->hash) ++ oidcpy(&OID, OIDPTR) + +@@ +struct object_id *OIDPTR1; +struct object_id *OIDPTR2; +@@ +- oidcmp(OIDPTR1, OIDPTR2) == 0 ++ oideq(OIDPTR1, OIDPTR2) @@ +identifier f != hasheq; expression E1, E2; @@ -- hashcpy(E1->hash, E2.hash) -+ oidcpy(E1, &E2) + f(...) {<... +- hashcmp(E1, E2) == 0 ++ hasheq(E1, E2) + ...>} + +@@ +struct object_id *OIDPTR1; +struct object_id *OIDPTR2; +@@ +- oidcmp(OIDPTR1, OIDPTR2) != 0 ++ !oideq(OIDPTR1, OIDPTR2) @@ +identifier f != hasheq; expression E1, E2; @@ -- hashcpy(E1.hash, E2->hash) -+ oidcpy(&E1, E2) + f(...) {<... +- hashcmp(E1, E2) != 0 ++ !hasheq(E1, E2) + ...>} diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash index 12f7ce0c5c..9e8ec95c3c 100644 --- a/contrib/completion/git-completion.bash +++ b/contrib/completion/git-completion.bash @@ -400,7 +400,7 @@ __gitcomp_builtin () if [ -z "$options" ]; then # leading and trailing spaces are significant to make # option removal work correctly. 
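To make the object_id rules above concrete, this is the kind of mechanical change the oidcmp()/oideq() rules produce once applied (same_commit() is an illustrative function, not from the patch):

	/* before */
	static int same_commit(const struct commit *a, const struct commit *b)
	{
		return oidcmp(&a->object.oid, &b->object.oid) == 0;
	}

	/* after applying contrib/coccinelle/object_id.cocci */
	static int same_commit(const struct commit *a, const struct commit *b)
	{
		return oideq(&a->object.oid, &b->object.oid);
	}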
- options=" $(__git ${cmd/_/ } --git-completion-helper) $incl " + options=" $incl $(__git ${cmd/_/ } --git-completion-helper) " for i in $excl; do options="${options/ $i / }" done @@ -943,6 +943,7 @@ __git_complete_remote_or_refspec () *) ;; esac ;; + --multiple) no_complete_refspec=1; break ;; -*) ;; *) remote="$i"; break ;; esac @@ -1520,13 +1521,9 @@ _git_fetch () __git_complete_remote_or_refspec } -__git_format_patch_options=" - --stdout --attach --no-attach --thread --thread= --no-thread - --numbered --start-number --numbered-files --keep-subject --signoff - --signature --no-signature --in-reply-to= --cc= --full-index --binary - --not --all --cover-letter --no-prefix --src-prefix= --dst-prefix= - --inline --suffix= --ignore-if-in-upstream --subject-prefix= - --output-directory --reroll-count --to= --quiet --notes +__git_format_patch_extra_options=" + --full-index --not --all --no-prefix --src-prefix= + --dst-prefix= --notes " _git_format_patch () @@ -1539,7 +1536,7 @@ _git_format_patch () return ;; --*) - __gitcomp "$__git_format_patch_options" + __gitcomp_builtin format-patch "$__git_format_patch_extra_options" return ;; esac @@ -1821,7 +1818,7 @@ _git_mergetool () return ;; --*) - __gitcomp "--tool= --prompt --no-prompt" + __gitcomp "--tool= --prompt --no-prompt --gui --no-gui" return ;; esac @@ -2069,7 +2066,7 @@ _git_send_email () return ;; --*) - __gitcomp "--annotate --bcc --cc --cc-cmd --chain-reply-to + __gitcomp_builtin send-email "--annotate --bcc --cc --cc-cmd --chain-reply-to --compose --confirm= --dry-run --envelope-sender --from --identity --in-reply-to --no-chain-reply-to --no-signed-off-by-cc @@ -2078,7 +2075,7 @@ _git_send_email () --smtp-server-port --smtp-encryption= --smtp-user --subject --suppress-cc= --suppress-from --thread --to --validate --no-validate - $__git_format_patch_options" + $__git_format_patch_extra_options" return ;; esac @@ -2556,6 +2553,9 @@ _git_stash () drop,--*) __gitcomp "--quiet" ;; + list,--*) + __gitcomp "--name-status --oneline --patch-with-stat" + ;; show,--*|branch,--*) ;; branch,*) diff --git a/contrib/coverage-diff.sh b/contrib/coverage-diff.sh new file mode 100755 index 0000000000..4ec419f900 --- /dev/null +++ b/contrib/coverage-diff.sh @@ -0,0 +1,108 @@ +#!/bin/sh + +# Usage: Run 'contrib/coverage-diff.sh <version1> <version2>' from source-root +# after running +# +# make coverage-test +# make coverage-report +# +# while checked out at <version2>. This script combines the *.gcov files +# generated by the 'make' commands above with 'git diff <version1> <version2>' +# to report new lines that are not covered by the test suite. + +V1=$1 +V2=$2 + +diff_lines () { + perl -e ' + my $line_num; + while (<>) { + # Hunk header? Grab the beginning in postimage. + if (/^@@ -\d+(?:,\d+)? \+(\d+)(?:,\d+)? @@/) { + $line_num = $1; + next; + } + + # Have we seen a hunk? Ignore "diff --git" etc. + next unless defined $line_num; + + # Deleted line? Ignore. + if (/^-/) { + next; + } + + # Show only the line number of added lines. + if (/^\+/) { + print "$line_num\n"; + } + # Either common context or added line appear in + # the postimage. Count it. + $line_num++; + } + ' +} + +files=$(git diff --name-only "$V1" "$V2" -- \*.c) + +# create empty file +>coverage-data.txt + +for file in $files +do + git diff "$V1" "$V2" -- "$file" | + diff_lines | + sort >new_lines.txt + + if ! test -s new_lines.txt + then + continue + fi + + hash_file=$(echo $file | sed "s/\//\#/") + + if ! 
test -s "$hash_file.gcov" + then + continue + fi + + sed -ne '/#####:/{ + s/ #####:// + s/:.*// + s/ //g + p + }' "$hash_file.gcov" | + sort >uncovered_lines.txt + + comm -12 uncovered_lines.txt new_lines.txt | + sed -e 's/$/\)/' | + sed -e 's/^/ /' >uncovered_new_lines.txt + + grep -q '[^[:space:]]' <uncovered_new_lines.txt && + echo $file >>coverage-data.txt && + git blame -s "$V2" -- "$file" | + sed 's/\t//g' | + grep -f uncovered_new_lines.txt >>coverage-data.txt && + echo >>coverage-data.txt + + rm -f new_lines.txt uncovered_lines.txt uncovered_new_lines.txt +done + +cat coverage-data.txt + +echo "Commits introducing uncovered code:" + +commit_list=$(cat coverage-data.txt | + grep -E '^[0-9a-f]{7,} ' | + awk '{print $1;}' | + sort | + uniq) + +( + for commit in $commit_list + do + git log --no-decorate --pretty=format:'%an %h: %s' -1 $commit + echo + done +) | sort + +rm coverage-data.txt diff --git a/contrib/credential/wincred/git-credential-wincred.c b/contrib/credential/wincred/git-credential-wincred.c index 86518cd93d..5bdad41de1 100644 --- a/contrib/credential/wincred/git-credential-wincred.c +++ b/contrib/credential/wincred/git-credential-wincred.c @@ -75,7 +75,8 @@ static CredDeleteWT CredDeleteW; static void load_cred_funcs(void) { /* load DLLs */ - advapi = LoadLibrary("advapi32.dll"); + advapi = LoadLibraryExA("advapi32.dll", NULL, + LOAD_LIBRARY_SEARCH_SYSTEM32); if (!advapi) die("failed to load advapi32.dll"); diff --git a/contrib/subtree/Makefile b/contrib/subtree/Makefile index 5c6cc4ab2c..6906aae441 100644 --- a/contrib/subtree/Makefile +++ b/contrib/subtree/Makefile @@ -59,6 +59,10 @@ $(GIT_SUBTREE): $(GIT_SUBTREE_SH) doc: $(GIT_SUBTREE_DOC) $(GIT_SUBTREE_HTML) +man: $(GIT_SUBTREE_DOC) + +html: $(GIT_SUBTREE_HTML) + install: $(GIT_SUBTREE) $(INSTALL) -d -m 755 $(DESTDIR)$(gitexecdir) $(INSTALL) -m 755 $(GIT_SUBTREE) $(DESTDIR)$(gitexecdir) diff --git a/contrib/subtree/git-subtree.sh b/contrib/subtree/git-subtree.sh index d3f39a862a..147201dc6c 100755 --- a/contrib/subtree/git-subtree.sh +++ b/contrib/subtree/git-subtree.sh @@ -231,12 +231,14 @@ cache_miss () { } check_parents () { - missed=$(cache_miss "$@") + missed=$(cache_miss "$1") + local indent=$(($2 + 1)) for miss in $missed do if ! test -r "$cachedir/notree/$miss" then debug " incorrect order: $miss" + process_split_commit "$miss" "" "$indent" fi done } @@ -340,7 +342,12 @@ find_existing_splits () { revs="$2" main= sub= - git log --grep="^git-subtree-dir: $dir/*\$" \ + local grep_format="^git-subtree-dir: $dir/*\$" + if test -n "$ignore_joins" + then + grep_format="^Add '$dir/' from commit '" + fi + git log --grep="$grep_format" \ --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs | while read a b junk do @@ -534,6 +541,7 @@ copy_or_skip () { nonidentical= p= gotparents= + copycommit= for parent in $newparents do ptree=$(toptree_for_commit $parent) || exit $? @@ -541,7 +549,24 @@ copy_or_skip () { if test "$ptree" = "$tree" then # an identical parent could be used in place of this rev. 
- identical="$parent" + if test -n "$identical" + then + # if a previous identical parent was found, check whether + # one is already an ancestor of the other + mergebase=$(git merge-base $identical $parent) + if test "$identical" = "$mergebase" + then + # current identical commit is an ancestor of parent + identical="$parent" + elif test "$parent" != "$mergebase" + then + # no common history; commit must be copied + copycommit=1 + fi + else + # first identical parent detected + identical="$parent" + fi else nonidentical="$parent" fi @@ -564,7 +589,6 @@ copy_or_skip () { fi done - copycommit= if test -n "$identical" && test -n "$nonidentical" then extras=$(git rev-list --count $identical..$nonidentical) @@ -598,6 +622,58 @@ ensure_valid_ref_format () { die "'$1' does not look like a ref" } +process_split_commit () { + local rev="$1" + local parents="$2" + local indent=$3 + + if test $indent -eq 0 + then + revcount=$(($revcount + 1)) + else + # processing commit without normal parent information; + # fetch from repo + parents=$(git rev-parse "$rev^@") + extracount=$(($extracount + 1)) + fi + + progress "$revcount/$revmax ($createcount) [$extracount]" + + debug "Processing commit: $rev" + exists=$(cache_get "$rev") + if test -n "$exists" + then + debug " prior: $exists" + return + fi + createcount=$(($createcount + 1)) + debug " parents: $parents" + check_parents "$parents" "$indent" + newparents=$(cache_get $parents) + debug " newparents: $newparents" + + tree=$(subtree_for_commit "$rev" "$dir") + debug " tree is: $tree" + + # ugly. is there no better way to tell if this is a subtree + # vs. a mainline commit? Does it matter? + if test -z "$tree" + then + set_notree "$rev" + if test -n "$newparents" + then + cache_set "$rev" "$rev" + fi + return + fi + + newrev=$(copy_or_skip "$rev" "$tree" "$newparents") || exit $? + debug " newrev is: $newrev" + cache_set "$rev" "$newrev" + cache_set latest_new "$newrev" + cache_set latest_old "$rev" +} + cmd_add () { if test -e "$dir" then @@ -689,12 +765,7 @@ cmd_split () { done fi - if test -n "$ignore_joins" - then - unrevs= - else - unrevs="$(find_existing_splits "$dir" "$revs")" - fi + unrevs="$(find_existing_splits "$dir" "$revs")" # We can't restrict rev-list to only $dir here, because some of our # parents have the $dir contents the root, and those won't match. @@ -703,45 +774,11 @@ cmd_split () { revmax=$(eval "$grl" | wc -l) revcount=0 createcount=0 + extracount=0 eval "$grl" | while read rev parents do - revcount=$(($revcount + 1)) - progress "$revcount/$revmax ($createcount)" - debug "Processing commit: $rev" - exists=$(cache_get "$rev") - if test -n "$exists" - then - debug " prior: $exists" - continue - fi - createcount=$(($createcount + 1)) - debug " parents: $parents" - newparents=$(cache_get $parents) - debug " newparents: $newparents" - - tree=$(subtree_for_commit "$rev" "$dir") - debug " tree is: $tree" - - check_parents $parents - - # ugly. is there no better way to tell if this is a subtree - # vs. a mainline commit? Does it matter? - if test -z "$tree" - then - set_notree "$rev" - if test -n "$newparents" - then - cache_set "$rev" "$rev" - fi - continue - fi - - newrev=$(copy_or_skip "$rev" "$tree" "$newparents") || exit $? - debug " newrev is: $newrev" - cache_set "$rev" "$newrev" - cache_set latest_new "$newrev" - cache_set latest_old "$rev" + process_split_commit "$rev" "$parents" 0 done || exit $? 
latest_new=$(cache_get latest_new) @@ -887,20 +887,49 @@ static time_t update_tm(struct tm *tm, struct tm *now, time_t sec) return n; } +/* + * Do we have a pending number at the end, or when + * we see a new one? Let's assume it's a month day, + * as in "Dec 6, 1992" + */ +static void pending_number(struct tm *tm, int *num) +{ + int number = *num; + + if (number) { + *num = 0; + if (tm->tm_mday < 0 && number < 32) + tm->tm_mday = number; + else if (tm->tm_mon < 0 && number < 13) + tm->tm_mon = number-1; + else if (tm->tm_year < 0) { + if (number > 1969 && number < 2100) + tm->tm_year = number - 1900; + else if (number > 69 && number < 100) + tm->tm_year = number; + else if (number < 38) + tm->tm_year = 100 + number; + /* We screw up for number = 00 ? */ + } + } +} + static void date_now(struct tm *tm, struct tm *now, int *num) { + *num = 0; update_tm(tm, now, 0); } static void date_yesterday(struct tm *tm, struct tm *now, int *num) { + *num = 0; update_tm(tm, now, 24*60*60); } static void date_time(struct tm *tm, struct tm *now, int hour) { if (tm->tm_hour < hour) - date_yesterday(tm, now, NULL); + update_tm(tm, now, 24*60*60); tm->tm_hour = hour; tm->tm_min = 0; tm->tm_sec = 0; @@ -908,16 +937,19 @@ static void date_time(struct tm *tm, struct tm *now, int hour) static void date_midnight(struct tm *tm, struct tm *now, int *num) { + pending_number(tm, num); date_time(tm, now, 0); } static void date_noon(struct tm *tm, struct tm *now, int *num) { + pending_number(tm, num); date_time(tm, now, 12); } static void date_tea(struct tm *tm, struct tm *now, int *num) { + pending_number(tm, num); date_time(tm, now, 17); } @@ -953,6 +985,7 @@ static void date_never(struct tm *tm, struct tm *now, int *num) { time_t n = 0; localtime_r(&n, tm); + *num = 0; } static const struct special { @@ -1110,33 +1143,6 @@ static const char *approxidate_digit(const char *date, struct tm *tm, int *num, return end; } -/* - * Do we have a pending number at the end, or when - * we see a new one? Let's assume it's a month day, - * as in "Dec 6, 1992" - */ -static void pending_number(struct tm *tm, int *num) -{ - int number = *num; - - if (number) { - *num = 0; - if (tm->tm_mday < 0 && number < 32) - tm->tm_mday = number; - else if (tm->tm_mon < 0 && number < 13) - tm->tm_mon = number-1; - else if (tm->tm_year < 0) { - if (number > 1969 && number < 2100) - tm->tm_year = number - 1900; - else if (number > 69 && number < 100) - tm->tm_year = number; - else if (number < 38) - tm->tm_year = 100 + number; - /* We screw up for number = 00 ? 
*/ - } - } -} - static timestamp_t approxidate_str(const char *date, const struct timeval *tv, int *error_ret) diff --git a/delta-islands.c b/delta-islands.c new file mode 100644 index 0000000000..8e5018e406 --- /dev/null +++ b/delta-islands.c @@ -0,0 +1,502 @@ +#include "cache.h" +#include "attr.h" +#include "object.h" +#include "blob.h" +#include "commit.h" +#include "tag.h" +#include "tree.h" +#include "delta.h" +#include "pack.h" +#include "tree-walk.h" +#include "diff.h" +#include "revision.h" +#include "list-objects.h" +#include "progress.h" +#include "refs.h" +#include "khash.h" +#include "pack-bitmap.h" +#include "pack-objects.h" +#include "delta-islands.h" +#include "sha1-array.h" +#include "config.h" + +KHASH_INIT(str, const char *, void *, 1, kh_str_hash_func, kh_str_hash_equal) + +static khash_sha1 *island_marks; +static unsigned island_counter; +static unsigned island_counter_core; + +static kh_str_t *remote_islands; + +struct remote_island { + uint64_t hash; + struct oid_array oids; +}; + +struct island_bitmap { + uint32_t refcount; + uint32_t bits[FLEX_ARRAY]; +}; + +static uint32_t island_bitmap_size; + +/* + * Allocate a new bitmap; if "old" is not NULL, the new bitmap will be a copy + * of "old". Otherwise, the new bitmap is empty. + */ +static struct island_bitmap *island_bitmap_new(const struct island_bitmap *old) +{ + size_t size = sizeof(struct island_bitmap) + (island_bitmap_size * 4); + struct island_bitmap *b = xcalloc(1, size); + + if (old) + memcpy(b, old, size); + + b->refcount = 1; + return b; +} + +static void island_bitmap_or(struct island_bitmap *a, const struct island_bitmap *b) +{ + uint32_t i; + + for (i = 0; i < island_bitmap_size; ++i) + a->bits[i] |= b->bits[i]; +} + +static int island_bitmap_is_subset(struct island_bitmap *self, + struct island_bitmap *super) +{ + uint32_t i; + + if (self == super) + return 1; + + for (i = 0; i < island_bitmap_size; ++i) { + if ((self->bits[i] & super->bits[i]) != self->bits[i]) + return 0; + } + + return 1; +} + +#define ISLAND_BITMAP_BLOCK(x) (x / 32) +#define ISLAND_BITMAP_MASK(x) (1 << (x % 32)) + +static void island_bitmap_set(struct island_bitmap *self, uint32_t i) +{ + self->bits[ISLAND_BITMAP_BLOCK(i)] |= ISLAND_BITMAP_MASK(i); +} + +static int island_bitmap_get(struct island_bitmap *self, uint32_t i) +{ + return (self->bits[ISLAND_BITMAP_BLOCK(i)] & ISLAND_BITMAP_MASK(i)) != 0; +} + +int in_same_island(const struct object_id *trg_oid, const struct object_id *src_oid) +{ + khiter_t trg_pos, src_pos; + + /* If we aren't using islands, assume everything goes together. */ + if (!island_marks) + return 1; + + /* + * If we don't have a bitmap for the target, we can delta it + * against anything -- it's not an important object + */ + trg_pos = kh_get_sha1(island_marks, trg_oid->hash); + if (trg_pos >= kh_end(island_marks)) + return 1; + + /* + * if the source (our delta base) doesn't have a bitmap, + * we don't want to base any deltas on it! 
+ */ + src_pos = kh_get_sha1(island_marks, src_oid->hash); + if (src_pos >= kh_end(island_marks)) + return 0; + + return island_bitmap_is_subset(kh_value(island_marks, trg_pos), + kh_value(island_marks, src_pos)); +} + +int island_delta_cmp(const struct object_id *a, const struct object_id *b) +{ + khiter_t a_pos, b_pos; + struct island_bitmap *a_bitmap = NULL, *b_bitmap = NULL; + + if (!island_marks) + return 0; + + a_pos = kh_get_sha1(island_marks, a->hash); + if (a_pos < kh_end(island_marks)) + a_bitmap = kh_value(island_marks, a_pos); + + b_pos = kh_get_sha1(island_marks, b->hash); + if (b_pos < kh_end(island_marks)) + b_bitmap = kh_value(island_marks, b_pos); + + if (a_bitmap) { + if (!b_bitmap || !island_bitmap_is_subset(a_bitmap, b_bitmap)) + return -1; + } + if (b_bitmap) { + if (!a_bitmap || !island_bitmap_is_subset(b_bitmap, a_bitmap)) + return 1; + } + + return 0; +} + +static struct island_bitmap *create_or_get_island_marks(struct object *obj) +{ + khiter_t pos; + int hash_ret; + + pos = kh_put_sha1(island_marks, obj->oid.hash, &hash_ret); + if (hash_ret) + kh_value(island_marks, pos) = island_bitmap_new(NULL); + + return kh_value(island_marks, pos); +} + +static void set_island_marks(struct object *obj, struct island_bitmap *marks) +{ + struct island_bitmap *b; + khiter_t pos; + int hash_ret; + + pos = kh_put_sha1(island_marks, obj->oid.hash, &hash_ret); + if (hash_ret) { + /* + * We don't have one yet; make a copy-on-write of the + * parent. + */ + marks->refcount++; + kh_value(island_marks, pos) = marks; + return; + } + + /* + * We do have it. Make sure we split any copy-on-write before + * updating. + */ + b = kh_value(island_marks, pos); + if (b->refcount > 1) { + b->refcount--; + b = kh_value(island_marks, pos) = island_bitmap_new(b); + } + island_bitmap_or(b, marks); +} + +static void mark_remote_island_1(struct remote_island *rl, int is_core_island) +{ + uint32_t i; + + for (i = 0; i < rl->oids.nr; ++i) { + struct island_bitmap *marks; + struct object *obj = parse_object(the_repository, &rl->oids.oid[i]); + + if (!obj) + continue; + + marks = create_or_get_island_marks(obj); + island_bitmap_set(marks, island_counter); + + if (is_core_island && obj->type == OBJ_COMMIT) + obj->flags |= NEEDS_BITMAP; + + /* If it was a tag, also make sure we hit the underlying object. */ + while (obj && obj->type == OBJ_TAG) { + obj = ((struct tag *)obj)->tagged; + if (obj) { + parse_object(the_repository, &obj->oid); + marks = create_or_get_island_marks(obj); + island_bitmap_set(marks, island_counter); + } + } + } + + if (is_core_island) + island_counter_core = island_counter; + + island_counter++; +} + +struct tree_islands_todo { + struct object_entry *entry; + unsigned int depth; +}; + +static int tree_depth_compare(const void *a, const void *b) +{ + const struct tree_islands_todo *todo_a = a; + const struct tree_islands_todo *todo_b = b; + + return todo_a->depth - todo_b->depth; +} + +void resolve_tree_islands(int progress, struct packing_data *to_pack) +{ + struct progress *progress_state = NULL; + struct tree_islands_todo *todo; + int nr = 0; + int i; + + if (!island_marks) + return; + + /* + * We process only trees, as commits and tags have already been handled + * (and passed their marks on to root trees, as well. We must make sure + * to process them in descending tree-depth order so that marks + * propagate down the tree properly, even if a sub-tree is found in + * multiple parent trees. 
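A worked example of the subset rule implemented by in_same_island() above (island names are illustrative): if a shared object carries island marks {A, B} and a fork-only object carries just {B}, the fork-only object may be stored as a delta against the shared one, since {B} is a subset of {A, B} and every island that wants the target also has the base; the reverse direction is rejected, because a pack built for island A would otherwise need a base object that A never asked for.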
+ */ + ALLOC_ARRAY(todo, to_pack->nr_objects); + for (i = 0; i < to_pack->nr_objects; i++) { + if (oe_type(&to_pack->objects[i]) == OBJ_TREE) { + todo[nr].entry = &to_pack->objects[i]; + todo[nr].depth = oe_tree_depth(to_pack, &to_pack->objects[i]); + nr++; + } + } + QSORT(todo, nr, tree_depth_compare); + + if (progress) + progress_state = start_progress(_("Propagating island marks"), nr); + + for (i = 0; i < nr; i++) { + struct object_entry *ent = todo[i].entry; + struct island_bitmap *root_marks; + struct tree *tree; + struct tree_desc desc; + struct name_entry entry; + khiter_t pos; + + pos = kh_get_sha1(island_marks, ent->idx.oid.hash); + if (pos >= kh_end(island_marks)) + continue; + + root_marks = kh_value(island_marks, pos); + + tree = lookup_tree(the_repository, &ent->idx.oid); + if (!tree || parse_tree(tree) < 0) + die(_("bad tree object %s"), oid_to_hex(&ent->idx.oid)); + + init_tree_desc(&desc, tree->buffer, tree->size); + while (tree_entry(&desc, &entry)) { + struct object *obj; + + if (S_ISGITLINK(entry.mode)) + continue; + + obj = lookup_object(the_repository, entry.oid->hash); + if (!obj) + continue; + + set_island_marks(obj, root_marks); + } + + free_tree_buffer(tree); + + display_progress(progress_state, i+1); + } + + stop_progress(&progress_state); + free(todo); +} + +static regex_t *island_regexes; +static unsigned int island_regexes_alloc, island_regexes_nr; +static const char *core_island_name; + +static int island_config_callback(const char *k, const char *v, void *cb) +{ + if (!strcmp(k, "pack.island")) { + struct strbuf re = STRBUF_INIT; + + if (!v) + return config_error_nonbool(k); + + ALLOC_GROW(island_regexes, island_regexes_nr + 1, island_regexes_alloc); + + if (*v != '^') + strbuf_addch(&re, '^'); + strbuf_addstr(&re, v); + + if (regcomp(&island_regexes[island_regexes_nr], re.buf, REG_EXTENDED)) + die(_("failed to load island regex for '%s': %s"), k, re.buf); + + strbuf_release(&re); + island_regexes_nr++; + return 0; + } + + if (!strcmp(k, "pack.islandcore")) + return git_config_string(&core_island_name, k, v); + + return 0; +} + +static void add_ref_to_island(const char *island_name, const struct object_id *oid) +{ + uint64_t sha_core; + struct remote_island *rl = NULL; + + int hash_ret; + khiter_t pos = kh_put_str(remote_islands, island_name, &hash_ret); + + if (hash_ret) { + kh_key(remote_islands, pos) = xstrdup(island_name); + kh_value(remote_islands, pos) = xcalloc(1, sizeof(struct remote_island)); + } + + rl = kh_value(remote_islands, pos); + oid_array_append(&rl->oids, oid); + + memcpy(&sha_core, oid->hash, sizeof(uint64_t)); + rl->hash += sha_core; +} + +static int find_island_for_ref(const char *refname, const struct object_id *oid, + int flags, void *data) +{ + /* + * We should advertise 'ARRAY_SIZE(matches) - 2' as the max, + * so we can diagnose below a config with more capture groups + * than we support. 
+ */ + regmatch_t matches[16]; + int i, m; + struct strbuf island_name = STRBUF_INIT; + + /* walk backwards to get last-one-wins ordering */ + for (i = island_regexes_nr - 1; i >= 0; i--) { + if (!regexec(&island_regexes[i], refname, + ARRAY_SIZE(matches), matches, 0)) + break; + } + + if (i < 0) + return 0; + + if (matches[ARRAY_SIZE(matches) - 1].rm_so != -1) + warning(_("island regex from config has " + "too many capture groups (max=%d)"), + (int)ARRAY_SIZE(matches) - 2); + + for (m = 1; m < ARRAY_SIZE(matches); m++) { + regmatch_t *match = &matches[m]; + + if (match->rm_so == -1) + continue; + + if (island_name.len) + strbuf_addch(&island_name, '-'); + + strbuf_add(&island_name, refname + match->rm_so, match->rm_eo - match->rm_so); + } + + add_ref_to_island(island_name.buf, oid); + strbuf_release(&island_name); + return 0; +} + +static struct remote_island *get_core_island(void) +{ + if (core_island_name) { + khiter_t pos = kh_get_str(remote_islands, core_island_name); + if (pos < kh_end(remote_islands)) + return kh_value(remote_islands, pos); + } + + return NULL; +} + +static void deduplicate_islands(void) +{ + struct remote_island *island, *core = NULL, **list; + unsigned int island_count, dst, src, ref, i = 0; + + island_count = kh_size(remote_islands); + ALLOC_ARRAY(list, island_count); + + kh_foreach_value(remote_islands, island, { + list[i++] = island; + }); + + for (ref = 0; ref + 1 < island_count; ref++) { + for (src = ref + 1, dst = src; src < island_count; src++) { + if (list[ref]->hash == list[src]->hash) + continue; + + if (src != dst) + list[dst] = list[src]; + + dst++; + } + island_count = dst; + } + + island_bitmap_size = (island_count / 32) + 1; + core = get_core_island(); + + for (i = 0; i < island_count; ++i) { + mark_remote_island_1(list[i], core && list[i]->hash == core->hash); + } + + free(list); +} + +void load_delta_islands(void) +{ + island_marks = kh_init_sha1(); + remote_islands = kh_init_str(); + + git_config(island_config_callback, NULL); + for_each_ref(find_island_for_ref, NULL); + deduplicate_islands(); + + fprintf(stderr, _("Marked %d islands, done.\n"), island_counter); +} + +void propagate_island_marks(struct commit *commit) +{ + khiter_t pos = kh_get_sha1(island_marks, commit->object.oid.hash); + + if (pos < kh_end(island_marks)) { + struct commit_list *p; + struct island_bitmap *root_marks = kh_value(island_marks, pos); + + parse_commit(commit); + set_island_marks(&get_commit_tree(commit)->object, root_marks); + for (p = commit->parents; p; p = p->next) + set_island_marks(&p->item->object, root_marks); + } +} + +int compute_pack_layers(struct packing_data *to_pack) +{ + uint32_t i; + + if (!core_island_name || !island_marks) + return 1; + + for (i = 0; i < to_pack->nr_objects; ++i) { + struct object_entry *entry = &to_pack->objects[i]; + khiter_t pos = kh_get_sha1(island_marks, entry->idx.oid.hash); + + oe_set_layer(to_pack, entry, 1); + + if (pos < kh_end(island_marks)) { + struct island_bitmap *bitmap = kh_value(island_marks, pos); + + if (island_bitmap_get(bitmap, island_counter_core)) + oe_set_layer(to_pack, entry, 0); + } + } + + return 2; +} diff --git a/delta-islands.h b/delta-islands.h new file mode 100644 index 0000000000..b635cd07d8 --- /dev/null +++ b/delta-islands.h @@ -0,0 +1,15 @@ +#ifndef DELTA_ISLANDS_H +#define DELTA_ISLANDS_H + +struct object_id; +struct packing_data; +struct commit; + +int island_delta_cmp(const struct object_id *a, const struct object_id *b); +int in_same_island(const struct object_id *, const struct object_id *); 
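Usage sketch for the configuration handled above (the ref patterns are illustrative): two entries such as pack.island = refs/virtual/([0-9]+)/heads/ and pack.island = refs/virtual/([0-9]+)/tags/ group each fork's heads and tags into one island named from the capture group (multiple groups are joined with '-'), and pack.islandcore names the island whose objects compute_pack_layers() places in layer 0.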
+void resolve_tree_islands(int progress, struct packing_data *to_pack); +void load_delta_islands(void); +void propagate_island_marks(struct commit *commit); +int compute_pack_layers(struct packing_data *to_pack); + +#endif /* DELTA_ISLANDS_H */ diff --git a/diff-lib.c b/diff-lib.c index 88a98b1c06..83fce51518 100644 --- a/diff-lib.c +++ b/diff-lib.c @@ -70,7 +70,7 @@ static int match_stat_with_submodule(struct diff_options *diffopt, struct stat *st, unsigned ce_option, unsigned *dirty_submodule) { - int changed = ce_match_stat(ce, st, ce_option); + int changed = ie_match_stat(diffopt->repo->index, ce, st, ce_option); if (S_ISGITLINK(ce->ce_mode)) { struct diff_flags orig_flags = diffopt->flags; if (!diffopt->flags.override_submodule_config) @@ -93,15 +93,16 @@ int run_diff_files(struct rev_info *revs, unsigned int option) unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED) ? CE_MATCH_RACY_IS_DIRTY : 0); uint64_t start = getnanotime(); + struct index_state *istate = revs->diffopt.repo->index; diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/"); if (diff_unmerged_stage < 0) diff_unmerged_stage = 2; - entries = active_nr; + entries = istate->cache_nr; for (i = 0; i < entries; i++) { unsigned int oldmode, newmode; - struct cache_entry *ce = active_cache[i]; + struct cache_entry *ce = istate->cache[i]; int changed; unsigned dirty_submodule = 0; const struct object_id *old_oid, *new_oid; @@ -109,7 +110,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option) if (diff_can_quit_early(&revs->diffopt)) break; - if (!ce_path_match(&the_index, ce, &revs->prune_data, NULL)) + if (!ce_path_match(istate, ce, &revs->prune_data, NULL)) continue; if (ce_stage(ce)) { @@ -145,7 +146,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option) dpath->mode = wt_mode; while (i < entries) { - struct cache_entry *nce = active_cache[i]; + struct cache_entry *nce = istate->cache[i]; int stage; if (strcmp(ce->name, nce->name)) @@ -342,7 +343,7 @@ static int show_modified(struct rev_info *revs, } if (revs->combine_merges && !cached && - (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) { + (!oideq(oid, &old_entry->oid) || !oideq(&old_entry->oid, &new_entry->oid))) { struct combine_diff_path *p; int pathlen = ce_namelen(new_entry); @@ -366,7 +367,7 @@ static int show_modified(struct rev_info *revs, } oldmode = old_entry->ce_mode; - if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule && + if (mode == oldmode && oideq(oid, &old_entry->oid) && !dirty_submodule && !revs->diffopt.flags.find_copies_harder) return 0; @@ -474,7 +475,9 @@ static int oneway_diff(const struct cache_entry * const *src, if (tree == o->df_conflict_entry) tree = NULL; - if (ce_path_match(&the_index, idx ? idx : tree, &revs->prune_data, NULL)) { + if (ce_path_match(revs->diffopt.repo->index, + idx ? 
idx : tree, + &revs->prune_data, NULL)) { do_oneway_diff(o, idx, tree); if (diff_can_quit_early(&revs->diffopt)) { o->exiting_early = 1; @@ -506,7 +509,7 @@ static int diff_cache(struct rev_info *revs, opts.merge = 1; opts.fn = oneway_diff; opts.unpack_data = revs; - opts.src_index = &the_index; + opts.src_index = revs->diffopt.repo->index; opts.dst_index = NULL; opts.pathspec = &revs->diffopt.pathspec; opts.pathspec->recursive = 1; @@ -518,11 +521,11 @@ static int diff_cache(struct rev_info *revs, int run_diff_index(struct rev_info *revs, int cached) { struct object_array_entry *ent; - uint64_t start = getnanotime(); if (revs->pending.nr != 1) BUG("run_diff_index must be passed exactly one tree"); + trace_performance_enter(); ent = revs->pending.objects; if (diff_cache(revs, &ent->item->oid, ent->name, cached)) exit(128); @@ -531,7 +534,7 @@ int run_diff_index(struct rev_info *revs, int cached) diffcore_fix_diff_index(&revs->diffopt); diffcore_std(&revs->diffopt); diff_flush(&revs->diffopt); - trace_performance_since(start, "diff-index"); + trace_performance_leave("diff-index"); return 0; } @@ -539,7 +542,7 @@ int do_diff_cache(const struct object_id *tree_oid, struct diff_options *opt) { struct rev_info revs; - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); copy_pathspec(&revs.prune_data, &opt->pathspec); revs.diffopt = *opt; @@ -554,7 +557,7 @@ int index_differs_from(const char *def, const struct diff_flags *flags, struct rev_info rev; struct setup_revision_opt opt; - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); memset(&opt, 0, sizeof(opt)); opt.def = def; setup_revisions(0, NULL, &rev, &opt); diff --git a/diff-no-index.c b/diff-no-index.c index 0ed5f0f496..9414e922d1 100644 --- a/diff-no-index.c +++ b/diff-no-index.c @@ -233,7 +233,8 @@ static void fixup_paths(const char **path, struct strbuf *replacement) } } -void diff_no_index(struct rev_info *revs, +void diff_no_index(struct repository *r, + struct rev_info *revs, int argc, const char **argv) { int i; @@ -241,7 +242,11 @@ void diff_no_index(struct rev_info *revs, struct strbuf replacement = STRBUF_INIT; const char *prefix = revs->prefix; - diff_setup(&revs->diffopt); + /* + * FIXME: --no-index should not look at index and we should be + * able to pass NULL repo. Maybe later. 
+ */ + repo_diff_setup(r, &revs->diffopt); for (i = 1; i < argc - 2; ) { int j; if (!strcmp(argv[i], "--no-index")) @@ -554,14 +554,15 @@ static int count_lines(const char *data, int size) return count; } -static int fill_mmfile(mmfile_t *mf, struct diff_filespec *one) +static int fill_mmfile(struct repository *r, mmfile_t *mf, + struct diff_filespec *one) { if (!DIFF_FILE_VALID(one)) { mf->ptr = (char *)""; /* does not matter */ mf->size = 0; return 0; } - else if (diff_populate_filespec(one, 0)) + else if (diff_populate_filespec(r, one, 0)) return -1; mf->ptr = one->data; @@ -570,11 +571,12 @@ static int fill_mmfile(mmfile_t *mf, struct diff_filespec *one) } /* like fill_mmfile, but only for size, so we can avoid retrieving blob */ -static unsigned long diff_filespec_size(struct diff_filespec *one) +static unsigned long diff_filespec_size(struct repository *r, + struct diff_filespec *one) { if (!DIFF_FILE_VALID(one)) return 0; - diff_populate_filespec(one, CHECK_SIZE_ONLY); + diff_populate_filespec(r, one, CHECK_SIZE_ONLY); return one->size; } @@ -624,42 +626,54 @@ static void check_blank_at_eof(mmfile_t *mf1, mmfile_t *mf2, } static void emit_line_0(struct diff_options *o, - const char *set, unsigned reverse, const char *reset, + const char *set_sign, const char *set, unsigned reverse, const char *reset, int first, const char *line, int len) { int has_trailing_newline, has_trailing_carriage_return; - int nofirst; + int needs_reset = 0; /* at the end of the line */ FILE *file = o->file; - if (first) - fputs(diff_line_prefix(o), file); - else if (!len) - return; + fputs(diff_line_prefix(o), file); - if (len == 0) { - has_trailing_newline = (first == '\n'); - has_trailing_carriage_return = (!has_trailing_newline && - (first == '\r')); - nofirst = has_trailing_newline || has_trailing_carriage_return; - } else { - has_trailing_newline = (len > 0 && line[len-1] == '\n'); - if (has_trailing_newline) - len--; - has_trailing_carriage_return = (len > 0 && line[len-1] == '\r'); - if (has_trailing_carriage_return) - len--; - nofirst = 0; + has_trailing_newline = (len > 0 && line[len-1] == '\n'); + if (has_trailing_newline) + len--; + + has_trailing_carriage_return = (len > 0 && line[len-1] == '\r'); + if (has_trailing_carriage_return) + len--; + + if (!len && !first) + goto end_of_line; + + if (reverse && want_color(o->use_color)) { + fputs(GIT_COLOR_REVERSE, file); + needs_reset = 1; } - if (len || !nofirst) { - if (reverse && want_color(o->use_color)) - fputs(GIT_COLOR_REVERSE, file); + if (set_sign) { + fputs(set_sign, file); + needs_reset = 1; + } + + if (first) + fputc(first, file); + + if (!len) + goto end_of_line; + + if (set) { + if (set_sign && set != set_sign) + fputs(reset, file); fputs(set, file); - if (first && !nofirst) - fputc(first, file); - fwrite(line, len, 1, file); - fputs(reset, file); + needs_reset = 1; } + fwrite(line, len, 1, file); + needs_reset = 1; /* 'line' may contain color codes. 
*/ + +end_of_line: + if (needs_reset) + fputs(reset, file); if (has_trailing_carriage_return) fputc('\r', file); if (has_trailing_newline) @@ -669,7 +683,7 @@ static void emit_line_0(struct diff_options *o, static void emit_line(struct diff_options *o, const char *set, const char *reset, const char *line, int len) { - emit_line_0(o, set, 0, reset, line[0], line+1, len-1); + emit_line_0(o, set, NULL, 0, reset, 0, line, len); } enum diff_symbol { @@ -764,7 +778,6 @@ struct moved_entry { struct hashmap_entry ent; const struct emitted_diff_symbol *es; struct moved_entry *next_line; - struct ws_delta *wsd; }; /** @@ -781,6 +794,17 @@ struct ws_delta { }; #define WS_DELTA_INIT { NULL, 0 } +struct moved_block { + struct moved_entry *match; + struct ws_delta wsd; +}; + +static void moved_block_clear(struct moved_block *b) +{ + FREE_AND_NULL(b->wsd.string); + b->match = NULL; +} + static int compute_ws_delta(const struct emitted_diff_symbol *a, const struct emitted_diff_symbol *b, struct ws_delta *out) @@ -789,16 +813,19 @@ static int compute_ws_delta(const struct emitted_diff_symbol *a, const struct emitted_diff_symbol *shorter = a->len > b->len ? b : a; int d = longer->len - shorter->len; + if (strncmp(longer->line + d, shorter->line, shorter->len)) + return 0; + out->string = xmemdupz(longer->line, d); out->current_longer = (a == longer); - return !strncmp(longer->line + d, shorter->line, shorter->len); + return 1; } static int cmp_in_block_with_wsd(const struct diff_options *o, const struct moved_entry *cur, const struct moved_entry *match, - struct moved_entry *pmb, + struct moved_block *pmb, int n) { struct emitted_diff_symbol *l = &o->emitted_symbols->buf[n]; @@ -818,16 +845,15 @@ static int cmp_in_block_with_wsd(const struct diff_options *o, if (strcmp(a, b)) return 1; - if (!pmb->wsd) + if (!pmb->wsd.string) /* - * No white space delta was carried forward? This can happen - * when we exit early in this function and do not carry - * forward ws. + * The white space delta is not active? This can happen + * when we exit early in this function. */ return 1; /* - * The indent changes of the block are known and carried forward in + * The indent changes of the block are known and stored in * pmb->wsd; however we need to check if the indent changes of the * current line are still the same as before. * @@ -835,8 +861,8 @@ static int cmp_in_block_with_wsd(const struct diff_options *o, * one of them for the white spaces, depending which was longer. */ - wslen = strlen(pmb->wsd->string); - if (pmb->wsd->current_longer) { + wslen = strlen(pmb->wsd.string); + if (pmb->wsd.current_longer) { c += wslen; cl -= wslen; } else { @@ -844,7 +870,7 @@ static int cmp_in_block_with_wsd(const struct diff_options *o, al -= wslen; } - if (strcmp(a, c)) + if (al != cl || memcmp(a, c, al)) return 1; return 0; @@ -886,7 +912,6 @@ static struct moved_entry *prepare_entry(struct diff_options *o, ret->ent.hash = xdiff_hash_string(l->line, l->len, flags); ret->es = l; ret->next_line = NULL; - ret->wsd = NULL; return ret; } @@ -926,18 +951,18 @@ static void add_lines_to_move_detection(struct diff_options *o, static void pmb_advance_or_null(struct diff_options *o, struct moved_entry *match, struct hashmap *hm, - struct moved_entry **pmb, + struct moved_block *pmb, int pmb_nr) { int i; for (i = 0; i < pmb_nr; i++) { - struct moved_entry *prev = pmb[i]; + struct moved_entry *prev = pmb[i].match; struct moved_entry *cur = (prev && prev->next_line) ? 
prev->next_line : NULL; if (cur && !hm->cmpfn(o, cur, match, NULL)) { - pmb[i] = cur; + pmb[i].match = cur; } else { - pmb[i] = NULL; + pmb[i].match = NULL; } } } @@ -945,7 +970,7 @@ static void pmb_advance_or_null(struct diff_options *o, static void pmb_advance_or_null_multi_match(struct diff_options *o, struct moved_entry *match, struct hashmap *hm, - struct moved_entry **pmb, + struct moved_block *pmb, int pmb_nr, int n) { int i; @@ -953,48 +978,47 @@ static void pmb_advance_or_null_multi_match(struct diff_options *o, for (; match; match = hashmap_get_next(hm, match)) { for (i = 0; i < pmb_nr; i++) { - struct moved_entry *prev = pmb[i]; + struct moved_entry *prev = pmb[i].match; struct moved_entry *cur = (prev && prev->next_line) ? prev->next_line : NULL; if (!cur) continue; - if (!cmp_in_block_with_wsd(o, cur, match, pmb[i], n)) + if (!cmp_in_block_with_wsd(o, cur, match, &pmb[i], n)) got_match[i] |= 1; } } for (i = 0; i < pmb_nr; i++) { if (got_match[i]) { - /* Carry the white space delta forward */ - pmb[i]->next_line->wsd = pmb[i]->wsd; - pmb[i] = pmb[i]->next_line; - } else - pmb[i] = NULL; + /* Advance to the next line */ + pmb[i].match = pmb[i].match->next_line; + } else { + moved_block_clear(&pmb[i]); + } } + + free(got_match); } -static int shrink_potential_moved_blocks(struct moved_entry **pmb, +static int shrink_potential_moved_blocks(struct moved_block *pmb, int pmb_nr) { int lp, rp; /* Shrink the set of potential block to the remaining running */ for (lp = 0, rp = pmb_nr - 1; lp <= rp;) { - while (lp < pmb_nr && pmb[lp]) + while (lp < pmb_nr && pmb[lp].match) lp++; /* lp points at the first NULL now */ - while (rp > -1 && !pmb[rp]) + while (rp > -1 && !pmb[rp].match) rp--; /* rp points at the last non-NULL */ if (lp < pmb_nr && rp > -1 && lp < rp) { pmb[lp] = pmb[rp]; - if (pmb[rp]->wsd) { - free(pmb[rp]->wsd->string); - FREE_AND_NULL(pmb[rp]->wsd); - } - pmb[rp] = NULL; + pmb[rp].match = NULL; + pmb[rp].wsd.string = NULL; rp--; lp++; } @@ -1041,7 +1065,7 @@ static void mark_color_as_moved(struct diff_options *o, struct hashmap *add_lines, struct hashmap *del_lines) { - struct moved_entry **pmb = NULL; /* potentially moved blocks */ + struct moved_block *pmb = NULL; /* potentially moved blocks */ int pmb_nr = 0, pmb_alloc = 0; int n, flipped_block = 1, block_length = 0; @@ -1070,7 +1094,11 @@ static void mark_color_as_moved(struct diff_options *o, } if (!match) { + int i; + adjust_last_block(o, n, block_length); + for(i = 0; i < pmb_nr; i++) + moved_block_clear(&pmb[i]); pmb_nr = 0; block_length = 0; continue; @@ -1098,14 +1126,12 @@ static void mark_color_as_moved(struct diff_options *o, ALLOC_GROW(pmb, pmb_nr + 1, pmb_alloc); if (o->color_moved_ws_handling & COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE) { - struct ws_delta *wsd = xmalloc(sizeof(*match->wsd)); - if (compute_ws_delta(l, match->es, wsd)) { - match->wsd = wsd; - pmb[pmb_nr++] = match; - } else - free(wsd); + if (compute_ws_delta(l, match->es, + &pmb[pmb_nr].wsd)) + pmb[pmb_nr++].match = match; } else { - pmb[pmb_nr++] = match; + pmb[pmb_nr].wsd.string = NULL; + pmb[pmb_nr++].match = match; } } @@ -1122,6 +1148,8 @@ static void mark_color_as_moved(struct diff_options *o, } adjust_last_block(o, n, block_length); + for(n = 0; n < pmb_nr; n++) + moved_block_clear(&pmb[n]); free(pmb); } @@ -1187,12 +1215,13 @@ static void dim_moved_lines(struct diff_options *o) } static void emit_line_ws_markup(struct diff_options *o, - const char *set, const char *reset, - const char *line, int len, - const char *set_sign, char sign, 
+ const char *set_sign, const char *set, + const char *reset, + int sign_index, const char *line, int len, unsigned ws_rule, int blank_at_eof) { const char *ws = NULL; + int sign = o->output_indicators[sign_index]; if (o->ws_error_highlight & ws_rule) { ws = diff_get_color_opt(o, DIFF_WHITESPACE); @@ -1201,18 +1230,15 @@ static void emit_line_ws_markup(struct diff_options *o, } if (!ws && !set_sign) - emit_line_0(o, set, 0, reset, sign, line, len); + emit_line_0(o, set, NULL, 0, reset, sign, line, len); else if (!ws) { - /* Emit just the prefix, then the rest. */ - emit_line_0(o, set_sign ? set_sign : set, !!set_sign, reset, - sign, "", 0); - emit_line_0(o, set, 0, reset, 0, line, len); + emit_line_0(o, set_sign, set, !!set_sign, reset, sign, line, len); } else if (blank_at_eof) /* Blank line at EOF - paint '+' as well */ - emit_line_0(o, ws, 0, reset, sign, line, len); + emit_line_0(o, ws, NULL, 0, reset, sign, line, len); else { /* Emit just the prefix, then the rest. */ - emit_line_0(o, set_sign ? set_sign : set, !!set_sign, reset, + emit_line_0(o, set_sign ? set_sign : set, NULL, !!set_sign, reset, sign, "", 0); ws_check_emit(line, len, ws_rule, o->file, set, reset, ws); @@ -1236,7 +1262,7 @@ static void emit_diff_symbol_from_struct(struct diff_options *o, context = diff_get_color_opt(o, DIFF_CONTEXT); reset = diff_get_color_opt(o, DIFF_RESET); putc('\n', o->file); - emit_line_0(o, context, 0, reset, '\\', + emit_line_0(o, context, NULL, 0, reset, '\\', nneof, strlen(nneof)); break; case DIFF_SYMBOL_SUBMODULE_HEADER: @@ -1274,7 +1300,8 @@ static void emit_diff_symbol_from_struct(struct diff_options *o, else if (c == '-') set = diff_get_color_opt(o, DIFF_FILE_OLD); } - emit_line_ws_markup(o, set, reset, line, len, set_sign, ' ', + emit_line_ws_markup(o, set_sign, set, reset, + OUTPUT_INDICATOR_CONTEXT, line, len, flags & (DIFF_SYMBOL_CONTENT_WS_MASK), 0); break; case DIFF_SYMBOL_PLUS: @@ -1317,7 +1344,8 @@ static void emit_diff_symbol_from_struct(struct diff_options *o, set = diff_get_color_opt(o, DIFF_CONTEXT_BOLD); flags &= ~DIFF_SYMBOL_CONTENT_WS_MASK; } - emit_line_ws_markup(o, set, reset, line, len, set_sign, '+', + emit_line_ws_markup(o, set_sign, set, reset, + OUTPUT_INDICATOR_NEW, line, len, flags & DIFF_SYMBOL_CONTENT_WS_MASK, flags & DIFF_SYMBOL_CONTENT_BLANK_LINE_EOF); break; @@ -1360,7 +1388,8 @@ static void emit_diff_symbol_from_struct(struct diff_options *o, else set = diff_get_color_opt(o, DIFF_CONTEXT_DIM); } - emit_line_ws_markup(o, set, reset, line, len, set_sign, '-', + emit_line_ws_markup(o, set_sign, set, reset, + OUTPUT_INDICATOR_OLD, line, len, flags & DIFF_SYMBOL_CONTENT_WS_MASK, 0); break; case DIFF_SYMBOL_WORDS_PORCELAIN: @@ -1698,12 +1727,12 @@ static void emit_rewrite_diff(const char *name_a, quote_two_c_style(&a_name, a_prefix, name_a, 0); quote_two_c_style(&b_name, b_prefix, name_b, 0); - size_one = fill_textconv(textconv_one, one, &data_one); - size_two = fill_textconv(textconv_two, two, &data_two); + size_one = fill_textconv(o->repo, textconv_one, one, &data_one); + size_two = fill_textconv(o->repo, textconv_two, two, &data_two); memset(&ecbdata, 0, sizeof(ecbdata)); ecbdata.color_diff = want_color(o->use_color); - ecbdata.ws_rule = whitespace_rule(name_b); + ecbdata.ws_rule = whitespace_rule(o->repo->index, name_b); ecbdata.opt = o; if (ecbdata.ws_rule & WS_BLANK_AT_EOF) { mmfile_t mf1, mf2; @@ -1883,19 +1912,17 @@ static int color_words_output_graph_prefix(struct diff_words_data *diff_words) } } -static void fn_out_diff_words_aux(void *priv, char *line, 
unsigned long len) +static void fn_out_diff_words_aux(void *priv, + long minus_first, long minus_len, + long plus_first, long plus_len, + const char *func, long funclen) { struct diff_words_data *diff_words = priv; struct diff_words_style *style = diff_words->style; - int minus_first, minus_len, plus_first, plus_len; const char *minus_begin, *minus_end, *plus_begin, *plus_end; struct diff_options *opt = diff_words->opt; const char *line_prefix; - if (line[0] != '@' || parse_hunk_header(line, len, - &minus_first, &minus_len, &plus_first, &plus_len)) - return; - assert(opt); line_prefix = diff_line_prefix(opt); @@ -2045,8 +2072,8 @@ static void diff_words_show(struct diff_words_data *diff_words) xpp.flags = 0; /* as only the hunk header will be parsed, we need a 0-context */ xecfg.ctxlen = 0; - if (xdi_diff_outf(&minus, &plus, fn_out_diff_words_aux, diff_words, - &xpp, &xecfg)) + if (xdi_diff_outf(&minus, &plus, fn_out_diff_words_aux, NULL, + diff_words, &xpp, &xecfg)) die("unable to generate word diff"); free(minus.ptr); free(plus.ptr); @@ -2091,23 +2118,25 @@ static void diff_words_flush(struct emit_callback *ecbdata) } } -static void diff_filespec_load_driver(struct diff_filespec *one) +static void diff_filespec_load_driver(struct diff_filespec *one, + struct index_state *istate) { /* Use already-loaded driver */ if (one->driver) return; if (S_ISREG(one->mode)) - one->driver = userdiff_find_by_path(one->path); + one->driver = userdiff_find_by_path(istate, one->path); /* Fallback to default settings */ if (!one->driver) one->driver = userdiff_find_by_name("default"); } -static const char *userdiff_word_regex(struct diff_filespec *one) +static const char *userdiff_word_regex(struct diff_filespec *one, + struct index_state *istate) { - diff_filespec_load_driver(one); + diff_filespec_load_driver(one, istate); return one->driver->word_regex; } @@ -2130,9 +2159,9 @@ static void init_diff_words_data(struct emit_callback *ecbdata, xcalloc(1, sizeof(struct emitted_diff_symbols)); if (!o->word_regex) - o->word_regex = userdiff_word_regex(one); + o->word_regex = userdiff_word_regex(one, o->repo->index); if (!o->word_regex) - o->word_regex = userdiff_word_regex(two); + o->word_regex = userdiff_word_regex(two, o->repo->index); if (!o->word_regex) o->word_regex = diff_word_regex_cfg; if (o->word_regex) { @@ -2933,16 +2962,11 @@ static void show_dirstat(struct diff_options *options) struct diff_filepair *p = q->queue[i]; const char *name; unsigned long copied, added, damage; - int content_changed; name = p->two->path ? p->two->path : p->one->path; - if (p->one->oid_valid && p->two->oid_valid) - content_changed = oidcmp(&p->one->oid, &p->two->oid); - else - content_changed = 1; - - if (!content_changed) { + if (p->one->oid_valid && p->two->oid_valid && + oideq(&p->one->oid, &p->two->oid)) { /* * The SHA1 has not changed, so pre-/post-content is * identical. 
We can therefore skip looking at the @@ -2965,18 +2989,19 @@ static void show_dirstat(struct diff_options *options) } if (DIFF_FILE_VALID(p->one) && DIFF_FILE_VALID(p->two)) { - diff_populate_filespec(p->one, 0); - diff_populate_filespec(p->two, 0); - diffcore_count_changes(p->one, p->two, NULL, NULL, + diff_populate_filespec(options->repo, p->one, 0); + diff_populate_filespec(options->repo, p->two, 0); + diffcore_count_changes(options->repo, + p->one, p->two, NULL, NULL, &copied, &added); diff_free_filespec_data(p->one); diff_free_filespec_data(p->two); } else if (DIFF_FILE_VALID(p->one)) { - diff_populate_filespec(p->one, CHECK_SIZE_ONLY); + diff_populate_filespec(options->repo, p->one, CHECK_SIZE_ONLY); copied = added = 0; diff_free_filespec_data(p->one); } else if (DIFF_FILE_VALID(p->two)) { - diff_populate_filespec(p->two, CHECK_SIZE_ONLY); + diff_populate_filespec(options->repo, p->two, CHECK_SIZE_ONLY); copied = 0; added = p->two->size; diff_free_filespec_data(p->two); @@ -2989,7 +3014,7 @@ static void show_dirstat(struct diff_options *options) * made to the preimage. * If the resulting damage is zero, we know that * diffcore_count_changes() considers the two entries to - * be identical, but since content_changed is true, we + * be identical, but since the oid changed, we * know that there must have been _some_ kind of change, * so we force all entries to have damage > 0. */ @@ -3103,6 +3128,15 @@ static int is_conflict_marker(const char *line, int marker_size, unsigned long l return 1; } +static void checkdiff_consume_hunk(void *priv, + long ob, long on, long nb, long nn, + const char *func, long funclen) + +{ + struct checkdiff_t *data = priv; + data->lineno = nb - 1; +} + static void checkdiff_consume(void *priv, char *line, unsigned long len) { struct checkdiff_t *data = priv; @@ -3138,12 +3172,6 @@ static void checkdiff_consume(void *priv, char *line, unsigned long len) data->o->file, set, reset, ws); } else if (line[0] == ' ') { data->lineno++; - } else if (line[0] == '@') { - char *plus = strchr(line, '+'); - if (plus) - data->lineno = strtol(plus, NULL, 10) - 1; - else - die("invalid diff"); } } @@ -3200,7 +3228,7 @@ static void emit_binary_diff_body(struct diff_options *o, } if (delta && delta_size < deflate_size) { - char *s = xstrfmt("%lu", orig_size); + char *s = xstrfmt("%"PRIuMAX , (uintmax_t)orig_size); emit_diff_symbol(o, DIFF_SYMBOL_BINARY_DIFF_HEADER_DELTA, s, strlen(s), 0); free(s); @@ -3250,15 +3278,16 @@ static void emit_binary_diff(struct diff_options *o, emit_binary_diff_body(o, two, one); } -int diff_filespec_is_binary(struct diff_filespec *one) +int diff_filespec_is_binary(struct repository *r, + struct diff_filespec *one) { if (one->is_binary == -1) { - diff_filespec_load_driver(one); + diff_filespec_load_driver(one, r->index); if (one->driver->binary != -1) one->is_binary = one->driver->binary; else { if (!one->data && DIFF_FILE_VALID(one)) - diff_populate_filespec(one, CHECK_BINARY); + diff_populate_filespec(r, one, CHECK_BINARY); if (one->is_binary == -1 && one->data) one->is_binary = buffer_is_binary(one->data, one->size); @@ -3269,9 +3298,10 @@ int diff_filespec_is_binary(struct diff_filespec *one) return one->is_binary; } -static const struct userdiff_funcname *diff_funcname_pattern(struct diff_filespec *one) +static const struct userdiff_funcname * +diff_funcname_pattern(struct diff_options *o, struct diff_filespec *one) { - diff_filespec_load_driver(one); + diff_filespec_load_driver(one, o->repo->index); return one->driver->funcname.pattern ? 
&one->driver->funcname : NULL; } @@ -3283,12 +3313,13 @@ void diff_set_mnemonic_prefix(struct diff_options *options, const char *a, const options->b_prefix = b; } -struct userdiff_driver *get_textconv(struct diff_filespec *one) +struct userdiff_driver *get_textconv(struct index_state *istate, + struct diff_filespec *one) { if (!DIFF_FILE_VALID(one)) return NULL; - diff_filespec_load_driver(one); + diff_filespec_load_driver(one, istate); return userdiff_get_textconv(one->driver); } @@ -3338,8 +3369,8 @@ static void builtin_diff(const char *name_a, } if (o->flags.allow_textconv) { - textconv_one = get_textconv(one); - textconv_two = get_textconv(two); + textconv_one = get_textconv(o->repo->index, one); + textconv_two = get_textconv(o->repo->index, two); } /* Never use a non-valid filename anywhere if at all possible */ @@ -3380,13 +3411,13 @@ static void builtin_diff(const char *name_a, if ((one->mode ^ two->mode) & S_IFMT) goto free_ab_and_return; if (complete_rewrite && - (textconv_one || !diff_filespec_is_binary(one)) && - (textconv_two || !diff_filespec_is_binary(two))) { + (textconv_one || !diff_filespec_is_binary(o->repo, one)) && + (textconv_two || !diff_filespec_is_binary(o->repo, two))) { emit_diff_symbol(o, DIFF_SYMBOL_HEADER, header.buf, header.len, 0); strbuf_reset(&header); emit_rewrite_diff(name_a, name_b, one, two, - textconv_one, textconv_two, o); + textconv_one, textconv_two, o); o->found_changes = 1; goto free_ab_and_return; } @@ -3398,13 +3429,13 @@ static void builtin_diff(const char *name_a, strbuf_reset(&header); goto free_ab_and_return; } else if (!o->flags.text && - ( (!textconv_one && diff_filespec_is_binary(one)) || - (!textconv_two && diff_filespec_is_binary(two)) )) { + ( (!textconv_one && diff_filespec_is_binary(o->repo, one)) || + (!textconv_two && diff_filespec_is_binary(o->repo, two)) )) { struct strbuf sb = STRBUF_INIT; if (!one->data && !two->data && S_ISREG(one->mode) && S_ISREG(two->mode) && !o->flags.binary) { - if (!oidcmp(&one->oid, &two->oid)) { + if (oideq(&one->oid, &two->oid)) { if (must_show_header) emit_diff_symbol(o, DIFF_SYMBOL_HEADER, header.buf, header.len, @@ -3420,7 +3451,8 @@ static void builtin_diff(const char *name_a, strbuf_release(&sb); goto free_ab_and_return; } - if (fill_mmfile(&mf1, one) < 0 || fill_mmfile(&mf2, two) < 0) + if (fill_mmfile(o->repo, &mf1, one) < 0 || + fill_mmfile(o->repo, &mf2, two) < 0) die("unable to read files to diff"); /* Quite common confusing case */ if (mf1.size == mf2.size && @@ -3457,12 +3489,12 @@ static void builtin_diff(const char *name_a, strbuf_reset(&header); } - mf1.size = fill_textconv(textconv_one, one, &mf1.ptr); - mf2.size = fill_textconv(textconv_two, two, &mf2.ptr); + mf1.size = fill_textconv(o->repo, textconv_one, one, &mf1.ptr); + mf2.size = fill_textconv(o->repo, textconv_two, two, &mf2.ptr); - pe = diff_funcname_pattern(one); + pe = diff_funcname_pattern(o, one); if (!pe) - pe = diff_funcname_pattern(two); + pe = diff_funcname_pattern(o, two); memset(&xpp, 0, sizeof(xpp)); memset(&xecfg, 0, sizeof(xecfg)); @@ -3471,7 +3503,7 @@ static void builtin_diff(const char *name_a, lbl[0] = NULL; ecbdata.label_path = lbl; ecbdata.color_diff = want_color(o->use_color); - ecbdata.ws_rule = whitespace_rule(name_b); + ecbdata.ws_rule = whitespace_rule(o->repo->index, name_b); if (ecbdata.ws_rule & WS_BLANK_AT_EOF) check_blank_at_eof(&mf1, &mf2, &ecbdata); ecbdata.opt = o; @@ -3495,8 +3527,8 @@ static void builtin_diff(const char *name_a, xecfg.ctxlen = strtoul(v, NULL, 10); if (o->word_diff) 
init_diff_words_data(&ecbdata, o, one, two); - if (xdi_diff_outf(&mf1, &mf2, fn_out_consume, &ecbdata, - &xpp, &xecfg)) + if (xdi_diff_outf(&mf1, &mf2, NULL, fn_out_consume, + &ecbdata, &xpp, &xecfg)) die("unable to generate diff for %s", one->path); if (o->word_diff) free_diff_words_data(&ecbdata); @@ -3569,22 +3601,23 @@ static void builtin_diffstat(const char *name_a, const char *name_b, return; } - same_contents = !oidcmp(&one->oid, &two->oid); + same_contents = oideq(&one->oid, &two->oid); - if (diff_filespec_is_binary(one) || diff_filespec_is_binary(two)) { + if (diff_filespec_is_binary(o->repo, one) || + diff_filespec_is_binary(o->repo, two)) { data->is_binary = 1; if (same_contents) { data->added = 0; data->deleted = 0; } else { - data->added = diff_filespec_size(two); - data->deleted = diff_filespec_size(one); + data->added = diff_filespec_size(o->repo, two); + data->deleted = diff_filespec_size(o->repo, one); } } else if (complete_rewrite) { - diff_populate_filespec(one, 0); - diff_populate_filespec(two, 0); + diff_populate_filespec(o->repo, one, 0); + diff_populate_filespec(o->repo, two, 0); data->deleted = count_lines(one->data, one->size); data->added = count_lines(two->data, two->size); } @@ -3594,7 +3627,8 @@ static void builtin_diffstat(const char *name_a, const char *name_b, xpparam_t xpp; xdemitconf_t xecfg; - if (fill_mmfile(&mf1, one) < 0 || fill_mmfile(&mf2, two) < 0) + if (fill_mmfile(o->repo, &mf1, one) < 0 || + fill_mmfile(o->repo, &mf2, two) < 0) die("unable to read files to diff"); memset(&xpp, 0, sizeof(xpp)); @@ -3604,8 +3638,8 @@ static void builtin_diffstat(const char *name_a, const char *name_b, xpp.anchors_nr = o->anchors_nr; xecfg.ctxlen = o->context; xecfg.interhunkctxlen = o->interhunkcontext; - if (xdi_diff_outf(&mf1, &mf2, diffstat_consume, diffstat, - &xpp, &xecfg)) + if (xdi_diff_outf(&mf1, &mf2, discard_hunk_line, + diffstat_consume, diffstat, &xpp, &xecfg)) die("unable to generate diffstat for %s", one->path); } @@ -3629,10 +3663,11 @@ static void builtin_checkdiff(const char *name_a, const char *name_b, data.filename = name_b ? name_b : name_a; data.lineno = 0; data.o = o; - data.ws_rule = whitespace_rule(attr_path); - data.conflict_marker_size = ll_merge_marker_size(attr_path); + data.ws_rule = whitespace_rule(o->repo->index, attr_path); + data.conflict_marker_size = ll_merge_marker_size(o->repo->index, attr_path); - if (fill_mmfile(&mf1, one) < 0 || fill_mmfile(&mf2, two) < 0) + if (fill_mmfile(o->repo, &mf1, one) < 0 || + fill_mmfile(o->repo, &mf2, two) < 0) die("unable to read files to diff"); /* @@ -3641,7 +3676,7 @@ static void builtin_checkdiff(const char *name_a, const char *name_b, * introduced changes, and as long as the "new" side is text, we * can and should check what it introduces. */ - if (diff_filespec_is_binary(two)) + if (diff_filespec_is_binary(o->repo, two)) goto free_and_return; else { /* Crazy xdl interfaces.. */ @@ -3652,7 +3687,8 @@ static void builtin_checkdiff(const char *name_a, const char *name_b, memset(&xecfg, 0, sizeof(xecfg)); xecfg.ctxlen = 1; /* at least one context line */ xpp.flags = 0; - if (xdi_diff_outf(&mf1, &mf2, checkdiff_consume, &data, + if (xdi_diff_outf(&mf1, &mf2, checkdiff_consume_hunk, + checkdiff_consume, &data, &xpp, &xecfg)) die("unable to generate checkdiff for %s", one->path); @@ -3714,7 +3750,10 @@ void fill_filespec(struct diff_filespec *spec, const struct object_id *oid, * the work tree has that object contents, return true, so that * prepare_temp_file() does not have to inflate and extract. 
*/ -static int reuse_worktree_file(const char *name, const struct object_id *oid, int want_file) +static int reuse_worktree_file(struct index_state *istate, + const char *name, + const struct object_id *oid, + int want_file) { const struct cache_entry *ce; struct stat st; @@ -3733,7 +3772,7 @@ static int reuse_worktree_file(const char *name, const struct object_id *oid, in * by diff-cache --cached, which does read the cache before * calling us. */ - if (!active_cache) + if (!istate->cache) return 0; /* We want to avoid the working directory if our caller @@ -3752,20 +3791,20 @@ static int reuse_worktree_file(const char *name, const struct object_id *oid, in * Similarly, if we'd have to convert the file contents anyway, that * makes the optimization not worthwhile. */ - if (!want_file && would_convert_to_git(&the_index, name)) + if (!want_file && would_convert_to_git(istate, name)) return 0; len = strlen(name); - pos = cache_name_pos(name, len); + pos = index_name_pos(istate, name, len); if (pos < 0) return 0; - ce = active_cache[pos]; + ce = istate->cache[pos]; /* * This is not the sha1 we are looking for, or * unreusable because it is not a regular file. */ - if (oidcmp(oid, &ce->oid) || !S_ISREG(ce->ce_mode)) + if (!oideq(oid, &ce->oid) || !S_ISREG(ce->ce_mode)) return 0; /* @@ -3779,7 +3818,7 @@ static int reuse_worktree_file(const char *name, const struct object_id *oid, in * If ce matches the file in the work tree, we can reuse it. */ if (ce_uptodate(ce) || - (!lstat(name, &st) && !ce_match_stat(ce, &st, 0))) + (!lstat(name, &st) && !ie_match_stat(istate, ce, &st, 0))) return 1; return 0; @@ -3812,7 +3851,9 @@ static int diff_populate_gitlink(struct diff_filespec *s, int size_only) * grab the data for the blob (or file) for our own in-core comparison. * diff_filespec has data and size fields for this purpose. */ -int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) +int diff_populate_filespec(struct repository *r, + struct diff_filespec *s, + unsigned int flags) { int size_only = flags & CHECK_SIZE_ONLY; int err = 0; @@ -3839,7 +3880,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) return diff_populate_gitlink(s, size_only); if (!s->oid_valid || - reuse_worktree_file(s->path, &s->oid, 0)) { + reuse_worktree_file(r->index, s->path, &s->oid, 0)) { struct strbuf buf = STRBUF_INIT; struct stat st; int fd; @@ -3872,7 +3913,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) * point if the path requires us to run the content * conversion. 
*/ - if (size_only && !would_convert_to_git(&the_index, s->path)) + if (size_only && !would_convert_to_git(r->index, s->path)) return 0; /* @@ -3899,7 +3940,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) /* * Convert from working tree format to canonical git format */ - if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) { + if (convert_to_git(r->index, s->path, s->data, s->size, &buf, conv_flags)) { size_t size = 0; munmap(s->data, s->size); s->should_munmap = 0; @@ -3911,8 +3952,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) else { enum object_type type; if (size_only || (flags & CHECK_BINARY)) { - type = oid_object_info(the_repository, &s->oid, - &s->size); + type = oid_object_info(r, &s->oid, &s->size); if (type < 0) die("unable to read %s", oid_to_hex(&s->oid)); @@ -3950,7 +3990,8 @@ void diff_free_filespec_data(struct diff_filespec *s) FREE_AND_NULL(s->cnt_data); } -static void prep_temp_blob(const char *path, struct diff_tempfile *temp, +static void prep_temp_blob(struct index_state *istate, + const char *path, struct diff_tempfile *temp, void *blob, unsigned long size, const struct object_id *oid, @@ -3968,7 +4009,7 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp, temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1); if (!temp->tempfile) die_errno("unable to create temp-file"); - if (convert_to_working_tree(&the_index, path, + if (convert_to_working_tree(istate, path, (const char *)blob, (size_t)size, &buf)) { blob = buf.buf; size = buf.len; @@ -3984,8 +4025,9 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp, free(path_dup); } -static struct diff_tempfile *prepare_temp_file(const char *name, - struct diff_filespec *one) +static struct diff_tempfile *prepare_temp_file(struct repository *r, + const char *name, + struct diff_filespec *one) { struct diff_tempfile *temp = claim_diff_tempfile(); @@ -4002,7 +4044,7 @@ static struct diff_tempfile *prepare_temp_file(const char *name, if (!S_ISGITLINK(one->mode) && (!one->oid_valid || - reuse_worktree_file(name, &one->oid, 1))) { + reuse_worktree_file(r->index, name, &one->oid, 1))) { struct stat st; if (lstat(name, &st) < 0) { if (errno == ENOENT) @@ -4013,7 +4055,7 @@ static struct diff_tempfile *prepare_temp_file(const char *name, struct strbuf sb = STRBUF_INIT; if (strbuf_readlink(&sb, name, st.st_size) < 0) die_errno("readlink(%s)", name); - prep_temp_blob(name, temp, sb.buf, sb.len, + prep_temp_blob(r->index, name, temp, sb.buf, sb.len, (one->oid_valid ? &one->oid : &null_oid), (one->oid_valid ? 
@@ -4038,19 +4080,21 @@ static struct diff_tempfile *prepare_temp_file(const char *name, return temp; } else { - if (diff_populate_filespec(one, 0)) + if (diff_populate_filespec(r, one, 0)) die("cannot read data blob for %s", one->path); - prep_temp_blob(name, temp, one->data, one->size, + prep_temp_blob(r->index, name, temp, + one->data, one->size, &one->oid, one->mode); } return temp; } -static void add_external_diff_name(struct argv_array *argv, +static void add_external_diff_name(struct repository *r, + struct argv_array *argv, const char *name, struct diff_filespec *df) { - struct diff_tempfile *temp = prepare_temp_file(name, df); + struct diff_tempfile *temp = prepare_temp_file(r, name, df); argv_array_push(argv, temp->name); argv_array_push(argv, temp->hex); argv_array_push(argv, temp->mode); @@ -4079,11 +4123,11 @@ static void run_external_diff(const char *pgm, argv_array_push(&argv, name); if (one && two) { - add_external_diff_name(&argv, name, one); + add_external_diff_name(o->repo, &argv, name, one); if (!other) - add_external_diff_name(&argv, name, two); + add_external_diff_name(o->repo, &argv, name, two); else { - add_external_diff_name(&argv, other, two); + add_external_diff_name(o->repo, &argv, other, two); argv_array_push(&argv, other); argv_array_push(&argv, xfrm_msg); } @@ -4170,14 +4214,16 @@ static void fill_metainfo(struct strbuf *msg, default: *must_show_header = 0; } - if (one && two && oidcmp(&one->oid, &two->oid)) { + if (one && two && !oideq(&one->oid, &two->oid)) { const unsigned hexsz = the_hash_algo->hexsz; int abbrev = o->flags.full_index ? hexsz : DEFAULT_ABBREV; if (o->flags.binary) { mmfile_t mf; - if ((!fill_mmfile(&mf, one) && diff_filespec_is_binary(one)) || - (!fill_mmfile(&mf, two) && diff_filespec_is_binary(two))) + if ((!fill_mmfile(o->repo, &mf, one) && + diff_filespec_is_binary(o->repo, one)) || + (!fill_mmfile(o->repo, &mf, two) && + diff_filespec_is_binary(o->repo, two))) abbrev = hexsz; } strbuf_addf(msg, "%s%sindex %s..%s", line_prefix, set, @@ -4205,7 +4251,9 @@ static void run_diff_cmd(const char *pgm, if (o->flags.allow_external) { - struct userdiff_driver *drv = userdiff_find_by_path(attr_path); + struct userdiff_driver *drv; + + drv = userdiff_find_by_path(o->repo->index, attr_path); if (drv && drv->external) pgm = drv->external; } @@ -4234,7 +4282,7 @@ static void run_diff_cmd(const char *pgm, fprintf(o->file, "* Unmerged path %s\n", name); } -static void diff_fill_oid_info(struct diff_filespec *one) +static void diff_fill_oid_info(struct diff_filespec *one, struct index_state *istate) { if (DIFF_FILE_VALID(one)) { if (!one->oid_valid) { @@ -4245,7 +4293,7 @@ static void diff_fill_oid_info(struct diff_filespec *one) } if (lstat(one->path, &st) < 0) die_errno("stat '%s'", one->path); - if (index_path(&one->oid, one->path, &st, 0)) + if (index_path(istate, &one->oid, one->path, &st, 0)) die("cannot hash %s", one->path); } } @@ -4293,8 +4341,8 @@ static void run_diff(struct diff_filepair *p, struct diff_options *o) return; } - diff_fill_oid_info(one); - diff_fill_oid_info(two); + diff_fill_oid_info(one, o->repo->index); + diff_fill_oid_info(two, o->repo->index); if (!pgm && DIFF_FILE_VALID(one) && DIFF_FILE_VALID(two) && @@ -4305,7 +4353,8 @@ static void run_diff(struct diff_filepair *p, struct diff_options *o) */ struct diff_filespec *null = alloc_filespec(two->path); run_diff_cmd(NULL, name, other, attr_path, - one, null, &msg, o, p); + one, null, &msg, + o, p); free(null); strbuf_release(&msg); @@ -4329,7 +4378,8 @@ static void 
run_diffstat(struct diff_filepair *p, struct diff_options *o, if (DIFF_PAIR_UNMERGED(p)) { /* unmerged */ - builtin_diffstat(p->one->path, NULL, NULL, NULL, diffstat, o, p); + builtin_diffstat(p->one->path, NULL, NULL, NULL, + diffstat, o, p); return; } @@ -4339,10 +4389,11 @@ static void run_diffstat(struct diff_filepair *p, struct diff_options *o, if (o->prefix_length) strip_prefix(o->prefix_length, &name, &other); - diff_fill_oid_info(p->one); - diff_fill_oid_info(p->two); + diff_fill_oid_info(p->one, o->repo->index); + diff_fill_oid_info(p->two, o->repo->index); - builtin_diffstat(name, other, p->one, p->two, diffstat, o, p); + builtin_diffstat(name, other, p->one, p->two, + diffstat, o, p); } static void run_checkdiff(struct diff_filepair *p, struct diff_options *o) @@ -4363,18 +4414,22 @@ static void run_checkdiff(struct diff_filepair *p, struct diff_options *o) if (o->prefix_length) strip_prefix(o->prefix_length, &name, &other); - diff_fill_oid_info(p->one); - diff_fill_oid_info(p->two); + diff_fill_oid_info(p->one, o->repo->index); + diff_fill_oid_info(p->two, o->repo->index); builtin_checkdiff(name, other, attr_path, p->one, p->two, o); } -void diff_setup(struct diff_options *options) +void repo_diff_setup(struct repository *r, struct diff_options *options) { memcpy(options, &default_diff_options, sizeof(*options)); options->file = stdout; + options->repo = r; + options->output_indicators[OUTPUT_INDICATOR_NEW] = '+'; + options->output_indicators[OUTPUT_INDICATOR_OLD] = '-'; + options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = ' '; options->abbrev = DEFAULT_ABBREV; options->line_termination = '\n'; options->break_opt = -1; @@ -4852,6 +4907,12 @@ int diff_opt_parse(struct diff_options *options, options->output_format |= DIFF_FORMAT_DIFFSTAT; } else if (!strcmp(arg, "--no-compact-summary")) options->flags.stat_with_summary = 0; + else if (skip_prefix(arg, "--output-indicator-new=", &arg)) + options->output_indicators[OUTPUT_INDICATOR_NEW] = arg[0]; + else if (skip_prefix(arg, "--output-indicator-old=", &arg)) + options->output_indicators[OUTPUT_INDICATOR_OLD] = arg[0]; + else if (skip_prefix(arg, "--output-indicator-context=", &arg)) + options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = arg[0]; /* renames options */ else if (starts_with(arg, "-B") || @@ -5323,7 +5384,7 @@ int diff_unmodified_pair(struct diff_filepair *p) * dealing with a change. 
*/ if (one->oid_valid && two->oid_valid && - !oidcmp(&one->oid, &two->oid) && + oideq(&one->oid, &two->oid) && !one->dirty_submodule && !two->dirty_submodule) return 1; /* no change */ if (!one->oid_valid && !two->oid_valid) @@ -5457,7 +5518,7 @@ static void diff_resolve_rename_copy(void) else p->status = DIFF_STATUS_RENAMED; } - else if (oidcmp(&p->one->oid, &p->two->oid) || + else if (!oideq(&p->one->oid, &p->two->oid) || p->one->mode != p->two->mode || p->one->dirty_submodule || p->two->dirty_submodule || @@ -5607,10 +5668,6 @@ static void patch_id_consume(void *priv, char *line, unsigned long len) struct patch_id_t *data = priv; int new_len; - /* Ignore line numbers when computing the SHA1 of the patch */ - if (starts_with(line, "@@ -")) - return; - new_len = remove_space(line, len); git_SHA1_Update(data->ctx, line, new_len); @@ -5663,8 +5720,8 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid if (DIFF_PAIR_UNMERGED(p)) continue; - diff_fill_oid_info(p->one); - diff_fill_oid_info(p->two); + diff_fill_oid_info(p->one, options->repo->index); + diff_fill_oid_info(p->two, options->repo->index); len1 = remove_space(p->one->path, strlen(p->one->path)); len2 = remove_space(p->two->path, strlen(p->two->path)); @@ -5696,12 +5753,12 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid if (diff_header_only) continue; - if (fill_mmfile(&mf1, p->one) < 0 || - fill_mmfile(&mf2, p->two) < 0) + if (fill_mmfile(options->repo, &mf1, p->one) < 0 || + fill_mmfile(options->repo, &mf2, p->two) < 0) return error("unable to read files to diff"); - if (diff_filespec_is_binary(p->one) || - diff_filespec_is_binary(p->two)) { + if (diff_filespec_is_binary(options->repo, p->one) || + diff_filespec_is_binary(options->repo, p->two)) { git_SHA1_Update(&ctx, oid_to_hex(&p->one->oid), GIT_SHA1_HEXSZ); git_SHA1_Update(&ctx, oid_to_hex(&p->two->oid), @@ -5712,8 +5769,8 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid xpp.flags = 0; xecfg.ctxlen = 3; xecfg.flags = 0; - if (xdi_diff_outf(&mf1, &mf2, patch_id_consume, &data, - &xpp, &xecfg)) + if (xdi_diff_outf(&mf1, &mf2, discard_hunk_line, + patch_id_consume, &data, &xpp, &xecfg)) return error("unable to generate patch-id diff for %s", p->one->path); } @@ -5819,8 +5876,8 @@ static void diff_flush_patch_all_file_pairs(struct diff_options *o) if (o->color_moved == COLOR_MOVED_ZEBRA_DIM) dim_moved_lines(o); - hashmap_free(&add_lines, 0); - hashmap_free(&del_lines, 0); + hashmap_free(&add_lines, 1); + hashmap_free(&del_lines, 1); } for (i = 0; i < esm.nr; i++) @@ -6004,19 +6061,21 @@ static void diffcore_apply_filter(struct diff_options *options) } /* Check whether two filespecs with the same mode and size are identical */ -static int diff_filespec_is_identical(struct diff_filespec *one, +static int diff_filespec_is_identical(struct repository *r, + struct diff_filespec *one, struct diff_filespec *two) { if (S_ISGITLINK(one->mode)) return 0; - if (diff_populate_filespec(one, 0)) + if (diff_populate_filespec(r, one, 0)) return 0; - if (diff_populate_filespec(two, 0)) + if (diff_populate_filespec(r, two, 0)) return 0; return !memcmp(one->data, two->data, one->size); } -static int diff_filespec_check_stat_unmatch(struct diff_filepair *p) +static int diff_filespec_check_stat_unmatch(struct repository *r, + struct diff_filepair *p) { if (p->done_skip_stat_unmatch) return p->skip_stat_unmatch_result; @@ -6040,10 +6099,10 @@ static int diff_filespec_check_stat_unmatch(struct 
diff_filepair *p) !DIFF_FILE_VALID(p->two) || (p->one->oid_valid && p->two->oid_valid) || (p->one->mode != p->two->mode) || - diff_populate_filespec(p->one, CHECK_SIZE_ONLY) || - diff_populate_filespec(p->two, CHECK_SIZE_ONLY) || + diff_populate_filespec(r, p->one, CHECK_SIZE_ONLY) || + diff_populate_filespec(r, p->two, CHECK_SIZE_ONLY) || (p->one->size != p->two->size) || - !diff_filespec_is_identical(p->one, p->two)) /* (2) */ + !diff_filespec_is_identical(r, p->one, p->two)) /* (2) */ p->skip_stat_unmatch_result = 1; return p->skip_stat_unmatch_result; } @@ -6058,7 +6117,7 @@ static void diffcore_skip_stat_unmatch(struct diff_options *diffopt) for (i = 0; i < q->nr; i++) { struct diff_filepair *p = q->queue[i]; - if (diff_filespec_check_stat_unmatch(p)) + if (diff_filespec_check_stat_unmatch(diffopt->repo, p)) diff_q(&outq, p); else { /* @@ -6100,7 +6159,8 @@ void diffcore_std(struct diff_options *options) if (!options->found_follow) { /* See try_to_follow_renames() in tree-diff.c */ if (options->break_opt != -1) - diffcore_break(options->break_opt); + diffcore_break(options->repo, + options->break_opt); if (options->detect_rename) diffcore_rename(options); if (options->break_opt != -1) @@ -6251,7 +6311,7 @@ void diff_change(struct diff_options *options, return; if (options->flags.quick && options->skip_stat_unmatch && - !diff_filespec_check_stat_unmatch(p)) + !diff_filespec_check_stat_unmatch(options->repo, p)) return; options->flags.has_changes = 1; @@ -6273,8 +6333,10 @@ struct diff_filepair *diff_unmerge(struct diff_options *options, const char *pat return pair; } -static char *run_textconv(const char *pgm, struct diff_filespec *spec, - size_t *outsize) +static char *run_textconv(struct repository *r, + const char *pgm, + struct diff_filespec *spec, + size_t *outsize) { struct diff_tempfile *temp; const char *argv[3]; @@ -6283,7 +6345,7 @@ static char *run_textconv(const char *pgm, struct diff_filespec *spec, struct strbuf buf = STRBUF_INIT; int err = 0; - temp = prepare_temp_file(spec->path, spec); + temp = prepare_temp_file(r, spec->path, spec); *arg++ = pgm; *arg++ = temp->name; *arg = NULL; @@ -6310,7 +6372,8 @@ static char *run_textconv(const char *pgm, struct diff_filespec *spec, return strbuf_detach(&buf, outsize); } -size_t fill_textconv(struct userdiff_driver *driver, +size_t fill_textconv(struct repository *r, + struct userdiff_driver *driver, struct diff_filespec *df, char **outbuf) { @@ -6321,7 +6384,7 @@ size_t fill_textconv(struct userdiff_driver *driver, *outbuf = ""; return 0; } - if (diff_populate_filespec(df, 0)) + if (diff_populate_filespec(r, df, 0)) die("unable to read files to diff"); *outbuf = df->data; return df->size; @@ -6338,7 +6401,7 @@ size_t fill_textconv(struct userdiff_driver *driver, return size; } - *outbuf = run_textconv(driver->textconv, df, &size); + *outbuf = run_textconv(r, driver->textconv, df, &size); if (!*outbuf) die("unable to read files to diff"); @@ -6358,7 +6421,8 @@ size_t fill_textconv(struct userdiff_driver *driver, return size; } -int textconv_object(const char *path, +int textconv_object(struct repository *r, + const char *path, unsigned mode, const struct object_id *oid, int oid_valid, @@ -6370,13 +6434,13 @@ int textconv_object(const char *path, df = alloc_filespec(path); fill_filespec(df, oid, oid_valid, mode); - textconv = get_textconv(df); + textconv = get_textconv(r->index, df); if (!textconv) { free_filespec(df); return 0; } - *buf_size = fill_textconv(textconv, df, buf); + *buf_size = fill_textconv(r, textconv, df, buf); 
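
The change running through this file, including textconv_object() just above, is mechanical but deliberate: helpers such as get_textconv(), fill_textconv() and diff_populate_filespec() stop reaching for the_index/the_repository and instead operate on the repository or index_state passed to them, while the diff_setup() macro in the header hunk below keeps old call sites compiling against the_repository. The sketch below shows that pattern in isolation; every struct, function and macro name in it is an invented stand-in, not a Git API.

/*
 * Standalone sketch (not Git code) of the pattern used in this patch:
 * a function that used to consult a global now takes the repository
 * it should work on, and a transitional macro keeps old callers
 * compiling against the default instance.  All names are stand-ins
 * for Git's struct repository / the_repository.
 */
#include <stdio.h>

struct example_repository {
	const char *gitdir;
	int index_entries;
};

static struct example_repository the_main_repo = { ".git", 3 };

/* new-style API: the caller says which repository to use */
static int repo_count_index_entries(struct example_repository *r)
{
	return r->index_entries;
}

/* transitional wrapper, analogous to diff_setup() -> repo_diff_setup() */
#define count_index_entries() repo_count_index_entries(&the_main_repo)

int main(void)
{
	struct example_repository submodule = { "sub/.git", 7 };

	printf("%d\n", count_index_entries());               /* old-style call site */
	printf("%d\n", repo_count_index_entries(&submodule)); /* explicit repository */
	return 0;
}

Threading the repository through explicitly is what lets the same diff code later operate on something other than the main repository (for example a submodule's repository or an alternate index) without juggling globals.
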
free_filespec(df); return 1; } @@ -18,6 +18,7 @@ struct userdiff_driver; struct oid_array; struct commit; struct combine_diff_path; +struct repository; typedef int (*pathchange_fn_t)(struct diff_options *options, struct combine_diff_path *path); @@ -194,6 +195,11 @@ struct diff_options { FILE *file; int close_file; +#define OUTPUT_INDICATOR_NEW 0 +#define OUTPUT_INDICATOR_OLD 1 +#define OUTPUT_INDICATOR_CONTEXT 2 + char output_indicators[3]; + struct pathspec pathspec; pathchange_fn_t pathchange; change_fn_t change; @@ -220,6 +226,8 @@ struct diff_options { /* XDF_WHITESPACE_FLAGS regarding block detection are set at 2, 3, 4 */ #define COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE (1<<5) int color_moved_ws_handling; + + struct repository *repo; }; void diff_emit_submodule_del(struct diff_options *o, const char *line); @@ -333,7 +341,10 @@ int git_diff_basic_config(const char *var, const char *value, void *cb); int git_diff_heuristic_config(const char *var, const char *value, void *cb); void init_diff_ui_defaults(void); int git_diff_ui_config(const char *var, const char *value, void *cb); -void diff_setup(struct diff_options *); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define diff_setup(diffopts) repo_diff_setup(the_repository, diffopts) +#endif +void repo_diff_setup(struct repository *, struct diff_options *); int diff_opt_parse(struct diff_options *, const char **, int, const char *); void diff_setup_done(struct diff_options *); int git_config_rename(const char *var, const char *value); @@ -423,7 +434,7 @@ int diff_flush_patch_id(struct diff_options *, struct object_id *, int); int diff_result_code(struct diff_options *, int); -void diff_no_index(struct rev_info *, int, const char **); +void diff_no_index(struct repository *, struct rev_info *, int, const char **); int index_differs_from(const char *def, const struct diff_flags *flags, int ita_invisible_in_index); @@ -439,7 +450,8 @@ int index_differs_from(const char *def, const struct diff_flags *flags, * struct. If it is non-NULL, then "outbuf" points to a newly allocated buffer * that should be freed by the caller. */ -size_t fill_textconv(struct userdiff_driver *driver, +size_t fill_textconv(struct repository *r, + struct userdiff_driver *driver, struct diff_filespec *df, char **outbuf); @@ -448,14 +460,19 @@ size_t fill_textconv(struct userdiff_driver *driver, * and only if it has textconv enabled (otherwise return NULL). The result * can be passed to fill_textconv(). */ -struct userdiff_driver *get_textconv(struct diff_filespec *one); +struct userdiff_driver *get_textconv(struct index_state *istate, + struct diff_filespec *one); /* * Prepare diff_filespec and convert it using diff textconv API * if the textconv driver exists. * Return 1 if the conversion succeeds, 0 otherwise. 
*/ -int textconv_object(const char *path, unsigned mode, const struct object_id *oid, int oid_valid, char **buf, unsigned long *buf_size); +int textconv_object(struct repository *repo, + const char *path, + unsigned mode, + const struct object_id *oid, int oid_valid, + char **buf, unsigned long *buf_size); int parse_rename_score(const char **cp_p); diff --git a/diffcore-break.c b/diffcore-break.c index c64359f489..875aefd3fe 100644 --- a/diffcore-break.c +++ b/diffcore-break.c @@ -5,7 +5,8 @@ #include "diff.h" #include "diffcore.h" -static int should_break(struct diff_filespec *src, +static int should_break(struct repository *r, + struct diff_filespec *src, struct diff_filespec *dst, int break_score, int *merge_score_p) @@ -58,10 +59,11 @@ static int should_break(struct diff_filespec *src, } if (src->oid_valid && dst->oid_valid && - !oidcmp(&src->oid, &dst->oid)) + oideq(&src->oid, &dst->oid)) return 0; /* they are the same */ - if (diff_populate_filespec(src, 0) || diff_populate_filespec(dst, 0)) + if (diff_populate_filespec(r, src, 0) || + diff_populate_filespec(r, dst, 0)) return 0; /* error but caught downstream */ max_size = ((src->size > dst->size) ? src->size : dst->size); @@ -71,7 +73,7 @@ static int should_break(struct diff_filespec *src, if (!src->size) return 0; /* we do not let empty files get renamed */ - if (diffcore_count_changes(src, dst, + if (diffcore_count_changes(r, src, dst, &src->cnt_data, &dst->cnt_data, &src_copied, &literal_added)) return 0; @@ -114,7 +116,7 @@ static int should_break(struct diff_filespec *src, return 1; } -void diffcore_break(int break_score) +void diffcore_break(struct repository *r, int break_score) { struct diff_queue_struct *q = &diff_queued_diff; struct diff_queue_struct outq; @@ -178,7 +180,7 @@ void diffcore_break(int break_score) object_type(p->one->mode) == OBJ_BLOB && object_type(p->two->mode) == OBJ_BLOB && !strcmp(p->one->path, p->two->path)) { - if (should_break(p->one, p->two, + if (should_break(r, p->one, p->two, break_score, &score)) { /* Split this into delete and create */ struct diff_filespec *null_one, *null_two; diff --git a/diffcore-delta.c b/diffcore-delta.c index c83d45a047..5668ace60d 100644 --- a/diffcore-delta.c +++ b/diffcore-delta.c @@ -121,14 +121,15 @@ static int spanhash_cmp(const void *a_, const void *b_) a->hashval > b->hashval ? 
1 : 0; } -static struct spanhash_top *hash_chars(struct diff_filespec *one) +static struct spanhash_top *hash_chars(struct repository *r, + struct diff_filespec *one) { int i, n; unsigned int accum1, accum2, hashval; struct spanhash_top *hash; unsigned char *buf = one->data; unsigned int sz = one->size; - int is_text = !diff_filespec_is_binary(one); + int is_text = !diff_filespec_is_binary(r, one); i = INITIAL_HASH_SIZE; hash = xmalloc(st_add(sizeof(*hash), @@ -162,7 +163,8 @@ static struct spanhash_top *hash_chars(struct diff_filespec *one) return hash; } -int diffcore_count_changes(struct diff_filespec *src, +int diffcore_count_changes(struct repository *r, + struct diff_filespec *src, struct diff_filespec *dst, void **src_count_p, void **dst_count_p, @@ -177,14 +179,14 @@ int diffcore_count_changes(struct diff_filespec *src, if (src_count_p) src_count = *src_count_p; if (!src_count) { - src_count = hash_chars(src); + src_count = hash_chars(r, src); if (src_count_p) *src_count_p = src_count; } if (dst_count_p) dst_count = *dst_count_p; if (!dst_count) { - dst_count = hash_chars(dst); + dst_count = hash_chars(r, dst); if (dst_count_p) *dst_count_p = dst_count; } diff --git a/diffcore-pickaxe.c b/diffcore-pickaxe.c index 800a899c86..69fc55ea1e 100644 --- a/diffcore-pickaxe.c +++ b/diffcore-pickaxe.c @@ -62,7 +62,8 @@ static int diff_grep(mmfile_t *one, mmfile_t *two, ecbdata.hit = 0; xecfg.ctxlen = o->context; xecfg.interhunkctxlen = o->interhunkcontext; - if (xdi_diff_outf(one, two, diffgrep_consume, &ecbdata, &xpp, &xecfg)) + if (xdi_diff_outf(one, two, discard_hunk_line, diffgrep_consume, + &ecbdata, &xpp, &xecfg)) return 0; return ecbdata.hit; } @@ -139,8 +140,8 @@ static int pickaxe_match(struct diff_filepair *p, struct diff_options *o, return 0; if (o->flags.allow_textconv) { - textconv_one = get_textconv(p->one); - textconv_two = get_textconv(p->two); + textconv_one = get_textconv(o->repo->index, p->one); + textconv_two = get_textconv(o->repo->index, p->two); } /* @@ -153,8 +154,8 @@ static int pickaxe_match(struct diff_filepair *p, struct diff_options *o, if (textconv_one == textconv_two && diff_unmodified_pair(p)) return 0; - mf1.size = fill_textconv(textconv_one, p->one, &mf1.ptr); - mf2.size = fill_textconv(textconv_two, p->two, &mf2.ptr); + mf1.size = fill_textconv(o->repo, textconv_one, p->one, &mf1.ptr); + mf2.size = fill_textconv(o->repo, textconv_two, p->two, &mf2.ptr); ret = fn(DIFF_FILE_VALID(p->one) ? &mf1 : NULL, DIFF_FILE_VALID(p->two) ? &mf2 : NULL, diff --git a/diffcore-rename.c b/diffcore-rename.c index d775183c2f..07bd34b631 100644 --- a/diffcore-rename.c +++ b/diffcore-rename.c @@ -128,7 +128,8 @@ struct diff_score { short name_score; }; -static int estimate_similarity(struct diff_filespec *src, +static int estimate_similarity(struct repository *r, + struct diff_filespec *src, struct diff_filespec *dst, int minimum_score) { @@ -165,10 +166,10 @@ static int estimate_similarity(struct diff_filespec *src, * say whether the size is valid or not!) */ if (!src->cnt_data && - diff_populate_filespec(src, CHECK_SIZE_ONLY)) + diff_populate_filespec(r, src, CHECK_SIZE_ONLY)) return 0; if (!dst->cnt_data && - diff_populate_filespec(dst, CHECK_SIZE_ONLY)) + diff_populate_filespec(r, dst, CHECK_SIZE_ONLY)) return 0; max_size = ((src->size > dst->size) ? 
src->size : dst->size); @@ -186,12 +187,12 @@ static int estimate_similarity(struct diff_filespec *src, if (max_size * (MAX_SCORE-minimum_score) < delta_size * MAX_SCORE) return 0; - if (!src->cnt_data && diff_populate_filespec(src, 0)) + if (!src->cnt_data && diff_populate_filespec(r, src, 0)) return 0; - if (!dst->cnt_data && diff_populate_filespec(dst, 0)) + if (!dst->cnt_data && diff_populate_filespec(r, dst, 0)) return 0; - if (diffcore_count_changes(src, dst, + if (diffcore_count_changes(r, src, dst, &src->cnt_data, &dst->cnt_data, &src_copied, &literal_added)) return 0; @@ -256,10 +257,11 @@ struct file_similarity { struct diff_filespec *filespec; }; -static unsigned int hash_filespec(struct diff_filespec *filespec) +static unsigned int hash_filespec(struct repository *r, + struct diff_filespec *filespec) { if (!filespec->oid_valid) { - if (diff_populate_filespec(filespec, 0)) + if (diff_populate_filespec(r, filespec, 0)) return 0; hash_object_file(filespec->data, filespec->size, "blob", &filespec->oid); @@ -280,13 +282,15 @@ static int find_identical_files(struct hashmap *srcs, /* * Find the best source match for specified destination. */ - p = hashmap_get_from_hash(srcs, hash_filespec(target), NULL); + p = hashmap_get_from_hash(srcs, + hash_filespec(options->repo, target), + NULL); for (; p; p = hashmap_get_next(srcs, p)) { int score; struct diff_filespec *source = p->filespec; /* False hash collision? */ - if (oidcmp(&source->oid, &target->oid)) + if (!oideq(&source->oid, &target->oid)) continue; /* Non-regular files? If so, the modes must match! */ if (!S_ISREG(source->mode) || !S_ISREG(target->mode)) { @@ -316,14 +320,16 @@ static int find_identical_files(struct hashmap *srcs, return renames; } -static void insert_file_table(struct hashmap *table, int index, struct diff_filespec *filespec) +static void insert_file_table(struct repository *r, + struct hashmap *table, int index, + struct diff_filespec *filespec) { struct file_similarity *entry = xmalloc(sizeof(*entry)); entry->index = index; entry->filespec = filespec; - hashmap_entry_init(entry, hash_filespec(filespec)); + hashmap_entry_init(entry, hash_filespec(r, filespec)); hashmap_add(table, entry); } @@ -344,7 +350,9 @@ static int find_exact_renames(struct diff_options *options) */ hashmap_init(&file_table, NULL, NULL, rename_src_nr); for (i = rename_src_nr-1; i >= 0; i--) - insert_file_table(&file_table, i, rename_src[i].p->one); + insert_file_table(options->repo, + &file_table, i, + rename_src[i].p->one); /* Walk the destinations and find best source match */ for (i = 0; i < rename_dst_nr; i++) @@ -557,7 +565,8 @@ void diffcore_rename(struct diff_options *options) diff_unmodified_pair(rename_src[j].p)) continue; - this_src.score = estimate_similarity(one, two, + this_src.score = estimate_similarity(options->repo, + one, two, minimum_score); this_src.name_score = basename_same(one, two); this_src.dst = i; diff --git a/diffcore.h b/diffcore.h index 8d81a45f51..b651061c0e 100644 --- a/diffcore.h +++ b/diffcore.h @@ -7,6 +7,8 @@ #include "cache.h" struct diff_options; +struct repository; +struct userdiff_driver; /* This header file is internal between diff.c and its diff transformers * (e.g. diffcore-rename, diffcore-pickaxe). 
Never include this header @@ -26,8 +28,6 @@ struct diff_options; #define MINIMUM_BREAK_SIZE 400 /* do not break a file smaller than this */ -struct userdiff_driver; - struct diff_filespec { struct object_id oid; char *path; @@ -61,10 +61,10 @@ void fill_filespec(struct diff_filespec *, const struct object_id *, #define CHECK_SIZE_ONLY 1 #define CHECK_BINARY 2 -int diff_populate_filespec(struct diff_filespec *, unsigned int); +int diff_populate_filespec(struct repository *, struct diff_filespec *, unsigned int); void diff_free_filespec_data(struct diff_filespec *); void diff_free_filespec_blob(struct diff_filespec *); -int diff_filespec_is_binary(struct diff_filespec *); +int diff_filespec_is_binary(struct repository *, struct diff_filespec *); struct diff_filepair { struct diff_filespec *one; @@ -111,7 +111,7 @@ struct diff_filepair *diff_queue(struct diff_queue_struct *, struct diff_filespec *); void diff_q(struct diff_queue_struct *, struct diff_filepair *); -void diffcore_break(int); +void diffcore_break(struct repository *, int); void diffcore_rename(struct diff_options *); void diffcore_merge_broken(void); void diffcore_pickaxe(struct diff_options *); @@ -142,7 +142,8 @@ void diff_debug_queue(const char *, struct diff_queue_struct *); #define diff_debug_queue(a,b) do { /* nothing */ } while (0) #endif -int diffcore_count_changes(struct diff_filespec *src, +int diffcore_count_changes(struct repository *r, + struct diff_filespec *src, struct diff_filespec *dst, void **src_count_p, void **dst_count_p, @@ -281,8 +281,15 @@ static int match_attrs(const struct index_state *istate, const struct pathspec_item *item) { int i; + char *to_free = NULL; + + if (name[namelen]) + name = to_free = xmemdupz(name, namelen); git_check_attr(istate, name, item->attr_check); + + free(to_free); + for (i = 0; i < item->attr_match_nr; i++) { const char *value; int matched; @@ -1282,7 +1289,7 @@ static void prep_exclude(struct dir_struct *dir, * order, though, if you do that. 
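
As an aside to the diffcore hunks above: they all make the same mechanical change, threading an explicit struct repository * through entry points such as diff_populate_filespec(), diff_filespec_is_binary() and diffcore_count_changes() instead of letting them assume the global the_repository, with callers reaching the repository through their diff_options. Below is a minimal sketch of the resulting calling convention, using the prototypes declared in the diffcore.h hunk above; pair_has_binary_side() is a hypothetical helper, not code from the patch:

    #include "diff.h"      /* struct diff_options */
    #include "diffcore.h"  /* struct diff_filepair, diff_filespec_is_binary() */

    /*
     * Hypothetical caller illustrating the new convention: the repository
     * is taken from the diff_options the caller was handed, not from a
     * global, so the same diffcore code can serve any repository object.
     */
    static int pair_has_binary_side(struct diff_options *o,
                                    struct diff_filepair *p)
    {
            return diff_filespec_is_binary(o->repo, p->one) ||
                   diff_filespec_is_binary(o->repo, p->two);
    }

Passing the repository explicitly is presumably what lets this code operate on repositories other than the main one (submodules, in particular) without touching global state.
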
*/ if (untracked && - oidcmp(&oid_stat.oid, &untracked->exclude_oid)) { + !oideq(&oid_stat.oid, &untracked->exclude_oid)) { invalidate_gitignore(dir->untracked, untracked); oidcpy(&untracked->exclude_oid, &oid_stat.oid); } @@ -2248,12 +2255,12 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d /* Validate $GIT_DIR/info/exclude and core.excludesfile */ root = dir->untracked->root; - if (oidcmp(&dir->ss_info_exclude.oid, + if (!oideq(&dir->ss_info_exclude.oid, &dir->untracked->ss_info_exclude.oid)) { invalidate_gitignore(dir->untracked, root); dir->untracked->ss_info_exclude = dir->ss_info_exclude; } - if (oidcmp(&dir->ss_excludes_file.oid, + if (!oideq(&dir->ss_excludes_file.oid, &dir->untracked->ss_excludes_file.oid)) { invalidate_gitignore(dir->untracked, root); dir->untracked->ss_excludes_file = dir->ss_excludes_file; @@ -2268,10 +2275,13 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, const char *path, int len, const struct pathspec *pathspec) { struct untracked_cache_dir *untracked; - uint64_t start = getnanotime(); - if (has_symlink_leading_path(path, len)) + trace_performance_enter(); + + if (has_symlink_leading_path(path, len)) { + trace_performance_leave("read directory %.*s", len, path); return dir->nr; + } untracked = validate_untracked_cache(dir, len, pathspec); if (!untracked) @@ -2307,7 +2317,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, dir->nr = i; } - trace_performance_since(start, "read directory %.*s", len, path); + trace_performance_leave("read directory %.*s", len, path); if (dir->untracked) { static int force_untracked_cache = -1; static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS); @@ -1,4 +1,5 @@ #include "cache.h" +#include "config.h" #include "strbuf.h" #include "run-command.h" #include "sigchain.h" @@ -34,10 +35,21 @@ const char *git_editor(void) return editor; } -int launch_editor(const char *path, struct strbuf *buffer, const char *const *env) +const char *git_sequence_editor(void) { - const char *editor = git_editor(); + const char *editor = getenv("GIT_SEQUENCE_EDITOR"); + + if (!editor) + git_config_get_string_const("sequence.editor", &editor); + if (!editor) + editor = git_editor(); + return editor; +} + +static int launch_specified_editor(const char *editor, const char *path, + struct strbuf *buffer, const char *const *env) +{ if (!editor) return error("Terminal is dumb, but EDITOR unset"); @@ -95,3 +107,14 @@ int launch_editor(const char *path, struct strbuf *buffer, const char *const *en return error_errno("could not read file '%s'", path); return 0; } + +int launch_editor(const char *path, struct strbuf *buffer, const char *const *env) +{ + return launch_specified_editor(git_editor(), path, buffer, env); +} + +int launch_sequence_editor(const char *path, struct strbuf *buffer, + const char *const *env) +{ + return launch_specified_editor(git_sequence_editor(), path, buffer, env); +} @@ -399,6 +399,34 @@ static int check_path(const char *path, int len, struct stat *st, int skiplen) return lstat(path, st); } +static void mark_colliding_entries(const struct checkout *state, + struct cache_entry *ce, struct stat *st) +{ + int i, trust_ino = check_stat; + +#if defined(GIT_WINDOWS_NATIVE) || defined(__CYGWIN__) + trust_ino = 0; +#endif + + ce->ce_flags |= CE_MATCHED; + + for (i = 0; i < state->istate->cache_nr; i++) { + struct cache_entry *dup = state->istate->cache[i]; + + if (dup == ce) + break; + + if (dup->ce_flags & (CE_MATCHED | CE_VALID | 
CE_SKIP_WORKTREE)) + continue; + + if ((trust_ino && !match_stat_data(&dup->ce_stat_data, st)) || + (!trust_ino && !fspathcmp(ce->name, dup->name))) { + dup->ce_flags |= CE_MATCHED; + break; + } + } +} + /* * Write the contents from ce out to the working tree. * @@ -456,6 +484,9 @@ int checkout_entry(struct cache_entry *ce, return -1; } + if (state->clone) + mark_colliding_entries(state, ce, &st); + /* * We unlink the old file, to get the new one with the * right permissions (including umask, which is nasty diff --git a/environment.c b/environment.c index 3f3c8746c2..3465597707 100644 --- a/environment.c +++ b/environment.c @@ -33,6 +33,7 @@ int ref_paranoia = -1; int repository_format_precious_objects; char *repository_format_partial_clone; const char *core_partial_clone_filter_default; +int repository_format_worktree_config; const char *git_commit_encoding; const char *git_log_output_encoding; const char *apply_default_whitespace; @@ -71,7 +72,6 @@ int core_apply_sparse_checkout; int merge_log_config = -1; int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */ unsigned long pack_size_limit_cfg; -enum hide_dotfiles_type hide_dotfiles = HIDE_DOTFILES_DOTGITONLY; enum log_refs_config log_all_ref_updates = LOG_REFS_UNSET; #ifndef PROTECT_HFS_DEFAULT diff --git a/ewah/ewok_rlw.h b/ewah/ewok_rlw.h index 7cdfdd0c02..bafa24f4c3 100644 --- a/ewah/ewok_rlw.h +++ b/ewah/ewok_rlw.h @@ -19,6 +19,8 @@ #ifndef __EWOK_RLW_H__ #define __EWOK_RLW_H__ +#include "ewok.h" + #define RLW_RUNNING_BITS (sizeof(eword_t) * 4) #define RLW_LITERAL_BITS (sizeof(eword_t) * 8 - 1 - RLW_RUNNING_BITS) @@ -29,7 +31,7 @@ #define RLW_RUNNING_LEN_PLUS_BIT (((eword_t)1 << (RLW_RUNNING_BITS + 1)) - 1) -static int rlw_get_run_bit(const eword_t *word) +static inline int rlw_get_run_bit(const eword_t *word) { return *word & (eword_t)1; } diff --git a/fast-import.c b/fast-import.c index 89bb0c9db3..69886687ce 100644 --- a/fast-import.c +++ b/fast-import.c @@ -171,6 +171,7 @@ Format of STDIN stream: #include "packfile.h" #include "object-store.h" #include "mem-pool.h" +#include "commit-reach.h" #define PACK_ID_BITS 16 #define MAX_PACK_ID ((1<<PACK_ID_BITS)-1) @@ -572,7 +573,7 @@ static struct object_entry *find_object(struct object_id *oid) unsigned int h = oid->hash[0] << 8 | oid->hash[1]; struct object_entry *e; for (e = object_table[h]; e; e = e->next) - if (!oidcmp(oid, &e->idx.oid)) + if (oideq(oid, &e->idx.oid)) return e; return NULL; } @@ -583,7 +584,7 @@ static struct object_entry *insert_object(struct object_id *oid) struct object_entry *e = object_table[h]; while (e) { - if (!oidcmp(oid, &e->idx.oid)) + if (oideq(oid, &e->idx.oid)) return e; e = e->next; } @@ -1068,7 +1069,7 @@ static int store_object( duplicate_count_by_type[type]++; return 1; } else if (find_sha1_pack(oid.hash, - get_packed_git(the_repository))) { + get_all_packs(the_repository))) { e->type = type; e->pack_id = MAX_PACK_ID; e->idx.offset = 1; /* just not zero! */ @@ -1266,7 +1267,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) truncate_pack(&checkpoint); } else if (find_sha1_pack(oid.hash, - get_packed_git(the_repository))) { + get_all_packs(the_repository))) { e->type = OBJ_BLOB; e->pack_id = MAX_PACK_ID; e->idx.offset = 1; /* just not zero! 
*/ @@ -1533,7 +1534,7 @@ static int tree_content_set( if (!*slash1) { if (!S_ISDIR(mode) && e->versions[1].mode == mode - && !oidcmp(&e->versions[1].oid, oid)) + && oideq(&e->versions[1].oid, oid)) return 0; e->versions[1].mode = mode; oidcpy(&e->versions[1].oid, oid); @@ -2649,7 +2650,7 @@ static int parse_from(struct branch *b) struct object_entry *oe = find_mark(idnum); if (oe->type != OBJ_COMMIT) die("Mark :%" PRIuMAX " not a commit", idnum); - if (oidcmp(&b->oid, &oe->idx.oid)) { + if (!oideq(&b->oid, &oe->idx.oid)) { oidcpy(&b->oid, &oe->idx.oid); if (oe->pack_id != MAX_PACK_ID) { unsigned long size; @@ -2667,7 +2668,7 @@ static int parse_from(struct branch *b) else die("Invalid ref name or SHA1 expression: %s", from); - if (b->branch_tree.tree && oidcmp(&oid, &b->branch_tree.versions[1].oid)) { + if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) { release_tree_content_recursive(b->branch_tree.tree); b->branch_tree.tree = NULL; } @@ -2954,8 +2955,8 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid) die("Object %s is a %s but a blob was expected.", oid_to_hex(oid), type_name(type)); strbuf_reset(&line); - strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid), - type_name(type), size); + strbuf_addf(&line, "%s %s %"PRIuMAX"\n", oid_to_hex(oid), + type_name(type), (uintmax_t)size); cat_blob_write(line.buf, line.len); strbuf_release(&line); cat_blob_write(buf, size); diff --git a/fetch-negotiator.h b/fetch-negotiator.h index ddb44a22dc..9e3967ce66 100644 --- a/fetch-negotiator.h +++ b/fetch-negotiator.h @@ -1,5 +1,5 @@ -#ifndef FETCH_NEGOTIATOR -#define FETCH_NEGOTIATOR +#ifndef FETCH_NEGOTIATOR_H +#define FETCH_NEGOTIATOR_H struct commit; diff --git a/fetch-object.h b/fetch-object.h index d2f996d4e8..d6444caa5a 100644 --- a/fetch-object.h +++ b/fetch-object.h @@ -1,6 +1,8 @@ #ifndef FETCH_OBJECT_H #define FETCH_OBJECT_H +struct object_id; + void fetch_objects(const char *remote_name, const struct object_id *oids, int oid_nr); diff --git a/fetch-pack.c b/fetch-pack.c index 79007f996c..9691046e64 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -76,8 +76,7 @@ struct alternate_object_cache { size_t nr, alloc; }; -static void cache_one_alternate(const char *refname, - const struct object_id *oid, +static void cache_one_alternate(const struct object_id *oid, void *vcache) { struct alternate_object_cache *cache = vcache; @@ -532,21 +531,14 @@ static void add_refs_to_oidset(struct oidset *oids, struct ref *refs) oidset_insert(oids, &refs->old_oid); } -static int tip_oids_contain(struct oidset *tip_oids, - struct ref *unmatched, struct ref *newlist, - const struct object_id *id) +static int is_unmatched_ref(const struct ref *ref) { - /* - * Note that this only looks at the ref lists the first time it's - * called. This works out in filter_refs() because even though it may - * add to "newlist" between calls, the additions will always be for - * oids that are already in the set. 
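
Another pattern that repeats through the hunks above (diffcore-rename.c, dir.c, fast-import.c, fetch-pack.c) is the object-ID comparison cleanup: boolean uses of oidcmp() become oideq(), and oidcmp(a, b) used as "not equal" becomes !oideq(a, b). A minimal sketch of the equivalence follows; same_object() is a hypothetical helper, not code from the patch:

    #include "cache.h"  /* struct object_id, oidcmp(), oideq() */

    /*
     * The two forms return the same result; oideq() states "are these
     * equal?" directly instead of testing a three-way comparison against
     * zero, which reads better and leaves room to optimize pure equality
     * checks separately from ordering.
     */
    static int same_object(const struct object_id *a,
                           const struct object_id *b)
    {
            /* before this series: return !oidcmp(a, b); */
            return oideq(a, b);
    }
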
- */ - if (!tip_oids->map.map.tablesize) { - add_refs_to_oidset(tip_oids, unmatched); - add_refs_to_oidset(tip_oids, newlist); - } - return oidset_contains(tip_oids, id); + struct object_id oid; + const char *p; + return ref->match_status == REF_NOT_MATCHED && + !parse_oid_hex(ref->name, &oid, &p) && + *p == '\0' && + oideq(&oid, &ref->old_oid); } static void filter_refs(struct fetch_pack_args *args, @@ -559,6 +551,8 @@ static void filter_refs(struct fetch_pack_args *args, struct ref *ref, *next; struct oidset tip_oids = OIDSET_INIT; int i; + int strict = !(allow_unadvertised_object_request & + (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)); i = 0; for (ref = *refs; ref; ref = next) { @@ -595,23 +589,25 @@ static void filter_refs(struct fetch_pack_args *args, } } + if (strict) { + for (i = 0; i < nr_sought; i++) { + ref = sought[i]; + if (!is_unmatched_ref(ref)) + continue; + + add_refs_to_oidset(&tip_oids, unmatched); + add_refs_to_oidset(&tip_oids, newlist); + break; + } + } + /* Append unmatched requests to the list */ for (i = 0; i < nr_sought; i++) { - struct object_id oid; - const char *p; - ref = sought[i]; - if (ref->match_status != REF_NOT_MATCHED) - continue; - if (parse_oid_hex(ref->name, &oid, &p) || - *p != '\0' || - oidcmp(&oid, &ref->old_oid)) + if (!is_unmatched_ref(ref)) continue; - if ((allow_unadvertised_object_request & - (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) || - tip_oids_contain(&tip_oids, unmatched, newlist, - &ref->old_oid)) { + if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) { ref->match_status = REF_MATCHED; *newtail = copy_ref(ref); newtail = &(*newtail)->next; @@ -1252,6 +1248,18 @@ static int process_acks(struct fetch_negotiator *negotiator, reader->status != PACKET_READ_DELIM) die(_("error processing acks: %d"), reader->status); + /* + * If an "acknowledgments" section is sent, a packfile is sent if and + * only if "ready" was sent in this section. The other sections + * ("shallow-info" and "wanted-refs") are sent only if a packfile is + * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH + * otherwise. + */ + if (received_ready && reader->status != PACKET_READ_DELIM) + die(_("expected packfile to be sent after 'ready'")); + if (!received_ready && reader->status != PACKET_READ_FLUSH) + die(_("expected no other sections to be sent after no 'ready'")); + /* return 0 if no common, 1 if there are common, or 2 if ready */ return received_ready ? 2 : (received_ack ? 
1 : 0); } @@ -1629,7 +1637,7 @@ struct ref *fetch_pack(struct fetch_pack_args *args, parse_list_objects_filter(&args->filter_options, "blob:none"); } - if (!ref) { + if (version != protocol_v2 && !ref) { packet_flush(fd[1]); die(_("no matching remote head")); } @@ -10,7 +10,6 @@ #include "fsck.h" #include "refs.h" #include "utf8.h" -#include "sha1-array.h" #include "decorate.h" #include "oidset.h" #include "packfile.h" @@ -184,40 +183,37 @@ static int fsck_msg_type(enum fsck_msg_id msg_id, static void init_skiplist(struct fsck_options *options, const char *path) { - static struct oid_array skiplist = OID_ARRAY_INIT; - int sorted, fd; - char buffer[GIT_MAX_HEXSZ + 1]; + FILE *fp; + struct strbuf sb = STRBUF_INIT; struct object_id oid; - if (options->skiplist) - sorted = options->skiplist->sorted; - else { - sorted = 1; - options->skiplist = &skiplist; - } - - fd = open(path, O_RDONLY); - if (fd < 0) + fp = fopen(path, "r"); + if (!fp) die("Could not open skip list: %s", path); - for (;;) { + while (!strbuf_getline(&sb, fp)) { const char *p; - int result = read_in_full(fd, buffer, sizeof(buffer)); - if (result < 0) - die_errno("Could not read '%s'", path); - if (!result) - break; - if (parse_oid_hex(buffer, &oid, &p) || *p != '\n') - die("Invalid SHA-1: %s", buffer); - oid_array_append(&skiplist, &oid); - if (sorted && skiplist.nr > 1 && - oidcmp(&skiplist.oid[skiplist.nr - 2], - &oid) > 0) - sorted = 0; - } - close(fd); + const char *hash; - if (sorted) - skiplist.sorted = 1; + /* + * Allow trailing comments, leading whitespace + * (including before commits), and empty or whitespace + * only lines. + */ + hash = strchr(sb.buf, '#'); + if (hash) + strbuf_setlen(&sb, hash - sb.buf); + strbuf_trim(&sb); + if (!sb.len) + continue; + + if (parse_oid_hex(sb.buf, &oid, &p) || *p != '\0') + die("Invalid SHA-1: %s", sb.buf); + oidset_insert(&options->skiplist, &oid); + } + if (ferror(fp)) + die_errno("Could not read '%s'", path); + fclose(fp); + strbuf_release(&sb); } static int parse_msg_type(const char *str) @@ -322,9 +318,7 @@ static void append_msg_id(struct strbuf *sb, const char *msg_id) static int object_on_skiplist(struct fsck_options *opts, struct object *obj) { - if (opts && opts->skiplist && obj) - return oid_array_lookup(opts->skiplist, &obj->oid) >= 0; - return 0; + return opts && obj && oidset_contains(&opts->skiplist, &obj->oid); } __attribute__((format (printf, 4, 5))) @@ -1,6 +1,8 @@ #ifndef GIT_FSCK_H #define GIT_FSCK_H +#include "oidset.h" + #define FSCK_ERROR 1 #define FSCK_WARN 2 #define FSCK_IGNORE 3 @@ -35,12 +37,12 @@ struct fsck_options { fsck_error error_func; unsigned strict:1; int *msg_type; - struct oid_array *skiplist; + struct oidset skiplist; struct decoration *object_names; }; -#define FSCK_OPTIONS_DEFAULT { NULL, fsck_error_function, 0, NULL } -#define FSCK_OPTIONS_STRICT { NULL, fsck_error_function, 1, NULL } +#define FSCK_OPTIONS_DEFAULT { NULL, fsck_error_function, 0, NULL, OIDSET_INIT } +#define FSCK_OPTIONS_STRICT { NULL, fsck_error_function, 1, NULL, OIDSET_INIT } /* descend in all linked child objects * the return value is: diff --git a/fuzz-pack-headers.c b/fuzz-pack-headers.c new file mode 100644 index 0000000000..99da1d0fd3 --- /dev/null +++ b/fuzz-pack-headers.c @@ -0,0 +1,14 @@ +#include "packfile.h" + +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size); + +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) +{ + enum object_type type; + unsigned long len; + + unpack_object_header_buffer((const unsigned char *)data, + (unsigned 
long)size, &type, &len); + + return 0; +} diff --git a/fuzz-pack-idx.c b/fuzz-pack-idx.c new file mode 100644 index 0000000000..0c3d777aac --- /dev/null +++ b/fuzz-pack-idx.c @@ -0,0 +1,13 @@ +#include "object-store.h" +#include "packfile.h" + +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size); + +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) +{ + struct packed_git p; + + load_idx("fuzz-input", GIT_SHA1_RAWSZ, (void *)data, size, &p); + + return 0; +} diff --git a/generate-cmdlist.sh b/generate-cmdlist.sh index fa1e5475e8..709d67405b 100755 --- a/generate-cmdlist.sh +++ b/generate-cmdlist.sh @@ -80,7 +80,7 @@ print_config_list () { cat <<EOF static const char *config_name_list[] = { EOF - grep -h '^[a-zA-Z].*\..*::$' Documentation/*config.txt | + grep -h '^[a-zA-Z].*\..*::$' Documentation/*config.txt Documentation/config/*.txt | sed '/deprecated/d; s/::$//; s/, */\n/g' | sort | while read line @@ -7,6 +7,7 @@ #include "gettext.h" #include "strbuf.h" #include "utf8.h" +#include "config.h" #ifndef NO_GETTEXT # include <locale.h> @@ -46,15 +47,15 @@ const char *get_preferred_languages(void) return NULL; } -#ifdef GETTEXT_POISON int use_gettext_poison(void) { static int poison_requested = -1; - if (poison_requested == -1) - poison_requested = getenv("GIT_GETTEXT_POISON") ? 1 : 0; + if (poison_requested == -1) { + const char *v = getenv("GIT_TEST_GETTEXT_POISON"); + poison_requested = v && strlen(v) ? 1 : 0; + } return poison_requested; } -#endif #ifndef NO_GETTEXT static int test_vsnprintf(const char *fmt, ...) @@ -164,6 +165,8 @@ void git_setup_gettext(void) if (!podir) podir = p = system_path(GIT_LOCALE_PATH); + use_gettext_poison(); /* getenv() reentrancy paranoia */ + if (!is_directory(podir)) { free(p); return; @@ -28,12 +28,15 @@ #define FORMAT_PRESERVING(n) __attribute__((format_arg(n))) +extern int use_gettext_poison(void); + #ifndef NO_GETTEXT extern void git_setup_gettext(void); extern int gettext_width(const char *s); #else static inline void git_setup_gettext(void) { + use_gettext_poison(); /* getenv() reentrancy paranoia */ } static inline int gettext_width(const char *s) { @@ -41,12 +44,6 @@ static inline int gettext_width(const char *s) } #endif -#ifdef GETTEXT_POISON -extern int use_gettext_poison(void); -#else -#define use_gettext_poison() 0 -#endif - static inline FORMAT_PRESERVING(1) const char *_(const char *msgid) { if (!*msgid) diff --git a/git-compat-util.h b/git-compat-util.h index 5f2e90932f..09b0102cae 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -146,8 +146,8 @@ #define _SGI_SOURCE 1 #if defined(WIN32) && !defined(__CYGWIN__) /* Both MinGW and MSVC */ -# if defined (_MSC_VER) && !defined(_WIN32_WINNT) -# define _WIN32_WINNT 0x0502 +# if !defined(_WIN32_WINNT) +# define _WIN32_WINNT 0x0600 # endif #define WIN32_LEAN_AND_MEAN /* stops windows.h including winsock.h */ #include <winsock2.h> @@ -180,9 +180,12 @@ #include <regex.h> #include <utime.h> #include <syslog.h> -#ifndef NO_SYS_POLL_H +#if !defined(NO_POLL_H) +#include <poll.h> +#elif !defined(NO_SYS_POLL_H) #include <sys/poll.h> #else +/* Pull the compat stuff */ #include <poll.h> #endif #ifdef HAVE_BSD_SYSCTL @@ -342,6 +345,14 @@ typedef uintmax_t timestamp_t; #define _PATH_DEFPATH "/usr/local/bin:/usr/bin:/bin" #endif +#ifndef platform_core_config +static inline int noop_core_config(const char *var, const char *value, void *cb) +{ + return 0; +} +#define platform_core_config noop_core_config +#endif + #ifndef has_dos_drive_prefix static inline int 
git_has_dos_drive_prefix(const char *path) { @@ -382,6 +393,10 @@ static inline char *git_find_last_dir_sep(const char *path) #define find_last_dir_sep git_find_last_dir_sep #endif +#ifndef query_user_email +#define query_user_email() NULL +#endif + #if defined(__HP_cc) && (__HP_cc >= 61000) #define NORETURN __attribute__((noreturn)) #define NORETURN_PTR @@ -408,6 +423,8 @@ static inline char *git_find_last_dir_sep(const char *path) #define LAST_ARG_MUST_BE_NULL #endif +#define MAYBE_UNUSED __attribute__((__unused__)) + #include "compat/bswap.h" #include "wildmatch.h" @@ -844,6 +861,7 @@ extern FILE *fopen_or_warn(const char *path, const char *mode); #define FREE_AND_NULL(p) do { free(p); (p) = NULL; } while (0) #define ALLOC_ARRAY(x, alloc) (x) = xmalloc(st_mult(sizeof(*(x)), (alloc))) +#define CALLOC_ARRAY(x, alloc) (x) = xcalloc((alloc), sizeof(*(x))); #define REALLOC_ARRAY(x, alloc) (x) = xrealloc((x), st_mult(sizeof(*(x)), (alloc))) #define COPY_ARRAY(dst, src, n) copy_array((dst), (src), (n), sizeof(*(dst)) + \ diff --git a/git-rebase.sh b/git-legacy-rebase.sh index 7973447645..b97ffdc9dd 100755 --- a/git-rebase.sh +++ b/git-legacy-rebase.sh @@ -57,12 +57,7 @@ cd_to_toplevel LF=' ' ok_to_skip_pre_rebase= -resolvemsg=" -$(gettext 'Resolve all conflicts manually, mark them as resolved with -"git add/rm <conflicted_files>", then run "git rebase --continue". -You can instead skip this commit: run "git rebase --skip". -To abort and get back to the state before "git rebase", run "git rebase --abort".') -" + squash_onto= unset onto unset restrict_revision @@ -102,6 +97,7 @@ case "$(git config --bool commit.gpgsign)" in true) gpg_sign_opt=-S ;; *) gpg_sign_opt= ;; esac +. git-rebase--common read_basic_state () { test -f "$state_dir/head-name" && @@ -132,67 +128,6 @@ read_basic_state () { } } -write_basic_state () { - echo "$head_name" > "$state_dir"/head-name && - echo "$onto" > "$state_dir"/onto && - echo "$orig_head" > "$state_dir"/orig-head && - echo "$GIT_QUIET" > "$state_dir"/quiet && - test t = "$verbose" && : > "$state_dir"/verbose - test -n "$strategy" && echo "$strategy" > "$state_dir"/strategy - test -n "$strategy_opts" && echo "$strategy_opts" > \ - "$state_dir"/strategy_opts - test -n "$allow_rerere_autoupdate" && echo "$allow_rerere_autoupdate" > \ - "$state_dir"/allow_rerere_autoupdate - test -n "$gpg_sign_opt" && echo "$gpg_sign_opt" > "$state_dir"/gpg_sign_opt - test -n "$signoff" && echo "$signoff" >"$state_dir"/signoff -} - -output () { - case "$verbose" in - '') - output=$("$@" 2>&1 ) - status=$? - test $status != 0 && printf "%s\n" "$output" - return $status - ;; - *) - "$@" - ;; - esac -} - -move_to_original_branch () { - case "$head_name" in - refs/*) - message="rebase finished: $head_name onto $onto" - git update-ref -m "$message" \ - $head_name $(git rev-parse HEAD) $orig_head && - git symbolic-ref \ - -m "rebase finished: returning to $head_name" \ - HEAD $head_name || - die "$(eval_gettext "Could not move back to \$head_name")" - ;; - esac -} - -apply_autostash () { - if test -f "$state_dir/autostash" - then - stash_sha1=$(cat "$state_dir/autostash") - if git stash apply $stash_sha1 >/dev/null 2>&1 - then - echo "$(gettext 'Applied autostash.')" >&2 - else - git stash store -m "autostash" -q $stash_sha1 || - die "$(eval_gettext "Cannot store \$stash_sha1")" - gettext 'Applying autostash resulted in conflicts. -Your changes are safe in the stash. -You can run "git stash pop" or "git stash drop" at any time. 
-' >&2 - fi - fi -} - finish_rebase () { rm -f "$(git rev-parse --git-path REBASE_HEAD)" apply_autostash && @@ -200,26 +135,63 @@ finish_rebase () { rm -rf "$state_dir" } +run_interactive () { + GIT_CHERRY_PICK_HELP="$resolvemsg" + export GIT_CHERRY_PICK_HELP + + test -n "$keep_empty" && keep_empty="--keep-empty" + test -n "$rebase_merges" && rebase_merges="--rebase-merges" + test -n "$rebase_cousins" && rebase_cousins="--rebase-cousins" + test -n "$autosquash" && autosquash="--autosquash" + test -n "$verbose" && verbose="--verbose" + test -n "$force_rebase" && force_rebase="--no-ff" + test -n "$restrict_revision" && \ + restrict_revision="--restrict-revision=^$restrict_revision" + test -n "$upstream" && upstream="--upstream=$upstream" + test -n "$onto" && onto="--onto=$onto" + test -n "$squash_onto" && squash_onto="--squash-onto=$squash_onto" + test -n "$onto_name" && onto_name="--onto-name=$onto_name" + test -n "$head_name" && head_name="--head-name=$head_name" + test -n "$strategy" && strategy="--strategy=$strategy" + test -n "$strategy_opts" && strategy_opts="--strategy-opts=$strategy_opts" + test -n "$switch_to" && switch_to="--switch-to=$switch_to" + test -n "$cmd" && cmd="--cmd=$cmd" + test -n "$action" && action="--$action" + + exec git rebase--interactive "$action" "$keep_empty" "$rebase_merges" "$rebase_cousins" \ + "$upstream" "$onto" "$squash_onto" "$restrict_revision" \ + "$allow_empty_message" "$autosquash" "$verbose" \ + "$force_rebase" "$onto_name" "$head_name" "$strategy" \ + "$strategy_opts" "$cmd" "$switch_to" \ + "$allow_rerere_autoupdate" "$gpg_sign_opt" "$signoff" +} + run_specific_rebase () { if [ "$interactive_rebase" = implied ]; then GIT_EDITOR=: export GIT_EDITOR autosquash= fi - . git-rebase--$type - if test -z "$preserve_merges" + if test -n "$interactive_rebase" -a -z "$preserve_merges" then - git_rebase__$type + run_interactive else - git_rebase__preserve_merges + . git-rebase--$type + + if test -z "$preserve_merges" + then + git_rebase__$type + else + git_rebase__preserve_merges + fi fi ret=$? 
if test $ret -eq 0 then finish_rebase - elif test $ret -eq 2 # special exit status for rebase -i + elif test $ret -eq 2 # special exit status for rebase -p then apply_autostash && rm -rf "$state_dir" && @@ -365,6 +337,11 @@ do fix|strip) force_rebase=t ;; + warn|nowarn|error|error-all) + ;; # okay, known whitespace option + *) + die "fatal: Invalid whitespace option: '${1#*=}'" + ;; esac ;; --ignore-whitespace) @@ -380,6 +357,9 @@ do git_am_opt="$git_am_opt $1" force_rebase=t ;; + -C*[!0-9]*) + die "fatal: switch \`C' expects a numerical value" + ;; -C*) git_am_opt="$git_am_opt $1" ;; @@ -553,15 +533,15 @@ then # Note: incompatibility with --interactive is just a strong warning; # git-rebase.txt caveats with "unless you know what you are doing" test -n "$rebase_merges" && - die "$(gettext "error: cannot combine '--preserve_merges' with '--rebase-merges'")" + die "$(gettext "error: cannot combine '--preserve-merges' with '--rebase-merges'")" fi if test -n "$rebase_merges" then test -n "$strategy_opts" && - die "$(gettext "error: cannot combine '--rebase_merges' with '--strategy-option'")" + die "$(gettext "error: cannot combine '--rebase-merges' with '--strategy-option'")" test -n "$strategy" && - die "$(gettext "error: cannot combine '--rebase_merges' with '--strategy'")" + die "$(gettext "error: cannot combine '--rebase-merges' with '--strategy'")" fi if test -z "$rebase_root" diff --git a/git-mergetool--lib.sh b/git-mergetool--lib.sh index 9a8b97a2ab..83bf52494c 100644 --- a/git-mergetool--lib.sh +++ b/git-mergetool--lib.sh @@ -350,17 +350,23 @@ guess_merge_tool () { } get_configured_merge_tool () { - # Diff mode first tries diff.tool and falls back to merge.tool. - # Merge mode only checks merge.tool + # If first argument is true, find the guitool instead + if test "$1" = true + then + gui_prefix=gui + fi + + # Diff mode first tries diff.(gui)tool and falls back to merge.(gui)tool. + # Merge mode only checks merge.(gui)tool if diff_mode then - merge_tool=$(git config diff.tool || git config merge.tool) + merge_tool=$(git config diff.${gui_prefix}tool || git config merge.${gui_prefix}tool) else - merge_tool=$(git config merge.tool) + merge_tool=$(git config merge.${gui_prefix}tool) fi if test -n "$merge_tool" && ! valid_tool "$merge_tool" then - echo >&2 "git config option $TOOL_MODE.tool set to unknown tool: $merge_tool" + echo >&2 "git config option $TOOL_MODE.${gui_prefix}tool set to unknown tool: $merge_tool" echo >&2 "Resetting to default..." return 1 fi diff --git a/git-mergetool.sh b/git-mergetool.sh index d07c7f387c..01b9ad59b2 100755 --- a/git-mergetool.sh +++ b/git-mergetool.sh @@ -9,7 +9,7 @@ # at the discretion of Junio C Hamano. # -USAGE='[--tool=tool] [--tool-help] [-y|--no-prompt|--prompt] [-O<orderfile>] [file to merge] ...' +USAGE='[--tool=tool] [--tool-help] [-y|--no-prompt|--prompt] [-g|--gui|--no-gui] [-O<orderfile>] [file to merge] ...' SUBDIRECTORY_OK=Yes NONGIT_OK=Yes OPTIONS_SPEC= @@ -389,6 +389,7 @@ print_noop_and_exit () { main () { prompt=$(git config --bool mergetool.prompt) + gui_tool=false guessed_merge_tool=false orderfile= @@ -414,6 +415,12 @@ main () { shift ;; esac ;; + --no-gui) + gui_tool=false + ;; + -g|--gui) + gui_tool=true + ;; -y|--no-prompt) prompt=false ;; @@ -443,7 +450,7 @@ main () { if test -z "$merge_tool" then # Check if a merge tool has been configured - merge_tool=$(get_configured_merge_tool) + merge_tool=$(get_configured_merge_tool $gui_tool) # Try to guess an appropriate merge tool if no tool has been set. 
if test -z "$merge_tool" then @@ -1306,6 +1306,9 @@ class GitLFS(LargeFileSystem): return LargeFileSystem.processContent(self, git_mode, relPath, contents) class Command: + delete_actions = ( "delete", "move/delete", "purge" ) + add_actions = ( "add", "move/add" ) + def __init__(self): self.usage = "usage: %prog [options]" self.needsGit = True @@ -2524,7 +2527,6 @@ class View(object): return "" class P4Sync(Command, P4UserMap): - delete_actions = ( "delete", "move/delete", "purge" ) def __init__(self): Command.__init__(self) @@ -2612,20 +2614,7 @@ class P4Sync(Command, P4UserMap): if self.verbose: print("checkpoint finished: " + out) - def cmp_shelved(self, path, filerev, revision): - """ Determine if a path at revision #filerev is the same as the file - at revision @revision for a shelved changelist. If they don't match, - unshelving won't be safe (we will get other changes mixed in). - - This is comparing the revision that the shelved changelist is *based* on, not - the shelved changelist itself. - """ - ret = p4Cmd(["diff2", "{0}#{1}".format(path, filerev), "{0}@{1}".format(path, revision)]) - if verbose: - print("p4 diff2 path %s filerev %s revision %s => %s" % (path, filerev, revision, ret)) - return ret["status"] == "identical" - - def extractFilesFromCommit(self, commit, shelved=False, shelved_cl = 0, origin_revision = 0): + def extractFilesFromCommit(self, commit, shelved=False, shelved_cl = 0): self.cloneExclude = [re.sub(r"\.\.\.$", "", path) for path in self.cloneExclude] files = [] @@ -2650,17 +2639,6 @@ class P4Sync(Command, P4UserMap): file["type"] = commit["type%s" % fnum] if shelved: file["shelved_cl"] = int(shelved_cl) - - # For shelved changelists, check that the revision of each file that the - # shelve was based on matches the revision that we are using for the - # starting point for git-fast-import (self.initialParent). Otherwise - # the resulting diff will contain deltas from multiple commits. - - if file["action"] != "add" and \ - not self.cmp_shelved(path, file["rev"], origin_revision): - sys.exit("change {0} not based on {1} for {2}, cannot unshelve".format( - commit["change"], self.initialParent, path)) - files.append(file) fnum = fnum + 1 return files @@ -2775,7 +2753,10 @@ class P4Sync(Command, P4UserMap): relPath = self.stripRepoPath(file['depotFile'], self.branchPrefixes) relPath = self.encodeWithUTF8(relPath) if verbose: - size = int(self.stream_file['fileSize']) + if 'fileSize' in self.stream_file: + size = int(self.stream_file['fileSize']) + else: + size = 0 # deleted files don't get a fileSize apparently sys.stdout.write('\r%s --> %s (%i MB)\n' % (file['depotFile'], relPath, size/1024/1024)) sys.stdout.flush() @@ -3029,7 +3010,7 @@ class P4Sync(Command, P4UserMap): print('Ignoring file outside of prefix: {0}'.format(path)) return hasPrefix - def commit(self, details, files, branch, parent = ""): + def commit(self, details, files, branch, parent = "", allow_empty=False): epoch = details["time"] author = details["user"] jobs = self.extractJobsFromCommit(details) @@ -3043,7 +3024,10 @@ class P4Sync(Command, P4UserMap): files = [f for f in files if self.inClientSpec(f['path']) and self.hasBranchPrefix(f['path'])] - if not files and not gitConfigBool('git-p4.keepEmptyCommits'): + if gitConfigBool('git-p4.keepEmptyCommits'): + allow_empty = True + + if not files and not allow_empty: print('Ignoring revision {0} as it would produce an empty commit.' 
.format(details['change'])) return @@ -3384,10 +3368,10 @@ class P4Sync(Command, P4UserMap): else: return None - def importChanges(self, changes, shelved=False, origin_revision=0): + def importChanges(self, changes, origin_revision=0): cnt = 1 for change in changes: - description = p4_describe(change, shelved) + description = p4_describe(change) self.updateOptionDict(description) if not self.silent: @@ -3457,7 +3441,7 @@ class P4Sync(Command, P4UserMap): print("Parent of %s not found. Committing into head of %s" % (branch, parent)) self.commit(description, filesForCommit, branch, parent) else: - files = self.extractFilesFromCommit(description, shelved, change, origin_revision) + files = self.extractFilesFromCommit(description) self.commit(description, files, self.branch, self.initialParent) # only needed once, to connect to the previous commit @@ -3953,7 +3937,7 @@ class P4Unshelve(Command): ] self.verbose = False self.noCommit = False - self.destbranch = "refs/remotes/p4/unshelved" + self.destbranch = "refs/remotes/p4-unshelved" def renameBranch(self, branch_name): """ Rename the existing branch to branch_name.N @@ -3985,6 +3969,32 @@ class P4Unshelve(Command): sys.exit("could not find git-p4 commits in {0}".format(self.origin)) + def createShelveParent(self, change, branch_name, sync, origin): + """ Create a commit matching the parent of the shelved changelist 'change' + """ + parent_description = p4_describe(change, shelved=True) + parent_description['desc'] = 'parent for shelved changelist {}\n'.format(change) + files = sync.extractFilesFromCommit(parent_description, shelved=False, shelved_cl=change) + + parent_files = [] + for f in files: + # if it was added in the shelved changelist, it won't exist in the parent + if f['action'] in self.add_actions: + continue + + # if it was deleted in the shelved changelist it must not be deleted + # in the parent - we might even need to create it if the origin branch + # does not have it + if f['action'] in self.delete_actions: + f['action'] = 'add' + + parent_files.append(f) + + sync.commit(parent_description, parent_files, branch_name, + parent=origin, allow_empty=True) + print("created parent commit for {0} based on {1} in {2}".format( + change, self.origin, branch_name)) + def run(self, args): if len(args) != 1: return False @@ -3994,9 +4004,8 @@ class P4Unshelve(Command): sync = P4Sync() changes = args - sync.initialParent = self.origin - # use the first change in the list to construct the branch to unshelve into + # only one change at a time change = changes[0] # if the target branch already exists, rename it @@ -4009,14 +4018,21 @@ class P4Unshelve(Command): sync.suppress_meta_comment = True settings = self.findLastP4Revision(self.origin) - origin_revision = settings['change'] sync.depotPaths = settings['depot-paths'] sync.branchPrefixes = sync.depotPaths sync.openStreams() sync.loadUserMapFromCache() sync.silent = True - sync.importChanges(changes, shelved=True, origin_revision=origin_revision) + + # create a commit for the parent of the shelved changelist + self.createShelveParent(change, branch_name, sync, self.origin) + + # create the commit for the shelved changelist itself + description = p4_describe(change, True) + files = sync.extractFilesFromCommit(description, True, change) + + sync.commit(description, files, branch_name, "") sync.closeStreams() print("unshelved changelist {0} into {1}".format(change, branch_name)) diff --git a/git-rebase--common.sh b/git-rebase--common.sh new file mode 100644 index 0000000000..7e39d22871 --- 
/dev/null +++ b/git-rebase--common.sh @@ -0,0 +1,68 @@ + +resolvemsg=" +$(gettext 'Resolve all conflicts manually, mark them as resolved with +"git add/rm <conflicted_files>", then run "git rebase --continue". +You can instead skip this commit: run "git rebase --skip". +To abort and get back to the state before "git rebase", run "git rebase --abort".') +" + +write_basic_state () { + echo "$head_name" > "$state_dir"/head-name && + echo "$onto" > "$state_dir"/onto && + echo "$orig_head" > "$state_dir"/orig-head && + echo "$GIT_QUIET" > "$state_dir"/quiet && + test t = "$verbose" && : > "$state_dir"/verbose + test -n "$strategy" && echo "$strategy" > "$state_dir"/strategy + test -n "$strategy_opts" && echo "$strategy_opts" > \ + "$state_dir"/strategy_opts + test -n "$allow_rerere_autoupdate" && echo "$allow_rerere_autoupdate" > \ + "$state_dir"/allow_rerere_autoupdate + test -n "$gpg_sign_opt" && echo "$gpg_sign_opt" > "$state_dir"/gpg_sign_opt + test -n "$signoff" && echo "$signoff" >"$state_dir"/signoff +} + +apply_autostash () { + if test -f "$state_dir/autostash" + then + stash_sha1=$(cat "$state_dir/autostash") + if git stash apply $stash_sha1 >/dev/null 2>&1 + then + echo "$(gettext 'Applied autostash.')" >&2 + else + git stash store -m "autostash" -q $stash_sha1 || + die "$(eval_gettext "Cannot store \$stash_sha1")" + gettext 'Applying autostash resulted in conflicts. +Your changes are safe in the stash. +You can run "git stash pop" or "git stash drop" at any time. +' >&2 + fi + fi +} + +move_to_original_branch () { + case "$head_name" in + refs/*) + message="rebase finished: $head_name onto $onto" + git update-ref -m "$message" \ + $head_name $(git rev-parse HEAD) $orig_head && + git symbolic-ref \ + -m "rebase finished: returning to $head_name" \ + HEAD $head_name || + die "$(eval_gettext "Could not move back to \$head_name")" + ;; + esac +} + +output () { + case "$verbose" in + '') + output=$("$@" 2>&1 ) + status=$? + test $status != 0 && printf "%s\n" "$output" + return $status + ;; + *) + "$@" + ;; + esac +} diff --git a/git-rebase--interactive.sh b/git-rebase--interactive.sh deleted file mode 100644 index 299ded2137..0000000000 --- a/git-rebase--interactive.sh +++ /dev/null @@ -1,283 +0,0 @@ -# This shell script fragment is sourced by git-rebase to implement -# its interactive mode. "git rebase --interactive" makes it easy -# to fix up commits in the middle of a series and rearrange commits. -# -# Copyright (c) 2006 Johannes E. Schindelin -# -# The original idea comes from Eric W. Biederman, in -# https://public-inbox.org/git/m1odwkyuf5.fsf_-_@ebiederm.dsl.xmission.com/ -# -# The file containing rebase commands, comments, and empty lines. -# This file is created by "git rebase -i" then edited by the user. As -# the lines are processed, they are removed from the front of this -# file and written to the tail of $done. -todo="$state_dir"/git-rebase-todo - -GIT_CHERRY_PICK_HELP="$resolvemsg" -export GIT_CHERRY_PICK_HELP - -comment_char=$(git config --get core.commentchar 2>/dev/null) -case "$comment_char" in -'' | auto) - comment_char="#" - ;; -?) 
- ;; -*) - comment_char=$(echo "$comment_char" | cut -c1) - ;; -esac - -orig_reflog_action="$GIT_REFLOG_ACTION" - -comment_for_reflog () { - case "$orig_reflog_action" in - ''|rebase*) - GIT_REFLOG_ACTION="rebase -i ($1)" - export GIT_REFLOG_ACTION - ;; - esac -} - -append_todo_help () { - gettext " -Commands: -p, pick <commit> = use commit -r, reword <commit> = use commit, but edit the commit message -e, edit <commit> = use commit, but stop for amending -s, squash <commit> = use commit, but meld into previous commit -f, fixup <commit> = like \"squash\", but discard this commit's log message -x, exec <command> = run command (the rest of the line) using shell -d, drop <commit> = remove commit -l, label <label> = label current HEAD with a name -t, reset <label> = reset HEAD to a label -m, merge [-C <commit> | -c <commit>] <label> [# <oneline>] -. create a merge commit using the original merge commit's -. message (or the oneline, if no original merge commit was -. specified). Use -c <commit> to reword the commit message. - -These lines can be re-ordered; they are executed from top to bottom. -" | git stripspace --comment-lines >>"$todo" - - if test $(get_missing_commit_check_level) = error - then - gettext " -Do not remove any line. Use 'drop' explicitly to remove a commit. -" | git stripspace --comment-lines >>"$todo" - else - gettext " -If you remove a line here THAT COMMIT WILL BE LOST. -" | git stripspace --comment-lines >>"$todo" - fi -} - -die_abort () { - apply_autostash - rm -rf "$state_dir" - die "$1" -} - -has_action () { - test -n "$(git stripspace --strip-comments <"$1")" -} - -git_sequence_editor () { - if test -z "$GIT_SEQUENCE_EDITOR" - then - GIT_SEQUENCE_EDITOR="$(git config sequence.editor)" - if [ -z "$GIT_SEQUENCE_EDITOR" ] - then - GIT_SEQUENCE_EDITOR="$(git var GIT_EDITOR)" || return $? - fi - fi - - eval "$GIT_SEQUENCE_EDITOR" '"$@"' -} - -expand_todo_ids() { - git rebase--helper --expand-ids -} - -collapse_todo_ids() { - git rebase--helper --shorten-ids -} - -# Switch to the branch in $into and notify it in the reflog -checkout_onto () { - GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" - output git checkout $onto || die_abort "$(gettext "could not detach HEAD")" - git update-ref ORIG_HEAD $orig_head -} - -get_missing_commit_check_level () { - check_level=$(git config --get rebase.missingCommitsCheck) - check_level=${check_level:-ignore} - # Don't be case sensitive - printf '%s' "$check_level" | tr 'A-Z' 'a-z' -} - -# Initiate an action. If the cannot be any -# further action it may exec a command -# or exit and not return. -# -# TODO: Consider a cleaner return model so it -# never exits and always return 0 if process -# is complete. -# -# Parameter 1 is the action to initiate. -# -# Returns 0 if the action was able to complete -# and if 1 if further processing is required. -initiate_action () { - case "$1" in - continue) - exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ - --continue - ;; - skip) - git rerere clear - exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ - --continue - ;; - edit-todo) - git stripspace --strip-comments <"$todo" >"$todo".new - mv -f "$todo".new "$todo" - collapse_todo_ids - append_todo_help - gettext " -You are editing the todo file of an ongoing interactive rebase. 
-To continue rebase after editing, run: - git rebase --continue - -" | git stripspace --comment-lines >>"$todo" - - git_sequence_editor "$todo" || - die "$(gettext "Could not execute editor")" - expand_todo_ids - - exit - ;; - show-current-patch) - exec git show REBASE_HEAD -- - ;; - *) - return 1 # continue - ;; - esac -} - -setup_reflog_action () { - comment_for_reflog start - - if test ! -z "$switch_to" - then - GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to" - output git checkout "$switch_to" -- || - die "$(eval_gettext "Could not checkout \$switch_to")" - - comment_for_reflog start - fi -} - -init_basic_state () { - orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")" - mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")" - rm -f "$(git rev-parse --git-path REBASE_HEAD)" - - : > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")" - write_basic_state -} - -init_revisions_and_shortrevisions () { - shorthead=$(git rev-parse --short $orig_head) - shortonto=$(git rev-parse --short $onto) - if test -z "$rebase_root" - # this is now equivalent to ! -z "$upstream" - then - shortupstream=$(git rev-parse --short $upstream) - revisions=$upstream...$orig_head - shortrevisions=$shortupstream..$shorthead - else - revisions=$onto...$orig_head - shortrevisions=$shorthead - test -z "$squash_onto" || - echo "$squash_onto" >"$state_dir"/squash-onto - fi -} - -complete_action() { - test -s "$todo" || echo noop >> "$todo" - test -z "$autosquash" || git rebase--helper --rearrange-squash || exit - test -n "$cmd" && git rebase--helper --add-exec-commands "$cmd" - - todocount=$(git stripspace --strip-comments <"$todo" | wc -l) - todocount=${todocount##* } - -cat >>"$todo" <<EOF - -$comment_char $(eval_ngettext \ - "Rebase \$shortrevisions onto \$shortonto (\$todocount command)" \ - "Rebase \$shortrevisions onto \$shortonto (\$todocount commands)" \ - "$todocount") -EOF - append_todo_help - gettext " - However, if you remove everything, the rebase will be aborted. - - " | git stripspace --comment-lines >>"$todo" - - if test -z "$keep_empty" - then - printf '%s\n' "$comment_char $(gettext "Note that empty commits are commented out")" >>"$todo" - fi - - - has_action "$todo" || - return 2 - - cp "$todo" "$todo".backup - collapse_todo_ids - git_sequence_editor "$todo" || - die_abort "$(gettext "Could not execute editor")" - - has_action "$todo" || - return 2 - - git rebase--helper --check-todo-list || { - ret=$? - checkout_onto - exit $ret - } - - expand_todo_ids - - test -n "$force_rebase" || - onto="$(git rebase--helper --skip-unnecessary-picks)" || - die "Could not skip unnecessary pick commands" - - checkout_onto - require_clean_work_tree "rebase" - exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ - --continue -} - -git_rebase__interactive () { - initiate_action "$action" - ret=$? 
- if test $ret = 0; then - return 0 - fi - - setup_reflog_action - init_basic_state - - init_revisions_and_shortrevisions - - git rebase--helper --make-script ${keep_empty:+--keep-empty} \ - ${rebase_merges:+--rebase-merges} \ - ${rebase_cousins:+--rebase-cousins} \ - $revisions ${restrict_revision+^$restrict_revision} >"$todo" || - die "$(gettext "Could not generate todo list")" - - complete_action -} diff --git a/git-rebase--preserve-merges.sh b/git-rebase--preserve-merges.sh index c214c5e4d6..afbb65765d 100644 --- a/git-rebase--preserve-merges.sh +++ b/git-rebase--preserve-merges.sh @@ -711,11 +711,11 @@ do_rest () { } expand_todo_ids() { - git rebase--helper --expand-ids + git rebase--interactive --expand-ids } collapse_todo_ids() { - git rebase--helper --shorten-ids + git rebase--interactive --shorten-ids } # Switch to the branch in $into and notify it in the reflog @@ -876,8 +876,8 @@ init_revisions_and_shortrevisions () { complete_action() { test -s "$todo" || echo noop >> "$todo" - test -z "$autosquash" || git rebase--helper --rearrange-squash || exit - test -n "$cmd" && git rebase--helper --add-exec-commands "$cmd" + test -z "$autosquash" || git rebase--interactive --rearrange-squash || exit + test -n "$cmd" && git rebase--interactive --add-exec-commands --cmd "$cmd" todocount=$(git stripspace --strip-comments <"$todo" | wc -l) todocount=${todocount##* } @@ -912,7 +912,7 @@ However, if you remove everything, the rebase will be aborted. has_action "$todo" || return 2 - git rebase--helper --check-todo-list || { + git rebase--interactive --check-todo-list || { ret=$? checkout_onto exit $ret diff --git a/git-send-email.perl b/git-send-email.perl index 2be5dac337..8eb63b5a2f 100755 --- a/git-send-email.perl +++ b/git-send-email.perl @@ -82,8 +82,11 @@ git send-email --dump-aliases Pass an empty string to disable certificate verification. --smtp-domain <str> * The domain name sent to HELO/EHLO handshake - --smtp-auth <str> * Space-separated list of allowed AUTH mechanisms. + --smtp-auth <str> * Space-separated list of allowed AUTH mechanisms, or + "none" to disable authentication. This setting forces to use one of the listed mechanisms. + --no-smtp-auth Disable SMTP authentication. Shorthand for + `--smtp-auth=none` --smtp-debug <0|1> * Disable, enable Net::SMTP debug. --batch-size <int> * send max <int> message per connection. @@ -94,7 +97,7 @@ git send-email --dump-aliases --identity <str> * Use the sendemail.<id> options. --to-cmd <str> * Email To: via `<str> \$patch_path` --cc-cmd <str> * Email Cc: via `<str> \$patch_path` - --suppress-cc <str> * author, self, sob, cc, cccmd, body, bodycc, all. + --suppress-cc <str> * author, self, sob, cc, cccmd, body, bodycc, misc-by, all. --[no-]cc-cover * Email Cc: addresses in the cover letter. --[no-]to-cover * Email To: addresses in the cover letter. --[no-]signed-off-by-cc * Send to Signed-off-by: addresses. Default on. @@ -119,6 +122,11 @@ EOT exit(1); } +sub completion_helper { + print Git::command('format-patch', '--git-completion-helper'); + exit(0); +} + # most mail servers generate the Date: header, but not all... 
sub format_2822_time { my ($time) = @_; @@ -311,6 +319,7 @@ $SIG{INT} = \&signal_handler; # needing, first, from the command line: my $help; +my $git_completion_helper; my $rc = GetOptions("h" => \$help, "dump-aliases" => \$dump_aliases); usage() unless $rc; @@ -341,6 +350,7 @@ $rc = GetOptions( "smtp-debug:i" => \$debug_net_smtp, "smtp-domain:s" => \$smtp_domain, "smtp-auth=s" => \$smtp_auth, + "no-smtp-auth" => sub {$smtp_auth = 'none'}, "identity=s" => \$identity, "annotate!" => \$annotate, "no-annotate" => sub {$annotate = 0}, @@ -373,9 +383,11 @@ $rc = GetOptions( "no-xmailer" => sub {$use_xmailer = 0}, "batch-size=i" => \$batch_size, "relogin-delay=i" => \$relogin_delay, + "git-completion-helper" => \$git_completion_helper, ); usage() if $help; +completion_helper() if $git_completion_helper; unless ($rc) { usage(); } @@ -454,13 +466,13 @@ my(%suppress_cc); if (@suppress_cc) { foreach my $entry (@suppress_cc) { die sprintf(__("Unknown --suppress-cc field: '%s'\n"), $entry) - unless $entry =~ /^(?:all|cccmd|cc|author|self|sob|body|bodycc)$/; + unless $entry =~ /^(?:all|cccmd|cc|author|self|sob|body|bodycc|misc-by)$/; $suppress_cc{$entry} = 1; } } if ($suppress_cc{'all'}) { - foreach my $entry (qw (cccmd cc author self sob body bodycc)) { + foreach my $entry (qw (cccmd cc author self sob body bodycc misc-by)) { $suppress_cc{$entry} = 1; } delete $suppress_cc{'all'}; @@ -471,7 +483,7 @@ $suppress_cc{'self'} = $suppress_from if defined $suppress_from; $suppress_cc{'sob'} = !$signed_off_by_cc if defined $signed_off_by_cc; if ($suppress_cc{'body'}) { - foreach my $entry (qw (sob bodycc)) { + foreach my $entry (qw (sob bodycc misc-by)) { $suppress_cc{$entry} = 1; } delete $suppress_cc{'body'}; @@ -1241,7 +1253,7 @@ sub smtp_host_string { # (smtp_user was not specified), and 0 otherwise. sub smtp_auth_maybe { - if (!defined $smtp_authuser || $auth) { + if (!defined $smtp_authuser || $auth || (defined $smtp_auth && $smtp_auth eq "none")) { return 1; } @@ -1681,7 +1693,7 @@ sub process_file { # Now parse the message body while(<$fh>) { $message .= $_; - if (/^(Signed-off-by|Cc): (.*)/i) { + if (/^([a-z-]*-by|Cc): (.*)/i) { chomp; my ($what, $c) = ($1, $2); # strip garbage for the address we'll use: @@ -1691,8 +1703,18 @@ sub process_file { if ($sc eq $sender) { next if ($suppress_cc{'self'}); } else { - next if $suppress_cc{'sob'} and $what =~ /Signed-off-by/i; - next if $suppress_cc{'bodycc'} and $what =~ /Cc/i; + if ($what =~ /^Signed-off-by$/i) { + next if $suppress_cc{'sob'}; + } elsif ($what =~ /-by$/i) { + next if $suppress_cc{'misc-by'}; + } elsif ($what =~ /Cc/i) { + next if $suppress_cc{'bodycc'}; + } + } + if ($c !~ /.+@.+|<.+>/) { + printf("(body) Ignoring %s from line '%s'\n", + $what, $_) unless $quiet; + next; } push @cc, $c; printf(__("(body) Adding cc: %s from line '%s'\n"), @@ -1834,7 +1856,7 @@ sub apply_transfer_encoding { my $from = shift; my $to = shift; - return $message if ($from eq $to and $from ne '7bit'); + return ($message, $to) if ($from eq $to and $from ne '7bit'); require MIME::QuotedPrint; require MIME::Base64; diff --git a/git-sh-i18n.sh b/git-sh-i18n.sh index 9d065fb4bf..e1d917fd27 100644 --- a/git-sh-i18n.sh +++ b/git-sh-i18n.sh @@ -17,7 +17,7 @@ export TEXTDOMAINDIR # First decide what scheme to use... 
GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough -if test -n "$GIT_GETTEXT_POISON" +if test -n "$GIT_TEST_GETTEXT_POISON" then GIT_INTERNAL_GETTEXT_SH_SCHEME=poison elif test -n "@@USE_GETTEXT_SCHEME@@" diff --git a/git-submodule.sh b/git-submodule.sh index 1cb2c0a31b..5e608f8bad 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -72,7 +72,7 @@ get_submodule_config () { value=$(git config submodule."$name"."$option") if test -z "$value" then - value=$(git config -f .gitmodules submodule."$name"."$option") + value=$(git submodule--helper config submodule."$name"."$option") fi printf '%s' "${value:-$default}" } @@ -82,6 +82,11 @@ isnumber() n=$(($1 + 0)) 2>/dev/null && test "$n" = "$1" } +# Given a full hex object ID, is this the zero OID? +is_zero_oid () { + echo "$1" | sane_egrep '^0+$' >/dev/null 2>&1 +} + # Sanitize the local git environment for use within a submodule. We # can't simply use clear_local_git_env since we want to preserve some # of the settings from GIT_CONFIG_PARAMETERS. @@ -159,6 +164,11 @@ cmd_add() shift done + if ! git submodule--helper config --check-writeable >/dev/null 2>&1 + then + die "$(eval_gettext "please make sure that the .gitmodules file is in the working tree")" + fi + if test -n "$reference_path" then is_absolute_path "$reference_path" || @@ -283,11 +293,11 @@ or you are unsure what this means choose another name with the '--name' option." git add --no-warn-embedded-repo $force "$sm_path" || die "$(eval_gettext "Failed to add submodule '\$sm_path'")" - git config -f .gitmodules submodule."$sm_name".path "$sm_path" && - git config -f .gitmodules submodule."$sm_name".url "$repo" && + git submodule--helper config submodule."$sm_name".path "$sm_path" && + git submodule--helper config submodule."$sm_name".url "$repo" && if test -n "$branch" then - git config -f .gitmodules submodule."$sm_name".branch "$branch" + git submodule--helper config submodule."$sm_name".branch "$branch" fi && git add --force .gitmodules || die "$(eval_gettext "Failed to register submodule '\$sm_path'")" @@ -534,31 +544,19 @@ cmd_update() "$@" || echo "#unmatched" $? } | { err= - while read -r mode sha1 stage just_cloned sm_path + while read -r quickabort sha1 just_cloned sm_path do - die_if_unmatched "$mode" "$sha1" + die_if_unmatched "$quickabort" "$sha1" - name=$(git submodule--helper name "$sm_path") || exit - if ! 
test -z "$update" - then - update_module=$update - else - update_module=$(git config submodule."$name".update) - if test -z "$update_module" - then - update_module="checkout" - fi - fi + git submodule--helper ensure-core-worktree "$sm_path" + + update_module=$(git submodule--helper update-module-mode $just_cloned "$sm_path" $update) displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix") if test $just_cloned -eq 1 then subsha1= - case "$update_module" in - merge | rebase | none) - update_module=checkout ;; - esac else subsha1=$(sanitize_submodule_env; cd "$sm_path" && git rev-parse --verify HEAD) || @@ -630,7 +628,7 @@ cmd_update() must_die_on_failure=yes ;; *) - die "$(eval_gettext "Invalid update mode '$update_module' for submodule '$name'")" + die "$(eval_gettext "Invalid update mode '$update_module' for submodule path '$path'")" esac if (sanitize_submodule_env; cd "$sm_path" && $command "$sha1") @@ -792,7 +790,7 @@ cmd_summary() { while read -r mod_src mod_dst sha1_src sha1_dst status name do if test -z "$cached" && - test $sha1_dst = 0000000000000000000000000000000000000000 + is_zero_oid $sha1_dst then case "$mod_dst" in 160000) @@ -318,6 +318,9 @@ static int handle_alias(int *argcp, const char ***argv) alias_command = (*argv)[0]; alias_string = alias_lookup(alias_command); if (alias_string) { + if (*argcp > 1 && !strcmp((*argv)[1], "-h")) + fprintf_ln(stderr, _("'%s' is aliased to '%s'"), + alias_command, alias_string); if (alias_string[0] == '!') { struct child_process child = CHILD_PROCESS_INIT; int nongit_ok; @@ -508,6 +511,7 @@ static struct cmd_struct commands[] = { { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT }, { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT }, { "mktree", cmd_mktree, RUN_SETUP }, + { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP_GENTLY }, { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE }, { "name-rev", cmd_name_rev, RUN_SETUP }, { "notes", cmd_notes, RUN_SETUP }, @@ -522,7 +526,13 @@ static struct cmd_struct commands[] = { { "push", cmd_push, RUN_SETUP }, { "range-diff", cmd_range_diff, RUN_SETUP | USE_PAGER }, { "read-tree", cmd_read_tree, RUN_SETUP | SUPPORT_SUPER_PREFIX}, - { "rebase--helper", cmd_rebase__helper, RUN_SETUP | NEED_WORK_TREE }, + /* + * NEEDSWORK: Until the rebase is independent and needs no redirection + * to rebase shell script this is kept as is, then should be changed to + * RUN_SETUP | NEED_WORK_TREE + */ + { "rebase", cmd_rebase }, + { "rebase--interactive", cmd_rebase__interactive, RUN_SETUP | NEED_WORK_TREE }, { "receive-pack", cmd_receive_pack }, { "reflog", cmd_reflog, RUN_SETUP }, { "remote", cmd_remote, RUN_SETUP }, @@ -674,6 +684,8 @@ static void execv_dashed_external(const char **argv) static int run_argv(int *argcp, const char ***argv) { int done_alias = 0; + struct string_list cmd_list = STRING_LIST_INIT_NODUP; + struct string_list_item *seen; while (1) { /* @@ -691,17 +703,37 @@ static int run_argv(int *argcp, const char ***argv) /* .. 
then try the external ones */ execv_dashed_external(*argv); - /* It could be an alias -- this works around the insanity + seen = unsorted_string_list_lookup(&cmd_list, *argv[0]); + if (seen) { + int i; + struct strbuf sb = STRBUF_INIT; + for (i = 0; i < cmd_list.nr; i++) { + struct string_list_item *item = &cmd_list.items[i]; + + strbuf_addf(&sb, "\n %s", item->string); + if (item == seen) + strbuf_addstr(&sb, " <=="); + else if (i == cmd_list.nr - 1) + strbuf_addstr(&sb, " ==>"); + } + die(_("alias loop detected: expansion of '%s' does" + " not terminate:%s"), cmd_list.items[0].string, sb.buf); + } + + string_list_append(&cmd_list, *argv[0]); + + /* + * It could be an alias -- this works around the insanity * of overriding "git log" with "git show" by having * alias.log = show */ - if (done_alias) - break; if (!handle_alias(argcp, argv)) break; done_alias = 1; } + string_list_clear(&cmd_list, 0); + return done_alias; } diff --git a/gpg-interface.c b/gpg-interface.c index db17d65f8a..8ed274533f 100644 --- a/gpg-interface.c +++ b/gpg-interface.c @@ -73,50 +73,115 @@ void signature_check_clear(struct signature_check *sigc) FREE_AND_NULL(sigc->gpg_status); FREE_AND_NULL(sigc->signer); FREE_AND_NULL(sigc->key); + FREE_AND_NULL(sigc->fingerprint); + FREE_AND_NULL(sigc->primary_key_fingerprint); } +/* An exclusive status -- only one of them can appear in output */ +#define GPG_STATUS_EXCLUSIVE (1<<0) +/* The status includes key identifier */ +#define GPG_STATUS_KEYID (1<<1) +/* The status includes user identifier */ +#define GPG_STATUS_UID (1<<2) +/* The status includes key fingerprints */ +#define GPG_STATUS_FINGERPRINT (1<<3) + +/* Short-hand for standard exclusive *SIG status with keyid & UID */ +#define GPG_STATUS_STDSIG (GPG_STATUS_EXCLUSIVE|GPG_STATUS_KEYID|GPG_STATUS_UID) + static struct { char result; const char *check; + unsigned int flags; } sigcheck_gpg_status[] = { - { 'G', "\n[GNUPG:] GOODSIG " }, - { 'B', "\n[GNUPG:] BADSIG " }, - { 'U', "\n[GNUPG:] TRUST_NEVER" }, - { 'U', "\n[GNUPG:] TRUST_UNDEFINED" }, - { 'E', "\n[GNUPG:] ERRSIG "}, - { 'X', "\n[GNUPG:] EXPSIG "}, - { 'Y', "\n[GNUPG:] EXPKEYSIG "}, - { 'R', "\n[GNUPG:] REVKEYSIG "}, + { 'G', "GOODSIG ", GPG_STATUS_STDSIG }, + { 'B', "BADSIG ", GPG_STATUS_STDSIG }, + { 'U', "TRUST_NEVER", 0 }, + { 'U', "TRUST_UNDEFINED", 0 }, + { 'E', "ERRSIG ", GPG_STATUS_EXCLUSIVE|GPG_STATUS_KEYID }, + { 'X', "EXPSIG ", GPG_STATUS_STDSIG }, + { 'Y', "EXPKEYSIG ", GPG_STATUS_STDSIG }, + { 'R', "REVKEYSIG ", GPG_STATUS_STDSIG }, + { 0, "VALIDSIG ", GPG_STATUS_FINGERPRINT }, }; static void parse_gpg_output(struct signature_check *sigc) { const char *buf = sigc->gpg_status; - int i; - - /* Iterate over all search strings */ - for (i = 0; i < ARRAY_SIZE(sigcheck_gpg_status); i++) { - const char *found, *next; - - if (!skip_prefix(buf, sigcheck_gpg_status[i].check + 1, &found)) { - found = strstr(buf, sigcheck_gpg_status[i].check); - if (!found) - continue; - found += strlen(sigcheck_gpg_status[i].check); - } - sigc->result = sigcheck_gpg_status[i].result; - /* The trust messages are not followed by key/signer information */ - if (sigc->result != 'U') { - next = strchrnul(found, ' '); - sigc->key = xmemdupz(found, next - found); - /* The ERRSIG message is not followed by signer information */ - if (*next && sigc-> result != 'E') { - found = next + 1; - next = strchrnul(found, '\n'); - sigc->signer = xmemdupz(found, next - found); + const char *line, *next; + int i, j; + int seen_exclusive_status = 0; + + /* Iterate over all lines */ + for (line = 
buf; *line; line = strchrnul(line+1, '\n')) { + while (*line == '\n') + line++; + /* Skip lines that don't start with GNUPG status */ + if (!skip_prefix(line, "[GNUPG:] ", &line)) + continue; + + /* Iterate over all search strings */ + for (i = 0; i < ARRAY_SIZE(sigcheck_gpg_status); i++) { + if (skip_prefix(line, sigcheck_gpg_status[i].check, &line)) { + if (sigcheck_gpg_status[i].flags & GPG_STATUS_EXCLUSIVE) { + if (seen_exclusive_status++) + goto found_duplicate_status; + } + + if (sigcheck_gpg_status[i].result) + sigc->result = sigcheck_gpg_status[i].result; + /* Do we have key information? */ + if (sigcheck_gpg_status[i].flags & GPG_STATUS_KEYID) { + next = strchrnul(line, ' '); + free(sigc->key); + sigc->key = xmemdupz(line, next - line); + /* Do we have signer information? */ + if (*next && (sigcheck_gpg_status[i].flags & GPG_STATUS_UID)) { + line = next + 1; + next = strchrnul(line, '\n'); + free(sigc->signer); + sigc->signer = xmemdupz(line, next - line); + } + } + /* Do we have fingerprint? */ + if (sigcheck_gpg_status[i].flags & GPG_STATUS_FINGERPRINT) { + next = strchrnul(line, ' '); + free(sigc->fingerprint); + sigc->fingerprint = xmemdupz(line, next - line); + + /* Skip interim fields */ + for (j = 9; j > 0; j--) { + if (!*next) + break; + line = next + 1; + next = strchrnul(line, ' '); + } + + next = strchrnul(line, '\n'); + free(sigc->primary_key_fingerprint); + sigc->primary_key_fingerprint = xmemdupz(line, next - line); + } + + break; } } } + return; + +found_duplicate_status: + /* + * GOODSIG, BADSIG etc. can occur only once for each signature. + * Therefore, if we had more than one then we're dealing with multiple + * signatures. We don't support them currently, and they're rather + * hard to create, so something is likely fishy and we should reject + * them altogether. + */ + sigc->result = 'E'; + /* Clear partial data to avoid confusion */ + FREE_AND_NULL(sigc->primary_key_fingerprint); + FREE_AND_NULL(sigc->fingerprint); + FREE_AND_NULL(sigc->signer); + FREE_AND_NULL(sigc->key); } int check_signature(const char *payload, size_t plen, const char *signature, diff --git a/gpg-interface.h b/gpg-interface.h index acf50c4610..3e624ec289 100644 --- a/gpg-interface.h +++ b/gpg-interface.h @@ -23,6 +23,8 @@ struct signature_check { char result; char *signer; char *key; + char *fingerprint; + char *primary_key_fingerprint; }; void signature_check_clear(struct signature_check *sigc); @@ -11,7 +11,8 @@ #include "help.h" static int grep_source_load(struct grep_source *gs); -static int grep_source_is_binary(struct grep_source *gs); +static int grep_source_is_binary(struct grep_source *gs, + struct index_state *istate); static struct grep_opt grep_defaults; @@ -42,7 +43,7 @@ static void color_set(char *dst, const char *color_bytes) * We could let the compiler do this, but without C99 initializers * the code gets unwieldy and unreadable, so... */ -void init_grep_defaults(void) +void init_grep_defaults(struct repository *repo) { struct grep_opt *opt = &grep_defaults; static int run_once; @@ -52,6 +53,7 @@ void init_grep_defaults(void) run_once++; memset(opt, 0, sizeof(*opt)); + opt->repo = repo; opt->relative = 1; opt->pathname = 1; opt->max_depth = -1; @@ -149,12 +151,13 @@ int grep_config(const char *var, const char *value, void *cb) * default values from the template we read the configuration * information in an earlier call to git_config(grep_config). 
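The rewritten parse_gpg_output() above walks the "[GNUPG:] " status lines one at a time and, besides the key id and signer, now records the signature fingerprint and the trailing primary-key fingerprint from VALIDSIG, rejecting input that carries more than one exclusive *SIG status. A rough sketch of the status lines it consumes and one way to inspect the new fields, assuming the %GF/%GP pretty-format placeholders wired up elsewhere in this series:

    # Status lines as emitted by gpg --status-fd (illustrative values):
    #   [GNUPG:] GOODSIG <long-key-id> A U Thor <author@example.com>
    #   [GNUPG:] VALIDSIG <sig-fingerprint> ... <primary-key-fingerprint>
    git log -1 --show-signature HEAD
    git log -1 --pretty='format:%G? %GK %GF %GP%n' HEAD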
*/ -void grep_init(struct grep_opt *opt, const char *prefix) +void grep_init(struct grep_opt *opt, struct repository *repo, const char *prefix) { struct grep_opt *def = &grep_defaults; int i; memset(opt, 0, sizeof(*opt)); + opt->repo = repo; opt->prefix = prefix; opt->prefix_length = (prefix && *prefix) ? strlen(prefix) : 0; opt->pattern_tail = &opt->pattern_list; @@ -1510,7 +1513,6 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol, } } -#ifndef NO_PTHREADS int grep_use_locks; /* @@ -1536,16 +1538,11 @@ static inline void grep_attr_unlock(void) */ pthread_mutex_t grep_read_mutex; -#else -#define grep_attr_lock() -#define grep_attr_unlock() -#endif - static int match_funcname(struct grep_opt *opt, struct grep_source *gs, char *bol, char *eol) { xdemitconf_t *xecfg = opt->priv; if (xecfg && !xecfg->find_func) { - grep_source_load_driver(gs); + grep_source_load_driver(gs, opt->repo->index); if (gs->driver->funcname.pattern) { const struct userdiff_funcname *pe = &gs->driver->funcname; xdiff_set_find_func(xecfg, pe->pattern, pe->cflags); @@ -1708,7 +1705,8 @@ static int look_ahead(struct grep_opt *opt, return 0; } -static int fill_textconv_grep(struct userdiff_driver *driver, +static int fill_textconv_grep(struct repository *r, + struct userdiff_driver *driver, struct grep_source *gs) { struct diff_filespec *df; @@ -1741,7 +1739,7 @@ static int fill_textconv_grep(struct userdiff_driver *driver, * structure. */ grep_read_lock(); - size = fill_textconv(driver, df, &buf); + size = fill_textconv(r, driver, df, &buf); grep_read_unlock(); free_filespec(df); @@ -1801,7 +1799,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle opt->last_shown = 0; if (opt->allow_textconv) { - grep_source_load_driver(gs); + grep_source_load_driver(gs, opt->repo->index); /* * We might set up the shared textconv cache data here, which * is not thread-safe. 
@@ -1818,11 +1816,11 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle if (!textconv) { switch (opt->binary) { case GREP_BINARY_DEFAULT: - if (grep_source_is_binary(gs)) + if (grep_source_is_binary(gs, opt->repo->index)) binary_match_only = 1; break; case GREP_BINARY_NOMATCH: - if (grep_source_is_binary(gs)) + if (grep_source_is_binary(gs, opt->repo->index)) return 0; /* Assume unmatch */ break; case GREP_BINARY_TEXT: @@ -1837,7 +1835,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle try_lookahead = should_lookahead(opt); - if (fill_textconv_grep(textconv, gs) < 0) + if (fill_textconv_grep(opt->repo, textconv, gs) < 0) return 0; bol = gs->buf; @@ -2168,22 +2166,24 @@ static int grep_source_load(struct grep_source *gs) BUG("invalid grep_source type to load"); } -void grep_source_load_driver(struct grep_source *gs) +void grep_source_load_driver(struct grep_source *gs, + struct index_state *istate) { if (gs->driver) return; grep_attr_lock(); if (gs->path) - gs->driver = userdiff_find_by_path(gs->path); + gs->driver = userdiff_find_by_path(istate, gs->path); if (!gs->driver) gs->driver = userdiff_find_by_name("default"); grep_attr_unlock(); } -static int grep_source_is_binary(struct grep_source *gs) +static int grep_source_is_binary(struct grep_source *gs, + struct index_state *istate) { - grep_source_load_driver(gs); + grep_source_load_driver(gs, istate); if (gs->driver->binary != -1) return gs->driver->binary; @@ -36,6 +36,8 @@ typedef int pcre2_jit_stack; #include "thread-utils.h" #include "userdiff.h" +struct repository; + enum grep_pat_token { GREP_PATTERN, GREP_PATTERN_HEAD, @@ -136,6 +138,7 @@ struct grep_opt { struct grep_pat *header_list; struct grep_pat **header_tail; struct grep_expr *pattern_expression; + struct repository *repo; const char *prefix; int prefix_length; regex_t regexp; @@ -183,9 +186,9 @@ struct grep_opt { void *output_priv; }; -extern void init_grep_defaults(void); +extern void init_grep_defaults(struct repository *); extern int grep_config(const char *var, const char *value, void *); -extern void grep_init(struct grep_opt *, const char *prefix); +extern void grep_init(struct grep_opt *, struct repository *repo, const char *prefix); void grep_commit_pattern_type(enum grep_pattern_type, struct grep_opt *opt); extern void append_grep_pat(struct grep_opt *opt, const char *pat, size_t patlen, const char *origin, int no, enum grep_pat_token t); @@ -217,7 +220,8 @@ void grep_source_init(struct grep_source *gs, enum grep_source_type type, const void *identifier); void grep_source_clear_data(struct grep_source *gs); void grep_source_clear(struct grep_source *gs); -void grep_source_load_driver(struct grep_source *gs); +void grep_source_load_driver(struct grep_source *gs, + struct index_state *istate); int grep_source(struct grep_opt *opt, struct grep_source *gs); @@ -225,7 +229,6 @@ int grep_source(struct grep_opt *opt, struct grep_source *gs); extern struct grep_opt *grep_opt_dup(const struct grep_opt *opt); extern int grep_threads_ok(const struct grep_opt *opt); -#ifndef NO_PTHREADS /* * Mutex used around access to the attributes machinery if * opt->use_threads. Must be initialized/destroyed by callers! 
@@ -246,9 +249,4 @@ static inline void grep_read_unlock(void) pthread_mutex_unlock(&grep_read_mutex); } -#else -#define grep_read_lock() -#define grep_read_unlock() -#endif - #endif @@ -98,7 +98,8 @@ static int cmd_name_cmp(const void *elem1, const void *elem2) return strcmp(e1->name, e2->name); } -static void print_cmd_by_category(const struct category_description *catdesc) +static void print_cmd_by_category(const struct category_description *catdesc, + int *longest_p) { struct cmdname_help *cmds; int longest = 0; @@ -124,6 +125,8 @@ static void print_cmd_by_category(const struct category_description *catdesc) print_command_list(cmds, mask, longest); } free(cmds); + if (longest_p) + *longest_p = longest; } void add_cmdname(struct cmdnames *cmds, const char *name, int len) @@ -307,7 +310,7 @@ void list_commands(unsigned int colopts, void list_common_cmds_help(void) { puts(_("These are common Git commands used in various situations:")); - print_cmd_by_category(common_categories); + print_cmd_by_category(common_categories, NULL); } void list_all_main_cmds(struct string_list *list) @@ -405,7 +408,7 @@ void list_common_guides_help(void) { CAT_guide, N_("The common Git guides are:") }, { 0, NULL } }; - print_cmd_by_category(catdesc); + print_cmd_by_category(catdesc, NULL); putchar('\n'); } @@ -494,9 +497,48 @@ void list_config_help(int for_human) string_list_clear(&keys, 0); } +static int get_alias(const char *var, const char *value, void *data) +{ + struct string_list *list = data; + + if (skip_prefix(var, "alias.", &var)) + string_list_append(list, var)->util = xstrdup(value); + + return 0; +} + void list_all_cmds_help(void) { - print_cmd_by_category(main_categories); + struct string_list others = STRING_LIST_INIT_DUP; + struct string_list alias_list = STRING_LIST_INIT_DUP; + struct cmdname_help *aliases; + int i, longest; + + printf_ln(_("See 'git help <command>' to read about a specific subcommand")); + print_cmd_by_category(main_categories, &longest); + + list_all_other_cmds(&others); + if (others.nr) + printf("\n%s\n", _("External commands")); + for (i = 0; i < others.nr; i++) + printf(" %s\n", others.items[i].string); + string_list_clear(&others, 0); + + git_config(get_alias, &alias_list); + string_list_sort(&alias_list); + if (alias_list.nr) { + printf("\n%s\n", _("Command aliases")); + ALLOC_ARRAY(aliases, alias_list.nr + 1); + for (i = 0; i < alias_list.nr; i++) { + aliases[i].name = alias_list.items[i].string; + aliases[i].help = alias_list.items[i].util; + aliases[i].category = 1; + } + aliases[alias_list.nr].name = NULL; + print_command_list(aliases, 1, longest); + free(aliases); + } + string_list_clear(&alias_list, 1); } int is_in_cmdlist(struct cmdnames *c, const char *s) diff --git a/http-backend.c b/http-backend.c index 458642ef72..9e894f197f 100644 --- a/http-backend.c +++ b/http-backend.c @@ -595,13 +595,13 @@ static void get_info_packs(struct strbuf *hdr, char *arg) size_t cnt = 0; select_getanyfile(hdr); - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if (p->pack_local) cnt++; } strbuf_grow(&buf, cnt * 53 + 2); - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if (p->pack_local) strbuf_addf(&buf, "P %s\n", p->pack_name + objdirlen + 6); } diff --git a/http-push.c b/http-push.c index 5eaf551b51..cd48590912 100644 --- a/http-push.c +++ b/http-push.c @@ -14,7 +14,7 @@ #include "argv-array.h" #include "packfile.h" #include 
"object-store.h" - +#include "commit-reach.h" #ifdef EXPAT_NEEDS_XMLPARSE_H #include <xmlparse.h> @@ -365,7 +365,7 @@ static void start_put(struct transfer_request *request) git_zstream stream; unpacked = read_object_file(&request->obj->oid, &type, &len); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1; + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1; /* Set it up */ git_deflate_init(&stream, zlib_compression_level); @@ -1859,7 +1859,7 @@ int cmd_main(int argc, const char **argv) continue; } - if (!oidcmp(&ref->old_oid, &ref->peer_ref->new_oid)) { + if (oideq(&ref->old_oid, &ref->peer_ref->new_oid)) { if (push_verbosely) fprintf(stderr, "'%s': up-to-date\n", ref->name); if (helper_status) @@ -1925,7 +1925,7 @@ int cmd_main(int argc, const char **argv) if (!push_all && !is_null_oid(&ref->old_oid)) argv_array_pushf(&commit_argv, "^%s", oid_to_hex(&ref->old_oid)); - init_revisions(&revs, setup_git_directory()); + repo_init_revisions(the_repository, &revs, setup_git_directory()); setup_revisions(commit_argv.argc, commit_argv.argv, &revs, NULL); revs.edge_hint = 0; /* just in case */ diff --git a/http-walker.c b/http-walker.c index 7cdfb2f24c..b3334bf657 100644 --- a/http-walker.c +++ b/http-walker.c @@ -483,7 +483,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1) list_for_each(pos, head) { obj_req = list_entry(pos, struct object_request, node); - if (!hashcmp(obj_req->oid.hash, sha1)) + if (hasheq(obj_req->oid.hash, sha1)) break; } if (obj_req == NULL) @@ -543,7 +543,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1) } else if (req->zret != Z_STREAM_END) { walker->corrupt_object_found++; ret = error("File %s (%s) corrupt", hex, req->url); - } else if (hashcmp(obj_req->oid.hash, req->real_sha1)) { + } else if (!hasheq(obj_req->oid.hash, req->real_sha1)) { ret = error("File %s has bad hash", hex); } else if (req->rename < 0) { struct strbuf buf = STRBUF_INIT; @@ -155,6 +155,16 @@ static struct active_request_slot *active_queue_head; static char *cached_accept_language; +static char *http_ssl_backend; + +static int http_schannel_check_revoke = 1; +/* + * With the backend being set to `schannel`, setting sslCAinfo would override + * the Certificate Store in cURL v7.60.0 and later, which is not what we want + * by default. 
+ */ +static int http_schannel_use_ssl_cainfo; + size_t fread_buffer(char *ptr, size_t eltsize, size_t nmemb, void *buffer_) { size_t size = eltsize * nmemb; @@ -302,6 +312,22 @@ static int http_options(const char *var, const char *value, void *cb) curl_ssl_try = git_config_bool(var, value); return 0; } + if (!strcmp("http.sslbackend", var)) { + free(http_ssl_backend); + http_ssl_backend = xstrdup_or_null(value); + return 0; + } + + if (!strcmp("http.schannelcheckrevoke", var)) { + http_schannel_check_revoke = git_config_bool(var, value); + return 0; + } + + if (!strcmp("http.schannelusesslcainfo", var)) { + http_schannel_use_ssl_cainfo = git_config_bool(var, value); + return 0; + } + if (!strcmp("http.minsessions", var)) { min_curl_sessions = git_config_int(var, value); #ifndef USE_CURL_MULTI @@ -803,6 +829,15 @@ static CURL *get_curl_handle(void) } #endif + if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) && + !http_schannel_check_revoke) { +#if LIBCURL_VERSION_NUM >= 0x072c00 + curl_easy_setopt(result, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NO_REVOKE); +#else + warning(_("CURLSSLOPT_NO_REVOKE not supported with cURL < 7.44.0")); +#endif + } + if (http_proactive_auth) init_curl_http_auth(result); @@ -844,7 +879,13 @@ static CURL *get_curl_handle(void) if (ssl_pinnedkey != NULL) curl_easy_setopt(result, CURLOPT_PINNEDPUBLICKEY, ssl_pinnedkey); #endif - if (ssl_cainfo != NULL) + if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) && + !http_schannel_use_ssl_cainfo) { + curl_easy_setopt(result, CURLOPT_CAINFO, NULL); +#if LIBCURL_VERSION_NUM >= 0x073400 + curl_easy_setopt(result, CURLOPT_PROXY_CAINFO, NULL); +#endif + } else if (ssl_cainfo != NULL) curl_easy_setopt(result, CURLOPT_CAINFO, ssl_cainfo); if (curl_low_speed_limit > 0 && curl_low_speed_time > 0) { @@ -866,8 +907,7 @@ static CURL *get_curl_handle(void) curl_easy_setopt(result, CURLOPT_PROTOCOLS, get_curl_allowed_protocols(-1)); #else - warning("protocol restrictions not applied to curl redirects because\n" - "your curl version is too old (>= 7.19.4)"); + warning(_("Protocol restrictions not supported with cURL < 7.19.4")); #endif if (getenv("GIT_CURL_VERBOSE")) curl_easy_setopt(result, CURLOPT_VERBOSE, 1L); @@ -995,6 +1035,33 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) git_config(urlmatch_config_entry, &config); free(normalized_url); +#if LIBCURL_VERSION_NUM >= 0x073800 + if (http_ssl_backend) { + const curl_ssl_backend **backends; + struct strbuf buf = STRBUF_INIT; + int i; + + switch (curl_global_sslset(-1, http_ssl_backend, &backends)) { + case CURLSSLSET_UNKNOWN_BACKEND: + strbuf_addf(&buf, _("Unsupported SSL backend '%s'. " + "Supported SSL backends:"), + http_ssl_backend); + for (i = 0; backends[i]; i++) + strbuf_addf(&buf, "\n\t%s", backends[i]->name); + die("%s", buf.buf); + case CURLSSLSET_NO_BACKENDS: + die(_("Could not set SSL backend to '%s': " + "cURL was built without SSL backends"), + http_ssl_backend); + case CURLSSLSET_TOO_LATE: + die(_("Could not set SSL backend to '%s': already set"), + http_ssl_backend); + case CURLSSLSET_OK: + break; /* Okay! 
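The new http.* keys parsed above are ordinary configuration; a minimal sketch of how they would be used (the schannel backend only exists in cURL builds that ship it, e.g. Git for Windows):

    # choose the cURL SSL backend once per process; an unknown name makes Git
    # die and list the backends this libcurl actually supports
    git config --global http.sslBackend schannel
    # schannel-specific toggles from the same hunk
    git config --global http.schannelCheckRevoke false   # skip certificate revocation checks
    git config --global http.schannelUseSSLCAInfo true   # let http.sslCAInfo override the Windows cert store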
*/ + } + } +#endif + if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) die("curl_global_init failed"); @@ -2394,7 +2461,7 @@ int finish_http_object_request(struct http_object_request *freq) unlink_or_warn(freq->tmpfile.buf); return -1; } - if (hashcmp(freq->sha1, freq->real_sha1)) { + if (!hasheq(freq->sha1, freq->real_sha1)) { unlink_or_warn(freq->tmpfile.buf); return -1; } @@ -168,6 +168,9 @@ const char *ident_default_email(void) strbuf_addstr(&git_default_email, email); committer_ident_explicitly_given |= IDENT_MAIL_GIVEN; author_ident_explicitly_given |= IDENT_MAIL_GIVEN; + } else if ((email = query_user_email()) && email[0]) { + strbuf_addstr(&git_default_email, email); + free((char *)email); } else copy_email(xgetpwuid_self(&default_email_is_bogus), &git_default_email, &default_email_is_bogus); diff --git a/interdiff.c b/interdiff.c new file mode 100644 index 0000000000..c81d680a6c --- /dev/null +++ b/interdiff.c @@ -0,0 +1,28 @@ +#include "cache.h" +#include "commit.h" +#include "revision.h" +#include "interdiff.h" + +static struct strbuf *idiff_prefix_cb(struct diff_options *opt, void *data) +{ + return data; +} + +void show_interdiff(struct rev_info *rev, int indent) +{ + struct diff_options opts; + struct strbuf prefix = STRBUF_INIT; + + memcpy(&opts, &rev->diffopt, sizeof(opts)); + opts.output_format = DIFF_FORMAT_PATCH; + opts.output_prefix = idiff_prefix_cb; + strbuf_addchars(&prefix, ' ', indent); + opts.output_prefix_data = &prefix; + diff_setup_done(&opts); + + diff_tree_oid(rev->idiff_oid1, rev->idiff_oid2, "", &opts); + diffcore_std(&opts); + diff_flush(&opts); + + strbuf_release(&prefix); +} diff --git a/interdiff.h b/interdiff.h new file mode 100644 index 0000000000..01c730a5c9 --- /dev/null +++ b/interdiff.h @@ -0,0 +1,8 @@ +#ifndef INTERDIFF_H +#define INTERDIFF_H + +struct rev_info; + +void show_interdiff(struct rev_info *, int indent); + +#endif diff --git a/json-writer.h b/json-writer.h index fc18acc7d9..83906b09c1 100644 --- a/json-writer.h +++ b/json-writer.h @@ -42,6 +42,8 @@ * of the given strings. */ +#include "strbuf.h" + struct json_writer { /* @@ -82,11 +82,16 @@ static const double __ac_HASH_UPPER = 0.77; SCOPE kh_##name##_t *kh_init_##name(void) { \ return (kh_##name##_t*)xcalloc(1, sizeof(kh_##name##_t)); \ } \ + SCOPE void kh_release_##name(kh_##name##_t *h) \ + { \ + free(h->flags); \ + free((void *)h->keys); \ + free((void *)h->vals); \ + } \ SCOPE void kh_destroy_##name(kh_##name##_t *h) \ { \ if (h) { \ - free((void *)h->keys); free(h->flags); \ - free((void *)h->vals); \ + kh_release_##name(h); \ free(h); \ } \ } \ @@ -229,7 +234,7 @@ static const double __ac_HASH_UPPER = 0.77; __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) #define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ - KHASH_INIT2(name, static inline, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) + KHASH_INIT2(name, MAYBE_UNUSED static inline, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) /* Other convenient macros... 
*/ diff --git a/line-log.c b/line-log.c index 72a5fed661..d1d429d738 100644 --- a/line-log.c +++ b/line-log.c @@ -508,7 +508,9 @@ static void fill_blob_sha1(struct commit *commit, struct diff_filespec *spec) return; } -static void fill_line_ends(struct diff_filespec *spec, long *lines, +static void fill_line_ends(struct repository *r, + struct diff_filespec *spec, + long *lines, unsigned long **line_ends) { int num = 0, size = 50; @@ -516,7 +518,7 @@ static void fill_line_ends(struct diff_filespec *spec, long *lines, unsigned long *ends = NULL; char *data = NULL; - if (diff_populate_filespec(spec, 0)) + if (diff_populate_filespec(r, spec, 0)) die("Cannot read blob %s", oid_to_hex(&spec->oid)); ALLOC_ARRAY(ends, size); @@ -555,7 +557,8 @@ static const char *nth_line(void *data, long line) } static struct line_log_data * -parse_lines(struct commit *commit, const char *prefix, struct string_list *args) +parse_lines(struct repository *r, struct commit *commit, + const char *prefix, struct string_list *args) { long lines = 0; unsigned long *ends = NULL; @@ -571,7 +574,7 @@ parse_lines(struct commit *commit, const char *prefix, struct string_list *args) long begin = 0, end = 0; long anchor; - name_part = skip_range_arg(item->string); + name_part = skip_range_arg(item->string, r->index); if (!name_part || *name_part != ':' || !name_part[1]) die("-L argument not 'start,end:file' or ':funcname:file': %s", item->string); @@ -583,7 +586,7 @@ parse_lines(struct commit *commit, const char *prefix, struct string_list *args) spec = alloc_filespec(full_name); fill_blob_sha1(commit, spec); - fill_line_ends(spec, &lines, &ends); + fill_line_ends(r, spec, &lines, &ends); cb_data.spec = spec; cb_data.lines = lines; cb_data.line_ends = ends; @@ -596,7 +599,7 @@ parse_lines(struct commit *commit, const char *prefix, struct string_list *args) if (parse_range_arg(range_part, nth_line, &cb_data, lines, anchor, &begin, &end, - full_name)) + full_name, r->index)) die("malformed -L argument '%s'", range_part); if ((!lines && (begin || end)) || lines < begin) die("file %s has only %lu lines", name_part, lines); @@ -739,7 +742,7 @@ void line_log_init(struct rev_info *rev, const char *prefix, struct string_list struct line_log_data *range; commit = check_single_commit(rev); - range = parse_lines(commit, prefix, args); + range = parse_lines(rev->diffopt.repo, commit, prefix, args); add_line_range(rev, commit, range); if (!rev->diffopt.detect_rename) { @@ -891,8 +894,8 @@ static void dump_diff_hacky_one(struct rev_info *rev, struct line_log_data *rang return; if (pair->one->oid_valid) - fill_line_ends(pair->one, &p_lines, &p_ends); - fill_line_ends(pair->two, &t_lines, &t_ends); + fill_line_ends(rev->diffopt.repo, pair->one, &p_lines, &p_ends); + fill_line_ends(rev->diffopt.repo, pair->two, &t_lines, &t_ends); fprintf(opt->file, "%s%sdiff --git a/%s b/%s%s\n", prefix, c_meta, pair->one->path, pair->two->path, c_reset); fprintf(opt->file, "%s%s--- %s%s%s\n", prefix, c_meta, @@ -1008,12 +1011,12 @@ static int process_diff_filepair(struct rev_info *rev, return 0; assert(pair->two->oid_valid); - diff_populate_filespec(pair->two, 0); + diff_populate_filespec(rev->diffopt.repo, pair->two, 0); file_target.ptr = pair->two->data; file_target.size = pair->two->size; if (pair->one->oid_valid) { - diff_populate_filespec(pair->one, 0); + diff_populate_filespec(rev->diffopt.repo, pair->one, 0); file_parent.ptr = pair->one->data; file_parent.size = pair->one->size; } else { diff --git a/line-range.c b/line-range.c index 
232c3909ec..9b50583dc0 100644 --- a/line-range.c +++ b/line-range.c @@ -163,9 +163,10 @@ static const char *find_funcname_matching_regexp(xdemitconf_t *xecfg, const char } } -static const char *parse_range_funcname(const char *arg, nth_line_fn_t nth_line_cb, - void *cb_data, long lines, long anchor, long *begin, long *end, - const char *path) +static const char *parse_range_funcname( + const char *arg, nth_line_fn_t nth_line_cb, + void *cb_data, long lines, long anchor, long *begin, long *end, + const char *path, struct index_state *istate) { char *pattern; const char *term; @@ -198,7 +199,7 @@ static const char *parse_range_funcname(const char *arg, nth_line_fn_t nth_line_ anchor--; /* input is in human terms */ start = nth_line_cb(cb_data, anchor); - drv = userdiff_find_by_path(path); + drv = userdiff_find_by_path(istate, path); if (drv && drv->funcname.pattern) { const struct userdiff_funcname *pe = &drv->funcname; xecfg = xcalloc(1, sizeof(*xecfg)); @@ -244,7 +245,8 @@ static const char *parse_range_funcname(const char *arg, nth_line_fn_t nth_line_ int parse_range_arg(const char *arg, nth_line_fn_t nth_line_cb, void *cb_data, long lines, long anchor, - long *begin, long *end, const char *path) + long *begin, long *end, + const char *path, struct index_state *istate) { *begin = *end = 0; @@ -254,7 +256,9 @@ int parse_range_arg(const char *arg, nth_line_fn_t nth_line_cb, anchor = lines + 1; if (*arg == ':' || (*arg == '^' && *(arg + 1) == ':')) { - arg = parse_range_funcname(arg, nth_line_cb, cb_data, lines, anchor, begin, end, path); + arg = parse_range_funcname(arg, nth_line_cb, cb_data, + lines, anchor, begin, end, + path, istate); if (!arg || *arg) return -1; return 0; @@ -275,10 +279,12 @@ int parse_range_arg(const char *arg, nth_line_fn_t nth_line_cb, return 0; } -const char *skip_range_arg(const char *arg) +const char *skip_range_arg(const char *arg, struct index_state *istate) { if (*arg == ':' || (*arg == '^' && *(arg + 1) == ':')) - return parse_range_funcname(arg, NULL, NULL, 0, 0, NULL, NULL, NULL); + return parse_range_funcname(arg, NULL, NULL, + 0, 0, NULL, NULL, + NULL, istate); arg = parse_loc(arg, NULL, NULL, 0, -1, NULL); diff --git a/line-range.h b/line-range.h index d3c54e45aa..e69bf7c017 100644 --- a/line-range.h +++ b/line-range.h @@ -1,6 +1,8 @@ #ifndef LINE_RANGE_H #define LINE_RANGE_H +struct index_state; + /* * Parse one item in an -L begin,end option w.r.t. the notional file * object 'cb_data' consisting of 'lines' lines. @@ -23,7 +25,7 @@ int parse_range_arg(const char *arg, nth_line_fn_t nth_line_cb, void *cb_data, long lines, long anchor, long *begin, long *end, - const char *path); + const char *path, struct index_state *istate); /* * Scan past a range argument that could be parsed by @@ -34,6 +36,6 @@ int parse_range_arg(const char *arg, * NULL in case the argument is obviously malformed. 
*/ -const char *skip_range_arg(const char *arg); +const char *skip_range_arg(const char *arg, struct index_state *istate); #endif /* LINE_RANGE_H */ diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c index c0e2bd6a06..e8da2e8581 100644 --- a/list-objects-filter-options.c +++ b/list-objects-filter-options.c @@ -30,7 +30,6 @@ static int gently_parse_list_objects_filter( if (filter_options->choice) { if (errbuf) { - strbuf_init(errbuf, 0); strbuf_addstr( errbuf, _("multiple filter-specs cannot be combined")); @@ -50,6 +49,19 @@ static int gently_parse_list_objects_filter( return 0; } + } else if (skip_prefix(arg, "tree:", &v0)) { + unsigned long depth; + if (!git_parse_ulong(v0, &depth) || depth != 0) { + if (errbuf) { + strbuf_addstr( + errbuf, + _("only 'tree:0' is supported")); + } + return 1; + } + filter_options->choice = LOFC_TREE_NONE; + return 0; + } else if (skip_prefix(arg, "sparse:oid=", &v0)) { struct object_context oc; struct object_id sparse_oid; @@ -71,10 +83,9 @@ static int gently_parse_list_objects_filter( return 0; } - if (errbuf) { - strbuf_init(errbuf, 0); + if (errbuf) strbuf_addf(errbuf, "invalid filter-spec '%s'", arg); - } + memset(filter_options, 0, sizeof(*filter_options)); return 1; } diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h index 0000a61f82..af64e5c66f 100644 --- a/list-objects-filter-options.h +++ b/list-objects-filter-options.h @@ -10,6 +10,7 @@ enum list_objects_filter_choice { LOFC_DISABLED = 0, LOFC_BLOB_NONE, LOFC_BLOB_LIMIT, + LOFC_TREE_NONE, LOFC_SPARSE_OID, LOFC_SPARSE_PATH, LOFC__COUNT /* must be last */ diff --git a/list-objects-filter.c b/list-objects-filter.c index a0ba78b20c..765f3df3b0 100644 --- a/list-objects-filter.c +++ b/list-objects-filter.c @@ -44,8 +44,7 @@ static enum list_objects_filter_result filter_blobs_none( switch (filter_situation) { default: - die("unknown filter_situation"); - return LOFR_ZERO; + BUG("unknown filter_situation: %d", filter_situation); case LOFS_BEGIN_TREE: assert(obj->type == OBJ_TREE); @@ -81,6 +80,61 @@ static void *filter_blobs_none__init( } /* + * A filter for list-objects to omit ALL trees and blobs from the traversal. + * Can OPTIONALLY collect a list of the omitted OIDs. + */ +struct filter_trees_none_data { + struct oidset *omits; +}; + +static enum list_objects_filter_result filter_trees_none( + enum list_objects_filter_situation filter_situation, + struct object *obj, + const char *pathname, + const char *filename, + void *filter_data_) +{ + struct filter_trees_none_data *filter_data = filter_data_; + + switch (filter_situation) { + default: + BUG("unknown filter_situation: %d", filter_situation); + + case LOFS_BEGIN_TREE: + case LOFS_BLOB: + if (filter_data->omits) { + oidset_insert(filter_data->omits, &obj->oid); + /* _MARK_SEEN but not _DO_SHOW (hard omit) */ + return LOFR_MARK_SEEN; + } else { + /* + * Not collecting omits so no need to traverse tree. + */ + return LOFR_SKIP_TREE | LOFR_MARK_SEEN; + } + + case LOFS_END_TREE: + assert(obj->type == OBJ_TREE); + return LOFR_ZERO; + + } +} + +static void* filter_trees_none__init( + struct oidset *omitted, + struct list_objects_filter_options *filter_options, + filter_object_fn *filter_fn, + filter_free_fn *filter_free_fn) +{ + struct filter_trees_none_data *d = xcalloc(1, sizeof(*d)); + d->omits = omitted; + + *filter_fn = filter_trees_none; + *filter_free_fn = free; + return d; +} + +/* + * A filter for list-objects to omit large blobs. + * And to OPTIONALLY collect a list of the omitted OIDs. 
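With the parsing above, the filter-spec language gains 'tree:0' (only a depth of 0 is accepted for now), which omits every tree and blob from the object walk. A hedged sketch; partial clones additionally need a server that permits filters (uploadpack.allowFilter):

    # local object walk: commits only, all trees and blobs omitted
    git rev-list --objects --filter=tree:0 HEAD
    # any other depth is rejected with the new "only 'tree:0' is supported" error
    git rev-list --objects --filter=tree:1 HEAD
    # the same spec over the wire, as a partial clone
    git clone --no-checkout --filter=tree:0 https://example.com/repo.git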
*/ @@ -102,8 +156,7 @@ static enum list_objects_filter_result filter_blobs_limit( switch (filter_situation) { default: - die("unknown filter_situation"); - return LOFR_ZERO; + BUG("unknown filter_situation: %d", filter_situation); case LOFS_BEGIN_TREE: assert(obj->type == OBJ_TREE); @@ -208,8 +261,7 @@ static enum list_objects_filter_result filter_sparse( switch (filter_situation) { default: - die("unknown filter_situation"); - return LOFR_ZERO; + BUG("unknown filter_situation: %d", filter_situation); case LOFS_BEGIN_TREE: assert(obj->type == OBJ_TREE); @@ -374,6 +426,7 @@ static filter_init_fn s_filters[] = { NULL, filter_blobs_none__init, filter_blobs_limit__init, + filter_trees_none__init, filter_sparse_oid__init, filter_sparse_path__init, }; @@ -389,7 +442,7 @@ void *list_objects_filter__init( assert((sizeof(s_filters) / sizeof(s_filters[0])) == LOFC__COUNT); if (filter_options->choice >= LOFC__COUNT) - die("invalid list-objects filter choice: %d", + BUG("invalid list-objects filter choice: %d", filter_options->choice); init_fn = s_filters[filter_options->choice]; diff --git a/list-objects-filter.h b/list-objects-filter.h index a6f6b4990b..52b4a84da9 100644 --- a/list-objects-filter.h +++ b/list-objects-filter.h @@ -24,6 +24,11 @@ struct oidset; * In general, objects should only be shown once, but * this result DOES NOT imply that we mark it SEEN. * + * _SKIP_TREE : Used in LOFS_BEGIN_TREE situation - indicates that + * the tree's children should not be iterated over. This + * is used as an optimization when all children will + * definitely be ignored. + * * Most of the time, you want the combination (_MARK_SEEN | _DO_SHOW) * but they can be used independently, such as when sparse-checkout * pattern matching is being applied. @@ -45,6 +50,7 @@ enum list_objects_filter_result { LOFR_ZERO = 0, LOFR_MARK_SEEN = 1<<0, LOFR_DO_SHOW = 1<<1, + LOFR_SKIP_TREE = 1<<2, }; enum list_objects_filter_situation { diff --git a/list-objects.c b/list-objects.c index c99c47ac18..c41cc80db5 100644 --- a/list-objects.c +++ b/list-objects.c @@ -11,21 +11,27 @@ #include "list-objects-filter-options.h" #include "packfile.h" #include "object-store.h" +#include "trace.h" -static void process_blob(struct rev_info *revs, +struct traversal_context { + struct rev_info *revs; + show_object_fn show_object; + show_commit_fn show_commit; + void *show_data; + filter_object_fn filter_fn; + void *filter_data; +}; + +static void process_blob(struct traversal_context *ctx, struct blob *blob, - show_object_fn show, struct strbuf *path, - const char *name, - void *cb_data, - filter_object_fn filter_fn, - void *filter_data) + const char *name) { struct object *obj = &blob->object; size_t pathlen; enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW; - if (!revs->blob_objects) + if (!ctx->revs->blob_objects) return; if (!obj) die("bad blob object"); @@ -41,21 +47,21 @@ static void process_blob(struct rev_info *revs, * may cause the actual filter to report an incomplete list * of missing objects. 
*/ - if (revs->exclude_promisor_objects && + if (ctx->revs->exclude_promisor_objects && !has_object_file(&obj->oid) && is_promisor_object(&obj->oid)) return; pathlen = path->len; strbuf_addstr(path, name); - if (!(obj->flags & USER_GIVEN) && filter_fn) - r = filter_fn(LOFS_BLOB, obj, - path->buf, &path->buf[pathlen], - filter_data); + if ((obj->flags & NOT_USER_GIVEN) && ctx->filter_fn) + r = ctx->filter_fn(LOFS_BLOB, obj, + path->buf, &path->buf[pathlen], + ctx->filter_data); if (r & LOFR_MARK_SEEN) obj->flags |= SEEN; if (r & LOFR_DO_SHOW) - show(obj, path->buf, cb_data); + ctx->show_object(obj, path->buf, ctx->show_data); strbuf_setlen(path, pathlen); } @@ -81,34 +87,66 @@ static void process_blob(struct rev_info *revs, * the link, and how to do it. Whether it necessarily makes * any sense what-so-ever to ever do that is another issue. */ -static void process_gitlink(struct rev_info *revs, +static void process_gitlink(struct traversal_context *ctx, const unsigned char *sha1, - show_object_fn show, struct strbuf *path, - const char *name, - void *cb_data) + const char *name) { /* Nothing to do */ } -static void process_tree(struct rev_info *revs, +static void process_tree(struct traversal_context *ctx, struct tree *tree, - show_object_fn show, struct strbuf *base, - const char *name, - void *cb_data, - filter_object_fn filter_fn, - void *filter_data) + const char *name); + +static void process_tree_contents(struct traversal_context *ctx, + struct tree *tree, + struct strbuf *base) { - struct object *obj = &tree->object; struct tree_desc desc; struct name_entry entry; - enum interesting match = revs->diffopt.pathspec.nr == 0 ? - all_entries_interesting: entry_not_interesting; + enum interesting match = ctx->revs->diffopt.pathspec.nr == 0 ? + all_entries_interesting : entry_not_interesting; + + init_tree_desc(&desc, tree->buffer, tree->size); + + while (tree_entry(&desc, &entry)) { + if (match != all_entries_interesting) { + match = tree_entry_interesting(&entry, base, 0, + &ctx->revs->diffopt.pathspec); + if (match == all_entries_not_interesting) + break; + if (match == entry_not_interesting) + continue; + } + + if (S_ISDIR(entry.mode)) { + struct tree *t = lookup_tree(the_repository, entry.oid); + t->object.flags |= NOT_USER_GIVEN; + process_tree(ctx, t, base, entry.path); + } + else if (S_ISGITLINK(entry.mode)) + process_gitlink(ctx, entry.oid->hash, + base, entry.path); + else { + struct blob *b = lookup_blob(the_repository, entry.oid); + b->object.flags |= NOT_USER_GIVEN; + process_blob(ctx, b, base, entry.path); + } + } +} + +static void process_tree(struct traversal_context *ctx, + struct tree *tree, + struct strbuf *base, + const char *name) +{ + struct object *obj = &tree->object; + struct rev_info *revs = ctx->revs; int baselen = base->len; enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW; - int gently = revs->ignore_missing_links || - revs->exclude_promisor_objects; + int failed_parse; if (!revs->tree_objects) return; @@ -116,7 +154,9 @@ static void process_tree(struct rev_info *revs, die("bad tree object"); if (obj->flags & (UNINTERESTING | SEEN)) return; - if (parse_tree_gently(tree, gently) < 0) { + + failed_parse = parse_tree_gently(tree, 1); + if (failed_parse) { if (revs->ignore_missing_links) return; @@ -129,57 +169,35 @@ static void process_tree(struct rev_info *revs, is_promisor_object(&obj->oid)) return; - die("bad tree object %s", oid_to_hex(&obj->oid)); + if (!revs->do_not_die_on_missing_tree) + die("bad tree object %s", oid_to_hex(&obj->oid)); } 
strbuf_addstr(base, name); - if (!(obj->flags & USER_GIVEN) && filter_fn) - r = filter_fn(LOFS_BEGIN_TREE, obj, - base->buf, &base->buf[baselen], - filter_data); + if ((obj->flags & NOT_USER_GIVEN) && ctx->filter_fn) + r = ctx->filter_fn(LOFS_BEGIN_TREE, obj, + base->buf, &base->buf[baselen], + ctx->filter_data); if (r & LOFR_MARK_SEEN) obj->flags |= SEEN; if (r & LOFR_DO_SHOW) - show(obj, base->buf, cb_data); + ctx->show_object(obj, base->buf, ctx->show_data); if (base->len) strbuf_addch(base, '/'); - init_tree_desc(&desc, tree->buffer, tree->size); + if (r & LOFR_SKIP_TREE) + trace_printf("Skipping contents of tree %s...\n", base->buf); + else if (!failed_parse) + process_tree_contents(ctx, tree, base); - while (tree_entry(&desc, &entry)) { - if (match != all_entries_interesting) { - match = tree_entry_interesting(&entry, base, 0, - &revs->diffopt.pathspec); - if (match == all_entries_not_interesting) - break; - if (match == entry_not_interesting) - continue; - } - - if (S_ISDIR(entry.mode)) - process_tree(revs, - lookup_tree(the_repository, entry.oid), - show, base, entry.path, - cb_data, filter_fn, filter_data); - else if (S_ISGITLINK(entry.mode)) - process_gitlink(revs, entry.oid->hash, - show, base, entry.path, - cb_data); - else - process_blob(revs, - lookup_blob(the_repository, entry.oid), - show, base, entry.path, - cb_data, filter_fn, filter_data); - } - - if (!(obj->flags & USER_GIVEN) && filter_fn) { - r = filter_fn(LOFS_END_TREE, obj, - base->buf, &base->buf[baselen], - filter_data); + if ((obj->flags & NOT_USER_GIVEN) && ctx->filter_fn) { + r = ctx->filter_fn(LOFS_END_TREE, obj, + base->buf, &base->buf[baselen], + ctx->filter_data); if (r & LOFR_MARK_SEEN) obj->flags |= SEEN; if (r & LOFR_DO_SHOW) - show(obj, base->buf, cb_data); + ctx->show_object(obj, base->buf, ctx->show_data); } strbuf_setlen(base, baselen); @@ -196,7 +214,7 @@ static void mark_edge_parents_uninteresting(struct commit *commit, struct commit *parent = parents->item; if (!(parent->object.flags & UNINTERESTING)) continue; - mark_tree_uninteresting(get_commit_tree(parent)); + mark_tree_uninteresting(revs->repo, get_commit_tree(parent)); if (revs->edge_hint && !(parent->object.flags & SHOWN)) { parent->object.flags |= SHOWN; show_edge(parent); @@ -213,7 +231,8 @@ void mark_edges_uninteresting(struct rev_info *revs, show_edge_fn show_edge) struct commit *commit = list->item; if (commit->object.flags & UNINTERESTING) { - mark_tree_uninteresting(get_commit_tree(commit)); + mark_tree_uninteresting(revs->repo, + get_commit_tree(commit)); if (revs->edge_hint_aggressive && !(commit->object.flags & SHOWN)) { commit->object.flags |= SHOWN; show_edge(commit); @@ -228,7 +247,8 @@ void mark_edges_uninteresting(struct rev_info *revs, show_edge_fn show_edge) struct commit *commit = (struct commit *)obj; if (obj->type != OBJ_COMMIT || !(obj->flags & UNINTERESTING)) continue; - mark_tree_uninteresting(get_commit_tree(commit)); + mark_tree_uninteresting(revs->repo, + get_commit_tree(commit)); if (!(obj->flags & SHOWN)) { obj->flags |= SHOWN; show_edge(commit); @@ -242,19 +262,15 @@ static void add_pending_tree(struct rev_info *revs, struct tree *tree) add_pending_object(revs, &tree->object, ""); } -static void traverse_trees_and_blobs(struct rev_info *revs, - struct strbuf *base, - show_object_fn show_object, - void *show_data, - filter_object_fn filter_fn, - void *filter_data) +static void traverse_trees_and_blobs(struct traversal_context *ctx, + struct strbuf *base) { int i; assert(base->len == 0); - for (i = 0; i < 
revs->pending.nr; i++) { - struct object_array_entry *pending = revs->pending.objects + i; + for (i = 0; i < ctx->revs->pending.nr; i++) { + struct object_array_entry *pending = ctx->revs->pending.objects + i; struct object *obj = pending->item; const char *name = pending->name; const char *path = pending->path; @@ -262,62 +278,52 @@ static void traverse_trees_and_blobs(struct rev_info *revs, continue; if (obj->type == OBJ_TAG) { obj->flags |= SEEN; - show_object(obj, name, show_data); + ctx->show_object(obj, name, ctx->show_data); continue; } if (!path) path = ""; if (obj->type == OBJ_TREE) { - process_tree(revs, (struct tree *)obj, show_object, - base, path, show_data, - filter_fn, filter_data); + process_tree(ctx, (struct tree *)obj, base, path); continue; } if (obj->type == OBJ_BLOB) { - process_blob(revs, (struct blob *)obj, show_object, - base, path, show_data, - filter_fn, filter_data); + process_blob(ctx, (struct blob *)obj, base, path); continue; } die("unknown pending object %s (%s)", oid_to_hex(&obj->oid), name); } - object_array_clear(&revs->pending); + object_array_clear(&ctx->revs->pending); } -static void do_traverse(struct rev_info *revs, - show_commit_fn show_commit, - show_object_fn show_object, - void *show_data, - filter_object_fn filter_fn, - void *filter_data) +static void do_traverse(struct traversal_context *ctx) { struct commit *commit; struct strbuf csp; /* callee's scratch pad */ strbuf_init(&csp, PATH_MAX); - while ((commit = get_revision(revs)) != NULL) { + while ((commit = get_revision(ctx->revs)) != NULL) { /* * an uninteresting boundary commit may not have its tree * parsed yet, but we are not going to show them anyway */ - if (get_commit_tree(commit)) - add_pending_tree(revs, get_commit_tree(commit)); - show_commit(commit, show_data); + if (get_commit_tree(commit)) { + struct tree *tree = get_commit_tree(commit); + tree->object.flags |= NOT_USER_GIVEN; + add_pending_tree(ctx->revs, tree); + } + ctx->show_commit(commit, ctx->show_data); - if (revs->tree_blobs_in_commit_order) + if (ctx->revs->tree_blobs_in_commit_order) /* * NEEDSWORK: Adding the tree and then flushing it here * needs a reallocation for each commit. Can we pass the * tree directory without allocation churn? 
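Because LOFR_SKIP_TREE short-circuits process_tree_contents() and the skip is reported via trace_printf(), the effect of the new filter is observable; a small sketch, assuming GIT_TRACE routed to stderr:

    # surfaces the "Skipping contents of tree ..." messages added above
    GIT_TRACE=1 git rev-list --objects --filter=tree:0 HEAD 2>&1 | grep Skipping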
*/ - traverse_trees_and_blobs(revs, &csp, - show_object, show_data, - filter_fn, filter_data); + traverse_trees_and_blobs(ctx, &csp); } - traverse_trees_and_blobs(revs, &csp, - show_object, show_data, - filter_fn, filter_data); + traverse_trees_and_blobs(ctx, &csp); strbuf_release(&csp); } @@ -326,7 +332,14 @@ void traverse_commit_list(struct rev_info *revs, show_object_fn show_object, void *show_data) { - do_traverse(revs, show_commit, show_object, show_data, NULL, NULL); + struct traversal_context ctx; + ctx.revs = revs; + ctx.show_commit = show_commit; + ctx.show_object = show_object; + ctx.show_data = show_data; + ctx.filter_fn = NULL; + ctx.filter_data = NULL; + do_traverse(&ctx); } void traverse_commit_list_filtered( @@ -337,14 +350,18 @@ void traverse_commit_list_filtered( void *show_data, struct oidset *omitted) { - filter_object_fn filter_fn = NULL; + struct traversal_context ctx; filter_free_fn filter_free_fn = NULL; - void *filter_data = NULL; - - filter_data = list_objects_filter__init(omitted, filter_options, - &filter_fn, &filter_free_fn); - do_traverse(revs, show_commit, show_object, show_data, - filter_fn, filter_data); - if (filter_data && filter_free_fn) - filter_free_fn(filter_data); + + ctx.revs = revs; + ctx.show_object = show_object; + ctx.show_commit = show_commit; + ctx.show_data = show_data; + ctx.filter_fn = NULL; + + ctx.filter_data = list_objects_filter__init(omitted, filter_options, + &ctx.filter_fn, &filter_free_fn); + do_traverse(&ctx); + if (ctx.filter_data && filter_free_fn) + filter_free_fn(ctx.filter_data); } diff --git a/ll-merge.c b/ll-merge.c index 1936fee9e1..3c8fb917e9 100644 --- a/ll-merge.c +++ b/ll-merge.c @@ -336,10 +336,10 @@ static const struct ll_merge_driver *find_ll_merge_driver(const char *merge_attr return &ll_merge_drv[LL_TEXT_MERGE]; } -static void normalize_file(mmfile_t *mm, const char *path) +static void normalize_file(mmfile_t *mm, const char *path, struct index_state *istate) { struct strbuf strbuf = STRBUF_INIT; - if (renormalize_buffer(&the_index, path, mm->ptr, mm->size, &strbuf)) { + if (renormalize_buffer(istate, path, mm->ptr, mm->size, &strbuf)) { free(mm->ptr); mm->size = strbuf.len; mm->ptr = strbuf_detach(&strbuf, NULL); @@ -351,6 +351,7 @@ int ll_merge(mmbuffer_t *result_buf, mmfile_t *ancestor, const char *ancestor_label, mmfile_t *ours, const char *our_label, mmfile_t *theirs, const char *their_label, + struct index_state *istate, const struct ll_merge_options *opts) { static struct attr_check *check; @@ -363,15 +364,15 @@ int ll_merge(mmbuffer_t *result_buf, opts = &default_opts; if (opts->renormalize) { - normalize_file(ancestor, path); - normalize_file(ours, path); - normalize_file(theirs, path); + normalize_file(ancestor, path, istate); + normalize_file(ours, path, istate); + normalize_file(theirs, path, istate); } if (!check) check = attr_check_initl("merge", "conflict-marker-size", NULL); - git_check_attr(&the_index, path, check); + git_check_attr(istate, path, check); ll_driver_name = check->items[0].value; if (check->items[1].value) { marker_size = atoi(check->items[1].value); @@ -390,14 +391,14 @@ int ll_merge(mmbuffer_t *result_buf, opts, marker_size); } -int ll_merge_marker_size(const char *path) +int ll_merge_marker_size(struct index_state *istate, const char *path) { static struct attr_check *check; int marker_size = DEFAULT_CONFLICT_MARKER_SIZE; if (!check) check = attr_check_initl("conflict-marker-size", NULL); - git_check_attr(&the_index, path, check); + git_check_attr(istate, path, check); if 
(check->items[0].value) { marker_size = atoi(check->items[0].value); if (marker_size <= 0) diff --git a/ll-merge.h b/ll-merge.h index b72b19921e..6c6e22e40d 100644 --- a/ll-merge.h +++ b/ll-merge.h @@ -7,6 +7,8 @@ #include "xdiff/xdiff.h" +struct index_state; + struct ll_merge_options { unsigned virtual_ancestor : 1; unsigned variant : 2; /* favor ours, favor theirs, or union merge */ @@ -19,8 +21,9 @@ int ll_merge(mmbuffer_t *result_buf, mmfile_t *ancestor, const char *ancestor_label, mmfile_t *ours, const char *our_label, mmfile_t *theirs, const char *their_label, + struct index_state *istate, const struct ll_merge_options *opts); -int ll_merge_marker_size(const char *path); +int ll_merge_marker_size(struct index_state *istate, const char *path); #endif diff --git a/log-tree.c b/log-tree.c index 7443e5fcc7..7a83e99250 100644 --- a/log-tree.c +++ b/log-tree.c @@ -15,6 +15,8 @@ #include "sequencer.h" #include "line-log.h" #include "help.h" +#include "interdiff.h" +#include "range-diff.h" static struct decoration name_decoration = { "object names" }; static int decoration_loaded; @@ -472,7 +474,7 @@ static int which_parent(const struct object_id *oid, const struct commit *commit const struct commit_list *parent; for (nth = 0, parent = commit->parents; parent; parent = parent->next) { - if (!oidcmp(&parent->item->object.oid, oid)) + if (oideq(&parent->item->object.oid, oid)) return nth; nth++; } @@ -506,8 +508,8 @@ static int show_one_mergetag(struct commit *commit, if (parse_tag_buffer(the_repository, tag, extra->value, extra->len)) strbuf_addstr(&verify_message, "malformed mergetag\n"); else if (is_common_merge(commit) && - !oidcmp(&tag->tagged->oid, - &commit->parents->next->item->object.oid)) + oideq(&tag->tagged->oid, + &commit->parents->next->item->object.oid)) strbuf_addf(&verify_message, "merged tag '%s'\n", tag->tag); else if ((nth = which_parent(&tag->tagged->oid, commit)) < 0) @@ -542,6 +544,16 @@ static int show_mergetag(struct rev_info *opt, struct commit *commit) return for_each_mergetag(show_one_mergetag, commit, opt); } +static void next_commentary_block(struct rev_info *opt, struct strbuf *sb) +{ + const char *x = opt->shown_dashes ? 
"\n" : "---\n"; + if (sb) + strbuf_addstr(sb, x); + else + fputs(x, opt->diffopt.file); + opt->shown_dashes = 1; +} + void show_log(struct rev_info *opt) { struct strbuf msgbuf = STRBUF_INIT; @@ -699,10 +711,8 @@ void show_log(struct rev_info *opt) if ((ctx.fmt != CMIT_FMT_USERFORMAT) && ctx.notes_message && *ctx.notes_message) { - if (cmit_fmt_is_mail(ctx.fmt)) { - strbuf_addstr(&msgbuf, "---\n"); - opt->shown_dashes = 1; - } + if (cmit_fmt_is_mail(ctx.fmt)) + next_commentary_block(opt, &msgbuf); strbuf_addstr(&msgbuf, ctx.notes_message); } @@ -729,6 +739,33 @@ void show_log(struct rev_info *opt) strbuf_release(&msgbuf); free(ctx.notes_message); + + if (cmit_fmt_is_mail(ctx.fmt) && opt->idiff_oid1) { + struct diff_queue_struct dq; + + memcpy(&dq, &diff_queued_diff, sizeof(diff_queued_diff)); + DIFF_QUEUE_CLEAR(&diff_queued_diff); + + next_commentary_block(opt, NULL); + fprintf_ln(opt->diffopt.file, "%s", opt->idiff_title); + show_interdiff(opt, 2); + + memcpy(&diff_queued_diff, &dq, sizeof(diff_queued_diff)); + } + + if (cmit_fmt_is_mail(ctx.fmt) && opt->rdiff1) { + struct diff_queue_struct dq; + + memcpy(&dq, &diff_queued_diff, sizeof(diff_queued_diff)); + DIFF_QUEUE_CLEAR(&diff_queued_diff); + + next_commentary_block(opt, NULL); + fprintf_ln(opt->diffopt.file, "%s", opt->rdiff_title); + show_range_diff(opt->rdiff1, opt->rdiff2, + opt->creation_factor, 1, &opt->diffopt); + + memcpy(&diff_queued_diff, &dq, sizeof(diff_queued_diff)); + } } int log_tree_diff_flush(struct rev_info *opt) @@ -766,9 +803,10 @@ int log_tree_diff_flush(struct rev_info *opt) /* * We may have shown three-dashes line early - * between notes and the log message, in which - * case we only want a blank line after the - * notes without (an extra) three-dashes line. + * between generated commentary (notes, etc.) + * and the log message, in which case we only + * want a blank line after the commentary + * without (an extra) three-dashes line. * Otherwise, we show the three-dashes line if * we are showing the patch with diffstat, but * in that case, there is no extra blank line diff --git a/mailinfo.c b/mailinfo.c index 3281a37d51..b395adbdf2 100644 --- a/mailinfo.c +++ b/mailinfo.c @@ -237,11 +237,22 @@ static int slurp_attr(const char *line, const char *name, struct strbuf *attr) return 1; } +static int has_attr_value(const char *line, const char *name, const char *value) +{ + struct strbuf sb = STRBUF_INIT; + int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value); + strbuf_release(&sb); + return rc; +} + static void handle_content_type(struct mailinfo *mi, struct strbuf *line) { struct strbuf *boundary = xmalloc(sizeof(struct strbuf)); strbuf_init(boundary, line->len); + mi->format_flowed = has_attr_value(line->buf, "format=", "flowed"); + mi->delsp = has_attr_value(line->buf, "delsp=", "yes"); + if (slurp_attr(line->buf, "boundary=", boundary)) { strbuf_insert(boundary, 0, "--", 2); if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) { @@ -964,6 +975,52 @@ again: return 1; } +static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line, + struct strbuf *prev) +{ + size_t len = line->len; + const char *rest; + + if (!mi->format_flowed) { + handle_filter(mi, line); + return; + } + + if (line->buf[len - 1] == '\n') { + len--; + if (len && line->buf[len - 1] == '\r') + len--; + } + + /* Keep signature separator as-is. 
*/ + if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) { + if (prev->len) { + handle_filter(mi, prev); + strbuf_reset(prev); + } + handle_filter(mi, line); + return; + } + + /* Unstuff space-stuffed line. */ + if (len && line->buf[0] == ' ') { + strbuf_remove(line, 0, 1); + len--; + } + + /* Save flowed line for later, but without the soft line break. */ + if (len && line->buf[len - 1] == ' ') { + strbuf_add(prev, line->buf, len - !!mi->delsp); + return; + } + + /* Prepend any previous partial lines */ + strbuf_insert(line, 0, prev->buf, prev->len); + strbuf_reset(prev); + + handle_filter(mi, line); +} + static void handle_body(struct mailinfo *mi, struct strbuf *line) { struct strbuf prev = STRBUF_INIT; @@ -1012,7 +1069,7 @@ static void handle_body(struct mailinfo *mi, struct strbuf *line) strbuf_addbuf(&prev, sb); break; } - handle_filter(mi, sb); + handle_filter_flowed(mi, sb, &prev); } /* * The partial chunk is saved in "prev" and will be @@ -1022,13 +1079,16 @@ static void handle_body(struct mailinfo *mi, struct strbuf *line) break; } default: - handle_filter(mi, line); + handle_filter_flowed(mi, line, &prev); } if (mi->input_error) break; } while (!strbuf_getwholeline(line, mi->input, '\n')); + if (prev.len) + handle_filter(mi, &prev); + flush_inbody_header_accum(mi); handle_body_out: diff --git a/mailinfo.h b/mailinfo.h index 766c03dd1d..6830e1e625 100644 --- a/mailinfo.h +++ b/mailinfo.h @@ -22,6 +22,8 @@ struct mailinfo { struct strbuf *content[MAX_BOUNDARIES]; struct strbuf **content_top; struct strbuf charset; + unsigned int format_flowed:1; + unsigned int delsp:1; char *message_id; enum { TE_DONTCARE, TE_QP, TE_BASE64 diff --git a/match-trees.c b/match-trees.c index 37653308d3..2b6d31ef9d 100644 --- a/match-trees.c +++ b/match-trees.c @@ -106,7 +106,7 @@ static int score_trees(const struct object_id *hash1, const struct object_id *ha update_tree_entry(&two); } else { /* path appears in both */ - if (oidcmp(one.entry.oid, two.entry.oid)) { + if (!oideq(one.entry.oid, two.entry.oid)) { /* they are different */ score += score_differs(one.entry.mode, two.entry.mode, diff --git a/merge-blobs.c b/merge-blobs.c index fabb8c19ce..ee0a0e90c9 100644 --- a/merge-blobs.c +++ b/merge-blobs.c @@ -29,7 +29,12 @@ static void free_mmfile(mmfile_t *f) free(f->ptr); } -static void *three_way_filemerge(const char *path, mmfile_t *base, mmfile_t *our, mmfile_t *their, unsigned long *size) +static void *three_way_filemerge(struct index_state *istate, + const char *path, + mmfile_t *base, + mmfile_t *our, + mmfile_t *their, + unsigned long *size) { int merge_status; mmbuffer_t res; @@ -41,7 +46,8 @@ static void *three_way_filemerge(const char *path, mmfile_t *base, mmfile_t *our * common ancestor. 
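handle_filter_flowed() above implements RFC 3676 reflowing: a trailing space marks a soft line break (with delsp=yes the space itself is dropped too), space-stuffed lines are unstuffed, and the "-- " signature separator is passed through untouched. A minimal sketch with git mailinfo; note the deliberate trailing space after "soft-wrapped", which is the soft break:

    printf '%s\n' \
        'From: A U Thor <author@example.com>' \
        'Subject: format=flowed demo' \
        'Content-Type: text/plain; format=flowed' \
        '' \
        'This line is soft-wrapped ' \
        'across two physical lines.' |
    git mailinfo msg patch
    # msg now contains the rejoined logical line
    cat msg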
*/ merge_status = ll_merge(&res, path, base, NULL, - our, ".our", their, ".their", NULL); + our, ".our", their, ".their", + istate, NULL); if (merge_status < 0) return NULL; @@ -49,7 +55,9 @@ static void *three_way_filemerge(const char *path, mmfile_t *base, mmfile_t *our return res.ptr; } -void *merge_blobs(const char *path, struct blob *base, struct blob *our, struct blob *their, unsigned long *size) +void *merge_blobs(struct index_state *istate, const char *path, + struct blob *base, struct blob *our, + struct blob *their, unsigned long *size) { void *res = NULL; mmfile_t f1, f2, common; @@ -82,7 +90,7 @@ void *merge_blobs(const char *path, struct blob *base, struct blob *our, struct common.ptr = xstrdup(""); common.size = 0; } - res = three_way_filemerge(path, &common, &f1, &f2, size); + res = three_way_filemerge(istate, path, &common, &f1, &f2, size); free_mmfile(&common); out_free_f2_f1: free_mmfile(&f2); diff --git a/merge-blobs.h b/merge-blobs.h index 62b569e472..cc31038b80 100644 --- a/merge-blobs.h +++ b/merge-blobs.h @@ -1,8 +1,11 @@ #ifndef MERGE_BLOBS_H #define MERGE_BLOBS_H -#include "blob.h" +struct blob; +struct index_state; -extern void *merge_blobs(const char *, struct blob *, struct blob *, struct blob *, unsigned long *); +extern void *merge_blobs(struct index_state *, const char *, + struct blob *, struct blob *, + struct blob *, unsigned long *); #endif /* MERGE_BLOBS_H */ diff --git a/merge-recursive.c b/merge-recursive.c index 1b5c255918..acc2f64a4e 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -27,6 +27,7 @@ #include "dir.h" #include "submodule.h" #include "revision.h" +#include "commit-reach.h" struct path_hashmap_entry { struct hashmap_entry e; @@ -156,7 +157,7 @@ static struct tree *shift_tree_object(struct tree *one, struct tree *two, shift_tree_by(&one->object.oid, &two->object.oid, &shifted, subtree_shift); } - if (!oidcmp(&two->object.oid, &shifted)) + if (oideq(&two->object.oid, &shifted)) return two; return lookup_tree(the_repository, &shifted); } @@ -179,7 +180,7 @@ static int oid_eq(const struct object_id *a, const struct object_id *b) { if (!a && !b) return 2; - return a && b && oidcmp(a, b) == 0; + return a && b && oideq(a, b); } enum rename_type { @@ -227,7 +228,26 @@ static inline void setup_rename_conflict_info(enum rename_type rename_type, struct stage_data *src_entry1, struct stage_data *src_entry2) { - struct rename_conflict_info *ci = xcalloc(1, sizeof(struct rename_conflict_info)); + struct rename_conflict_info *ci; + + /* + * When we have two renames involved, it's easiest to get the + * correct things into stage 2 and 3, and to make sure that the + * content merge puts HEAD before the other branch if we just + * ensure that branch1 == o->branch1. So, simply flip arguments + * around if we don't have that. 
+ */ + if (dst_entry2 && branch1 != o->branch1) { + setup_rename_conflict_info(rename_type, + pair2, pair1, + branch2, branch1, + dst_entry2, dst_entry1, + o, + src_entry2, src_entry1); + return; + } + + ci = xcalloc(1, sizeof(struct rename_conflict_info)); ci->rename_type = rename_type; ci->pair1 = pair1; ci->branch1 = branch1; @@ -1083,7 +1103,8 @@ static int merge_3way(struct merge_options *o, read_mmblob(&src2, &b->oid); merge_status = ll_merge(result_buf, a->path, &orig, base_name, - &src1, name1, &src2, name2, &ll_opts); + &src1, name1, &src2, name2, + &the_index, &ll_opts); free(base_name); free(name1); @@ -1114,7 +1135,7 @@ static int find_first_merges(struct object_array *result, const char *path, /* get all revisions that merge commit a */ xsnprintf(merged_revision, sizeof(merged_revision), "^%s", oid_to_hex(&a->object.oid)); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); rev_opts.submodule = path; /* FIXME: can't handle linked worktrees in submodules yet */ revs.single_worktree = path != NULL; @@ -1273,15 +1294,26 @@ static int merge_submodule(struct merge_options *o, return 0; } -static int merge_file_1(struct merge_options *o, - const struct diff_filespec *one, - const struct diff_filespec *a, - const struct diff_filespec *b, - const char *filename, - const char *branch1, - const char *branch2, - struct merge_file_info *result) +static int merge_mode_and_contents(struct merge_options *o, + const struct diff_filespec *one, + const struct diff_filespec *a, + const struct diff_filespec *b, + const char *filename, + const char *branch1, + const char *branch2, + struct merge_file_info *result) { + if (o->branch1 != branch1) { + /* + * It's weird getting a reverse merge with HEAD on the bottom + * side of the conflict markers and the other branch on the + * top. Fix that. + */ + return merge_mode_and_contents(o, one, b, a, + filename, + branch2, branch1, result); + } + result->merge = 0; result->clean = 1; @@ -1365,56 +1397,6 @@ static int merge_file_1(struct merge_options *o, return 0; } -static int merge_file_special_markers(struct merge_options *o, - const struct diff_filespec *one, - const struct diff_filespec *a, - const struct diff_filespec *b, - const char *target_filename, - const char *branch1, - const char *filename1, - const char *branch2, - const char *filename2, - struct merge_file_info *mfi) -{ - char *side1 = NULL; - char *side2 = NULL; - int ret; - - if (filename1) - side1 = xstrfmt("%s:%s", branch1, filename1); - if (filename2) - side2 = xstrfmt("%s:%s", branch2, filename2); - - ret = merge_file_1(o, one, a, b, target_filename, - side1 ? side1 : branch1, - side2 ? 
side2 : branch2, mfi); - - free(side1); - free(side2); - return ret; -} - -static int merge_file_one(struct merge_options *o, - const char *path, - const struct object_id *o_oid, int o_mode, - const struct object_id *a_oid, int a_mode, - const struct object_id *b_oid, int b_mode, - const char *branch1, - const char *branch2, - struct merge_file_info *mfi) -{ - struct diff_filespec one, a, b; - - one.path = a.path = b.path = (char *)path; - oidcpy(&one.oid, o_oid); - one.mode = o_mode; - oidcpy(&a.oid, a_oid); - a.mode = a_mode; - oidcpy(&b.oid, b_oid); - b.mode = b_mode; - return merge_file_1(o, &one, &a, &b, path, branch1, branch2, mfi); -} - static int handle_rename_via_dir(struct merge_options *o, struct diff_filepair *pair, const char *rename_branch, @@ -1658,11 +1640,8 @@ static int handle_rename_rename_1to2(struct merge_options *o, struct merge_file_info mfi; struct diff_filespec other; struct diff_filespec *add; - if (merge_file_one(o, one->path, - &one->oid, one->mode, - &a->oid, a->mode, - &b->oid, b->mode, - ci->branch1, ci->branch2, &mfi)) + if (merge_mode_and_contents(o, one, a, b, one->path, + ci->branch1, ci->branch2, &mfi)) return -1; /* @@ -1726,16 +1705,12 @@ static int handle_rename_rename_2to1(struct merge_options *o, remove_file(o, 1, a->path, o->call_depth || would_lose_untracked(a->path)); remove_file(o, 1, b->path, o->call_depth || would_lose_untracked(b->path)); - path_side_1_desc = xstrfmt("%s (was %s)", path, a->path); - path_side_2_desc = xstrfmt("%s (was %s)", path, b->path); - if (merge_file_special_markers(o, a, c1, &ci->ren1_other, - path_side_1_desc, - o->branch1, c1->path, - o->branch2, ci->ren1_other.path, &mfi_c1) || - merge_file_special_markers(o, b, &ci->ren2_other, c2, - path_side_2_desc, - o->branch1, ci->ren2_other.path, - o->branch2, c2->path, &mfi_c2)) + path_side_1_desc = xstrfmt("version of %s from %s", path, a->path); + path_side_2_desc = xstrfmt("version of %s from %s", path, b->path); + if (merge_mode_and_contents(o, a, c1, &ci->ren1_other, path_side_1_desc, + o->branch1, o->branch2, &mfi_c1) || + merge_mode_and_contents(o, b, &ci->ren2_other, c2, path_side_2_desc, + o->branch1, o->branch2, &mfi_c2)) return -1; free(path_side_1_desc); free(path_side_2_desc); @@ -1812,7 +1787,7 @@ static struct diff_queue_struct *get_diffpairs(struct merge_options *o, struct diff_queue_struct *ret; struct diff_options opts; - diff_setup(&opts); + repo_diff_setup(the_repository, &opts); opts.flags.recursive = 1; opts.flags.rename_empty = 0; opts.detect_rename = merge_detect_rename(o); @@ -2765,12 +2740,23 @@ static int process_renames(struct merge_options *o, ren1_dst, branch2); if (o->call_depth) { struct merge_file_info mfi; - if (merge_file_one(o, ren1_dst, &null_oid, 0, - &ren1->pair->two->oid, - ren1->pair->two->mode, - &dst_other.oid, - dst_other.mode, - branch1, branch2, &mfi)) { + struct diff_filespec one, a, b; + + oidcpy(&one.oid, &null_oid); + one.mode = 0; + one.path = ren1->pair->two->path; + + oidcpy(&a.oid, &ren1->pair->two->oid); + a.mode = ren1->pair->two->mode; + a.path = one.path; + + oidcpy(&b.oid, &dst_other.oid); + b.mode = dst_other.mode; + b.path = one.path; + + if (merge_mode_and_contents(o, &one, &a, &b, ren1_dst, + branch1, branch2, + &mfi)) { clean_merge = -1; goto cleanup_and_return; } @@ -3020,13 +3006,13 @@ static int handle_modify_delete(struct merge_options *o, _("modify"), _("modified")); } -static int merge_content(struct merge_options *o, - const char *path, - int is_dirty, - struct object_id *o_oid, int o_mode, - struct 
object_id *a_oid, int a_mode, - struct object_id *b_oid, int b_mode, - struct rename_conflict_info *rename_conflict_info) +static int handle_content_merge(struct merge_options *o, + const char *path, + int is_dirty, + struct object_id *o_oid, int o_mode, + struct object_id *a_oid, int a_mode, + struct object_id *b_oid, int b_mode, + struct rename_conflict_info *rename_conflict_info) { const char *reason = _("content"); const char *path1 = NULL, *path2 = NULL; @@ -3058,14 +3044,16 @@ static int merge_content(struct merge_options *o, path2 = (rename_conflict_info->pair2 || o->branch2 == rename_conflict_info->branch1) ? pair1->two->path : pair1->one->path; + one.path = pair1->one->path; + a.path = (char *)path1; + b.path = (char *)path2; if (dir_in_way(path, !o->call_depth, S_ISGITLINK(pair1->two->mode))) df_conflict_remains = 1; } - if (merge_file_special_markers(o, &one, &a, &b, path, - o->branch1, path1, - o->branch2, path2, &mfi)) + if (merge_mode_and_contents(o, &one, &a, &b, path, + o->branch1, o->branch2, &mfi)) return -1; /* @@ -3156,9 +3144,9 @@ static int handle_rename_normal(struct merge_options *o, struct rename_conflict_info *ci) { /* Merge the content and write it out */ - return merge_content(o, path, was_dirty(o, path), - o_oid, o_mode, a_oid, a_mode, b_oid, b_mode, - ci); + return handle_content_merge(o, path, was_dirty(o, path), + o_oid, o_mode, a_oid, a_mode, b_oid, b_mode, + ci); } /* Per entry merge function */ @@ -3282,9 +3270,11 @@ static int process_entry(struct merge_options *o, /* Case C: Added in both (check for same permissions) and */ /* case D: Modified in both, but differently. */ int is_dirty = 0; /* unpack_trees would have bailed if dirty */ - clean_merge = merge_content(o, path, is_dirty, - o_oid, o_mode, a_oid, a_mode, b_oid, b_mode, - NULL); + clean_merge = handle_content_merge(o, path, is_dirty, + o_oid, o_mode, + a_oid, a_mode, + b_oid, b_mode, + NULL); } else if (!o_oid && !a_oid && !b_oid) { /* * this entry was deleted altogether. a_mode == 0 means @@ -14,7 +14,8 @@ static const char *merge_argument(struct commit *commit) return oid_to_hex(commit ? 
&commit->object.oid : the_hash_algo->empty_tree); } -int try_merge_command(const char *strategy, size_t xopts_nr, +int try_merge_command(struct repository *r, + const char *strategy, size_t xopts_nr, const char **xopts, struct commit_list *common, const char *head_arg, struct commit_list *remotes) { @@ -35,15 +36,16 @@ int try_merge_command(const char *strategy, size_t xopts_nr, ret = run_command_v_opt(args.argv, RUN_GIT_CMD); argv_array_clear(&args); - discard_cache(); - if (read_cache() < 0) + discard_index(r->index); + if (read_index(r->index) < 0) die(_("failed to read the cache")); - resolve_undo_clear(); + resolve_undo_clear_index(r->index); return ret; } -int checkout_fast_forward(const struct object_id *head, +int checkout_fast_forward(struct repository *r, + const struct object_id *head, const struct object_id *remote, int overwrite_ignore) { @@ -54,7 +56,7 @@ int checkout_fast_forward(const struct object_id *head, struct dir_struct dir; struct lock_file lock_file = LOCK_INIT; - refresh_cache(REFRESH_QUIET); + refresh_index(r->index, REFRESH_QUIET, NULL, NULL, NULL); if (hold_locked_index(&lock_file, LOCK_REPORT_ON_ERROR) < 0) return -1; @@ -86,8 +88,8 @@ int checkout_fast_forward(const struct object_id *head, } opts.head_idx = 1; - opts.src_index = &the_index; - opts.dst_index = &the_index; + opts.src_index = r->index; + opts.dst_index = r->index; opts.update = 1; opts.verbose_update = 1; opts.merge = 1; @@ -101,7 +103,7 @@ int checkout_fast_forward(const struct object_id *head, } clear_unpack_trees_porcelain(&opts); - if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK)) + if (write_locked_index(r->index, &lock_file, COMMIT_LOCK)) return error(_("unable to write new index file")); return 0; } diff --git a/midx.c b/midx.c new file mode 100644 index 0000000000..730ff84dff --- /dev/null +++ b/midx.c @@ -0,0 +1,1027 @@ +#include "cache.h" +#include "config.h" +#include "csum-file.h" +#include "dir.h" +#include "lockfile.h" +#include "packfile.h" +#include "object-store.h" +#include "sha1-lookup.h" +#include "midx.h" +#include "progress.h" + +#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */ +#define MIDX_VERSION 1 +#define MIDX_BYTE_FILE_VERSION 4 +#define MIDX_BYTE_HASH_VERSION 5 +#define MIDX_BYTE_NUM_CHUNKS 6 +#define MIDX_BYTE_NUM_PACKS 8 +#define MIDX_HASH_VERSION 1 +#define MIDX_HEADER_SIZE 12 +#define MIDX_HASH_LEN 20 +#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + MIDX_HASH_LEN) + +#define MIDX_MAX_CHUNKS 5 +#define MIDX_CHUNK_ALIGNMENT 4 +#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */ +#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */ +#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */ +#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */ +#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */ +#define MIDX_CHUNKLOOKUP_WIDTH (sizeof(uint32_t) + sizeof(uint64_t)) +#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256) +#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t)) +#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t)) +#define MIDX_LARGE_OFFSET_NEEDED 0x80000000 + +static char *get_midx_filename(const char *object_dir) +{ + return xstrfmt("%s/pack/multi-pack-index", object_dir); +} + +struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local) +{ + struct multi_pack_index *m = NULL; + int fd; + struct stat st; + size_t midx_size; + void *midx_map = NULL; + uint32_t hash_version; + char *midx_name = get_midx_filename(object_dir); + uint32_t i; + const char *cur_pack_name; + + fd = git_open(midx_name); + + if (fd < 0) 
+ goto cleanup_fail; + if (fstat(fd, &st)) { + error_errno(_("failed to read %s"), midx_name); + goto cleanup_fail; + } + + midx_size = xsize_t(st.st_size); + + if (midx_size < MIDX_MIN_SIZE) { + error(_("multi-pack-index file %s is too small"), midx_name); + goto cleanup_fail; + } + + FREE_AND_NULL(midx_name); + + midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0); + + FLEX_ALLOC_MEM(m, object_dir, object_dir, strlen(object_dir)); + m->fd = fd; + m->data = midx_map; + m->data_len = midx_size; + m->local = local; + + m->signature = get_be32(m->data); + if (m->signature != MIDX_SIGNATURE) + die(_("multi-pack-index signature 0x%08x does not match signature 0x%08x"), + m->signature, MIDX_SIGNATURE); + + m->version = m->data[MIDX_BYTE_FILE_VERSION]; + if (m->version != MIDX_VERSION) + die(_("multi-pack-index version %d not recognized"), + m->version); + + hash_version = m->data[MIDX_BYTE_HASH_VERSION]; + if (hash_version != MIDX_HASH_VERSION) + die(_("hash version %u does not match"), hash_version); + m->hash_len = MIDX_HASH_LEN; + + m->num_chunks = m->data[MIDX_BYTE_NUM_CHUNKS]; + + m->num_packs = get_be32(m->data + MIDX_BYTE_NUM_PACKS); + + for (i = 0; i < m->num_chunks; i++) { + uint32_t chunk_id = get_be32(m->data + MIDX_HEADER_SIZE + + MIDX_CHUNKLOOKUP_WIDTH * i); + uint64_t chunk_offset = get_be64(m->data + MIDX_HEADER_SIZE + 4 + + MIDX_CHUNKLOOKUP_WIDTH * i); + + if (chunk_offset >= m->data_len) + die(_("invalid chunk offset (too large)")); + + switch (chunk_id) { + case MIDX_CHUNKID_PACKNAMES: + m->chunk_pack_names = m->data + chunk_offset; + break; + + case MIDX_CHUNKID_OIDFANOUT: + m->chunk_oid_fanout = (uint32_t *)(m->data + chunk_offset); + break; + + case MIDX_CHUNKID_OIDLOOKUP: + m->chunk_oid_lookup = m->data + chunk_offset; + break; + + case MIDX_CHUNKID_OBJECTOFFSETS: + m->chunk_object_offsets = m->data + chunk_offset; + break; + + case MIDX_CHUNKID_LARGEOFFSETS: + m->chunk_large_offsets = m->data + chunk_offset; + break; + + case 0: + die(_("terminating multi-pack-index chunk id appears earlier than expected")); + break; + + default: + /* + * Do nothing on unrecognized chunks, allowing future + * extensions to add optional chunks. 
+ */ + break; + } + } + + if (!m->chunk_pack_names) + die(_("multi-pack-index missing required pack-name chunk")); + if (!m->chunk_oid_fanout) + die(_("multi-pack-index missing required OID fanout chunk")); + if (!m->chunk_oid_lookup) + die(_("multi-pack-index missing required OID lookup chunk")); + if (!m->chunk_object_offsets) + die(_("multi-pack-index missing required object offsets chunk")); + + m->num_objects = ntohl(m->chunk_oid_fanout[255]); + + m->pack_names = xcalloc(m->num_packs, sizeof(*m->pack_names)); + m->packs = xcalloc(m->num_packs, sizeof(*m->packs)); + + cur_pack_name = (const char *)m->chunk_pack_names; + for (i = 0; i < m->num_packs; i++) { + m->pack_names[i] = cur_pack_name; + + cur_pack_name += strlen(cur_pack_name) + 1; + + if (i && strcmp(m->pack_names[i], m->pack_names[i - 1]) <= 0) + die(_("multi-pack-index pack names out of order: '%s' before '%s'"), + m->pack_names[i - 1], + m->pack_names[i]); + } + + return m; + +cleanup_fail: + free(m); + free(midx_name); + if (midx_map) + munmap(midx_map, midx_size); + if (0 <= fd) + close(fd); + return NULL; +} + +void close_midx(struct multi_pack_index *m) +{ + uint32_t i; + + if (!m) + return; + + munmap((unsigned char *)m->data, m->data_len); + close(m->fd); + m->fd = -1; + + for (i = 0; i < m->num_packs; i++) { + if (m->packs[i]) { + close_pack(m->packs[i]); + free(m->packs[i]); + } + } + FREE_AND_NULL(m->packs); + FREE_AND_NULL(m->pack_names); +} + +int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id) +{ + struct strbuf pack_name = STRBUF_INIT; + + if (pack_int_id >= m->num_packs) + die(_("bad pack-int-id: %u (%u total packs"), + pack_int_id, m->num_packs); + + if (m->packs[pack_int_id]) + return 0; + + strbuf_addf(&pack_name, "%s/pack/%s", m->object_dir, + m->pack_names[pack_int_id]); + + m->packs[pack_int_id] = add_packed_git(pack_name.buf, pack_name.len, m->local); + strbuf_release(&pack_name); + return !m->packs[pack_int_id]; +} + +int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result) +{ + return bsearch_hash(oid->hash, m->chunk_oid_fanout, m->chunk_oid_lookup, + MIDX_HASH_LEN, result); +} + +struct object_id *nth_midxed_object_oid(struct object_id *oid, + struct multi_pack_index *m, + uint32_t n) +{ + if (n >= m->num_objects) + return NULL; + + hashcpy(oid->hash, m->chunk_oid_lookup + m->hash_len * n); + return oid; +} + +static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos) +{ + const unsigned char *offset_data; + uint32_t offset32; + + offset_data = m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH; + offset32 = get_be32(offset_data + sizeof(uint32_t)); + + if (m->chunk_large_offsets && offset32 & MIDX_LARGE_OFFSET_NEEDED) { + if (sizeof(off_t) < sizeof(uint64_t)) + die(_("multi-pack-index stores a 64-bit offset, but off_t is too small")); + + offset32 ^= MIDX_LARGE_OFFSET_NEEDED; + return get_be64(m->chunk_large_offsets + sizeof(uint64_t) * offset32); + } + + return offset32; +} + +static uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos) +{ + return get_be32(m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH); +} + +static int nth_midxed_pack_entry(struct multi_pack_index *m, struct pack_entry *e, uint32_t pos) +{ + uint32_t pack_int_id; + struct packed_git *p; + + if (pos >= m->num_objects) + return 0; + + pack_int_id = nth_midxed_pack_int_id(m, pos); + + if (prepare_midx_pack(m, pack_int_id)) + die(_("error preparing packfile from multi-pack-index")); + p = m->packs[pack_int_id]; + + /* + * We are 
about to tell the caller where they can locate the + * requested object. We better make sure the packfile is + * still here and can be accessed before supplying that + * answer, as it may have been deleted since the MIDX was + * loaded! + */ + if (!is_pack_valid(p)) + return 0; + + if (p->num_bad_objects) { + uint32_t i; + struct object_id oid; + nth_midxed_object_oid(&oid, m, pos); + for (i = 0; i < p->num_bad_objects; i++) + if (hasheq(oid.hash, + p->bad_object_sha1 + the_hash_algo->rawsz * i)) + return 0; + } + + e->offset = nth_midxed_offset(m, pos); + e->p = p; + + return 1; +} + +int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m) +{ + uint32_t pos; + + if (!bsearch_midx(oid, m, &pos)) + return 0; + + return nth_midxed_pack_entry(m, e, pos); +} + +int midx_contains_pack(struct multi_pack_index *m, const char *idx_name) +{ + uint32_t first = 0, last = m->num_packs; + + while (first < last) { + uint32_t mid = first + (last - first) / 2; + const char *current; + int cmp; + + current = m->pack_names[mid]; + cmp = strcmp(idx_name, current); + if (!cmp) + return 1; + if (cmp > 0) { + first = mid + 1; + continue; + } + last = mid; + } + + return 0; +} + +int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local) +{ + struct multi_pack_index *m; + struct multi_pack_index *m_search; + int config_value; + static int env_value = -1; + + if (env_value < 0) + env_value = git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0); + + if (!env_value && + (repo_config_get_bool(r, "core.multipackindex", &config_value) || + !config_value)) + return 0; + + for (m_search = r->objects->multi_pack_index; m_search; m_search = m_search->next) + if (!strcmp(object_dir, m_search->object_dir)) + return 1; + + m = load_multi_pack_index(object_dir, local); + + if (m) { + m->next = r->objects->multi_pack_index; + r->objects->multi_pack_index = m; + return 1; + } + + return 0; +} + +static size_t write_midx_header(struct hashfile *f, + unsigned char num_chunks, + uint32_t num_packs) +{ + unsigned char byte_values[4]; + + hashwrite_be32(f, MIDX_SIGNATURE); + byte_values[0] = MIDX_VERSION; + byte_values[1] = MIDX_HASH_VERSION; + byte_values[2] = num_chunks; + byte_values[3] = 0; /* unused */ + hashwrite(f, byte_values, sizeof(byte_values)); + hashwrite_be32(f, num_packs); + + return MIDX_HEADER_SIZE; +} + +struct pack_list { + struct packed_git **list; + char **names; + uint32_t nr; + uint32_t alloc_list; + uint32_t alloc_names; + size_t pack_name_concat_len; + struct multi_pack_index *m; +}; + +static void add_pack_to_midx(const char *full_path, size_t full_path_len, + const char *file_name, void *data) +{ + struct pack_list *packs = (struct pack_list *)data; + + if (ends_with(file_name, ".idx")) { + if (packs->m && midx_contains_pack(packs->m, file_name)) + return; + + ALLOC_GROW(packs->list, packs->nr + 1, packs->alloc_list); + ALLOC_GROW(packs->names, packs->nr + 1, packs->alloc_names); + + packs->list[packs->nr] = add_packed_git(full_path, + full_path_len, + 0); + + if (!packs->list[packs->nr]) { + warning(_("failed to add packfile '%s'"), + full_path); + return; + } + + if (open_pack_index(packs->list[packs->nr])) { + warning(_("failed to open pack-index '%s'"), + full_path); + close_pack(packs->list[packs->nr]); + FREE_AND_NULL(packs->list[packs->nr]); + return; + } + + packs->names[packs->nr] = xstrdup(file_name); + packs->pack_name_concat_len += strlen(file_name) + 1; + packs->nr++; + } +} + +struct pack_pair { + uint32_t pack_int_id; + char 
*pack_name; +}; + +static int pack_pair_compare(const void *_a, const void *_b) +{ + struct pack_pair *a = (struct pack_pair *)_a; + struct pack_pair *b = (struct pack_pair *)_b; + return strcmp(a->pack_name, b->pack_name); +} + +static void sort_packs_by_name(char **pack_names, uint32_t nr_packs, uint32_t *perm) +{ + uint32_t i; + struct pack_pair *pairs; + + ALLOC_ARRAY(pairs, nr_packs); + + for (i = 0; i < nr_packs; i++) { + pairs[i].pack_int_id = i; + pairs[i].pack_name = pack_names[i]; + } + + QSORT(pairs, nr_packs, pack_pair_compare); + + for (i = 0; i < nr_packs; i++) { + pack_names[i] = pairs[i].pack_name; + perm[pairs[i].pack_int_id] = i; + } + + free(pairs); +} + +struct pack_midx_entry { + struct object_id oid; + uint32_t pack_int_id; + time_t pack_mtime; + uint64_t offset; +}; + +static int midx_oid_compare(const void *_a, const void *_b) +{ + const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a; + const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b; + int cmp = oidcmp(&a->oid, &b->oid); + + if (cmp) + return cmp; + + if (a->pack_mtime > b->pack_mtime) + return -1; + else if (a->pack_mtime < b->pack_mtime) + return 1; + + return a->pack_int_id - b->pack_int_id; +} + +static int nth_midxed_pack_midx_entry(struct multi_pack_index *m, + uint32_t *pack_perm, + struct pack_midx_entry *e, + uint32_t pos) +{ + if (pos >= m->num_objects) + return 1; + + nth_midxed_object_oid(&e->oid, m, pos); + e->pack_int_id = pack_perm[nth_midxed_pack_int_id(m, pos)]; + e->offset = nth_midxed_offset(m, pos); + + /* consider objects in midx to be from "old" packs */ + e->pack_mtime = 0; + return 0; +} + +static void fill_pack_entry(uint32_t pack_int_id, + struct packed_git *p, + uint32_t cur_object, + struct pack_midx_entry *entry) +{ + if (!nth_packed_object_oid(&entry->oid, p, cur_object)) + die(_("failed to locate object %d in packfile"), cur_object); + + entry->pack_int_id = pack_int_id; + entry->pack_mtime = p->mtime; + + entry->offset = nth_packed_object_offset(p, cur_object); +} + +/* + * It is possible to artificially get into a state where there are many + * duplicate copies of objects. That can create high memory pressure if + * we are to create a list of all objects before de-duplication. To reduce + * this memory pressure without a significant performance drop, automatically + * group objects by the first byte of their object id. Use the IDX fanout + * tables to group the data, copy to a local array, then sort. + * + * Copy only the de-duplicated entries (selected by most-recent modified time + * of a packfile containing the object). + */ +static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m, + struct packed_git **p, + uint32_t *perm, + uint32_t nr_packs, + uint32_t *nr_objects) +{ + uint32_t cur_fanout, cur_pack, cur_object; + uint32_t alloc_fanout, alloc_objects, total_objects = 0; + struct pack_midx_entry *entries_by_fanout = NULL; + struct pack_midx_entry *deduplicated_entries = NULL; + uint32_t start_pack = m ? m->num_packs : 0; + + for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) + total_objects += p[cur_pack]->num_objects; + + /* + * As we de-duplicate by fanout value, we expect the fanout + * slices to be evenly distributed, with some noise. Hence, + * allocate slightly more than one 256th. + */ + alloc_objects = alloc_fanout = total_objects > 3200 ? 
total_objects / 200 : 16; + + ALLOC_ARRAY(entries_by_fanout, alloc_fanout); + ALLOC_ARRAY(deduplicated_entries, alloc_objects); + *nr_objects = 0; + + for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) { + uint32_t nr_fanout = 0; + + if (m) { + uint32_t start = 0, end; + + if (cur_fanout) + start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]); + end = ntohl(m->chunk_oid_fanout[cur_fanout]); + + for (cur_object = start; cur_object < end; cur_object++) { + ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout); + nth_midxed_pack_midx_entry(m, perm, + &entries_by_fanout[nr_fanout], + cur_object); + nr_fanout++; + } + } + + for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) { + uint32_t start = 0, end; + + if (cur_fanout) + start = get_pack_fanout(p[cur_pack], cur_fanout - 1); + end = get_pack_fanout(p[cur_pack], cur_fanout); + + for (cur_object = start; cur_object < end; cur_object++) { + ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout); + fill_pack_entry(perm[cur_pack], p[cur_pack], cur_object, &entries_by_fanout[nr_fanout]); + nr_fanout++; + } + } + + QSORT(entries_by_fanout, nr_fanout, midx_oid_compare); + + /* + * The batch is now sorted by OID and then mtime (descending). + * Take only the first duplicate. + */ + for (cur_object = 0; cur_object < nr_fanout; cur_object++) { + if (cur_object && oideq(&entries_by_fanout[cur_object - 1].oid, + &entries_by_fanout[cur_object].oid)) + continue; + + ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects); + memcpy(&deduplicated_entries[*nr_objects], + &entries_by_fanout[cur_object], + sizeof(struct pack_midx_entry)); + (*nr_objects)++; + } + } + + free(entries_by_fanout); + return deduplicated_entries; +} + +static size_t write_midx_pack_names(struct hashfile *f, + char **pack_names, + uint32_t num_packs) +{ + uint32_t i; + unsigned char padding[MIDX_CHUNK_ALIGNMENT]; + size_t written = 0; + + for (i = 0; i < num_packs; i++) { + size_t writelen = strlen(pack_names[i]) + 1; + + if (i && strcmp(pack_names[i], pack_names[i - 1]) <= 0) + BUG("incorrect pack-file order: %s before %s", + pack_names[i - 1], + pack_names[i]); + + hashwrite(f, pack_names[i], writelen); + written += writelen; + } + + /* add padding to be aligned */ + i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT); + if (i < MIDX_CHUNK_ALIGNMENT) { + memset(padding, 0, sizeof(padding)); + hashwrite(f, padding, i); + written += i; + } + + return written; +} + +static size_t write_midx_oid_fanout(struct hashfile *f, + struct pack_midx_entry *objects, + uint32_t nr_objects) +{ + struct pack_midx_entry *list = objects; + struct pack_midx_entry *last = objects + nr_objects; + uint32_t count = 0; + uint32_t i; + + /* + * Write the first-level table (the list is sorted, + * but we use a 256-entry lookup to be able to avoid + * having to do eight extra binary search iterations). 
+ */ + for (i = 0; i < 256; i++) { + struct pack_midx_entry *next = list; + + while (next < last && next->oid.hash[0] == i) { + count++; + next++; + } + + hashwrite_be32(f, count); + list = next; + } + + return MIDX_CHUNK_FANOUT_SIZE; +} + +static size_t write_midx_oid_lookup(struct hashfile *f, unsigned char hash_len, + struct pack_midx_entry *objects, + uint32_t nr_objects) +{ + struct pack_midx_entry *list = objects; + uint32_t i; + size_t written = 0; + + for (i = 0; i < nr_objects; i++) { + struct pack_midx_entry *obj = list++; + + if (i < nr_objects - 1) { + struct pack_midx_entry *next = list; + if (oidcmp(&obj->oid, &next->oid) >= 0) + BUG("OIDs not in order: %s >= %s", + oid_to_hex(&obj->oid), + oid_to_hex(&next->oid)); + } + + hashwrite(f, obj->oid.hash, (int)hash_len); + written += hash_len; + } + + return written; +} + +static size_t write_midx_object_offsets(struct hashfile *f, int large_offset_needed, + struct pack_midx_entry *objects, uint32_t nr_objects) +{ + struct pack_midx_entry *list = objects; + uint32_t i, nr_large_offset = 0; + size_t written = 0; + + for (i = 0; i < nr_objects; i++) { + struct pack_midx_entry *obj = list++; + + hashwrite_be32(f, obj->pack_int_id); + + if (large_offset_needed && obj->offset >> 31) + hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++); + else if (!large_offset_needed && obj->offset >> 32) + BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!", + oid_to_hex(&obj->oid), + obj->offset); + else + hashwrite_be32(f, (uint32_t)obj->offset); + + written += MIDX_CHUNK_OFFSET_WIDTH; + } + + return written; +} + +static size_t write_midx_large_offsets(struct hashfile *f, uint32_t nr_large_offset, + struct pack_midx_entry *objects, uint32_t nr_objects) +{ + struct pack_midx_entry *list = objects, *end = objects + nr_objects; + size_t written = 0; + + while (nr_large_offset) { + struct pack_midx_entry *obj; + uint64_t offset; + + if (list >= end) + BUG("too many large-offset objects"); + + obj = list++; + offset = obj->offset; + + if (!(offset >> 31)) + continue; + + hashwrite_be32(f, offset >> 32); + hashwrite_be32(f, offset & 0xffffffffUL); + written += 2 * sizeof(uint32_t); + + nr_large_offset--; + } + + return written; +} + +int write_midx_file(const char *object_dir) +{ + unsigned char cur_chunk, num_chunks = 0; + char *midx_name; + uint32_t i; + struct hashfile *f = NULL; + struct lock_file lk; + struct pack_list packs; + uint32_t *pack_perm = NULL; + uint64_t written = 0; + uint32_t chunk_ids[MIDX_MAX_CHUNKS + 1]; + uint64_t chunk_offsets[MIDX_MAX_CHUNKS + 1]; + uint32_t nr_entries, num_large_offsets = 0; + struct pack_midx_entry *entries = NULL; + int large_offsets_needed = 0; + + midx_name = get_midx_filename(object_dir); + if (safe_create_leading_directories(midx_name)) { + UNLEAK(midx_name); + die_errno(_("unable to create leading directories of %s"), + midx_name); + } + + packs.m = load_multi_pack_index(object_dir, 1); + + packs.nr = 0; + packs.alloc_list = packs.m ? 
packs.m->num_packs : 16; + packs.alloc_names = packs.alloc_list; + packs.list = NULL; + packs.names = NULL; + packs.pack_name_concat_len = 0; + ALLOC_ARRAY(packs.list, packs.alloc_list); + ALLOC_ARRAY(packs.names, packs.alloc_names); + + if (packs.m) { + for (i = 0; i < packs.m->num_packs; i++) { + ALLOC_GROW(packs.list, packs.nr + 1, packs.alloc_list); + ALLOC_GROW(packs.names, packs.nr + 1, packs.alloc_names); + + packs.list[packs.nr] = NULL; + packs.names[packs.nr] = xstrdup(packs.m->pack_names[i]); + packs.pack_name_concat_len += strlen(packs.names[packs.nr]) + 1; + packs.nr++; + } + } + + for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &packs); + + if (packs.m && packs.nr == packs.m->num_packs) + goto cleanup; + + if (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT) + packs.pack_name_concat_len += MIDX_CHUNK_ALIGNMENT - + (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT); + + ALLOC_ARRAY(pack_perm, packs.nr); + sort_packs_by_name(packs.names, packs.nr, pack_perm); + + entries = get_sorted_entries(packs.m, packs.list, pack_perm, packs.nr, &nr_entries); + + for (i = 0; i < nr_entries; i++) { + if (entries[i].offset > 0x7fffffff) + num_large_offsets++; + if (entries[i].offset > 0xffffffff) + large_offsets_needed = 1; + } + + hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR); + f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf); + FREE_AND_NULL(midx_name); + + if (packs.m) + close_midx(packs.m); + + cur_chunk = 0; + num_chunks = large_offsets_needed ? 5 : 4; + + written = write_midx_header(f, num_chunks, packs.nr); + + chunk_ids[cur_chunk] = MIDX_CHUNKID_PACKNAMES; + chunk_offsets[cur_chunk] = written + (num_chunks + 1) * MIDX_CHUNKLOOKUP_WIDTH; + + cur_chunk++; + chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDFANOUT; + chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + packs.pack_name_concat_len; + + cur_chunk++; + chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDLOOKUP; + chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + MIDX_CHUNK_FANOUT_SIZE; + + cur_chunk++; + chunk_ids[cur_chunk] = MIDX_CHUNKID_OBJECTOFFSETS; + chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_HASH_LEN; + + cur_chunk++; + chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_CHUNK_OFFSET_WIDTH; + if (large_offsets_needed) { + chunk_ids[cur_chunk] = MIDX_CHUNKID_LARGEOFFSETS; + + cur_chunk++; + chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + + num_large_offsets * MIDX_CHUNK_LARGE_OFFSET_WIDTH; + } + + chunk_ids[cur_chunk] = 0; + + for (i = 0; i <= num_chunks; i++) { + if (i && chunk_offsets[i] < chunk_offsets[i - 1]) + BUG("incorrect chunk offsets: %"PRIu64" before %"PRIu64, + chunk_offsets[i - 1], + chunk_offsets[i]); + + if (chunk_offsets[i] % MIDX_CHUNK_ALIGNMENT) + BUG("chunk offset %"PRIu64" is not properly aligned", + chunk_offsets[i]); + + hashwrite_be32(f, chunk_ids[i]); + hashwrite_be32(f, chunk_offsets[i] >> 32); + hashwrite_be32(f, chunk_offsets[i]); + + written += MIDX_CHUNKLOOKUP_WIDTH; + } + + for (i = 0; i < num_chunks; i++) { + if (written != chunk_offsets[i]) + BUG("incorrect chunk offset (%"PRIu64" != %"PRIu64") for chunk id %"PRIx32, + chunk_offsets[i], + written, + chunk_ids[i]); + + switch (chunk_ids[i]) { + case MIDX_CHUNKID_PACKNAMES: + written += write_midx_pack_names(f, packs.names, packs.nr); + break; + + case MIDX_CHUNKID_OIDFANOUT: + written += write_midx_oid_fanout(f, entries, nr_entries); + break; + + case MIDX_CHUNKID_OIDLOOKUP: + written += write_midx_oid_lookup(f, MIDX_HASH_LEN, entries, nr_entries); + 
break; + + case MIDX_CHUNKID_OBJECTOFFSETS: + written += write_midx_object_offsets(f, large_offsets_needed, entries, nr_entries); + break; + + case MIDX_CHUNKID_LARGEOFFSETS: + written += write_midx_large_offsets(f, num_large_offsets, entries, nr_entries); + break; + + default: + BUG("trying to write unknown chunk id %"PRIx32, + chunk_ids[i]); + } + } + + if (written != chunk_offsets[num_chunks]) + BUG("incorrect final offset %"PRIu64" != %"PRIu64, + written, + chunk_offsets[num_chunks]); + + finalize_hashfile(f, NULL, CSUM_FSYNC | CSUM_HASH_IN_STREAM); + commit_lock_file(&lk); + +cleanup: + for (i = 0; i < packs.nr; i++) { + if (packs.list[i]) { + close_pack(packs.list[i]); + free(packs.list[i]); + } + free(packs.names[i]); + } + + free(packs.list); + free(packs.names); + free(entries); + free(pack_perm); + free(midx_name); + return 0; +} + +void clear_midx_file(struct repository *r) +{ + char *midx = get_midx_filename(r->objects->objectdir); + + if (r->objects && r->objects->multi_pack_index) { + close_midx(r->objects->multi_pack_index); + r->objects->multi_pack_index = NULL; + } + + if (remove_path(midx)) { + UNLEAK(midx); + die(_("failed to clear multi-pack-index at %s"), midx); + } + + free(midx); +} + +static int verify_midx_error; + +static void midx_report(const char *fmt, ...) +{ + va_list ap; + verify_midx_error = 1; + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + va_end(ap); +} + +int verify_midx_file(const char *object_dir) +{ + uint32_t i; + struct progress *progress; + struct multi_pack_index *m = load_multi_pack_index(object_dir, 1); + verify_midx_error = 0; + + if (!m) + return 0; + + for (i = 0; i < m->num_packs; i++) { + if (prepare_midx_pack(m, i)) + midx_report("failed to load pack in position %d", i); + } + + for (i = 0; i < 255; i++) { + uint32_t oid_fanout1 = ntohl(m->chunk_oid_fanout[i]); + uint32_t oid_fanout2 = ntohl(m->chunk_oid_fanout[i + 1]); + + if (oid_fanout1 > oid_fanout2) + midx_report(_("oid fanout out of order: fanout[%d] = %"PRIx32" > %"PRIx32" = fanout[%d]"), + i, oid_fanout1, oid_fanout2, i + 1); + } + + for (i = 0; i < m->num_objects - 1; i++) { + struct object_id oid1, oid2; + + nth_midxed_object_oid(&oid1, m, i); + nth_midxed_object_oid(&oid2, m, i + 1); + + if (oidcmp(&oid1, &oid2) >= 0) + midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"), + i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1); + } + + progress = start_progress(_("Verifying object offsets"), m->num_objects); + for (i = 0; i < m->num_objects; i++) { + struct object_id oid; + struct pack_entry e; + off_t m_offset, p_offset; + + nth_midxed_object_oid(&oid, m, i); + if (!fill_midx_entry(&oid, &e, m)) { + midx_report(_("failed to load pack entry for oid[%d] = %s"), + i, oid_to_hex(&oid)); + continue; + } + + if (open_pack_index(e.p)) { + midx_report(_("failed to load pack-index for packfile %s"), + e.p->pack_name); + break; + } + + m_offset = e.offset; + p_offset = find_pack_entry_one(oid.hash, e.p); + + if (m_offset != p_offset) + midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64), + i, oid_to_hex(&oid), m_offset, p_offset); + + display_progress(progress, i + 1); + } + stop_progress(&progress); + + return verify_midx_error; +} diff --git a/midx.h b/midx.h new file mode 100644 index 0000000000..774f652530 --- /dev/null +++ b/midx.h @@ -0,0 +1,55 @@ +#ifndef MIDX_H +#define MIDX_H + +#include "repository.h" + +struct object_id; +struct pack_entry; + +#define GIT_TEST_MULTI_PACK_INDEX "GIT_TEST_MULTI_PACK_INDEX" + 
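Taken together, midx.c above and the declarations that follow expose a small reader API: open the multi-pack-index for an object directory, look an object id up in the sorted OID lookup chunk, and translate the hit into a (packfile, offset) pair. To make that concrete, here is a minimal caller sketch; it is not part of the patch, only builds inside the git source tree, and dump_midx_objects is a name invented for the illustration.

#include "cache.h"
#include "object-store.h"
#include "midx.h"

/* Walk every object the multi-pack-index knows about and report where it lives. */
static int dump_midx_objects(const char *object_dir)
{
	struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
	uint32_t i;

	if (!m)
		return error("no multi-pack-index in %s", object_dir);

	for (i = 0; i < m->num_objects; i++) {
		struct object_id oid;
		struct pack_entry e;

		nth_midxed_object_oid(&oid, m, i);
		if (fill_midx_entry(&oid, &e, m))	/* binary search, then pack lookup */
			printf("%s %s %"PRIuMAX"\n", oid_to_hex(&oid),
			       e.p->pack_name, (uintmax_t)e.offset);
	}

	close_midx(m);
	return 0;
}

The shape of the loop mirrors verify_midx_file() above, which uses the same nth_midxed_object_oid()/fill_midx_entry() pair to cross-check offsets against the individual pack indexes.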
+struct multi_pack_index { + struct multi_pack_index *next; + + int fd; + + const unsigned char *data; + size_t data_len; + + uint32_t signature; + unsigned char version; + unsigned char hash_len; + unsigned char num_chunks; + uint32_t num_packs; + uint32_t num_objects; + + int local; + + const unsigned char *chunk_pack_names; + const uint32_t *chunk_oid_fanout; + const unsigned char *chunk_oid_lookup; + const unsigned char *chunk_object_offsets; + const unsigned char *chunk_large_offsets; + + const char **pack_names; + struct packed_git **packs; + char object_dir[FLEX_ARRAY]; +}; + +struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local); +int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id); +int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result); +struct object_id *nth_midxed_object_oid(struct object_id *oid, + struct multi_pack_index *m, + uint32_t n); +int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m); +int midx_contains_pack(struct multi_pack_index *m, const char *idx_name); +int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local); + +int write_midx_file(const char *object_dir); +void clear_midx_file(struct repository *r); +int verify_midx_file(const char *object_dir); + +void close_midx(struct multi_pack_index *m); + +#endif diff --git a/name-hash.c b/name-hash.c index 163849831c..623ca6923a 100644 --- a/name-hash.c +++ b/name-hash.c @@ -7,6 +7,7 @@ */ #define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" +#include "thread-utils.h" struct dir_entry { struct hashmap_entry ent; @@ -131,22 +132,6 @@ static int cache_entry_cmp(const void *unused_cmp_data, static int lazy_try_threaded = 1; static int lazy_nr_dir_threads; -#ifdef NO_PTHREADS - -static inline int lookup_lazy_params(struct index_state *istate) -{ - return 0; -} - -static inline void threaded_lazy_init_name_hash( - struct index_state *istate) -{ -} - -#else - -#include "thread-utils.h" - /* * Set a minimum number of cache_entries that we will handle per * thread and use that to decide how many threads to run (upto @@ -509,6 +494,7 @@ static inline void lazy_update_dir_ref_counts( static void threaded_lazy_init_name_hash( struct index_state *istate) { + int err; int nr_each; int k_start; int t; @@ -516,6 +502,9 @@ static void threaded_lazy_init_name_hash( struct lazy_dir_thread_data *td_dir; struct lazy_name_thread_data *td_name; + if (!HAVE_THREADS) + return; + k_start = 0; nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads); @@ -538,8 +527,9 @@ static void threaded_lazy_init_name_hash( if (k_start > istate->cache_nr) k_start = istate->cache_nr; td_dir_t->k_end = k_start; - if (pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t)) - die("unable to create lazy_dir_thread"); + err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t); + if (err) + die(_("unable to create lazy_dir thread: %s"), strerror(err)); } for (t = 0; t < lazy_nr_dir_threads; t++) { struct lazy_dir_thread_data *td_dir_t = td_dir + t; @@ -559,13 +549,15 @@ static void threaded_lazy_init_name_hash( */ td_name->istate = istate; td_name->lazy_entries = lazy_entries; - if (pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name)) - die("unable to create lazy_name_thread"); + err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name); + if (err) + die(_("unable to create lazy_name thread: %s"), 
strerror(err)); lazy_update_dir_ref_counts(istate, lazy_entries); - if (pthread_join(td_name->pthread, NULL)) - die("unable to join lazy_name_thread"); + err = pthread_join(td_name->pthread, NULL); + if (err) + die(_("unable to join lazy_name thread: %s"), strerror(err)); cleanup_dir_mutex(); @@ -574,14 +566,12 @@ static void threaded_lazy_init_name_hash( free(lazy_entries); } -#endif - static void lazy_init_name_hash(struct index_state *istate) { - uint64_t start = getnanotime(); if (istate->name_hash_initialized) return; + trace_performance_enter(); hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr); hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr); @@ -602,7 +592,7 @@ static void lazy_init_name_hash(struct index_state *istate) } istate->name_hash_initialized = 1; - trace_performance_since(start, "initialize name hash"); + trace_performance_leave("initialize name hash"); } /* diff --git a/notes-merge.c b/notes-merge.c index 76ab19e702..bd05d50b05 100644 --- a/notes-merge.c +++ b/notes-merge.c @@ -12,6 +12,7 @@ #include "notes-merge.h" #include "strbuf.h" #include "notes-utils.h" +#include "commit-reach.h" struct notes_merge_pair { struct object_id obj, base, local, remote; @@ -126,7 +127,7 @@ static struct notes_merge_pair *diff_tree_remote(struct notes_merge_options *o, trace_printf("\tdiff_tree_remote(base = %.7s, remote = %.7s)\n", oid_to_hex(base), oid_to_hex(remote)); - diff_setup(&opt); + repo_diff_setup(the_repository, &opt); opt.flags.recursive = 1; opt.output_format = DIFF_FORMAT_NO_OUTPUT; diff_setup_done(&opt); @@ -151,7 +152,7 @@ static struct notes_merge_pair *diff_tree_remote(struct notes_merge_options *o, mp = find_notes_merge_pair_pos(changes, len, &obj, 1, &occupied); if (occupied) { /* We've found an addition/deletion pair */ - assert(!oidcmp(&mp->obj, &obj)); + assert(oideq(&mp->obj, &obj)); if (is_null_oid(&p->one->oid)) { /* addition */ assert(is_null_oid(&mp->remote)); oidcpy(&mp->remote, &p->two->oid); @@ -189,7 +190,7 @@ static void diff_tree_local(struct notes_merge_options *o, trace_printf("\tdiff_tree_local(len = %i, base = %.7s, local = %.7s)\n", len, oid_to_hex(base), oid_to_hex(local)); - diff_setup(&opt); + repo_diff_setup(the_repository, &opt); opt.flags.recursive = 1; opt.output_format = DIFF_FORMAT_NO_OUTPUT; diff_setup_done(&opt); @@ -218,7 +219,7 @@ static void diff_tree_local(struct notes_merge_options *o, continue; } - assert(!oidcmp(&mp->obj, &obj)); + assert(oideq(&mp->obj, &obj)); if (is_null_oid(&p->two->oid)) { /* deletion */ /* * Either this is a true deletion (1), or it is part @@ -229,7 +230,7 @@ static void diff_tree_local(struct notes_merge_options *o, * (3) mp->local is uninitialized; set it to null_sha1 * (will be overwritten by following addition) */ - if (!oidcmp(&mp->local, &uninitialized)) + if (oideq(&mp->local, &uninitialized)) oidclr(&mp->local); } else if (is_null_oid(&p->one->oid)) { /* addition */ /* @@ -241,7 +242,7 @@ static void diff_tree_local(struct notes_merge_options *o, * (3) mp->local is null_sha1; set to p->two->sha1 */ assert(is_null_oid(&mp->local) || - !oidcmp(&mp->local, &uninitialized)); + oideq(&mp->local, &uninitialized)); oidcpy(&mp->local, &p->two->oid); } else { /* modification */ /* @@ -249,8 +250,8 @@ static void diff_tree_local(struct notes_merge_options *o, * match mp->base, and mp->local shall be uninitialized. * Set mp->local to p->two->sha1. 
*/ - assert(!oidcmp(&p->one->oid, &mp->base)); - assert(!oidcmp(&mp->local, &uninitialized)); + assert(oideq(&p->one->oid, &mp->base)); + assert(oideq(&mp->local, &uninitialized)); oidcpy(&mp->local, &p->two->oid); } trace_printf("\t\tStored local change for %s: %.7s -> %.7s\n", @@ -348,7 +349,8 @@ static int ll_merge_in_worktree(struct notes_merge_options *o, read_mmblob(&remote, &p->remote); status = ll_merge(&result_buf, oid_to_hex(&p->obj), &base, NULL, - &local, o->local_ref, &remote, o->remote_ref, NULL); + &local, o->local_ref, &remote, o->remote_ref, + &the_index, NULL); free(base.ptr); free(local.ptr); @@ -480,14 +482,14 @@ static int merge_changes(struct notes_merge_options *o, oid_to_hex(&p->local), oid_to_hex(&p->remote)); - if (!oidcmp(&p->base, &p->remote)) { + if (oideq(&p->base, &p->remote)) { /* no remote change; nothing to do */ trace_printf("\t\t\tskipping (no remote change)\n"); - } else if (!oidcmp(&p->local, &p->remote)) { + } else if (oideq(&p->local, &p->remote)) { /* same change in local and remote; nothing to do */ trace_printf("\t\t\tskipping (local == remote)\n"); - } else if (!oidcmp(&p->local, &uninitialized) || - !oidcmp(&p->local, &p->base)) { + } else if (oideq(&p->local, &uninitialized) || + oideq(&p->local, &p->base)) { /* no local change; adopt remote change */ trace_printf("\t\t\tno local change, adopted remote\n"); if (add_note(t, &p->obj, &p->remote, @@ -621,14 +623,14 @@ int notes_merge(struct notes_merge_options *o, oid_to_hex(&local->object.oid), oid_to_hex(base_oid)); - if (!oidcmp(&remote->object.oid, base_oid)) { + if (oideq(&remote->object.oid, base_oid)) { /* Already merged; result == local commit */ if (o->verbosity >= 2) printf("Already up to date!\n"); oidcpy(result_oid, &local->object.oid); goto found_result; } - if (!oidcmp(&local->object.oid, base_oid)) { + if (oideq(&local->object.oid, base_oid)) { /* Fast-forward; result == remote commit */ if (o->verbosity >= 2) printf("Fast-forward\n"); @@ -709,7 +711,7 @@ int notes_merge_commit(struct notes_merge_options *o, /* write file as blob, and add to partial_tree */ if (stat(path.buf, &st)) die_errno("Failed to stat '%s'", path.buf); - if (index_path(&blob_oid, path.buf, &st, HASH_WRITE_OBJECT)) + if (index_path(&the_index, &blob_oid, path.buf, &st, HASH_WRITE_OBJECT)) die("Failed to write blob object from '%s'", path.buf); if (add_note(partial_tree, &obj_oid, &blob_oid, NULL)) die("Failed to add resolved note '%s' to notes tree", @@ -147,7 +147,7 @@ static struct leaf_node *note_tree_find(struct notes_tree *t, void **p = note_tree_search(t, &tree, &n, key_sha1); if (GET_PTR_TYPE(*p) == PTR_TYPE_NOTE) { struct leaf_node *l = (struct leaf_node *) CLR_PTR_TYPE(*p); - if (!hashcmp(key_sha1, l->key_oid.hash)) + if (hasheq(key_sha1, l->key_oid.hash)) return l; } return NULL; @@ -206,7 +206,7 @@ static void note_tree_remove(struct notes_tree *t, if (GET_PTR_TYPE(*p) != PTR_TYPE_NOTE) return; /* type mismatch, nothing to remove */ l = (struct leaf_node *) CLR_PTR_TYPE(*p); - if (oidcmp(&l->key_oid, &entry->key_oid)) + if (!oideq(&l->key_oid, &entry->key_oid)) return; /* key mismatch, nothing to remove */ /* we have found a matching entry */ @@ -266,9 +266,9 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree, case PTR_TYPE_NOTE: switch (type) { case PTR_TYPE_NOTE: - if (!oidcmp(&l->key_oid, &entry->key_oid)) { + if (oideq(&l->key_oid, &entry->key_oid)) { /* skip concatenation if l == entry */ - if (!oidcmp(&l->val_oid, &entry->val_oid)) + if (oideq(&l->val_oid, 
&entry->val_oid)) return 0; ret = combine_notes(&l->val_oid, diff --git a/object-store.h b/object-store.h index 67e66227d9..63b7605a3e 100644 --- a/object-store.h +++ b/object-store.h @@ -88,6 +88,8 @@ struct packed_git { char pack_name[FLEX_ARRAY]; /* more */ }; +struct multi_pack_index; + struct raw_object_store { /* * Path to the repository's object store. @@ -113,6 +115,13 @@ struct raw_object_store { /* * private data * + * should only be accessed directly by packfile.c and midx.c + */ + struct multi_pack_index *multi_pack_index; + + /* + * private data + * * should only be accessed directly by packfile.c */ @@ -121,6 +130,12 @@ struct raw_object_store { struct list_head packed_git_mru; /* + * A linked list containing all packfiles, starting with those + * contained in the multi_pack_index. + */ + struct packed_git *all_packs; + + /* * A fast, rough count of the number of objects in the repository. * These two fields are not meant for direct access. Use * approximate_object_count() instead. @@ -95,7 +95,7 @@ struct object *lookup_object(struct repository *r, const unsigned char *sha1) first = i = hash_obj(sha1, r->parsed_objects->obj_hash_size); while ((obj = r->parsed_objects->obj_hash[i]) != NULL) { - if (!hashcmp(sha1, obj->oid.hash)) + if (hasheq(sha1, obj->oid.hash)) break; i++; if (i == r->parsed_objects->obj_hash_size) @@ -59,16 +59,16 @@ struct object_array { /* * object flag allocation: - * revision.h: 0---------10 2526 + * revision.h: 0---------10 25----28 * fetch-pack.c: 01 * negotiator/default.c: 2--5 * walker.c: 0-2 - * upload-pack.c: 4 11----------------19 + * upload-pack.c: 4 11-----14 16-----19 * builtin/blame.c: 12-13 * bisect.c: 16 * bundle.c: 16 * http-push.c: 16-----19 - * commit.c: 16-----19 + * commit-reach.c: 15-------19 * sha1-name.c: 20 * list-objects-filter.c: 21 * builtin/fsck.c: 0--3 @@ -78,7 +78,7 @@ struct object_array { * builtin/show-branch.c: 0-------------------------------------------26 * builtin/unpack-objects.c: 2021 */ -#define FLAG_BITS 27 +#define FLAG_BITS 29 /* * The object type is stored in 3 bits. 
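The raw_object_store additions above pair the new multi_pack_index pointer with an all_packs list, and callers later in this patch (pack-objects.c, pack-bitmap.c) switch from get_packed_git() to get_all_packs() so that packs reachable only through the multi-pack-index are still visited. A rough sketch of such a caller follows; count_packed_objects is a name made up for the illustration, and the loop assumes only the packfile.h helpers already used elsewhere in this patch.

#include "cache.h"
#include "object-store.h"
#include "packfile.h"

/* Sum the object counts of every pack the object store knows about. */
static unsigned long count_packed_objects(struct repository *r)
{
	struct packed_git *p;
	unsigned long nr = 0;

	for (p = get_all_packs(r); p; p = p->next) {
		if (open_pack_index(p))		/* skip packs whose .idx cannot be opened */
			continue;
		nr += p->num_objects;
	}
	return nr;
}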
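A large share of the remaining churn, in the hunks on either side of this point, rewrites "!oidcmp(a, b)" as "oideq(a, b)" and "!hashcmp(a, b)" as "hasheq(a, b)" (and the negated forms accordingly). oidcmp()/hashcmp() stay around for ordering; the new helpers answer only the equality question, which states the intent at the call site and leaves room to optimize the equality check separately from a three-way comparison. A before/after sketch, with is_same_note as a made-up name:

#include "cache.h"

/* Pre-series spelling: equality phrased as "three-way compare is zero". */
static int is_same_note_old(const struct object_id *a, const struct object_id *b)
{
	return !oidcmp(a, b);
}

/* Post-series spelling: ask the equality question directly. */
static int is_same_note_new(const struct object_id *a, const struct object_id *b)
{
	return oideq(a, b);
}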
@@ -1,14 +1,14 @@ #include "cache.h" #include "oidmap.h" -static int cmpfn(const void *hashmap_cmp_fn_data, - const void *entry, const void *entry_or_key, - const void *keydata) +static int oidmap_neq(const void *hashmap_cmp_fn_data, + const void *entry, const void *entry_or_key, + const void *keydata) { const struct oidmap_entry *entry_ = entry; if (keydata) - return oidcmp(&entry_->oid, (const struct object_id *) keydata); - return oidcmp(&entry_->oid, + return !oideq(&entry_->oid, (const struct object_id *) keydata); + return !oideq(&entry_->oid, &((const struct oidmap_entry *) entry_or_key)->oid); } @@ -21,7 +21,7 @@ static int hash(const struct object_id *oid) void oidmap_init(struct oidmap *map, size_t initial_size) { - hashmap_init(&map->map, cmpfn, NULL, initial_size); + hashmap_init(&map->map, oidmap_neq, NULL, initial_size); } void oidmap_free(struct oidmap *map, int free_entries) @@ -1,40 +1,37 @@ #include "cache.h" #include "oidset.h" +void oidset_init(struct oidset *set, size_t initial_size) +{ + memset(&set->set, 0, sizeof(set->set)); + if (initial_size) + kh_resize_oid(&set->set, initial_size); +} + int oidset_contains(const struct oidset *set, const struct object_id *oid) { - if (!set->map.map.tablesize) - return 0; - return !!oidmap_get(&set->map, oid); + khiter_t pos = kh_get_oid(&set->set, *oid); + return pos != kh_end(&set->set); } int oidset_insert(struct oidset *set, const struct object_id *oid) { - struct oidmap_entry *entry; - - if (!set->map.map.tablesize) - oidmap_init(&set->map, 0); - else if (oidset_contains(set, oid)) - return 1; - - entry = xmalloc(sizeof(*entry)); - oidcpy(&entry->oid, oid); - - oidmap_put(&set->map, entry); - return 0; + int added; + kh_put_oid(&set->set, *oid, &added); + return !added; } int oidset_remove(struct oidset *set, const struct object_id *oid) { - struct oidmap_entry *entry; - - entry = oidmap_remove(&set->map, oid); - free(entry); - - return (entry != NULL); + khiter_t pos = kh_get_oid(&set->set, *oid); + if (pos == kh_end(&set->set)) + return 0; + kh_del_oid(&set->set, pos); + return 1; } void oidset_clear(struct oidset *set) { - oidmap_free(&set->map, 1); + kh_release_oid(&set->set); + oidset_init(set, 0); } @@ -1,7 +1,8 @@ #ifndef OIDSET_H #define OIDSET_H -#include "oidmap.h" +#include "hashmap.h" +#include "khash.h" /** * This API is similar to sha1-array, in that it maintains a set of object ids @@ -15,20 +16,35 @@ * table overhead. */ +static inline unsigned int oid_hash(struct object_id oid) +{ + return sha1hash(oid.hash); +} + +static inline int oid_equal(struct object_id a, struct object_id b) +{ + return oideq(&a, &b); +} + +KHASH_INIT(oid, struct object_id, int, 0, oid_hash, oid_equal) + /** * A single oidset; should be zero-initialized (or use OIDSET_INIT). */ struct oidset { - struct oidmap map; + kh_oid_t set; }; -#define OIDSET_INIT { OIDMAP_INIT } +#define OIDSET_INIT { { 0 } } -static inline void oidset_init(struct oidset *set, size_t initial_size) -{ - oidmap_init(&set->map, initial_size); -} +/** + * Initialize the oidset structure `set`. + * + * If `initial_size` is bigger than 0 then preallocate to allow inserting + * the specified number of elements without further allocations. + */ +void oidset_init(struct oidset *set, size_t initial_size); /** * Returns true iff `set` contains `oid`. 
@@ -58,19 +74,24 @@ int oidset_remove(struct oidset *set, const struct object_id *oid); void oidset_clear(struct oidset *set); struct oidset_iter { - struct oidmap_iter m_iter; + kh_oid_t *set; + khiter_t iter; }; static inline void oidset_iter_init(struct oidset *set, struct oidset_iter *iter) { - oidmap_iter_init(&set->map, &iter->m_iter); + iter->set = &set->set; + iter->iter = kh_begin(iter->set); } static inline struct object_id *oidset_iter_next(struct oidset_iter *iter) { - struct oidmap_entry *e = oidmap_iter_next(&iter->m_iter); - return e ? &e->oid : NULL; + for (; iter->iter != kh_end(iter->set); iter->iter++) { + if (kh_exist(iter->set, iter->iter)) + return &kh_key(iter->set, iter->iter++); + } + return NULL; } static inline struct object_id *oidset_iter_first(struct oidset *set, diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index d977e9bacb..9d1b951697 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -11,6 +11,7 @@ #include "pack-bitmap.h" #include "sha1-lookup.h" #include "pack-objects.h" +#include "commit-reach.h" struct bitmapped_commit { struct commit *commit; @@ -36,7 +37,7 @@ struct bitmap_writer { struct progress *progress; int show_progress; - unsigned char pack_checksum[20]; + unsigned char pack_checksum[GIT_MAX_RAWSZ]; }; static struct bitmap_writer writer; @@ -261,7 +262,7 @@ void bitmap_writer_build(struct packing_data *to_pack) if (writer.show_progress) writer.progress = start_progress("Building bitmaps", writer.selected_nr); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); revs.tag_objects = 1; revs.tree_objects = 1; revs.blob_objects = 1; diff --git a/pack-bitmap.c b/pack-bitmap.c index f0a1937a1c..5848cc93aa 100644 --- a/pack-bitmap.c +++ b/pack-bitmap.c @@ -86,10 +86,11 @@ struct bitmap_index { /* Bitmap result of the last performed walk */ struct bitmap *result; + /* "have" bitmap from the last performed walk */ + struct bitmap *haves; + /* Version of the bitmap index */ unsigned int version; - - unsigned loaded : 1; }; static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st) @@ -303,7 +304,7 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git static int load_pack_bitmap(struct bitmap_index *bitmap_git) { - assert(bitmap_git->map && !bitmap_git->loaded); + assert(bitmap_git->map); bitmap_git->bitmaps = kh_init_sha1(); bitmap_git->ext_index.positions = kh_init_sha1_pos(); @@ -318,7 +319,6 @@ static int load_pack_bitmap(struct bitmap_index *bitmap_git) if (load_bitmap_entries_v1(bitmap_git) < 0) goto failed; - bitmap_git->loaded = 1; return 0; failed: @@ -333,9 +333,9 @@ static int open_pack_bitmap(struct bitmap_index *bitmap_git) struct packed_git *p; int ret = -1; - assert(!bitmap_git->map && !bitmap_git->loaded); + assert(!bitmap_git->map); - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if (open_pack_bitmap_1(bitmap_git, p) == 0) ret = 0; } @@ -735,7 +735,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs) * from disk. 
this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ - if (!bitmap_git->loaded && load_pack_bitmap(bitmap_git) < 0) + if (load_pack_bitmap(bitmap_git) < 0) goto cleanup; object_array_clear(&revs->pending); @@ -759,8 +759,8 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs) bitmap_and_not(wants_bitmap, haves_bitmap); bitmap_git->result = wants_bitmap; + bitmap_git->haves = haves_bitmap; - bitmap_free(haves_bitmap); return bitmap_git; cleanup: @@ -845,9 +845,6 @@ void traverse_bitmap_commit_list(struct bitmap_index *bitmap_git, OBJ_TAG, show_reachable); show_extended_objects(bitmap_git, show_reachable); - - bitmap_free(bitmap_git->result); - bitmap_git->result = NULL; } static uint32_t count_object_type(struct bitmap_index *bitmap_git, @@ -1114,5 +1111,23 @@ void free_bitmap_index(struct bitmap_index *b) free(b->ext_index.objects); free(b->ext_index.hashes); bitmap_free(b->result); + bitmap_free(b->haves); free(b); } + +int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git, + const unsigned char *sha1) +{ + int pos; + + if (!bitmap_git) + return 0; /* no bitmap loaded */ + if (!bitmap_git->haves) + return 0; /* walk had no "haves" */ + + pos = bitmap_position_packfile(bitmap_git, sha1); + if (pos < 0) + return 0; + + return bitmap_get(bitmap_git->haves, pos); +} diff --git a/pack-bitmap.h b/pack-bitmap.h index 8a04741e12..189dd68ad3 100644 --- a/pack-bitmap.h +++ b/pack-bitmap.h @@ -53,6 +53,13 @@ int rebuild_existing_bitmaps(struct bitmap_index *, struct packing_data *mapping khash_sha1 *reused_bitmaps, int show_progress); void free_bitmap_index(struct bitmap_index *); +/* + * After a traversal has been performed by prepare_bitmap_walk(), this can be + * queried to see if a particular object was reachable from any of the + * objects flagged as UNINTERESTING. 
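/*
 * Illustrative sketch (not part of the patch): querying the retained
 * "haves" bitmap after a walk, per the comment above.  other_side_has()
 * is a made-up helper and error handling is omitted; "revs" is assumed to
 * be a fully prepared rev_info with UNINTERESTING tips set.
 */
static int other_side_has(struct rev_info *revs, const struct object_id *oid)
{
	struct bitmap_index *bitmap_git = prepare_bitmap_walk(revs);
	int known;

	if (!bitmap_git)
		return 0;	/* no usable bitmap; fall back to a real walk */
	known = bitmap_has_sha1_in_uninteresting(bitmap_git, oid->hash);
	free_bitmap_index(bitmap_git);
	return known;
}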
+ */ +int bitmap_has_sha1_in_uninteresting(struct bitmap_index *, const unsigned char *sha1); + void bitmap_writer_show_progress(int show); void bitmap_writer_set_checksum(unsigned char *sha1); void bitmap_writer_build_type_index(struct packing_data *to_pack, diff --git a/pack-check.c b/pack-check.c index d3a57df34f..fa5f0ff8fa 100644 --- a/pack-check.c +++ b/pack-check.c @@ -79,10 +79,10 @@ static int verify_packfile(struct packed_git *p, } while (offset < pack_sig_ofs); the_hash_algo->final_fn(hash, &ctx); pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL); - if (hashcmp(hash, pack_sig)) + if (!hasheq(hash, pack_sig)) err = error("%s pack checksum mismatch", p->pack_name); - if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig)) + if (!hasheq(index_base + index_size - the_hash_algo->hexsz, pack_sig)) err = error("%s pack checksum does not match its index", p->pack_name); unuse_pack(w_curs); @@ -180,7 +180,7 @@ int verify_pack_index(struct packed_git *p) the_hash_algo->init_fn(&ctx); the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz)); the_hash_algo->final_fn(hash, &ctx); - if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz)) + if (!hasheq(hash, index_base + index_size - the_hash_algo->rawsz)) err = error("Packfile index for %s hash mismatch", p->pack_name); return err; diff --git a/pack-objects.c b/pack-objects.c index f73f609884..b6cdbb0166 100644 --- a/pack-objects.c +++ b/pack-objects.c @@ -16,7 +16,7 @@ static uint32_t locate_object_entry_hash(struct packing_data *pdata, while (pdata->index[i] > 0) { uint32_t pos = pdata->index[i] - 1; - if (!hashcmp(sha1, pdata->objects[pos].idx.oid.hash)) { + if (hasheq(sha1, pdata->objects[pos].idx.oid.hash)) { *found = 1; return i; } @@ -99,7 +99,7 @@ static void prepare_in_pack_by_idx(struct packing_data *pdata) * (i.e. in_pack_idx also zero) should return NULL. */ mapping[cnt++] = NULL; - for (p = get_packed_git(the_repository); p; p = p->next, cnt++) { + for (p = get_all_packs(the_repository); p; p = p->next, cnt++) { if (cnt == nr) { free(mapping); return; @@ -167,6 +167,12 @@ struct object_entry *packlist_alloc(struct packing_data *pdata, REALLOC_ARRAY(pdata->in_pack, pdata->nr_alloc); if (pdata->delta_size) REALLOC_ARRAY(pdata->delta_size, pdata->nr_alloc); + + if (pdata->tree_depth) + REALLOC_ARRAY(pdata->tree_depth, pdata->nr_alloc); + + if (pdata->layer) + REALLOC_ARRAY(pdata->layer, pdata->nr_alloc); } new_entry = pdata->objects + pdata->nr_objects++; @@ -182,5 +188,30 @@ struct object_entry *packlist_alloc(struct packing_data *pdata, if (pdata->in_pack) pdata->in_pack[pdata->nr_objects - 1] = NULL; + if (pdata->tree_depth) + pdata->tree_depth[pdata->nr_objects - 1] = 0; + + if (pdata->layer) + pdata->layer[pdata->nr_objects - 1] = 0; + return new_entry; } + +void oe_set_delta_ext(struct packing_data *pdata, + struct object_entry *delta, + const unsigned char *sha1) +{ + struct object_entry *base; + + ALLOC_GROW(pdata->ext_bases, pdata->nr_ext + 1, pdata->alloc_ext); + base = &pdata->ext_bases[pdata->nr_ext++]; + memset(base, 0, sizeof(*base)); + hashcpy(base->idx.oid.hash, sha1); + + /* These flags mark that we are not part of the actual pack output. 
*/ + base->preferred_base = 1; + base->filled = 1; + + delta->ext_base = 1; + delta->delta_idx = base - pdata->ext_bases + 1; +} diff --git a/pack-objects.h b/pack-objects.h index c34036166a..dc869f26c2 100644 --- a/pack-objects.h +++ b/pack-objects.h @@ -103,6 +103,7 @@ struct object_entry { unsigned no_try_delta:1; unsigned type_:TYPE_BITS; unsigned in_pack_type:TYPE_BITS; /* could be delta */ + unsigned preferred_base:1; /* * we do not pack this, but is available * to be used as the base object to delta @@ -112,6 +113,7 @@ struct object_entry { unsigned filled:1; /* assigned write-order */ unsigned dfs_state:OE_DFS_STATE_BITS; unsigned depth:OE_DEPTH_BITS; + unsigned ext_base:1; /* delta_idx points outside packlist */ /* * pahole results on 64-bit linux (gcc and clang) @@ -143,27 +145,33 @@ struct packing_data { struct packed_git **in_pack_by_idx; struct packed_git **in_pack; -#ifndef NO_PTHREADS pthread_mutex_t lock; -#endif + + /* + * This list contains entries for bases which we know the other side + * has (e.g., via reachability bitmaps), but which aren't in our + * "objects" list. + */ + struct object_entry *ext_bases; + uint32_t nr_ext, alloc_ext; uintmax_t oe_size_limit; uintmax_t oe_delta_size_limit; + + /* delta islands */ + unsigned int *tree_depth; + unsigned char *layer; }; void prepare_packing_data(struct packing_data *pdata); static inline void packing_data_lock(struct packing_data *pdata) { -#ifndef NO_PTHREADS pthread_mutex_lock(&pdata->lock); -#endif } static inline void packing_data_unlock(struct packing_data *pdata) { -#ifndef NO_PTHREADS pthread_mutex_unlock(&pdata->lock); -#endif } struct object_entry *packlist_alloc(struct packing_data *pdata, @@ -249,9 +257,12 @@ static inline struct object_entry *oe_delta( const struct packing_data *pack, const struct object_entry *e) { - if (e->delta_idx) + if (!e->delta_idx) + return NULL; + if (e->ext_base) + return &pack->ext_bases[e->delta_idx - 1]; + else return &pack->objects[e->delta_idx - 1]; - return NULL; } static inline void oe_set_delta(struct packing_data *pack, @@ -264,6 +275,10 @@ static inline void oe_set_delta(struct packing_data *pack, e->delta_idx = 0; } +void oe_set_delta_ext(struct packing_data *pack, + struct object_entry *e, + const unsigned char *sha1); + static inline struct object_entry *oe_delta_child( const struct packing_data *pack, const struct object_entry *e) @@ -384,4 +399,38 @@ static inline void oe_set_delta_size(struct packing_data *pack, } } +static inline unsigned int oe_tree_depth(struct packing_data *pack, + struct object_entry *e) +{ + if (!pack->tree_depth) + return 0; + return pack->tree_depth[e - pack->objects]; +} + +static inline void oe_set_tree_depth(struct packing_data *pack, + struct object_entry *e, + unsigned int tree_depth) +{ + if (!pack->tree_depth) + CALLOC_ARRAY(pack->tree_depth, pack->nr_alloc); + pack->tree_depth[e - pack->objects] = tree_depth; +} + +static inline unsigned char oe_layer(struct packing_data *pack, + struct object_entry *e) +{ + if (!pack->layer) + return 0; + return pack->layer[e - pack->objects]; +} + +static inline void oe_set_layer(struct packing_data *pack, + struct object_entry *e, + unsigned char layer) +{ + if (!pack->layer) + CALLOC_ARRAY(pack->layer, pack->nr_alloc); + pack->layer[e - pack->objects] = layer; +} + #endif diff --git a/pack-revindex.c b/pack-revindex.c index bb521cf7fb..3c58784a5f 100644 --- a/pack-revindex.c +++ b/pack-revindex.c @@ -122,13 +122,14 @@ static void create_pack_revindex(struct packed_git *p) unsigned num_ent = 
p->num_objects; unsigned i; const char *index = p->index_data; + const unsigned hashsz = the_hash_algo->rawsz; ALLOC_ARRAY(p->revindex, num_ent + 1); index += 4 * 256; if (p->index_version > 1) { const uint32_t *off_32 = - (uint32_t *)(index + 8 + p->num_objects * (20 + 4)); + (uint32_t *)(index + 8 + p->num_objects * (hashsz + 4)); const uint32_t *off_64 = off_32 + p->num_objects; for (i = 0; i < num_ent; i++) { uint32_t off = ntohl(*off_32++); @@ -142,16 +143,17 @@ static void create_pack_revindex(struct packed_git *p) } } else { for (i = 0; i < num_ent; i++) { - uint32_t hl = *((uint32_t *)(index + 24 * i)); + uint32_t hl = *((uint32_t *)(index + (hashsz + 4) * i)); p->revindex[i].offset = ntohl(hl); p->revindex[i].nr = i; } } - /* This knows the pack format -- the 20-byte trailer + /* + * This knows the pack format -- the hash trailer * follows immediately after the last object data. */ - p->revindex[num_ent].offset = p->pack_size - 20; + p->revindex[num_ent].offset = p->pack_size - hashsz; p->revindex[num_ent].nr = -1; sort_revindex(p->revindex, num_ent, p->pack_size); } diff --git a/pack-write.c b/pack-write.c index a9d46bc03f..29d17a9bec 100644 --- a/pack-write.c +++ b/pack-write.c @@ -124,7 +124,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec } hashwrite(f, obj->oid.hash, the_hash_algo->rawsz); if ((opts->flags & WRITE_IDX_STRICT) && - (i && !oidcmp(&list[-2]->oid, &obj->oid))) + (i && oideq(&list[-2]->oid, &obj->oid))) die("The same object %s appears twice in the pack", oid_to_hex(&obj->oid)); } @@ -260,7 +260,7 @@ void fixup_pack_header_footer(int pack_fd, if (partial_pack_offset == 0) { unsigned char hash[GIT_MAX_RAWSZ]; the_hash_algo->final_fn(hash, &old_hash_ctx); - if (hashcmp(hash, partial_pack_hash) != 0) + if (!hasheq(hash, partial_pack_hash)) die("Unexpected checksum for %s " "(disk corruption?)", pack_name); /* diff --git a/packfile.c b/packfile.c index ebcb5742ec..d1e6683ffe 100644 --- a/packfile.c +++ b/packfile.c @@ -15,6 +15,7 @@ #include "tree-walk.h" #include "tree.h" #include "object-store.h" +#include "midx.h" char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, @@ -79,10 +80,8 @@ void pack_report(void) static int check_packed_git_idx(const char *path, struct packed_git *p) { void *idx_map; - struct pack_idx_header *hdr; size_t idx_size; - uint32_t version, nr, i, *index; - int fd = git_open(path); + int fd = git_open(path), ret; struct stat st; const unsigned int hashsz = the_hash_algo->rawsz; @@ -100,16 +99,32 @@ static int check_packed_git_idx(const char *path, struct packed_git *p) idx_map = xmmap(NULL, idx_size, PROT_READ, MAP_PRIVATE, fd, 0); close(fd); - hdr = idx_map; + ret = load_idx(path, hashsz, idx_map, idx_size, p); + + if (ret) + munmap(idx_map, idx_size); + + return ret; +} + +int load_idx(const char *path, const unsigned int hashsz, void *idx_map, + size_t idx_size, struct packed_git *p) +{ + struct pack_idx_header *hdr = idx_map; + uint32_t version, nr, i, *index; + + if (idx_size < 4 * 256 + hashsz + hashsz) + return error("index file %s is too small", path); + if (idx_map == NULL) + return error("empty data"); + if (hdr->idx_signature == htonl(PACK_IDX_SIGNATURE)) { version = ntohl(hdr->idx_version); - if (version < 2 || version > 2) { - munmap(idx_map, idx_size); + if (version < 2 || version > 2) return error("index file %s is version %"PRIu32 " and is not supported by this binary" " (try upgrading GIT to a newer version)", path, version); - } } else version = 1; @@ -119,10 +134,8 @@ static 
int check_packed_git_idx(const char *path, struct packed_git *p) index += 2; /* skip index header */ for (i = 0; i < 256; i++) { uint32_t n = ntohl(index[i]); - if (n < nr) { - munmap(idx_map, idx_size); + if (n < nr) return error("non-monotonic index %s", path); - } nr = n; } @@ -134,10 +147,8 @@ static int check_packed_git_idx(const char *path, struct packed_git *p) * - hash of the packfile * - file checksum */ - if (idx_size != 4*256 + nr * (hashsz + 4) + hashsz + hashsz) { - munmap(idx_map, idx_size); + if (idx_size != 4 * 256 + nr * (hashsz + 4) + hashsz + hashsz) return error("wrong index v1 file size in %s", path); - } } else if (version == 2) { /* * Minimum size: @@ -156,20 +167,16 @@ static int check_packed_git_idx(const char *path, struct packed_git *p) unsigned long max_size = min_size; if (nr) max_size += (nr - 1)*8; - if (idx_size < min_size || idx_size > max_size) { - munmap(idx_map, idx_size); + if (idx_size < min_size || idx_size > max_size) return error("wrong index v2 file size in %s", path); - } if (idx_size != min_size && /* * make sure we can deal with large pack offsets. * 31-bit signed offset won't be enough, neither * 32-bit unsigned one will be. */ - (sizeof(off_t) <= 4)) { - munmap(idx_map, idx_size); + (sizeof(off_t) <= 4)) return error("pack too large for current definition of off_t in %s", path); - } } p->index_version = version; @@ -196,6 +203,23 @@ int open_pack_index(struct packed_git *p) return ret; } +uint32_t get_pack_fanout(struct packed_git *p, uint32_t value) +{ + const uint32_t *level1_ofs = p->index_data; + + if (!level1_ofs) { + if (open_pack_index(p)) + return 0; + level1_ofs = p->index_data; + } + + if (p->index_version > 1) { + level1_ofs += 2; + } + + return ntohl(level1_ofs[value]); +} + static struct packed_git *alloc_packed_git(int extra) { struct packed_git *p = xmalloc(st_add(sizeof(*p), extra)); @@ -321,6 +345,11 @@ void close_all_packs(struct raw_object_store *o) BUG("want to close pack marked 'do-not-close'"); else close_pack(p); + + if (o->multi_pack_index) { + close_midx(o->multi_pack_index); + o->multi_pack_index = NULL; + } } /* @@ -451,8 +480,19 @@ static int open_packed_git_1(struct packed_git *p) ssize_t read_result; const unsigned hashsz = the_hash_algo->rawsz; - if (!p->index_data && open_pack_index(p)) - return error("packfile %s index unavailable", p->pack_name); + if (!p->index_data) { + struct multi_pack_index *m; + const char *pack_name = strrchr(p->pack_name, '/'); + + for (m = the_repository->objects->multi_pack_index; + m; m = m->next) { + if (midx_contains_pack(m, pack_name)) + break; + } + + if (!m && open_pack_index(p)) + return error("packfile %s index unavailable", p->pack_name); + } if (!pack_max_fds) { unsigned int max_fds = get_max_fd_limit(); @@ -503,6 +543,10 @@ static int open_packed_git_1(struct packed_git *p) " supported (try upgrading GIT to a newer version)", p->pack_name, ntohl(hdr.hdr_version)); + /* Skip index checking if in multi-pack-index */ + if (!p->index_data) + return 0; + /* Verify the pack matches its index. 
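/*
 * Illustrative sketch (not part of the patch): one way the new
 * get_pack_fanout() helper can be used.  The .idx fan-out table is
 * cumulative (entry N counts the objects whose oid's first byte is at
 * most N), so the number of objects starting with byte "b" is the
 * difference of two adjacent entries.  nr_objects_with_first_byte() is a
 * made-up name.
 */
static uint32_t nr_objects_with_first_byte(struct packed_git *p, uint8_t b)
{
	uint32_t hi = get_pack_fanout(p, b);
	uint32_t lo = b ? get_pack_fanout(p, b - 1) : 0;

	return hi - lo;
}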
*/ if (p->num_objects != ntohl(hdr.hdr_entries)) return error("packfile %s claims to have %"PRIu32" objects" @@ -517,7 +561,7 @@ static int open_packed_git_1(struct packed_git *p) if (read_result != hashsz) return error("packfile %s signature is unavailable", p->pack_name); idx_hash = ((unsigned char *)p->index_data) + p->index_size - hashsz * 2; - if (hashcmp(hash, idx_hash)) + if (!hasheq(hash, idx_hash)) return error("packfile %s does not match index", p->pack_name); return 0; } @@ -738,13 +782,14 @@ static void report_pack_garbage(struct string_list *list) report_helper(list, seen_bits, first, list->nr); } -static void prepare_packed_git_one(struct repository *r, char *objdir, int local) +void for_each_file_in_pack_dir(const char *objdir, + each_file_in_pack_dir_fn fn, + void *data) { struct strbuf path = STRBUF_INIT; size_t dirnamelen; DIR *dir; struct dirent *de; - struct string_list garbage = STRING_LIST_INIT_DUP; strbuf_addstr(&path, objdir); strbuf_addstr(&path, "/pack"); @@ -759,53 +804,87 @@ static void prepare_packed_git_one(struct repository *r, char *objdir, int local strbuf_addch(&path, '/'); dirnamelen = path.len; while ((de = readdir(dir)) != NULL) { - struct packed_git *p; - size_t base_len; - if (is_dot_or_dotdot(de->d_name)) continue; strbuf_setlen(&path, dirnamelen); strbuf_addstr(&path, de->d_name); - base_len = path.len; - if (strip_suffix_mem(path.buf, &base_len, ".idx")) { - /* Don't reopen a pack we already have. */ - for (p = r->objects->packed_git; p; - p = p->next) { - size_t len; - if (strip_suffix(p->pack_name, ".pack", &len) && - len == base_len && - !memcmp(p->pack_name, path.buf, len)) - break; - } - if (p == NULL && - /* - * See if it really is a valid .idx file with - * corresponding .pack file that we can map. - */ - (p = add_packed_git(path.buf, path.len, local)) != NULL) - install_packed_git(r, p); - } - - if (!report_garbage) - continue; - - if (ends_with(de->d_name, ".idx") || - ends_with(de->d_name, ".pack") || - ends_with(de->d_name, ".bitmap") || - ends_with(de->d_name, ".keep") || - ends_with(de->d_name, ".promisor")) - string_list_append(&garbage, path.buf); - else - report_garbage(PACKDIR_FILE_GARBAGE, path.buf); + fn(path.buf, path.len, de->d_name, data); } + closedir(dir); - report_pack_garbage(&garbage); - string_list_clear(&garbage, 0); strbuf_release(&path); } +struct prepare_pack_data { + struct repository *r; + struct string_list *garbage; + int local; + struct multi_pack_index *m; +}; + +static void prepare_pack(const char *full_name, size_t full_name_len, + const char *file_name, void *_data) +{ + struct prepare_pack_data *data = (struct prepare_pack_data *)_data; + struct packed_git *p; + size_t base_len = full_name_len; + + if (strip_suffix_mem(full_name, &base_len, ".idx") && + !(data->m && midx_contains_pack(data->m, file_name))) { + /* Don't reopen a pack we already have. 
*/ + for (p = data->r->objects->packed_git; p; p = p->next) { + size_t len; + if (strip_suffix(p->pack_name, ".pack", &len) && + len == base_len && + !memcmp(p->pack_name, full_name, len)) + break; + } + + if (!p) { + p = add_packed_git(full_name, full_name_len, data->local); + if (p) + install_packed_git(data->r, p); + } + } + + if (!report_garbage) + return; + + if (!strcmp(file_name, "multi-pack-index")) + return; + if (ends_with(file_name, ".idx") || + ends_with(file_name, ".pack") || + ends_with(file_name, ".bitmap") || + ends_with(file_name, ".keep") || + ends_with(file_name, ".promisor")) + string_list_append(data->garbage, full_name); + else + report_garbage(PACKDIR_FILE_GARBAGE, full_name); +} + +static void prepare_packed_git_one(struct repository *r, char *objdir, int local) +{ + struct prepare_pack_data data; + struct string_list garbage = STRING_LIST_INIT_DUP; + + data.m = r->objects->multi_pack_index; + + /* look for the multi-pack-index for this object directory */ + while (data.m && strcmp(data.m->object_dir, objdir)) + data.m = data.m->next; + + data.r = r; + data.garbage = &garbage; + data.local = local; + + for_each_file_in_pack_dir(objdir, prepare_pack, &data); + + report_pack_garbage(data.garbage); + string_list_clear(data.garbage, 0); +} + static void prepare_packed_git(struct repository *r); /* * Give a fast, rough count of the number of objects in the repository. This @@ -818,10 +897,13 @@ unsigned long approximate_object_count(void) { if (!the_repository->objects->approximate_object_count_valid) { unsigned long count; + struct multi_pack_index *m; struct packed_git *p; prepare_packed_git(the_repository); count = 0; + for (m = get_multi_pack_index(the_repository); m; m = m->next) + count += m->num_objects; for (p = the_repository->objects->packed_git; p; p = p->next) { if (open_pack_index(p)) continue; @@ -893,11 +975,17 @@ static void prepare_packed_git(struct repository *r) if (r->objects->packed_git_initialized) return; + prepare_multi_pack_index_one(r, r->objects->objectdir, 1); prepare_packed_git_one(r, r->objects->objectdir, 1); prepare_alt_odb(r); - for (alt = r->objects->alt_odb_list; alt; alt = alt->next) + for (alt = r->objects->alt_odb_list; alt; alt = alt->next) { + prepare_multi_pack_index_one(r, alt->path, 0); prepare_packed_git_one(r, alt->path, 0); + } rearrange_packed_git(r); + + r->objects->all_packs = NULL; + prepare_packed_git_mru(r); r->objects->packed_git_initialized = 1; } @@ -915,6 +1003,36 @@ struct packed_git *get_packed_git(struct repository *r) return r->objects->packed_git; } +struct multi_pack_index *get_multi_pack_index(struct repository *r) +{ + prepare_packed_git(r); + return r->objects->multi_pack_index; +} + +struct packed_git *get_all_packs(struct repository *r) +{ + prepare_packed_git(r); + + if (!r->objects->all_packs) { + struct packed_git *p = r->objects->packed_git; + struct multi_pack_index *m; + + for (m = r->objects->multi_pack_index; m; m = m->next) { + uint32_t i; + for (i = 0; i < m->num_packs; i++) { + if (!prepare_midx_pack(m, i)) { + m->packs[i]->next = p; + p = m->packs[i]; + } + } + } + + r->objects->all_packs = p; + } + + return r->objects->all_packs; +} + struct list_head *get_packed_git_mru(struct repository *r) { prepare_packed_git(r); @@ -1014,13 +1132,14 @@ int unpack_object_header(struct packed_git *p, void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1) { unsigned i; + const unsigned hashsz = the_hash_algo->rawsz; for (i = 0; i < p->num_bad_objects; i++) - if (!hashcmp(sha1, 
p->bad_object_sha1 + GIT_SHA1_RAWSZ * i)) + if (hasheq(sha1, p->bad_object_sha1 + hashsz * i)) return; p->bad_object_sha1 = xrealloc(p->bad_object_sha1, st_mult(GIT_MAX_RAWSZ, st_add(p->num_bad_objects, 1))); - hashcpy(p->bad_object_sha1 + GIT_SHA1_RAWSZ * p->num_bad_objects, sha1); + hashcpy(p->bad_object_sha1 + hashsz * p->num_bad_objects, sha1); p->num_bad_objects++; } @@ -1031,8 +1150,8 @@ const struct packed_git *has_packed_and_bad(const unsigned char *sha1) for (p = the_repository->objects->packed_git; p; p = p->next) for (i = 0; i < p->num_bad_objects; i++) - if (!hashcmp(sha1, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) + if (hasheq(sha1, + p->bad_object_sha1 + the_hash_algo->rawsz * i)) return p; return NULL; } @@ -1830,8 +1949,8 @@ static int fill_pack_entry(const struct object_id *oid, if (p->num_bad_objects) { unsigned i; for (i = 0; i < p->num_bad_objects; i++) - if (!hashcmp(oid->hash, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) + if (hasheq(oid->hash, + p->bad_object_sha1 + the_hash_algo->rawsz * i)) return 0; } @@ -1856,11 +1975,17 @@ static int fill_pack_entry(const struct object_id *oid, int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e) { struct list_head *pos; + struct multi_pack_index *m; prepare_packed_git(r); - if (!r->objects->packed_git) + if (!r->objects->packed_git && !r->objects->multi_pack_index) return 0; + for (m = r->objects->multi_pack_index; m; m = m->next) { + if (fill_midx_entry(oid, e, m)) + return 1; + } + list_for_each(pos, &r->objects->packed_git_mru) { struct packed_git *p = list_entry(pos, struct packed_git, mru); if (fill_pack_entry(oid, e, p)) { @@ -1923,7 +2048,7 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, int pack_errors = 0; prepare_packed_git(the_repository); - for (p = the_repository->objects->packed_git; p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local) continue; if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) && diff --git a/packfile.h b/packfile.h index 630f35cf31..6c4037605d 100644 --- a/packfile.h +++ b/packfile.h @@ -33,6 +33,12 @@ extern char *sha1_pack_index_name(const unsigned char *sha1); extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path); +typedef void each_file_in_pack_dir_fn(const char *full_path, size_t full_path_len, + const char *file_pach, void *data); +void for_each_file_in_pack_dir(const char *objdir, + each_file_in_pack_dir_fn fn, + void *data); + /* A hook to report invalid files in pack directory */ #define PACKDIR_FILE_PACK 1 #define PACKDIR_FILE_IDX 2 @@ -44,6 +50,8 @@ extern void install_packed_git(struct repository *r, struct packed_git *pack); struct packed_git *get_packed_git(struct repository *r); struct list_head *get_packed_git_mru(struct repository *r); +struct multi_pack_index *get_multi_pack_index(struct repository *r); +struct packed_git *get_all_packs(struct repository *r); /* * Give a rough count of objects in the repository. 
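/*
 * Illustrative sketch (not part of the patch): a callback for the
 * for_each_file_in_pack_dir() hook declared above.  count_keep() and
 * nr_keep_files() are made-up names; ends_with() and the objectdir field
 * are used exactly as elsewhere in this patch.
 */
static void count_keep(const char *full_path, size_t full_path_len,
		       const char *file_name, void *data)
{
	if (ends_with(file_name, ".keep"))
		(*(int *)data)++;
}

static int nr_keep_files(struct repository *r)
{
	int nr = 0;

	for_each_file_in_pack_dir(r->objects->objectdir, count_keep, &nr);
	return nr;
}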
This sacrifices accuracy @@ -68,6 +76,8 @@ extern int open_pack_index(struct packed_git *); */ extern void close_pack_index(struct packed_git *); +extern uint32_t get_pack_fanout(struct packed_git *p, uint32_t value); + extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *); extern void close_pack_windows(struct packed_git *); extern void close_pack(struct packed_git *); @@ -154,4 +164,17 @@ extern int has_pack_index(const unsigned char *sha1); */ extern int is_promisor_object(const struct object_id *oid); +/* + * Expose a function for fuzz testing. + * + * load_idx() parses a block of memory as a packfile index and puts the results + * into a struct packed_git. + * + * This function should not be used directly. It is exposed here only so that we + * have a convenient entry-point for fuzz testing. For real uses, you should + * probably use open_pack_index() or parse_pack_index() instead. + */ +extern int load_idx(const char *path, const unsigned int hashsz, void *idx_map, + size_t idx_size, struct packed_git *p); + #endif diff --git a/parse-options-cb.c b/parse-options-cb.c index e8236534ac..8c9edce52f 100644 --- a/parse-options-cb.c +++ b/parse-options-cb.c @@ -28,13 +28,6 @@ int parse_opt_abbrev_cb(const struct option *opt, const char *arg, int unset) return 0; } -int parse_opt_approxidate_cb(const struct option *opt, const char *arg, - int unset) -{ - *(timestamp_t *)(opt->value) = approxidate(arg); - return 0; -} - int parse_opt_expiry_date_cb(const struct option *opt, const char *arg, int unset) { @@ -65,6 +58,8 @@ int parse_opt_verbosity_cb(const struct option *opt, const char *arg, { int *target = opt->value; + BUG_ON_OPT_ARG(arg); + if (unset) /* --no-quiet, --no-verbose */ *target = 0; @@ -87,6 +82,8 @@ int parse_opt_commits(const struct option *opt, const char *arg, int unset) struct object_id oid; struct commit *commit; + BUG_ON_OPT_NEG(unset); + if (!arg) return -1; if (get_oid(arg, &oid)) @@ -117,6 +114,9 @@ int parse_opt_object_name(const struct option *opt, const char *arg, int unset) int parse_opt_tertiary(const struct option *opt, const char *arg, int unset) { int *target = opt->value; + + BUG_ON_OPT_ARG(arg); + *target = unset ? 2 : 1; return 0; } diff --git a/parse-options.h b/parse-options.h index dd14911a29..6c4fe2016d 100644 --- a/parse-options.h +++ b/parse-options.h @@ -150,9 +150,6 @@ struct option { (h), 0, &parse_opt_string_list } #define OPT_UYN(s, l, v, h) { OPTION_CALLBACK, (s), (l), (v), NULL, \ (h), PARSE_OPT_NOARG, &parse_opt_tertiary } -#define OPT_DATE(s, l, v, h) \ - { OPTION_CALLBACK, (s), (l), (v), N_("time"),(h), 0, \ - parse_opt_approxidate_cb } #define OPT_EXPIRY_DATE(s, l, v, h) \ { OPTION_CALLBACK, (s), (l), (v), N_("expiry-date"),(h), 0, \ parse_opt_expiry_date_cb } @@ -194,6 +191,20 @@ extern int opterror(const struct option *opt, const char *reason, int flags); #define opterror(o,r,f) (opterror((o),(r),(f)), const_error()) #endif +/* + * Use these assertions for callbacks that expect to be called with NONEG and + * NOARG respectively, and do not otherwise handle the "unset" and "arg" + * parameters. 
+ */ +#define BUG_ON_OPT_NEG(unset) do { \ + if ((unset)) \ + BUG("option callback does not expect negation"); \ +} while (0) +#define BUG_ON_OPT_ARG(arg) do { \ + if ((arg)) \ + BUG("option callback does not expect an argument"); \ +} while (0) + /*----- incremental advanced APIs -----*/ enum { @@ -232,7 +243,6 @@ extern struct option *parse_options_concat(struct option *a, struct option *b); /*----- some often used options -----*/ extern int parse_opt_abbrev_cb(const struct option *, const char *, int); -extern int parse_opt_approxidate_cb(const struct option *, const char *, int); extern int parse_opt_expiry_date_cb(const struct option *, const char *, int); extern int parse_opt_color_flag_cb(const struct option *, const char *, int); extern int parse_opt_verbosity_cb(const struct option *, const char *, int); diff --git a/patch-delta.c b/patch-delta.c index 56e0a5ede2..b5c8594db6 100644 --- a/patch-delta.c +++ b/patch-delta.c @@ -40,24 +40,31 @@ void *patch_delta(const void *src_buf, unsigned long src_size, cmd = *data++; if (cmd & 0x80) { unsigned long cp_off = 0, cp_size = 0; - if (cmd & 0x01) cp_off = *data++; - if (cmd & 0x02) cp_off |= (*data++ << 8); - if (cmd & 0x04) cp_off |= (*data++ << 16); - if (cmd & 0x08) cp_off |= ((unsigned) *data++ << 24); - if (cmd & 0x10) cp_size = *data++; - if (cmd & 0x20) cp_size |= (*data++ << 8); - if (cmd & 0x40) cp_size |= (*data++ << 16); +#define PARSE_CP_PARAM(bit, var, shift) do { \ + if (cmd & (bit)) { \ + if (data >= top) \ + goto bad_length; \ + var |= ((unsigned) *data++ << (shift)); \ + } } while (0) + PARSE_CP_PARAM(0x01, cp_off, 0); + PARSE_CP_PARAM(0x02, cp_off, 8); + PARSE_CP_PARAM(0x04, cp_off, 16); + PARSE_CP_PARAM(0x08, cp_off, 24); + PARSE_CP_PARAM(0x10, cp_size, 0); + PARSE_CP_PARAM(0x20, cp_size, 8); + PARSE_CP_PARAM(0x40, cp_size, 16); +#undef PARSE_CP_PARAM if (cp_size == 0) cp_size = 0x10000; if (unsigned_add_overflows(cp_off, cp_size) || cp_off + cp_size > src_size || cp_size > size) - break; + goto bad_length; memcpy(out, (char *) src_buf + cp_off, cp_size); out += cp_size; size -= cp_size; } else if (cmd) { - if (cmd > size) - break; + if (cmd > size || cmd > top - data) + goto bad_length; memcpy(out, data, cmd); out += cmd; data += cmd; @@ -75,6 +82,7 @@ void *patch_delta(const void *src_buf, unsigned long src_size, /* sanity check */ if (data != top || size != 0) { + bad_length: error("delta replay has gone wild"); bad: free(dst_buf); diff --git a/patch-ids.c b/patch-ids.c index 8f7c25d5db..c262e1be9c 100644 --- a/patch-ids.c +++ b/patch-ids.c @@ -28,14 +28,14 @@ int commit_patch_id(struct commit *commit, struct diff_options *options, /* * When we cannot load the full patch-id for both commits for whatever * reason, the function returns -1 (i.e. return error(...)). Despite - * the "cmp" in the name of this function, the caller only cares about + * the "neq" in the name of this function, the caller only cares about * the return value being zero (a and b are equivalent) or non-zero (a * and b are different), and returning non-zero would keep both in the * result, even if they actually were equivalent, in order to err on * the side of safety. The actual value being negative does not have * any significance; only that it is non-zero matters. 
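/*
 * Illustrative sketch (not part of the patch): an option callback that is
 * registered with PARSE_OPT_NOARG | PARSE_OPT_NONEG and therefore asserts
 * both conditions with the macros introduced above.  parse_opt_set_flag()
 * and its int target are made-up names.
 */
static int parse_opt_set_flag(const struct option *opt, const char *arg,
			      int unset)
{
	BUG_ON_OPT_NEG(unset);
	BUG_ON_OPT_ARG(arg);

	*(int *)opt->value = 1;
	return 0;
}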
*/ -static int patch_id_cmp(const void *cmpfn_data, +static int patch_id_neq(const void *cmpfn_data, const void *entry, const void *entry_or_key, const void *unused_keydata) @@ -53,17 +53,17 @@ static int patch_id_cmp(const void *cmpfn_data, commit_patch_id(b->commit, opt, &b->patch_id, 0)) return error("Could not get patch ID for %s", oid_to_hex(&b->commit->object.oid)); - return oidcmp(&a->patch_id, &b->patch_id); + return !oideq(&a->patch_id, &b->patch_id); } -int init_patch_ids(struct patch_ids *ids) +int init_patch_ids(struct repository *r, struct patch_ids *ids) { memset(ids, 0, sizeof(*ids)); - diff_setup(&ids->diffopts); + repo_diff_setup(r, &ids->diffopts); ids->diffopts.detect_rename = 0; ids->diffopts.flags.recursive = 1; diff_setup_done(&ids->diffopts); - hashmap_init(&ids->patches, patch_id_cmp, &ids->diffopts, 256); + hashmap_init(&ids->patches, patch_id_neq, &ids->diffopts, 256); return 0; } diff --git a/patch-ids.h b/patch-ids.h index 79ac9a8498..82a12b66f8 100644 --- a/patch-ids.h +++ b/patch-ids.h @@ -6,6 +6,7 @@ struct commit; struct object_id; +struct repository; struct patch_id { struct hashmap_entry ent; @@ -20,7 +21,7 @@ struct patch_ids { int commit_patch_id(struct commit *commit, struct diff_options *options, struct object_id *oid, int); -int init_patch_ids(struct patch_ids *); +int init_patch_ids(struct repository *, struct patch_ids *); int free_patch_ids(struct patch_ids *); struct patch_id *add_commit_patch_id(struct commit *, struct patch_ids *); struct patch_id *has_commit_patch_id(struct commit *, struct patch_ids *); @@ -108,6 +108,7 @@ struct common_dir { static struct common_dir common_list[] = { { 0, 1, 0, "branches" }, + { 0, 1, 0, "common" }, { 0, 1, 0, "hooks" }, { 0, 1, 0, "info" }, { 0, 0, 1, "info/sparse-checkout" }, @@ -118,6 +119,7 @@ static struct common_dir common_list[] = { { 0, 1, 0, "objects" }, { 0, 1, 0, "refs" }, { 0, 1, 1, "refs/bisect" }, + { 0, 1, 1, "refs/worktree" }, { 0, 1, 0, "remotes" }, { 0, 1, 0, "worktrees" }, { 0, 1, 0, "rr-cache" }, @@ -289,16 +289,11 @@ something in the test suite might still depend on the US English version of the strings, e.g. to grep some error message or other output. -To smoke out issues like these Git can be compiled with gettext poison -support, at the top-level: +To smoke out issues like these, Git tested with a translation mode that +emits gibberish on every call to gettext. To use it run the test suite +with it, e.g.: - make GETTEXT_POISON=YesPlease - -That'll give you a git which emits gibberish on every call to -gettext. It's obviously not meant to be installed, but you should run -the test suite with it: - - cd t && prove -j 9 ./t[0-9]*.sh + cd t && GIT_TEST_GETTEXT_POISON=YesPlease prove -j 9 ./t[0-9]*.sh If tests break with it you should inspect them manually and see if what you're translating is sane, i.e. 
that you're not translating diff --git a/preload-index.c b/preload-index.c index 71cd2437a3..c7dc3f2b9f 100644 --- a/preload-index.c +++ b/preload-index.c @@ -5,16 +5,9 @@ #include "pathspec.h" #include "dir.h" #include "fsmonitor.h" - -#ifdef NO_PTHREADS -static void preload_index(struct index_state *index, - const struct pathspec *pathspec) -{ - ; /* nothing */ -} -#else - -#include <pthread.h> +#include "config.h" +#include "progress.h" +#include "thread-utils.h" /* * Mostly randomly chosen maximum thread counts: we @@ -25,16 +18,23 @@ static void preload_index(struct index_state *index, #define MAX_PARALLEL (20) #define THREAD_COST (500) +struct progress_data { + unsigned long n; + struct progress *progress; + pthread_mutex_t mutex; +}; + struct thread_data { pthread_t pthread; struct index_state *index; struct pathspec pathspec; + struct progress_data *progress; int offset, nr; }; static void *preload_thread(void *_data) { - int nr; + int nr, last_nr; struct thread_data *p = _data; struct index_state *index = p->index; struct cache_entry **cep = index->cache + p->offset; @@ -43,6 +43,7 @@ static void *preload_thread(void *_data) nr = p->nr; if (nr + p->offset > index->cache_nr) nr = index->cache_nr - p->offset; + last_nr = nr; do { struct cache_entry *ce = *cep++; @@ -58,6 +59,15 @@ static void *preload_thread(void *_data) continue; if (ce->ce_flags & CE_FSMONITOR_VALID) continue; + if (p->progress && !(nr & 31)) { + struct progress_data *pd = p->progress; + + pthread_mutex_lock(&pd->mutex); + pd->n += last_nr - nr; + display_progress(pd->progress, pd->n); + pthread_mutex_unlock(&pd->mutex); + last_nr = nr; + } if (!ce_path_match(index, ce, &p->pathspec, NULL)) continue; if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce))) @@ -69,55 +79,79 @@ static void *preload_thread(void *_data) ce_mark_uptodate(ce); mark_fsmonitor_valid(ce); } while (--nr > 0); + if (p->progress) { + struct progress_data *pd = p->progress; + + pthread_mutex_lock(&pd->mutex); + display_progress(pd->progress, pd->n + last_nr); + pthread_mutex_unlock(&pd->mutex); + } cache_def_clear(&cache); return NULL; } -static void preload_index(struct index_state *index, - const struct pathspec *pathspec) +void preload_index(struct index_state *index, + const struct pathspec *pathspec, + unsigned int refresh_flags) { int threads, i, work, offset; struct thread_data data[MAX_PARALLEL]; - uint64_t start = getnanotime(); + struct progress_data pd; - if (!core_preload_index) + if (!HAVE_THREADS || !core_preload_index) return; threads = index->cache_nr / THREAD_COST; - if ((index->cache_nr > 1) && (threads < 2) && getenv("GIT_FORCE_PRELOAD_TEST")) + if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0)) threads = 2; if (threads < 2) return; + trace_performance_enter(); if (threads > MAX_PARALLEL) threads = MAX_PARALLEL; offset = 0; work = DIV_ROUND_UP(index->cache_nr, threads); memset(&data, 0, sizeof(data)); + + memset(&pd, 0, sizeof(pd)); + if (refresh_flags & REFRESH_PROGRESS && isatty(2)) { + pd.progress = start_delayed_progress(_("Refreshing index"), index->cache_nr); + pthread_mutex_init(&pd.mutex, NULL); + } + for (i = 0; i < threads; i++) { struct thread_data *p = data+i; + int err; + p->index = index; if (pathspec) copy_pathspec(&p->pathspec, pathspec); p->offset = offset; p->nr = work; + if (pd.progress) + p->progress = &pd; offset += work; - if (pthread_create(&p->pthread, NULL, preload_thread, p)) - die("unable to create threaded lstat"); + err = 
pthread_create(&p->pthread, NULL, preload_thread, p); + + if (err) + die(_("unable to create threaded lstat: %s"), strerror(err)); } for (i = 0; i < threads; i++) { struct thread_data *p = data+i; if (pthread_join(p->pthread, NULL)) die("unable to join threaded lstat"); } - trace_performance_since(start, "preload index"); + stop_progress(&pd.progress); + + trace_performance_leave("preload index"); } -#endif int read_index_preload(struct index_state *index, - const struct pathspec *pathspec) + const struct pathspec *pathspec, + unsigned int refresh_flags) { int retval = read_index(index); - preload_index(index, pathspec); + preload_index(index, pathspec, refresh_flags); return retval; } @@ -1256,6 +1256,14 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */ if (c->signature_check.key) strbuf_addstr(sb, c->signature_check.key); break; + case 'F': + if (c->signature_check.fingerprint) + strbuf_addstr(sb, c->signature_check.fingerprint); + break; + case 'P': + if (c->signature_check.primary_key_fingerprint) + strbuf_addstr(sb, c->signature_check.primary_key_fingerprint); + break; default: return 0; } diff --git a/prio-queue.c b/prio-queue.c index a078451872..d3f488cb05 100644 --- a/prio-queue.c +++ b/prio-queue.c @@ -85,3 +85,12 @@ void *prio_queue_get(struct prio_queue *queue) } return result; } + +void *prio_queue_peek(struct prio_queue *queue) +{ + if (!queue->nr) + return NULL; + if (!queue->compare) + return queue->array[queue->nr - 1].data; + return queue->array[0].data; +} diff --git a/prio-queue.h b/prio-queue.h index d030ec9dd6..682e51867a 100644 --- a/prio-queue.h +++ b/prio-queue.h @@ -46,6 +46,12 @@ extern void prio_queue_put(struct prio_queue *, void *thing); */ extern void *prio_queue_get(struct prio_queue *); +/* + * Gain access to the "thing" that would be returned by + * prio_queue_get, but do not remove it from the queue. + */ +extern void *prio_queue_peek(struct prio_queue *); + extern void clear_prio_queue(struct prio_queue *); /* Reverse the LIFO elements */ diff --git a/range-diff.c b/range-diff.c index b6b9abac26..767af8c5bb 100644 --- a/range-diff.c +++ b/range-diff.c @@ -38,6 +38,14 @@ static int read_patches(const char *range, struct string_list *list) argv_array_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges", "--reverse", "--date-order", "--decorate=no", + /* + * Choose indicators that are not used anywhere + * else in diffs, but still look reasonable + * (e.g. will not be confusing when debugging) + */ + "--output-indicator-new=>", + "--output-indicator-old=<", + "--output-indicator-context=#", "--no-abbrev-commit", range, NULL); cp.out = -1; @@ -82,6 +90,7 @@ static int read_patches(const char *range, struct string_list *list) strbuf_addch(&buf, '\n'); if (!util->diff_offset) util->diff_offset = buf.len; + strbuf_addch(&buf, ' '); strbuf_addbuf(&buf, &line); } else if (in_header) { if (starts_with(line.buf, "Author: ")) { @@ -108,8 +117,19 @@ static int read_patches(const char *range, struct string_list *list) * we are not interested. 
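/*
 * Illustrative sketch (not part of the patch): contrasting the new
 * prio_queue_peek() with prio_queue_get().  pop_if_ready() and the
 * ready() predicate are made-up names.
 */
static void *pop_if_ready(struct prio_queue *queue, int (*ready)(void *))
{
	void *item = prio_queue_peek(queue);	/* look at the next element... */

	if (item && ready(item))
		return prio_queue_get(queue);	/* ...and only then remove it */
	return NULL;
}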
*/ continue; - else + else if (line.buf[0] == '>') { + strbuf_addch(&buf, '+'); + strbuf_add(&buf, line.buf + 1, line.len - 1); + } else if (line.buf[0] == '<') { + strbuf_addch(&buf, '-'); + strbuf_add(&buf, line.buf + 1, line.len - 1); + } else if (line.buf[0] == '#') { + strbuf_addch(&buf, ' '); + strbuf_add(&buf, line.buf + 1, line.len - 1); + } else { + strbuf_addch(&buf, ' '); strbuf_addbuf(&buf, &line); + } strbuf_addch(&buf, '\n'); util->diffsize++; @@ -177,6 +197,12 @@ static void diffsize_consume(void *data, char *line, unsigned long len) (*(int *)data)++; } +static void diffsize_hunk(void *data, long ob, long on, long nb, long nn, + const char *funcline, long funclen) +{ + diffsize_consume(data, NULL, 0); +} + static int diffsize(const char *a, const char *b) { xpparam_t pp = { 0 }; @@ -190,7 +216,9 @@ static int diffsize(const char *a, const char *b) mf2.size = strlen(b); cfg.ctxlen = 3; - if (!xdi_diff_outf(&mf1, &mf2, diffsize_consume, &count, &pp, &cfg)) + if (!xdi_diff_outf(&mf1, &mf2, + diffsize_hunk, diffsize_consume, &count, + &pp, &cfg)) return count; error(_("failed to generate diff")); @@ -323,7 +351,7 @@ static void output_pair_header(struct diff_options *diffopt, } strbuf_addf(buf, "%s\n", color_reset); - fwrite(buf->buf, buf->len, 1, stdout); + fwrite(buf->buf, buf->len, 1, diffopt->file); } static struct userdiff_driver no_func_name = { @@ -334,7 +362,7 @@ static struct diff_filespec *get_filespec(const char *name, const char *p) { struct diff_filespec *spec = alloc_filespec(name); - fill_filespec(spec, &null_oid, 0, 0644); + fill_filespec(spec, &null_oid, 0, 0100644); spec->data = (char *)p; spec->size = strlen(p); spec->should_munmap = 0; @@ -409,8 +437,14 @@ static void output(struct string_list *a, struct string_list *b, strbuf_release(&dashes); } +static struct strbuf *output_prefix_cb(struct diff_options *opt, void *data) +{ + return data; +} + int show_range_diff(const char *range1, const char *range2, - int creation_factor, struct diff_options *diffopt) + int creation_factor, int dual_color, + struct diff_options *diffopt) { int res = 0; @@ -423,9 +457,24 @@ int show_range_diff(const char *range1, const char *range2, res = error(_("could not parse log for '%s'"), range2); if (!res) { + struct diff_options opts; + struct strbuf indent = STRBUF_INIT; + + memcpy(&opts, diffopt, sizeof(opts)); + if (!opts.output_format) + opts.output_format = DIFF_FORMAT_PATCH; + opts.flags.suppress_diff_headers = 1; + opts.flags.dual_color_diffed_diffs = dual_color; + opts.output_prefix = output_prefix_cb; + strbuf_addstr(&indent, " "); + opts.output_prefix_data = &indent; + diff_setup_done(&opts); + find_exact_matches(&branch1, &branch2); get_correspondences(&branch1, &branch2, creation_factor); - output(&branch1, &branch2, diffopt); + output(&branch1, &branch2, &opts); + + strbuf_release(&indent); } string_list_clear(&branch1, 1); diff --git a/range-diff.h b/range-diff.h index 2407d46a30..190593f0c7 100644 --- a/range-diff.h +++ b/range-diff.h @@ -3,7 +3,10 @@ #include "diff.h" +#define RANGE_DIFF_CREATION_FACTOR_DEFAULT 60 + int show_range_diff(const char *range1, const char *range2, - int creation_factor, struct diff_options *diffopt); + int creation_factor, int dual_color, + struct diff_options *diffopt); #endif diff --git a/read-cache.c b/read-cache.c index 8f644f68b4..bd45dc3e24 100644 --- a/read-cache.c +++ b/read-cache.c @@ -23,6 +23,8 @@ #include "split-index.h" #include "utf8.h" #include "fsmonitor.h" +#include "thread-utils.h" +#include "progress.h" /* Mask for 
the name length in ce_flags in the on-disk index */ @@ -43,6 +45,8 @@ #define CACHE_EXT_LINK 0x6c696e6b /* "link" */ #define CACHE_EXT_UNTRACKED 0x554E5452 /* "UNTR" */ #define CACHE_EXT_FSMONITOR 0x46534D4E /* "FSMN" */ +#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945 /* "EOIE" */ +#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */ /* changes that can be kept in $GIT_DIR/index (basically all extensions) */ #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \ @@ -205,15 +209,17 @@ void fill_stat_cache_info(struct cache_entry *ce, struct stat *st) } } -static int ce_compare_data(const struct cache_entry *ce, struct stat *st) +static int ce_compare_data(struct index_state *istate, + const struct cache_entry *ce, + struct stat *st) { int match = -1; int fd = git_open_cloexec(ce->name, O_RDONLY); if (fd >= 0) { struct object_id oid; - if (!index_fd(&oid, fd, st, OBJ_BLOB, ce->name, 0)) - match = oidcmp(&oid, &ce->oid); + if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0)) + match = !oideq(&oid, &ce->oid); /* index_fd() closed the file descriptor already */ } return match; @@ -254,14 +260,16 @@ static int ce_compare_gitlink(const struct cache_entry *ce) */ if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0) return 0; - return oidcmp(&oid, &ce->oid); + return !oideq(&oid, &ce->oid); } -static int ce_modified_check_fs(const struct cache_entry *ce, struct stat *st) +static int ce_modified_check_fs(struct index_state *istate, + const struct cache_entry *ce, + struct stat *st) { switch (st->st_mode & S_IFMT) { case S_IFREG: - if (ce_compare_data(ce, st)) + if (ce_compare_data(istate, ce, st)) return DATA_CHANGED; break; case S_IFLNK: @@ -407,7 +415,7 @@ int ie_match_stat(struct index_state *istate, if (assume_racy_is_modified) changed |= DATA_CHANGED; else - changed |= ce_modified_check_fs(ce, st); + changed |= ce_modified_check_fs(istate, ce, st); } return changed; @@ -447,7 +455,7 @@ int ie_modified(struct index_state *istate, (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0)) return changed; - changed_fs = ce_modified_check_fs(ce, st); + changed_fs = ce_modified_check_fs(istate, ce, st); if (changed_fs) return changed | changed_fs; return 0; @@ -753,7 +761,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st, } } if (!intent_only) { - if (index_path(&ce->oid, path, st, newflags)) { + if (index_path(istate, &ce->oid, path, st, newflags)) { discard_cache_entry(ce); return error("unable to index file %s", path); } @@ -767,7 +775,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st, /* It was suspected to be racily clean, but it turns out to be Ok */ was_same = (alias && !ce_stage(alias) && - !oidcmp(&alias->oid, &ce->oid) && + oideq(&alias->oid, &ce->oid) && ce->ce_mode == alias->ce_mode); if (pretend) @@ -823,7 +831,7 @@ struct cache_entry *make_cache_entry(struct index_state *istate, ce->ce_namelen = len; ce->ce_mode = create_ce_mode(mode); - ret = refresh_cache_entry(&the_index, ce, refresh_options); + ret = refresh_cache_entry(istate, ce, refresh_options); if (ret != ce) discard_cache_entry(ce); return ret; @@ -1476,13 +1484,24 @@ int refresh_index(struct index_state *istate, unsigned int flags, const char *typechange_fmt; const char *added_fmt; const char *unmerged_fmt; - uint64_t start = getnanotime(); + struct progress *progress = NULL; + + if (flags & REFRESH_PROGRESS && isatty(2)) + progress = start_delayed_progress(_("Refresh index"), + istate->cache_nr); + trace_performance_enter(); 
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n"); deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n"); typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n"); added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n"); unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n"); + /* + * Use the multi-threaded preload_index() to refresh most of the + * cache entries quickly then in the single threaded loop below, + * we only have to do the special cases that are left. + */ + preload_index(istate, pathspec, 0); for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce, *new_entry; int cache_errno = 0; @@ -1493,7 +1512,7 @@ int refresh_index(struct index_state *istate, unsigned int flags, if (ignore_submodules && S_ISGITLINK(ce->ce_mode)) continue; - if (pathspec && !ce_path_match(&the_index, ce, pathspec, seen)) + if (pathspec && !ce_path_match(istate, ce, pathspec, seen)) filtered = 1; if (ce_stage(ce)) { @@ -1516,6 +1535,8 @@ int refresh_index(struct index_state *istate, unsigned int flags, new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed); if (new_entry == ce) continue; + if (progress) + display_progress(progress, i); if (!new_entry) { const char *fmt; @@ -1547,7 +1568,11 @@ int refresh_index(struct index_state *istate, unsigned int flags, replace_index_entry(istate, i, new_entry); } - trace_performance_since(start, "refresh index"); + if (progress) { + display_progress(progress, istate->cache_nr); + stop_progress(&progress); + } + trace_performance_leave("refresh index"); return has_errors; } @@ -1650,7 +1675,7 @@ int verify_index_checksum; /* Allow fsck to force verification of the cache entry order. */ int verify_ce_order; -static int verify_hdr(struct cache_header *hdr, unsigned long size) +static int verify_hdr(const struct cache_header *hdr, unsigned long size) { git_hash_ctx c; unsigned char hash[GIT_MAX_RAWSZ]; @@ -1668,13 +1693,13 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size) the_hash_algo->init_fn(&c); the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz); the_hash_algo->final_fn(hash, &c); - if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz)) + if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz)) return error("bad index file sha1 signature"); return 0; } static int read_index_extension(struct index_state *istate, - const char *ext, void *data, unsigned long sz) + const char *ext, const char *data, unsigned long sz) { switch (CACHE_EXT(ext)) { case CACHE_EXT_TREE: @@ -1693,6 +1718,10 @@ static int read_index_extension(struct index_state *istate, case CACHE_EXT_FSMONITOR: read_fsmonitor_extension(istate, data, sz); break; + case CACHE_EXT_ENDOFINDEXENTRIES: + case CACHE_EXT_INDEXENTRYOFFSETTABLE: + /* already handled in do_read_index() */ + break; default: if (*ext < 'A' || 'Z' < *ext) return error("index uses %.4s extension, which we do not understand", @@ -1713,63 +1742,25 @@ int read_index(struct index_state *istate) return read_index_from(istate, get_index_file(), get_git_dir()); } -static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool, - struct ondisk_cache_entry *ondisk, - unsigned int flags, - const char *name, - size_t len) -{ - struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len); - - ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec); - ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec); - ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec); - 
ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec); - ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev); - ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino); - ce->ce_mode = get_be32(&ondisk->mode); - ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid); - ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid); - ce->ce_stat_data.sd_size = get_be32(&ondisk->size); - ce->ce_flags = flags & ~CE_NAMEMASK; - ce->ce_namelen = len; - ce->index = 0; - hashcpy(ce->oid.hash, ondisk->sha1); - memcpy(ce->name, name, len); - ce->name[len] = '\0'; - return ce; -} - -/* - * Adjacent cache entries tend to share the leading paths, so it makes - * sense to only store the differences in later entries. In the v4 - * on-disk format of the index, each on-disk cache entry stores the - * number of bytes to be stripped from the end of the previous name, - * and the bytes to append to the result, to come up with its name. - */ -static unsigned long expand_name_field(struct strbuf *name, const char *cp_) -{ - const unsigned char *ep, *cp = (const unsigned char *)cp_; - size_t len = decode_varint(&cp); - - if (name->len < len) - die("malformed name field in the index"); - strbuf_remove(name, name->len - len, len); - for (ep = cp; *ep; ep++) - ; /* find the end */ - strbuf_add(name, cp, ep - cp); - return (const char *)ep + 1 - cp_; -} - -static struct cache_entry *create_from_disk(struct mem_pool *mem_pool, +static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool, + unsigned int version, struct ondisk_cache_entry *ondisk, unsigned long *ent_size, - struct strbuf *previous_name) + const struct cache_entry *previous_ce) { struct cache_entry *ce; size_t len; const char *name; unsigned int flags; + size_t copy_len = 0; + /* + * Adjacent cache entries tend to share the leading paths, so it makes + * sense to only store the differences in later entries. In the v4 + * on-disk format of the index, each on-disk cache entry stores the + * number of bytes to be stripped from the end of the previous name, + * and the bytes to append to the result, to come up with its name. 
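/*
 * Illustrative sketch (not part of the patch): a worked instance of the
 * index v4 prefix compression described in the comment above.  If the
 * previous entry is named "Documentation/git-add.txt" (25 bytes), an
 * entry for "Documentation/git-am.txt" is stored as the varint 6 ("strip
 * the trailing 'dd.txt'") followed by the NUL-terminated suffix "m.txt";
 * the reader keeps 25 - 6 = 19 bytes of the previous name and appends the
 * suffix, which is what the new create_from_disk() does via copy_len.
 * expand_v4_name() is a made-up helper; "out" must be large enough.
 */
static void expand_v4_name(char *out, const char *prev, size_t prev_len,
			   size_t strip_len, const char *suffix)
{
	size_t copy_len = prev_len - strip_len;	/* shared prefix to keep */

	memcpy(out, prev, copy_len);
	strcpy(out + copy_len, suffix);		/* append stored suffix + NUL */
}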
+ */ + int expand_name_field = version == 4; /* On-disk flags are just 16 bits */ flags = get_be16(&ondisk->flags); @@ -1789,21 +1780,53 @@ static struct cache_entry *create_from_disk(struct mem_pool *mem_pool, else name = ondisk->name; - if (!previous_name) { - /* v3 and earlier */ - if (len == CE_NAMEMASK) - len = strlen(name); - ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len); + if (expand_name_field) { + const unsigned char *cp = (const unsigned char *)name; + size_t strip_len, previous_len; + + /* If we're at the begining of a block, ignore the previous name */ + strip_len = decode_varint(&cp); + if (previous_ce) { + previous_len = previous_ce->ce_namelen; + if (previous_len < strip_len) + die(_("malformed name field in the index, near path '%s'"), + previous_ce->name); + copy_len = previous_len - strip_len; + } + name = (const char *)cp; + } - *ent_size = ondisk_ce_size(ce); - } else { - unsigned long consumed; - consumed = expand_name_field(previous_name, name); - ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, - previous_name->buf, - previous_name->len); + if (len == CE_NAMEMASK) { + len = strlen(name); + if (expand_name_field) + len += copy_len; + } + + ce = mem_pool__ce_alloc(ce_mem_pool, len); + + ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec); + ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec); + ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec); + ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec); + ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev); + ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino); + ce->ce_mode = get_be32(&ondisk->mode); + ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid); + ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid); + ce->ce_stat_data.sd_size = get_be32(&ondisk->size); + ce->ce_flags = flags & ~CE_NAMEMASK; + ce->ce_namelen = len; + ce->index = 0; + hashcpy(ce->oid.hash, ondisk->sha1); - *ent_size = (name - ((char *)ondisk)) + consumed; + if (expand_name_field) { + if (copy_len) + memcpy(ce->name, previous_ce->name, copy_len); + memcpy(ce->name + copy_len, name, len + 1 - copy_len); + *ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len; + } else { + memcpy(ce->name, name, len + 1); + *ent_size = ondisk_ce_size(ce); } return ce; } @@ -1889,16 +1912,228 @@ static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries) return ondisk_size + entries * per_entry; } +struct index_entry_offset +{ + /* starting byte offset into index file, count of index entries in this block */ + int offset, nr; +}; + +struct index_entry_offset_table +{ + int nr; + struct index_entry_offset entries[FLEX_ARRAY]; +}; + +static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset); +static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot); + +static size_t read_eoie_extension(const char *mmap, size_t mmap_size); +static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset); + +struct load_index_extensions +{ + pthread_t pthread; + struct index_state *istate; + const char *mmap; + size_t mmap_size; + unsigned long src_offset; +}; + +static void *load_index_extensions(void *_data) +{ + struct load_index_extensions *p = _data; + unsigned long src_offset = p->src_offset; + + while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) { + /* After an array of active_nr index entries, + * there can be arbitrary number of extended + * sections, each of which is prefixed with 
+ * extension name (4-byte) and section length + * in 4-byte network byte order. + */ + uint32_t extsize = get_be32(p->mmap + src_offset + 4); + if (read_index_extension(p->istate, + p->mmap + src_offset, + p->mmap + src_offset + 8, + extsize) < 0) { + munmap((void *)p->mmap, p->mmap_size); + die(_("index file corrupt")); + } + src_offset += 8; + src_offset += extsize; + } + + return NULL; +} + +/* + * A helper function that will load the specified range of cache entries + * from the memory mapped file and add them to the given index. + */ +static unsigned long load_cache_entry_block(struct index_state *istate, + struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap, + unsigned long start_offset, const struct cache_entry *previous_ce) +{ + int i; + unsigned long src_offset = start_offset; + + for (i = offset; i < offset + nr; i++) { + struct ondisk_cache_entry *disk_ce; + struct cache_entry *ce; + unsigned long consumed; + + disk_ce = (struct ondisk_cache_entry *)(mmap + src_offset); + ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce); + set_index_entry(istate, i, ce); + + src_offset += consumed; + previous_ce = ce; + } + return src_offset - start_offset; +} + +static unsigned long load_all_cache_entries(struct index_state *istate, + const char *mmap, size_t mmap_size, unsigned long src_offset) +{ + unsigned long consumed; + + if (istate->version == 4) { + mem_pool_init(&istate->ce_mem_pool, + estimate_cache_size_from_compressed(istate->cache_nr)); + } else { + mem_pool_init(&istate->ce_mem_pool, + estimate_cache_size(mmap_size, istate->cache_nr)); + } + + consumed = load_cache_entry_block(istate, istate->ce_mem_pool, + 0, istate->cache_nr, mmap, src_offset, NULL); + return consumed; +} + +/* + * Mostly randomly chosen maximum thread counts: we + * cap the parallelism to online_cpus() threads, and we want + * to have at least 10000 cache entries per thread for it to + * be worth starting a thread. + */ + +#define THREAD_COST (10000) + +struct load_cache_entries_thread_data +{ + pthread_t pthread; + struct index_state *istate; + struct mem_pool *ce_mem_pool; + int offset; + const char *mmap; + struct index_entry_offset_table *ieot; + int ieot_start; /* starting index into the ieot array */ + int ieot_blocks; /* count of ieot entries to process */ + unsigned long consumed; /* return # of bytes in index file processed */ +}; + +/* + * A thread proc to run the load_cache_entries() computation + * across multiple background threads. 
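+ *
+ * Each thread walks its assigned contiguous range of IEOT blocks
+ * (ieot_start .. ieot_start + ieot_blocks - 1) and parses the cache
+ * entries of each block into its own mem_pool via
+ * load_cache_entry_block(), so no locking is taken while the entries
+ * are created.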
+ */ +static void *load_cache_entries_thread(void *_data) +{ + struct load_cache_entries_thread_data *p = _data; + int i; + + /* iterate across all ieot blocks assigned to this thread */ + for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) { + p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool, + p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL); + p->offset += p->ieot->entries[i].nr; + } + return NULL; +} + +static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size, + unsigned long src_offset, int nr_threads, struct index_entry_offset_table *ieot) +{ + int i, offset, ieot_blocks, ieot_start, err; + struct load_cache_entries_thread_data *data; + unsigned long consumed = 0; + + /* a little sanity checking */ + if (istate->name_hash_initialized) + BUG("the name hash isn't thread safe"); + + mem_pool_init(&istate->ce_mem_pool, 0); + + /* ensure we have no more threads than we have blocks to process */ + if (nr_threads > ieot->nr) + nr_threads = ieot->nr; + data = xcalloc(nr_threads, sizeof(*data)); + + offset = ieot_start = 0; + ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads); + for (i = 0; i < nr_threads; i++) { + struct load_cache_entries_thread_data *p = &data[i]; + int nr, j; + + if (ieot_start + ieot_blocks > ieot->nr) + ieot_blocks = ieot->nr - ieot_start; + + p->istate = istate; + p->offset = offset; + p->mmap = mmap; + p->ieot = ieot; + p->ieot_start = ieot_start; + p->ieot_blocks = ieot_blocks; + + /* create a mem_pool for each thread */ + nr = 0; + for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++) + nr += p->ieot->entries[j].nr; + if (istate->version == 4) { + mem_pool_init(&p->ce_mem_pool, + estimate_cache_size_from_compressed(nr)); + } else { + mem_pool_init(&p->ce_mem_pool, + estimate_cache_size(mmap_size, nr)); + } + + err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p); + if (err) + die(_("unable to create load_cache_entries thread: %s"), strerror(err)); + + /* increment by the number of cache entries in the ieot block being processed */ + for (j = 0; j < ieot_blocks; j++) + offset += ieot->entries[ieot_start + j].nr; + ieot_start += ieot_blocks; + } + + for (i = 0; i < nr_threads; i++) { + struct load_cache_entries_thread_data *p = &data[i]; + + err = pthread_join(p->pthread, NULL); + if (err) + die(_("unable to join load_cache_entries thread: %s"), strerror(err)); + mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool); + consumed += p->consumed; + } + + free(data); + + return consumed; +} + /* remember to discard_cache() before reading a different cache! 
*/ int do_read_index(struct index_state *istate, const char *path, int must_exist) { - int fd, i; + int fd; struct stat st; unsigned long src_offset; - struct cache_header *hdr; - void *mmap; + const struct cache_header *hdr; + const char *mmap; size_t mmap_size; - struct strbuf previous_name_buf = STRBUF_INIT, *previous_name; + struct load_index_extensions p; + size_t extension_offset = 0; + int nr_threads, cpus; + struct index_entry_offset_table *ieot = NULL; if (istate->initialized) return istate->cache_nr; @@ -1924,7 +2159,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) die_errno("unable to map index file"); close(fd); - hdr = mmap; + hdr = (const struct cache_header *)mmap; if (verify_hdr(hdr, mmap_size) < 0) goto unmap; @@ -1935,55 +2170,71 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache)); istate->initialized = 1; - if (istate->version == 4) { - previous_name = &previous_name_buf; - mem_pool_init(&istate->ce_mem_pool, - estimate_cache_size_from_compressed(istate->cache_nr)); - } else { - previous_name = NULL; - mem_pool_init(&istate->ce_mem_pool, - estimate_cache_size(mmap_size, istate->cache_nr)); - } + p.istate = istate; + p.mmap = mmap; + p.mmap_size = mmap_size; src_offset = sizeof(*hdr); - for (i = 0; i < istate->cache_nr; i++) { - struct ondisk_cache_entry *disk_ce; - struct cache_entry *ce; - unsigned long consumed; - disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset); - ce = create_from_disk(istate->ce_mem_pool, disk_ce, &consumed, previous_name); - set_index_entry(istate, i, ce); + if (git_config_get_index_threads(&nr_threads)) + nr_threads = 1; - src_offset += consumed; + /* TODO: does creating more threads than cores help? */ + if (!nr_threads) { + nr_threads = istate->cache_nr / THREAD_COST; + cpus = online_cpus(); + if (nr_threads > cpus) + nr_threads = cpus; } - strbuf_release(&previous_name_buf); + + if (!HAVE_THREADS) + nr_threads = 1; + + if (nr_threads > 1) { + extension_offset = read_eoie_extension(mmap, mmap_size); + if (extension_offset) { + int err; + + p.src_offset = extension_offset; + err = pthread_create(&p.pthread, NULL, load_index_extensions, &p); + if (err) + die(_("unable to create load_index_extensions thread: %s"), strerror(err)); + + nr_threads--; + } + } + + /* + * Locate and read the index entry offset table so that we can use it + * to multi-thread the reading of the cache entries. + */ + if (extension_offset && nr_threads > 1) + ieot = read_ieot_extension(mmap, mmap_size, extension_offset); + + if (ieot) { + src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, src_offset, nr_threads, ieot); + free(ieot); + } else { + src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset); + } + istate->timestamp.sec = st.st_mtime; istate->timestamp.nsec = ST_MTIME_NSEC(st); - while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) { - /* After an array of active_nr index entries, - * there can be arbitrary number of extended - * sections, each of which is prefixed with - * extension name (4-byte) and section length - * in 4-byte network byte order. 
- */ - uint32_t extsize; - memcpy(&extsize, (char *)mmap + src_offset + 4, 4); - extsize = ntohl(extsize); - if (read_index_extension(istate, - (const char *) mmap + src_offset, - (char *) mmap + src_offset + 8, - extsize) < 0) - goto unmap; - src_offset += 8; - src_offset += extsize; + /* if we created a thread, join it otherwise load the extensions on the primary thread */ + if (extension_offset) { + int ret = pthread_join(p.pthread, NULL); + if (ret) + die(_("unable to join load_index_extensions thread: %s"), strerror(ret)); + } else { + p.src_offset = src_offset; + load_index_extensions(&p); } - munmap(mmap, mmap_size); + munmap((void *)mmap, mmap_size); return istate->cache_nr; unmap: - munmap(mmap, mmap_size); + munmap((void *)mmap, mmap_size); die("index file corrupt"); } @@ -2002,7 +2253,6 @@ static void freshen_shared_index(const char *shared_index, int warn) int read_index_from(struct index_state *istate, const char *path, const char *gitdir) { - uint64_t start = getnanotime(); struct split_index *split_index; int ret; char *base_oid_hex; @@ -2012,8 +2262,9 @@ int read_index_from(struct index_state *istate, const char *path, if (istate->initialized) return istate->cache_nr; + trace_performance_enter(); ret = do_read_index(istate, path, 0); - trace_performance_since(start, "read cache %s", path); + trace_performance_leave("read cache %s", path); split_index = istate->split_index; if (!split_index || is_null_oid(&split_index->base_oid)) { @@ -2021,6 +2272,7 @@ int read_index_from(struct index_state *istate, const char *path, return ret; } + trace_performance_enter(); if (split_index->base) discard_index(split_index->base); else @@ -2029,7 +2281,7 @@ int read_index_from(struct index_state *istate, const char *path, base_oid_hex = oid_to_hex(&split_index->base_oid); base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex); ret = do_read_index(split_index->base, base_path, 1); - if (oidcmp(&split_index->base_oid, &split_index->base->oid)) + if (!oideq(&split_index->base_oid, &split_index->base->oid)) die("broken index, expect %s in %s, got %s", base_oid_hex, base_path, oid_to_hex(&split_index->base->oid)); @@ -2037,7 +2289,7 @@ int read_index_from(struct index_state *istate, const char *path, freshen_shared_index(base_path, 0); merge_base_index(istate); post_read_index_from(istate); - trace_performance_since(start, "read cache %s", base_path); + trace_performance_leave("read cache %s", base_path); free(base_path); return ret; } @@ -2122,7 +2374,7 @@ int unmerged_index(const struct index_state *istate) return 0; } -int index_has_changes(const struct index_state *istate, +int index_has_changes(struct index_state *istate, struct tree *tree, struct strbuf *sb) { @@ -2137,7 +2389,7 @@ int index_has_changes(const struct index_state *istate, if (tree || !get_oid_tree("HEAD", &cmp)) { struct diff_options opt; - diff_setup(&opt); + repo_diff_setup(the_repository, &opt); opt.flags.exit_with_status = 1; if (!sb) opt.flags.quick = 1; @@ -2198,11 +2450,15 @@ static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len) return 0; } -static int write_index_ext_header(git_hash_ctx *context, int fd, - unsigned int ext, unsigned int sz) +static int write_index_ext_header(git_hash_ctx *context, git_hash_ctx *eoie_context, + int fd, unsigned int ext, unsigned int sz) { ext = htonl(ext); sz = htonl(sz); + if (eoie_context) { + the_hash_algo->update_fn(eoie_context, &ext, 4); + the_hash_algo->update_fn(eoie_context, &sz, 4); + } return ((ce_write(context, fd, &ext, 4) < 0) || 
(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0; } @@ -2230,7 +2486,8 @@ static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash) return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0; } -static void ce_smudge_racily_clean_entry(struct cache_entry *ce) +static void ce_smudge_racily_clean_entry(struct index_state *istate, + struct cache_entry *ce) { /* * The only thing we care about in this function is to smudge the @@ -2249,7 +2506,7 @@ static void ce_smudge_racily_clean_entry(struct cache_entry *ce) return; if (ce_match_stat_basic(ce, &st)) return; - if (ce_modified_check_fs(ce, &st)) { + if (ce_modified_check_fs(istate, ce, &st)) { /* This is "racily clean"; smudge it. Note that this * is a tricky code. At first glance, it may appear * that it can break with this sequence: @@ -2395,7 +2652,7 @@ static int verify_index_from(const struct index_state *istate, const char *path) if (n != the_hash_algo->rawsz) goto out; - if (hashcmp(istate->oid.hash, hash)) + if (!hasheq(istate->oid.hash, hash)) goto out; close(fd); @@ -2433,6 +2690,36 @@ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile rollback_lock_file(lockfile); } +static int record_eoie(void) +{ + int val; + + if (!git_config_get_bool("index.recordendofindexentries", &val)) + return val; + + /* + * As a convenience, the end of index entries extension + * used for threading is written by default if the user + * explicitly requested threaded index reads. + */ + return !git_config_get_index_threads(&val) && val != 1; +} + +static int record_ieot(void) +{ + int val; + + if (!git_config_get_bool("index.recordoffsettable", &val)) + return val; + + /* + * As a convenience, the offset table used for threading is + * written by default if the user explicitly requested + * threaded index reads. + */ + return !git_config_get_index_threads(&val) && val != 1; +} + /* * On success, `tempfile` is closed. If it is the temporary file * of a `struct lock_file`, we will therefore effectively perform @@ -2445,7 +2732,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, { uint64_t start = getnanotime(); int newfd = tempfile->fd; - git_hash_ctx c; + git_hash_ctx c, eoie_c; struct cache_header hdr; int i, err = 0, removed, extended, hdr_version; struct cache_entry **cache = istate->cache; @@ -2454,6 +2741,10 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, struct ondisk_cache_entry_extended ondisk; struct strbuf previous_name_buf = STRBUF_INIT, *previous_name; int drop_cache_tree = istate->drop_cache_tree; + off_t offset; + int ieot_entries = 1; + struct index_entry_offset_table *ieot = NULL; + int nr, nr_threads; for (i = removed = extended = 0; i < entries; i++) { if (cache[i]->ce_flags & CE_REMOVE) @@ -2487,6 +2778,46 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0) return -1; + if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads)) + nr_threads = 1; + + if (nr_threads != 1 && record_ieot()) { + int ieot_blocks, cpus; + + /* + * ensure default number of ieot blocks maps evenly to the + * default number of threads that will process them leaving + * room for the thread to load the index extensions. 
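+	 *
+	 * (Illustrative numbers, not part of the patch: with 100,000 cache
+	 * entries, THREAD_COST of 10,000 and 8 online CPUs, the
+	 * unconfigured case below yields min(100000 / 10000, 8 - 1) = 7
+	 * blocks, leaving the eighth CPU for the extension-loading thread.)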
+ */ + if (!nr_threads) { + ieot_blocks = istate->cache_nr / THREAD_COST; + cpus = online_cpus(); + if (ieot_blocks > cpus - 1) + ieot_blocks = cpus - 1; + } else { + ieot_blocks = nr_threads; + if (ieot_blocks > istate->cache_nr) + ieot_blocks = istate->cache_nr; + } + + /* + * no reason to write out the IEOT extension if we don't + * have enough blocks to utilize multi-threading + */ + if (ieot_blocks > 1) { + ieot = xcalloc(1, sizeof(struct index_entry_offset_table) + + (ieot_blocks * sizeof(struct index_entry_offset))); + ieot_entries = DIV_ROUND_UP(entries, ieot_blocks); + } + } + + offset = lseek(newfd, 0, SEEK_CUR); + if (offset < 0) { + free(ieot); + return -1; + } + offset += write_buffer_len; + nr = 0; previous_name = (hdr_version == 4) ? &previous_name_buf : NULL; for (i = 0; i < entries; i++) { @@ -2494,7 +2825,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, if (ce->ce_flags & CE_REMOVE) continue; if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce)) - ce_smudge_racily_clean_entry(ce); + ce_smudge_racily_clean_entry(istate, ce); if (is_null_oid(&ce->oid)) { static const char msg[] = "cache entry has null sha1: %s"; static int allow = -1; @@ -2508,23 +2839,77 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, drop_cache_tree = 1; } + if (ieot && i && (i % ieot_entries == 0)) { + ieot->entries[ieot->nr].nr = nr; + ieot->entries[ieot->nr].offset = offset; + ieot->nr++; + /* + * If we have a V4 index, set the first byte to an invalid + * character to ensure there is nothing common with the previous + * entry + */ + if (previous_name) + previous_name->buf[0] = 0; + nr = 0; + offset = lseek(newfd, 0, SEEK_CUR); + if (offset < 0) { + free(ieot); + return -1; + } + offset += write_buffer_len; + } if (ce_write_entry(&c, newfd, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0) err = -1; if (err) break; + nr++; + } + if (ieot && nr) { + ieot->entries[ieot->nr].nr = nr; + ieot->entries[ieot->nr].offset = offset; + ieot->nr++; } strbuf_release(&previous_name_buf); - if (err) + if (err) { + free(ieot); return err; + } /* Write extension data here */ + offset = lseek(newfd, 0, SEEK_CUR); + if (offset < 0) { + free(ieot); + return -1; + } + offset += write_buffer_len; + the_hash_algo->init_fn(&eoie_c); + + /* + * Lets write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we + * can minimize the number of extensions we have to scan through to + * find it during load. Write it out regardless of the + * strip_extensions parameter as we need it when loading the shared + * index. 
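+	 *
+	 * The payload produced by write_ieot_extension() below is
+	 *
+	 *   <4-byte version (IEOT_VERSION, currently 1)>
+	 *   <4-byte offset> <4-byte entry count>   (one pair per block)
+	 *
+	 * all in network byte order; read_ieot_extension() parses the same
+	 * layout at load time.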
+ */ + if (ieot) { + struct strbuf sb = STRBUF_INIT; + + write_ieot_extension(&sb, ieot); + err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0 + || ce_write(&c, newfd, sb.buf, sb.len) < 0; + strbuf_release(&sb); + free(ieot); + if (err) + return -1; + } + if (!strip_extensions && istate->split_index) { struct strbuf sb = STRBUF_INIT; err = write_link_extension(&sb, istate) < 0 || - write_index_ext_header(&c, newfd, CACHE_EXT_LINK, + write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_LINK, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); @@ -2535,7 +2920,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, struct strbuf sb = STRBUF_INIT; cache_tree_write(&sb, istate->cache_tree); - err = write_index_ext_header(&c, newfd, CACHE_EXT_TREE, sb.len) < 0 + err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_TREE, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); if (err) @@ -2545,7 +2930,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, struct strbuf sb = STRBUF_INIT; resolve_undo_write(&sb, istate->resolve_undo); - err = write_index_ext_header(&c, newfd, CACHE_EXT_RESOLVE_UNDO, + err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_RESOLVE_UNDO, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); @@ -2556,7 +2941,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, struct strbuf sb = STRBUF_INIT; write_untracked_extension(&sb, istate->untracked); - err = write_index_ext_header(&c, newfd, CACHE_EXT_UNTRACKED, + err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_UNTRACKED, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); @@ -2567,7 +2952,24 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, struct strbuf sb = STRBUF_INIT; write_fsmonitor_extension(&sb, istate); - err = write_index_ext_header(&c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0 + err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0 + || ce_write(&c, newfd, sb.buf, sb.len) < 0; + strbuf_release(&sb); + if (err) + return -1; + } + + /* + * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1 + * so that it can be found and processed before all the index entries are + * read. Write it out regardless of the strip_extensions parameter as we need it + * when loading the shared index. 
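+	 *
+	 * On disk the extension is laid out as (see read_eoie_extension()
+	 * and write_eoie_extension()):
+	 *
+	 *   "EOIE"
+	 *   <4-byte length>
+	 *   <4-byte offset to the first extension>
+	 *   <hash over the <name, size> headers of the other extensions>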
+ */ + if (offset && record_eoie()) { + struct strbuf sb = STRBUF_INIT; + + write_eoie_extension(&sb, &eoie_c, offset); + err = write_index_ext_header(&c, NULL, newfd, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); if (err) @@ -2743,6 +3145,9 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock, int new_shared_index, ret; struct split_index *si = istate->split_index; + if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0)) + cache_tree_verify(istate); + if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) { if (flags & COMMIT_LOCK) rollback_lock_file(lock); @@ -2774,7 +3179,8 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock, struct tempfile *temp; int saved_errno; - temp = mks_tempfile(git_path("sharedindex_XXXXXX")); + /* Same initial permissions as the main .git/index file */ + temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666); if (!temp) { oidclr(&si->base_oid); ret = do_write_locked_index(istate, lock, flags); @@ -2939,6 +3345,8 @@ void move_index_extensions(struct index_state *dst, struct index_state *src) { dst->untracked = src->untracked; src->untracked = NULL; + dst->cache_tree = src->cache_tree; + src->cache_tree = NULL; } struct cache_entry *dup_cache_entry(const struct cache_entry *ce, @@ -2978,3 +3386,179 @@ int should_validate_cache_entries(void) return validate_index_cache_entries; } + +#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */ +#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */ + +static size_t read_eoie_extension(const char *mmap, size_t mmap_size) +{ + /* + * The end of index entries (EOIE) extension is guaranteed to be last + * so that it can be found by scanning backwards from the EOF. + * + * "EOIE" + * <4-byte length> + * <4-byte offset> + * <20-byte hash> + */ + const char *index, *eoie; + uint32_t extsize; + size_t offset, src_offset; + unsigned char hash[GIT_MAX_RAWSZ]; + git_hash_ctx c; + + /* ensure we have an index big enough to contain an EOIE extension */ + if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz) + return 0; + + /* validate the extension signature */ + index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz; + if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES) + return 0; + index += sizeof(uint32_t); + + /* validate the extension size */ + extsize = get_be32(index); + if (extsize != EOIE_SIZE) + return 0; + index += sizeof(uint32_t); + + /* + * Validate the offset we're going to look for the first extension + * signature is after the index header and before the eoie extension. + */ + offset = get_be32(index); + if (mmap + offset < mmap + sizeof(struct cache_header)) + return 0; + if (mmap + offset >= eoie) + return 0; + index += sizeof(uint32_t); + + /* + * The hash is computed over extension types and their sizes (but not + * their contents). E.g. 
if we have "TREE" extension that is N-bytes + * long, "REUC" extension that is M-bytes long, followed by "EOIE", + * then the hash would be: + * + * SHA-1("TREE" + <binary representation of N> + + * "REUC" + <binary representation of M>) + */ + src_offset = offset; + the_hash_algo->init_fn(&c); + while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) { + /* After an array of active_nr index entries, + * there can be arbitrary number of extended + * sections, each of which is prefixed with + * extension name (4-byte) and section length + * in 4-byte network byte order. + */ + uint32_t extsize; + memcpy(&extsize, mmap + src_offset + 4, 4); + extsize = ntohl(extsize); + + /* verify the extension size isn't so large it will wrap around */ + if (src_offset + 8 + extsize < src_offset) + return 0; + + the_hash_algo->update_fn(&c, mmap + src_offset, 8); + + src_offset += 8; + src_offset += extsize; + } + the_hash_algo->final_fn(hash, &c); + if (!hasheq(hash, (const unsigned char *)index)) + return 0; + + /* Validate that the extension offsets returned us back to the eoie extension. */ + if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) + return 0; + + return offset; +} + +static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset) +{ + uint32_t buffer; + unsigned char hash[GIT_MAX_RAWSZ]; + + /* offset */ + put_be32(&buffer, offset); + strbuf_add(sb, &buffer, sizeof(uint32_t)); + + /* hash */ + the_hash_algo->final_fn(hash, eoie_context); + strbuf_add(sb, hash, the_hash_algo->rawsz); +} + +#define IEOT_VERSION (1) + +static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset) +{ + const char *index = NULL; + uint32_t extsize, ext_version; + struct index_entry_offset_table *ieot; + int i, nr; + + /* find the IEOT extension */ + if (!offset) + return NULL; + while (offset <= mmap_size - the_hash_algo->rawsz - 8) { + extsize = get_be32(mmap + offset + 4); + if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) { + index = mmap + offset + 4 + 4; + break; + } + offset += 8; + offset += extsize; + } + if (!index) + return NULL; + + /* validate the version is IEOT_VERSION */ + ext_version = get_be32(index); + if (ext_version != IEOT_VERSION) { + error("invalid IEOT version %d", ext_version); + return NULL; + } + index += sizeof(uint32_t); + + /* extension size - version bytes / bytes per entry */ + nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t)); + if (!nr) { + error("invalid number of IEOT entries %d", nr); + return NULL; + } + ieot = xmalloc(sizeof(struct index_entry_offset_table) + + (nr * sizeof(struct index_entry_offset))); + ieot->nr = nr; + for (i = 0; i < nr; i++) { + ieot->entries[i].offset = get_be32(index); + index += sizeof(uint32_t); + ieot->entries[i].nr = get_be32(index); + index += sizeof(uint32_t); + } + + return ieot; +} + +static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot) +{ + uint32_t buffer; + int i; + + /* version */ + put_be32(&buffer, IEOT_VERSION); + strbuf_add(sb, &buffer, sizeof(uint32_t)); + + /* ieot */ + for (i = 0; i < ieot->nr; i++) { + + /* offset */ + put_be32(&buffer, ieot->entries[i].offset); + strbuf_add(sb, &buffer, sizeof(uint32_t)); + + /* count */ + put_be32(&buffer, ieot->entries[i].nr); + strbuf_add(sb, &buffer, sizeof(uint32_t)); + } +} diff --git a/rebase-interactive.c b/rebase-interactive.c new file mode 100644 index 0000000000..78f3263fc1 --- 
/dev/null +++ b/rebase-interactive.c @@ -0,0 +1,91 @@ +#include "cache.h" +#include "commit.h" +#include "rebase-interactive.h" +#include "sequencer.h" +#include "strbuf.h" + +void append_todo_help(unsigned edit_todo, unsigned keep_empty, + struct strbuf *buf) +{ + const char *msg = _("\nCommands:\n" +"p, pick <commit> = use commit\n" +"r, reword <commit> = use commit, but edit the commit message\n" +"e, edit <commit> = use commit, but stop for amending\n" +"s, squash <commit> = use commit, but meld into previous commit\n" +"f, fixup <commit> = like \"squash\", but discard this commit's log message\n" +"x, exec <command> = run command (the rest of the line) using shell\n" +"b, break = stop here (continue rebase later with 'git rebase --continue')\n" +"d, drop <commit> = remove commit\n" +"l, label <label> = label current HEAD with a name\n" +"t, reset <label> = reset HEAD to a label\n" +"m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n" +". create a merge commit using the original merge commit's\n" +". message (or the oneline, if no original merge commit was\n" +". specified). Use -c <commit> to reword the commit message.\n" +"\n" +"These lines can be re-ordered; they are executed from top to bottom.\n"); + + strbuf_add_commented_lines(buf, msg, strlen(msg)); + + if (get_missing_commit_check_level() == MISSING_COMMIT_CHECK_ERROR) + msg = _("\nDo not remove any line. Use 'drop' " + "explicitly to remove a commit.\n"); + else + msg = _("\nIf you remove a line here " + "THAT COMMIT WILL BE LOST.\n"); + + strbuf_add_commented_lines(buf, msg, strlen(msg)); + + if (edit_todo) + msg = _("\nYou are editing the todo file " + "of an ongoing interactive rebase.\n" + "To continue rebase after editing, run:\n" + " git rebase --continue\n\n"); + else + msg = _("\nHowever, if you remove everything, " + "the rebase will be aborted.\n\n"); + + strbuf_add_commented_lines(buf, msg, strlen(msg)); + + if (!keep_empty) { + msg = _("Note that empty commits are commented out"); + strbuf_add_commented_lines(buf, msg, strlen(msg)); + } +} + +int edit_todo_list(unsigned flags) +{ + struct strbuf buf = STRBUF_INIT; + const char *todo_file = rebase_path_todo(); + + if (strbuf_read_file(&buf, todo_file, 0) < 0) + return error_errno(_("could not read '%s'."), todo_file); + + strbuf_stripspace(&buf, 1); + if (write_message(buf.buf, buf.len, todo_file, 0)) { + strbuf_release(&buf); + return -1; + } + + strbuf_release(&buf); + + transform_todos(flags | TODO_LIST_SHORTEN_IDS); + + if (strbuf_read_file(&buf, todo_file, 0) < 0) + return error_errno(_("could not read '%s'."), todo_file); + + append_todo_help(1, 0, &buf); + if (write_message(buf.buf, buf.len, todo_file, 0)) { + strbuf_release(&buf); + return -1; + } + + strbuf_release(&buf); + + if (launch_sequence_editor(todo_file, NULL, NULL)) + return -1; + + transform_todos(flags & ~(TODO_LIST_SHORTEN_IDS)); + + return 0; +} diff --git a/rebase-interactive.h b/rebase-interactive.h new file mode 100644 index 0000000000..971da03776 --- /dev/null +++ b/rebase-interactive.h @@ -0,0 +1,8 @@ +#ifndef REBASE_INTERACTIVE_H +#define REBASE_INTERACTIVE_H + +void append_todo_help(unsigned edit_todo, unsigned keep_empty, + struct strbuf *buf); +int edit_todo_list(unsigned flags); + +#endif diff --git a/ref-filter.c b/ref-filter.c index 3e8ee04d09..5de616befe 100644 --- a/ref-filter.c +++ b/ref-filter.c @@ -19,6 +19,7 @@ #include "wt-status.h" #include "commit-slab.h" #include "commit-graph.h" +#include "commit-reach.h" static struct ref_msg { const char *gone; @@ -533,6 
+534,10 @@ static int parse_ref_filter_atom(const struct ref_format *format, if (ARRAY_SIZE(valid_atom) <= i) return strbuf_addf_ret(err, -1, _("unknown field name: %.*s"), (int)(ep-atom), atom); + if (valid_atom[i].source != SOURCE_NONE && !have_git_dir()) + return strbuf_addf_ret(err, -1, + _("not a git repository, but the field '%.*s' requires access to object data"), + (int)(ep-atom), atom); /* Add it in, including the deref prefix */ at = used_atom_cnt; @@ -874,10 +879,10 @@ static void grab_common_values(struct atom_value *val, int deref, struct expand_ if (deref) name++; if (!strcmp(name, "objecttype")) - v->s = type_name(oi->type); + v->s = xstrdup(type_name(oi->type)); else if (!strcmp(name, "objectsize")) { v->value = oi->size; - v->s = xstrfmt("%lu", oi->size); + v->s = xstrfmt("%"PRIuMAX , (uintmax_t)oi->size); } else if (deref) grab_objectname(name, &oi->oid, v, &used_atom[i]); @@ -898,9 +903,9 @@ static void grab_tag_values(struct atom_value *val, int deref, struct object *ob if (deref) name++; if (!strcmp(name, "tag")) - v->s = tag->tag; + v->s = xstrdup(tag->tag); else if (!strcmp(name, "type") && tag->tagged) - v->s = type_name(tag->tagged->type); + v->s = xstrdup(type_name(tag->tagged->type)); else if (!strcmp(name, "object") && tag->tagged) v->s = xstrdup(oid_to_hex(&tag->tagged->oid)); } @@ -1031,7 +1036,7 @@ static void grab_date(const char *buf, struct atom_value *v, const char *atomnam v->value = timestamp; return; bad: - v->s = ""; + v->s = xstrdup(""); v->value = 0; } @@ -1226,7 +1231,7 @@ static void fill_missing_values(struct atom_value *val) for (i = 0; i < used_atom_cnt; i++) { struct atom_value *v = &val[i]; if (v->s == NULL) - v->s = ""; + v->s = xstrdup(""); } } @@ -1272,7 +1277,8 @@ static inline char *copy_advance(char *dst, const char *src) static const char *lstrip_ref_components(const char *refname, int len) { long remaining = len; - const char *start = refname; + const char *start = xstrdup(refname); + const char *to_free = start; if (len < 0) { int i; @@ -1293,20 +1299,24 @@ static const char *lstrip_ref_components(const char *refname, int len) while (remaining > 0) { switch (*start++) { case '\0': - return ""; + free((char *)to_free); + return xstrdup(""); case '/': remaining--; break; } } + start = xstrdup(start); + free((char *)to_free); return start; } static const char *rstrip_ref_components(const char *refname, int len) { long remaining = len; - char *start = xstrdup(refname); + const char *start = xstrdup(refname); + const char *to_free = start; if (len < 0) { int i; @@ -1326,9 +1336,10 @@ static const char *rstrip_ref_components(const char *refname, int len) while (remaining-- > 0) { char *p = strrchr(start, '/'); - if (p == NULL) - return ""; - else + if (p == NULL) { + free((char *)to_free); + return xstrdup(""); + } else p[0] = '\0'; } return start; @@ -1343,7 +1354,7 @@ static const char *show_ref(struct refname_atom *atom, const char *refname) else if (atom->option == R_RSTRIP) return rstrip_ref_components(refname, atom->rstrip); else - return refname; + return xstrdup(refname); } static void fill_remote_ref_details(struct used_atom *atom, const char *refname, @@ -1357,7 +1368,7 @@ static void fill_remote_ref_details(struct used_atom *atom, const char *refname, NULL, AHEAD_BEHIND_FULL) < 0) { *s = xstrdup(msgs.gone); } else if (!num_ours && !num_theirs) - *s = ""; + *s = xstrdup(""); else if (!num_ours) *s = xstrfmt(msgs.behind, num_theirs); else if (!num_theirs) @@ -1372,36 +1383,31 @@ static void fill_remote_ref_details(struct used_atom 
*atom, const char *refname, } } else if (atom->u.remote_ref.option == RR_TRACKSHORT) { if (stat_tracking_info(branch, &num_ours, &num_theirs, - NULL, AHEAD_BEHIND_FULL) < 0) + NULL, AHEAD_BEHIND_FULL) < 0) { + *s = xstrdup(""); return; - + } if (!num_ours && !num_theirs) - *s = "="; + *s = xstrdup("="); else if (!num_ours) - *s = "<"; + *s = xstrdup("<"); else if (!num_theirs) - *s = ">"; + *s = xstrdup(">"); else - *s = "<>"; + *s = xstrdup("<>"); } else if (atom->u.remote_ref.option == RR_REMOTE_NAME) { int explicit; const char *remote = atom->u.remote_ref.push ? pushremote_for_branch(branch, &explicit) : remote_for_branch(branch, &explicit); - if (explicit) - *s = xstrdup(remote); - else - *s = ""; + *s = xstrdup(explicit ? remote : ""); } else if (atom->u.remote_ref.option == RR_REMOTE_REF) { int explicit; const char *merge; merge = remote_ref_for_branch(branch, atom->u.remote_ref.push, &explicit); - if (explicit) - *s = xstrdup(merge); - else - *s = ""; + *s = xstrdup(explicit ? merge : ""); } else BUG("unhandled RR_* enum"); } @@ -1450,7 +1456,7 @@ char *get_head_description(void) static const char *get_symref(struct used_atom *atom, struct ref_array_item *ref) { if (!ref->symref) - return ""; + return xstrdup(""); else return show_ref(&atom->u.refname, ref->symref); } @@ -1509,7 +1515,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err) ref->symref = resolve_refdup(ref->refname, RESOLVE_REF_READING, NULL, NULL); if (!ref->symref) - ref->symref = ""; + ref->symref = xstrdup(""); } /* Fill in specials first */ @@ -1535,20 +1541,23 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err) refname = get_symref(atom, ref); else if (starts_with(name, "upstream")) { const char *branch_name; - v->s = ""; /* only local branches may have an upstream */ if (!skip_prefix(ref->refname, "refs/heads/", - &branch_name)) + &branch_name)) { + v->s = xstrdup(""); continue; + } branch = branch_get(branch_name); refname = branch_get_upstream(branch, NULL); if (refname) fill_remote_ref_details(atom, refname, branch, &v->s); + else + v->s = xstrdup(""); continue; } else if (atom->u.remote_ref.push) { const char *branch_name; - v->s = ""; + v->s = xstrdup(""); if (!skip_prefix(ref->refname, "refs/heads/", &branch_name)) continue; @@ -1561,10 +1570,12 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err) if (!refname) continue; } + /* We will definitely re-init v->s on the next line. 
*/ + free((char *)v->s); fill_remote_ref_details(atom, refname, branch, &v->s); continue; } else if (starts_with(name, "color:")) { - v->s = atom->u.color; + v->s = xstrdup(atom->u.color); continue; } else if (!strcmp(name, "flag")) { char buf[256], *cp = buf; @@ -1573,7 +1584,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err) if (ref->flag & REF_ISPACKED) cp = copy_advance(cp, ",packed"); if (cp == buf) - v->s = ""; + v->s = xstrdup(""); else { *cp = '\0'; v->s = xstrdup(buf + 1); @@ -1583,40 +1594,42 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err) continue; } else if (!strcmp(name, "HEAD")) { if (atom->u.head && !strcmp(ref->refname, atom->u.head)) - v->s = "*"; + v->s = xstrdup("*"); else - v->s = " "; + v->s = xstrdup(" "); continue; } else if (starts_with(name, "align")) { v->handler = align_atom_handler; - v->s = ""; + v->s = xstrdup(""); continue; } else if (!strcmp(name, "end")) { v->handler = end_atom_handler; - v->s = ""; + v->s = xstrdup(""); continue; } else if (starts_with(name, "if")) { const char *s; - v->s = ""; if (skip_prefix(name, "if:", &s)) v->s = xstrdup(s); + else + v->s = xstrdup(""); v->handler = if_atom_handler; continue; } else if (!strcmp(name, "then")) { v->handler = then_atom_handler; - v->s = ""; + v->s = xstrdup(""); continue; } else if (!strcmp(name, "else")) { v->handler = else_atom_handler; - v->s = ""; + v->s = xstrdup(""); continue; } else continue; if (!deref) - v->s = refname; + v->s = xstrdup(refname); else v->s = xstrfmt("%s^{}", refname); + free((char *)refname); } for (i = 0; i < used_atom_cnt; i++) { @@ -1676,144 +1689,6 @@ static int get_ref_atom_value(struct ref_array_item *ref, int atom, } /* - * Unknown has to be "0" here, because that's the default value for - * contains_cache slab entries that have not yet been assigned. - */ -enum contains_result { - CONTAINS_UNKNOWN = 0, - CONTAINS_NO, - CONTAINS_YES -}; - -define_commit_slab(contains_cache, enum contains_result); - -struct ref_filter_cbdata { - struct ref_array *array; - struct ref_filter *filter; - struct contains_cache contains_cache; - struct contains_cache no_contains_cache; -}; - -/* - * Mimicking the real stack, this stack lives on the heap, avoiding stack - * overflows. - * - * At each recursion step, the stack items points to the commits whose - * ancestors are to be inspected. - */ -struct contains_stack { - int nr, alloc; - struct contains_stack_entry { - struct commit *commit; - struct commit_list *parents; - } *contains_stack; -}; - -static int in_commit_list(const struct commit_list *want, struct commit *c) -{ - for (; want; want = want->next) - if (!oidcmp(&want->item->object.oid, &c->object.oid)) - return 1; - return 0; -} - -/* - * Test whether the candidate is contained in the list. - * Do not recurse to find out, though, but return -1 if inconclusive. - */ -static enum contains_result contains_test(struct commit *candidate, - const struct commit_list *want, - struct contains_cache *cache, - uint32_t cutoff) -{ - enum contains_result *cached = contains_cache_at(cache, candidate); - - /* If we already have the answer cached, return that. */ - if (*cached) - return *cached; - - /* or are we it? 
*/ - if (in_commit_list(want, candidate)) { - *cached = CONTAINS_YES; - return CONTAINS_YES; - } - - /* Otherwise, we don't know; prepare to recurse */ - parse_commit_or_die(candidate); - - if (candidate->generation < cutoff) - return CONTAINS_NO; - - return CONTAINS_UNKNOWN; -} - -static void push_to_contains_stack(struct commit *candidate, struct contains_stack *contains_stack) -{ - ALLOC_GROW(contains_stack->contains_stack, contains_stack->nr + 1, contains_stack->alloc); - contains_stack->contains_stack[contains_stack->nr].commit = candidate; - contains_stack->contains_stack[contains_stack->nr++].parents = candidate->parents; -} - -static enum contains_result contains_tag_algo(struct commit *candidate, - const struct commit_list *want, - struct contains_cache *cache) -{ - struct contains_stack contains_stack = { 0, 0, NULL }; - enum contains_result result; - uint32_t cutoff = GENERATION_NUMBER_INFINITY; - const struct commit_list *p; - - for (p = want; p; p = p->next) { - struct commit *c = p->item; - load_commit_graph_info(the_repository, c); - if (c->generation < cutoff) - cutoff = c->generation; - } - - result = contains_test(candidate, want, cache, cutoff); - if (result != CONTAINS_UNKNOWN) - return result; - - push_to_contains_stack(candidate, &contains_stack); - while (contains_stack.nr) { - struct contains_stack_entry *entry = &contains_stack.contains_stack[contains_stack.nr - 1]; - struct commit *commit = entry->commit; - struct commit_list *parents = entry->parents; - - if (!parents) { - *contains_cache_at(cache, commit) = CONTAINS_NO; - contains_stack.nr--; - } - /* - * If we just popped the stack, parents->item has been marked, - * therefore contains_test will return a meaningful yes/no. - */ - else switch (contains_test(parents->item, want, cache, cutoff)) { - case CONTAINS_YES: - *contains_cache_at(cache, commit) = CONTAINS_YES; - contains_stack.nr--; - break; - case CONTAINS_NO: - entry->parents = parents->next; - break; - case CONTAINS_UNKNOWN: - push_to_contains_stack(parents->item, &contains_stack); - break; - } - } - free(contains_stack.contains_stack); - return contains_test(candidate, want, cache, cutoff); -} - -static int commit_contains(struct ref_filter *filter, struct commit *commit, - struct commit_list *list, struct contains_cache *cache) -{ - if (filter->with_commit_tag_algo) - return contains_tag_algo(commit, list, cache) == CONTAINS_YES; - return is_descendant_of(commit, list); -} - -/* * Return 1 if the refname matches one of the patterns, otherwise 0. * A pattern can be a literal prefix (e.g. a refname "refs/heads/master" * matches a pattern "refs/heads/mas") or a wildcard (e.g. the same ref @@ -2048,6 +1923,13 @@ static int filter_ref_kind(struct ref_filter *filter, const char *refname) return ref_kind_from_refname(refname); } +struct ref_filter_cbdata { + struct ref_array *array; + struct ref_filter *filter; + struct contains_cache contains_cache; + struct contains_cache no_contains_cache; +}; + /* * A call-back given to for_each_ref(). Filter refs and keep them for * later object processing. 
@@ -2118,6 +2000,10 @@ static int ref_filter_handler(const char *refname, const struct object_id *oid, static void free_array_item(struct ref_array_item *item) { free((char *)item->symref); + if (item->value) { + free((char *)item->value->s); + free(item->value); + } free(item); } @@ -2126,6 +2012,10 @@ void ref_array_clear(struct ref_array *array) { int i; + for (i = 0; i < used_atom_cnt; i++) + free((char *)used_atom[i].name); + FREE_AND_NULL(used_atom); + used_atom_cnt = 0; for (i = 0; i < array->nr; i++) free_array_item(array->items[i]); FREE_AND_NULL(array->items); @@ -2140,7 +2030,7 @@ static void do_merge_filter(struct ref_filter_cbdata *ref_cbdata) struct ref_array *array = ref_cbdata->array; struct commit **to_clear = xcalloc(sizeof(struct commit *), array->nr); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); for (i = 0; i < array->nr; i++) { struct ref_array_item *item = array->items[i]; @@ -2430,6 +2320,8 @@ int parse_opt_merge_filter(const struct option *opt, const char *arg, int unset) struct object_id oid; int no_merged = starts_with(opt->long_name, "no"); + BUG_ON_OPT_NEG(unset); + if (rf->merge) { if (no_merged) { return opterror(opt, "is incompatible with --merged", 0); @@ -217,6 +217,7 @@ char *resolve_refdup(const char *refname, int resolve_flags, /* The argument to filter_refs */ struct ref_filter { const char *pattern; + const char *prefix; each_ref_fn *fn; void *cb_data; }; @@ -296,6 +297,8 @@ static int filter_refs(const char *refname, const struct object_id *oid, if (wildmatch(filter->pattern, refname, 0)) return 0; + if (filter->prefix) + skip_prefix(refname, filter->prefix, &refname); return filter->fn(refname, oid, flags, filter->cb_data); } @@ -458,6 +461,7 @@ int for_each_glob_ref_in(each_ref_fn fn, const char *pattern, } filter.pattern = real_pattern.buf; + filter.prefix = prefix; filter.fn = fn; filter.cb_data = cb_data; ret = for_each_ref(filter_refs, &filter); @@ -624,6 +628,7 @@ int dwim_log(const char *str, int len, struct object_id *oid, char **log) static int is_per_worktree_ref(const char *refname) { return !strcmp(refname, "HEAD") || + starts_with(refname, "refs/worktree/") || starts_with(refname, "refs/bisect/") || starts_with(refname, "refs/rewritten/"); } @@ -640,13 +645,34 @@ static int is_pseudoref_syntax(const char *refname) return 1; } +static int is_main_pseudoref_syntax(const char *refname) +{ + return skip_prefix(refname, "main-worktree/", &refname) && + *refname && + is_pseudoref_syntax(refname); +} + +static int is_other_pseudoref_syntax(const char *refname) +{ + if (!skip_prefix(refname, "worktrees/", &refname)) + return 0; + refname = strchr(refname, '/'); + if (!refname || !refname[1]) + return 0; + return is_pseudoref_syntax(refname + 1); +} + enum ref_type ref_type(const char *refname) { if (is_per_worktree_ref(refname)) return REF_TYPE_PER_WORKTREE; if (is_pseudoref_syntax(refname)) return REF_TYPE_PSEUDOREF; - return REF_TYPE_NORMAL; + if (is_main_pseudoref_syntax(refname)) + return REF_TYPE_MAIN_PSEUDOREF; + if (is_other_pseudoref_syntax(refname)) + return REF_TYPE_OTHER_PSEUDOREF; + return REF_TYPE_NORMAL; } long get_files_ref_lock_timeout_ms(void) @@ -702,7 +728,7 @@ static int write_pseudoref(const char *pseudoref, const struct object_id *oid, pseudoref); rollback_lock_file(&lock); goto done; - } else if (oidcmp(&actual_old_oid, old_oid)) { + } else if (!oideq(&actual_old_oid, old_oid)) { strbuf_addf(err, _("unexpected object ID when writing '%s'"), pseudoref); rollback_lock_file(&lock); @@ 
-744,7 +770,7 @@ static int delete_pseudoref(const char *pseudoref, const struct object_id *old_o } if (read_ref(pseudoref, &actual_old_oid)) die(_("could not read ref '%s'"), pseudoref); - if (oidcmp(&actual_old_oid, old_oid)) { + if (!oideq(&actual_old_oid, old_oid)) { error(_("unexpected object ID when deleting '%s'"), pseudoref); rollback_lock_file(&lock); @@ -875,13 +901,13 @@ static int read_ref_at_ent(struct object_id *ooid, struct object_id *noid, */ if (!is_null_oid(&cb->ooid)) { oidcpy(cb->oid, noid); - if (oidcmp(&cb->ooid, noid)) + if (!oideq(&cb->ooid, noid)) warning(_("log for ref %s has gap after %s"), cb->refname, show_date(cb->date, cb->tz, DATE_MODE(RFC2822))); } else if (cb->date == cb->at_time) oidcpy(cb->oid, noid); - else if (oidcmp(noid, cb->oid)) + else if (!oideq(noid, cb->oid)) warning(_("log for ref %s unexpectedly ended on %s"), cb->refname, show_date(cb->date, cb->tz, DATE_MODE(RFC2822))); @@ -714,9 +714,11 @@ int parse_hide_refs_config(const char *var, const char *value, const char *); int ref_is_hidden(const char *, const char *); enum ref_type { - REF_TYPE_PER_WORKTREE, - REF_TYPE_PSEUDOREF, - REF_TYPE_NORMAL, + REF_TYPE_PER_WORKTREE, /* refs inside refs/ but not shared */ + REF_TYPE_PSEUDOREF, /* refs outside refs/ in current worktree */ + REF_TYPE_MAIN_PSEUDOREF, /* pseudo refs from the main worktree */ + REF_TYPE_OTHER_PSEUDOREF, /* pseudo refs from other worktrees */ + REF_TYPE_NORMAL, /* normal/shared refs inside refs/ */ }; enum ref_type ref_type(const char *refname); diff --git a/refs/files-backend.c b/refs/files-backend.c index 1f1a98e4cb..9183875dad 100644 --- a/refs/files-backend.c +++ b/refs/files-backend.c @@ -10,6 +10,7 @@ #include "../object.h" #include "../dir.h" #include "../chdir-notify.h" +#include "worktree.h" /* * This backend uses the following flags in `ref_update::flags` for @@ -149,6 +150,25 @@ static struct files_ref_store *files_downcast(struct ref_store *ref_store, return refs; } +static void files_reflog_path_other_worktrees(struct files_ref_store *refs, + struct strbuf *sb, + const char *refname) +{ + const char *real_ref; + const char *worktree_name; + int length; + + if (parse_worktree_ref(refname, &worktree_name, &length, &real_ref)) + BUG("refname %s is not a other-worktree ref", refname); + + if (worktree_name) + strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir, + length, worktree_name, real_ref); + else + strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, + real_ref); +} + static void files_reflog_path(struct files_ref_store *refs, struct strbuf *sb, const char *refname) @@ -158,6 +178,9 @@ static void files_reflog_path(struct files_ref_store *refs, case REF_TYPE_PSEUDOREF: strbuf_addf(sb, "%s/logs/%s", refs->gitdir, refname); break; + case REF_TYPE_OTHER_PSEUDOREF: + case REF_TYPE_MAIN_PSEUDOREF: + return files_reflog_path_other_worktrees(refs, sb, refname); case REF_TYPE_NORMAL: strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname); break; @@ -176,6 +199,11 @@ static void files_ref_path(struct files_ref_store *refs, case REF_TYPE_PSEUDOREF: strbuf_addf(sb, "%s/%s", refs->gitdir, refname); break; + case REF_TYPE_MAIN_PSEUDOREF: + if (!skip_prefix(refname, "main-worktree/", &refname)) + BUG("ref %s is not a main pseudoref", refname); + /* fallthrough */ + case REF_TYPE_OTHER_PSEUDOREF: case REF_TYPE_NORMAL: strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname); break; @@ -269,9 +297,9 @@ static void loose_fill_ref_dir(struct ref_store *ref_store, closedir(d); /* - * Manually add refs/bisect, which, being 
per-worktree, might - * not appear in the directory listing for refs/ in the main - * repo. + * Manually add refs/bisect and refs/worktree, which, being + * per-worktree, might not appear in the directory listing for + * refs/ in the main repo. */ if (!strcmp(dirname, "refs/")) { int pos = search_ref_dir(dir, "refs/bisect/", 12); @@ -281,6 +309,14 @@ static void loose_fill_ref_dir(struct ref_store *ref_store, dir->cache, "refs/bisect/", 12, 1); add_entry_to_dir(dir, child_entry); } + + pos = search_ref_dir(dir, "refs/worktree/", 11); + + if (pos < 0) { + struct ref_entry *child_entry = create_dir_entry( + dir->cache, "refs/worktree/", 11, 1); + add_entry_to_dir(dir, child_entry); + } } } @@ -841,7 +877,7 @@ static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock, return 0; } } - if (old_oid && oidcmp(&lock->old_oid, old_oid)) { + if (old_oid && !oideq(&lock->old_oid, old_oid)) { strbuf_addf(err, "ref '%s' is at %s but expected %s", lock->ref_name, oid_to_hex(&lock->old_oid), @@ -2307,7 +2343,7 @@ static int check_old_oid(struct ref_update *update, struct object_id *oid, struct strbuf *err) { if (!(update->flags & REF_HAVE_OLD) || - !oidcmp(oid, &update->old_oid)) + oideq(oid, &update->old_oid)) return 0; if (is_null_oid(&update->old_oid)) @@ -2443,7 +2479,7 @@ static int lock_ref_for_update(struct files_ref_store *refs, !(update->flags & REF_DELETING) && !(update->flags & REF_LOG_ONLY)) { if (!(update->type & REF_ISSYMREF) && - !oidcmp(&lock->old_oid, &update->new_oid)) { + oideq(&lock->old_oid, &update->new_oid)) { /* * The reference already has the desired * value, so we don't need to write it. diff --git a/refs/packed-backend.c b/refs/packed-backend.c index d447a731da..c01c7f5901 100644 --- a/refs/packed-backend.c +++ b/refs/packed-backend.c @@ -274,8 +274,8 @@ struct snapshot_record { static int cmp_packed_ref_records(const void *v1, const void *v2) { const struct snapshot_record *e1 = v1, *e2 = v2; - const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1; - const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1; + const char *r1 = e1->start + the_hash_algo->hexsz + 1; + const char *r2 = e2->start + the_hash_algo->hexsz + 1; while (1) { if (*r1 == '\n') @@ -297,7 +297,7 @@ static int cmp_packed_ref_records(const void *v1, const void *v2) */ static int cmp_record_to_refname(const char *rec, const char *refname) { - const char *r1 = rec + GIT_SHA1_HEXSZ + 1; + const char *r1 = rec + the_hash_algo->hexsz + 1; const char *r2 = refname; while (1) { @@ -344,7 +344,7 @@ static void sort_snapshot(struct snapshot *snapshot) if (!eol) /* The safety check should prevent this. 
*/ BUG("unterminated line found in packed-refs"); - if (eol - pos < GIT_SHA1_HEXSZ + 2) + if (eol - pos < the_hash_algo->hexsz + 2) die_invalid_line(snapshot->refs->path, pos, eof - pos); eol++; @@ -456,7 +456,7 @@ static void verify_buffer_safe(struct snapshot *snapshot) return; last_line = find_start_of_record(start, eof - 1); - if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2) + if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2) die_invalid_line(snapshot->refs->path, last_line, eof - last_line); } @@ -796,7 +796,7 @@ static int next_record(struct packed_ref_iterator *iter) iter->base.flags = REF_ISPACKED; - if (iter->eof - p < GIT_SHA1_HEXSZ + 2 || + if (iter->eof - p < the_hash_algo->hexsz + 2 || parse_oid_hex(p, &iter->oid, &p) || !isspace(*p++)) die_invalid_line(iter->snapshot->refs->path, @@ -826,7 +826,7 @@ static int next_record(struct packed_ref_iterator *iter) if (iter->pos < iter->eof && *iter->pos == '^') { p = iter->pos + 1; - if (iter->eof - p < GIT_SHA1_HEXSZ + 1 || + if (iter->eof - p < the_hash_algo->hexsz + 1 || parse_oid_hex(p, &iter->peeled, &p) || *p++ != '\n') die_invalid_line(iter->snapshot->refs->path, @@ -1160,7 +1160,7 @@ static int write_with_updates(struct packed_ref_store *refs, "reference already exists", update->refname); goto error; - } else if (oidcmp(&update->old_oid, iter->oid)) { + } else if (!oideq(&update->old_oid, iter->oid)) { strbuf_addf(err, "cannot update ref '%s': " "is at %s but expected %s", update->refname, diff --git a/refs/packed-backend.h b/refs/packed-backend.h index 640245d3b9..a01a0aff9c 100644 --- a/refs/packed-backend.h +++ b/refs/packed-backend.h @@ -1,6 +1,8 @@ #ifndef REFS_PACKED_BACKEND_H #define REFS_PACKED_BACKEND_H +struct ref_transaction; + /* * Support for storing references in a `packed-refs` file. * diff --git a/refs/ref-cache.c b/refs/ref-cache.c index 9b110c8494..b7052f72e2 100644 --- a/refs/ref-cache.c +++ b/refs/ref-cache.c @@ -272,7 +272,7 @@ static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2 /* This is impossible by construction */ die("Reference directory conflict: %s", ref1->name); - if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid)) + if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid)) die("Duplicated ref, and SHA1s don't match: %s", ref1->name); warning("Duplicated ref: %s", ref1->name); diff --git a/refs/ref-cache.h b/refs/ref-cache.h index eda65e73ed..3bfb89d2b3 100644 --- a/refs/ref-cache.h +++ b/refs/ref-cache.h @@ -1,7 +1,10 @@ #ifndef REFS_REF_CACHE_H #define REFS_REF_CACHE_H +#include "cache.h" + struct ref_dir; +struct ref_store; /* * If this ref_cache is filled lazily, this function is used to load diff --git a/refs/refs-internal.h b/refs/refs-internal.h index b425ae40b1..f2d8c0123a 100644 --- a/refs/refs-internal.h +++ b/refs/refs-internal.h @@ -1,8 +1,12 @@ #ifndef REFS_REFS_INTERNAL_H #define REFS_REFS_INTERNAL_H +#include "cache.h" +#include "refs.h" #include "iterator.h" +struct ref_transaction; + /* * Data structures and functions for the internal use of the refs * module. 
Code outside of the refs module should use only the public diff --git a/remote-curl.c b/remote-curl.c index 762a55a75f..1220dffcdc 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -617,10 +617,11 @@ static int probe_rpc(struct rpc_state *rpc, struct slot_results *results) return err; } -static curl_off_t xcurl_off_t(ssize_t len) { - if (len > maximum_signed_value_of_type(curl_off_t)) +static curl_off_t xcurl_off_t(size_t len) { + uintmax_t size = len; + if (size > maximum_signed_value_of_type(curl_off_t)) die("cannot handle pushes this big"); - return (curl_off_t) len; + return (curl_off_t)size; } static int post_rpc(struct rpc_state *rpc) @@ -12,6 +12,7 @@ #include "string-list.h" #include "mergesort.h" #include "argv-array.h" +#include "commit-reach.h" enum map_direction { FROM_SRC, FROM_DST }; @@ -1204,9 +1205,36 @@ static void add_missing_tags(struct ref *src, struct ref **dst, struct ref ***ds * sent to the other side. */ if (sent_tips.nr) { + const int reachable_flag = 1; + struct commit_list *found_commits; + struct commit **src_commits; + int nr_src_commits = 0, alloc_src_commits = 16; + ALLOC_ARRAY(src_commits, alloc_src_commits); + for_each_string_list_item(item, &src_tag) { struct ref *ref = item->util; + struct commit *commit; + + if (is_null_oid(&ref->new_oid)) + continue; + commit = lookup_commit_reference_gently(the_repository, + &ref->new_oid, + 1); + if (!commit) + /* not pushing a commit, which is not an error */ + continue; + + ALLOC_GROW(src_commits, nr_src_commits + 1, alloc_src_commits); + src_commits[nr_src_commits++] = commit; + } + + found_commits = get_reachable_subset(sent_tips.tip, sent_tips.nr, + src_commits, nr_src_commits, + reachable_flag); + + for_each_string_list_item(item, &src_tag) { struct ref *dst_ref; + struct ref *ref = item->util; struct commit *commit; if (is_null_oid(&ref->new_oid)) @@ -1222,7 +1250,7 @@ static void add_missing_tags(struct ref *src, struct ref **dst, struct ref ***ds * Is this tag, which they do not have, reachable from * any of the commits we are sending? */ - if (!in_merge_bases_many(commit, sent_tips.nr, sent_tips.tip)) + if (!(commit->object.flags & reachable_flag)) continue; /* Add it in */ @@ -1230,7 +1258,12 @@ static void add_missing_tags(struct ref *src, struct ref **dst, struct ref ***ds oidcpy(&dst_ref->new_oid, &ref->new_oid); dst_ref->peer_ref = copy_ref(ref); } + + clear_commit_marks_many(nr_src_commits, src_commits, reachable_flag); + free(src_commits); + free_commit_list(found_commits); } + string_list_clear(&src_tag, 0); free(sent_tips.tip); } @@ -1388,7 +1421,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror, ref->deletion = is_null_oid(&ref->new_oid); if (!ref->deletion && - !oidcmp(&ref->old_oid, &ref->new_oid)) { + oideq(&ref->old_oid, &ref->new_oid)) { ref->status = REF_STATUS_UPTODATE; continue; } @@ -1403,7 +1436,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror, * branch. */ if (ref->expect_old_sha1) { - if (oidcmp(&ref->old_oid, &ref->old_oid_expect)) + if (!oideq(&ref->old_oid, &ref->old_oid_expect)) reject_reason = REF_STATUS_REJECT_STALE; else /* If the ref isn't stale then force the update. 
*/ @@ -1791,55 +1824,6 @@ int resolve_remote_symref(struct ref *ref, struct ref *list) return 1; } -static void unmark_and_free(struct commit_list *list, unsigned int mark) -{ - while (list) { - struct commit *commit = pop_commit(&list); - commit->object.flags &= ~mark; - } -} - -int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid) -{ - struct object *o; - struct commit *old_commit, *new_commit; - struct commit_list *list, *used; - int found = 0; - - /* - * Both new_commit and old_commit must be commit-ish and new_commit is descendant of - * old_commit. Otherwise we require --force. - */ - o = deref_tag(the_repository, parse_object(the_repository, old_oid), - NULL, 0); - if (!o || o->type != OBJ_COMMIT) - return 0; - old_commit = (struct commit *) o; - - o = deref_tag(the_repository, parse_object(the_repository, new_oid), - NULL, 0); - if (!o || o->type != OBJ_COMMIT) - return 0; - new_commit = (struct commit *) o; - - if (parse_commit(new_commit) < 0) - return 0; - - used = list = NULL; - commit_list_insert(new_commit, &list); - while (list) { - new_commit = pop_most_recent_commit(&list, TMP_MARK); - commit_list_insert(new_commit, &used); - if (new_commit == old_commit) { - found = 1; - break; - } - } - unmark_and_free(list, TMP_MARK); - unmark_and_free(used, TMP_MARK); - return found; -} - /* * Lookup the upstream branch for the given branch and if present, optionally * compute the commit ahead/behind values for the pair. @@ -1903,7 +1887,7 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs, oid_to_hex(&theirs->object.oid)); argv_array_push(&argv, "--"); - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); setup_revisions(argv.argc, argv.argv, &revs, NULL); if (prepare_revision_walk(&revs)) die("revision walk setup failed"); @@ -2049,7 +2033,7 @@ struct ref *guess_remote_head(const struct ref *head, /* If refs/heads/master could be right, it is. 
*/ if (!all) { r = find_ref_by_name(refs, "refs/heads/master"); - if (r && !oidcmp(&r->old_oid, &head->old_oid)) + if (r && oideq(&r->old_oid, &head->old_oid)) return copy_ref(r); } @@ -2057,7 +2041,7 @@ struct ref *guess_remote_head(const struct ref *head, for (r = refs; r; r = r->next) { if (r != head && starts_with(r->name, "refs/heads/") && - !oidcmp(&r->old_oid, &head->old_oid)) { + oideq(&r->old_oid, &head->old_oid)) { *tail = copy_ref(r); tail = &((*tail)->next); if (!all) @@ -151,7 +151,6 @@ extern struct ref **get_remote_refs(int fd_out, struct packet_reader *reader, const struct string_list *server_options); int resolve_remote_symref(struct ref *ref, struct ref *list); -int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid); /* * Remove and free all but the first of any entries in the input list @@ -29,7 +29,7 @@ static int rerere_dir_alloc; #define RR_HAS_POSTIMAGE 1 #define RR_HAS_PREIMAGE 2 static struct rerere_dir { - unsigned char sha1[20]; + unsigned char hash[GIT_MAX_HEXSZ]; int status_alloc, status_nr; unsigned char *status; } **rerere_dir; @@ -52,7 +52,7 @@ static void free_rerere_id(struct string_list_item *item) static const char *rerere_id_hex(const struct rerere_id *id) { - return sha1_to_hex(id->collection->sha1); + return sha1_to_hex(id->collection->hash); } static void fit_variant(struct rerere_dir *rr_dir, int variant) @@ -115,7 +115,7 @@ static int is_rr_file(const char *name, const char *filename, int *variant) static void scan_rerere_dir(struct rerere_dir *rr_dir) { struct dirent *de; - DIR *dir = opendir(git_path("rr-cache/%s", sha1_to_hex(rr_dir->sha1))); + DIR *dir = opendir(git_path("rr-cache/%s", sha1_to_hex(rr_dir->hash))); if (!dir) return; @@ -133,24 +133,24 @@ static void scan_rerere_dir(struct rerere_dir *rr_dir) closedir(dir); } -static const unsigned char *rerere_dir_sha1(size_t i, void *table) +static const unsigned char *rerere_dir_hash(size_t i, void *table) { struct rerere_dir **rr_dir = table; - return rr_dir[i]->sha1; + return rr_dir[i]->hash; } static struct rerere_dir *find_rerere_dir(const char *hex) { - unsigned char sha1[20]; + unsigned char hash[GIT_MAX_RAWSZ]; struct rerere_dir *rr_dir; int pos; - if (get_sha1_hex(hex, sha1)) + if (get_sha1_hex(hex, hash)) return NULL; /* BUG */ - pos = sha1_pos(sha1, rerere_dir, rerere_dir_nr, rerere_dir_sha1); + pos = sha1_pos(hash, rerere_dir, rerere_dir_nr, rerere_dir_hash); if (pos < 0) { rr_dir = xmalloc(sizeof(*rr_dir)); - hashcpy(rr_dir->sha1, sha1); + hashcpy(rr_dir->hash, hash); rr_dir->status = NULL; rr_dir->status_nr = 0; rr_dir->status_alloc = 0; @@ -207,26 +207,27 @@ static void read_rr(struct string_list *rr) return; while (!strbuf_getwholeline(&buf, in, '\0')) { char *path; - unsigned char sha1[20]; + unsigned char hash[GIT_MAX_RAWSZ]; struct rerere_id *id; int variant; + const unsigned hexsz = the_hash_algo->hexsz; /* There has to be the hash, tab, path and then NUL */ - if (buf.len < 42 || get_sha1_hex(buf.buf, sha1)) - die("corrupt MERGE_RR"); + if (buf.len < hexsz + 2 || get_sha1_hex(buf.buf, hash)) + die(_("corrupt MERGE_RR")); - if (buf.buf[40] != '.') { + if (buf.buf[hexsz] != '.') { variant = 0; - path = buf.buf + 40; + path = buf.buf + hexsz; } else { errno = 0; - variant = strtol(buf.buf + 41, &path, 10); + variant = strtol(buf.buf + hexsz + 1, &path, 10); if (errno) - die("corrupt MERGE_RR"); + die(_("corrupt MERGE_RR")); } if (*(path++) != '\t') - die("corrupt MERGE_RR"); - buf.buf[40] = '\0'; + die(_("corrupt MERGE_RR")); + buf.buf[hexsz] = '\0'; id 
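Several hunks above rewrite !oidcmp(a, b) as oideq(a, b) (and a bare oidcmp(a, b) condition as !oideq(a, b)). A small sketch of why the two forms are interchangeable for equality tests; struct oid_sketch and the *_sketch helpers are illustrative, not git's object_id API:

#include <stdio.h>
#include <string.h>

/* Illustrative object ID type; git's real one is struct object_id. */
struct oid_sketch {
        unsigned char hash[32];
};

/* Ordering comparison, like oidcmp(): <0, 0 or >0. */
static int oidcmp_sketch(const struct oid_sketch *a, const struct oid_sketch *b)
{
        return memcmp(a->hash, b->hash, sizeof(a->hash));
}

/* Pure equality test, like oideq(): says directly what the caller means. */
static int oideq_sketch(const struct oid_sketch *a, const struct oid_sketch *b)
{
        return !memcmp(a->hash, b->hash, sizeof(a->hash));
}

int main(void)
{
        struct oid_sketch a = { { 1, 2, 3 } }, b = { { 1, 2, 3 } };

        printf("equal by oidcmp: %d, equal by oideq: %d\n",
               oidcmp_sketch(&a, &b) == 0, oideq_sketch(&a, &b));
        return 0;
}

Where the diff flips a condition from oidcmp() to !oideq(), the logic is unchanged; only the reader-facing intent is made explicit.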
= new_rerere_id_hex(buf.buf); id->variant = variant; string_list_insert(rr, path)->util = id; @@ -260,12 +261,12 @@ static int write_rr(struct string_list *rr, int out_fd) rr->items[i].string, 0); if (write_in_full(out_fd, buf.buf, buf.len) < 0) - die("unable to write rerere record"); + die(_("unable to write rerere record")); strbuf_release(&buf); } if (commit_lock_file(&write_lock) != 0) - die("unable to write rerere record"); + die(_("unable to write rerere record")); return 0; } @@ -303,38 +304,6 @@ static void rerere_io_putstr(const char *str, struct rerere_io *io) ferr_puts(str, io->output, &io->wrerror); } -/* - * Write a conflict marker to io->output (if defined). - */ -static void rerere_io_putconflict(int ch, int size, struct rerere_io *io) -{ - char buf[64]; - - while (size) { - if (size <= sizeof(buf) - 2) { - memset(buf, ch, size); - buf[size] = '\n'; - buf[size + 1] = '\0'; - size = 0; - } else { - int sz = sizeof(buf) - 1; - - /* - * Make sure we will not write everything out - * in this round by leaving at least 1 byte - * for the next round, giving the next round - * a chance to add the terminating LF. Yuck. - */ - if (size <= sz) - sz -= (sz - size) + 1; - memset(buf, ch, sz); - buf[sz] = '\0'; - size -= sz; - } - rerere_io_putstr(buf, io); - } -} - static void rerere_io_putmem(const char *mem, size_t sz, struct rerere_io *io) { if (io->output) @@ -385,134 +354,157 @@ static int is_cmarker(char *buf, int marker_char, int marker_size) return isspace(*buf); } -/* - * Read contents a file with conflicts, normalize the conflicts - * by (1) discarding the common ancestor version in diff3-style, - * (2) reordering our side and their side so that whichever sorts - * alphabetically earlier comes before the other one, while - * computing the "conflict ID", which is just an SHA-1 hash of - * one side of the conflict, NUL, the other side of the conflict, - * and NUL concatenated together. - * - * Return the number of conflict hunks found. - * - * NEEDSWORK: the logic and theory of operation behind this conflict - * normalization may deserve to be documented somewhere, perhaps in - * Documentation/technical/rerere.txt. 
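The read_rr() hunk earlier in this file's diff sizes MERGE_RR records by the_hash_algo->hexsz instead of the literal 40/42; each record is a hex conflict ID, an optional ".<variant>" suffix, a tab and the path, NUL-terminated. A simplified standalone parser for that record shape; parse_merge_rr() is an illustrative helper, not git's code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Parse one MERGE_RR record of the form
 *
 *     <hex conflict ID>[.<variant>]\t<path>
 *
 * where the conflict ID is hexsz characters long.  Error handling is
 * reduced to returning -1; the real read_rr() dies on corruption.
 */
static int parse_merge_rr(char *buf, size_t hexsz,
                          char **id_hex, int *variant, char **path)
{
        char *p;

        if (strlen(buf) < hexsz + 2)    /* hash, tab, at least one path byte */
                return -1;

        p = buf + hexsz;
        if (*p == '.')
                *variant = (int)strtol(p + 1, &p, 10);
        else
                *variant = 0;
        if (*p != '\t')
                return -1;

        *path = p + 1;
        buf[hexsz] = '\0';      /* terminate the hex conflict ID */
        *id_hex = buf;
        return 0;
}

int main(void)
{
        char rec[] = "0123456789abcdef0123456789abcdef01234567.2\tfoo/bar.c";
        char *id, *path;
        int variant;

        if (!parse_merge_rr(rec, 40, &id, &variant, &path))
                printf("id=%s variant=%d path=%s\n", id, variant, path);
        return 0;
}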
- */ -static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_size) +static void rerere_strbuf_putconflict(struct strbuf *buf, int ch, size_t size) +{ + strbuf_addchars(buf, ch, size); + strbuf_addch(buf, '\n'); +} + +static int handle_conflict(struct strbuf *out, struct rerere_io *io, + int marker_size, git_hash_ctx *ctx) { - git_SHA_CTX ctx; - int hunk_no = 0; enum { - RR_CONTEXT = 0, RR_SIDE_1, RR_SIDE_2, RR_ORIGINAL - } hunk = RR_CONTEXT; + RR_SIDE_1 = 0, RR_SIDE_2, RR_ORIGINAL + } hunk = RR_SIDE_1; struct strbuf one = STRBUF_INIT, two = STRBUF_INIT; - struct strbuf buf = STRBUF_INIT; - - if (sha1) - git_SHA1_Init(&ctx); + struct strbuf buf = STRBUF_INIT, conflict = STRBUF_INIT; + int has_conflicts = -1; while (!io->getline(&buf, io)) { if (is_cmarker(buf.buf, '<', marker_size)) { - if (hunk != RR_CONTEXT) - goto bad; - hunk = RR_SIDE_1; + if (handle_conflict(&conflict, io, marker_size, NULL) < 0) + break; + if (hunk == RR_SIDE_1) + strbuf_addbuf(&one, &conflict); + else + strbuf_addbuf(&two, &conflict); + strbuf_release(&conflict); } else if (is_cmarker(buf.buf, '|', marker_size)) { if (hunk != RR_SIDE_1) - goto bad; + break; hunk = RR_ORIGINAL; } else if (is_cmarker(buf.buf, '=', marker_size)) { if (hunk != RR_SIDE_1 && hunk != RR_ORIGINAL) - goto bad; + break; hunk = RR_SIDE_2; } else if (is_cmarker(buf.buf, '>', marker_size)) { if (hunk != RR_SIDE_2) - goto bad; + break; if (strbuf_cmp(&one, &two) > 0) strbuf_swap(&one, &two); - hunk_no++; - hunk = RR_CONTEXT; - rerere_io_putconflict('<', marker_size, io); - rerere_io_putmem(one.buf, one.len, io); - rerere_io_putconflict('=', marker_size, io); - rerere_io_putmem(two.buf, two.len, io); - rerere_io_putconflict('>', marker_size, io); - if (sha1) { - git_SHA1_Update(&ctx, one.buf ? one.buf : "", - one.len + 1); - git_SHA1_Update(&ctx, two.buf ? two.buf : "", - two.len + 1); + has_conflicts = 1; + rerere_strbuf_putconflict(out, '<', marker_size); + strbuf_addbuf(out, &one); + rerere_strbuf_putconflict(out, '=', marker_size); + strbuf_addbuf(out, &two); + rerere_strbuf_putconflict(out, '>', marker_size); + if (ctx) { + the_hash_algo->update_fn(ctx, one.buf ? + one.buf : "", + one.len + 1); + the_hash_algo->update_fn(ctx, two.buf ? + two.buf : "", + two.len + 1); } - strbuf_reset(&one); - strbuf_reset(&two); + break; } else if (hunk == RR_SIDE_1) strbuf_addbuf(&one, &buf); else if (hunk == RR_ORIGINAL) ; /* discard */ else if (hunk == RR_SIDE_2) strbuf_addbuf(&two, &buf); - else - rerere_io_putstr(buf.buf, io); - continue; - bad: - hunk = 99; /* force error exit */ - break; } strbuf_release(&one); strbuf_release(&two); strbuf_release(&buf); - if (sha1) - git_SHA1_Final(sha1, &ctx); - if (hunk != RR_CONTEXT) - return -1; - return hunk_no; + return has_conflicts; +} + +/* + * Read contents a file with conflicts, normalize the conflicts + * by (1) discarding the common ancestor version in diff3-style, + * (2) reordering our side and their side so that whichever sorts + * alphabetically earlier comes before the other one, while + * computing the "conflict ID", which is just an SHA-1 hash of + * one side of the conflict, NUL, the other side of the conflict, + * and NUL concatenated together. + * + * Return 1 if conflict hunks are found, 0 if there are no conflict + * hunks and -1 if an error occured. 
+ */ +static int handle_path(unsigned char *hash, struct rerere_io *io, int marker_size) +{ + git_hash_ctx ctx; + struct strbuf buf = STRBUF_INIT, out = STRBUF_INIT; + int has_conflicts = 0; + if (hash) + the_hash_algo->init_fn(&ctx); + + while (!io->getline(&buf, io)) { + if (is_cmarker(buf.buf, '<', marker_size)) { + has_conflicts = handle_conflict(&out, io, marker_size, + hash ? &ctx : NULL); + if (has_conflicts < 0) + break; + rerere_io_putmem(out.buf, out.len, io); + strbuf_reset(&out); + } else + rerere_io_putstr(buf.buf, io); + } + strbuf_release(&buf); + strbuf_release(&out); + + if (hash) + the_hash_algo->final_fn(hash, &ctx); + + return has_conflicts; } /* * Scan the path for conflicts, do the "handle_path()" thing above, and * return the number of conflict hunks found. */ -static int handle_file(const char *path, unsigned char *sha1, const char *output) +static int handle_file(struct index_state *istate, + const char *path, unsigned char *hash, const char *output) { - int hunk_no = 0; + int has_conflicts = 0; struct rerere_io_file io; - int marker_size = ll_merge_marker_size(path); + int marker_size = ll_merge_marker_size(istate, path); memset(&io, 0, sizeof(io)); io.io.getline = rerere_file_getline; io.input = fopen(path, "r"); io.io.wrerror = 0; if (!io.input) - return error_errno("Could not open %s", path); + return error_errno(_("could not open '%s'"), path); if (output) { io.io.output = fopen(output, "w"); if (!io.io.output) { - error_errno("Could not write %s", output); + error_errno(_("could not write '%s'"), output); fclose(io.input); return -1; } } - hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size); + has_conflicts = handle_path(hash, (struct rerere_io *)&io, marker_size); fclose(io.input); if (io.io.wrerror) - error("There were errors while writing %s (%s)", + error(_("there were errors while writing '%s' (%s)"), path, strerror(io.io.wrerror)); if (io.io.output && fclose(io.io.output)) - io.io.wrerror = error_errno("Failed to flush %s", path); + io.io.wrerror = error_errno(_("failed to flush '%s'"), path); - if (hunk_no < 0) { + if (has_conflicts < 0) { if (output) unlink_or_warn(output); - return error("Could not parse conflict hunks in %s", path); + return error(_("could not parse conflict hunks in '%s'"), path); } if (io.io.wrerror) return -1; - return hunk_no; + return has_conflicts; } /* @@ -523,9 +515,9 @@ static int handle_file(const char *path, unsigned char *sha1, const char *output * stages we have already looked at in this invocation of this * function. 
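The new handle_conflict()/handle_path() pair above still derives the conflict ID the same way: the two sides of a hunk are sorted so the lexicographically smaller one comes first, then the hash is fed side one, a NUL, side two, a NUL. A standalone sketch of that feeding order, using FNV-1a purely as a stand-in for the SHA-1/the_hash_algo hashing rerere actually performs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t fnv1a(uint64_t h, const char *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                h ^= (unsigned char)buf[i];
                h *= 1099511628211ULL;
        }
        return h;
}

static uint64_t conflict_id(const char *ours, const char *theirs)
{
        const char *one = ours, *two = theirs;
        uint64_t h = 14695981039346656037ULL;  /* FNV-1a offset basis */

        if (strcmp(one, two) > 0) {     /* reorder, as handle_conflict() does */
                const char *tmp = one;
                one = two;
                two = tmp;
        }
        h = fnv1a(h, one, strlen(one) + 1);     /* include the terminating NUL */
        h = fnv1a(h, two, strlen(two) + 1);
        return h;
}

int main(void)
{
        printf("%016llx\n", (unsigned long long)
               conflict_id("int x = 1;\n", "int x = 2;\n"));
        printf("%016llx\n", (unsigned long long)
               conflict_id("int x = 2;\n", "int x = 1;\n"));
        return 0;
}

Because of the reordering, both calls above print the same ID, which is what lets a resolution recorded with the sides one way round be replayed when they show up swapped.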
*/ -static int check_one_conflict(int i, int *type) +static int check_one_conflict(struct index_state *istate, int i, int *type) { - const struct cache_entry *e = active_cache[i]; + const struct cache_entry *e = istate->cache[i]; if (!ce_stage(e)) { *type = RESOLVED; @@ -533,13 +525,13 @@ static int check_one_conflict(int i, int *type) } *type = PUNTED; - while (i < active_nr && ce_stage(active_cache[i]) == 1) + while (i < istate->cache_nr && ce_stage(istate->cache[i]) == 1) i++; /* Only handle regular files with both stages #2 and #3 */ - if (i + 1 < active_nr) { - const struct cache_entry *e2 = active_cache[i]; - const struct cache_entry *e3 = active_cache[i + 1]; + if (i + 1 < istate->cache_nr) { + const struct cache_entry *e2 = istate->cache[i]; + const struct cache_entry *e3 = istate->cache[i + 1]; if (ce_stage(e2) == 2 && ce_stage(e3) == 3 && ce_same_name(e, e3) && @@ -549,7 +541,7 @@ static int check_one_conflict(int i, int *type) } /* Skip the entries with the same name */ - while (i < active_nr && ce_same_name(e, active_cache[i])) + while (i < istate->cache_nr && ce_same_name(e, istate->cache[i])) i++; return i; } @@ -565,16 +557,17 @@ static int check_one_conflict(int i, int *type) * are identical to the previous round, might want to be handled, * though. */ -static int find_conflict(struct string_list *conflict) +static int find_conflict(struct repository *r, struct string_list *conflict) { int i; - if (read_cache() < 0) - return error("Could not read index"); - for (i = 0; i < active_nr;) { + if (read_index(r->index) < 0) + return error(_("index file corrupt")); + + for (i = 0; i < r->index->cache_nr;) { int conflict_type; - const struct cache_entry *e = active_cache[i]; - i = check_one_conflict(i, &conflict_type); + const struct cache_entry *e = r->index->cache[i]; + i = check_one_conflict(r->index, i, &conflict_type); if (conflict_type == THREE_STAGED) string_list_insert(conflict, (const char *)e->name); } @@ -596,18 +589,19 @@ static int find_conflict(struct string_list *conflict) * NEEDSWORK: we may want to fix the caller that implements "rerere * remaining" to do this without abusing merge_rr. */ -int rerere_remaining(struct string_list *merge_rr) +int rerere_remaining(struct repository *r, struct string_list *merge_rr) { int i; + if (setup_rerere(merge_rr, RERERE_READONLY)) return 0; - if (read_cache() < 0) - return error("Could not read index"); + if (read_index(r->index) < 0) + return error(_("index file corrupt")); - for (i = 0; i < active_nr;) { + for (i = 0; i < r->index->cache_nr;) { int conflict_type; - const struct cache_entry *e = active_cache[i]; - i = check_one_conflict(i, &conflict_type); + const struct cache_entry *e = r->index->cache[i]; + i = check_one_conflict(r->index, i, &conflict_type); if (conflict_type == PUNTED) string_list_insert(merge_rr, (const char *)e->name); else if (conflict_type == RESOLVED) { @@ -627,7 +621,8 @@ int rerere_remaining(struct string_list *merge_rr) * if that recorded conflict resolves cleanly what we * got in the "cur". */ -static int try_merge(const struct rerere_id *id, const char *path, +static int try_merge(struct index_state *istate, + const struct rerere_id *id, const char *path, mmfile_t *cur, mmbuffer_t *result) { int ret; @@ -641,7 +636,8 @@ static int try_merge(const struct rerere_id *id, const char *path, * A three-way merge. Note that this honors user-customizable * low-level merge driver settings. 
*/ - ret = ll_merge(result, path, &base, NULL, cur, "", &other, "", NULL); + ret = ll_merge(result, path, &base, NULL, cur, "", &other, "", + istate, NULL); free(base.ptr); free(other.ptr); @@ -659,7 +655,7 @@ static int try_merge(const struct rerere_id *id, const char *path, * Returns 0 for successful replay of recorded resolution, or non-zero * for failure. */ -static int merge(const struct rerere_id *id, const char *path) +static int merge(struct index_state *istate, const struct rerere_id *id, const char *path) { FILE *f; int ret; @@ -670,13 +666,13 @@ static int merge(const struct rerere_id *id, const char *path) * Normalize the conflicts in path and write it out to * "thisimage" temporary file. */ - if ((handle_file(path, NULL, rerere_path(id, "thisimage")) < 0) || + if ((handle_file(istate, path, NULL, rerere_path(id, "thisimage")) < 0) || read_mmfile(&cur, rerere_path(id, "thisimage"))) { ret = 1; goto out; } - ret = try_merge(id, path, &cur, &result); + ret = try_merge(istate, id, path, &cur, &result); if (ret) goto out; @@ -685,17 +681,17 @@ static int merge(const struct rerere_id *id, const char *path) * Mark that "postimage" was used to help gc. */ if (utime(rerere_path(id, "postimage"), NULL) < 0) - warning_errno("failed utime() on %s", + warning_errno(_("failed utime() on '%s'"), rerere_path(id, "postimage")); /* Update "path" with the resolution */ f = fopen(path, "w"); if (!f) - return error_errno("Could not open %s", path); + return error_errno(_("could not open '%s'"), path); if (fwrite(result.ptr, result.size, 1, f) != 1) - error_errno("Could not write %s", path); + error_errno(_("could not write '%s'"), path); if (fclose(f)) - return error_errno("Writing %s failed", path); + return error_errno(_("writing '%s' failed"), path); out: free(cur.ptr); @@ -704,7 +700,7 @@ out: return ret; } -static void update_paths(struct string_list *update) +static void update_paths(struct repository *r, struct string_list *update) { struct lock_file index_lock = LOCK_INIT; int i; @@ -713,15 +709,15 @@ static void update_paths(struct string_list *update) for (i = 0; i < update->nr; i++) { struct string_list_item *item = &update->items[i]; - if (add_file_to_cache(item->string, 0)) + if (add_file_to_index(r->index, item->string, 0)) exit(128); - fprintf(stderr, "Staged '%s' using previous resolution.\n", + fprintf_ln(stderr, _("Staged '%s' using previous resolution."), item->string); } - if (write_locked_index(&the_index, &index_lock, + if (write_locked_index(r->index, &index_lock, COMMIT_LOCK | SKIP_IF_UNCHANGED)) - die("Unable to write new index file"); + die(_("unable to write new index file")); } static void remove_variant(struct rerere_id *id) @@ -738,7 +734,8 @@ static void remove_variant(struct rerere_id *id) * only have the preimage for that conflict, in which case the result * needs to be recorded as a resolution in a postimage file. */ -static void do_rerere_one_path(struct string_list_item *rr_item, +static void do_rerere_one_path(struct index_state *istate, + struct string_list_item *rr_item, struct string_list *update) { const char *path = rr_item->string; @@ -750,10 +747,10 @@ static void do_rerere_one_path(struct string_list_item *rr_item, /* Has the user resolved it already? 
*/ if (variant >= 0) { - if (!handle_file(path, NULL, NULL)) { + if (!handle_file(istate, path, NULL, NULL)) { copy_file(rerere_path(id, "postimage"), path, 0666); id->collection->status[variant] |= RR_HAS_POSTIMAGE; - fprintf(stderr, "Recorded resolution for '%s'.\n", path); + fprintf_ln(stderr, _("Recorded resolution for '%s'."), path); free_rerere_id(rr_item); rr_item->util = NULL; return; @@ -774,7 +771,7 @@ static void do_rerere_one_path(struct string_list_item *rr_item, continue; vid.variant = variant; - if (merge(&vid, path)) + if (merge(istate, &vid, path)) continue; /* failed to replay */ /* @@ -787,9 +784,9 @@ static void do_rerere_one_path(struct string_list_item *rr_item, if (rerere_autoupdate) string_list_insert(update, path); else - fprintf(stderr, - "Resolved '%s' using previous resolution.\n", - path); + fprintf_ln(stderr, + _("Resolved '%s' using previous resolution."), + path); free_rerere_id(rr_item); rr_item->util = NULL; return; @@ -799,24 +796,25 @@ static void do_rerere_one_path(struct string_list_item *rr_item, assign_variant(id); variant = id->variant; - handle_file(path, NULL, rerere_path(id, "preimage")); + handle_file(istate, path, NULL, rerere_path(id, "preimage")); if (id->collection->status[variant] & RR_HAS_POSTIMAGE) { const char *path = rerere_path(id, "postimage"); if (unlink(path)) - die_errno("cannot unlink stray '%s'", path); + die_errno(_("cannot unlink stray '%s'"), path); id->collection->status[variant] &= ~RR_HAS_POSTIMAGE; } id->collection->status[variant] |= RR_HAS_PREIMAGE; - fprintf(stderr, "Recorded preimage for '%s'\n", path); + fprintf_ln(stderr, _("Recorded preimage for '%s'"), path); } -static int do_plain_rerere(struct string_list *rr, int fd) +static int do_plain_rerere(struct repository *r, + struct string_list *rr, int fd) { struct string_list conflict = STRING_LIST_INIT_DUP; struct string_list update = STRING_LIST_INIT_DUP; int i; - find_conflict(&conflict); + find_conflict(r, &conflict); /* * MERGE_RR records paths with conflicts immediately after @@ -826,23 +824,24 @@ static int do_plain_rerere(struct string_list *rr, int fd) */ for (i = 0; i < conflict.nr; i++) { struct rerere_id *id; - unsigned char sha1[20]; + unsigned char hash[GIT_MAX_RAWSZ]; const char *path = conflict.items[i].string; int ret; - if (string_list_has_string(rr, path)) - continue; - /* * Ask handle_file() to scan and assign a * conflict ID. No need to write anything out * yet. */ - ret = handle_file(path, sha1, NULL); + ret = handle_file(r->index, path, hash, NULL); + if (ret != 0 && string_list_has_string(rr, path)) { + remove_variant(string_list_lookup(rr, path)->util); + string_list_remove(rr, path, 1); + } if (ret < 1) continue; - id = new_rerere_id(sha1); + id = new_rerere_id(hash); string_list_insert(rr, path)->util = id; /* Ensure that the directory exists. 
*/ @@ -850,10 +849,10 @@ static int do_plain_rerere(struct string_list *rr, int fd) } for (i = 0; i < rr->nr; i++) - do_rerere_one_path(&rr->items[i], &update); + do_rerere_one_path(r->index, &rr->items[i], &update); if (update.nr) - update_paths(&update); + update_paths(r, &update); return write_rr(rr, fd); } @@ -879,7 +878,7 @@ static int is_rerere_enabled(void) return rr_cache_exists; if (!rr_cache_exists && mkdir_in_gitdir(git_path_rr_cache())) - die("Could not create directory %s", git_path_rr_cache()); + die(_("could not create directory '%s'"), git_path_rr_cache()); return 1; } @@ -908,7 +907,7 @@ int setup_rerere(struct string_list *merge_rr, int flags) * perform mergy operations, possibly leaving conflicted index entries * and working tree files. */ -int rerere(int flags) +int repo_rerere(struct repository *r, int flags) { struct string_list merge_rr = STRING_LIST_INIT_DUP; int fd, status; @@ -916,7 +915,7 @@ int rerere(int flags) fd = setup_rerere(&merge_rr, flags); if (fd < 0) return 0; - status = do_plain_rerere(&merge_rr, fd); + status = do_plain_rerere(r, &merge_rr, fd); free_rerere_dirs(); return status; } @@ -953,29 +952,30 @@ static int rerere_mem_getline(struct strbuf *sb, struct rerere_io *io_) return 0; } -static int handle_cache(const char *path, unsigned char *sha1, const char *output) +static int handle_cache(struct index_state *istate, + const char *path, unsigned char *hash, const char *output) { mmfile_t mmfile[3] = {{NULL}}; mmbuffer_t result = {NULL, 0}; const struct cache_entry *ce; - int pos, len, i, hunk_no; + int pos, len, i, has_conflicts; struct rerere_io_mem io; - int marker_size = ll_merge_marker_size(path); + int marker_size = ll_merge_marker_size(istate, path); /* * Reproduce the conflicted merge in-core */ len = strlen(path); - pos = cache_name_pos(path, len); + pos = index_name_pos(istate, path, len); if (0 <= pos) return -1; pos = -pos - 1; - while (pos < active_nr) { + while (pos < istate->cache_nr) { enum object_type type; unsigned long size; - ce = active_cache[pos++]; + ce = istate->cache[pos++]; if (ce_namelen(ce) != len || memcmp(ce->name, path, len)) break; i = ce_stage(ce) - 1; @@ -995,7 +995,8 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu */ ll_merge(&result, path, &mmfile[0], NULL, &mmfile[1], "ours", - &mmfile[2], "theirs", NULL); + &mmfile[2], "theirs", + istate, NULL); for (i = 0; i < 3; i++) free(mmfile[i].ptr); @@ -1012,18 +1013,20 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu * Grab the conflict ID and optionally write the original * contents with conflict markers out. 
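handle_cache() above rebuilds the conflicted file in-core from the unmerged index entries, where stage 1 is the common ancestor, stage 2 is ours and stage 3 is theirs, and hands the three buffers to ll_merge(). A small illustration of that stage-to-slot mapping; struct merge_input and place_stage() are made-up names:

#include <stdio.h>

/*
 * Unmerged index entries carry a stage: 1 is the common ancestor,
 * 2 is ours, 3 is theirs.  handle_cache() fills its three merge
 * inputs by indexing with (stage - 1); an absent stage simply leaves
 * its slot empty.
 */
struct merge_input {
        const char *label;
        const char *contents;
};

static void place_stage(struct merge_input inputs[3], int stage,
                        const char *contents)
{
        inputs[stage - 1].contents = contents;
}

int main(void)
{
        struct merge_input inputs[3] = {
                { "base",   NULL },
                { "ours",   NULL },
                { "theirs", NULL },
        };
        int i;

        place_stage(inputs, 1, "int x;\n");
        place_stage(inputs, 2, "int x = 1;\n");
        place_stage(inputs, 3, "int x = 2;\n");

        for (i = 0; i < 3; i++)
                printf("stage %d (%s): %s", i + 1, inputs[i].label,
                       inputs[i].contents ? inputs[i].contents : "(absent)\n");
        return 0;
}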
*/ - hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size); + has_conflicts = handle_path(hash, (struct rerere_io *)&io, marker_size); strbuf_release(&io.input); if (io.io.output) fclose(io.io.output); - return hunk_no; + return has_conflicts; } -static int rerere_forget_one_path(const char *path, struct string_list *rr) +static int rerere_forget_one_path(struct index_state *istate, + const char *path, + struct string_list *rr) { const char *filename; struct rerere_id *id; - unsigned char sha1[20]; + unsigned char hash[GIT_MAX_RAWSZ]; int ret; struct string_list_item *item; @@ -1031,12 +1034,12 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr) * Recreate the original conflict from the stages in the * index and compute the conflict ID */ - ret = handle_cache(path, sha1, NULL); + ret = handle_cache(istate, path, hash, NULL); if (ret < 1) - return error("Could not parse conflict hunks in '%s'", path); + return error(_("could not parse conflict hunks in '%s'"), path); /* Nuke the recorded resolution for the conflict */ - id = new_rerere_id(sha1); + id = new_rerere_id(hash); for (id->variant = 0; id->variant < id->collection->status_nr; @@ -1048,13 +1051,13 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr) if (!has_rerere_resolution(id)) continue; - handle_cache(path, sha1, rerere_path(id, "thisimage")); + handle_cache(istate, path, hash, rerere_path(id, "thisimage")); if (read_mmfile(&cur, rerere_path(id, "thisimage"))) { free(cur.ptr); - error("Failed to update conflicted state in '%s'", path); + error(_("failed to update conflicted state in '%s'"), path); goto fail_exit; } - cleanly_resolved = !try_merge(id, path, &cur, &result); + cleanly_resolved = !try_merge(istate, id, path, &cur, &result); free(result.ptr); free(cur.ptr); if (cleanly_resolved) @@ -1062,16 +1065,16 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr) } if (id->collection->status_nr <= id->variant) { - error("no remembered resolution for '%s'", path); + error(_("no remembered resolution for '%s'"), path); goto fail_exit; } filename = rerere_path(id, "postimage"); if (unlink(filename)) { if (errno == ENOENT) - error("no remembered resolution for %s", path); + error(_("no remembered resolution for '%s'"), path); else - error_errno("cannot unlink %s", filename); + error_errno(_("cannot unlink '%s'"), filename); goto fail_exit; } @@ -1080,8 +1083,8 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr) * conflict in the working tree, run us again to record * the postimage. 
*/ - handle_cache(path, sha1, rerere_path(id, "preimage")); - fprintf(stderr, "Updated preimage for '%s'\n", path); + handle_cache(istate, path, hash, rerere_path(id, "preimage")); + fprintf_ln(stderr, _("Updated preimage for '%s'"), path); /* * And remember that we can record resolution for this @@ -1090,7 +1093,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr) item = string_list_insert(rr, path); free_rerere_id(item); item->util = id; - fprintf(stderr, "Forgot resolution for %s\n", path); + fprintf(stderr, _("Forgot resolution for '%s'\n"), path); return 0; fail_exit: @@ -1098,14 +1101,14 @@ fail_exit: return -1; } -int rerere_forget(struct pathspec *pathspec) +int rerere_forget(struct repository *r, struct pathspec *pathspec) { int i, fd; struct string_list conflict = STRING_LIST_INIT_DUP; struct string_list merge_rr = STRING_LIST_INIT_DUP; - if (read_cache() < 0) - return error("Could not read index"); + if (read_index(r->index) < 0) + return error(_("index file corrupt")); fd = setup_rerere(&merge_rr, RERERE_NOAUTOUPDATE); if (fd < 0) @@ -1116,14 +1119,14 @@ int rerere_forget(struct pathspec *pathspec) * recover the original conflicted state and then * find the conflicted paths. */ - unmerge_cache(pathspec); - find_conflict(&conflict); + unmerge_index(r->index, pathspec); + find_conflict(r, &conflict); for (i = 0; i < conflict.nr; i++) { struct string_list_item *it = &conflict.items[i]; - if (!match_pathspec(&the_index, pathspec, it->string, + if (!match_pathspec(r->index, pathspec, it->string, strlen(it->string), 0, NULL, 0)) continue; - rerere_forget_one_path(it->string, &merge_rr); + rerere_forget_one_path(r->index, it->string, &merge_rr); } return write_rr(&merge_rr, fd); } @@ -1193,7 +1196,7 @@ void rerere_gc(struct string_list *rr) git_config(git_default_config, NULL); dir = opendir(git_path("rr-cache")); if (!dir) - die_errno("unable to open rr-cache directory"); + die_errno(_("unable to open rr-cache directory")); /* Collect stale conflict IDs ... */ while ((e = readdir(dir))) { struct rerere_dir *rr_dir; @@ -4,6 +4,7 @@ #include "string-list.h" struct pathspec; +struct repository; #define RERERE_AUTOUPDATE 01 #define RERERE_NOAUTOUPDATE 02 @@ -23,7 +24,10 @@ struct rerere_id { }; int setup_rerere(struct string_list *, int); -int rerere(int); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define rerere(flags) repo_rerere(the_repository, flags) +#endif +int repo_rerere(struct repository *, int); /* * Given the conflict ID and the name of a "file" used for replaying * the recorded resolution (e.g. "preimage", "postimage"), return the @@ -31,8 +35,8 @@ int rerere(int); * return the path to the directory that houses these files. 
*/ const char *rerere_path(const struct rerere_id *, const char *file); -int rerere_forget(struct pathspec *); -int rerere_remaining(struct string_list *); +int rerere_forget(struct repository *, struct pathspec *); +int rerere_remaining(struct repository *, struct string_list *); void rerere_clear(struct string_list *); void rerere_gc(struct string_list *); diff --git a/revision.c b/revision.c index e411802ebe..13e0519c02 100644 --- a/revision.c +++ b/revision.c @@ -24,6 +24,9 @@ #include "packfile.h" #include "worktree.h" #include "argv-array.h" +#include "commit-reach.h" +#include "commit-graph.h" +#include "prio-queue.h" volatile show_early_output_fn_t show_early_output; @@ -51,7 +54,8 @@ static void mark_blob_uninteresting(struct blob *blob) blob->object.flags |= UNINTERESTING; } -static void mark_tree_contents_uninteresting(struct tree *tree) +static void mark_tree_contents_uninteresting(struct repository *r, + struct tree *tree) { struct tree_desc desc; struct name_entry entry; @@ -63,10 +67,10 @@ static void mark_tree_contents_uninteresting(struct tree *tree) while (tree_entry(&desc, &entry)) { switch (object_type(entry.mode)) { case OBJ_TREE: - mark_tree_uninteresting(lookup_tree(the_repository, entry.oid)); + mark_tree_uninteresting(r, lookup_tree(r, entry.oid)); break; case OBJ_BLOB: - mark_blob_uninteresting(lookup_blob(the_repository, entry.oid)); + mark_blob_uninteresting(lookup_blob(r, entry.oid)); break; default: /* Subproject commit - not in this repository */ @@ -81,7 +85,7 @@ static void mark_tree_contents_uninteresting(struct tree *tree) free_tree_buffer(tree); } -void mark_tree_uninteresting(struct tree *tree) +void mark_tree_uninteresting(struct repository *r, struct tree *tree) { struct object *obj; @@ -92,7 +96,7 @@ void mark_tree_uninteresting(struct tree *tree) if (obj->flags & UNINTERESTING) return; obj->flags |= UNINTERESTING; - mark_tree_contents_uninteresting(tree); + mark_tree_contents_uninteresting(r, tree); } struct commit_stack { @@ -175,7 +179,6 @@ static void add_pending_object_with_path(struct rev_info *revs, strbuf_release(&buf); return; /* do not add the commit itself */ } - obj->flags |= USER_GIVEN; add_object_array_with_path(obj, name, &revs->pending, mode, path); } @@ -198,7 +201,7 @@ void add_head_to_pending(struct rev_info *revs) struct object *obj; if (get_oid("HEAD", &oid)) return; - obj = parse_object(the_repository, &oid); + obj = parse_object(revs->repo, &oid); if (!obj) return; add_pending_object(revs, obj, "HEAD"); @@ -210,7 +213,7 @@ static struct object *get_reference(struct rev_info *revs, const char *name, { struct object *object; - object = parse_object(the_repository, oid); + object = parse_object(revs->repo, oid); if (!object) { if (revs->ignore_missing) return object; @@ -247,7 +250,7 @@ static struct commit *handle_commit(struct rev_info *revs, add_pending_object(revs, object, tag->tag); if (!tag->tagged) die("bad tag"); - object = parse_object(the_repository, &tag->tagged->oid); + object = parse_object(revs->repo, &tag->tagged->oid); if (!object) { if (revs->ignore_missing_links || (flags & UNINTERESTING)) return NULL; @@ -297,7 +300,7 @@ static struct commit *handle_commit(struct rev_info *revs, if (!revs->tree_objects) return NULL; if (flags & UNINTERESTING) { - mark_tree_contents_uninteresting(tree); + mark_tree_contents_uninteresting(revs->repo, tree); return NULL; } add_pending_object_with_path(revs, object, name, mode, path); @@ -766,8 +769,8 @@ static void commit_list_insert_by_date_cached(struct commit *p, struct 
commit_li *cache = new_entry; } -static int add_parents_to_list(struct rev_info *revs, struct commit *commit, - struct commit_list **list, struct commit_list **cache_ptr) +static int process_parents(struct rev_info *revs, struct commit *commit, + struct commit_list **list, struct commit_list **cache_ptr) { struct commit_list *parent = commit->parents; unsigned left_flag; @@ -806,7 +809,8 @@ static int add_parents_to_list(struct rev_info *revs, struct commit *commit, if (p->object.flags & SEEN) continue; p->object.flags |= SEEN; - commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); + if (list) + commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); } return 0; } @@ -845,7 +849,8 @@ static int add_parents_to_list(struct rev_info *revs, struct commit *commit, p->object.flags |= left_flag; if (!(p->object.flags & SEEN)) { p->object.flags |= SEEN; - commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); + if (list) + commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); } if (revs->first_parent_only) break; @@ -877,7 +882,7 @@ static void cherry_pick_list(struct commit_list *list, struct rev_info *revs) return; left_first = left_count < right_count; - init_patch_ids(&ids); + init_patch_ids(revs->repo, &ids); ids.diffopts.pathspec = revs->diffopt.pathspec; /* Compute patch-ids for one side */ @@ -1089,7 +1094,7 @@ static int limit_list(struct rev_info *revs) if (revs->max_age != -1 && (commit->date < revs->max_age)) obj->flags |= UNINTERESTING; - if (add_parents_to_list(revs, commit, &list, NULL) < 0) + if (process_parents(revs, commit, &list, NULL) < 0) return -1; if (obj->flags & UNINTERESTING) { mark_parents_uninteresting(commit); @@ -1176,7 +1181,7 @@ struct all_refs_cb { int warned_bad_reflog; struct rev_info *all_revs; const char *name_for_errormsg; - struct ref_store *refs; + struct worktree *wt; }; int ref_excluded(struct string_list *ref_excludes, const char *path) @@ -1213,7 +1218,7 @@ static void init_all_refs_cb(struct all_refs_cb *cb, struct rev_info *revs, cb->all_revs = revs; cb->all_flags = flags; revs->rev_input_given = 1; - cb->refs = NULL; + cb->wt = NULL; } void clear_ref_exclusion(struct string_list **ref_excludes_p) @@ -1253,7 +1258,7 @@ static void handle_one_reflog_commit(struct object_id *oid, void *cb_data) { struct all_refs_cb *cb = cb_data; if (!is_null_oid(oid)) { - struct object *o = parse_object(the_repository, oid); + struct object *o = parse_object(cb->all_revs->repo, oid); if (o) { o->flags |= cb->all_flags; /* ??? CMDLINEFLAGS ??? 
*/ @@ -1276,14 +1281,20 @@ static int handle_one_reflog_ent(struct object_id *ooid, struct object_id *noid, return 0; } -static int handle_one_reflog(const char *path, const struct object_id *oid, +static int handle_one_reflog(const char *refname_in_wt, + const struct object_id *oid, int flag, void *cb_data) { struct all_refs_cb *cb = cb_data; + struct strbuf refname = STRBUF_INIT; + cb->warned_bad_reflog = 0; - cb->name_for_errormsg = path; - refs_for_each_reflog_ent(cb->refs, path, + strbuf_worktree_ref(cb->wt, &refname, refname_in_wt); + cb->name_for_errormsg = refname.buf; + refs_for_each_reflog_ent(get_main_ref_store(the_repository), + refname.buf, handle_one_reflog_ent, cb_data); + strbuf_release(&refname); return 0; } @@ -1298,8 +1309,8 @@ static void add_other_reflogs_to_pending(struct all_refs_cb *cb) if (wt->is_current) continue; - cb->refs = get_worktree_ref_store(wt); - refs_for_each_reflog(cb->refs, + cb->wt = wt; + refs_for_each_reflog(get_worktree_ref_store(wt), handle_one_reflog, cb); } @@ -1312,7 +1323,7 @@ void add_reflogs_to_pending(struct rev_info *revs, unsigned flags) cb.all_revs = revs; cb.all_flags = flags; - cb.refs = get_main_ref_store(the_repository); + cb.wt = NULL; for_each_reflog(handle_one_reflog, &cb); if (!revs->single_worktree) @@ -1320,13 +1331,14 @@ void add_reflogs_to_pending(struct rev_info *revs, unsigned flags) } static void add_cache_tree(struct cache_tree *it, struct rev_info *revs, - struct strbuf *path) + struct strbuf *path, unsigned int flags) { size_t baselen = path->len; int i; if (it->entry_count >= 0) { - struct tree *tree = lookup_tree(the_repository, &it->oid); + struct tree *tree = lookup_tree(revs->repo, &it->oid); + tree->object.flags |= flags; add_pending_object_with_path(revs, &tree->object, "", 040000, path->buf); } @@ -1334,14 +1346,15 @@ static void add_cache_tree(struct cache_tree *it, struct rev_info *revs, for (i = 0; i < it->subtree_nr; i++) { struct cache_tree_sub *sub = it->down[i]; strbuf_addf(path, "%s%s", baselen ? 
"/" : "", sub->name); - add_cache_tree(sub->cache_tree, revs, path); + add_cache_tree(sub->cache_tree, revs, path, flags); strbuf_setlen(path, baselen); } } static void do_add_index_objects_to_pending(struct rev_info *revs, - struct index_state *istate) + struct index_state *istate, + unsigned int flags) { int i; @@ -1352,16 +1365,17 @@ static void do_add_index_objects_to_pending(struct rev_info *revs, if (S_ISGITLINK(ce->ce_mode)) continue; - blob = lookup_blob(the_repository, &ce->oid); + blob = lookup_blob(revs->repo, &ce->oid); if (!blob) die("unable to add index blob to traversal"); + blob->object.flags |= flags; add_pending_object_with_path(revs, &blob->object, "", ce->ce_mode, ce->name); } if (istate->cache_tree) { struct strbuf path = STRBUF_INIT; - add_cache_tree(istate->cache_tree, revs, &path); + add_cache_tree(istate->cache_tree, revs, &path, flags); strbuf_release(&path); } } @@ -1370,8 +1384,8 @@ void add_index_objects_to_pending(struct rev_info *revs, unsigned int flags) { struct worktree **worktrees, **p; - read_cache(); - do_add_index_objects_to_pending(revs, &the_index); + read_index(revs->repo->index); + do_add_index_objects_to_pending(revs, revs->repo->index, flags); if (revs->single_worktree) return; @@ -1387,7 +1401,7 @@ void add_index_objects_to_pending(struct rev_info *revs, unsigned int flags) if (read_index_from(&istate, worktree_git_path(wt, "index"), get_worktree_git_dir(wt)) > 0) - do_add_index_objects_to_pending(revs, &istate); + do_add_index_objects_to_pending(revs, &istate, flags); discard_index(&istate); } free_worktrees(worktrees); @@ -1439,10 +1453,13 @@ static int add_parents_only(struct rev_info *revs, const char *arg_, int flags, return 1; } -void init_revisions(struct rev_info *revs, const char *prefix) +void repo_init_revisions(struct repository *r, + struct rev_info *revs, + const char *prefix) { memset(revs, 0, sizeof(*revs)); + revs->repo = r; revs->abbrev = DEFAULT_ABBREV; revs->ignore_merges = 1; revs->simplify_history = 1; @@ -1464,11 +1481,11 @@ void init_revisions(struct rev_info *revs, const char *prefix) revs->commit_format = CMIT_FMT_DEFAULT; revs->expand_tabs_in_log_default = 8; - init_grep_defaults(); - grep_init(&revs->grep_filter, prefix); + init_grep_defaults(revs->repo); + grep_init(&revs->grep_filter, revs->repo, prefix); revs->grep_filter.status_only = 1; - diff_setup(&revs->diffopt); + repo_diff_setup(revs->repo, &revs->diffopt); if (prefix && !revs->diffopt.prefix) { revs->diffopt.prefix = prefix; revs->diffopt.prefix_length = strlen(prefix); @@ -1496,6 +1513,7 @@ static void prepare_show_merge(struct rev_info *revs) struct object_id oid; const char **prune = NULL; int i, prune_num = 1; /* counting terminating NULL */ + struct index_state *istate = revs->repo->index; if (get_oid("HEAD", &oid)) die("--merge without HEAD?"); @@ -1511,20 +1529,20 @@ static void prepare_show_merge(struct rev_info *revs) free_commit_list(bases); head->object.flags |= SYMMETRIC_LEFT; - if (!active_nr) - read_cache(); - for (i = 0; i < active_nr; i++) { - const struct cache_entry *ce = active_cache[i]; + if (!istate->cache_nr) + read_index(istate); + for (i = 0; i < istate->cache_nr; i++) { + const struct cache_entry *ce = istate->cache[i]; if (!ce_stage(ce)) continue; - if (ce_path_match(&the_index, ce, &revs->prune_data, NULL)) { + if (ce_path_match(istate, ce, &revs->prune_data, NULL)) { prune_num++; REALLOC_ARRAY(prune, prune_num); prune[prune_num-2] = ce->name; prune[prune_num-1] = NULL; } - while ((i+1 < active_nr) && - ce_same_name(ce, 
active_cache[i+1])) + while ((i+1 < istate->cache_nr) && + ce_same_name(ce, istate->cache[i+1])) i++; } clear_pathspec(&revs->prune_data); @@ -1581,8 +1599,8 @@ static int handle_dotdot_1(const char *arg, char *dotdot, *dotdot = '\0'; } - a_obj = parse_object(the_repository, &a_oid); - b_obj = parse_object(the_repository, &b_oid); + a_obj = parse_object(revs->repo, &a_oid); + b_obj = parse_object(revs->repo, &b_oid); if (!a_obj || !b_obj) return dotdot_missing(arg, dotdot, revs, symmetric); @@ -1595,8 +1613,8 @@ static int handle_dotdot_1(const char *arg, char *dotdot, struct commit *a, *b; struct commit_list *exclude; - a = lookup_commit_reference(the_repository, &a_obj->oid); - b = lookup_commit_reference(the_repository, &b_obj->oid); + a = lookup_commit_reference(revs->repo, &a_obj->oid); + b = lookup_commit_reference(revs->repo, &b_obj->oid); if (!a || !b) return dotdot_missing(arg, dotdot, revs, symmetric); @@ -2205,7 +2223,7 @@ static int handle_revision_pseudo_opt(const char *submodule, BUG("--single-worktree cannot be used together with submodule"); refs = get_submodule_ref_store(submodule); } else - refs = get_main_ref_store(the_repository); + refs = get_main_ref_store(revs->repo); /* * NOTE! @@ -2319,7 +2337,7 @@ static void NORETURN diagnose_missing_default(const char *def) */ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *opt) { - int i, flags, left, seen_dashdash, read_from_stdin, got_rev_arg = 0, revarg_opt; + int i, flags, left, seen_dashdash, got_rev_arg = 0, revarg_opt; struct argv_array prune_data = ARGV_ARRAY_INIT; const char *submodule = NULL; @@ -2349,7 +2367,6 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s revarg_opt = opt ? opt->revarg_opt : 0; if (seen_dashdash) revarg_opt |= REVARG_CANNOT_BE_FILENAME; - read_from_stdin = 0; for (left = i = 1; i < argc; i++) { const char *arg = argv[i]; if (*arg == '-') { @@ -2368,7 +2385,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s argv[left++] = arg; continue; } - if (read_from_stdin++) + if (revs->read_from_stdin++) die("--stdin given twice?"); read_revisions_from_stdin(revs, &prune_data); continue; @@ -2455,7 +2472,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s if (revs->diffopt.objfind) revs->simplify_history = 0; - if (revs->topo_order) + if (revs->topo_order && !generation_numbers_enabled(the_repository)) revs->limited = 1; if (revs->prune_data.nr) { @@ -2886,13 +2903,225 @@ void reset_revision_walk(void) static int mark_uninteresting(const struct object_id *oid, struct packed_git *pack, uint32_t pos, - void *unused) + void *cb) { - struct object *o = parse_object(the_repository, oid); + struct rev_info *revs = cb; + struct object *o = parse_object(revs->repo, oid); o->flags |= UNINTERESTING | SEEN; return 0; } +define_commit_slab(indegree_slab, int); +define_commit_slab(author_date_slab, timestamp_t); + +struct topo_walk_info { + uint32_t min_generation; + struct prio_queue explore_queue; + struct prio_queue indegree_queue; + struct prio_queue topo_queue; + struct indegree_slab indegree; + struct author_date_slab author_date; +}; + +static inline void test_flag_and_insert(struct prio_queue *q, struct commit *c, int flag) +{ + if (c->object.flags & flag) + return; + + c->object.flags |= flag; + prio_queue_put(q, c); +} + +static void explore_walk_step(struct rev_info *revs) +{ + struct topo_walk_info *info = revs->topo_walk_info; + struct commit_list *p; + 
struct commit *c = prio_queue_get(&info->explore_queue); + + if (!c) + return; + + if (parse_commit_gently(c, 1) < 0) + return; + + if (revs->sort_order == REV_SORT_BY_AUTHOR_DATE) + record_author_date(&info->author_date, c); + + if (revs->max_age != -1 && (c->date < revs->max_age)) + c->object.flags |= UNINTERESTING; + + if (process_parents(revs, c, NULL, NULL) < 0) + return; + + if (c->object.flags & UNINTERESTING) + mark_parents_uninteresting(c); + + for (p = c->parents; p; p = p->next) + test_flag_and_insert(&info->explore_queue, p->item, TOPO_WALK_EXPLORED); +} + +static void explore_to_depth(struct rev_info *revs, + uint32_t gen_cutoff) +{ + struct topo_walk_info *info = revs->topo_walk_info; + struct commit *c; + while ((c = prio_queue_peek(&info->explore_queue)) && + c->generation >= gen_cutoff) + explore_walk_step(revs); +} + +static void indegree_walk_step(struct rev_info *revs) +{ + struct commit_list *p; + struct topo_walk_info *info = revs->topo_walk_info; + struct commit *c = prio_queue_get(&info->indegree_queue); + + if (!c) + return; + + if (parse_commit_gently(c, 1) < 0) + return; + + explore_to_depth(revs, c->generation); + + for (p = c->parents; p; p = p->next) { + struct commit *parent = p->item; + int *pi = indegree_slab_at(&info->indegree, parent); + + if (*pi) + (*pi)++; + else + *pi = 2; + + test_flag_and_insert(&info->indegree_queue, parent, TOPO_WALK_INDEGREE); + + if (revs->first_parent_only) + return; + } +} + +static void compute_indegrees_to_depth(struct rev_info *revs, + uint32_t gen_cutoff) +{ + struct topo_walk_info *info = revs->topo_walk_info; + struct commit *c; + while ((c = prio_queue_peek(&info->indegree_queue)) && + c->generation >= gen_cutoff) + indegree_walk_step(revs); +} + +static void init_topo_walk(struct rev_info *revs) +{ + struct topo_walk_info *info; + struct commit_list *list; + revs->topo_walk_info = xmalloc(sizeof(struct topo_walk_info)); + info = revs->topo_walk_info; + memset(info, 0, sizeof(struct topo_walk_info)); + + init_indegree_slab(&info->indegree); + memset(&info->explore_queue, 0, sizeof(info->explore_queue)); + memset(&info->indegree_queue, 0, sizeof(info->indegree_queue)); + memset(&info->topo_queue, 0, sizeof(info->topo_queue)); + + switch (revs->sort_order) { + default: /* REV_SORT_IN_GRAPH_ORDER */ + info->topo_queue.compare = NULL; + break; + case REV_SORT_BY_COMMIT_DATE: + info->topo_queue.compare = compare_commits_by_commit_date; + break; + case REV_SORT_BY_AUTHOR_DATE: + init_author_date_slab(&info->author_date); + info->topo_queue.compare = compare_commits_by_author_date; + info->topo_queue.cb_data = &info->author_date; + break; + } + + info->explore_queue.compare = compare_commits_by_gen_then_commit_date; + info->indegree_queue.compare = compare_commits_by_gen_then_commit_date; + + info->min_generation = GENERATION_NUMBER_INFINITY; + for (list = revs->commits; list; list = list->next) { + struct commit *c = list->item; + + if (parse_commit_gently(c, 1)) + continue; + + test_flag_and_insert(&info->explore_queue, c, TOPO_WALK_EXPLORED); + test_flag_and_insert(&info->indegree_queue, c, TOPO_WALK_INDEGREE); + + if (c->generation < info->min_generation) + info->min_generation = c->generation; + + *(indegree_slab_at(&info->indegree, c)) = 1; + + if (revs->sort_order == REV_SORT_BY_AUTHOR_DATE) + record_author_date(&info->author_date, c); + } + compute_indegrees_to_depth(revs, info->min_generation); + + for (list = revs->commits; list; list = list->next) { + struct commit *c = list->item; + + if 
(*(indegree_slab_at(&info->indegree, c)) == 1) + prio_queue_put(&info->topo_queue, c); + } + + /* + * This is unfortunate; the initial tips need to be shown + * in the order given from the revision traversal machinery. + */ + if (revs->sort_order == REV_SORT_IN_GRAPH_ORDER) + prio_queue_reverse(&info->topo_queue); +} + +static struct commit *next_topo_commit(struct rev_info *revs) +{ + struct commit *c; + struct topo_walk_info *info = revs->topo_walk_info; + + /* pop next off of topo_queue */ + c = prio_queue_get(&info->topo_queue); + + if (c) + *(indegree_slab_at(&info->indegree, c)) = 0; + + return c; +} + +static void expand_topo_walk(struct rev_info *revs, struct commit *commit) +{ + struct commit_list *p; + struct topo_walk_info *info = revs->topo_walk_info; + if (process_parents(revs, commit, NULL, NULL) < 0) { + if (!revs->ignore_missing_links) + die("Failed to traverse parents of commit %s", + oid_to_hex(&commit->object.oid)); + } + + for (p = commit->parents; p; p = p->next) { + struct commit *parent = p->item; + int *pi; + + if (parse_commit_gently(parent, 1) < 0) + continue; + + if (parent->generation < info->min_generation) { + info->min_generation = parent->generation; + compute_indegrees_to_depth(revs, info->min_generation); + } + + pi = indegree_slab_at(&info->indegree, parent); + + (*pi)--; + if (*pi == 1) + prio_queue_put(&info->topo_queue, parent); + + if (revs->first_parent_only) + return; + } +} + int prepare_revision_walk(struct rev_info *revs) { int i; @@ -2921,7 +3150,7 @@ int prepare_revision_walk(struct rev_info *revs) revs->treesame.name = "treesame"; if (revs->exclude_promisor_objects) { - for_each_packed_object(mark_uninteresting, NULL, + for_each_packed_object(mark_uninteresting, revs, FOR_EACH_OBJECT_PROMISOR_ONLY); } @@ -2929,11 +3158,13 @@ int prepare_revision_walk(struct rev_info *revs) commit_list_sort_by_date(&revs->commits); if (revs->no_walk) return 0; - if (revs->limited) + if (revs->limited) { if (limit_list(revs) < 0) return -1; - if (revs->topo_order) - sort_in_topological_order(&revs->commits, revs->sort_order); + if (revs->topo_order) + sort_in_topological_order(&revs->commits, revs->sort_order); + } else if (revs->topo_order) + init_topo_walk(revs); if (revs->line_level_traverse) line_log_filter(revs); if (revs->simplify_merges) @@ -2950,7 +3181,7 @@ static enum rewrite_result rewrite_one(struct rev_info *revs, struct commit **pp for (;;) { struct commit *p = *pp; if (!revs->limited) - if (add_parents_to_list(revs, p, &revs->commits, &cache) < 0) + if (process_parents(revs, p, &revs->commits, &cache) < 0) return rewrite_one_error; if (p->object.flags & UNINTERESTING) return rewrite_one_ok; @@ -3239,7 +3470,7 @@ static void track_linear(struct rev_info *revs, struct commit *commit) struct commit_list *p; for (p = revs->previous_parents; p; p = p->next) if (p->item == NULL || /* first commit */ - !oidcmp(&p->item->object.oid, &commit->object.oid)) + oideq(&p->item->object.oid, &commit->object.oid)) break; revs->linear = p != NULL; } @@ -3258,6 +3489,8 @@ static struct commit *get_revision_1(struct rev_info *revs) if (revs->reflog_info) commit = next_reflog_entry(revs->reflog_info); + else if (revs->topo_walk_info) + commit = next_topo_commit(revs); else commit = pop_commit(&revs->commits); @@ -3279,7 +3512,9 @@ static struct commit *get_revision_1(struct rev_info *revs) if (revs->reflog_info) try_to_simplify_commit(revs, commit); - else if (add_parents_to_list(revs, commit, &revs->commits, NULL) < 0) { + else if (revs->topo_walk_info) + 
expand_topo_walk(revs, commit); + else if (process_parents(revs, commit, &revs->commits, NULL) < 0) { if (!revs->ignore_missing_links) die("Failed to traverse parents of commit %s", oid_to_hex(&commit->object.oid)); diff --git a/revision.h b/revision.h index 701a4973dd..7987bfcd2e 100644 --- a/revision.h +++ b/revision.h @@ -21,15 +21,26 @@ #define SYMMETRIC_LEFT (1u<<8) #define PATCHSAME (1u<<9) #define BOTTOM (1u<<10) -#define USER_GIVEN (1u<<25) /* given directly by the user */ +/* + * Indicates object was reached by traversal. i.e. not given by user on + * command-line or stdin. + * NEEDSWORK: NOT_USER_GIVEN doesn't apply to commits because we only support + * filtering trees and blobs, but it may be useful to support filtering commits + * in the future. + */ +#define NOT_USER_GIVEN (1u<<25) #define TRACK_LINEAR (1u<<26) -#define ALL_REV_FLAGS (((1u<<11)-1) | USER_GIVEN | TRACK_LINEAR) +#define ALL_REV_FLAGS (((1u<<11)-1) | NOT_USER_GIVEN | TRACK_LINEAR) + +#define TOPO_WALK_EXPLORED (1u<<27) +#define TOPO_WALK_INDEGREE (1u<<28) #define DECORATE_SHORT_REFS 1 #define DECORATE_FULL_REFS 2 -struct rev_info; struct log_info; +struct repository; +struct rev_info; struct string_list; struct saved_parents; define_shared_commit_slab(revision_sources, char *); @@ -56,10 +67,13 @@ struct rev_cmdline_info { #define REVISION_WALK_NO_WALK_SORTED 1 #define REVISION_WALK_NO_WALK_UNSORTED 2 +struct topo_walk_info; + struct rev_info { /* Starting list */ struct commit_list *commits; struct object_array pending; + struct repository *repo; /* Parents of shown commits */ struct object_array boundary_commits; @@ -82,6 +96,11 @@ struct rev_info { */ int rev_input_given; + /* + * Whether we read from stdin due to the --stdin option. + */ + int read_from_stdin; + /* topo-sort */ enum rev_sort_order sort_order; @@ -126,6 +145,21 @@ struct rev_info { line_level_traverse:1, tree_blobs_in_commit_order:1, + /* + * Blobs are shown without regard for their existence. + * But not so for trees: unless exclude_promisor_objects + * is set and the tree in question is a promisor object; + * OR ignore_missing_links is set, the revision walker + * dies with a "bad tree object HASH" message when + * encountering a missing tree. For callers that can + * handle missing trees and want them to be filterable + * and showable, set this to true. The revision walker + * will filter and show such a missing tree as usual, + * but will not attempt to recurse into this tree + * object. 
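The revision.c additions above (init_topo_walk(), next_topo_commit(), expand_topo_walk()) emit commits in topological order by tracking in-degrees, and the walker only falls back to the old limit_list() plus sort_in_topological_order() path when commit-graph generation numbers are unavailable. An eager, self-contained sketch of the in-degree idea; the real code does this lazily, exploring only down to a generation-number cutoff, and the five-commit graph here is purely illustrative:

#include <stdio.h>

/*
 * Count how many children point at each commit, then repeatedly emit
 * a commit whose remaining in-degree is zero and decrement its
 * parents.  The real walker keeps these counts in a commit slab and
 * bounds exploration by generation number, so it can start emitting
 * before the whole history is parsed.
 */
#define NCOMMITS 5

int main(void)
{
        /* parents[c] lists the parents of commit c, terminated by -1. */
        static const int parents[NCOMMITS][3] = {
                { -1 },         /* 0: root        */
                { 0, -1 },      /* 1: child of 0  */
                { 0, -1 },      /* 2: child of 0  */
                { 1, 2, -1 },   /* 3: merge       */
                { 3, -1 },      /* 4: tip         */
        };
        int indegree[NCOMMITS] = { 0 };
        int stack[NCOMMITS], top = 0, c, i;

        for (c = 0; c < NCOMMITS; c++)
                for (i = 0; parents[c][i] >= 0; i++)
                        indegree[parents[c][i]]++;

        for (c = 0; c < NCOMMITS; c++)  /* tips start out ready */
                if (!indegree[c])
                        stack[top++] = c;

        while (top) {
                c = stack[--top];
                printf("commit %d\n", c);       /* children before parents */
                for (i = 0; parents[c][i] >= 0; i++)
                        if (!--indegree[parents[c][i]])
                                stack[top++] = parents[c][i];
        }
        return 0;
}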
+ */ + do_not_die_on_missing_tree:1, + /* for internal use only */ allow_exclude_promisor_objects_opt:1, exclude_promisor_objects:1; @@ -215,6 +249,17 @@ struct rev_info { /* notes-specific options: which refs to show */ struct display_notes_opt notes_opt; + /* interdiff */ + const struct object_id *idiff_oid1; + const struct object_id *idiff_oid2; + const char *idiff_title; + + /* range-diff */ + const char *rdiff1; + const char *rdiff2; + int creation_factor; + const char *rdiff_title; + /* commit counts */ int count_left; int count_right; @@ -230,6 +275,8 @@ struct rev_info { const char *break_bar; struct revision_sources *sources; + + struct topo_walk_info *topo_walk_info; }; int ref_excluded(struct string_list *, const char *path); @@ -249,12 +296,17 @@ extern volatile show_early_output_fn_t show_early_output; struct setup_revision_opt { const char *def; void (*tweak)(struct rev_info *, struct setup_revision_opt *); - const char *submodule; + const char *submodule; /* TODO: drop this and use rev_info->repo */ int assume_dashdash; unsigned revarg_opt; }; -void init_revisions(struct rev_info *revs, const char *prefix); +#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS +#define init_revisions(revs, prefix) repo_init_revisions(the_repository, revs, prefix) +#endif +void repo_init_revisions(struct repository *r, + struct rev_info *revs, + const char *prefix); int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *); void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx, @@ -274,7 +326,7 @@ void put_revision_mark(const struct rev_info *revs, const struct commit *commit); void mark_parents_uninteresting(struct commit *commit); -void mark_tree_uninteresting(struct tree *tree); +void mark_tree_uninteresting(struct repository *r, struct tree *tree); void show_object_with_name(FILE *, struct object *, const char *); diff --git a/run-command.c b/run-command.c index d679cc267c..c11ff80674 100644 --- a/run-command.c +++ b/run-command.c @@ -1226,7 +1226,7 @@ int start_async(struct async *async) { int err = pthread_create(&async->tid, NULL, run_thread, async); if (err) { - error_errno("cannot create thread"); + error(_("cannot create async thread: %s"), strerror(err)); goto error; } } @@ -1259,6 +1259,15 @@ int finish_async(struct async *async) #endif } +int async_with_fork(void) +{ +#ifdef NO_PTHREADS + return 1; +#else + return 0; +#endif +} + const char *find_hook(const char *name) { static struct strbuf path = STRBUF_INIT; diff --git a/run-command.h b/run-command.h index 3932420ec8..68f5369fc2 100644 --- a/run-command.h +++ b/run-command.h @@ -1,9 +1,7 @@ #ifndef RUN_COMMAND_H #define RUN_COMMAND_H -#ifndef NO_PTHREADS -#include <pthread.h> -#endif +#include "thread-utils.h" #include "argv-array.h" @@ -143,6 +141,7 @@ struct async { int start_async(struct async *async); int finish_async(struct async *async); int in_async(void); +int async_with_fork(void); void check_pipe(int err); /** diff --git a/send-pack.c b/send-pack.c index e920ca57df..f692686770 100644 --- a/send-pack.c +++ b/send-pack.c @@ -203,9 +203,8 @@ static int receive_status(int in, struct ref *refs) static int sideband_demux(int in, int out, void *data) { int *fd = data, ret; -#ifdef NO_PTHREADS - close(fd[1]); -#endif + if (async_with_fork()) + close(fd[1]); ret = recv_sideband("send-pack", fd[0], out); close(out); return ret; diff --git a/sequencer.c b/sequencer.c index 24451bb683..e1a4dd15f1 100644 --- a/sequencer.c +++ b/sequencer.c @@ -30,6 +30,8 @@ #include 
"oidset.h" #include "commit-slab.h" #include "alias.h" +#include "commit-reach.h" +#include "rebase-interactive.h" #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION" @@ -52,7 +54,10 @@ static GIT_PATH_FUNC(rebase_path, "rebase-merge") * the lines are processed, they are removed from the front of this * file and written to the tail of 'done'. */ -static GIT_PATH_FUNC(rebase_path_todo, "rebase-merge/git-rebase-todo") +GIT_PATH_FUNC(rebase_path_todo, "rebase-merge/git-rebase-todo") +static GIT_PATH_FUNC(rebase_path_todo_backup, + "rebase-merge/git-rebase-todo.backup") + /* * The rebase command lines that have already been processed. A line * is moved here when it is first handled, before any associated user @@ -140,7 +145,7 @@ static GIT_PATH_FUNC(rebase_path_refs_to_delete, "rebase-merge/refs-to-delete") /* * The following files are written by git-rebase just after parsing the - * command-line (and are only consumed, not modified, by the sequencer). + * command-line. */ static GIT_PATH_FUNC(rebase_path_gpg_sign_opt, "rebase-merge/gpg_sign_opt") static GIT_PATH_FUNC(rebase_path_orig_head, "rebase-merge/orig-head") @@ -152,6 +157,7 @@ static GIT_PATH_FUNC(rebase_path_autostash, "rebase-merge/autostash") static GIT_PATH_FUNC(rebase_path_strategy, "rebase-merge/strategy") static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts") static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate") +static GIT_PATH_FUNC(rebase_path_quiet, "rebase-merge/quiet") static int git_sequencer_config(const char *k, const char *v, void *cb) { @@ -376,8 +382,8 @@ static void print_advice(int show_hint, struct replay_opts *opts) } } -static int write_message(const void *buf, size_t len, const char *filename, - int append_eol) +int write_message(const void *buf, size_t len, const char *filename, + int append_eol) { struct lock_file msg_file = LOCK_INIT; @@ -473,8 +479,8 @@ static int fast_forward_to(const struct object_id *to, const struct object_id *f struct strbuf sb = STRBUF_INIT; struct strbuf err = STRBUF_INIT; - read_cache(); - if (checkout_fast_forward(from, to, 1)) + read_index(&the_index); + if (checkout_fast_forward(the_repository, from, to, 1)) return -1; /* the callee should have complained already */ strbuf_addf(&sb, _("%s: fast-forward"), _(action_name(opts))); @@ -613,7 +619,7 @@ static int is_index_unchanged(void) if (!(cache_tree_oid = get_cache_tree_oid())) return -1; - return !oidcmp(cache_tree_oid, get_commit_tree_oid(head_commit)); + return oideq(cache_tree_oid, get_commit_tree_oid(head_commit)); } static int write_author_script(const char *message) @@ -663,55 +669,131 @@ missing_author: return res; } +/** + * Take a series of KEY='VALUE' lines where VALUE part is + * sq-quoted, and append <KEY, VALUE> at the end of the string list + */ +static int parse_key_value_squoted(char *buf, struct string_list *list) +{ + while (*buf) { + struct string_list_item *item; + char *np; + char *cp = strchr(buf, '='); + if (!cp) { + np = strchrnul(buf, '\n'); + return error(_("no key present in '%.*s'"), + (int) (np - buf), buf); + } + np = strchrnul(cp, '\n'); + *cp++ = '\0'; + item = string_list_append(list, buf); + + buf = np + (*np == '\n'); + *np = '\0'; + cp = sq_dequote(cp); + if (!cp) + return error(_("unable to dequote value of '%s'"), + item->string); + item->util = xstrdup(cp); + } + return 0; +} -/* - * write_author_script() used to fail to terminate the last line with a "'" and - * also escaped "'" incorrectly as "'\\\\''" rather than "'\\''". 
We check for - * the terminating "'" on the last line to see how "'" has been escaped in case - * git was upgraded while rebase was stopped. +/** + * Reads and parses the state directory's "author-script" file, and sets name, + * email and date accordingly. + * Returns 0 on success, -1 if the file could not be parsed. + * + * The author script is of the format: + * + * GIT_AUTHOR_NAME='$author_name' + * GIT_AUTHOR_EMAIL='$author_email' + * GIT_AUTHOR_DATE='$author_date' + * + * where $author_name, $author_email and $author_date are quoted. We are strict + * with our parsing, as the file was meant to be eval'd in the old + * git-am.sh/git-rebase--interactive.sh scripts, and thus if the file differs + * from what this function expects, it is better to bail out than to do + * something that the user does not expect. */ -static int quoting_is_broken(const char *s, size_t n) +int read_author_script(const char *path, char **name, char **email, char **date, + int allow_missing) { - /* Skip any empty lines in case the file was hand edited */ - while (n > 0 && s[--n] == '\n') - ; /* empty */ - if (n > 0 && s[n] != '\'') - return 1; + struct strbuf buf = STRBUF_INIT; + struct string_list kv = STRING_LIST_INIT_DUP; + int retval = -1; /* assume failure */ + int i, name_i = -2, email_i = -2, date_i = -2, err = 0; - return 0; + if (strbuf_read_file(&buf, path, 256) <= 0) { + strbuf_release(&buf); + if (errno == ENOENT && allow_missing) + return 0; + else + return error_errno(_("could not open '%s' for reading"), + path); + } + + if (parse_key_value_squoted(buf.buf, &kv)) + goto finish; + + for (i = 0; i < kv.nr; i++) { + if (!strcmp(kv.items[i].string, "GIT_AUTHOR_NAME")) { + if (name_i != -2) + name_i = error(_("'GIT_AUTHOR_NAME' already given")); + else + name_i = i; + } else if (!strcmp(kv.items[i].string, "GIT_AUTHOR_EMAIL")) { + if (email_i != -2) + email_i = error(_("'GIT_AUTHOR_EMAIL' already given")); + else + email_i = i; + } else if (!strcmp(kv.items[i].string, "GIT_AUTHOR_DATE")) { + if (date_i != -2) + date_i = error(_("'GIT_AUTHOR_DATE' already given")); + else + date_i = i; + } else { + err = error(_("unknown variable '%s'"), + kv.items[i].string); + } + } + if (name_i == -2) + error(_("missing 'GIT_AUTHOR_NAME'")); + if (email_i == -2) + error(_("missing 'GIT_AUTHOR_EMAIL'")); + if (date_i == -2) + error(_("missing 'GIT_AUTHOR_DATE'")); + if (date_i < 0 || email_i < 0 || date_i < 0 || err) + goto finish; + *name = kv.items[name_i].util; + *email = kv.items[email_i].util; + *date = kv.items[date_i].util; + retval = 0; +finish: + string_list_clear(&kv, !!retval); + strbuf_release(&buf); + return retval; } /* - * Read a list of environment variable assignments (such as the author-script - * file) into an environment block. Returns -1 on error, 0 otherwise. + * Read a GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL AND GIT_AUTHOR_DATE from a + * file with shell quoting into struct argv_array. Returns -1 on + * error, 0 otherwise. 
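read_author_script() above insists on exactly the three sq-quoted GIT_AUTHOR_* assignments. A toy, self-contained sketch of parsing KEY='VALUE' lines follows; unlike sq_dequote() it assumes values contain no escaped quotes, and parse_line() is made up for illustration:

#include <stdio.h>
#include <string.h>

/*
 * Toy parser for KEY='VALUE' lines. It does not handle embedded
 * '\'' sequences the way sq_dequote() does; that is a simplification.
 */
static int parse_line(char *line, char **key, char **value)
{
	char *eq = strchr(line, '=');
	size_t len;

	if (!eq)
		return -1;
	*eq = '\0';
	*key = line;
	len = strlen(eq + 1);
	if (len < 2 || eq[1] != '\'' || eq[len] != '\'')
		return -1;		/* value must be 'quoted' */
	eq[len] = '\0';			/* drop the closing quote */
	*value = eq + 2;		/* skip the opening quote */
	return 0;
}

int main(void)
{
	char buf[] = "GIT_AUTHOR_NAME='A U Thor'\n"
		     "GIT_AUTHOR_EMAIL='author@example.com'\n"
		     "GIT_AUTHOR_DATE='@1234567890 +0000'\n";
	char *line = strtok(buf, "\n");

	while (line) {
		char *key, *value;

		if (parse_line(line, &key, &value))
			return 1;
		printf("%s -> %s\n", key, value);
		line = strtok(NULL, "\n");
	}
	return 0;
}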
*/ static int read_env_script(struct argv_array *env) { - struct strbuf script = STRBUF_INIT; - int i, count = 0, sq_bug; - const char *p2; - char *p; + char *name, *email, *date; - if (strbuf_read_file(&script, rebase_path_author_script(), 256) <= 0) + if (read_author_script(rebase_path_author_script(), + &name, &email, &date, 0)) return -1; - /* write_author_script() used to quote incorrectly */ - sq_bug = quoting_is_broken(script.buf, script.len); - for (p = script.buf; *p; p++) - if (sq_bug && skip_prefix(p, "'\\\\''", &p2)) - strbuf_splice(&script, p - script.buf, p2 - p, "'", 1); - else if (skip_prefix(p, "'\\''", &p2)) - strbuf_splice(&script, p - script.buf, p2 - p, "'", 1); - else if (*p == '\'') - strbuf_splice(&script, p-- - script.buf, 1, "", 0); - else if (*p == '\n') { - *p = '\0'; - count++; - } - for (i = 0, p = script.buf; i < count; i++) { - argv_array_push(env, p); - p += strlen(p) + 1; - } + argv_array_pushf(env, "GIT_AUTHOR_NAME=%s", name); + argv_array_pushf(env, "GIT_AUTHOR_EMAIL=%s", email); + argv_array_pushf(env, "GIT_AUTHOR_DATE=%s", date); + free(name); + free(email); + free(date); return 0; } @@ -731,54 +813,28 @@ static char *get_author(const char *message) /* Read author-script and return an ident line (author <email> timestamp) */ static const char *read_author_ident(struct strbuf *buf) { - const char *keys[] = { - "GIT_AUTHOR_NAME=", "GIT_AUTHOR_EMAIL=", "GIT_AUTHOR_DATE=" - }; struct strbuf out = STRBUF_INIT; - char *in, *eol; - const char *val[3]; - int i = 0; - - if (strbuf_read_file(buf, rebase_path_author_script(), 256) <= 0) - return NULL; - - /* dequote values and construct ident line in-place */ - for (in = buf->buf; i < 3 && in - buf->buf < buf->len; i++) { - if (!skip_prefix(in, keys[i], (const char **)&in)) { - warning(_("could not parse '%s' (looking for '%s')"), - rebase_path_author_script(), keys[i]); - return NULL; - } - - eol = strchrnul(in, '\n'); - *eol = '\0'; - if (!sq_dequote(in)) { - warning(_("bad quoting on %s value in '%s'"), - keys[i], rebase_path_author_script()); - return NULL; - } - val[i] = in; - in = eol + 1; - } + char *name, *email, *date; - if (i < 3) { - warning(_("could not parse '%s' (looking for '%s')"), - rebase_path_author_script(), keys[i]); + if (read_author_script(rebase_path_author_script(), + &name, &email, &date, 0)) return NULL; - } /* validate date since fmt_ident() will die() on bad value */ - if (parse_date(val[2], &out)){ + if (parse_date(date, &out)){ warning(_("invalid date format '%s' in '%s'"), - val[2], rebase_path_author_script()); + date, rebase_path_author_script()); strbuf_release(&out); return NULL; } strbuf_reset(&out); - strbuf_addstr(&out, fmt_ident(val[0], val[1], val[2], 0)); + strbuf_addstr(&out, fmt_ident(name, email, date, 0)); strbuf_swap(buf, &out); strbuf_release(&out); + free(name); + free(email); + free(date); return buf->buf; } @@ -803,6 +859,23 @@ N_("you have staged changes in your working tree\n" #define VERIFY_MSG (1<<4) #define CREATE_ROOT_COMMIT (1<<5) +static int run_command_silent_on_success(struct child_process *cmd) +{ + struct strbuf buf = STRBUF_INIT; + int rc; + + cmd->stdout_to_stderr = 1; + rc = pipe_command(cmd, + NULL, 0, + NULL, 0, + &buf, 0); + + if (rc) + fputs(buf.buf, stderr); + strbuf_release(&buf); + return rc; +} + /* * If we are cherry-pick, and if the merge did not result in * hand-editing, we will hit this commit and inherit the original @@ -864,18 +937,11 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts, cmd.git_cmd = 1; - if 
(is_rebase_i(opts)) { - if (!(flags & EDIT_MSG)) { - cmd.stdout_to_stderr = 1; - cmd.err = -1; - } - - if (read_env_script(&cmd.env_array)) { - const char *gpg_opt = gpg_sign_opt_quoted(opts); + if (is_rebase_i(opts) && read_env_script(&cmd.env_array)) { + const char *gpg_opt = gpg_sign_opt_quoted(opts); - return error(_(staged_changes_advice), - gpg_opt, gpg_opt); - } + return error(_(staged_changes_advice), + gpg_opt, gpg_opt); } argv_array_push(&cmd.args, "commit"); @@ -905,21 +971,10 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts, if (!(flags & EDIT_MSG)) argv_array_push(&cmd.args, "--allow-empty-message"); - if (cmd.err == -1) { - /* hide stderr on success */ - struct strbuf buf = STRBUF_INIT; - int rc = pipe_command(&cmd, - NULL, 0, - /* stdout is already redirected */ - NULL, 0, - &buf, 0); - if (rc) - fputs(buf.buf, stderr); - strbuf_release(&buf); - return rc; - } - - return run_command(&cmd); + if (is_rebase_i(opts) && !(flags & EDIT_MSG)) + return run_command_silent_on_success(&cmd); + else + return run_command(&cmd); } static int rest_is_empty(const struct strbuf *sb, int start) @@ -1175,7 +1230,7 @@ void print_commit_summary(const char *prefix, const struct object_id *oid, strbuf_release(&author_ident); strbuf_release(&committer_ident); - init_revisions(&rev, prefix); + repo_init_revisions(the_repository, &rev, prefix); setup_revisions(0, NULL, &rev, NULL); rev.diff = 1; @@ -1220,7 +1275,7 @@ static int parse_head(struct commit **head) current_head = lookup_commit_reference(the_repository, &oid); if (!current_head) return error(_("could not parse HEAD")); - if (oidcmp(&oid, ¤t_head->object.oid)) { + if (!oideq(&oid, ¤t_head->object.oid)) { warning(_("HEAD %s is not a commit!"), oid_to_hex(&oid)); } @@ -1290,9 +1345,9 @@ static int try_to_commit(struct strbuf *msg, const char *author, goto out; } - if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ? - get_commit_tree_oid(current_head) : - the_hash_algo->empty_tree, &tree)) { + if (!(flags & ALLOW_EMPTY) && oideq(current_head ? + get_commit_tree_oid(current_head) : + the_hash_algo->empty_tree, &tree)) { res = 1; /* run 'git commit' to display error message */ goto out; } @@ -1397,7 +1452,7 @@ static int is_original_commit_empty(struct commit *commit) ptree_oid = the_hash_algo->empty_tree; /* commit is root */ } - return !oidcmp(ptree_oid, get_commit_tree_oid(commit)); + return oideq(ptree_oid, get_commit_tree_oid(commit)); } /* @@ -1453,6 +1508,7 @@ enum todo_command { TODO_SQUASH, /* commands that do something else than handling a single commit */ TODO_EXEC, + TODO_BREAK, TODO_LABEL, TODO_RESET, TODO_MERGE, @@ -1474,6 +1530,7 @@ static struct { { 'f', "fixup" }, { 's', "squash" }, { 'x', "exec" }, + { 'b', "break" }, { 'l', "label" }, { 't', "reset" }, { 'm', "merge" }, @@ -1677,7 +1734,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, unborn = get_oid("HEAD", &head); /* Do we want to generate a root commit? 
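run_command_silent_on_success() buffers everything the child prints and only shows it when the command fails. The same idea as a standalone sketch using POSIX popen() instead of Git's pipe_command(); merging stderr into stdout is left to the caller here, whereas the real helper sets stdout_to_stderr:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Run a shell command; show its output only when it exits with an error. */
static int run_silent_on_success(const char *cmd)
{
	char line[4096];
	char *buf = NULL, *tmp;
	size_t buflen = 0;
	FILE *fp;
	int status;

	fp = popen(cmd, "r");
	if (!fp)
		return -1;

	while (fgets(line, sizeof(line), fp)) {
		size_t add = strlen(line);

		tmp = realloc(buf, buflen + add + 1);
		if (!tmp) {
			free(buf);
			pclose(fp);
			return -1;
		}
		buf = tmp;
		memcpy(buf + buflen, line, add + 1);
		buflen += add;
	}

	status = pclose(fp);
	if (status && buf)
		fputs(buf, stderr);	/* failure: surface the buffered output */
	free(buf);
	return status;
}

int main(void)
{
	run_silent_on_success("echo this stays hidden");
	return run_silent_on_success("echo this is shown; exit 1") ? 1 : 0;
}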
*/ if (is_pick_or_similar(command) && opts->have_squash_onto && - !oidcmp(&head, &opts->squash_onto)) { + oideq(&head, &opts->squash_onto)) { if (is_fixup(command)) return error(_("cannot fixup root commit")); flags |= CREATE_ROOT_COMMIT; @@ -1720,7 +1777,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, oid_to_hex(&commit->object.oid)); if (opts->allow_ff && !is_fixup(command) && - ((parent && !oidcmp(&parent->object.oid, &head)) || + ((parent && oideq(&parent->object.oid, &head)) || (!parent && unborn))) { if (is_rebase_i(opts)) write_author_script(msg.message); @@ -1830,7 +1887,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, commit_list_insert(base, &common); commit_list_insert(next, &remotes); - res |= try_merge_command(opts->strategy, + res |= try_merge_command(the_repository, opts->strategy, opts->xopts_nr, (const char **)opts->xopts, common, oid_to_hex(&head), remotes); free_commit_list(common); @@ -1859,7 +1916,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, : _("could not apply %s... %s"), short_commit_name(commit), msg.subject); print_advice(res == 1, opts); - rerere(opts->allow_rerere_auto); + repo_rerere(the_repository, opts->allow_rerere_auto); goto leave; } @@ -1912,7 +1969,7 @@ static int read_and_refresh_cache(struct replay_opts *opts) { struct lock_file index_lock = LOCK_INIT; int index_fd = hold_locked_index(&index_lock, 0); - if (read_index_preload(&the_index, NULL) < 0) { + if (read_index(&the_index) < 0) { rollback_lock_file(&index_lock); return error(_("git %s: failed to read the index"), _(action_name(opts))); @@ -1987,7 +2044,8 @@ static int parse_insn_line(struct todo_item *item, const char *bol, char *eol) if (skip_prefix(bol, todo_command_info[i].str, &bol)) { item->command = i; break; - } else if (bol[1] == ' ' && *bol == todo_command_info[i].c) { + } else if ((bol + 1 == eol || bol[1] == ' ') && + *bol == todo_command_info[i].c) { bol++; item->command = i; break; @@ -1999,7 +2057,7 @@ static int parse_insn_line(struct todo_item *item, const char *bol, char *eol) padding = strspn(bol, " \t"); bol += padding; - if (item->command == TODO_NOOP) { + if (item->command == TODO_NOOP || item->command == TODO_BREAK) { if (bol != eol) return error(_("%s does not accept arguments: '%s'"), command_to_string(item->command), bol); @@ -2243,21 +2301,14 @@ static int populate_opts_cb(const char *key, const char *value, void *data) return 0; } -static void read_strategy_opts(struct replay_opts *opts, struct strbuf *buf) +void parse_strategy_opts(struct replay_opts *opts, char *raw_opts) { int i; - char *strategy_opts_string; - - strbuf_reset(buf); - if (!read_oneliner(buf, rebase_path_strategy(), 0)) - return; - opts->strategy = strbuf_detach(buf, NULL); - if (!read_oneliner(buf, rebase_path_strategy_opts(), 0)) - return; + char *strategy_opts_string = raw_opts; - strategy_opts_string = buf->buf; if (*strategy_opts_string == ' ') strategy_opts_string++; + opts->xopts_nr = split_cmdline(strategy_opts_string, (const char ***)&opts->xopts); for (i = 0; i < opts->xopts_nr; i++) { @@ -2268,6 +2319,18 @@ static void read_strategy_opts(struct replay_opts *opts, struct strbuf *buf) } } +static void read_strategy_opts(struct replay_opts *opts, struct strbuf *buf) +{ + strbuf_reset(buf); + if (!read_oneliner(buf, rebase_path_strategy(), 0)) + return; + opts->strategy = strbuf_detach(buf, NULL); + if (!read_oneliner(buf, rebase_path_strategy_opts(), 0)) + return; + + parse_strategy_opts(opts, 
buf->buf); +} + static int read_populate_opts(struct replay_opts *opts) { if (is_rebase_i(opts)) { @@ -2335,6 +2398,55 @@ static int read_populate_opts(struct replay_opts *opts) return 0; } +static void write_strategy_opts(struct replay_opts *opts) +{ + int i; + struct strbuf buf = STRBUF_INIT; + + for (i = 0; i < opts->xopts_nr; ++i) + strbuf_addf(&buf, " --%s", opts->xopts[i]); + + write_file(rebase_path_strategy_opts(), "%s\n", buf.buf); + strbuf_release(&buf); +} + +int write_basic_state(struct replay_opts *opts, const char *head_name, + const char *onto, const char *orig_head) +{ + const char *quiet = getenv("GIT_QUIET"); + + if (head_name) + write_file(rebase_path_head_name(), "%s\n", head_name); + if (onto) + write_file(rebase_path_onto(), "%s\n", onto); + if (orig_head) + write_file(rebase_path_orig_head(), "%s\n", orig_head); + + if (quiet) + write_file(rebase_path_quiet(), "%s\n", quiet); + else + write_file(rebase_path_quiet(), "\n"); + + if (opts->verbose) + write_file(rebase_path_verbose(), "%s", ""); + if (opts->strategy) + write_file(rebase_path_strategy(), "%s\n", opts->strategy); + if (opts->xopts_nr > 0) + write_strategy_opts(opts); + + if (opts->allow_rerere_auto == RERERE_AUTOUPDATE) + write_file(rebase_path_allow_rerere_autoupdate(), "--rerere-autoupdate\n"); + else if (opts->allow_rerere_auto == RERERE_NOAUTOUPDATE) + write_file(rebase_path_allow_rerere_autoupdate(), "--no-rerere-autoupdate\n"); + + if (opts->gpg_sign) + write_file(rebase_path_gpg_sign_opt(), "-S%s\n", opts->gpg_sign); + if (opts->signoff) + write_file(rebase_path_signoff(), "--signoff\n"); + + return 0; +} + static int walk_revs_populate_todo(struct todo_list *todo_list, struct replay_opts *opts) { @@ -2425,7 +2537,7 @@ static int rollback_is_safe(void) if (get_oid("HEAD", &actual_head)) oidclr(&actual_head); - return !oidcmp(&actual_head, &expected_head); + return oideq(&actual_head, &expected_head); } static int reset_for_rollback(const struct object_id *oid) @@ -2598,7 +2710,7 @@ static int make_patch(struct commit *commit, struct replay_opts *opts) strbuf_addf(&buf, "%s/patch", get_dir(opts)); memset(&log_tree_opt, 0, sizeof(log_tree_opt)); - init_revisions(&log_tree_opt, NULL); + repo_init_revisions(the_repository, &log_tree_opt, NULL); log_tree_opt.abbrev = 0; log_tree_opt.diff = 1; log_tree_opt.diffopt.output_format = DIFF_FORMAT_PATCH; @@ -2828,7 +2940,7 @@ static int do_reset(const char *name, int len, struct replay_opts *opts) struct tree_desc desc; struct tree *tree; struct unpack_trees_options unpack_tree_opts; - int ret = 0, i; + int ret = 0; if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) return -1; @@ -2848,10 +2960,13 @@ static int do_reset(const char *name, int len, struct replay_opts *opts) } oidcpy(&oid, &opts->squash_onto); } else { + int i; + /* Determine the length of the label */ for (i = 0; i < len; i++) if (isspace(name[i])) - len = i; + break; + len = i; strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name); if (get_oid(ref_name.buf, &oid) && @@ -2986,7 +3101,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len, } if (opts->have_squash_onto && - !oidcmp(&head_commit->object.oid, &opts->squash_onto)) { + oideq(&head_commit->object.oid, &opts->squash_onto)) { /* * When the user tells us to "merge" something into a * "[new root]", let's simply fast-forward to the merge head. @@ -3055,8 +3170,8 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len, * commit, we cannot fast-forward. 
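write_basic_state() keeps the shell-script convention of one state value per file under the rebase state directory. A hypothetical sketch of that layout; the directory name and sample values below are made up, only the file-per-value shape mirrors the function above:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Write "<value>\n" to <dir>/<name>: one state value per file. */
static int write_state_file(const char *dir, const char *name, const char *value)
{
	char path[4096];
	FILE *fp;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	fp = fopen(path, "w");
	if (!fp)
		return -1;
	if (fprintf(fp, "%s\n", value) < 0) {
		fclose(fp);
		return -1;
	}
	return fclose(fp) ? -1 : 0;
}

int main(void)
{
	const char *dir = "rebase-state-demo";	/* made-up directory name */

	if (mkdir(dir, 0777) && errno != EEXIST)
		return 1;
	return write_state_file(dir, "head-name", "refs/heads/topic") ||
	       write_state_file(dir, "onto", "badc0ffee") ||
	       write_state_file(dir, "quiet", "") ? 1 : 0;
}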
*/ can_fast_forward = opts->allow_ff && commit && commit->parents && - !oidcmp(&commit->parents->item->object.oid, - &head_commit->object.oid); + oideq(&commit->parents->item->object.oid, + &head_commit->object.oid); /* * If any merge head is different from the original one, we cannot @@ -3066,7 +3181,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len, struct commit_list *p = commit->parents->next; for (j = to_merge; j && p; j = j->next, p = p->next) - if (oidcmp(&j->item->object.oid, + if (!oideq(&j->item->object.oid, &p->item->object.oid)) { can_fast_forward = 0; break; @@ -3129,18 +3244,18 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len, } merge_commit = to_merge->item; - write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ, - git_path_merge_head(the_repository), 0); - write_message("no-ff", 5, git_path_merge_mode(the_repository), 0); - bases = get_merge_bases(head_commit, merge_commit); - if (bases && !oidcmp(&merge_commit->object.oid, - &bases->item->object.oid)) { + if (bases && oideq(&merge_commit->object.oid, + &bases->item->object.oid)) { ret = 0; /* skip merging an ancestor of HEAD */ goto leave_merge; } + write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ, + git_path_merge_head(the_repository), 0); + write_message("no-ff", 5, git_path_merge_mode(the_repository), 0); + for (j = bases; j; j = j->next) commit_list_insert(j->item, &reversed); free_commit_list(bases); @@ -3178,7 +3293,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len, rollback_lock_file(&lock); if (ret) - rerere(opts->allow_rerere_auto); + repo_rerere(the_repository, opts->allow_rerere_auto); else /* * In case of problems, we now want to return a positive @@ -3285,6 +3400,73 @@ static const char *reflog_message(struct replay_opts *opts, return buf.buf; } +static int run_git_checkout(struct replay_opts *opts, const char *commit, + const char *action) +{ + struct child_process cmd = CHILD_PROCESS_INIT; + + cmd.git_cmd = 1; + + argv_array_push(&cmd.args, "checkout"); + argv_array_push(&cmd.args, commit); + argv_array_pushf(&cmd.env_array, GIT_REFLOG_ACTION "=%s", action); + + if (opts->verbose) + return run_command(&cmd); + else + return run_command_silent_on_success(&cmd); +} + +int prepare_branch_to_be_rebased(struct replay_opts *opts, const char *commit) +{ + const char *action; + + if (commit && *commit) { + action = reflog_message(opts, "start", "checkout %s", commit); + if (run_git_checkout(opts, commit, action)) + return error(_("could not checkout %s"), commit); + } + + return 0; +} + +static int checkout_onto(struct replay_opts *opts, + const char *onto_name, const char *onto, + const char *orig_head) +{ + struct object_id oid; + const char *action = reflog_message(opts, "start", "checkout %s", onto_name); + + if (get_oid(orig_head, &oid)) + return error(_("%s: not a valid OID"), orig_head); + + if (run_git_checkout(opts, onto, action)) { + apply_autostash(opts); + sequencer_remove_state(opts); + return error(_("could not detach HEAD")); + } + + return update_ref(NULL, "ORIG_HEAD", &oid, NULL, 0, UPDATE_REFS_MSG_ON_ERR); +} + +static int stopped_at_head(void) +{ + struct object_id head; + struct commit *commit; + struct commit_message message; + + if (get_oid("HEAD", &head) || + !(commit = lookup_commit(the_repository, &head)) || + parse_commit(commit) || get_message(commit, &message)) + fprintf(stderr, _("Stopped at HEAD\n")); + else { + fprintf(stderr, _("Stopped at %s\n"), message.label); + 
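run_git_checkout() and checkout_onto() above spawn "git checkout <commit>" with GIT_REFLOG_ACTION set so the reflog records why HEAD moved. A rough standalone equivalent using fork()/execvp() instead of Git's child_process API; the reflog message used here is only an example:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

/* Spawn "git checkout <commit>" with GIT_REFLOG_ACTION in its environment. */
static int checkout_sketch(const char *commit, const char *reflog_action)
{
	pid_t pid = fork();

	if (pid < 0)
		return -1;
	if (!pid) {
		char *argv[] = { "git", "checkout", (char *)commit, NULL };

		setenv("GIT_REFLOG_ACTION", reflog_action, 1);
		execvp(argv[0], argv);
		perror("execvp");
		_exit(127);
	} else {
		int status;

		if (waitpid(pid, &status, 0) < 0)
			return -1;
		return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
	}
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <commit>\n", argv[0]);
		return 1;
	}
	return checkout_sketch(argv[1], "rebase -i (start): checkout") ? 1 : 0;
}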
free_message(commit, &message); + } + return 0; + +} + static const char rescheduled_advice[] = N_("Could not execute the todo command\n" "\n" @@ -3330,7 +3512,11 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts) unlink(rebase_path_author_script()); unlink(rebase_path_stopped_sha()); unlink(rebase_path_amend()); + unlink(git_path_merge_head(the_repository)); delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); + + if (item->command == TODO_BREAK) + return stopped_at_head(); } if (item->command <= TODO_SQUASH) { if (is_rebase_i(opts)) @@ -3381,9 +3567,9 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts) */ if (item->command == TODO_REWORD && !get_oid("HEAD", &oid) && - (!oidcmp(&item->commit->object.oid, &oid) || + (oideq(&item->commit->object.oid, &oid) || (opts->have_squash_onto && - !oidcmp(&opts->squash_onto, &oid)))) + oideq(&opts->squash_onto, &oid)))) to_amend = 1; return res | error_with_patch(item->commit, @@ -3509,7 +3695,7 @@ cleanup_head_ref: struct object_id orig, head; memset(&log_tree_opt, 0, sizeof(log_tree_opt)); - init_revisions(&log_tree_opt, NULL); + repo_init_revisions(the_repository, &log_tree_opt, NULL); log_tree_opt.diff = 1; log_tree_opt.diffopt.output_format = DIFF_FORMAT_DIFFSTAT; @@ -3598,7 +3784,7 @@ static int commit_staged_changes(struct replay_opts *opts, if (get_oid_hex(rev.buf, &to_amend)) return error(_("invalid contents: '%s'"), rebase_path_amend()); - if (!is_clean && oidcmp(&head, &to_amend)) + if (!is_clean && !oideq(&head, &to_amend)) return error(_("\nYou have uncommitted changes in your " "working tree. Please, commit them\n" "first and then run 'git rebase " @@ -3612,7 +3798,7 @@ static int commit_staged_changes(struct replay_opts *opts, */ if (!is_clean || !opts->current_fixup_count) ; /* this is not the final fixup */ - else if (oidcmp(&head, &to_amend) || + else if (!oideq(&head, &to_amend) || !file_exists(rebase_path_stopped_sha())) { /* was a final fixup or squash done manually? */ if (!is_fixup(peek_command(todo_list, 0))) { @@ -3697,6 +3883,7 @@ static int commit_staged_changes(struct replay_opts *opts, opts, flags)) return error(_("could not commit staged changes.")); unlink(rebase_path_amend()); + unlink(git_path_merge_head(the_repository)); if (final_fixup) { unlink(rebase_path_fixup_msg()); unlink(rebase_path_squash_msg()); @@ -4253,7 +4440,7 @@ int sequencer_make_script(FILE *out, int argc, const char **argv, const char *insn = flags & TODO_LIST_ABBREVIATE_CMDS ? "p" : "pick"; int rebase_merges = flags & TODO_LIST_REBASE_MERGES; - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); revs.verbose_header = 1; if (!rebase_merges) revs.max_parents = 1; @@ -4419,24 +4606,20 @@ int transform_todos(unsigned flags) return i; } -enum check_level { - CHECK_IGNORE = 0, CHECK_WARN, CHECK_ERROR -}; - -static enum check_level get_missing_commit_check_level(void) +enum missing_commit_check_level get_missing_commit_check_level(void) { const char *value; if (git_config_get_value("rebase.missingcommitscheck", &value) || !strcasecmp("ignore", value)) - return CHECK_IGNORE; + return MISSING_COMMIT_CHECK_IGNORE; if (!strcasecmp("warn", value)) - return CHECK_WARN; + return MISSING_COMMIT_CHECK_WARN; if (!strcasecmp("error", value)) - return CHECK_ERROR; + return MISSING_COMMIT_CHECK_ERROR; warning(_("unrecognized setting %s for option " "rebase.missingCommitsCheck. 
Ignoring."), value); - return CHECK_IGNORE; + return MISSING_COMMIT_CHECK_IGNORE; } define_commit_slab(commit_seen, unsigned char); @@ -4448,7 +4631,7 @@ define_commit_slab(commit_seen, unsigned char); */ int check_todo_list(void) { - enum check_level check_level = get_missing_commit_check_level(); + enum missing_commit_check_level check_level = get_missing_commit_check_level(); struct strbuf todo_file = STRBUF_INIT; struct todo_list todo_list = TODO_LIST_INIT; struct strbuf missing = STRBUF_INIT; @@ -4465,7 +4648,7 @@ int check_todo_list(void) advise_to_edit_todo = res = parse_insn_buffer(todo_list.buf.buf, &todo_list); - if (res || check_level == CHECK_IGNORE) + if (res || check_level == MISSING_COMMIT_CHECK_IGNORE) goto leave_check; /* Mark the commits in git-rebase-todo as seen */ @@ -4500,7 +4683,7 @@ int check_todo_list(void) if (!missing.len) goto leave_check; - if (check_level == CHECK_ERROR) + if (check_level == MISSING_COMMIT_CHECK_ERROR) advise_to_edit_todo = res = 1; fprintf(stderr, @@ -4546,17 +4729,17 @@ static int rewrite_file(const char *path, const char *buf, size_t len) } /* skip picking commits whose parents are unchanged */ -int skip_unnecessary_picks(void) +static int skip_unnecessary_picks(struct object_id *output_oid) { const char *todo_file = rebase_path_todo(); struct strbuf buf = STRBUF_INIT; struct todo_list todo_list = TODO_LIST_INIT; - struct object_id onto_oid, *oid = &onto_oid, *parent_oid; + struct object_id *parent_oid; int fd, i; if (!read_oneliner(&buf, rebase_path_onto(), 0)) return error(_("could not read 'onto'")); - if (get_oid(buf.buf, &onto_oid)) { + if (get_oid(buf.buf, output_oid)) { strbuf_release(&buf); return error(_("need a HEAD to fixup")); } @@ -4586,9 +4769,9 @@ int skip_unnecessary_picks(void) if (item->commit->parents->next) break; /* merge commit */ parent_oid = &item->commit->parents->item->object.oid; - if (hashcmp(parent_oid->hash, oid->hash)) + if (!oideq(parent_oid, output_oid)) break; - oid = &item->commit->object.oid; + oidcpy(output_oid, &item->commit->object.oid); } if (i > 0) { int offset = get_item_line_offset(&todo_list, i); @@ -4617,15 +4800,114 @@ int skip_unnecessary_picks(void) todo_list.current = i; if (is_fixup(peek_command(&todo_list, 0))) - record_in_rewritten(oid, peek_command(&todo_list, 0)); + record_in_rewritten(output_oid, peek_command(&todo_list, 0)); } todo_list_release(&todo_list); - printf("%s\n", oid_to_hex(oid)); return 0; } +int complete_action(struct replay_opts *opts, unsigned flags, + const char *shortrevisions, const char *onto_name, + const char *onto, const char *orig_head, const char *cmd, + unsigned autosquash) +{ + const char *shortonto, *todo_file = rebase_path_todo(); + struct todo_list todo_list = TODO_LIST_INIT; + struct strbuf *buf = &(todo_list.buf); + struct object_id oid; + struct stat st; + + get_oid(onto, &oid); + shortonto = find_unique_abbrev(&oid, DEFAULT_ABBREV); + + if (!lstat(todo_file, &st) && st.st_size == 0 && + write_message("noop\n", 5, todo_file, 0)) + return -1; + + if (autosquash && rearrange_squash()) + return -1; + + if (cmd && *cmd) + sequencer_add_exec_commands(cmd); + + if (strbuf_read_file(buf, todo_file, 0) < 0) + return error_errno(_("could not read '%s'."), todo_file); + + if (parse_insn_buffer(buf->buf, &todo_list)) { + todo_list_release(&todo_list); + return error(_("unusable todo list: '%s'"), todo_file); + } + + if (count_commands(&todo_list) == 0) { + apply_autostash(opts); + sequencer_remove_state(opts); + todo_list_release(&todo_list); + + return 
error(_("nothing to do")); + } + + strbuf_addch(buf, '\n'); + strbuf_commented_addf(buf, Q_("Rebase %s onto %s (%d command)", + "Rebase %s onto %s (%d commands)", + count_commands(&todo_list)), + shortrevisions, shortonto, count_commands(&todo_list)); + append_todo_help(0, flags & TODO_LIST_KEEP_EMPTY, buf); + + if (write_message(buf->buf, buf->len, todo_file, 0)) { + todo_list_release(&todo_list); + return -1; + } + + if (copy_file(rebase_path_todo_backup(), todo_file, 0666)) + return error(_("could not copy '%s' to '%s'."), todo_file, + rebase_path_todo_backup()); + + if (transform_todos(flags | TODO_LIST_SHORTEN_IDS)) + return error(_("could not transform the todo list")); + + strbuf_reset(buf); + + if (launch_sequence_editor(todo_file, buf, NULL)) { + apply_autostash(opts); + sequencer_remove_state(opts); + todo_list_release(&todo_list); + + return -1; + } + + strbuf_stripspace(buf, 1); + if (buf->len == 0) { + apply_autostash(opts); + sequencer_remove_state(opts); + todo_list_release(&todo_list); + + return error(_("nothing to do")); + } + + todo_list_release(&todo_list); + + if (check_todo_list()) { + checkout_onto(opts, onto_name, onto, orig_head); + return -1; + } + + if (transform_todos(flags & ~(TODO_LIST_SHORTEN_IDS))) + return error(_("could not transform the todo list")); + + if (opts->allow_ff && skip_unnecessary_picks(&oid)) + return error(_("could not skip unnecessary pick commands")); + + if (checkout_onto(opts, onto_name, oid_to_hex(&oid), orig_head)) + return -1; + + if (require_clean_work_tree("rebase", "", 1, 1)) + return -1; + + return sequencer_continue(opts); +} + struct subject2item_entry { struct hashmap_entry entry; int i; diff --git a/sequencer.h b/sequencer.h index c986bc8251..5071a73563 100644 --- a/sequencer.h +++ b/sequencer.h @@ -8,6 +8,7 @@ struct commit; const char *git_path_commit_editmsg(void); const char *git_path_seq_dir(void); +const char *rebase_path_todo(void); #define APPEND_SIGNOFF_DEDUP (1u << 0) @@ -62,6 +63,15 @@ struct replay_opts { }; #define REPLAY_OPTS_INIT { .action = -1, .current_fixups = STRBUF_INIT } +enum missing_commit_check_level { + MISSING_COMMIT_CHECK_IGNORE = 0, + MISSING_COMMIT_CHECK_WARN, + MISSING_COMMIT_CHECK_ERROR +}; + +int write_message(const void *buf, size_t len, const char *filename, + int append_eol); + /* Call this to setup defaults before parsing command line options */ void sequencer_init_config(struct replay_opts *opts); int sequencer_pick_revisions(struct replay_opts *opts); @@ -84,8 +94,12 @@ int sequencer_make_script(FILE *out, int argc, const char **argv, int sequencer_add_exec_commands(const char *command); int transform_todos(unsigned flags); +enum missing_commit_check_level get_missing_commit_check_level(void); int check_todo_list(void); -int skip_unnecessary_picks(void); +int complete_action(struct replay_opts *opts, unsigned flags, + const char *shortrevisions, const char *onto_name, + const char *onto, const char *orig_head, const char *cmd, + unsigned autosquash); int rearrange_squash(void); extern const char sign_off_header[]; @@ -110,8 +124,17 @@ int update_head_with_reflog(const struct commit *old_head, void commit_post_rewrite(const struct commit *current_head, const struct object_id *new_head); +int prepare_branch_to_be_rebased(struct replay_opts *opts, const char *commit); + #define SUMMARY_INITIAL_COMMIT (1 << 0) #define SUMMARY_SHOW_AUTHOR_DATE (1 << 1) void print_commit_summary(const char *prefix, const struct object_id *oid, unsigned int flags); + +int read_author_script(const char *path, char 
**name, char **email, char **date, + int allow_missing); #endif + +void parse_strategy_opts(struct replay_opts *opts, char *raw_opts); +int write_basic_state(struct replay_opts *opts, const char *head_name, + const char *onto, const char *orig_head); diff --git a/server-info.c b/server-info.c index 41050c2449..e2b2d6a27a 100644 --- a/server-info.c +++ b/server-info.c @@ -199,7 +199,7 @@ static void init_pack_info(const char *infofile, int force) objdir = get_object_directory(); objdirlen = strlen(objdir); - for (p = get_packed_git(the_repository); p; p = p->next) { + for (p = get_all_packs(the_repository); p; p = p->next) { /* we ignore things on alternate path since they are * not available to the pullers in general. */ @@ -209,7 +209,7 @@ static void init_pack_info(const char *infofile, int force) } num_pack = i; info = xcalloc(num_pack, sizeof(struct pack_info *)); - for (i = 0, p = get_packed_git(the_repository); p; p = p->next) { + for (i = 0, p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_local) continue; info[i] = xcalloc(1, sizeof(struct pack_info)); @@ -402,6 +402,20 @@ void setup_work_tree(void) initialized = 1; } +static int read_worktree_config(const char *var, const char *value, void *vdata) +{ + struct repository_format *data = vdata; + + if (strcmp(var, "core.bare") == 0) { + data->is_bare = git_config_bool(var, value); + } else if (strcmp(var, "core.worktree") == 0) { + if (!value) + return config_error_nonbool(var); + data->work_tree = xstrdup(value); + } + return 0; +} + static int check_repo_format(const char *var, const char *value, void *vdata) { struct repository_format *data = vdata; @@ -423,16 +437,13 @@ static int check_repo_format(const char *var, const char *value, void *vdata) if (!value) return config_error_nonbool(var); data->partial_clone = xstrdup(value); - } else + } else if (!strcmp(ext, "worktreeconfig")) + data->worktree_config = git_config_bool(var, value); + else string_list_append(&data->unknown_extensions, ext); - } else if (strcmp(var, "core.bare") == 0) { - data->is_bare = git_config_bool(var, value); - } else if (strcmp(var, "core.worktree") == 0) { - if (!value) - return config_error_nonbool(var); - data->work_tree = xstrdup(value); } - return 0; + + return read_worktree_config(var, value, vdata); } static int check_repository_format_gently(const char *gitdir, struct repository_format *candidate, int *nongit_ok) @@ -466,7 +477,20 @@ static int check_repository_format_gently(const char *gitdir, struct repository_ repository_format_precious_objects = candidate->precious_objects; repository_format_partial_clone = candidate->partial_clone; + repository_format_worktree_config = candidate->worktree_config; string_list_clear(&candidate->unknown_extensions, 0); + + if (repository_format_worktree_config) { + /* + * pick up core.bare and core.worktree from per-worktree + * config if present + */ + strbuf_addf(&sb, "%s/config.worktree", gitdir); + git_config_from_file(read_worktree_config, sb.buf, candidate); + strbuf_release(&sb); + has_common = 0; + } + if (!has_common) { if (candidate->is_bare != -1) { is_bare_repository_cfg = candidate->is_bare; diff --git a/sha1-array.c b/sha1-array.c index 265941fbf4..b94e0ec0f5 100644 --- a/sha1-array.c +++ b/sha1-array.c @@ -69,7 +69,7 @@ int oid_array_for_each_unique(struct oid_array *array, for (i = 0; i < array->nr; i++) { int ret; - if (i > 0 && !oidcmp(array->oid + i, array->oid + i - 1)) + if (i > 0 && oideq(array->oid + i, array->oid + i - 1)) continue; ret = fn(array->oid + i, data); if 
(ret) diff --git a/sha1-file.c b/sha1-file.c index 03b86aec71..5bd11c85bc 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -149,10 +149,10 @@ static struct cached_object *find_cached_object(const struct object_id *oid) struct cached_object *co = cached_objects; for (i = 0; i < cached_object_nr; i++, co++) { - if (!oidcmp(&co->oid, oid)) + if (oideq(&co->oid, oid)) return co; } - if (!oidcmp(oid, the_hash_algo->empty_tree)) + if (oideq(oid, the_hash_algo->empty_tree)) return &empty_tree; return NULL; } @@ -825,7 +825,7 @@ int check_object_signature(const struct object_id *oid, void *map, if (map) { hash_object_file(map, size, type, &real_oid); - return oidcmp(oid, &real_oid) ? -1 : 0; + return !oideq(oid, &real_oid) ? -1 : 0; } st = open_istream(oid, &obj_type, &size, NULL); @@ -833,7 +833,7 @@ int check_object_signature(const struct object_id *oid, void *map, return -1; /* Generate the header */ - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1; + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(obj_type), (uintmax_t)size) + 1; /* Sha1.. */ the_hash_algo->init_fn(&c); @@ -852,7 +852,7 @@ int check_object_signature(const struct object_id *oid, void *map, } the_hash_algo->final_fn(real_oid.hash, &c); close_istream(st); - return oidcmp(oid, &real_oid) ? -1 : 0; + return !oideq(oid, &real_oid) ? -1 : 0; } int git_open_cloexec(const char *name, int flags) @@ -1492,7 +1492,7 @@ static void write_object_file_prepare(const void *buf, unsigned long len, git_hash_ctx c; /* Generate the header */ - *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1; + *hdrlen = xsnprintf(hdr, *hdrlen, "%s %"PRIuMAX , type, (uintmax_t)len)+1; /* Sha1.. */ the_hash_algo->init_fn(&c); @@ -1671,7 +1671,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr, die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid), ret); the_hash_algo->final_fn(parano_oid.hash, &c); - if (oidcmp(oid, ¶no_oid) != 0) + if (!oideq(oid, ¶no_oid)) die(_("confused by unstable object source data for %s"), oid_to_hex(oid)); @@ -1758,7 +1758,7 @@ int force_object_loose(const struct object_id *oid, time_t mtime) buf = read_object(oid->hash, &type, &len); if (!buf) return error(_("cannot read sha1_file for %s"), oid_to_hex(oid)); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1; + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1; ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime); free(buf); @@ -1813,7 +1813,8 @@ static void check_tag(const void *buf, size_t size) die(_("corrupt tag")); } -static int index_mem(struct object_id *oid, void *buf, size_t size, +static int index_mem(struct index_state *istate, + struct object_id *oid, void *buf, size_t size, enum object_type type, const char *path, unsigned flags) { @@ -1828,7 +1829,7 @@ static int index_mem(struct object_id *oid, void *buf, size_t size, */ if ((type == OBJ_BLOB) && path) { struct strbuf nbuf = STRBUF_INIT; - if (convert_to_git(&the_index, path, buf, size, &nbuf, + if (convert_to_git(istate, path, buf, size, &nbuf, get_conv_flags(flags))) { buf = strbuf_detach(&nbuf, &size); re_allocated = 1; @@ -1852,17 +1853,20 @@ static int index_mem(struct object_id *oid, void *buf, size_t size, return ret; } -static int index_stream_convert_blob(struct object_id *oid, int fd, - const char *path, unsigned flags) +static int index_stream_convert_blob(struct index_state *istate, + struct object_id *oid, + int fd, + const char *path, + unsigned flags) { int 
ret; const int write_object = flags & HASH_WRITE_OBJECT; struct strbuf sbuf = STRBUF_INIT; assert(path); - assert(would_convert_to_git_filter_fd(&the_index, path)); + assert(would_convert_to_git_filter_fd(istate, path)); - convert_to_git_filter_fd(&the_index, path, fd, &sbuf, + convert_to_git_filter_fd(istate, path, fd, &sbuf, get_conv_flags(flags)); if (write_object) @@ -1875,14 +1879,15 @@ static int index_stream_convert_blob(struct object_id *oid, int fd, return ret; } -static int index_pipe(struct object_id *oid, int fd, enum object_type type, +static int index_pipe(struct index_state *istate, struct object_id *oid, + int fd, enum object_type type, const char *path, unsigned flags) { struct strbuf sbuf = STRBUF_INIT; int ret; if (strbuf_read(&sbuf, fd, 4096) >= 0) - ret = index_mem(oid, sbuf.buf, sbuf.len, type, path, flags); + ret = index_mem(istate, oid, sbuf.buf, sbuf.len, type, path, flags); else ret = -1; strbuf_release(&sbuf); @@ -1891,14 +1896,15 @@ static int index_pipe(struct object_id *oid, int fd, enum object_type type, #define SMALL_FILE_SIZE (32*1024) -static int index_core(struct object_id *oid, int fd, size_t size, +static int index_core(struct index_state *istate, + struct object_id *oid, int fd, size_t size, enum object_type type, const char *path, unsigned flags) { int ret; if (!size) { - ret = index_mem(oid, "", size, type, path, flags); + ret = index_mem(istate, oid, "", size, type, path, flags); } else if (size <= SMALL_FILE_SIZE) { char *buf = xmalloc(size); ssize_t read_result = read_in_full(fd, buf, size); @@ -1909,11 +1915,11 @@ static int index_core(struct object_id *oid, int fd, size_t size, ret = error(_("short read while indexing %s"), path ? path : "<unknown>"); else - ret = index_mem(oid, buf, size, type, path, flags); + ret = index_mem(istate, oid, buf, size, type, path, flags); free(buf); } else { void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); - ret = index_mem(oid, buf, size, type, path, flags); + ret = index_mem(istate, oid, buf, size, type, path, flags); munmap(buf, size); } return ret; @@ -1941,7 +1947,8 @@ static int index_stream(struct object_id *oid, int fd, size_t size, return index_bulk_checkin(oid, fd, size, type, path, flags); } -int index_fd(struct object_id *oid, int fd, struct stat *st, +int index_fd(struct index_state *istate, struct object_id *oid, + int fd, struct stat *st, enum object_type type, const char *path, unsigned flags) { int ret; @@ -1950,14 +1957,14 @@ int index_fd(struct object_id *oid, int fd, struct stat *st, * Call xsize_t() only when needed to avoid potentially unnecessary * die() for large files. 
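The header-formatting hunks above switch from "%s %lu" to "%s %" PRIuMAX with an explicit (uintmax_t) cast, so the loose-object header no longer assumes object sizes fit in unsigned long. A minimal sketch of the pattern:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	char hdr[32];
	unsigned long long size = 5368709120ULL;	/* 5 GiB: too big for a 32-bit long */
	int hdrlen;

	/*
	 * Casting to uintmax_t and printing with PRIuMAX keeps the format
	 * correct even on platforms where "unsigned long" is 32 bits.
	 */
	hdrlen = snprintf(hdr, sizeof(hdr), "%s %" PRIuMAX, "blob",
			  (uintmax_t)size) + 1;		/* + 1 for the trailing NUL */

	printf("header \"%s\" uses %d bytes including its NUL\n", hdr, hdrlen);
	return 0;
}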
*/ - if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(&the_index, path)) - ret = index_stream_convert_blob(oid, fd, path, flags); + if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(istate, path)) + ret = index_stream_convert_blob(istate, oid, fd, path, flags); else if (!S_ISREG(st->st_mode)) - ret = index_pipe(oid, fd, type, path, flags); + ret = index_pipe(istate, oid, fd, type, path, flags); else if (st->st_size <= big_file_threshold || type != OBJ_BLOB || - (path && would_convert_to_git(&the_index, path))) - ret = index_core(oid, fd, xsize_t(st->st_size), type, path, - flags); + (path && would_convert_to_git(istate, path))) + ret = index_core(istate, oid, fd, xsize_t(st->st_size), + type, path, flags); else ret = index_stream(oid, fd, xsize_t(st->st_size), type, path, flags); @@ -1965,7 +1972,8 @@ int index_fd(struct object_id *oid, int fd, struct stat *st, return ret; } -int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags) +int index_path(struct index_state *istate, struct object_id *oid, + const char *path, struct stat *st, unsigned flags) { int fd; struct strbuf sb = STRBUF_INIT; @@ -1976,7 +1984,7 @@ int index_path(struct object_id *oid, const char *path, struct stat *st, unsigne fd = open(path, O_RDONLY); if (fd < 0) return error_errno("open(\"%s\")", path); - if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0) + if (index_fd(istate, oid, fd, st, OBJ_BLOB, path, flags) < 0) return error(_("%s: failed to insert into database"), path); break; @@ -2214,7 +2222,7 @@ static int check_stream_sha1(git_zstream *stream, } the_hash_algo->final_fn(real_sha1, &c); - if (hashcmp(expected_sha1, real_sha1)) { + if (!hasheq(expected_sha1, real_sha1)) { error(_("sha1 mismatch for %s (expected %s)"), path, sha1_to_hex(expected_sha1)); return -1; diff --git a/sha1-name.c b/sha1-name.c index c9cc1318b7..faa60f69e3 100644 --- a/sha1-name.c +++ b/sha1-name.c @@ -12,6 +12,8 @@ #include "packfile.h" #include "object-store.h" #include "repository.h" +#include "midx.h" +#include "commit-reach.h" static int get_oid_oneline(const char *, struct object_id *, struct commit_list *); @@ -44,7 +46,7 @@ static void update_candidates(struct disambiguate_state *ds, const struct object oidcpy(&ds->candidate, current); ds->candidate_exists = 1; return; - } else if (!oidcmp(&ds->candidate, current)) { + } else if (oideq(&ds->candidate, current)) { /* the same as what we already have seen */ return; } @@ -149,6 +151,32 @@ static int match_sha(unsigned len, const unsigned char *a, const unsigned char * return 1; } +static void unique_in_midx(struct multi_pack_index *m, + struct disambiguate_state *ds) +{ + uint32_t num, i, first = 0; + const struct object_id *current = NULL; + num = m->num_objects; + + if (!num) + return; + + bsearch_midx(&ds->bin_pfx, m, &first); + + /* + * At this point, "first" is the location of the lowest object + * with an object name that could match "bin_pfx". See if we have + * 0, 1 or more objects that actually match(es). 
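unique_in_midx() depends on the multi-pack-index being sorted: bsearch_midx() returns the first slot that could match the prefix, and the scan stops at the first non-match or as soon as a second distinct object appears. The same disambiguation idea on a sorted array of hex strings, as a self-contained sketch (lower_bound() and the sample ids are inventions of this example):

#include <stdio.h>
#include <string.h>

/* Index of the first element >= prefix in a sorted array of hex ids. */
static size_t lower_bound(const char **ids, size_t n, const char *prefix)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (strcmp(ids[mid], prefix) < 0)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

int main(void)
{
	const char *ids[] = { "1234abcd", "1234beef", "cafe0000", "cafef00d" };
	const size_t n = sizeof(ids) / sizeof(*ids);
	const char *prefixes[] = { "1234", "cafe0", "ffff" };
	size_t p;

	for (p = 0; p < sizeof(prefixes) / sizeof(*prefixes); p++) {
		const char *pfx = prefixes[p];
		size_t len = strlen(pfx);
		size_t i = lower_bound(ids, n, pfx);
		size_t matches = 0;

		/* walk forward while entries still share the prefix */
		for (; i < n && !strncmp(ids[i], pfx, len); i++)
			matches++;

		printf("%-6s -> %s\n", pfx,
		       matches == 0 ? "missing" :
		       matches == 1 ? "unique" : "ambiguous");
	}
	return 0;
}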
+ */ + for (i = first; i < num && !ds->ambiguous; i++) { + struct object_id oid; + current = nth_midxed_object_oid(&oid, m, i); + if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash)) + break; + update_candidates(ds, current); + } +} + static void unique_in_pack(struct packed_git *p, struct disambiguate_state *ds) { @@ -177,8 +205,12 @@ static void unique_in_pack(struct packed_git *p, static void find_short_packed_object(struct disambiguate_state *ds) { + struct multi_pack_index *m; struct packed_git *p; + for (m = get_multi_pack_index(the_repository); m && !ds->ambiguous; + m = m->next) + unique_in_midx(m, ds); for (p = get_packed_git(the_repository); p && !ds->ambiguous; p = p->next) unique_in_pack(p, ds); @@ -529,6 +561,42 @@ static int extend_abbrev_len(const struct object_id *oid, void *cb_data) return 0; } +static void find_abbrev_len_for_midx(struct multi_pack_index *m, + struct min_abbrev_data *mad) +{ + int match = 0; + uint32_t num, first = 0; + struct object_id oid; + const struct object_id *mad_oid; + + if (!m->num_objects) + return; + + num = m->num_objects; + mad_oid = mad->oid; + match = bsearch_midx(mad_oid, m, &first); + + /* + * first is now the position in the packfile where we would insert + * mad->hash if it does not exist (or the position of mad->hash if + * it does exist). Hence, we consider a maximum of two objects + * nearby for the abbreviation length. + */ + mad->init_len = 0; + if (!match) { + if (nth_midxed_object_oid(&oid, m, first)) + extend_abbrev_len(&oid, mad); + } else if (first < num - 1) { + if (nth_midxed_object_oid(&oid, m, first + 1)) + extend_abbrev_len(&oid, mad); + } + if (first > 0) { + if (nth_midxed_object_oid(&oid, m, first - 1)) + extend_abbrev_len(&oid, mad); + } + mad->init_len = mad->cur_len; +} + static void find_abbrev_len_for_pack(struct packed_git *p, struct min_abbrev_data *mad) { @@ -567,8 +635,11 @@ static void find_abbrev_len_for_pack(struct packed_git *p, static void find_abbrev_len_packed(struct min_abbrev_data *mad) { + struct multi_pack_index *m; struct packed_git *p; + for (m = get_multi_pack_index(the_repository); m; m = m->next) + find_abbrev_len_for_midx(m, mad); for (p = get_packed_git(the_repository); p; p = p->next) find_abbrev_len_for_pack(p, mad); } @@ -16,6 +16,7 @@ #include "list-objects.h" #include "commit-slab.h" #include "repository.h" +#include "commit-reach.h" void set_alternate_shallow_file(struct repository *r, const char *path, int override) { @@ -184,7 +185,7 @@ struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av, is_repository_shallow(the_repository); /* make sure shallows are read */ - init_revisions(&revs, NULL); + repo_init_revisions(the_repository, &revs, NULL); save_commit_buffer = 0; setup_revisions(ac, av, &revs, NULL); @@ -120,6 +120,15 @@ void strbuf_trim_trailing_dir_sep(struct strbuf *sb) sb->buf[sb->len] = '\0'; } +void strbuf_trim_trailing_newline(struct strbuf *sb) +{ + if (sb->len > 0 && sb->buf[sb->len - 1] == '\n') { + if (--sb->len > 0 && sb->buf[sb->len - 1] == '\r') + --sb->len; + sb->buf[sb->len] = '\0'; + } +} + void strbuf_ltrim(struct strbuf *sb) { char *b = sb->buf; @@ -190,6 +190,9 @@ void strbuf_ltrim(struct strbuf *sb); /* Strip trailing directory separators */ void strbuf_trim_trailing_dir_sep(struct strbuf *sb); +/* Strip trailing LF or CR/LF */ +void strbuf_trim_trailing_newline(struct strbuf *sb); + /** * Replace the contents of the strbuf with a reencoded form. Returns -1 * on error, 0 on success. 
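strbuf_trim_trailing_newline() drops one trailing LF, plus the CR before it when the input came from a CRLF file. The same operation on a plain NUL-terminated buffer, as a quick sketch:

#include <stdio.h>
#include <string.h>

/* Drop a single trailing "\n" or "\r\n". */
static void trim_trailing_newline(char *buf)
{
	size_t len = strlen(buf);

	if (len && buf[len - 1] == '\n') {
		len--;
		if (len && buf[len - 1] == '\r')
			len--;
		buf[len] = '\0';
	}
}

int main(void)
{
	char unix_line[] = "refs/heads/master\n";
	char dos_line[] = "refs/heads/master\r\n";
	char bare[] = "refs/heads/master";

	trim_trailing_newline(unix_line);
	trim_trailing_newline(dos_line);
	trim_trailing_newline(bare);
	printf("[%s] [%s] [%s]\n", unix_line, dos_line, bare);
	return 0;
}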
@@ -582,10 +585,12 @@ void strbuf_add_unique_abbrev(struct strbuf *sb, * run in. If the buffer is NULL the editor is launched as usual but the * file's contents are not read into the buffer upon completion. */ -int launch_editor(const char *path, - struct strbuf *buffer, +int launch_editor(const char *path, struct strbuf *buffer, const char *const *env); +int launch_sequence_editor(const char *path, struct strbuf *buffer, + const char *const *env); + void strbuf_add_lines(struct strbuf *sb, const char *prefix, const char *buf, diff --git a/streaming.c b/streaming.c index d1e6b2dce6..ac7c7a22f9 100644 --- a/streaming.c +++ b/streaming.c @@ -408,6 +408,15 @@ static read_method_decl(pack_non_delta) st->z_state = z_done; break; } + + /* + * Unlike the loose object case, we do not have to worry here + * about running out of input bytes and spinning infinitely. If + * we get Z_BUF_ERROR due to too few input bytes, then we'll + * replenish them in the next use_pack() call when we loop. If + * we truly hit the end of the pack (i.e., because it's corrupt + * or truncated), then use_pack() catches that and will die(). + */ if (status != Z_OK && status != Z_BUF_ERROR) { git_inflate_end(&st->z); st->z_state = z_error; diff --git a/submodule-config.c b/submodule-config.c index 0b2f1647ab..52702c62d9 100644 --- a/submodule-config.c +++ b/submodule-config.c @@ -1,4 +1,5 @@ #include "cache.h" +#include "dir.h" #include "repository.h" #include "config.h" #include "submodule-config.h" @@ -45,7 +46,7 @@ static int config_path_cmp(const void *unused_cmp_data, const struct submodule_entry *b = entry_or_key; return strcmp(a->config->path, b->config->path) || - oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid); + !oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid); } static int config_name_cmp(const void *unused_cmp_data, @@ -57,7 +58,7 @@ static int config_name_cmp(const void *unused_cmp_data, const struct submodule_entry *b = entry_or_key; return strcmp(a->config->name, b->config->name) || - oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid); + !oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid); } static struct submodule_cache *submodule_cache_alloc(void) @@ -613,8 +614,34 @@ static void submodule_cache_check_init(struct repository *repo) static void config_from_gitmodules(config_fn_t fn, struct repository *repo, void *data) { if (repo->worktree) { - char *file = repo_worktree_path(repo, GITMODULES_FILE); - git_config_from_file(fn, file, data); + struct git_config_source config_source = { 0 }; + const struct config_options opts = { 0 }; + struct object_id oid; + char *file; + + file = repo_worktree_path(repo, GITMODULES_FILE); + if (file_exists(file)) { + config_source.file = file; + } else if (repo->submodule_prefix) { + /* + * When get_oid and config_with_options, used below, + * become able to work on a specific repository, this + * warning branch can be removed. 
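config_from_gitmodules() now falls back from the checked-out .gitmodules file to the blob in the index and then to the one in HEAD (the nested-submodule warning branch is skipped here). A hypothetical sketch of just that selection order; pick_source() and its three flags stand in for file_exists()/get_oid() and are not Git functions:

#include <stdio.h>

enum gitmodules_source {
	SRC_NONE,
	SRC_WORKTREE_FILE,	/* .gitmodules checked out in the worktree */
	SRC_INDEX_BLOB,		/* ":.gitmodules" */
	SRC_HEAD_BLOB		/* "HEAD:.gitmodules" */
};

/* Pick the first available source, in worktree -> index -> HEAD order. */
static enum gitmodules_source pick_source(int in_worktree, int in_index, int in_head)
{
	if (in_worktree)
		return SRC_WORKTREE_FILE;
	if (in_index)
		return SRC_INDEX_BLOB;
	if (in_head)
		return SRC_HEAD_BLOB;
	return SRC_NONE;
}

int main(void)
{
	static const char *names[] = {
		"none", "worktree file", "index blob", "HEAD blob"
	};
	int w, i, h;

	for (w = 0; w < 2; w++)
		for (i = 0; i < 2; i++)
			for (h = 0; h < 2; h++)
				printf("worktree=%d index=%d head=%d -> %s\n",
				       w, i, h, names[pick_source(w, i, h)]);
	return 0;
}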
+ */ + warning("nested submodules without %s in the working tree are not supported yet", + GITMODULES_FILE); + goto out; + } else if (get_oid(GITMODULES_INDEX, &oid) >= 0) { + config_source.blob = GITMODULES_INDEX; + } else if (get_oid(GITMODULES_HEAD, &oid) >= 0) { + config_source.blob = GITMODULES_HEAD; + } else { + goto out; + } + + config_with_options(fn, data, &config_source, &opts); + +out: free(file); } } @@ -692,6 +719,43 @@ void submodule_free(struct repository *r) submodule_cache_clear(r->submodule_cache); } +static int config_print_callback(const char *var, const char *value, void *cb_data) +{ + char *wanted_key = cb_data; + + if (!strcmp(wanted_key, var)) + printf("%s\n", value); + + return 0; +} + +int print_config_from_gitmodules(struct repository *repo, const char *key) +{ + int ret; + char *store_key; + + ret = git_config_parse_key(key, &store_key, NULL); + if (ret < 0) + return CONFIG_INVALID_KEY; + + config_from_gitmodules(config_print_callback, repo, store_key); + + free(store_key); + return 0; +} + +int config_set_in_gitmodules_file_gently(const char *key, const char *value) +{ + int ret; + + ret = git_config_set_in_file_gently(GITMODULES_FILE, key, value); + if (ret < 0) + /* Maybe the user already did that, don't error out here */ + warning(_("Could not update .gitmodules entry %s"), key); + + return ret; +} + struct fetch_config { int *max_children; int *recurse_submodules; diff --git a/submodule-config.h b/submodule-config.h index dc7278eea4..4dc9b0771c 100644 --- a/submodule-config.h +++ b/submodule-config.h @@ -48,6 +48,8 @@ const struct submodule *submodule_from_path(struct repository *r, const struct object_id *commit_or_tree, const char *path); void submodule_free(struct repository *r); +int print_config_from_gitmodules(struct repository *repo, const char *key); +int config_set_in_gitmodules_file_gently(const char *key, const char *value); /* * Returns 0 if the name is syntactically acceptable as a submodule "name" diff --git a/submodule.c b/submodule.c index f27f14e868..6415cc5580 100644 --- a/submodule.c +++ b/submodule.c @@ -22,6 +22,7 @@ #include "worktree.h" #include "parse-options.h" #include "object-store.h" +#include "commit-reach.h" static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF; static struct string_list changed_submodule_names = STRING_LIST_INIT_DUP; @@ -51,6 +52,24 @@ int is_gitmodules_unmerged(const struct index_state *istate) } /* + * Check if the .gitmodules file is safe to write. + * + * Writing to the .gitmodules file requires that the file exists in the + * working tree or, if it doesn't, that a brand new .gitmodules file is going + * to be created (i.e. it's neither in the index nor in the current branch). + * + * It is not safe to write to .gitmodules if it's not in the working tree but + * it is in the index or in the current branch, because writing new values + * (and staging them) would blindly overwrite ALL the old content. + */ +int is_writing_gitmodules_ok(void) +{ + struct object_id oid; + return file_exists(GITMODULES_FILE) || + (get_oid(GITMODULES_INDEX, &oid) < 0 && get_oid(GITMODULES_HEAD, &oid) < 0); +} + +/* * Check if the .gitmodules file has unstaged modifications. 
This must be * checked before allowing modifications to the .gitmodules file with the * intention to stage them later, because when continuing we would stage the @@ -88,6 +107,7 @@ int update_path_in_gitmodules(const char *oldpath, const char *newpath) { struct strbuf entry = STRBUF_INIT; const struct submodule *submodule; + int ret; if (!file_exists(GITMODULES_FILE)) /* Do nothing without .gitmodules */ return -1; @@ -103,14 +123,9 @@ int update_path_in_gitmodules(const char *oldpath, const char *newpath) strbuf_addstr(&entry, "submodule."); strbuf_addstr(&entry, submodule->name); strbuf_addstr(&entry, ".path"); - if (git_config_set_in_file_gently(GITMODULES_FILE, entry.buf, newpath) < 0) { - /* Maybe the user already did that, don't error out here */ - warning(_("Could not update .gitmodules entry %s"), entry.buf); - strbuf_release(&entry); - return -1; - } + ret = config_set_in_gitmodules_file_gently(entry.buf, newpath); strbuf_release(&entry); - return 0; + return ret; } /* @@ -427,7 +442,7 @@ static int prepare_submodule_summary(struct rev_info *rev, const char *path, { struct commit_list *list; - init_revisions(rev, NULL); + repo_init_revisions(the_repository, rev, NULL); setup_revisions(0, NULL, rev, NULL); rev->left_right = 1; rev->first_parent_only = 1; @@ -535,7 +550,7 @@ static void show_submodule_header(struct diff_options *o, const char *path, fast_backward = 1; } - if (!oidcmp(one, two)) { + if (oideq(one, two)) { strbuf_release(&sb); return; } @@ -693,6 +708,7 @@ static struct oid_array *submodule_commits(struct string_list *submodules, } struct collect_changed_submodules_cb_data { + struct repository *repo; struct string_list *changed; const struct object_id *commit_oid; }; @@ -732,7 +748,7 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q, if (!S_ISGITLINK(p->two->mode)) continue; - submodule = submodule_from_path(the_repository, + submodule = submodule_from_path(me->repo, commit_oid, p->two->path); if (submodule) name = submodule->name; @@ -740,7 +756,7 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q, name = default_name_or_path(p->two->path); /* make sure name does not collide with existing one */ if (name) - submodule = submodule_from_name(the_repository, + submodule = submodule_from_name(me->repo, commit_oid, name); if (submodule) { warning("Submodule in commit %s at path: " @@ -765,13 +781,14 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q, * have a corresponding 'struct oid_array' (in the 'util' field) which lists * what the submodule pointers were updated to during the change. 
*/ -static void collect_changed_submodules(struct string_list *changed, +static void collect_changed_submodules(struct repository *r, + struct string_list *changed, struct argv_array *argv) { struct rev_info rev; const struct commit *commit; - init_revisions(&rev, NULL); + repo_init_revisions(r, &rev, NULL); setup_revisions(argv->argc, argv->argv, &rev, NULL); if (prepare_revision_walk(&rev)) die("revision walk setup failed"); @@ -779,10 +796,11 @@ static void collect_changed_submodules(struct string_list *changed, while ((commit = get_revision(&rev))) { struct rev_info diff_rev; struct collect_changed_submodules_cb_data data; + data.repo = r; data.changed = changed; data.commit_oid = &commit->object.oid; - init_revisions(&diff_rev, NULL); + repo_init_revisions(r, &diff_rev, NULL); diff_rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK; diff_rev.diffopt.format_callback = collect_changed_submodules_cb; diff_rev.diffopt.format_callback_data = &data; @@ -814,6 +832,7 @@ static int append_oid_to_argv(const struct object_id *oid, void *data) } struct has_commit_data { + struct repository *repo; int result; const char *path; }; @@ -822,7 +841,7 @@ static int check_has_commit(const struct object_id *oid, void *data) { struct has_commit_data *cb = data; - enum object_type type = oid_object_info(the_repository, oid, NULL); + enum object_type type = oid_object_info(cb->repo, oid, NULL); switch (type) { case OBJ_COMMIT: @@ -840,9 +859,11 @@ static int check_has_commit(const struct object_id *oid, void *data) } } -static int submodule_has_commits(const char *path, struct oid_array *commits) +static int submodule_has_commits(struct repository *r, + const char *path, + struct oid_array *commits) { - struct has_commit_data has_commit = { 1, path }; + struct has_commit_data has_commit = { r, 1, path }; /* * Perform a cheap, but incorrect check for the existence of 'commits'. @@ -885,9 +906,11 @@ static int submodule_has_commits(const char *path, struct oid_array *commits) return has_commit.result; } -static int submodule_needs_pushing(const char *path, struct oid_array *commits) +static int submodule_needs_pushing(struct repository *r, + const char *path, + struct oid_array *commits) { - if (!submodule_has_commits(path, commits)) + if (!submodule_has_commits(r, path, commits)) /* * NOTE: We do consider it safe to return "no" here. 
The * correct answer would be "We do not know" instead of @@ -929,8 +952,10 @@ static int submodule_needs_pushing(const char *path, struct oid_array *commits) return 0; } -int find_unpushed_submodules(struct oid_array *commits, - const char *remotes_name, struct string_list *needs_pushing) +int find_unpushed_submodules(struct repository *r, + struct oid_array *commits, + const char *remotes_name, + struct string_list *needs_pushing) { struct string_list submodules = STRING_LIST_INIT_DUP; struct string_list_item *name; @@ -942,14 +967,14 @@ int find_unpushed_submodules(struct oid_array *commits, argv_array_push(&argv, "--not"); argv_array_pushf(&argv, "--remotes=%s", remotes_name); - collect_changed_submodules(&submodules, &argv); + collect_changed_submodules(r, &submodules, &argv); for_each_string_list_item(name, &submodules) { struct oid_array *commits = name->util; const struct submodule *submodule; const char *path = NULL; - submodule = submodule_from_name(the_repository, &null_oid, name->string); + submodule = submodule_from_name(r, &null_oid, name->string); if (submodule) path = submodule->path; else @@ -958,7 +983,7 @@ int find_unpushed_submodules(struct oid_array *commits, if (!path) continue; - if (submodule_needs_pushing(path, commits)) + if (submodule_needs_pushing(r, path, commits)) string_list_insert(needs_pushing, path); } @@ -1043,7 +1068,8 @@ static void submodule_push_check(const char *path, const char *head, die("process for submodule '%s' failed", path); } -int push_unpushed_submodules(struct oid_array *commits, +int push_unpushed_submodules(struct repository *r, + struct oid_array *commits, const struct remote *remote, const struct refspec *rs, const struct string_list *push_options, @@ -1052,7 +1078,8 @@ int push_unpushed_submodules(struct oid_array *commits, int i, ret = 1; struct string_list needs_pushing = STRING_LIST_INIT_DUP; - if (!find_unpushed_submodules(commits, remote->name, &needs_pushing)) + if (!find_unpushed_submodules(r, commits, + remote->name, &needs_pushing)) return 1; /* @@ -1109,14 +1136,14 @@ void check_for_new_submodule_commits(struct object_id *oid) oid_array_append(&ref_tips_after_fetch, oid); } -static void calculate_changed_submodule_paths(void) +static void calculate_changed_submodule_paths(struct repository *r) { struct argv_array argv = ARGV_ARRAY_INIT; struct string_list changed_submodules = STRING_LIST_INIT_DUP; const struct string_list_item *name; /* No need to check if there are no submodules configured */ - if (!submodule_from_path(the_repository, NULL, NULL)) + if (!submodule_from_path(r, NULL, NULL)) return; argv_array_push(&argv, "--"); /* argv[0] program name */ @@ -1130,14 +1157,14 @@ static void calculate_changed_submodule_paths(void) * Collect all submodules (whether checked out or not) for which new * commits have been recorded upstream in "changed_submodule_names". 
*/ - collect_changed_submodules(&changed_submodules, &argv); + collect_changed_submodules(r, &changed_submodules, &argv); for_each_string_list_item(name, &changed_submodules) { struct oid_array *commits = name->util; const struct submodule *submodule; const char *path = NULL; - submodule = submodule_from_name(the_repository, &null_oid, name->string); + submodule = submodule_from_name(r, &null_oid, name->string); if (submodule) path = submodule->path; else @@ -1146,7 +1173,7 @@ static void calculate_changed_submodule_paths(void) if (!path) continue; - if (!submodule_has_commits(path, commits)) + if (!submodule_has_commits(r, path, commits)) string_list_append(&changed_submodule_names, name->string); } @@ -1157,7 +1184,8 @@ static void calculate_changed_submodule_paths(void) initialized_fetch_ref_tips = 0; } -int submodule_touches_in_range(struct object_id *excl_oid, +int submodule_touches_in_range(struct repository *r, + struct object_id *excl_oid, struct object_id *incl_oid) { struct string_list subs = STRING_LIST_INIT_DUP; @@ -1165,7 +1193,7 @@ int submodule_touches_in_range(struct object_id *excl_oid, int ret; /* No need to check if there are no submodules configured */ - if (!submodule_from_path(the_repository, NULL, NULL)) + if (!submodule_from_path(r, NULL, NULL)) return 0; argv_array_push(&args, "--"); /* args[0] program name */ @@ -1175,7 +1203,7 @@ int submodule_touches_in_range(struct object_id *excl_oid, argv_array_push(&args, oid_to_hex(excl_oid)); } - collect_changed_submodules(&subs, &args); + collect_changed_submodules(r, &subs, &args); ret = subs.nr; argv_array_clear(&args); @@ -1345,7 +1373,7 @@ int fetch_populated_submodules(struct repository *r, argv_array_push(&spf.args, "--recurse-submodules-default"); /* default value, "--submodule-prefix" and its value are added later */ - calculate_changed_submodule_paths(); + calculate_changed_submodule_paths(r); run_processes_parallel(max_parallel_jobs, get_next_submodule, fetch_start_failure, diff --git a/submodule.h b/submodule.h index e452919aa4..a680214c01 100644 --- a/submodule.h +++ b/submodule.h @@ -40,6 +40,7 @@ struct submodule_update_strategy { #define SUBMODULE_UPDATE_STRATEGY_INIT {SM_UPDATE_UNSPECIFIED, NULL} int is_gitmodules_unmerged(const struct index_state *istate); +int is_writing_gitmodules_ok(void); int is_staging_gitmodules_ok(struct index_state *istate); int update_path_in_gitmodules(const char *oldpath, const char *newpath); int remove_path_from_gitmodules(const char *path); @@ -102,13 +103,16 @@ int add_submodule_odb(const char *path); * Checks if there are submodule changes in a..b. If a is the null OID, * checks b and all its ancestors instead. */ -int submodule_touches_in_range(struct object_id *a, +int submodule_touches_in_range(struct repository *r, + struct object_id *a, struct object_id *b); -int find_unpushed_submodules(struct oid_array *commits, +int find_unpushed_submodules(struct repository *r, + struct oid_array *commits, const char *remotes_name, struct string_list *needs_pushing); struct refspec; -int push_unpushed_submodules(struct oid_array *commits, +int push_unpushed_submodules(struct repository *r, + struct oid_array *commits, const struct remote *remote, const struct refspec *rs, const struct string_list *push_options, @@ -302,6 +302,12 @@ that cannot be easily covered by a few specific test cases. These could be enabled by running the test suite with correct GIT_TEST_ environment set. 
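For example, such a knob can be enabled for a single script or for the whole suite along these lines (an illustrative invocation only; it assumes a built source tree and uses two of the variables described in the entries below):

	cd t && GIT_TEST_COMMIT_GRAPH=1 ./t0000-basic.sh -v
	GIT_TEST_SPLIT_INDEX=yes make test
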
+GIT_TEST_GETTEXT_POISON=<non-empty?> turns all strings marked for +translation into gibberish if non-empty (think "test -n"). Used for +spotting those tests that need to be marked with a C_LOCALE_OUTPUT +prerequisite when adding more strings for translation. See "Testing +marked strings" in po/README for details. + GIT_TEST_SPLIT_INDEX=<boolean> forces split-index mode on the whole test suite. Accept any boolean values that are accepted by git-config. @@ -316,10 +322,42 @@ packs on demand. This normally only happens when the object size is over 2GB. This variable forces the code path on any object larger than <n> bytes. -GIT_TEST_OE_DELTA_SIZE=<n> exercises the uncomon pack-objects code +GIT_TEST_OE_DELTA_SIZE=<n> exercises the uncommon pack-objects code path where deltas larger than this limit require extra memory allocation for bookkeeping. +GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES=<boolean> checks that cache-tree +records are valid when the index is written out or after a merge. This +is mostly to catch missing invalidation. Default is true. + +GIT_TEST_COMMIT_GRAPH=<boolean>, when true, forces the commit-graph to +be written after every 'git commit' command, and overrides the +'core.commitGraph' setting to true. + +GIT_TEST_FSMONITOR=$PWD/t7519/fsmonitor-all exercises the fsmonitor +code path for utilizing a file system monitor to speed up detecting +new or changed files. + +GIT_TEST_INDEX_VERSION=<n> exercises the index read/write code path +for the index version specified. Can be set to any valid version +(currently 2, 3, or 4). + +GIT_TEST_PRELOAD_INDEX=<boolean> exercises the preload-index code path +by overriding the minimum number of cache entries required per thread. + +GIT_TEST_REBASE_USE_BUILTIN=<boolean>, when false, disables the +builtin version of git-rebase. See 'rebase.useBuiltin' in +git-config(1). + +GIT_TEST_INDEX_THREADS=<n> enables exercising the multi-threaded loading +of the index for the whole test suite by bypassing the default number of +cache entries and thread minimums. Setting this to 1 will make the +index loading single threaded. + +GIT_TEST_MULTI_PACK_INDEX=<boolean>, when true, forces the multi-pack- +index to be written after every 'git repack' command, and overrides the +'core.multiPackIndex' setting to true. + Naming Tests ------------ @@ -394,13 +432,13 @@ This test harness library does the following things: consistently when command line arguments --verbose (or -v), --debug (or -d), and --immediate (or -i) is given. -Do's, don'ts & things to keep in mind -------------------------------------- +Do's & don'ts +------------- Here are a few examples of things you probably should and shouldn't do when writing tests. -Do: +Here are the "do's:" - Put all code inside test_expect_success and other assertions. @@ -445,16 +483,21 @@ Do: Windows, where the shell (MSYS bash) mangles absolute path names. For details, see the commit message of 4114156ae9. -Don't: + - Remember that inside the <script> part, the standard output and + standard error streams are discarded, and the test harness only + reports "ok" or "not ok" to the end user running the tests. Under + --verbose, they are shown to help debug the tests. - - exit() within a <script> part. +And here are the "don'ts:" + + - Don't exit() within a <script> part. The harness will catch this as a programming error of the test. Use test_done instead if you need to stop the tests early (see "Skipping tests" below). - - use '! 
git cmd' when you want to make sure the git command exits - with failure in a controlled way by calling "die()". Instead, + - Don't use '! git cmd' when you want to make sure the git command + exits with failure in a controlled way by calling "die()". Instead, use 'test_must_fail git cmd'. This will signal a failure if git dies in an unexpected way (e.g. segfault). @@ -462,8 +505,35 @@ Don't: platform commands; just use '! cmd'. We are not in the business of verifying that the world given to us sanely works. - - use perl without spelling it as "$PERL_PATH". This is to help our - friends on Windows where the platform Perl often adds CR before + - Don't feed the output of a git command to a pipe, as in: + + git -C repo ls-files | + xargs -n 1 basename | + grep foo + + which will discard git's exit code and may mask a crash. In the + above example, all exit codes are ignored except grep's. + + Instead, write the output of that command to a temporary + file with ">" or assign it to a variable with "x=$(git ...)" rather + than pipe it. + + - Don't use command substitution in a way that discards git's exit + code. When assigning to a variable, the exit code is not discarded, + e.g.: + + x=$(git cat-file -p $sha) && + ... + + is OK because a crash in "git cat-file" will cause the "&&" chain + to fail, but: + + test "refs/heads/foo" = "$(git symbolic-ref HEAD)" + + is not OK and a crash in git could go undetected. + + - Don't use perl without spelling it as "$PERL_PATH". This is to help + our friends on Windows where the platform Perl often adds CR before the end of line, and they bundle Git with a version of Perl that does not do so, whose path is specified with $PERL_PATH. Note that we provide a "perl" function which uses $PERL_PATH under the hood, so @@ -471,17 +541,17 @@ Don't: (but you do, for example, on a shebang line or in a sub script created via "write_script"). - - use sh without spelling it as "$SHELL_PATH", when the script can - be misinterpreted by broken platform shell (e.g. Solaris). + - Don't use sh without spelling it as "$SHELL_PATH", when the script + can be misinterpreted by broken platform shell (e.g. Solaris). - - chdir around in tests. It is not sufficient to chdir to + - Don't chdir around in tests. It is not sufficient to chdir to somewhere and then chdir back to the original location later in the test, as any intermediate step can fail and abort the test, causing the next test to start in an unexpected directory. Do so inside a subshell if necessary. - - save and verify the standard error of compound commands, i.e. group - commands, subshells, and shell functions (except test helper + - Don't save and verify the standard error of compound commands, i.e. + group commands, subshells, and shell functions (except test helper functions like 'test_must_fail') like this: ( cd dir && git cmd ) 2>error && @@ -496,7 +566,7 @@ Don't: ( cd dir && git cmd 2>../error ) && test_cmp expect error - - Break the TAP output + - Don't break the TAP output The raw output from your test may be interpreted by a TAP harness. TAP harnesses will ignore everything they don't know about, but don't step @@ -516,13 +586,6 @@ Don't: but the best indication is to just run the tests with prove(1), it'll complain if anything is amiss. -Keep in mind: - - - Inside the <script> part, the standard output and standard error - streams are discarded, and the test harness only reports "ok" or - "not ok" to the end user running the tests. Under --verbose, they - are shown to help debugging the tests. 
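Put together, the advice above about redirecting git output to a file (rather than piping it) and about using test_must_fail might look like this in a test body; this is only a hypothetical sketch, and it assumes a tracked file named path0 and a branch name that does not exist:

	test_expect_success 'sketch of the advice above' '
		# redirect instead of piping, so a crash in git is not masked
		git ls-files >out &&
		grep path0 out &&
		# test_must_fail, not "! git ...", so a segfault still fails the test
		test_must_fail git rev-parse --verify refs/heads/no-such-branch
	'
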
- Skipping tests -------------- @@ -807,6 +870,28 @@ library for your script to use. the symbolic link in the file system and a part that does; then only the latter part need be protected by a SYMLINKS prerequisite (see below). + - test_oid_init + + This function loads facts and useful object IDs related to the hash + algorithm(s) in use from the files in t/oid-info. + + - test_oid_cache + + This function reads per-hash algorithm information from standard + input (usually a heredoc) in the format described in + t/oid-info/README. This is useful for test-specific values, such as + object IDs, which must vary based on the hash algorithm. + + Certain fixed values, such as hash sizes and common placeholder + object IDs, can be loaded with test_oid_init (described above). + + - test_oid <key> + + This function looks up a value for the hash algorithm in use, based + on the key given. The value must have been loaded using + test_oid_init or test_oid_cache. Providing an unknown key is an + error. + Prerequisites ------------- diff --git a/t/helper/test-delta.c b/t/helper/test-delta.c index 34c7259248..e749a49c88 100644 --- a/t/helper/test-delta.c +++ b/t/helper/test-delta.c @@ -34,8 +34,8 @@ int cmd__delta(int argc, const char **argv) return 1; } from_size = st.st_size; - from_buf = mmap(NULL, from_size, PROT_READ, MAP_PRIVATE, fd, 0); - if (from_buf == MAP_FAILED) { + from_buf = xmalloc(from_size); + if (read_in_full(fd, from_buf, from_size) < 0) { perror(argv[2]); close(fd); return 1; @@ -48,8 +48,8 @@ int cmd__delta(int argc, const char **argv) return 1; } data_size = st.st_size; - data_buf = mmap(NULL, data_size, PROT_READ, MAP_PRIVATE, fd, 0); - if (data_buf == MAP_FAILED) { + data_buf = xmalloc(data_size); + if (read_in_full(fd, data_buf, data_size) < 0) { perror(argv[3]); close(fd); return 1; diff --git a/t/helper/test-dump-cache-tree.c b/t/helper/test-dump-cache-tree.c index 98a4891f1d..6a3f88f5f5 100644 --- a/t/helper/test-dump-cache-tree.c +++ b/t/helper/test-dump-cache-tree.c @@ -33,7 +33,7 @@ static int dump_cache_tree(struct cache_tree *it, } else { dump_one(it, pfx, ""); - if (oidcmp(&it->oid, &ref->oid) || + if (!oideq(&it->oid, &ref->oid) || ref->entry_count != it->entry_count || ref->subtree_nr != it->subtree_nr) { /* claims to be valid but is lying */ diff --git a/t/helper/test-dump-fsmonitor.c b/t/helper/test-dump-fsmonitor.c index ad452707e8..08e3684aff 100644 --- a/t/helper/test-dump-fsmonitor.c +++ b/t/helper/test-dump-fsmonitor.c @@ -1,6 +1,7 @@ +#include "test-tool.h" #include "cache.h" -int cmd_main(int ac, const char **av) +int cmd__dump_fsmonitor(int ac, const char **av) { struct index_state *istate = &the_index; int i; diff --git a/t/helper/test-dump-untracked-cache.c b/t/helper/test-dump-untracked-cache.c index bd92fb305a..52870ebbb3 100644 --- a/t/helper/test-dump-untracked-cache.c +++ b/t/helper/test-dump-untracked-cache.c @@ -1,3 +1,4 @@ +#include "test-tool.h" #include "cache.h" #include "dir.h" @@ -38,7 +39,7 @@ static void dump(struct untracked_cache_dir *ucd, struct strbuf *base) strbuf_setlen(base, len); } -int cmd_main(int ac, const char **av) +int cmd__dump_untracked_cache(int ac, const char **av) { struct untracked_cache *uc; struct strbuf base = STRBUF_INIT; diff --git a/t/helper/test-parse-options.c b/t/helper/test-parse-options.c index 630c76d127..47fee660b8 100644 --- a/t/helper/test-parse-options.c +++ b/t/helper/test-parse-options.c @@ -1,3 +1,4 @@ +#include "test-tool.h" #include "cache.h" #include "parse-options.h" #include "string-list.h" @@ -35,6 
+36,7 @@ static int length_callback(const struct option *opt, const char *arg, int unset) static int number_callback(const struct option *opt, const char *arg, int unset) { + BUG_ON_OPT_NEG(unset); *(int *)opt->value = strtol(arg, NULL, 10); return 0; } @@ -94,11 +96,11 @@ static void show(struct string_list *expect, int *status, const char *fmt, ...) strbuf_release(&buf); } -int cmd_main(int argc, const char **argv) +int cmd__parse_options(int argc, const char **argv) { const char *prefix = "prefix/"; const char *usage[] = { - "test-parse-options <options>", + "test-tool parse-options <options>", "", "A helper function for the parse-options API.", NULL @@ -118,7 +120,6 @@ int cmd_main(int argc, const char **argv) OPT_INTEGER('j', NULL, &integer, "get a integer, too"), OPT_MAGNITUDE('m', "magnitude", &magnitude, "get a magnitude"), OPT_SET_INT(0, "set23", &integer, "set integer to 23", 23), - OPT_DATE('t', NULL, ×tamp, "get timestamp of <time>"), OPT_CALLBACK('L', "length", &integer, "str", "get length of <str>", length_callback), OPT_FILENAME('F', "file", &file, "set file to <file>"), diff --git a/t/helper/test-pkt-line.c b/t/helper/test-pkt-line.c index 30775f986f..282d536384 100644 --- a/t/helper/test-pkt-line.c +++ b/t/helper/test-pkt-line.c @@ -1,4 +1,5 @@ #include "cache.h" +#include "test-tool.h" #include "pkt-line.h" static void pack_line(const char *line) @@ -79,7 +80,7 @@ static void unpack_sideband(void) } } -int cmd_main(int argc, const char **argv) +int cmd__pkt_line(int argc, const char **argv) { if (argc < 2) die("too few arguments"); diff --git a/t/helper/test-prio-queue.c b/t/helper/test-prio-queue.c index 9807b649b1..5bc9c46ea5 100644 --- a/t/helper/test-prio-queue.c +++ b/t/helper/test-prio-queue.c @@ -22,14 +22,24 @@ int cmd__prio_queue(int argc, const char **argv) struct prio_queue pq = { intcmp }; while (*++argv) { - if (!strcmp(*argv, "get")) - show(prio_queue_get(&pq)); - else if (!strcmp(*argv, "dump")) { - int *v; - while ((v = prio_queue_get(&pq))) - show(v); - } - else { + if (!strcmp(*argv, "get")) { + void *peek = prio_queue_peek(&pq); + void *get = prio_queue_get(&pq); + if (peek != get) + BUG("peek and get results do not match"); + show(get); + } else if (!strcmp(*argv, "dump")) { + void *peek; + void *get; + while ((peek = prio_queue_peek(&pq))) { + get = prio_queue_get(&pq); + if (peek != get) + BUG("peek and get results do not match"); + show(get); + } + } else if (!strcmp(*argv, "stack")) { + pq.compare = NULL; + } else { int *v = malloc(sizeof(*v)); *v = atoi(*argv); prio_queue_put(&pq, v); diff --git a/t/helper/test-reach.c b/t/helper/test-reach.c new file mode 100644 index 0000000000..a0272178b7 --- /dev/null +++ b/t/helper/test-reach.c @@ -0,0 +1,168 @@ +#include "test-tool.h" +#include "cache.h" +#include "commit.h" +#include "commit-reach.h" +#include "config.h" +#include "parse-options.h" +#include "ref-filter.h" +#include "string-list.h" +#include "tag.h" + +static void print_sorted_commit_ids(struct commit_list *list) +{ + int i; + struct string_list s = STRING_LIST_INIT_DUP; + + while (list) { + string_list_append(&s, oid_to_hex(&list->item->object.oid)); + list = list->next; + } + + string_list_sort(&s); + + for (i = 0; i < s.nr; i++) + printf("%s\n", s.items[i].string); + + string_list_clear(&s, 0); +} + +int cmd__reach(int ac, const char **av) +{ + struct object_id oid_A, oid_B; + struct commit *A, *B; + struct commit_list *X, *Y; + struct object_array X_obj = OBJECT_ARRAY_INIT; + struct commit **X_array, **Y_array; + int X_nr, X_alloc, 
Y_nr, Y_alloc; + struct strbuf buf = STRBUF_INIT; + struct repository *r = the_repository; + + setup_git_directory(); + + if (ac < 2) + exit(1); + + A = B = NULL; + X = Y = NULL; + X_nr = Y_nr = 0; + X_alloc = Y_alloc = 16; + ALLOC_ARRAY(X_array, X_alloc); + ALLOC_ARRAY(Y_array, Y_alloc); + + while (strbuf_getline(&buf, stdin) != EOF) { + struct object_id oid; + struct object *orig; + struct object *peeled; + struct commit *c; + if (buf.len < 3) + continue; + + if (get_oid_committish(buf.buf + 2, &oid)) + die("failed to resolve %s", buf.buf + 2); + + orig = parse_object(r, &oid); + peeled = deref_tag_noverify(orig); + + if (!peeled) + die("failed to load commit for input %s resulting in oid %s\n", + buf.buf, oid_to_hex(&oid)); + + c = object_as_type(r, peeled, OBJ_COMMIT, 0); + + if (!c) + die("failed to load commit for input %s resulting in oid %s\n", + buf.buf, oid_to_hex(&oid)); + + switch (buf.buf[0]) { + case 'A': + oidcpy(&oid_A, &oid); + A = c; + break; + + case 'B': + oidcpy(&oid_B, &oid); + B = c; + break; + + case 'X': + commit_list_insert(c, &X); + ALLOC_GROW(X_array, X_nr + 1, X_alloc); + X_array[X_nr++] = c; + add_object_array(orig, NULL, &X_obj); + break; + + case 'Y': + commit_list_insert(c, &Y); + ALLOC_GROW(Y_array, Y_nr + 1, Y_alloc); + Y_array[Y_nr++] = c; + break; + + default: + die("unexpected start of line: %c", buf.buf[0]); + } + } + strbuf_release(&buf); + + if (!strcmp(av[1], "ref_newer")) + printf("%s(A,B):%d\n", av[1], ref_newer(&oid_A, &oid_B)); + else if (!strcmp(av[1], "in_merge_bases")) + printf("%s(A,B):%d\n", av[1], in_merge_bases(A, B)); + else if (!strcmp(av[1], "is_descendant_of")) + printf("%s(A,X):%d\n", av[1], is_descendant_of(A, X)); + else if (!strcmp(av[1], "get_merge_bases_many")) { + struct commit_list *list = get_merge_bases_many(A, X_nr, X_array); + printf("%s(A,X):\n", av[1]); + print_sorted_commit_ids(list); + } else if (!strcmp(av[1], "reduce_heads")) { + struct commit_list *list = reduce_heads(X); + printf("%s(X):\n", av[1]); + print_sorted_commit_ids(list); + } else if (!strcmp(av[1], "can_all_from_reach")) { + printf("%s(X,Y):%d\n", av[1], can_all_from_reach(X, Y, 1)); + } else if (!strcmp(av[1], "can_all_from_reach_with_flag")) { + struct commit_list *iter = Y; + + while (iter) { + iter->item->object.flags |= 2; + iter = iter->next; + } + + printf("%s(X,_,_,0,0):%d\n", av[1], can_all_from_reach_with_flag(&X_obj, 2, 4, 0, 0)); + } else if (!strcmp(av[1], "commit_contains")) { + struct ref_filter filter; + struct contains_cache cache; + init_contains_cache(&cache); + + if (ac > 2 && !strcmp(av[2], "--tag")) + filter.with_commit_tag_algo = 1; + else + filter.with_commit_tag_algo = 0; + + printf("%s(_,A,X,_):%d\n", av[1], commit_contains(&filter, A, X, &cache)); + } else if (!strcmp(av[1], "get_reachable_subset")) { + const int reachable_flag = 1; + int i, count = 0; + struct commit_list *current; + struct commit_list *list = get_reachable_subset(X_array, X_nr, + Y_array, Y_nr, + reachable_flag); + printf("get_reachable_subset(X,Y)\n"); + for (current = list; current; current = current->next) { + if (!(list->item->object.flags & reachable_flag)) + die(_("commit %s is not marked reachable"), + oid_to_hex(&list->item->object.oid)); + count++; + } + for (i = 0; i < Y_nr; i++) { + if (Y_array[i]->object.flags & reachable_flag) + count--; + } + + if (count < 0) + die(_("too many commits marked reachable")); + + print_sorted_commit_ids(list); + } + + exit(0); +} diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c new file mode 
100644 index 0000000000..831b586d02 --- /dev/null +++ b/t/helper/test-read-midx.c @@ -0,0 +1,51 @@ +#include "test-tool.h" +#include "cache.h" +#include "midx.h" +#include "repository.h" +#include "object-store.h" + +static int read_midx_file(const char *object_dir) +{ + uint32_t i; + struct multi_pack_index *m = load_multi_pack_index(object_dir, 1); + + if (!m) + return 1; + + printf("header: %08x %d %d %d\n", + m->signature, + m->version, + m->num_chunks, + m->num_packs); + + printf("chunks:"); + + if (m->chunk_pack_names) + printf(" pack-names"); + if (m->chunk_oid_fanout) + printf(" oid-fanout"); + if (m->chunk_oid_lookup) + printf(" oid-lookup"); + if (m->chunk_object_offsets) + printf(" object-offsets"); + if (m->chunk_large_offsets) + printf(" large-offsets"); + + printf("\nnum_objects: %d\n", m->num_objects); + + printf("packs:\n"); + for (i = 0; i < m->num_packs; i++) + printf("%s\n", m->pack_names[i]); + + printf("object-dir: %s\n", m->object_dir); + + return 0; +} + +int cmd__read_midx(int argc, const char **argv) +{ + if (argc != 2) + usage("read-midx <object-dir>"); + + return read_midx_file(argv[1]); +} diff --git a/t/helper/test-revision-walking.c b/t/helper/test-revision-walking.c index 4f8bc75821..625b2dbf82 100644 --- a/t/helper/test-revision-walking.c +++ b/t/helper/test-revision-walking.c @@ -32,7 +32,7 @@ static int run_revision_walk(void) int argc = ARRAY_SIZE(argv) - 1; int got_revision = 0; - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); setup_revisions(argc, argv, &rev, NULL); if (prepare_revision_walk(&rev)) die("revision walk setup failed"); diff --git a/t/helper/test-submodule-nested-repo-config.c b/t/helper/test-submodule-nested-repo-config.c new file mode 100644 index 0000000000..a31e2a9bea --- /dev/null +++ b/t/helper/test-submodule-nested-repo-config.c @@ -0,0 +1,30 @@ +#include "test-tool.h" +#include "submodule-config.h" + +static void die_usage(int argc, const char **argv, const char *msg) +{ + fprintf(stderr, "%s\n", msg); + fprintf(stderr, "Usage: %s <submodulepath> <config name>\n", argv[0]); + exit(1); +} + +int cmd__submodule_nested_repo_config(int argc, const char **argv) +{ + struct repository submodule; + + if (argc < 3) + die_usage(argc, argv, "Wrong number of arguments."); + + setup_git_directory(); + + if (repo_submodule_init(&submodule, the_repository, argv[1])) { + die_usage(argc, argv, "Submodule not found."); + } + + /* Read the config of _child_ submodules. 
*/ + print_config_from_gitmodules(&submodule, argv[2]); + + submodule_free(the_repository); + + return 0; +} diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c index db19131f71..bfb195b1a8 100644 --- a/t/helper/test-tool.c +++ b/t/helper/test-tool.c @@ -14,7 +14,9 @@ static struct test_cmd cmds[] = { { "delta", cmd__delta }, { "drop-caches", cmd__drop_caches }, { "dump-cache-tree", cmd__dump_cache_tree }, + { "dump-fsmonitor", cmd__dump_fsmonitor }, { "dump-split-index", cmd__dump_split_index }, + { "dump-untracked-cache", cmd__dump_untracked_cache }, { "example-decorate", cmd__example_decorate }, { "genrandom", cmd__genrandom }, { "hashmap", cmd__hashmap }, @@ -25,21 +27,26 @@ static struct test_cmd cmds[] = { { "mergesort", cmd__mergesort }, { "mktemp", cmd__mktemp }, { "online-cpus", cmd__online_cpus }, + { "parse-options", cmd__parse_options }, { "path-utils", cmd__path_utils }, + { "pkt-line", cmd__pkt_line }, { "prio-queue", cmd__prio_queue }, + { "reach", cmd__reach }, { "read-cache", cmd__read_cache }, + { "read-midx", cmd__read_midx }, { "ref-store", cmd__ref_store }, { "regex", cmd__regex }, { "repository", cmd__repository }, { "revision-walking", cmd__revision_walking }, { "run-command", cmd__run_command }, { "scrap-cache-tree", cmd__scrap_cache_tree }, - { "sha1-array", cmd__sha1_array }, { "sha1", cmd__sha1 }, + { "sha1-array", cmd__sha1_array }, { "sigchain", cmd__sigchain }, { "strcmp-offset", cmd__strcmp_offset }, { "string-list", cmd__string_list }, { "submodule-config", cmd__submodule_config }, + { "submodule-nested-repo-config", cmd__submodule_nested_repo_config }, { "subprocess", cmd__subprocess }, { "urlmatch-normalization", cmd__urlmatch_normalization }, { "wildmatch", cmd__wildmatch }, @@ -49,13 +56,23 @@ static struct test_cmd cmds[] = { { "write-cache", cmd__write_cache }, }; +static NORETURN void die_usage(void) +{ + size_t i; + + fprintf(stderr, "usage: test-tool <toolname> [args]\n"); + for (i = 0; i < ARRAY_SIZE(cmds); i++) + fprintf(stderr, " %s\n", cmds[i].name); + exit(128); +} + int cmd_main(int argc, const char **argv) { int i; BUG_exit_code = 99; if (argc < 2) - die("I need a test name!"); + die_usage(); for (i = 0; i < ARRAY_SIZE(cmds); i++) { if (!strcmp(cmds[i].name, argv[1])) { @@ -64,5 +81,6 @@ int cmd_main(int argc, const char **argv) return cmds[i].fn(argc, argv); } } - die("There is no test named '%s'", argv[1]); + error("there is no tool named '%s'", argv[1]); + die_usage(); } diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h index 4914001086..042f12464b 100644 --- a/t/helper/test-tool.h +++ b/t/helper/test-tool.h @@ -1,5 +1,5 @@ -#ifndef __TEST_TOOL_H__ -#define __TEST_TOOL_H__ +#ifndef TEST_TOOL_H +#define TEST_TOOL_H #include "git-compat-util.h" @@ -10,7 +10,9 @@ int cmd__date(int argc, const char **argv); int cmd__delta(int argc, const char **argv); int cmd__drop_caches(int argc, const char **argv); int cmd__dump_cache_tree(int argc, const char **argv); +int cmd__dump_fsmonitor(int argc, const char **argv); int cmd__dump_split_index(int argc, const char **argv); +int cmd__dump_untracked_cache(int argc, const char **argv); int cmd__example_decorate(int argc, const char **argv); int cmd__genrandom(int argc, const char **argv); int cmd__hashmap(int argc, const char **argv); @@ -21,21 +23,26 @@ int cmd__match_trees(int argc, const char **argv); int cmd__mergesort(int argc, const char **argv); int cmd__mktemp(int argc, const char **argv); int cmd__online_cpus(int argc, const char **argv); +int cmd__parse_options(int argc, const char 
**argv); int cmd__path_utils(int argc, const char **argv); +int cmd__pkt_line(int argc, const char **argv); int cmd__prio_queue(int argc, const char **argv); +int cmd__reach(int argc, const char **argv); int cmd__read_cache(int argc, const char **argv); +int cmd__read_midx(int argc, const char **argv); int cmd__ref_store(int argc, const char **argv); int cmd__regex(int argc, const char **argv); int cmd__repository(int argc, const char **argv); int cmd__revision_walking(int argc, const char **argv); int cmd__run_command(int argc, const char **argv); int cmd__scrap_cache_tree(int argc, const char **argv); -int cmd__sha1_array(int argc, const char **argv); int cmd__sha1(int argc, const char **argv); +int cmd__sha1_array(int argc, const char **argv); int cmd__sigchain(int argc, const char **argv); int cmd__strcmp_offset(int argc, const char **argv); int cmd__string_list(int argc, const char **argv); int cmd__submodule_config(int argc, const char **argv); +int cmd__submodule_nested_repo_config(int argc, const char **argv); int cmd__subprocess(int argc, const char **argv); int cmd__urlmatch_normalization(int argc, const char **argv); int cmd__wildmatch(int argc, const char **argv); diff --git a/t/lib-gettext.sh b/t/lib-gettext.sh index eec757f104..2139b427ca 100644 --- a/t/lib-gettext.sh +++ b/t/lib-gettext.sh @@ -10,9 +10,14 @@ GIT_TEXTDOMAINDIR="$GIT_BUILD_DIR/po/build/locale" GIT_PO_PATH="$GIT_BUILD_DIR/po" export GIT_TEXTDOMAINDIR GIT_PO_PATH -. "$GIT_BUILD_DIR"/git-sh-i18n +if test -n "$GIT_TEST_INSTALLED" +then + . "$(git --exec-path)"/git-sh-i18n +else + . "$GIT_BUILD_DIR"/git-sh-i18n +fi -if test_have_prereq GETTEXT && ! test_have_prereq GETTEXT_POISON +if test_have_prereq GETTEXT && test_have_prereq C_LOCALE_OUTPUT then # is_IS.UTF-8 on Solaris and FreeBSD, is_IS.utf8 on Debian is_IS_locale=$(locale -a 2>/dev/null | diff --git a/t/lib-gpg.sh b/t/lib-gpg.sh index 3fe02876c1..f1277bef4f 100755 --- a/t/lib-gpg.sh +++ b/t/lib-gpg.sh @@ -57,9 +57,12 @@ then echo | gpgsm --homedir "${GNUPGHOME}" 2>/dev/null \ --passphrase-fd 0 --pinentry-mode loopback \ --import "$TEST_DIRECTORY"/lib-gpg/gpgsm_cert.p12 && - gpgsm --homedir "${GNUPGHOME}" 2>/dev/null -K \ - | grep fingerprint: | cut -d" " -f4 | tr -d '\n' > \ - ${GNUPGHOME}/trustlist.txt && + + gpgsm --homedir "${GNUPGHOME}" 2>/dev/null -K | + grep fingerprint: | + cut -d" " -f4 | + tr -d '\n' >"${GNUPGHOME}/trustlist.txt" && + echo " S relax" >> ${GNUPGHOME}/trustlist.txt && (gpgconf --kill gpg-agent >/dev/null 2>&1 || : ) && echo hello | gpgsm --homedir "${GNUPGHOME}" >/dev/null \ diff --git a/t/lib-gpg/keyring.gpg b/t/lib-gpg/keyring.gpg index d4754a1f19..918dfce332 100644 --- a/t/lib-gpg/keyring.gpg +++ b/t/lib-gpg/keyring.gpg @@ -30,7 +30,6 @@ Cezx4Q2khACcCs+/LtE8Lb9hC+2cvr3uH5p82AI= =aEiU -----END PGP PRIVATE KEY BLOCK----- -----BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1 lQOYBFFMlkcBCADJi/xnAF8yI34PHilSCbM7VtOFO17oFMkpu4cgN2QpPuM5MVjy cvrzKSguZFvPCDLzeAFJW1uPxL4SHaHSkisCrFhijH7OJWcOPNPSFCwu+inAoAsv @@ -83,11 +82,43 @@ fn1sY/IG5atoKK+ypmV/TlBlMZqFQzuPIJQT8VLbmxtLlDhJG04LbI6c8axIZxOO ZKLy5nTTSy16ztqEeS7eifHLPZg1UFFyEEIQ1XW0CNDAeuWKh90ERjyl4Cg7PnWS Z9Ei+zj6JD5Pcdi3BJhQo9WOLOVEJ0NHmewTYqk9QVXH/0v1Hdl4LMJtgcbdbDWk 4UTkXbg9pn3umCgkNJ3Vs8fWnIWO9Izdr2/wrFY2JvUT7Yvl+wsNIWatvOEzGy7n -BOW78WUxzhu0YJTLKy+iKCjg5HS5dx6OC+e4aEEgfhNPCMkbvDsJjtQ= -=hieJ +BOW78WUxzhu0YJTLKy+iKCjg5HS5dx6OC+e4aEEgfhNPCMkbvDsJjtSdA5gEW967 +3AEIAKjseT0sTQjyN39fOn0fzxWp89REMUUKgLigb01MKuuNI3cedBZsz3hpFOKV 
+cii5rldw8uf3yS3Okht2DfHPSD4NrGzLGEzSTpQ10S8N2q0DUYwyLU6C0U8HnMZm +/n+lCGBbUoxvnruohAvKAjpHO3rmJ8D4De9hlWg/fwdAxQQ0Sve0kN8Vwk2p1GuO +OWQKV1SU9c+kBiou7dewQmbilPRanKmP5ZSU4emhpTOMlJFXF+kmYSODQk1cMvWW +Ob3ttll2llX0Gul7Sjf+haq/FcRyRk7Tw5MHwZjr5aWiCny0/0+byvfF6SBIfzyE +qlyWURQ2gHZUqSiG3QPMZiYr04cAEQEAAQAH/Am4rv/oQF6wodgz5y4zc6JJiTDA +4+nKdIuR7OKqUxk1oo7eZjJML/xvMumygNyUvJ9nodl1SlMKilOhdAswfkKj9gJY +BdDJLm1OufhW3pJwy6ahbjeqEgwJFVENtSPF0zkuyED9kElrpbD2ZTGfzwdM0e9D +10ZDFWtODCw8rzOFcijujgI8oilLtxSNrkkTKW+25WJFRNPSHgIkMIm8UlPAG+rj +3Yj9UqodeXTSvXwG2zceOxjFJadV77sOFJDgwWslN6J8El4+GcgwFVepJxoZEj7e +cKkmVr0Dc9/Q04D5dWATc1FYcIhZbTu3oImCAh45ep4u9WYLUV5PGyeMviEEAMwo +mJbYBxWuPjpNa722HQcbvMUiZWWDwHfLCib/SaP0AgfDahid8/PcZwxOPHPByBrm +GDi0z7ibn/pgJr07kpp1Cic9ntfc2FvkI0QMzG0EuiekzQyPEnzjoDHF+V4nJIj2 +GWVjLYYqlZWEmhsfKt1CnlPXBunKoDJ30ABPcHJ/BADT0WxAIVKF4lO2HlrDVP44 +bufBEG9Ct7dl/G08Qve4Ag3VEZpT82vEFp0LzX0mTCDIUKJUYAYLxAIPhP7IvIfc +EZXrwyDUxU7YSgKTHMKo9nFC6fIc1GeGPRalIF1gmTY32qlYJC6y5BTDhZNV5ydG +u8QL2P/orP7XuRrJyeyK+QP/XTekr/DS6Jkct826MPA52ciIkWVgYLatH5fO4HCq +ssDU8vz7FbbvGs0G1Xn7GA4m9dNYVOZtKwX++3nf2IEOpgPiZVTn/nP2u3HutpJb +/HMLlcfZGiGdxS6n/vdz6wsEobJoi6STkHkA+VFNOSZmdsw6eKl3X911tpCTYfOG +2U47/IkCbAQYAQgAIBYhBNS+IjEa0xMeXtoppGEJLoW3InGJBQJb3rvcAhsCAUAJ +EGEJLoW3InGJwHQgBBkBCAAdFiEE+DZKWeB//p9NYwBaZaDuoC4wytcFAlveu9wA +CgkQZaDuoC4wytcD9gf/WigtHl7lFyl8RaE/uqROFEelZyM00v1h55fd/IGRG88E +tN0Lr4FaqBqPkMZjU/LN9UMBaTd+748vHlHaweZqljXJu99CO9Id7Y4w7WzF3C3Y +yQsGZ92EGxthsPK0+rhHV0MbaINupI1oO9gATFglSxq17o83FJatGRjaXCZau8jr +57/By1MGtjk+Iq1NkzGkrX778LdRQGLKDw2Qa7lsdHY8d3lUPAH8mbb97ELmIc9t +PG2aM7ATJL7nBmFuTHo6hmEcIw32Ei9KK1zxM0ZylEYkjBjHAlklWmKb9MiayMC5 +uHW7Iyhjl+NbgbIEr2JTamW/9tL6UrIIxiDEdqaHNfCaB/9D+V31Upcohc9azwB4 +AF8diQwt5nfiVpnVeF/W8+eS1By2W6QrwLNthNRabYFnuSf9USHAY6atDWe+egId +MLIv4ce0i3ykoczSu0oMoUCMxdl9kQrsNHZCqWX/OiDDLSb05u/P/3he900y6tSB +15MbIPA6i5Bw/693nHguqxS1ASbBB/LiIu3vCXdFEs9RMvIJ+qkP3xQA96oImQiK +R3U6OGv593eONKijUINNqHRq6+UxIyJ+OCAi+L2QTidAhJLRCp6EZD96u02cthYq +8KA8j1+rx9BcbeacVVHepeG1JsgxsXX8BTJ7ZuS5VVndZOjag8URW/9nJMf01w/h +el64 +=Iv7W -----END PGP PRIVATE KEY BLOCK----- -----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 mQGiBEZnyykRBACzCPjIpTYNL7Y2tQqlEGTTDlvZcWNLjF5f7ZzuyOqNOidLUgFD 36qch1LZLSZkShdR3Gae+bsolyjxrlFuFP0eXRPMtqK20aLw7WZvPFpEV1ThMne+ @@ -137,6 +168,25 @@ bGPyBuWraCivsqZlf05QZTGahUM7jyCUE/FS25sbS5Q4SRtOC2yOnPGsSGcTjmSi 8uZ000stes7ahHku3onxyz2YNVBRchBCENV1tAjQwHrliofdBEY8peAoOz51kmfR Ivs4+iQ+T3HYtwSYUKPVjizlRCdDR5nsE2KpPUFVx/9L9R3ZeCzCbYHG3Ww1pOFE 5F24PaZ97pgoJDSd1bPH1pyFjvSM3a9v8KxWNib1E+2L5fsLDSFmrbzhMxsu5wTl -u/FlMc4btGCUyysvoigo4OR0uXcejgvnuGhBIH4TTwjJG7w7CY7U -=iYv/ +u/FlMc4btGCUyysvoigo4OR0uXcejgvnuGhBIH4TTwjJG7w7CY7UuQENBFveu9wB +CACo7Hk9LE0I8jd/Xzp9H88VqfPURDFFCoC4oG9NTCrrjSN3HnQWbM94aRTilXIo +ua5XcPLn98ktzpIbdg3xz0g+DaxsyxhM0k6UNdEvDdqtA1GMMi1OgtFPB5zGZv5/ +pQhgW1KMb567qIQLygI6Rzt65ifA+A3vYZVoP38HQMUENEr3tJDfFcJNqdRrjjlk +CldUlPXPpAYqLu3XsEJm4pT0Wpypj+WUlOHpoaUzjJSRVxfpJmEjg0JNXDL1ljm9 +7bZZdpZV9Brpe0o3/oWqvxXEckZO08OTB8GY6+Wlogp8tP9Pm8r3xekgSH88hKpc +llEUNoB2VKkoht0DzGYmK9OHABEBAAGJAmwEGAEIACAWIQTUviIxGtMTHl7aKaRh +CS6FtyJxiQUCW9673AIbAgFACRBhCS6FtyJxicB0IAQZAQgAHRYhBPg2Slngf/6f +TWMAWmWg7qAuMMrXBQJb3rvcAAoJEGWg7qAuMMrXA/YH/1ooLR5e5RcpfEWhP7qk +ThRHpWcjNNL9YeeX3fyBkRvPBLTdC6+BWqgaj5DGY1PyzfVDAWk3fu+PLx5R2sHm +apY1ybvfQjvSHe2OMO1sxdwt2MkLBmfdhBsbYbDytPq4R1dDG2iDbqSNaDvYAExY +JUsate6PNxSWrRkY2lwmWrvI6+e/wctTBrY5PiKtTZMxpK1++/C3UUBiyg8NkGu5 +bHR2PHd5VDwB/Jm2/exC5iHPbTxtmjOwEyS+5wZhbkx6OoZhHCMN9hIvSitc8TNG +cpRGJIwYxwJZJVpim/TImsjAubh1uyMoY5fjW4GyBK9iU2plv/bS+lKyCMYgxHam 
+hzXwmgf/Q/ld9VKXKIXPWs8AeABfHYkMLeZ34laZ1Xhf1vPnktQctlukK8CzbYTU +Wm2BZ7kn/VEhwGOmrQ1nvnoCHTCyL+HHtIt8pKHM0rtKDKFAjMXZfZEK7DR2Qqll +/zogwy0m9Obvz/94XvdNMurUgdeTGyDwOouQcP+vd5x4LqsUtQEmwQfy4iLt7wl3 +RRLPUTLyCfqpD98UAPeqCJkIikd1Ojhr+fd3jjSoo1CDTah0auvlMSMifjggIvi9 +kE4nQISS0QqehGQ/ertNnLYWKvCgPI9fq8fQXG3mnFVR3qXhtSbIMbF1/AUye2bk +uVVZ3WTo2oPFEVv/ZyTH9NcP4XpeuA== +=KRyT -----END PGP PUBLIC KEY BLOCK----- diff --git a/t/lib-rebase.sh b/t/lib-rebase.sh index 0c93d00bdd..7ea30e5006 100644 --- a/t/lib-rebase.sh +++ b/t/lib-rebase.sh @@ -49,7 +49,7 @@ set_fake_editor () { case $line in pick|p|squash|s|fixup|f|edit|e|reword|r|drop|d) action="$line";; - exec_*|x_*) + exec_*|x_*|break|b) echo "$line" | sed 's/_/ /g' >> "$1";; "#") echo '# comment' >> "$1";; diff --git a/t/oid-info/README b/t/oid-info/README new file mode 100644 index 0000000000..27f843fc00 --- /dev/null +++ b/t/oid-info/README @@ -0,0 +1,19 @@ +This directory contains various per-hash values that are used in the testsuite. + +Each file contains lines containing a key-value pair; blank lines and lines +starting with `#` are ignored. The key and value are separated by whitespace +(specifically, those whitespace in the default `$IFS`). The key consists only +of shell identifier characters, and the value consists of a hash algorithm, +colon, and value. The hash algorithm also consists only of shell identifier +characters; it should match the value in sha1-file.c. + +For example, the following lines map the key "rawsz" to "20" if SHA-1 is in use +and to "32" if SHA-256 is in use: + +---- +rawsz sha1:20 +rawsz sha256:32 +---- + +The keys and values used here are loaded by `test_oid_init` (see the README file +in the "t" directory) and are used by calling `test_oid`. diff --git a/t/oid-info/hash-info b/t/oid-info/hash-info new file mode 100644 index 0000000000..ccdbfdf974 --- /dev/null +++ b/t/oid-info/hash-info @@ -0,0 +1,8 @@ +rawsz sha1:20 +rawsz sha256:32 + +hexsz sha1:40 +hexsz sha256:64 + +zero sha1:0000000000000000000000000000000000000000 +zero sha256:0000000000000000000000000000000000000000000000000000000000000000 diff --git a/t/oid-info/oid b/t/oid-info/oid new file mode 100644 index 0000000000..a754970523 --- /dev/null +++ b/t/oid-info/oid @@ -0,0 +1,29 @@ +# These are some common invalid and partial object IDs used in tests. +001 sha1:0000000000000000000000000000000000000001 +001 sha256:0000000000000000000000000000000000000000000000000000000000000001 +002 sha1:0000000000000000000000000000000000000002 +002 sha256:0000000000000000000000000000000000000000000000000000000000000002 +003 sha1:0000000000000000000000000000000000000003 +003 sha256:0000000000000000000000000000000000000000000000000000000000000003 +004 sha1:0000000000000000000000000000000000000004 +004 sha256:0000000000000000000000000000000000000000000000000000000000000004 +005 sha1:0000000000000000000000000000000000000005 +005 sha256:0000000000000000000000000000000000000000000000000000000000000005 +006 sha1:0000000000000000000000000000000000000006 +006 sha256:0000000000000000000000000000000000000000000000000000000000000006 +007 sha1:0000000000000000000000000000000000000007 +007 sha256:0000000000000000000000000000000000000000000000000000000000000007 +# All zeros or Fs missing one or two hex segments. 
+zero_1 sha1:000000000000000000000000000000000000000 +zero_1 sha256:000000000000000000000000000000000000000000000000000000000000000 +zero_2 sha1:00000000000000000000000000000000000000 +zero_2 sha256:00000000000000000000000000000000000000000000000000000000000000 +ff_1 sha1:fffffffffffffffffffffffffffffffffffffff +ff_1 sha256:fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +ff_2 sha1:ffffffffffffffffffffffffffffffffffffff +ff_2 sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff +# More various invalid OIDs. +numeric sha1:0123456789012345678901234567890123456789 +numeric sha256:0123456789012345678901234567890123456789012345678901234567890123 +deadbeef sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef +deadbeef sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef diff --git a/t/perf/README b/t/perf/README index 21321a0f36..be12090c38 100644 --- a/t/perf/README +++ b/t/perf/README @@ -168,3 +168,28 @@ that While we have tried to make sure that it can cope with embedded whitespace and other special characters, it will not work with multi-line data. + +Rather than tracking the performance by run-time as `test_perf` does, you +may also track output size by using `test_size`. The stdout of the +function should be a single numeric value, which will be captured and +shown in the aggregated output. For example: + + test_perf 'time foo' ' + ./foo >foo.out + ' + + test_size 'output size' + wc -c <foo.out + ' + +might produce output like: + + Test origin HEAD + ------------------------------------------------------------- + 1234.1 time foo 0.37(0.79+0.02) 0.26(0.51+0.02) -29.7% + 1234.2 output size 4.3M 3.6M -14.7% + +The item being measured (and its units) is up to the test; the context +and the test title should make it clear to the user whether bigger or +smaller numbers are better. Unlike test_perf, the test code will only be +run once, since output sizes tend to be more deterministic than timings. diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl index bc865160e7..494907a892 100755 --- a/t/perf/aggregate.perl +++ b/t/perf/aggregate.perl @@ -13,27 +13,42 @@ sub get_times { my $line = <$fh>; return undef if not defined $line; close $fh or die "cannot close $name: $!"; - $line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/ - or die "bad input line: $line"; - my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3; - return ($rt, $4, $5); + # times + if ($line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/) { + my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3; + return ($rt, $4, $5); + # size + } elsif ($line =~ /^\d+$/) { + return $&; + } else { + die "bad input line: $line"; + } +} + +sub relative_change { + my ($r, $firstr) = @_; + if ($firstr > 0) { + return sprintf "%+.1f%%", 100.0*($r-$firstr)/$firstr; + } elsif ($r == 0) { + return "="; + } else { + return "+inf"; + } } sub format_times { my ($r, $u, $s, $firstr) = @_; + # no value means we did not finish the test if (!defined $r) { return "<missing>"; } - my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s; - if (defined $firstr) { - if ($firstr > 0) { - $out .= sprintf " %+.1f%%", 100.0*($r-$firstr)/$firstr; - } elsif ($r == 0) { - $out .= " ="; - } else { - $out .= " +inf"; - } + # a single value means we have a size, not times + if (!defined $u) { + return format_size($r, $firstr); } + # otherwise, we have real/user/system times + my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s; + $out .= ' ' . 
relative_change($r, $firstr) if defined $firstr; return $out; } @@ -51,6 +66,25 @@ EOT exit(1); } +sub human_size { + my $n = shift; + my @units = ('', qw(K M G)); + while ($n > 900 && @units > 1) { + $n /= 1000; + shift @units; + } + return $n unless length $units[0]; + return sprintf '%.1f%s', $n, $units[0]; +} + +sub format_size { + my ($size, $first) = @_; + # match the width of a time: 0.00(0.00+0.00) + my $out = sprintf '%15s', human_size($size); + $out .= ' ' . relative_change($size, $first) if defined $first; + return $out; +} + my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests, $codespeed, $sortby, $subsection, $reponame); @@ -181,7 +215,14 @@ sub print_default_results { my $firstr; for my $i (0..$#dirs) { my $d = $dirs[$i]; - $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")]; + my $base = "$resultsdir/$prefixes{$d}$t"; + $times{$prefixes{$d}.$t} = []; + foreach my $type (qw(times size)) { + if (-e "$base.$type") { + $times{$prefixes{$d}.$t} = [get_times("$base.$type")]; + last; + } + } my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}}; my $w = length format_times($r,$u,$s,$firstr); $colwidth[$i] = $w if $w > $colwidth[$i]; diff --git a/t/perf/p1450-fsck.sh b/t/perf/p1450-fsck.sh new file mode 100755 index 0000000000..ae1b84198b --- /dev/null +++ b/t/perf/p1450-fsck.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +test_description='Test fsck performance' + +. ./perf-lib.sh + +test_perf_large_repo + +test_perf 'fsck' ' + git fsck +' + +test_done diff --git a/t/perf/p1451-fsck-skip-list.sh b/t/perf/p1451-fsck-skip-list.sh new file mode 100755 index 0000000000..c2b97d2487 --- /dev/null +++ b/t/perf/p1451-fsck-skip-list.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +test_description='Test fsck skipList performance' + +. ./perf-lib.sh + +test_perf_fresh_repo + +n=1000000 + +test_expect_success "setup $n bad commits" ' + for i in $(test_seq 1 $n) + do + echo "commit refs/heads/master" && + echo "committer C <c@example.com> 1234567890 +0000" && + echo "data <<EOF" && + echo "$i.Q." 
&& + echo "EOF" + done | q_to_nul | git fast-import +' + +skip=0 +while test $skip -le $n +do + test_expect_success "create skipList for $skip bad commits" ' + git log --format=%H --max-count=$skip | + sort >skiplist + ' + + test_perf "fsck with $skip skipped bad commits" ' + git -c fsck.skipList=skiplist fsck + ' + + case $skip in + 0) skip=1 ;; + *) skip=${skip}0 ;; + esac +done + +test_done diff --git a/t/perf/p3400-rebase.sh b/t/perf/p3400-rebase.sh index ce271ca4c1..d202aaed06 100755 --- a/t/perf/p3400-rebase.sh +++ b/t/perf/p3400-rebase.sh @@ -6,9 +6,9 @@ test_description='Tests rebase performance' test_perf_default_repo test_expect_success 'setup rebasing on top of a lot of changes' ' - git checkout -f -b base && - git checkout -b to-rebase && - git checkout -b upstream && + git checkout -f -B base && + git checkout -B to-rebase && + git checkout -B upstream && for i in $(seq 100) do # simulate huge diffs @@ -35,8 +35,8 @@ test_perf 'rebase on top of a lot of unrelated changes' ' test_expect_success 'setup rebasing many changes without split-index' ' git config core.splitIndex false && - git checkout -b upstream2 to-rebase && - git checkout -b to-rebase2 upstream + git checkout -B upstream2 to-rebase && + git checkout -B to-rebase2 upstream ' test_perf 'rebase a lot of unrelated changes without split-index' ' diff --git a/t/perf/p5311-pack-bitmaps-fetch.sh b/t/perf/p5311-pack-bitmaps-fetch.sh new file mode 100755 index 0000000000..b04575951f --- /dev/null +++ b/t/perf/p5311-pack-bitmaps-fetch.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +test_description='performance of fetches from bitmapped packs' +. ./perf-lib.sh + +test_perf_default_repo + +test_expect_success 'create bitmapped server repo' ' + git config pack.writebitmaps true && + git config pack.writebitmaphashcache true && + git repack -ad +' + +# simulate a fetch from a repository that last fetched N days ago, for +# various values of N. We do so by following the first-parent chain, +# and assume the first entry in the chain that is N days older than the current +# HEAD is where the HEAD would have been then. 
+for days in 1 2 4 8 16 32 64 128; do + title=$(printf '%10s' "($days days)") + test_expect_success "setup revs from $days days ago" ' + now=$(git log -1 --format=%ct HEAD) && + then=$(($now - ($days * 86400))) && + tip=$(git rev-list -1 --first-parent --until=$then HEAD) && + { + echo HEAD && + echo ^$tip + } >revs + ' + + test_perf "server $title" ' + git pack-objects --stdout --revs \ + --thin --delta-base-offset \ + <revs >tmp.pack + ' + + test_size "size $title" ' + wc -c <tmp.pack + ' + + test_perf "client $title" ' + git index-pack --stdin --fix-thin <tmp.pack + ' +done + +test_done diff --git a/t/perf/perf-lib.sh b/t/perf/perf-lib.sh index e4c343a6b7..11d1922cf5 100644 --- a/t/perf/perf-lib.sh +++ b/t/perf/perf-lib.sh @@ -179,8 +179,8 @@ exit $ret' >&3 2>&4 return "$eval_ret" } - -test_perf () { +test_wrapper_ () { + test_wrapper_func_=$1; shift test_start_ test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq= test "$#" = 2 || @@ -191,35 +191,57 @@ test_perf () { base=$(basename "$0" .sh) echo "$test_count" >>"$perf_results_dir"/$base.subtests echo "$1" >"$perf_results_dir"/$base.$test_count.descr - if test -z "$verbose"; then - printf "%s" "perf $test_count - $1:" - else - echo "perf $test_count - $1:" - fi - for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do - say >&3 "running: $2" - if test_run_perf_ "$2" - then - if test -z "$verbose"; then - printf " %s" "$i" - else - echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:" - fi + base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count" + "$test_wrapper_func_" "$@" + fi + + test_finish_ +} + +test_perf_ () { + if test -z "$verbose"; then + printf "%s" "perf $test_count - $1:" + else + echo "perf $test_count - $1:" + fi + for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do + say >&3 "running: $2" + if test_run_perf_ "$2" + then + if test -z "$verbose"; then + printf " %s" "$i" else - test -z "$verbose" && echo - test_failure_ "$@" - break + echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:" fi - done - if test -z "$verbose"; then - echo " ok" else - test_ok_ "$1" + test -z "$verbose" && echo + test_failure_ "$@" + break fi - base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count" - "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times + done + if test -z "$verbose"; then + echo " ok" + else + test_ok_ "$1" fi - test_finish_ + "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times +} + +test_perf () { + test_wrapper_ test_perf_ "$@" +} + +test_size_ () { + say >&3 "running: $2" + if test_eval_ "$2" 3>"$base".size; then + test_ok_ "$1" + else + test_failure_ "$@" + fi +} + +test_size () { + test_wrapper_ test_size_ "$@" } # We extend test_done to print timings at the end (./run disables this diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh index 850f651e4e..b6566003dd 100755 --- a/t/t0000-basic.sh +++ b/t/t0000-basic.sh @@ -87,6 +87,10 @@ _run_sub_test_lib_test_common () { passing metrics ' + # Tell the framework that we are self-testing to make sure + # it yields a stable result. + GIT_TEST_FRAMEWORK_SELFTEST=t && + # Point to the t/test-lib.sh, which isn't in ../ as usual . 
"\$TEST_DIRECTORY"/test-lib.sh EOF @@ -270,7 +274,7 @@ test_expect_success 'pretend we have a mix of all possible results' " EOF " -test_expect_success 'test --verbose' ' +test_expect_success C_LOCALE_OUTPUT 'test --verbose' ' test_must_fail run_sub_test_lib_test \ test-verbose "test verbose" --verbose <<-\EOF && test_expect_success "passing test" true @@ -821,9 +825,87 @@ test_expect_success 'tests clean up even on failures' " EOF " +test_expect_success 'test_oid setup' ' + test_oid_init +' + +test_expect_success 'test_oid provides sane info by default' ' + test_oid zero >actual && + grep "^00*\$" actual && + rawsz="$(test_oid rawsz)" && + hexsz="$(test_oid hexsz)" && + test "$hexsz" -eq $(wc -c <actual) && + test $(( $rawsz * 2)) -eq "$hexsz" +' + +test_expect_success 'test_oid can look up data for SHA-1' ' + test_when_finished "test_detect_hash" && + test_set_hash sha1 && + test_oid zero >actual && + grep "^00*\$" actual && + rawsz="$(test_oid rawsz)" && + hexsz="$(test_oid hexsz)" && + test $(wc -c <actual) -eq 40 && + test "$rawsz" -eq 20 && + test "$hexsz" -eq 40 +' + +test_expect_success 'test_oid can look up data for SHA-256' ' + test_when_finished "test_detect_hash" && + test_set_hash sha256 && + test_oid zero >actual && + grep "^00*\$" actual && + rawsz="$(test_oid rawsz)" && + hexsz="$(test_oid hexsz)" && + test $(wc -c <actual) -eq 64 && + test "$rawsz" -eq 32 && + test "$hexsz" -eq 64 +' + ################################################################ # Basics of the basics +test_oid_cache <<\EOF +path0f sha1:f87290f8eb2cbbea7857214459a0739927eab154 +path0f sha256:638106af7c38be056f3212cbd7ac65bc1bac74f420ca5a436ff006a9d025d17d + +path0s sha1:15a98433ae33114b085f3eb3bb03b832b3180a01 +path0s sha256:3a24cc53cf68edddac490bbf94a418a52932130541361f685df685e41dd6c363 + +path2f sha1:3feff949ed00a62d9f7af97c15cd8a30595e7ac7 +path2f sha256:2a7f36571c6fdbaf0e3f62751a0b25a3f4c54d2d1137b3f4af9cb794bb498e5f + +path2s sha1:d8ce161addc5173867a3c3c730924388daedbc38 +path2s sha256:18fd611b787c2e938ddcc248fabe4d66a150f9364763e9ec133dd01d5bb7c65a + +path2d sha1:58a09c23e2ca152193f2786e06986b7b6712bdbe +path2d sha256:00e4b32b96e7e3d65d79112dcbea53238a22715f896933a62b811377e2650c17 + +path3f sha1:0aa34cae68d0878578ad119c86ca2b5ed5b28376 +path3f sha256:09f58616b951bd571b8cb9dc76d372fbb09ab99db2393f5ab3189d26c45099ad + +path3s sha1:8599103969b43aff7e430efea79ca4636466794f +path3s sha256:fce1aed087c053306f3f74c32c1a838c662bbc4551a7ac2420f5d6eb061374d0 + +path3d sha1:21ae8269cacbe57ae09138dcc3a2887f904d02b3 +path3d sha256:9b60497be959cb830bf3f0dc82bcc9ad9e925a24e480837ade46b2295e47efe1 + +subp3f sha1:00fb5908cb97c2564a9783c0c64087333b3b464f +subp3f sha256:a1a9e16998c988453f18313d10375ee1d0ddefe757e710dcae0d66aa1e0c58b3 + +subp3s sha1:6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c +subp3s sha256:81759d9f5e93c6546ecfcadb560c1ff057314b09f93fe8ec06e2d8610d34ef10 + +subp3d sha1:3c5e5399f3a333eddecce7a9b9465b63f65f51e2 +subp3d sha256:76b4ef482d4fa1c754390344cf3851c7f883b27cf9bc999c6547928c46aeafb7 + +root sha1:087704a96baf1c2d1c869a8b084481e121c88b5b +root sha256:9481b52abab1b2ffeedbf9de63ce422b929f179c1b98ff7bee5f8f1bc0710751 + +simpletree sha1:7bb943559a305bdd6bdee2cef6e5df2413c3d30a +simpletree sha256:1710c07a6c86f9a3c7376364df04c47ee39e5a5e221fcdd84b743bc9bb7e2bc5 +EOF + # updating a new file without --add should fail. 
test_expect_success 'git update-index without --add should fail adding' ' test_must_fail git update-index should-be-empty @@ -839,8 +921,8 @@ test_expect_success 'writing tree out with git write-tree' ' ' # we know the shape and contents of the tree and know the object ID for it. -test_expect_success SHA1 'validate object ID of a known tree' ' - test "$tree" = 7bb943559a305bdd6bdee2cef6e5df2413c3d30a +test_expect_success 'validate object ID of a known tree' ' + test "$tree" = "$(test_oid simpletree)" ' # Removing paths. @@ -882,16 +964,16 @@ test_expect_success 'showing stage with git ls-files --stage' ' git ls-files --stage >current ' -test_expect_success SHA1 'validate git ls-files output for a known tree' ' - cat >expected <<-\EOF && - 100644 f87290f8eb2cbbea7857214459a0739927eab154 0 path0 - 120000 15a98433ae33114b085f3eb3bb03b832b3180a01 0 path0sym - 100644 3feff949ed00a62d9f7af97c15cd8a30595e7ac7 0 path2/file2 - 120000 d8ce161addc5173867a3c3c730924388daedbc38 0 path2/file2sym - 100644 0aa34cae68d0878578ad119c86ca2b5ed5b28376 0 path3/file3 - 120000 8599103969b43aff7e430efea79ca4636466794f 0 path3/file3sym - 100644 00fb5908cb97c2564a9783c0c64087333b3b464f 0 path3/subp3/file3 - 120000 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c 0 path3/subp3/file3sym +test_expect_success 'validate git ls-files output for a known tree' ' + cat >expected <<-EOF && + 100644 $(test_oid path0f) 0 path0 + 120000 $(test_oid path0s) 0 path0sym + 100644 $(test_oid path2f) 0 path2/file2 + 120000 $(test_oid path2s) 0 path2/file2sym + 100644 $(test_oid path3f) 0 path3/file3 + 120000 $(test_oid path3s) 0 path3/file3sym + 100644 $(test_oid subp3f) 0 path3/subp3/file3 + 120000 $(test_oid subp3s) 0 path3/subp3/file3sym EOF test_cmp expected current ' @@ -900,20 +982,20 @@ test_expect_success 'writing tree out with git write-tree' ' tree=$(git write-tree) ' -test_expect_success SHA1 'validate object ID for a known tree' ' - test "$tree" = 087704a96baf1c2d1c869a8b084481e121c88b5b +test_expect_success 'validate object ID for a known tree' ' + test "$tree" = "$(test_oid root)" ' test_expect_success 'showing tree with git ls-tree' ' git ls-tree $tree >current ' -test_expect_success SHA1 'git ls-tree output for a known tree' ' - cat >expected <<-\EOF && - 100644 blob f87290f8eb2cbbea7857214459a0739927eab154 path0 - 120000 blob 15a98433ae33114b085f3eb3bb03b832b3180a01 path0sym - 040000 tree 58a09c23e2ca152193f2786e06986b7b6712bdbe path2 - 040000 tree 21ae8269cacbe57ae09138dcc3a2887f904d02b3 path3 +test_expect_success 'git ls-tree output for a known tree' ' + cat >expected <<-EOF && + 100644 blob $(test_oid path0f) path0 + 120000 blob $(test_oid path0s) path0sym + 040000 tree $(test_oid path2d) path2 + 040000 tree $(test_oid path3d) path3 EOF test_cmp expected current ' @@ -924,16 +1006,16 @@ test_expect_success 'showing tree with git ls-tree -r' ' git ls-tree -r $tree >current ' -test_expect_success SHA1 'git ls-tree -r output for a known tree' ' - cat >expected <<-\EOF && - 100644 blob f87290f8eb2cbbea7857214459a0739927eab154 path0 - 120000 blob 15a98433ae33114b085f3eb3bb03b832b3180a01 path0sym - 100644 blob 3feff949ed00a62d9f7af97c15cd8a30595e7ac7 path2/file2 - 120000 blob d8ce161addc5173867a3c3c730924388daedbc38 path2/file2sym - 100644 blob 0aa34cae68d0878578ad119c86ca2b5ed5b28376 path3/file3 - 120000 blob 8599103969b43aff7e430efea79ca4636466794f path3/file3sym - 100644 blob 00fb5908cb97c2564a9783c0c64087333b3b464f path3/subp3/file3 - 120000 blob 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c path3/subp3/file3sym 
+test_expect_success 'git ls-tree -r output for a known tree' ' + cat >expected <<-EOF && + 100644 blob $(test_oid path0f) path0 + 120000 blob $(test_oid path0s) path0sym + 100644 blob $(test_oid path2f) path2/file2 + 120000 blob $(test_oid path2s) path2/file2sym + 100644 blob $(test_oid path3f) path3/file3 + 120000 blob $(test_oid path3s) path3/file3sym + 100644 blob $(test_oid subp3f) path3/subp3/file3 + 120000 blob $(test_oid subp3s) path3/subp3/file3sym EOF test_cmp expected current ' @@ -943,19 +1025,19 @@ test_expect_success 'showing tree with git ls-tree -r -t' ' git ls-tree -r -t $tree >current ' -test_expect_success SHA1 'git ls-tree -r output for a known tree' ' - cat >expected <<-\EOF && - 100644 blob f87290f8eb2cbbea7857214459a0739927eab154 path0 - 120000 blob 15a98433ae33114b085f3eb3bb03b832b3180a01 path0sym - 040000 tree 58a09c23e2ca152193f2786e06986b7b6712bdbe path2 - 100644 blob 3feff949ed00a62d9f7af97c15cd8a30595e7ac7 path2/file2 - 120000 blob d8ce161addc5173867a3c3c730924388daedbc38 path2/file2sym - 040000 tree 21ae8269cacbe57ae09138dcc3a2887f904d02b3 path3 - 100644 blob 0aa34cae68d0878578ad119c86ca2b5ed5b28376 path3/file3 - 120000 blob 8599103969b43aff7e430efea79ca4636466794f path3/file3sym - 040000 tree 3c5e5399f3a333eddecce7a9b9465b63f65f51e2 path3/subp3 - 100644 blob 00fb5908cb97c2564a9783c0c64087333b3b464f path3/subp3/file3 - 120000 blob 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c path3/subp3/file3sym +test_expect_success 'git ls-tree -r output for a known tree' ' + cat >expected <<-EOF && + 100644 blob $(test_oid path0f) path0 + 120000 blob $(test_oid path0s) path0sym + 040000 tree $(test_oid path2d) path2 + 100644 blob $(test_oid path2f) path2/file2 + 120000 blob $(test_oid path2s) path2/file2sym + 040000 tree $(test_oid path3d) path3 + 100644 blob $(test_oid path3f) path3/file3 + 120000 blob $(test_oid path3s) path3/file3sym + 040000 tree $(test_oid subp3d) path3/subp3 + 100644 blob $(test_oid subp3f) path3/subp3/file3 + 120000 blob $(test_oid subp3s) path3/subp3/file3sym EOF test_cmp expected current ' @@ -964,26 +1046,27 @@ test_expect_success 'writing partial tree out with git write-tree --prefix' ' ptree=$(git write-tree --prefix=path3) ' -test_expect_success SHA1 'validate object ID for a known tree' ' - test "$ptree" = 21ae8269cacbe57ae09138dcc3a2887f904d02b3 +test_expect_success 'validate object ID for a known tree' ' + test "$ptree" = $(test_oid path3d) ' test_expect_success 'writing partial tree out with git write-tree --prefix' ' ptree=$(git write-tree --prefix=path3/subp3) ' -test_expect_success SHA1 'validate object ID for a known tree' ' - test "$ptree" = 3c5e5399f3a333eddecce7a9b9465b63f65f51e2 +test_expect_success 'validate object ID for a known tree' ' + test "$ptree" = $(test_oid subp3d) ' test_expect_success 'put invalid objects into the index' ' rm -f .git/index && - cat >badobjects <<-\EOF && - 100644 blob 1000000000000000000000000000000000000000 dir/file1 - 100644 blob 2000000000000000000000000000000000000000 dir/file2 - 100644 blob 3000000000000000000000000000000000000000 dir/file3 - 100644 blob 4000000000000000000000000000000000000000 dir/file4 - 100644 blob 5000000000000000000000000000000000000000 dir/file5 + suffix=$(echo $ZERO_OID | sed -e "s/^.//") && + cat >badobjects <<-EOF && + 100644 blob $(test_oid 001) dir/file1 + 100644 blob $(test_oid 002) dir/file2 + 100644 blob $(test_oid 003) dir/file3 + 100644 blob $(test_oid 004) dir/file4 + 100644 blob $(test_oid 005) dir/file5 EOF git update-index --index-info <badobjects ' @@ -1006,19 
+1089,19 @@ test_expect_success 'git read-tree followed by write-tree should be idempotent' test "$newtree" = "$tree" ' -test_expect_success SHA1 'validate git diff-files output for a know cache/work tree state' ' - cat >expected <<\EOF && -:100644 100644 f87290f8eb2cbbea7857214459a0739927eab154 0000000000000000000000000000000000000000 M path0 -:120000 120000 15a98433ae33114b085f3eb3bb03b832b3180a01 0000000000000000000000000000000000000000 M path0sym -:100644 100644 3feff949ed00a62d9f7af97c15cd8a30595e7ac7 0000000000000000000000000000000000000000 M path2/file2 -:120000 120000 d8ce161addc5173867a3c3c730924388daedbc38 0000000000000000000000000000000000000000 M path2/file2sym -:100644 100644 0aa34cae68d0878578ad119c86ca2b5ed5b28376 0000000000000000000000000000000000000000 M path3/file3 -:120000 120000 8599103969b43aff7e430efea79ca4636466794f 0000000000000000000000000000000000000000 M path3/file3sym -:100644 100644 00fb5908cb97c2564a9783c0c64087333b3b464f 0000000000000000000000000000000000000000 M path3/subp3/file3 -:120000 120000 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c 0000000000000000000000000000000000000000 M path3/subp3/file3sym +test_expect_success 'validate git diff-files output for a know cache/work tree state' ' + cat >expected <<EOF && +:100644 100644 $(test_oid path0f) $ZERO_OID M path0 +:120000 120000 $(test_oid path0s) $ZERO_OID M path0sym +:100644 100644 $(test_oid path2f) $ZERO_OID M path2/file2 +:120000 120000 $(test_oid path2s) $ZERO_OID M path2/file2sym +:100644 100644 $(test_oid path3f) $ZERO_OID M path3/file3 +:120000 120000 $(test_oid path3s) $ZERO_OID M path3/file3sym +:100644 100644 $(test_oid subp3f) $ZERO_OID M path3/subp3/file3 +:120000 120000 $(test_oid subp3s) $ZERO_OID M path3/subp3/file3sym EOF git diff-files >current && - test_cmp current expected + test_cmp expected current ' test_expect_success 'git update-index --refresh should succeed' ' @@ -1031,23 +1114,23 @@ test_expect_success 'no diff after checkout and git update-index --refresh' ' ' ################################################################ -P=087704a96baf1c2d1c869a8b084481e121c88b5b +P=$(test_oid root) -test_expect_success SHA1 'git commit-tree records the correct tree in a commit' ' +test_expect_success 'git commit-tree records the correct tree in a commit' ' commit0=$(echo NO | git commit-tree $P) && tree=$(git show --pretty=raw $commit0 | sed -n -e "s/^tree //p" -e "/^author /q") && test "z$tree" = "z$P" ' -test_expect_success SHA1 'git commit-tree records the correct parent in a commit' ' +test_expect_success 'git commit-tree records the correct parent in a commit' ' commit1=$(echo NO | git commit-tree $P -p $commit0) && parent=$(git show --pretty=raw $commit1 | sed -n -e "s/^parent //p" -e "/^author /q") && test "z$commit0" = "z$parent" ' -test_expect_success SHA1 'git commit-tree omits duplicated parent in a commit' ' +test_expect_success 'git commit-tree omits duplicated parent in a commit' ' commit2=$(echo NO | git commit-tree $P -p $commit0 -p $commit0) && parent=$(git show --pretty=raw $commit2 | sed -n -e "s/^parent //p" -e "/^author /q" | diff --git a/t/t0002-gitfile.sh b/t/t0002-gitfile.sh index 3691023d51..0aa9908ea1 100755 --- a/t/t0002-gitfile.sh +++ b/t/t0002-gitfile.sh @@ -92,11 +92,12 @@ test_expect_success 'enter_repo non-strict mode' ' mv .git .realgit && echo "gitdir: .realgit" >.git ) && + head=$(git -C enter_repo rev-parse HEAD) && git ls-remote enter_repo >actual && - cat >expected <<-\EOF && - 946e985ab20de757ca5b872b16d64e92ff3803a9 HEAD - 
946e985ab20de757ca5b872b16d64e92ff3803a9 refs/heads/master - 946e985ab20de757ca5b872b16d64e92ff3803a9 refs/tags/foo + cat >expected <<-EOF && + $head HEAD + $head refs/heads/master + $head refs/tags/foo EOF test_cmp expected actual ' @@ -106,21 +107,23 @@ test_expect_success 'enter_repo linked checkout' ' cd enter_repo && git worktree add ../foo refs/tags/foo ) && + head=$(git -C enter_repo rev-parse HEAD) && git ls-remote foo >actual && - cat >expected <<-\EOF && - 946e985ab20de757ca5b872b16d64e92ff3803a9 HEAD - 946e985ab20de757ca5b872b16d64e92ff3803a9 refs/heads/master - 946e985ab20de757ca5b872b16d64e92ff3803a9 refs/tags/foo + cat >expected <<-EOF && + $head HEAD + $head refs/heads/master + $head refs/tags/foo EOF test_cmp expected actual ' test_expect_success 'enter_repo strict mode' ' + head=$(git -C enter_repo rev-parse HEAD) && git ls-remote --upload-pack="git upload-pack --strict" foo/.git >actual && - cat >expected <<-\EOF && - 946e985ab20de757ca5b872b16d64e92ff3803a9 HEAD - 946e985ab20de757ca5b872b16d64e92ff3803a9 refs/heads/master - 946e985ab20de757ca5b872b16d64e92ff3803a9 refs/tags/foo + cat >expected <<-EOF && + $head HEAD + $head refs/heads/master + $head refs/tags/foo EOF test_cmp expected actual ' diff --git a/t/t0006-date.sh b/t/t0006-date.sh index 64ff86df8e..ffb2975e48 100755 --- a/t/t0006-date.sh +++ b/t/t0006-date.sh @@ -113,6 +113,8 @@ check_approxidate '3:00' '2009-08-30 03:00:00' check_approxidate '15:00' '2009-08-30 15:00:00' check_approxidate 'noon today' '2009-08-30 12:00:00' check_approxidate 'noon yesterday' '2009-08-29 12:00:00' +check_approxidate 'January 5th noon pm' '2009-01-05 12:00:00' +check_approxidate '10am noon' '2009-08-29 12:00:00' check_approxidate 'last tuesday' '2009-08-25 19:20:00' check_approxidate 'July 5th' '2009-07-05 19:20:00' diff --git a/t/t0009-prio-queue.sh b/t/t0009-prio-queue.sh index e56dfce668..3941ad2528 100755 --- a/t/t0009-prio-queue.sh +++ b/t/t0009-prio-queue.sh @@ -47,4 +47,18 @@ test_expect_success 'notice empty queue' ' test_cmp expect actual ' +cat >expect <<'EOF' +3 +2 +6 +4 +5 +1 +8 +EOF +test_expect_success 'stack order' ' + test-tool prio-queue stack 8 1 5 4 6 2 3 dump >actual && + test_cmp expect actual +' + test_done diff --git a/t/t0012-help.sh b/t/t0012-help.sh index bc27df7f38..e8ef7300ec 100755 --- a/t/t0012-help.sh +++ b/t/t0012-help.sh @@ -29,9 +29,9 @@ test_expect_success "setup" ' # to verify test_expect_success 'basic help commands' ' git help >/dev/null && - git help -a >/dev/null && + git help -a --no-verbose >/dev/null && git help -g >/dev/null && - git help -av >/dev/null + git help -a >/dev/null ' test_expect_success "works for commands and guides by default" ' diff --git a/t/t0014-alias.sh b/t/t0014-alias.sh new file mode 100755 index 0000000000..a070e645d7 --- /dev/null +++ b/t/t0014-alias.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +test_description='git command aliasing' + +. 
./test-lib.sh + +test_expect_success 'nested aliases - internal execution' ' + git config alias.nested-internal-1 nested-internal-2 && + git config alias.nested-internal-2 status && + git nested-internal-1 >output && + test_i18ngrep "^On branch " output +' + +test_expect_success 'nested aliases - mixed execution' ' + git config alias.nested-external-1 nested-external-2 && + git config alias.nested-external-2 "!git nested-external-3" && + git config alias.nested-external-3 status && + git nested-external-1 >output && + test_i18ngrep "^On branch " output +' + +test_expect_success 'looping aliases - internal execution' ' + git config alias.loop-internal-1 loop-internal-2 && + git config alias.loop-internal-2 loop-internal-3 && + git config alias.loop-internal-3 loop-internal-2 && + test_must_fail git loop-internal-1 2>output && + test_i18ngrep "^fatal: alias loop detected: expansion of" output +' + +# This test is disabled until external loops are fixed, because would block +# the test suite for a full minute. +# +#test_expect_failure 'looping aliases - mixed execution' ' +# git config alias.loop-mixed-1 loop-mixed-2 && +# git config alias.loop-mixed-2 "!git loop-mixed-1" && +# test_must_fail git loop-mixed-1 2>output && +# test_i18ngrep "^fatal: alias loop detected: expansion of" output +#' + +test_done diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index 308cd28f3b..fd5f1ac649 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -166,10 +166,10 @@ test_expect_success expanded_in_repo ' rm -f expanded-keywords expanded-keywords-crlf && git checkout -- expanded-keywords && - test_cmp expanded-keywords expected-output && + test_cmp expected-output expanded-keywords && git checkout -- expanded-keywords-crlf && - test_cmp expanded-keywords-crlf expected-output-crlf + test_cmp expected-output-crlf expanded-keywords-crlf ' # The use of %f in a filter definition is expanded to the path to diff --git a/t/t0029-core-unsetenvvars.sh b/t/t0029-core-unsetenvvars.sh new file mode 100755 index 0000000000..24ce46a6ea --- /dev/null +++ b/t/t0029-core-unsetenvvars.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +test_description='test the Windows-only core.unsetenvvars setting' + +. ./test-lib.sh + +if ! test_have_prereq MINGW +then + skip_all='skipping Windows-specific tests' + test_done +fi + +test_expect_success 'setup' ' + mkdir -p "$TRASH_DIRECTORY/.git/hooks" && + write_script "$TRASH_DIRECTORY/.git/hooks/pre-commit" <<-\EOF + echo $HOBBES >&2 + EOF +' + +test_expect_success 'core.unsetenvvars works' ' + HOBBES=Calvin && + export HOBBES && + git commit --allow-empty -m with 2>err && + grep Calvin err && + git -c core.unsetenvvars=FINDUS,HOBBES,CALVIN \ + commit --allow-empty -m without 2>err && + ! grep Calvin err +' + +test_done diff --git a/t/t0040-parse-options.sh b/t/t0040-parse-options.sh index 5b0560fa20..f5b10861c4 100755 --- a/t/t0040-parse-options.sh +++ b/t/t0040-parse-options.sh @@ -8,7 +8,7 @@ test_description='our own option parser' . ./test-lib.sh cat >expect <<\EOF -usage: test-parse-options <options> +usage: test-tool parse-options <options> A helper function for the parse-options API. 
@@ -23,7 +23,6 @@ usage: test-parse-options <options> -j <n> get a integer, too -m, --magnitude <n> get a magnitude --set23 set integer to 23 - -t <time> get timestamp of <time> -L, --length <str> get length of <str> -F, --file <file> set file to <file> @@ -52,7 +51,7 @@ Standard options EOF test_expect_success 'test help' ' - test_must_fail test-parse-options -h >output 2>output.err && + test_must_fail test-tool parse-options -h >output 2>output.err && test_must_be_empty output.err && test_i18ncmp expect output ' @@ -64,7 +63,7 @@ check () { shift && expect="$1" && shift && - test-parse-options --expect="$what $expect" "$@" + test-tool parse-options --expect="$what $expect" "$@" } check_unknown_i18n() { @@ -75,7 +74,7 @@ check_unknown_i18n() { echo error: unknown switch \`${1#-}\' >expect ;; esac && cat expect.err >>expect && - test_must_fail test-parse-options $* >output 2>output.err && + test_must_fail test-tool parse-options $* >output 2>output.err && test_must_be_empty output && test_i18ncmp expect output.err } @@ -133,7 +132,7 @@ file: prefix/my.file EOF test_expect_success 'short options' ' - test-parse-options -s123 -b -i 1729 -m 16k -b -vv -n -F my.file \ + test-tool parse-options -s123 -b -i 1729 -m 16k -b -vv -n -F my.file \ >output 2>output.err && test_cmp expect output && test_must_be_empty output.err @@ -153,7 +152,7 @@ file: prefix/fi.le EOF test_expect_success 'long options' ' - test-parse-options --boolean --integer 1729 --magnitude 16k \ + test-tool parse-options --boolean --integer 1729 --magnitude 16k \ --boolean --string2=321 --verbose --verbose --no-dry-run \ --abbrev=10 --file fi.le --obsolete \ >output 2>output.err && @@ -162,9 +161,9 @@ test_expect_success 'long options' ' ' test_expect_success 'missing required value' ' - test_expect_code 129 test-parse-options -s && - test_expect_code 129 test-parse-options --string && - test_expect_code 129 test-parse-options --file + test_expect_code 129 test-tool parse-options -s && + test_expect_code 129 test-tool parse-options --string && + test_expect_code 129 test-tool parse-options --file ' cat >expect <<\EOF @@ -184,7 +183,7 @@ arg 02: --boolean EOF test_expect_success 'intermingled arguments' ' - test-parse-options a1 --string 123 b1 --boolean -j 13 -- --boolean \ + test-tool parse-options a1 --string 123 b1 --boolean -j 13 -- --boolean \ >output 2>output.err && test_must_be_empty output.err && test_cmp expect output @@ -204,21 +203,21 @@ file: (not set) EOF test_expect_success 'unambiguously abbreviated option' ' - test-parse-options --int 2 --boolean --no-bo >output 2>output.err && + test-tool parse-options --int 2 --boolean --no-bo >output 2>output.err && test_must_be_empty output.err && test_cmp expect output ' test_expect_success 'unambiguously abbreviated option with "="' ' - test-parse-options --expect="integer: 2" --int=2 + test-tool parse-options --expect="integer: 2" --int=2 ' test_expect_success 'ambiguously abbreviated option' ' - test_expect_code 129 test-parse-options --strin 123 + test_expect_code 129 test-tool parse-options --strin 123 ' test_expect_success 'non ambiguous option (after two options it abbreviates)' ' - test-parse-options --expect="string: 123" --st 123 + test-tool parse-options --expect="string: 123" --st 123 ' cat >typo.err <<\EOF @@ -226,7 +225,7 @@ error: did you mean `--boolean` (with two dashes ?) 
EOF test_expect_success 'detect possible typos' ' - test_must_fail test-parse-options -boolean >output 2>output.err && + test_must_fail test-tool parse-options -boolean >output 2>output.err && test_must_be_empty output && test_cmp typo.err output.err ' @@ -236,34 +235,13 @@ error: did you mean `--ambiguous` (with two dashes ?) EOF test_expect_success 'detect possible typos' ' - test_must_fail test-parse-options -ambiguous >output 2>output.err && + test_must_fail test-tool parse-options -ambiguous >output 2>output.err && test_must_be_empty output && test_cmp typo.err output.err ' test_expect_success 'keep some options as arguments' ' - test-parse-options --expect="arg 00: --quux" --quux -' - -cat >expect <<\EOF -boolean: 0 -integer: 0 -magnitude: 0 -timestamp: 1 -string: (not set) -abbrev: 7 -verbose: -1 -quiet: 1 -dry run: no -file: (not set) -arg 00: foo -EOF - -test_expect_success 'OPT_DATE() works' ' - test-parse-options -t "1970-01-01 00:00:01 +0000" \ - foo -q >output 2>output.err && - test_must_be_empty output.err && - test_cmp expect output + test-tool parse-options --expect="arg 00: --quux" --quux ' cat >expect <<\EOF @@ -281,13 +259,13 @@ file: (not set) EOF test_expect_success 'OPT_CALLBACK() and OPT_BIT() work' ' - test-parse-options --length=four -b -4 >output 2>output.err && + test-tool parse-options --length=four -b -4 >output 2>output.err && test_must_be_empty output.err && test_cmp expect output ' test_expect_success 'OPT_CALLBACK() and callback errors work' ' - test_must_fail test-parse-options --no-length >output 2>output.err && + test_must_fail test-tool parse-options --no-length >output 2>output.err && test_must_be_empty output && test_must_be_empty output.err ' @@ -306,31 +284,31 @@ file: (not set) EOF test_expect_success 'OPT_BIT() and OPT_SET_INT() work' ' - test-parse-options --set23 -bbbbb --no-or4 >output 2>output.err && + test-tool parse-options --set23 -bbbbb --no-or4 >output 2>output.err && test_must_be_empty output.err && test_cmp expect output ' test_expect_success 'OPT_NEGBIT() and OPT_SET_INT() work' ' - test-parse-options --set23 -bbbbb --neg-or4 >output 2>output.err && + test-tool parse-options --set23 -bbbbb --neg-or4 >output 2>output.err && test_must_be_empty output.err && test_cmp expect output ' test_expect_success 'OPT_BIT() works' ' - test-parse-options --expect="boolean: 6" -bb --or4 + test-tool parse-options --expect="boolean: 6" -bb --or4 ' test_expect_success 'OPT_NEGBIT() works' ' - test-parse-options --expect="boolean: 6" -bb --no-neg-or4 + test-tool parse-options --expect="boolean: 6" -bb --no-neg-or4 ' test_expect_success 'OPT_COUNTUP() with PARSE_OPT_NODASH works' ' - test-parse-options --expect="boolean: 6" + + + + + + + test-tool parse-options --expect="boolean: 6" + + + + + + ' test_expect_success 'OPT_NUMBER_CALLBACK() works' ' - test-parse-options --expect="integer: 12345" -12345 + test-tool parse-options --expect="integer: 12345" -12345 ' cat >expect <<\EOF @@ -347,7 +325,7 @@ file: (not set) EOF test_expect_success 'negation of OPT_NONEG flags is not ambiguous' ' - test-parse-options --no-ambig >output 2>output.err && + test-tool parse-options --no-ambig >output 2>output.err && test_must_be_empty output.err && test_cmp expect output ' @@ -358,38 +336,38 @@ list: bar list: baz EOF test_expect_success '--list keeps list of strings' ' - test-parse-options --list foo --list=bar --list=baz >output && + test-tool parse-options --list foo --list=bar --list=baz >output && test_cmp expect output ' test_expect_success '--no-list resets list' ' 
- test-parse-options --list=other --list=irrelevant --list=options \ + test-tool parse-options --list=other --list=irrelevant --list=options \ --no-list --list=foo --list=bar --list=baz >output && test_cmp expect output ' test_expect_success 'multiple quiet levels' ' - test-parse-options --expect="quiet: 3" -q -q -q + test-tool parse-options --expect="quiet: 3" -q -q -q ' test_expect_success 'multiple verbose levels' ' - test-parse-options --expect="verbose: 3" -v -v -v + test-tool parse-options --expect="verbose: 3" -v -v -v ' test_expect_success '--no-quiet sets --quiet to 0' ' - test-parse-options --expect="quiet: 0" --no-quiet + test-tool parse-options --expect="quiet: 0" --no-quiet ' test_expect_success '--no-quiet resets multiple -q to 0' ' - test-parse-options --expect="quiet: 0" -q -q -q --no-quiet + test-tool parse-options --expect="quiet: 0" -q -q -q --no-quiet ' test_expect_success '--no-verbose sets verbose to 0' ' - test-parse-options --expect="verbose: 0" --no-verbose + test-tool parse-options --expect="verbose: 0" --no-verbose ' test_expect_success '--no-verbose resets multiple verbose to 0' ' - test-parse-options --expect="verbose: 0" -v -v -v --no-verbose + test-tool parse-options --expect="verbose: 0" -v -v -v --no-verbose ' test_done diff --git a/t/t0060-path-utils.sh b/t/t0060-path-utils.sh index cd74c0a471..c7b53e494b 100755 --- a/t/t0060-path-utils.sh +++ b/t/t0060-path-utils.sh @@ -306,6 +306,8 @@ test_git_path GIT_COMMON_DIR=bar hooks/me bar/hooks/me test_git_path GIT_COMMON_DIR=bar config bar/config test_git_path GIT_COMMON_DIR=bar packed-refs bar/packed-refs test_git_path GIT_COMMON_DIR=bar shallow bar/shallow +test_git_path GIT_COMMON_DIR=bar common bar/common +test_git_path GIT_COMMON_DIR=bar common/file bar/common/file # In the tests below, $(pwd) must be used because it is a native path on # Windows and avoids MSYS's path mangling (which simplifies "foo/../bar" and diff --git a/t/t0064-sha1-array.sh b/t/t0064-sha1-array.sh index 67484502a0..5dda570b9a 100755 --- a/t/t0064-sha1-array.sh +++ b/t/t0064-sha1-array.sh @@ -3,30 +3,30 @@ test_description='basic tests for the SHA1 array implementation' . 
./test-lib.sh -echo20 () { +echoid () { prefix="${1:+$1 }" shift while test $# -gt 0 do - echo "$prefix$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1" + echo "$prefix$ZERO_OID" | sed -e "s/00/$1/g" shift done } test_expect_success 'ordered enumeration' ' - echo20 "" 44 55 88 aa >expect && + echoid "" 44 55 88 aa >expect && { - echo20 append 88 44 aa 55 && + echoid append 88 44 aa 55 && echo for_each_unique } | test-tool sha1-array >actual && test_cmp expect actual ' test_expect_success 'ordered enumeration with duplicate suppression' ' - echo20 "" 44 55 88 aa >expect && + echoid "" 44 55 88 aa >expect && { - echo20 append 88 44 aa 55 && - echo20 append 88 44 aa 55 && + echoid append 88 44 aa 55 && + echoid append 88 44 aa 55 && echo for_each_unique } | test-tool sha1-array >actual && test_cmp expect actual @@ -34,8 +34,8 @@ test_expect_success 'ordered enumeration with duplicate suppression' ' test_expect_success 'lookup' ' { - echo20 append 88 44 aa 55 && - echo20 lookup 55 + echoid append 88 44 aa 55 && + echoid lookup 55 } | test-tool sha1-array >actual && n=$(cat actual) && test "$n" -eq 1 @@ -43,8 +43,8 @@ test_expect_success 'lookup' ' test_expect_success 'lookup non-existing entry' ' { - echo20 append 88 44 aa 55 && - echo20 lookup 33 + echoid append 88 44 aa 55 && + echoid lookup 33 } | test-tool sha1-array >actual && n=$(cat actual) && test "$n" -lt 0 @@ -52,9 +52,9 @@ test_expect_success 'lookup non-existing entry' ' test_expect_success 'lookup with duplicates' ' { - echo20 append 88 44 aa 55 && - echo20 append 88 44 aa 55 && - echo20 lookup 55 + echoid append 88 44 aa 55 && + echoid append 88 44 aa 55 && + echoid lookup 55 } | test-tool sha1-array >actual && n=$(cat actual) && test "$n" -ge 2 && @@ -63,19 +63,24 @@ test_expect_success 'lookup with duplicates' ' test_expect_success 'lookup non-existing entry with duplicates' ' { - echo20 append 88 44 aa 55 && - echo20 append 88 44 aa 55 && - echo20 lookup 66 + echoid append 88 44 aa 55 && + echoid append 88 44 aa 55 && + echoid lookup 66 } | test-tool sha1-array >actual && n=$(cat actual) && test "$n" -lt 0 ' test_expect_success 'lookup with almost duplicate values' ' + # n-1 5s + root=$(echoid "" 55) && + root=${root%5} && { - echo "append 5555555555555555555555555555555555555555" && - echo "append 555555555555555555555555555555555555555f" && - echo20 lookup 55 + id1="${root}5" && + id2="${root}f" && + echo "append $id1" && + echo "append $id2" && + echoid lookup 55 } | test-tool sha1-array >actual && n=$(cat actual) && test "$n" -eq 0 @@ -83,8 +88,8 @@ test_expect_success 'lookup with almost duplicate values' ' test_expect_success 'lookup with single duplicate value' ' { - echo20 append 55 55 && - echo20 lookup 55 + echoid append 55 55 && + echoid lookup 55 } | test-tool sha1-array >actual && n=$(cat actual) && test "$n" -ge 0 && diff --git a/t/t0205-gettext-poison.sh b/t/t0205-gettext-poison.sh index 438e778d6a..a06269f38a 100755 --- a/t/t0205-gettext-poison.sh +++ b/t/t0205-gettext-poison.sh @@ -5,13 +5,15 @@ test_description='Gettext Shell poison' +GIT_TEST_GETTEXT_POISON=YesPlease +export GIT_TEST_GETTEXT_POISON . 
./lib-gettext.sh -test_expect_success GETTEXT_POISON 'sanity: $GIT_INTERNAL_GETTEXT_SH_SCHEME" is poison' ' +test_expect_success 'sanity: $GIT_INTERNAL_GETTEXT_SH_SCHEME" is poison' ' test "$GIT_INTERNAL_GETTEXT_SH_SCHEME" = "poison" ' -test_expect_success GETTEXT_POISON 'gettext: our gettext() fallback has poison semantics' ' +test_expect_success 'gettext: our gettext() fallback has poison semantics' ' printf "# GETTEXT POISON #" >expect && gettext "test" >actual && test_cmp expect actual && @@ -20,7 +22,7 @@ test_expect_success GETTEXT_POISON 'gettext: our gettext() fallback has poison s test_cmp expect actual ' -test_expect_success GETTEXT_POISON 'eval_gettext: our eval_gettext() fallback has poison semantics' ' +test_expect_success 'eval_gettext: our eval_gettext() fallback has poison semantics' ' printf "# GETTEXT POISON #" >expect && eval_gettext "test" >actual && test_cmp expect actual && diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh index 7762f89b3d..ba3887f178 100755 --- a/t/t0410-partial-clone.sh +++ b/t/t0410-partial-clone.sh @@ -234,11 +234,56 @@ test_expect_success 'rev-list stops traversal at missing and promised commit' ' git -C repo config core.repositoryformatversion 1 && git -C repo config extensions.partialclone "arbitrary string" && - git -C repo rev-list --exclude-promisor-objects --objects bar >out && + GIT_TEST_COMMIT_GRAPH=0 git -C repo rev-list --exclude-promisor-objects --objects bar >out && grep $(git -C repo rev-parse bar) out && ! grep $FOO out ' +test_expect_success 'missing tree objects with --missing=allow-promisor and --exclude-promisor-objects' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + test_commit -C repo bar && + test_commit -C repo baz && + + promise_and_delete $(git -C repo rev-parse bar^{tree}) && + promise_and_delete $(git -C repo rev-parse foo^{tree}) && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + + git -C repo rev-list --missing=allow-promisor --objects HEAD >objs 2>rev_list_err && + test_must_be_empty rev_list_err && + # 3 commits, 3 blobs, and 1 tree + test_line_count = 7 objs && + + # Do the same for --exclude-promisor-objects, but with all trees gone. 
+ promise_and_delete $(git -C repo rev-parse baz^{tree}) && + git -C repo rev-list --exclude-promisor-objects --objects HEAD >objs 2>rev_list_err && + test_must_be_empty rev_list_err && + # 3 commits, no blobs or trees + test_line_count = 3 objs +' + +test_expect_success 'missing non-root tree object and rev-list' ' + rm -rf repo && + test_create_repo repo && + mkdir repo/dir && + echo foo >repo/dir/foo && + git -C repo add dir/foo && + git -C repo commit -m "commit dir/foo" && + + promise_and_delete $(git -C repo rev-parse HEAD:dir) && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + + git -C repo rev-list --missing=allow-any --objects HEAD >objs 2>rev_list_err && + test_must_be_empty rev_list_err && + # 1 commit and 1 tree + test_line_count = 2 objs +' + test_expect_success 'rev-list stops traversal at missing and promised tree' ' rm -rf repo && test_create_repo repo && diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh index 7f19d591f2..43c4be1e5e 100755 --- a/t/t1006-cat-file.sh +++ b/t/t1006-cat-file.sh @@ -140,15 +140,17 @@ test_expect_success '--batch-check without %(rest) considers whole line' ' test_cmp expect actual ' +test_oid_init + tree_sha1=$(git write-tree) -tree_size=33 +tree_size=$(($(test_oid rawsz) + 13)) tree_pretty_content="100644 blob $hello_sha1 hello" run_tests 'tree' $tree_sha1 $tree_size "" "$tree_pretty_content" commit_message="Initial commit" commit_sha1=$(echo_without_newline "$commit_message" | git commit-tree $tree_sha1) -commit_size=177 +commit_size=$(($(test_oid hexsz) + 137)) commit_content="tree $tree_sha1 author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> 0000000000 +0000 committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 0000000000 +0000 @@ -218,8 +220,8 @@ test_expect_success "--batch-check for a non-existent hash" ' test "0000000000000000000000000000000000000042 missing 0000000000000000000000000000000000000084 missing" = \ "$( ( echo 0000000000000000000000000000000000000042; - echo_without_newline 0000000000000000000000000000000000000084; ) \ - | git cat-file --batch-check)" + echo_without_newline 0000000000000000000000000000000000000084; ) | + git cat-file --batch-check)" ' test_expect_success "--batch for an existent and a non-existent hash" ' @@ -227,8 +229,8 @@ test_expect_success "--batch for an existent and a non-existent hash" ' $tag_content 0000000000000000000000000000000000000000 missing" = \ "$( ( echo $tag_sha1; - echo_without_newline 0000000000000000000000000000000000000000; ) \ - | git cat-file --batch)" + echo_without_newline 0000000000000000000000000000000000000000; ) | + git cat-file --batch)" ' test_expect_success "--batch-check for an empty line" ' diff --git a/t/t1060-object-corruption.sh b/t/t1060-object-corruption.sh index ac1f189fd2..4feb65157d 100755 --- a/t/t1060-object-corruption.sh +++ b/t/t1060-object-corruption.sh @@ -117,8 +117,10 @@ test_expect_failure 'clone --local detects misnamed objects' ' ' test_expect_success 'fetch into corrupted repo with index-pack' ' + cp -R bit-error bit-error-cp && + test_when_finished "rm -rf bit-error-cp" && ( - cd bit-error && + cd bit-error-cp && test_must_fail git -c transfer.unpackLimit=1 \ fetch ../no-bit-error 2>stderr && test_i18ngrep ! 
-i collision stderr diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh index 1f61eb3e88..090b7fc3d3 100755 --- a/t/t1090-sparse-checkout-scope.sh +++ b/t/t1090-sparse-checkout-scope.sh @@ -31,6 +31,20 @@ test_expect_success 'perform sparse checkout of master' ' test_path_is_file c ' +test_expect_success 'checkout -b checkout.optimizeNewBranch interaction' ' + cp .git/info/sparse-checkout .git/info/sparse-checkout.bak && + test_when_finished " + mv -f .git/info/sparse-checkout.bak .git/info/sparse-checkout + git checkout master + " && + echo "/b" >>.git/info/sparse-checkout && + test "$(git ls-files -t b)" = "S b" && + git -c checkout.optimizeNewBranch=true checkout -b fast && + test "$(git ls-files -t b)" = "S b" && + git checkout -b slow && + test "$(git ls-files -t b)" = "H b" +' + test_expect_success 'merge feature branch into sparse checkout of master' ' git merge feature && test_path_is_file a && @@ -49,4 +63,37 @@ test_expect_success 'return to full checkout of master' ' test "$(cat b)" = "modified" ' +test_expect_success 'in partial clone, sparse checkout only fetches needed blobs' ' + test_create_repo server && + git clone "file://$(pwd)/server" client && + + test_config -C server uploadpack.allowfilter 1 && + test_config -C server uploadpack.allowanysha1inwant 1 && + echo a >server/a && + echo bb >server/b && + mkdir server/c && + echo ccc >server/c/c && + git -C server add a b c/c && + git -C server commit -m message && + + test_config -C client core.sparsecheckout 1 && + test_config -C client extensions.partialclone origin && + echo "!/*" >client/.git/info/sparse-checkout && + echo "/a" >>client/.git/info/sparse-checkout && + git -C client fetch --filter=blob:none origin && + git -C client checkout FETCH_HEAD && + + git -C client rev-list HEAD \ + --quiet --objects --missing=print >unsorted_actual && + ( + printf "?" && + git hash-object server/b && + printf "?" 
&& + git hash-object server/c/c + ) >unsorted_expect && + sort unsorted_actual >actual && + sort unsorted_expect >expect && + test_cmp expect actual +' + test_done diff --git a/t/t1300-config.sh b/t/t1300-config.sh index cdf1fed5d1..9652b241c7 100755 --- a/t/t1300-config.sh +++ b/t/t1300-config.sh @@ -76,15 +76,11 @@ EOF test_expect_success 'non-match result' 'test_cmp expect .git/config' test_expect_success 'find mixed-case key by canonical name' ' - echo Second >expect && - git config cores.whatever >actual && - test_cmp expect actual + test_cmp_config Second cores.whatever ' test_expect_success 'find mixed-case key by non-canonical name' ' - echo Second >expect && - git config CoReS.WhAtEvEr >actual && - test_cmp expect actual + test_cmp_config Second CoReS.WhAtEvEr ' test_expect_success 'subsections are not canonicalized by git-config' ' @@ -94,12 +90,8 @@ test_expect_success 'subsections are not canonicalized by git-config' ' [section "SubSection"] key = two EOF - echo one >expect && - git config section.subsection.key >actual && - test_cmp expect actual && - echo two >expect && - git config section.SubSection.key >actual && - test_cmp expect actual + test_cmp_config one section.subsection.key && + test_cmp_config two section.SubSection.key ' cat > .git/config <<\EOF @@ -212,9 +204,7 @@ test_expect_success 'really really mean test' ' ' test_expect_success 'get value' ' - echo alpha >expect && - git config beta.haha >actual && - test_cmp expect actual + test_cmp_config alpha beta.haha ' cat > expect << EOF @@ -251,15 +241,11 @@ test_expect_success 'non-match' ' ' test_expect_success 'non-match value' ' - echo wow >expect && - git config --get nextsection.nonewline !for >actual && - test_cmp expect actual + test_cmp_config wow --get nextsection.nonewline !for ' test_expect_success 'multi-valued get returns final one' ' - echo "wow2 for me" >expect && - git config --get nextsection.nonewline >actual && - test_cmp expect actual + test_cmp_config "wow2 for me" --get nextsection.nonewline ' test_expect_success 'multi-valued get-all returns all' ' @@ -520,21 +506,11 @@ test_expect_success 'editing stdin is an error' ' test_expect_success 'refer config from subdirectory' ' mkdir x && - ( - cd x && - echo strasse >expect && - git config --get --file ../other-config ein.bahn >actual && - test_cmp expect actual - ) - + test_cmp_config -C x strasse --get --file ../other-config ein.bahn ' test_expect_success 'refer config from subdirectory via --file' ' - ( - cd x && - git config --file=../other-config --get ein.bahn >actual && - test_cmp expect actual - ) + test_cmp_config -C x strasse --file=../other-config --get ein.bahn ' cat > expect << EOF @@ -688,16 +664,13 @@ test_expect_success numbers ' test_expect_success '--int is at least 64 bits' ' git config giga.watts 121g && - echo 129922760704 >expect && - git config --int --get giga.watts >actual && - test_cmp expect actual + echo >expect && + test_cmp_config 129922760704 --int --get giga.watts ' test_expect_success 'invalid unit' ' git config aninvalid.unit "1auto" && - echo 1auto >expect && - git config aninvalid.unit >actual && - test_cmp expect actual && + test_cmp_config 1auto aninvalid.unit && test_must_fail git config --int --get aninvalid.unit 2>actual && test_i18ngrep "bad numeric config value .1auto. for .aninvalid.unit. 
in file .git/config: invalid unit" actual ' @@ -1001,7 +974,7 @@ EOF test_expect_success 'value continued on next line' ' git config --list > result && - test_cmp result expect + test_cmp expect result ' cat > .git/config <<\EOF @@ -1039,9 +1012,7 @@ test_expect_success '--null --get-regexp' ' test_expect_success 'inner whitespace kept verbatim' ' git config section.val "foo bar" && - echo "foo bar" >expect && - git config section.val >actual && - test_cmp expect actual + test_cmp_config "foo bar" section.val ' test_expect_success SYMLINKS 'symlinked configuration' ' @@ -1770,8 +1741,9 @@ test_expect_success '--show-origin stdin with file include' ' cat >expect <<-EOF && file:$INCLUDE_DIR/stdin.include include EOF - echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" \ - | git config --show-origin --includes --file - user.stdin >output && + echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" | + git config --show-origin --includes --file - user.stdin >output && + test_cmp expect output ' @@ -1808,21 +1780,15 @@ big = 1M EOF test_expect_success 'identical modern --type specifiers are allowed' ' - git config --type=int --type=int core.big >actual && - echo 1048576 >expect && - test_cmp expect actual + test_cmp_config 1048576 --type=int --type=int core.big ' test_expect_success 'identical legacy --type specifiers are allowed' ' - git config --int --int core.big >actual && - echo 1048576 >expect && - test_cmp expect actual + test_cmp_config 1048576 --int --int core.big ' test_expect_success 'identical mixed --type specifiers are allowed' ' - git config --int --type=int core.big >actual && - echo 1048576 >expect && - test_cmp expect actual + test_cmp_config 1048576 --int --type=int core.big ' test_expect_success 'non-identical modern --type specifiers are not allowed' ' @@ -1841,21 +1807,15 @@ test_expect_success 'non-identical mixed --type specifiers are not allowed' ' ' test_expect_success '--type allows valid type specifiers' ' - echo "true" >expect && - git config --type=bool core.foo >actual && - test_cmp expect actual + test_cmp_config true --type=bool core.foo ' test_expect_success '--no-type unsets type specifiers' ' - echo "10" >expect && - git config --type=bool --no-type core.number >actual && - test_cmp expect actual + test_cmp_config 10 --type=bool --no-type core.number ' test_expect_success 'unset type specifiers may be reset to conflicting ones' ' - echo 1048576 >expect && - git config --type=bool --no-type --type=int core.big >actual && - test_cmp expect actual + test_cmp_config 1048576 --type=bool --no-type --type=int core.big ' test_expect_success '--type rejects unknown specifiers' ' @@ -1881,7 +1841,7 @@ test_expect_success '--replace-all does not invent newlines' ' Qkey = b EOF git config --replace-all abc.key b && - test_cmp .git/config expect + test_cmp expect .git/config ' test_done diff --git a/t/t1303-wacky-config.sh b/t/t1303-wacky-config.sh index 3b92083e19..0000e664e7 100755 --- a/t/t1303-wacky-config.sh +++ b/t/t1303-wacky-config.sh @@ -14,7 +14,7 @@ setup() { check() { echo "$2" >expected git config --get "$1" >actual 2>&1 - test_cmp actual expected + test_cmp expected actual } # 'check section.key regex value' verifies that the entry for @@ -22,7 +22,7 @@ check() { check_regex() { echo "$3" >expected git config --get "$1" "$2" >actual 2>&1 - test_cmp actual expected + test_cmp expected actual } test_expect_success 'modify same key' ' diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh index b72beebe42..1fbd940408 100755 --- a/t/t1400-update-ref.sh +++ 
b/t/t1400-update-ref.sh @@ -346,7 +346,7 @@ test_expect_success "verifying $m's log (logged by config)" ' git update-ref $m $D cat >.git/logs/$m <<EOF -0000000000000000000000000000000000000000 $C $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150320 -0500 +$Z $C $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150320 -0500 $C $A $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150350 -0500 $A $B $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150380 -0500 $F $Z $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150680 -0500 diff --git a/t/t1404-update-ref-errors.sh b/t/t1404-update-ref-errors.sh index 2a42a589a4..51a4f4c0ac 100755 --- a/t/t1404-update-ref-errors.sh +++ b/t/t1404-update-ref-errors.sh @@ -559,9 +559,9 @@ test_expect_success 'no bogus intermediate values during delete' ' { # Note: the following command is intentionally run in the # background. We increase the timeout so that `update-ref` - # attempts to acquire the `packed-refs` lock for longer than - # it takes for us to do the check then delete it: - git -c core.packedrefstimeout=3000 update-ref -d $prefix/foo & + # attempts to acquire the `packed-refs` lock for much longer + # than it takes for us to do the check then delete it: + git -c core.packedrefstimeout=30000 update-ref -d $prefix/foo & } && pid2=$! && # Give update-ref plenty of time to get to the point where it tries diff --git a/t/t1405-main-ref-store.sh b/t/t1405-main-ref-store.sh index a74c38b5fb..331899ddc4 100755 --- a/t/t1405-main-ref-store.sh +++ b/t/t1405-main-ref-store.sh @@ -54,7 +54,7 @@ test_expect_success 'for_each_ref(refs/heads/)' ' ' test_expect_success 'for_each_ref() is sorted' ' - $RUN for-each-ref refs/heads/ | cut -c 42- >actual && + $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual && sort actual > expected && test_cmp expected actual ' @@ -71,7 +71,7 @@ test_expect_success 'verify_ref(new-master)' ' ' test_expect_success 'for_each_reflog()' ' - $RUN for-each-reflog | sort -k2 | cut -c 42- >actual && + $RUN for-each-reflog | sort -k2 | cut -d" " -f 2- >actual && cat >expected <<-\EOF && HEAD 0x1 refs/heads/master 0x0 diff --git a/t/t1406-submodule-ref-store.sh b/t/t1406-submodule-ref-store.sh index e093782cc3..d199d872fb 100755 --- a/t/t1406-submodule-ref-store.sh +++ b/t/t1406-submodule-ref-store.sh @@ -39,7 +39,7 @@ test_expect_success 'rename_refs() not allowed' ' ' test_expect_success 'for_each_ref(refs/heads/)' ' - $RUN for-each-ref refs/heads/ | cut -c 42- >actual && + $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual && cat >expected <<-\EOF && master 0x0 new-master 0x0 @@ -48,7 +48,7 @@ test_expect_success 'for_each_ref(refs/heads/)' ' ' test_expect_success 'for_each_ref() is sorted' ' - $RUN for-each-ref refs/heads/ | cut -c 42- >actual && + $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual && sort actual > expected && test_cmp expected actual ' @@ -65,7 +65,7 @@ test_expect_success 'verify_ref(new-master)' ' ' test_expect_success 'for_each_reflog()' ' - $RUN for-each-reflog | sort | cut -c 42- >actual && + $RUN for-each-reflog | sort | cut -d" " -f 2- >actual && cat >expected <<-\EOF && HEAD 0x1 refs/heads/master 0x0 diff --git a/t/t1407-worktree-ref-store.sh b/t/t1407-worktree-ref-store.sh index 4623ae15c4..9a84858118 100755 --- a/t/t1407-worktree-ref-store.sh +++ b/t/t1407-worktree-ref-store.sh @@ -58,7 +58,7 @@ test_expect_success 'for_each_reflog()' ' mkdir -p .git/worktrees/wt/logs/refs/bisect && echo $ZERO_OID > .git/worktrees/wt/logs/refs/bisect/wt-random && - $RWT for-each-reflog | cut -c 42- | sort >actual && + 
$RWT for-each-reflog | cut -d" " -f 2- | sort >actual && cat >expected <<-\EOF && HEAD 0x1 PSEUDO-WT 0x0 @@ -68,7 +68,7 @@ test_expect_success 'for_each_reflog()' ' EOF test_cmp expected actual && - $RMAIN for-each-reflog | cut -c 42- | sort >actual && + $RMAIN for-each-reflog | cut -d" " -f 2- | sort >actual && cat >expected <<-\EOF && HEAD 0x1 PSEUDO-MAIN 0x0 diff --git a/t/t1410-reflog.sh b/t/t1410-reflog.sh index 388b0611d8..3e053532eb 100755 --- a/t/t1410-reflog.sh +++ b/t/t1410-reflog.sh @@ -368,4 +368,19 @@ test_expect_success 'continue walking past root commits' ' ) ' +test_expect_success 'expire with multiple worktrees' ' + git init main-wt && + ( + cd main-wt && + test_tick && + test_commit foo && + git worktree add link-wt && + test_tick && + test_commit -C link-wt foobar && + test_tick && + git reflog expire --verbose --all --expire=$test_tick && + test_must_be_empty .git/worktrees/link-wt/logs/HEAD + ) +' + test_done diff --git a/t/t1415-worktree-refs.sh b/t/t1415-worktree-refs.sh new file mode 100755 index 0000000000..b664e51250 --- /dev/null +++ b/t/t1415-worktree-refs.sh @@ -0,0 +1,79 @@ +#!/bin/sh + +test_description='per-worktree refs' + +. ./test-lib.sh + +test_expect_success 'setup' ' + test_commit initial && + test_commit wt1 && + test_commit wt2 && + git worktree add wt1 wt1 && + git worktree add wt2 wt2 && + git checkout initial && + git update-ref refs/worktree/foo HEAD && + git -C wt1 update-ref refs/worktree/foo HEAD && + git -C wt2 update-ref refs/worktree/foo HEAD +' + +test_expect_success 'refs/worktree must not be packed' ' + git pack-refs --all && + test_path_is_missing .git/refs/tags/wt1 && + test_path_is_file .git/refs/worktree/foo && + test_path_is_file .git/worktrees/wt1/refs/worktree/foo && + test_path_is_file .git/worktrees/wt2/refs/worktree/foo +' + +test_expect_success 'refs/worktree are per-worktree' ' + test_cmp_rev worktree/foo initial && + ( cd wt1 && test_cmp_rev worktree/foo wt1 ) && + ( cd wt2 && test_cmp_rev worktree/foo wt2 ) +' + +test_expect_success 'resolve main-worktree/HEAD' ' + test_cmp_rev main-worktree/HEAD initial && + ( cd wt1 && test_cmp_rev main-worktree/HEAD initial ) && + ( cd wt2 && test_cmp_rev main-worktree/HEAD initial ) +' + +test_expect_success 'ambiguous main-worktree/HEAD' ' + mkdir -p .git/refs/heads/main-worktree && + test_when_finished rm -f .git/refs/heads/main-worktree/HEAD && + cp .git/HEAD .git/refs/heads/main-worktree/HEAD && + git rev-parse main-worktree/HEAD 2>warn && + grep "main-worktree/HEAD.*ambiguous" warn +' + +test_expect_success 'resolve worktrees/xx/HEAD' ' + test_cmp_rev worktrees/wt1/HEAD wt1 && + ( cd wt1 && test_cmp_rev worktrees/wt1/HEAD wt1 ) && + ( cd wt2 && test_cmp_rev worktrees/wt1/HEAD wt1 ) +' + +test_expect_success 'ambiguous worktrees/xx/HEAD' ' + mkdir -p .git/refs/heads/worktrees/wt1 && + test_when_finished rm -f .git/refs/heads/worktrees/wt1/HEAD && + cp .git/HEAD .git/refs/heads/worktrees/wt1/HEAD && + git rev-parse worktrees/wt1/HEAD 2>warn && + grep "worktrees/wt1/HEAD.*ambiguous" warn +' + +test_expect_success 'reflog of main-worktree/HEAD' ' + git reflog HEAD | sed "s/HEAD/main-worktree\/HEAD/" >expected && + git reflog main-worktree/HEAD >actual && + test_cmp expected actual && + git -C wt1 reflog main-worktree/HEAD >actual.wt1 && + test_cmp expected actual.wt1 +' + +test_expect_success 'reflog of worktrees/xx/HEAD' ' + git -C wt2 reflog HEAD | sed "s/HEAD/worktrees\/wt2\/HEAD/" >expected && + git reflog worktrees/wt2/HEAD >actual && + test_cmp expected actual && + git -C wt1 
reflog worktrees/wt2/HEAD >actual.wt1 && + test_cmp expected actual.wt1 && + git -C wt2 reflog worktrees/wt2/HEAD >actual.wt2 && + test_cmp expected actual.wt2 +' + +test_done diff --git a/t/t1450-fsck.sh b/t/t1450-fsck.sh index b5677d26a4..e20e8fa830 100755 --- a/t/t1450-fsck.sh +++ b/t/t1450-fsck.sh @@ -101,6 +101,41 @@ test_expect_success 'HEAD link pointing at a funny place' ' grep "HEAD points to something strange" out ' +test_expect_success 'HEAD link pointing at a funny object (from different wt)' ' + test_when_finished "mv .git/SAVED_HEAD .git/HEAD" && + test_when_finished "rm -rf .git/worktrees wt" && + git worktree add wt && + mv .git/HEAD .git/SAVED_HEAD && + echo $ZERO_OID >.git/HEAD && + # avoid corrupt/broken HEAD from interfering with repo discovery + test_must_fail git -C wt fsck 2>out && + grep "main-worktree/HEAD: detached HEAD points" out +' + +test_expect_success 'other worktree HEAD link pointing at a funny object' ' + test_when_finished "rm -rf .git/worktrees other" && + git worktree add other && + echo $ZERO_OID >.git/worktrees/other/HEAD && + test_must_fail git fsck 2>out && + grep "worktrees/other/HEAD: detached HEAD points" out +' + +test_expect_success 'other worktree HEAD link pointing at missing object' ' + test_when_finished "rm -rf .git/worktrees other" && + git worktree add other && + echo "Contents missing from repo" | git hash-object --stdin >.git/worktrees/other/HEAD && + test_must_fail git fsck 2>out && + grep "worktrees/other/HEAD: invalid sha1 pointer" out +' + +test_expect_success 'other worktree HEAD link pointing at a funny place' ' + test_when_finished "rm -rf .git/worktrees other" && + git worktree add other && + echo "ref: refs/funny/place" >.git/worktrees/other/HEAD && + test_must_fail git fsck 2>out && + grep "worktrees/other/HEAD points to something strange" out +' + test_expect_success 'email without @ is okay' ' git cat-file commit HEAD >basis && sed "s/@/AT/" basis >okay && diff --git a/t/t1700-split-index.sh b/t/t1700-split-index.sh index f053bf83eb..4667e1a190 100755 --- a/t/t1700-split-index.sh +++ b/t/t1700-split-index.sh @@ -6,10 +6,12 @@ test_description='split index mode tests' # We need total control of index splitting here sane_unset GIT_TEST_SPLIT_INDEX -# A couple of tests expect the index to have a specific checksum, -# but the presence of the optional FSMN extension would interfere -# with those checks, so disable it in this test script. -sane_unset GIT_FSMONITOR_TEST + +# Testing a hard coded SHA against an index with an extension +# that can vary from run to run is problematic so we disable +# those extensions. +sane_unset GIT_TEST_FSMONITOR +sane_unset GIT_TEST_INDEX_THREADS # Create a file named as $1 with content read from stdin. # Set the file's mtime to a few seconds in the past to avoid racy situations. @@ -23,6 +25,8 @@ test_expect_success 'enable split index' ' git update-index --split-index && test-tool dump-split-index .git/index >actual && indexversion=$(test-tool index-version <.git/index) && + + # NEEDSWORK: Stop hard-coding checksums. 
if test "$indexversion" = "4" then own=432ef4b63f32193984f339431fd50ca796493569 @@ -31,6 +35,7 @@ test_expect_success 'enable split index' ' own=8299b0bcd1ac364e5f1d7768efb62fa2da79a339 base=39d890139ee5356c7ef572216cebcd27aa41f9df fi && + cat >expect <<-EOF && own $own base $base @@ -379,6 +384,26 @@ test_expect_success 'check splitIndex.sharedIndexExpire set to "never" and "now" test $(ls .git/sharedindex.* | wc -l) -le 2 ' +test_expect_success POSIXPERM 'same mode for index & split index' ' + git init same-mode && + ( + cd same-mode && + test_commit A && + test_modebits .git/index >index_mode && + test_must_fail git config core.sharedRepository && + git -c core.splitIndex=true status && + shared=$(ls .git/sharedindex.*) && + case "$shared" in + *" "*) + # we have more than one??? + false ;; + *) + test_modebits "$shared" >split_index_mode && + test_cmp index_mode split_index_mode ;; + esac + ) +' + while read -r mode modebits do test_expect_success POSIXPERM "split index respects core.sharedrepository $mode" ' diff --git a/t/t2000-checkout-cache-clash.sh b/t/t2000-checkout-cache-clash.sh deleted file mode 100755 index de3edb5d57..0000000000 --- a/t/t2000-checkout-cache-clash.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2005 Junio C Hamano -# - -test_description='git checkout-index test. - -This test registers the following filesystem structure in the -cache: - - path0 - a file - path1/file1 - a file in a directory - -And then tries to checkout in a work tree that has the following: - - path0/file0 - a file in a directory - path1 - a file - -The git checkout-index command should fail when attempting to checkout -path0, finding it is occupied by a directory, and path1/file1, finding -path1 is occupied by a non-directory. With "-f" flag, it should remove -the conflicting paths and succeed. -' -. ./test-lib.sh - -date >path0 -mkdir path1 -date >path1/file1 - -test_expect_success \ - 'git update-index --add various paths.' \ - 'git update-index --add path0 path1/file1' - -rm -fr path0 path1 -mkdir path0 -date >path0/file0 -date >path1 - -test_expect_success \ - 'git checkout-index without -f should fail on conflicting work tree.' \ - 'test_must_fail git checkout-index -a' - -test_expect_success \ - 'git checkout-index with -f should succeed.' \ - 'git checkout-index -f -a' - -test_expect_success \ - 'git checkout-index conflicting paths.' \ - 'test -f path0 && test -d path1 && test -f path1/file1' - -test_expect_success SYMLINKS 'checkout-index -f twice with --prefix' ' - mkdir -p tar/get && - ln -s tar/get there && - echo first && - git checkout-index -a -f --prefix=there/ && - echo second && - git checkout-index -a -f --prefix=there/ -' - -test_done diff --git a/t/t2000-conflict-when-checking-files-out.sh b/t/t2000-conflict-when-checking-files-out.sh new file mode 100755 index 0000000000..f18616ad2b --- /dev/null +++ b/t/t2000-conflict-when-checking-files-out.sh @@ -0,0 +1,135 @@ +#!/bin/sh +# +# Copyright (c) 2005 Junio C Hamano +# + +test_description='git conflicts when checking files out test.' + +# The first test registers the following filesystem structure in the +# cache: +# +# path0 - a file +# path1/file1 - a file in a directory +# +# And then tries to checkout in a work tree that has the following: +# +# path0/file0 - a file in a directory +# path1 - a file +# +# The git checkout-index command should fail when attempting to checkout +# path0, finding it is occupied by a directory, and path1/file1, finding +# path1 is occupied by a non-directory. 
With "-f" flag, it should remove +# the conflicting paths and succeed. + +. ./test-lib.sh + +show_files() { + # show filesystem files, just [-dl] for type and name + find path? -ls | + sed -e 's/^[0-9]* * [0-9]* * \([-bcdl]\)[^ ]* *[0-9]* *[^ ]* *[^ ]* *[0-9]* [A-Z][a-z][a-z] [0-9][0-9] [^ ]* /fs: \1 /' + # what's in the cache, just mode and name + git ls-files --stage | + sed -e 's/^\([0-9]*\) [0-9a-f]* [0-3] /ca: \1 /' + # what's in the tree, just mode and name. + git ls-tree -r "$1" | + sed -e 's/^\([0-9]*\) [^ ]* [0-9a-f]* /tr: \1 /' +} + +date >path0 +mkdir path1 +date >path1/file1 + +test_expect_success \ + 'git update-index --add various paths.' \ + 'git update-index --add path0 path1/file1' + +rm -fr path0 path1 +mkdir path0 +date >path0/file0 +date >path1 + +test_expect_success \ + 'git checkout-index without -f should fail on conflicting work tree.' \ + 'test_must_fail git checkout-index -a' + +test_expect_success \ + 'git checkout-index with -f should succeed.' \ + 'git checkout-index -f -a' + +test_expect_success \ + 'git checkout-index conflicting paths.' \ + 'test -f path0 && test -d path1 && test -f path1/file1' + +test_expect_success SYMLINKS 'checkout-index -f twice with --prefix' ' + mkdir -p tar/get && + ln -s tar/get there && + echo first && + git checkout-index -a -f --prefix=there/ && + echo second && + git checkout-index -a -f --prefix=there/ +' + +# The second test registers the following filesystem structure in the cache: +# +# path2/file0 - a file in a directory +# path3/file1 - a file in a directory +# +# and attempts to check it out when the work tree has: +# +# path2/file0 - a file in a directory +# path3 - a symlink pointing at "path2" +# +# Checkout cache should fail to extract path3/file1 because the leading +# path path3 is occupied by a non-directory. With "-f" it should remove +# the symlink path3 and create directory path3 and file path3/file1. + +mkdir path2 +date >path2/file0 +test_expect_success \ + 'git update-index --add path2/file0' \ + 'git update-index --add path2/file0' +test_expect_success \ + 'writing tree out with git write-tree' \ + 'tree1=$(git write-tree)' +test_debug 'show_files $tree1' + +mkdir path3 +date >path3/file1 +test_expect_success \ + 'git update-index --add path3/file1' \ + 'git update-index --add path3/file1' +test_expect_success \ + 'writing tree out with git write-tree' \ + 'tree2=$(git write-tree)' +test_debug 'show_files $tree2' + +rm -fr path3 +test_expect_success \ + 'read previously written tree and checkout.' \ + 'git read-tree -m $tree1 && git checkout-index -f -a' +test_debug 'show_files $tree1' + +test_expect_success \ + 'add a symlink' \ + 'test_ln_s_add path2 path3' +test_expect_success \ + 'writing tree out with git write-tree' \ + 'tree3=$(git write-tree)' +test_debug 'show_files $tree3' + +# Morten says "Got that?" here. +# Test begins. + +test_expect_success \ + 'read previously written tree and checkout.' \ + 'git read-tree $tree2 && git checkout-index -f -a' +test_debug 'show_files $tree2' + +test_expect_success \ + 'checking out conflicting path with -f' \ + 'test ! -h path2 && test -d path2 && + test ! -h path3 && test -d path3 && + test ! -h path2/file0 && test -f path2/file0 && + test ! 
-h path3/file1 && test -f path3/file1' + +test_done diff --git a/t/t2001-checkout-cache-clash.sh b/t/t2001-checkout-cache-clash.sh deleted file mode 100755 index 1fc8e634b7..0000000000 --- a/t/t2001-checkout-cache-clash.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2005 Junio C Hamano -# - -test_description='git checkout-index test. - -This test registers the following filesystem structure in the cache: - - path0/file0 - a file in a directory - path1/file1 - a file in a directory - -and attempts to check it out when the work tree has: - - path0/file0 - a file in a directory - path1 - a symlink pointing at "path0" - -Checkout cache should fail to extract path1/file1 because the leading -path path1 is occupied by a non-directory. With "-f" it should remove -the symlink path1 and create directory path1 and file path1/file1. -' -. ./test-lib.sh - -show_files() { - # show filesystem files, just [-dl] for type and name - find path? -ls | - sed -e 's/^[0-9]* * [0-9]* * \([-bcdl]\)[^ ]* *[0-9]* *[^ ]* *[^ ]* *[0-9]* [A-Z][a-z][a-z] [0-9][0-9] [^ ]* /fs: \1 /' - # what's in the cache, just mode and name - git ls-files --stage | - sed -e 's/^\([0-9]*\) [0-9a-f]* [0-3] /ca: \1 /' - # what's in the tree, just mode and name. - git ls-tree -r "$1" | - sed -e 's/^\([0-9]*\) [^ ]* [0-9a-f]* /tr: \1 /' -} - -mkdir path0 -date >path0/file0 -test_expect_success \ - 'git update-index --add path0/file0' \ - 'git update-index --add path0/file0' -test_expect_success \ - 'writing tree out with git write-tree' \ - 'tree1=$(git write-tree)' -test_debug 'show_files $tree1' - -mkdir path1 -date >path1/file1 -test_expect_success \ - 'git update-index --add path1/file1' \ - 'git update-index --add path1/file1' -test_expect_success \ - 'writing tree out with git write-tree' \ - 'tree2=$(git write-tree)' -test_debug 'show_files $tree2' - -rm -fr path1 -test_expect_success \ - 'read previously written tree and checkout.' \ - 'git read-tree -m $tree1 && git checkout-index -f -a' -test_debug 'show_files $tree1' - -test_expect_success \ - 'add a symlink' \ - 'test_ln_s_add path0 path1' -test_expect_success \ - 'writing tree out with git write-tree' \ - 'tree3=$(git write-tree)' -test_debug 'show_files $tree3' - -# Morten says "Got that?" here. -# Test begins. - -test_expect_success \ - 'read previously written tree and checkout.' \ - 'git read-tree $tree2 && git checkout-index -f -a' -test_debug 'show_files $tree2' - -test_expect_success \ - 'checking out conflicting path with -f' \ - 'test ! -h path0 && test -d path0 && - test ! -h path1 && test -d path1 && - test ! -h path0/file0 && test -f path0/file0 && - test ! 
-h path1/file1 && test -f path1/file1' - -test_done diff --git a/t/t2025-worktree-add.sh b/t/t2025-worktree-add.sh index 07d292317c..286bba35d8 100755 --- a/t/t2025-worktree-add.sh +++ b/t/t2025-worktree-add.sh @@ -552,4 +552,22 @@ test_expect_success '"add" in bare repo invokes post-checkout hook' ' test_cmp hook.expect goozy/hook.actual ' +test_expect_success '"add" an existing but missing worktree' ' + git worktree add --detach pneu && + test_must_fail git worktree add --detach pneu && + rm -fr pneu && + test_must_fail git worktree add --detach pneu && + git worktree add --force --detach pneu +' + +test_expect_success '"add" an existing locked but missing worktree' ' + git worktree add --detach gnoo && + git worktree lock gnoo && + test_when_finished "git worktree unlock gnoo || :" && + rm -fr gnoo && + test_must_fail git worktree add --detach gnoo && + test_must_fail git worktree add --force --detach gnoo && + git worktree add --force --force --detach gnoo +' + test_done diff --git a/t/t2028-worktree-move.sh b/t/t2028-worktree-move.sh index 5f7d45b7b7..33c0337733 100755 --- a/t/t2028-worktree-move.sh +++ b/t/t2028-worktree-move.sh @@ -98,6 +98,20 @@ test_expect_success 'move worktree to another dir' ' test_cmp expected2 actual2 ' +test_expect_success 'move locked worktree (force)' ' + test_when_finished " + git worktree unlock flump || : + git worktree remove flump || : + git worktree unlock ploof || : + git worktree remove ploof || : + " && + git worktree add --detach flump && + git worktree lock flump && + test_must_fail git worktree move flump ploof" && + test_must_fail git worktree move --force flump ploof" && + git worktree move --force --force flump ploof +' + test_expect_success 'remove main worktree' ' test_must_fail git worktree remove . ' @@ -141,4 +155,34 @@ test_expect_success 'NOT remove missing-but-locked worktree' ' test_path_is_dir .git/worktrees/gone-but-locked ' +test_expect_success 'proper error when worktree not found' ' + for i in noodle noodle/bork + do + test_must_fail git worktree lock $i 2>err && + test_i18ngrep "not a working tree" err || return 1 + done +' + +test_expect_success 'remove locked worktree (force)' ' + git worktree add --detach gumby && + test_when_finished "git worktree remove gumby || :" && + git worktree lock gumby && + test_when_finished "git worktree unlock gumby || :" && + test_must_fail git worktree remove gumby && + test_must_fail git worktree remove --force gumby && + git worktree remove --force --force gumby +' + +test_expect_success 'remove cleans up .git/worktrees when empty' ' + git init moog && + ( + cd moog && + test_commit bim && + git worktree add --detach goom && + test_path_exists .git/worktrees && + git worktree remove goom && + test_path_is_missing .git/worktrees + ) +' + test_done diff --git a/t/t2029-worktree-config.sh b/t/t2029-worktree-config.sh new file mode 100755 index 0000000000..286121d8de --- /dev/null +++ b/t/t2029-worktree-config.sh @@ -0,0 +1,79 @@ +#!/bin/sh + +test_description="config file in multi worktree" + +. 
./test-lib.sh + +test_expect_success 'setup' ' + test_commit start +' + +test_expect_success 'config --worktree in single worktree' ' + git config --worktree foo.bar true && + test_cmp_config true foo.bar +' + +test_expect_success 'add worktrees' ' + git worktree add wt1 && + git worktree add wt2 +' + +test_expect_success 'config --worktree without extension' ' + test_must_fail git config --worktree foo.bar false +' + +test_expect_success 'enable worktreeConfig extension' ' + git config extensions.worktreeConfig true && + test_cmp_config true extensions.worktreeConfig +' + +test_expect_success 'config is shared as before' ' + git config this.is shared && + test_cmp_config shared this.is && + test_cmp_config -C wt1 shared this.is && + test_cmp_config -C wt2 shared this.is +' + +test_expect_success 'config is shared (set from another worktree)' ' + git -C wt1 config that.is also-shared && + test_cmp_config also-shared that.is && + test_cmp_config -C wt1 also-shared that.is && + test_cmp_config -C wt2 also-shared that.is +' + +test_expect_success 'config private to main worktree' ' + git config --worktree this.is for-main && + test_cmp_config for-main this.is && + test_cmp_config -C wt1 shared this.is && + test_cmp_config -C wt2 shared this.is +' + +test_expect_success 'config private to linked worktree' ' + git -C wt1 config --worktree this.is for-wt1 && + test_cmp_config for-main this.is && + test_cmp_config -C wt1 for-wt1 this.is && + test_cmp_config -C wt2 shared this.is +' + +test_expect_success 'core.bare no longer for main only' ' + test_config core.bare true && + test "$(git rev-parse --is-bare-repository)" = true && + test "$(git -C wt1 rev-parse --is-bare-repository)" = true && + test "$(git -C wt2 rev-parse --is-bare-repository)" = true +' + +test_expect_success 'per-worktree core.bare is picked up' ' + git -C wt1 config --worktree core.bare true && + test "$(git rev-parse --is-bare-repository)" = false && + test "$(git -C wt1 rev-parse --is-bare-repository)" = true && + test "$(git -C wt2 rev-parse --is-bare-repository)" = false +' + +test_expect_success 'config.worktree no longer read without extension' ' + git config --unset extensions.worktreeConfig && + test_cmp_config shared this.is && + test_cmp_config -C wt1 shared this.is && + test_cmp_config -C wt2 shared this.is +' + +test_done diff --git a/t/t2101-update-index-reupdate.sh b/t/t2101-update-index-reupdate.sh index 685ec45639..6c32d42c8c 100755 --- a/t/t2101-update-index-reupdate.sh +++ b/t/t2101-update-index-reupdate.sh @@ -73,7 +73,7 @@ test_expect_success 'update-index --update from subdir' ' 100644 $(git hash-object dir1/file3) 0 dir1/file3 100644 $file2 0 file2 EOF - test_cmp current expected + test_cmp expected current ' test_expect_success 'update-index --update with pathspec' ' diff --git a/t/t3070-wildmatch.sh b/t/t3070-wildmatch.sh index 46aca0af10..891d4d7cb9 100755 --- a/t/t3070-wildmatch.sh +++ b/t/t3070-wildmatch.sh @@ -237,7 +237,7 @@ match 0 0 0 0 foobar 'foo\*bar' match 1 1 1 1 'f\oo' 'f\\oo' match 1 1 1 1 ball '*[al]?' 
match 0 0 0 0 ten '[ten]' -match 0 0 1 1 ten '**[!te]' +match 1 1 1 1 ten '**[!te]' match 0 0 0 0 ten '**[!ten]' match 1 1 1 1 ten 't[a-g]n' match 0 0 0 0 ten 't[!a-g]n' @@ -253,7 +253,7 @@ match 1 1 1 1 ']' ']' # Extended slash-matching features match 0 0 1 1 'foo/baz/bar' 'foo*bar' match 0 0 1 1 'foo/baz/bar' 'foo**bar' -match 0 0 1 1 'foobazbar' 'foo**bar' +match 1 1 1 1 'foobazbar' 'foo**bar' match 1 1 1 1 'foo/baz/bar' 'foo/**/bar' match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar' match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar' diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh index 93f21ab078..478b82cf9b 100755 --- a/t/t3200-branch.sh +++ b/t/t3200-branch.sh @@ -1221,7 +1221,7 @@ test_expect_success 'use --edit-description' ' EOF EDITOR=./editor git branch --edit-description && echo "New contents" >expect && - test_cmp EDITOR_OUTPUT expect + test_cmp expect EDITOR_OUTPUT ' test_expect_success 'detect typo in branch name when using --edit-description' ' diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh index fb4c13a84a..e497c1358f 100755 --- a/t/t3206-range-diff.sh +++ b/t/t3206-range-diff.sh @@ -122,6 +122,70 @@ test_expect_success 'changed commit' ' test_cmp expected actual ' +test_expect_success 'changed commit with --no-patch diff option' ' + git range-diff --no-color --no-patch topic...changed >actual && + cat >expected <<-EOF && + 1: 4de457d = 1: a4b3333 s/5/A/ + 2: fccce22 = 2: f51d370 s/4/A/ + 3: 147e64e ! 3: 0559556 s/11/B/ + 4: a63e992 ! 4: d966c5c s/12/B/ + EOF + test_cmp expected actual +' + +test_expect_success 'changed commit with --stat diff option' ' + git range-diff --no-color --stat topic...changed >actual && + cat >expected <<-EOF && + 1: 4de457d = 1: a4b3333 s/5/A/ + a => b | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) + 2: fccce22 = 2: f51d370 s/4/A/ + a => b | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) + 3: 147e64e ! 3: 0559556 s/11/B/ + a => b | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) + 4: a63e992 ! 4: d966c5c s/12/B/ + a => b | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) + EOF + test_cmp expected actual +' + +test_expect_success 'changed commit with sm config' ' + git range-diff --no-color --submodule=log topic...changed >actual && + cat >expected <<-EOF && + 1: 4de457d = 1: a4b3333 s/5/A/ + 2: fccce22 = 2: f51d370 s/4/A/ + 3: 147e64e ! 3: 0559556 s/11/B/ + @@ -10,7 +10,7 @@ + 9 + 10 + -11 + -+B + ++BB + 12 + 13 + 14 + 4: a63e992 ! 4: d966c5c s/12/B/ + @@ -8,7 +8,7 @@ + @@ + 9 + 10 + - B + + BB + -12 + +B + 13 + EOF + test_cmp expected actual +' + +test_expect_success 'no commits on one side' ' + git commit --amend -m "new message" && + git range-diff master HEAD@{1} HEAD +' + test_expect_success 'changed message' ' git range-diff --no-color topic...changed-message >actual && sed s/Z/\ /g >expected <<-EOF && @@ -133,18 +197,69 @@ test_expect_success 'changed message' ' Z + Also a silly comment here! 
+ - Zdiff --git a/file b/file - Z--- a/file - Z+++ b/file + Z diff --git a/file b/file + Z --- a/file + Z +++ b/file 3: 147e64e = 3: b9cb956 s/11/B/ 4: a63e992 = 4: 8add5f1 s/12/B/ EOF test_cmp expected actual ' -test_expect_success 'no commits on one side' ' - git commit --amend -m "new message" && - git range-diff master HEAD@{1} HEAD +test_expect_success 'dual-coloring' ' + sed -e "s|^:||" >expect <<-\EOF && + :<YELLOW>1: a4b3333 = 1: f686024 s/5/A/<RESET> + :<RED>2: f51d370 <RESET><YELLOW>!<RESET><GREEN> 2: 4ab067d<RESET><YELLOW> s/4/A/<RESET> + : <REVERSE><CYAN>@@ -2,6 +2,8 @@<RESET> + : <RESET> + : s/4/A/<RESET> + : <RESET> + : <REVERSE><GREEN>+<RESET><BOLD> Also a silly comment here!<RESET> + : <REVERSE><GREEN>+<RESET> + : diff --git a/file b/file<RESET> + : --- a/file<RESET> + : +++ b/file<RESET> + :<RED>3: 0559556 <RESET><YELLOW>!<RESET><GREEN> 3: b9cb956<RESET><YELLOW> s/11/B/<RESET> + : <REVERSE><CYAN>@@ -10,7 +10,7 @@<RESET> + : 9<RESET> + : 10<RESET> + : <RED> -11<RESET> + : <REVERSE><RED>-<RESET><FAINT;GREEN>+BB<RESET> + : <REVERSE><GREEN>+<RESET><BOLD;GREEN>+B<RESET> + : 12<RESET> + : 13<RESET> + : 14<RESET> + :<RED>4: d966c5c <RESET><YELLOW>!<RESET><GREEN> 4: 8add5f1<RESET><YELLOW> s/12/B/<RESET> + : <REVERSE><CYAN>@@ -8,7 +8,7 @@<RESET> + : <CYAN> @@<RESET> + : 9<RESET> + : 10<RESET> + : <REVERSE><RED>-<RESET><FAINT> BB<RESET> + : <REVERSE><GREEN>+<RESET><BOLD> B<RESET> + : <RED> -12<RESET> + : <GREEN> +B<RESET> + : 13<RESET> + EOF + git range-diff changed...changed-message --color --dual-color >actual.raw && + test_decode_color >actual <actual.raw && + test_cmp expect actual +' + +for prev in topic master..topic +do + test_expect_success "format-patch --range-diff=$prev" ' + git format-patch --stdout --cover-letter --range-diff=$prev \ + master..unmodified >actual && + grep "= 1: .* s/5/A" actual && + grep "= 2: .* s/4/A" actual && + grep "= 3: .* s/11/B" actual && + grep "= 4: .* s/12/B" actual + ' +done + +test_expect_success 'format-patch --range-diff as commentary' ' + git format-patch --stdout --range-diff=HEAD~1 HEAD~1 >actual && + test_i18ngrep "^Range-diff:$" actual ' test_done diff --git a/t/t3320-notes-merge-worktrees.sh b/t/t3320-notes-merge-worktrees.sh index 10bfc8b947..823fdbda1f 100755 --- a/t/t3320-notes-merge-worktrees.sh +++ b/t/t3320-notes-merge-worktrees.sh @@ -44,7 +44,7 @@ test_expect_success 'merge z into y fails and sets NOTES_MERGE_REF' ' git config core.notesRef refs/notes/y && test_must_fail git notes merge z && echo "ref: refs/notes/y" >expect && - test_cmp .git/NOTES_MERGE_REF expect + test_cmp expect .git/NOTES_MERGE_REF ' test_expect_success 'merge z into y while mid-merge in another workdir fails' ' @@ -66,7 +66,7 @@ test_expect_success 'merge z into x while mid-merge on y succeeds' ' grep -v "A notes merge into refs/notes/x is already in-progress in" out ) && echo "ref: refs/notes/x" >expect && - test_cmp .git/worktrees/worktree2/NOTES_MERGE_REF expect + test_cmp expect .git/worktrees/worktree2/NOTES_MERGE_REF ' test_done diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh index 3996ee0135..3e73f7584c 100755 --- a/t/t3400-rebase.sh +++ b/t/t3400-rebase.sh @@ -183,13 +183,13 @@ test_expect_success 'cherry-picked commits and fork-point work together' ' test_commit final_B B "Final B" && git rebase && echo Amended >expect && - test_cmp A expect && + test_cmp expect A && echo "Final B" >expect && - test_cmp B expect && + test_cmp expect B && echo C >expect && - test_cmp C expect && + test_cmp expect C && echo D >expect && - test_cmp D expect + 
test_cmp expect D ' test_expect_success 'rebase -q is quiet' ' diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh index 0075ac5c2a..7a440e08d8 100755 --- a/t/t3404-rebase-interactive.sh +++ b/t/t3404-rebase-interactive.sh @@ -75,6 +75,16 @@ test_expect_success 'rebase --keep-empty' ' test_line_count = 6 actual ' +cat > expect <<EOF +error: nothing to do +EOF + +test_expect_success 'rebase -i with empty HEAD' ' + set_fake_editor && + test_must_fail env FAKE_LINES="1 exec_true" git rebase -i HEAD^ >actual 2>&1 && + test_i18ncmp expect actual +' + test_expect_success 'rebase -i with the exec command' ' git checkout master && ( @@ -312,7 +322,7 @@ test_expect_success 'retain authorship when squashing' ' git show HEAD | grep "^Author: Twerp Snog" ' -test_expect_success '-p handles "no changes" gracefully' ' +test_expect_success REBASE_P '-p handles "no changes" gracefully' ' HEAD=$(git rev-parse HEAD) && set_fake_editor && git rebase -i -p HEAD^ && @@ -322,7 +332,7 @@ test_expect_success '-p handles "no changes" gracefully' ' test $HEAD = $(git rev-parse HEAD) ' -test_expect_failure 'exchange two commits with -p' ' +test_expect_failure REBASE_P 'exchange two commits with -p' ' git checkout H && set_fake_editor && FAKE_LINES="2 1" git rebase -i -p HEAD~2 && @@ -330,7 +340,7 @@ test_expect_failure 'exchange two commits with -p' ' test G = $(git cat-file commit HEAD | sed -ne \$p) ' -test_expect_success 'preserve merges with -p' ' +test_expect_success REBASE_P 'preserve merges with -p' ' git checkout -b to-be-preserved master^ && : > unrelated-file && git add unrelated-file && @@ -373,7 +383,7 @@ test_expect_success 'preserve merges with -p' ' test $(git show HEAD:unrelated-file) = 1 ' -test_expect_success 'edit ancestor with -p' ' +test_expect_success REBASE_P 'edit ancestor with -p' ' set_fake_editor && FAKE_LINES="1 2 edit 3 4" git rebase -i -p HEAD~3 && echo 2 > unrelated-file && @@ -387,6 +397,7 @@ test_expect_success 'edit ancestor with -p' ' ' test_expect_success '--continue tries to commit' ' + git reset --hard D && test_tick && set_fake_editor && test_must_fail git rebase -i --onto new-branch1 HEAD^ && @@ -426,7 +437,7 @@ test_expect_success C_LOCALE_OUTPUT 'multi-fixup does not fire up editor' ' git rebase -i $base && test $base = $(git rev-parse HEAD^) && test 0 = $(git show | grep NEVER | wc -l) && - git checkout to-be-rebased && + git checkout @{-1} && git branch -D multi-fixup ' @@ -441,7 +452,7 @@ test_expect_success 'commit message used after conflict' ' git rebase --continue && test $base = $(git rev-parse HEAD^) && test 1 = $(git show | grep ONCE | wc -l) && - git checkout to-be-rebased && + git checkout @{-1} && git branch -D conflict-fixup ' @@ -456,7 +467,7 @@ test_expect_success 'commit message retained after conflict' ' git rebase --continue && test $base = $(git rev-parse HEAD^) && test 2 = $(git show | grep TWICE | wc -l) && - git checkout to-be-rebased && + git checkout @{-1} && git branch -D conflict-squash ' @@ -481,7 +492,7 @@ test_expect_success C_LOCALE_OUTPUT 'squash and fixup generate correct log messa grep "^# This is a combination of 3 commits\." && git cat-file commit HEAD@{3} | grep "^# This is a combination of 2 commits\." 
&& - git checkout to-be-rebased && + git checkout @{-1} && git branch -D squash-fixup ' @@ -494,7 +505,7 @@ test_expect_success C_LOCALE_OUTPUT 'squash ignores comments' ' git rebase -i $base && test $base = $(git rev-parse HEAD^) && test 1 = $(git show | grep ONCE | wc -l) && - git checkout to-be-rebased && + git checkout @{-1} && git branch -D skip-comments ' @@ -507,7 +518,7 @@ test_expect_success C_LOCALE_OUTPUT 'squash ignores blank lines' ' git rebase -i $base && test $base = $(git rev-parse HEAD^) && test 1 = $(git show | grep ONCE | wc -l) && - git checkout to-be-rebased && + git checkout @{-1} && git branch -D skip-blank-lines ' @@ -648,7 +659,7 @@ test_expect_success 'rebase with a file named HEAD in worktree' ' ) && set_fake_editor && - FAKE_LINES="1 squash 2" git rebase -i to-be-rebased && + FAKE_LINES="1 squash 2" git rebase -i @{-1} && test "$(git show -s --pretty=format:%an)" = "Squashed Away" ' diff --git a/t/t3406-rebase-message.sh b/t/t3406-rebase-message.sh index 0392e36d23..38bd876cab 100755 --- a/t/t3406-rebase-message.sh +++ b/t/t3406-rebase-message.sh @@ -77,11 +77,18 @@ test_expect_success 'rebase -n overrides config rebase.stat config' ' # "Does not point to a valid commit: invalid-ref" # # NEEDSWORK: This "grep" is fine in real non-C locales, but -# GETTEXT_POISON poisons the refname along with the enclosing +# GIT_TEST_GETTEXT_POISON poisons the refname along with the enclosing # error message. test_expect_success 'rebase --onto outputs the invalid ref' ' test_must_fail git rebase --onto invalid-ref HEAD HEAD 2>err && test_i18ngrep "invalid-ref" err ' +test_expect_success 'error out early upon -C<n> or --whitespace=<bad>' ' + test_must_fail git rebase -Cnot-a-number HEAD 2>err && + test_i18ngrep "numerical value" err && + test_must_fail git rebase --whitespace=bad HEAD 2>err && + test_i18ngrep "Invalid whitespace option" err +' + test_done diff --git a/t/t3408-rebase-multi-line.sh b/t/t3408-rebase-multi-line.sh index e7292f5b9b..d2bd7c17b0 100755 --- a/t/t3408-rebase-multi-line.sh +++ b/t/t3408-rebase-multi-line.sh @@ -52,7 +52,7 @@ test_expect_success rebase ' test_cmp expect actual ' -test_expect_success rebasep ' +test_expect_success REBASE_P rebasep ' git checkout side-merge && git rebase -p side && diff --git a/t/t3409-rebase-preserve-merges.sh b/t/t3409-rebase-preserve-merges.sh index 8c251c57a6..3b340f1ece 100755 --- a/t/t3409-rebase-preserve-merges.sh +++ b/t/t3409-rebase-preserve-merges.sh @@ -8,6 +8,11 @@ Run "git rebase -p" and check that merges are properly carried along ' . ./test-lib.sh +if ! test_have_prereq REBASE_P; then + skip_all='skipping git rebase -p tests, as asked for' + test_done +fi + GIT_AUTHOR_EMAIL=bogus_email_address export GIT_AUTHOR_EMAIL diff --git a/t/t3410-rebase-preserve-dropped-merges.sh b/t/t3410-rebase-preserve-dropped-merges.sh index 6f73b95558..2e29866993 100755 --- a/t/t3410-rebase-preserve-dropped-merges.sh +++ b/t/t3410-rebase-preserve-dropped-merges.sh @@ -11,6 +11,11 @@ rewritten. ' . ./test-lib.sh +if ! test_have_prereq REBASE_P; then + skip_all='skipping git rebase -p tests, as asked for' + test_done +fi + # set up two branches like this: # # A - B - C - D - E diff --git a/t/t3411-rebase-preserve-around-merges.sh b/t/t3411-rebase-preserve-around-merges.sh index dc81bf27eb..fb45e7bf7b 100755 --- a/t/t3411-rebase-preserve-around-merges.sh +++ b/t/t3411-rebase-preserve-around-merges.sh @@ -10,6 +10,11 @@ a merge to before the merge. ' . ./test-lib.sh +if ! 
test_have_prereq REBASE_P; then + skip_all='skipping git rebase -p tests, as asked for' + test_done +fi + . "$TEST_DIRECTORY"/lib-rebase.sh set_fake_editor diff --git a/t/t3412-rebase-root.sh b/t/t3412-rebase-root.sh index 73a39f2923..21632a984e 100755 --- a/t/t3412-rebase-root.sh +++ b/t/t3412-rebase-root.sh @@ -86,14 +86,14 @@ test_expect_success 'pre-rebase got correct input (4)' ' test "z$(cat .git/PRE-REBASE-INPUT)" = z--root,work4 ' -test_expect_success 'rebase -i -p with linear history' ' +test_expect_success REBASE_P 'rebase -i -p with linear history' ' git checkout -b work5 other && git rebase -i -p --root --onto master && git log --pretty=tformat:"%s" > rebased5 && test_cmp expect rebased5 ' -test_expect_success 'pre-rebase got correct input (5)' ' +test_expect_success REBASE_P 'pre-rebase got correct input (5)' ' test "z$(cat .git/PRE-REBASE-INPUT)" = z--root, ' @@ -120,7 +120,7 @@ commit work6~4 1 EOF -test_expect_success 'rebase -i -p with merge' ' +test_expect_success REBASE_P 'rebase -i -p with merge' ' git checkout -b work6 other && git rebase -i -p --root --onto master && log_with_names work6 > rebased6 && @@ -155,7 +155,7 @@ commit work7~5 1 EOF -test_expect_success 'rebase -i -p with two roots' ' +test_expect_success REBASE_P 'rebase -i -p with two roots' ' git checkout -b work7 other && git rebase -i -p --root --onto master && log_with_names work7 > rebased7 && @@ -261,7 +261,7 @@ commit conflict3~6 1 EOF -test_expect_success 'rebase -i -p --root with conflict (first part)' ' +test_expect_success REBASE_P 'rebase -i -p --root with conflict (first part)' ' git checkout -b conflict3 other && test_must_fail git rebase -i -p --root --onto master && git ls-files -u | grep "B$" @@ -272,7 +272,7 @@ test_expect_success 'fix the conflict' ' git add B ' -test_expect_success 'rebase -i -p --root with conflict (second part)' ' +test_expect_success REBASE_P 'rebase -i -p --root with conflict (second part)' ' git rebase --continue && log_with_names conflict3 >out && test_cmp expect-conflict-p out diff --git a/t/t3414-rebase-preserve-onto.sh b/t/t3414-rebase-preserve-onto.sh index ee0a6cccfd..72e04b5386 100755 --- a/t/t3414-rebase-preserve-onto.sh +++ b/t/t3414-rebase-preserve-onto.sh @@ -10,6 +10,11 @@ aren'"'"'t on top of $ONTO, even if they are on top of $UPSTREAM. ' . ./test-lib.sh +if ! test_have_prereq REBASE_P; then + skip_all='skipping git rebase -p tests, as asked for' + test_done +fi + . 
"$TEST_DIRECTORY"/lib-rebase.sh # Set up branches like this: diff --git a/t/t3417-rebase-whitespace-fix.sh b/t/t3417-rebase-whitespace-fix.sh index 1fb3e499b4..e85cdc7037 100755 --- a/t/t3417-rebase-whitespace-fix.sh +++ b/t/t3417-rebase-whitespace-fix.sh @@ -55,7 +55,7 @@ test_expect_success 'blank line at end of file; extend at end of file' ' git add file && git commit -m second && git rebase --whitespace=fix HEAD^^ && git diff --exit-code HEAD^:file expect-first && - test_cmp file expect-second + test_cmp expect-second file ' # prepare third revision of "file" @@ -82,7 +82,7 @@ test_expect_success 'two blanks line at end of file; extend at end of file' ' cp third file && git add file && git commit -m third && git rebase --whitespace=fix HEAD^^ && git diff --exit-code HEAD^:file expect-second && - test_cmp file expect-third + test_cmp expect-third file ' test_expect_success 'same, but do not remove trailing spaces' ' @@ -120,7 +120,7 @@ test_expect_success 'at beginning of file' ' done >> file && git commit -m more file && git rebase --whitespace=fix HEAD^^ && - test_cmp file expect-beginning + test_cmp expect-beginning file ' test_done diff --git a/t/t3418-rebase-continue.sh b/t/t3418-rebase-continue.sh index 25099d715c..0210b2ac6f 100755 --- a/t/t3418-rebase-continue.sh +++ b/t/t3418-rebase-continue.sh @@ -106,7 +106,7 @@ test_expect_success 'rebase -i --continue handles merge strategy and options' ' test -f funny.was.run ' -test_expect_success 'rebase passes merge strategy options correctly' ' +test_expect_success REBASE_P 'rebase passes merge strategy options correctly' ' rm -fr .git/rebase-* && git reset --hard commit-new-file-F3-on-topic-branch && test_commit theirs-to-merge && @@ -177,6 +177,7 @@ test_expect_success 'setup rerere database' ' git checkout master && test_commit "commit-new-file-F3" F3 3 && test_config rerere.enabled true && + git update-ref refs/heads/topic commit-new-file-F3-on-topic-branch && test_must_fail git rebase -m master topic && echo "Resolved" >F2 && cp F2 expected-F2 && @@ -240,6 +241,17 @@ test_rerere_autoupdate test_rerere_autoupdate -m GIT_SEQUENCE_EDITOR=: && export GIT_SEQUENCE_EDITOR test_rerere_autoupdate -i -test_rerere_autoupdate --preserve-merges +test_have_prereq !REBASE_P || test_rerere_autoupdate --preserve-merges +unset GIT_SEQUENCE_EDITOR + +test_expect_success 'the todo command "break" works' ' + rm -f execed && + FAKE_LINES="break b exec_>execed" git rebase -i HEAD && + test_path_is_missing execed && + git rebase --continue && + test_path_is_missing execed && + git rebase --continue && + test_path_is_file execed +' test_done diff --git a/t/t3420-rebase-autostash.sh b/t/t3420-rebase-autostash.sh index 0c4eefec76..4c7494cc8f 100755 --- a/t/t3420-rebase-autostash.sh +++ b/t/t3420-rebase-autostash.sh @@ -351,4 +351,22 @@ test_expect_success 'autostash is saved on editor failure with conflict' ' test_cmp expected file0 ' +test_expect_success 'autostash with dirty submodules' ' + test_when_finished "git reset --hard && git checkout master" && + git checkout -b with-submodule && + git submodule add ./ sub && + test_tick && + git commit -m add-submodule && + echo changed >sub/file0 && + git rebase -i --autostash HEAD +' + +test_expect_success 'branch is left alone when possible' ' + git checkout -b unchanged-branch && + echo changed >file0 && + git rebase --autostash unchanged-branch && + test changed = "$(cat file0)" && + test unchanged-branch = "$(git rev-parse --abbrev-ref HEAD)" +' + test_done diff --git a/t/t3421-rebase-topology-linear.sh 
b/t/t3421-rebase-topology-linear.sh index 99b2aac921..23ad4cff35 100755 --- a/t/t3421-rebase-topology-linear.sh +++ b/t/t3421-rebase-topology-linear.sh @@ -29,7 +29,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -43,7 +43,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -59,7 +59,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase failure -p +test_have_prereq !REBASE_P || test_run_rebase failure -p test_run_rebase () { result=$1 @@ -73,7 +73,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p # f # / @@ -113,7 +113,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase failure -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -128,7 +128,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase failure -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -143,7 +143,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase failure -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -158,7 +158,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p # a---b---c---j! 
# \ @@ -186,7 +186,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -201,7 +201,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase failure -p +test_have_prereq !REBASE_P || test_run_rebase failure -p test_run_rebase () { result=$1 @@ -216,7 +216,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase failure -p +test_have_prereq !REBASE_P || test_run_rebase failure -p test_run_rebase success --rebase-merges # m @@ -256,7 +256,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -271,7 +271,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase failure -p +test_have_prereq !REBASE_P || test_run_rebase failure -p test_run_rebase () { result=$1 @@ -286,7 +286,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase failure -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_run_rebase () { result=$1 @@ -302,7 +302,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase failure -p +test_have_prereq !REBASE_P || test_run_rebase failure -p test_run_rebase () { result=$1 @@ -317,7 +317,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase failure -m test_run_rebase success -i -test_run_rebase failure -p +test_have_prereq !REBASE_P || test_run_rebase failure -p test_run_rebase () { result=$1 @@ -331,7 +331,7 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase failure -p +test_have_prereq !REBASE_P || test_run_rebase failure -p test_run_rebase () { result=$1 @@ -346,6 +346,6 @@ test_run_rebase () { test_run_rebase success '' test_run_rebase success -m test_run_rebase success -i -test_run_rebase success -p +test_have_prereq !REBASE_P || test_run_rebase success -p test_done diff --git a/t/t3425-rebase-topology-merges.sh b/t/t3425-rebase-topology-merges.sh index 846f85c27e..5f892e33d7 100755 --- a/t/t3425-rebase-topology-merges.sh +++ b/t/t3425-rebase-topology-merges.sh @@ -109,6 +109,11 @@ test_run_rebase success 'd e n o' '' test_run_rebase success 'd e n o' -m test_run_rebase success 'd n o e' -i +if ! 
test_have_prereq REBASE_P; then + skip_all='skipping git rebase -p tests, as asked for' + test_done +fi + test_expect_success "rebase -p is no-op in non-linear history" " reset_rebase && git rebase -p d w && diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh index aa7bfc88ec..cc5646836f 100755 --- a/t/t3430-rebase-merges.sh +++ b/t/t3430-rebase-merges.sh @@ -396,4 +396,20 @@ test_expect_success 'with --autosquash and --exec' ' grep "G: +G" actual ' +test_expect_success '--continue after resolving conflicts after a merge' ' + git checkout -b already-has-g E && + git cherry-pick E..G && + test_commit H2 && + + git checkout -b conflicts-in-merge H && + test_commit H2 H2.t conflicts H2-conflict && + test_must_fail git rebase -r already-has-g && + grep conflicts H2.t && + echo resolved >H2.t && + git add -u && + git rebase --continue && + test_must_fail git rev-parse --verify HEAD^2 && + test_path_is_missing .git/MERGE_HEAD +' + test_done diff --git a/t/t3702-add-edit.sh b/t/t3702-add-edit.sh index c6af7f82b5..6c676645d8 100755 --- a/t/t3702-add-edit.sh +++ b/t/t3702-add-edit.sh @@ -110,10 +110,10 @@ test_expect_success 'add -e' ' cp second-part file && git add -e && test_cmp second-part file && - test_cmp orig-patch expected-patch && + test_cmp expected-patch orig-patch && git diff --cached >actual && grep -v index actual >out && - test_cmp out expected + test_cmp expected out ' diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh index 6450bc6698..cd216655b9 100755 --- a/t/t3903-stash.sh +++ b/t/t3903-stash.sh @@ -36,7 +36,7 @@ EOF test_expect_success 'parents of stash' ' test $(git rev-parse stash^) = $(git rev-parse HEAD) && git diff stash^2..stash > output && - test_cmp output expect + test_cmp expect output ' test_expect_success 'applying bogus stash does nothing' ' @@ -210,9 +210,9 @@ test_expect_success 'stash branch' ' test refs/heads/stashbranch = $(git symbolic-ref HEAD) && test $(git rev-parse HEAD) = $(git rev-parse master^) && git diff --cached > output && - test_cmp output expect && + test_cmp expect output && git diff > output && - test_cmp output expect1 && + test_cmp expect1 output && git add file && git commit -m alternate\ second && git diff master..stashbranch > output && @@ -710,7 +710,7 @@ test_expect_success 'stash where working directory contains "HEAD" file' ' git diff-index --cached --quiet HEAD && test "$(git rev-parse stash^)" = "$(git rev-parse HEAD)" && git diff stash^..stash > output && - test_cmp output expect + test_cmp expect output ' test_expect_success 'store called with invalid commit' ' diff --git a/t/t3905-stash-include-untracked.sh b/t/t3905-stash-include-untracked.sh index 597b0637d1..cc1c8a7bb2 100755 --- a/t/t3905-stash-include-untracked.sh +++ b/t/t3905-stash-include-untracked.sh @@ -142,7 +142,7 @@ test_expect_success 'stash save --include-untracked removed files' ' rm -f file && git stash save --include-untracked && echo 1 > expect && - test_cmp file expect + test_cmp expect file ' rm -f expect diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh index 53880da7bb..909c743c13 100755 --- a/t/t4014-format-patch.sh +++ b/t/t4014-format-patch.sh @@ -1717,4 +1717,38 @@ test_expect_success 'format-patch --pretty=mboxrd' ' test_cmp expect actual ' +test_expect_success 'interdiff: setup' ' + git checkout -b boop master && + test_commit fnorp blorp && + test_commit fleep blorp +' + +test_expect_success 'interdiff: cover-letter' ' + sed "y/q/ /" >expect <<-\EOF && + +fleep + --q + EOF + git format-patch --cover-letter --interdiff=boop~2 -1 
boop && + test_i18ngrep "^Interdiff:$" 0000-cover-letter.patch && + test_i18ngrep ! "^Interdiff:$" 0001-fleep.patch && + sed "1,/^@@ /d; /^-- $/q" <0000-cover-letter.patch >actual && + test_cmp expect actual +' + +test_expect_success 'interdiff: reroll-count' ' + git format-patch --cover-letter --interdiff=boop~2 -v2 -1 boop && + test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch +' + +test_expect_success 'interdiff: solo-patch' ' + cat >expect <<-\EOF && + +fleep + + EOF + git format-patch --interdiff=boop~2 -1 boop && + test_i18ngrep "^Interdiff:$" 0001-fleep.patch && + sed "1,/^ @@ /d; /^$/q" <0001-fleep.patch >actual && + test_cmp expect actual +' + test_done diff --git a/t/t4025-hunk-header.sh b/t/t4025-hunk-header.sh index fa44e78869..35578f2bb9 100755 --- a/t/t4025-hunk-header.sh +++ b/t/t4025-hunk-header.sh @@ -37,7 +37,7 @@ test_expect_success 'hunk header truncation with an overly long line' ' echo " A $N$N$N$N$N$N$N$N$N2" && echo " L $N$N$N$N$N$N$N$N$N1" ) >expected && - test_cmp actual expected + test_cmp expected actual ' diff --git a/t/t4052-stat-output.sh b/t/t4052-stat-output.sh index 6e2cf933f7..28c053849a 100755 --- a/t/t4052-stat-output.sh +++ b/t/t4052-stat-output.sh @@ -44,42 +44,50 @@ show --stat log -1 --stat EOF -while read cmd args +cat >expect.60 <<-'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + +EOF +cat >expect.6030 <<-'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + +EOF +cat >expect2.60 <<-'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + + ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + +EOF +cat >expect2.6030 <<-'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + + ...aaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + +EOF +while read expect cmd args do - cat >expect <<-'EOF' - ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + - EOF test_expect_success "$cmd --stat=width: a long name is given more room when the bar is short" ' git $cmd $args --stat=40 >output && grep " | " output >actual && - test_cmp expect actual + test_cmp $expect.60 actual ' test_expect_success "$cmd --stat-width=width with long name" ' git $cmd $args --stat-width=40 >output && grep " | " output >actual && - test_cmp expect actual + test_cmp $expect.60 actual ' - cat >expect <<-'EOF' - ...aaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + - EOF test_expect_success "$cmd --stat=...,name-width with long name" ' git $cmd $args --stat=60,30 >output && grep " | " output >actual && - test_cmp expect actual + test_cmp $expect.6030 actual ' test_expect_success "$cmd --stat-name-width with long name" ' git $cmd $args --stat-name-width=30 >output && grep " | " output >actual && - test_cmp expect actual + test_cmp $expect.6030 actual ' done <<\EOF -format-patch -1 --stdout -diff HEAD^ HEAD --stat -show --stat -log -1 --stat +expect2 format-patch --cover-letter -1 --stdout +expect diff HEAD^ HEAD --stat +expect show --stat +expect log -1 --stat EOF @@ -97,6 +105,16 @@ test_expect_success 'preparation for big change tests' ' cat >expect72 <<'EOF' abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +EOF +test_expect_success "format-patch --cover-letter ignores COLUMNS (big change)" ' + COLUMNS=200 git format-patch -1 --stdout --cover-letter >output && + grep " | " output >actual && + test_cmp expect72 actual +' + +cat >expect72 <<'EOF' + abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ EOF cat >expect72-graph <<'EOF' | abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/t/t4117-apply-reject.sh 
b/t/t4117-apply-reject.sh index d80187de94..f7de6f077a 100755 --- a/t/t4117-apply-reject.sh +++ b/t/t4117-apply-reject.sh @@ -72,7 +72,7 @@ test_expect_success 'apply with --reject should fail but update the file' ' rm -f file1.rej file2.rej && test_must_fail git apply --reject patch.1 && - test_cmp file1 expected && + test_cmp expected file1 && cat file1.rej && test_path_is_missing file2.rej @@ -85,7 +85,7 @@ test_expect_success 'apply with --reject should fail but update the file' ' test_must_fail git apply --reject patch.2 >rejects && test_path_is_missing file1 && - test_cmp file2 expected && + test_cmp expected file2 && cat file2.rej && test_path_is_missing file1.rej @@ -99,7 +99,7 @@ test_expect_success 'the same test with --verbose' ' test_must_fail git apply --reject --verbose patch.2 >rejects && test_path_is_missing file1 && - test_cmp file2 expected && + test_cmp expected file2 && cat file2.rej && test_path_is_missing file1.rej diff --git a/t/t4124-apply-ws-rule.sh b/t/t4124-apply-ws-rule.sh index 7e32237a2a..ff51e9e789 100755 --- a/t/t4124-apply-ws-rule.sh +++ b/t/t4124-apply-ws-rule.sh @@ -313,9 +313,9 @@ test_expect_success 'applying beyond EOF requires one non-blank context line' ' { echo a; echo; } >one && cp one expect && test_must_fail git apply --whitespace=fix patch && - test_cmp one expect && + test_cmp expect one && test_must_fail git apply --ignore-space-change --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'tons of blanks at EOF should not apply' ' @@ -342,10 +342,10 @@ test_expect_success 'missing blank line at end with --whitespace=fix' ' cp one saved-one && test_must_fail git apply patch && git apply --whitespace=fix patch && - test_cmp one expect && + test_cmp expect one && mv saved-one one && git apply --ignore-space-change --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'two missing blank lines at end with --whitespace=fix' ' @@ -360,11 +360,11 @@ test_expect_success 'two missing blank lines at end with --whitespace=fix' ' cp no-blank-lines one && test_must_fail git apply patch && git apply --whitespace=fix patch && - test_cmp one expect && + test_cmp expect one && mv no-blank-lines one && test_must_fail git apply patch && git apply --ignore-space-change --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'missing blank line at end, insert before end, --whitespace=fix' ' @@ -376,7 +376,7 @@ test_expect_success 'missing blank line at end, insert before end, --whitespace= echo a >one && test_must_fail git apply patch && git apply --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'shrink file with tons of missing blanks at end of file' ' @@ -392,10 +392,10 @@ test_expect_success 'shrink file with tons of missing blanks at end of file' ' cp no-blank-lines one && test_must_fail git apply patch && git apply --whitespace=fix patch && - test_cmp one expect && + test_cmp expect one && mv no-blank-lines one && git apply --ignore-space-change --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'missing blanks at EOF must only match blank lines' ' @@ -427,7 +427,7 @@ test_expect_success 'missing blank line should match context line with spaces' ' git add one && git apply --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' sed -e's/Z//' >one <<EOF @@ -447,7 +447,7 @@ test_expect_success 'same, but with the --ignore-space-option' ' git 
checkout-index -f one && git apply --ignore-space-change --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'same, but with CR-LF line endings && cr-at-eol set' ' @@ -464,7 +464,7 @@ test_expect_success 'same, but with CR-LF line endings && cr-at-eol set' ' mv save-one one && git apply --ignore-space-change --whitespace=fix patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'CR-LF line endings && add line && text=auto' ' @@ -478,7 +478,7 @@ test_expect_success 'CR-LF line endings && add line && text=auto' ' mv save-one one && echo "one text=auto" >.gitattributes && git apply patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'CR-LF line endings && change line && text=auto' ' @@ -491,7 +491,7 @@ test_expect_success 'CR-LF line endings && change line && text=auto' ' mv save-one one && echo "one text=auto" >.gitattributes && git apply patch && - test_cmp one expect + test_cmp expect one ' test_expect_success 'LF in repo, CRLF in worktree && change line && text=auto' ' @@ -503,7 +503,7 @@ test_expect_success 'LF in repo, CRLF in worktree && change line && text=auto' ' echo "one text=auto" >.gitattributes && git -c core.eol=CRLF apply patch && printf "b\r\n" >expect && - test_cmp one expect + test_cmp expect one ' test_expect_success 'whitespace=fix to expand' ' diff --git a/t/t4136-apply-check.sh b/t/t4136-apply-check.sh index 6d92872318..4c3f264a63 100755 --- a/t/t4136-apply-check.sh +++ b/t/t4136-apply-check.sh @@ -29,6 +29,18 @@ test_expect_success 'apply exits non-zero with no-op patch' ' test_must_fail git apply --check input ' +test_expect_success '`apply --recount` allows no-op patch' ' + echo 1 >1 && + git apply --recount --check <<-\EOF + diff --get a/1 b/1 + index 6696ea4..606eddd 100644 + --- a/1 + +++ b/1 + @@ -1,1 +1,1 @@ + 1 + EOF +' + test_expect_success 'invalid combination: create and copy' ' test_must_fail git apply --check - <<-\EOF diff --git a/1 b/2 diff --git a/t/t4138-apply-ws-expansion.sh b/t/t4138-apply-ws-expansion.sh index 0ffe33fbef..3b636a63a3 100755 --- a/t/t4138-apply-ws-expansion.sh +++ b/t/t4138-apply-ws-expansion.sh @@ -114,7 +114,7 @@ for t in 1 2 3 4 do test_expect_success 'apply with ws expansion (t=$t)' ' git apply patch$t.patch && - test_cmp test-$t expect-$t + test_cmp expect-$t test-$t ' done diff --git a/t/t4200-rerere.sh b/t/t4200-rerere.sh index 313222d0d6..55b7750ade 100755 --- a/t/t4200-rerere.sh +++ b/t/t4200-rerere.sh @@ -577,6 +577,71 @@ test_expect_success 'multiple identical conflicts' ' count_pre_post 0 0 ' +test_expect_success 'rerere with unexpected conflict markers does not crash' ' + git reset --hard && + + git checkout -b branch-1 master && + echo "bar" >test && + git add test && + git commit -q -m two && + + git reset --hard && + git checkout -b branch-2 master && + echo "foo" >test && + git add test && + git commit -q -a -m one && + + test_must_fail git merge branch-1 && + echo "<<<<<<< a" >test && + git rerere && + + git rerere clear +' + +test_expect_success 'rerere with inner conflict markers' ' + git reset --hard && + + git checkout -b A master && + echo "bar" >test && + git add test && + git commit -q -m two && + echo "baz" >test && + git add test && + git commit -q -m three && + + git reset --hard && + git checkout -b B master && + echo "foo" >test && + git add test && + git commit -q -a -m one && + + test_must_fail git merge A~ && + git add test && + git commit -q -m "will solve conflicts later" && + test_must_fail git merge A && + + echo 
"resolved" >test && + git add test && + git commit -q -m "solved conflict" && + + echo "resolved" >expect && + + git reset --hard HEAD~~ && + test_must_fail git merge A~ && + git add test && + git commit -q -m "will solve conflicts later" && + test_must_fail git merge A && + cat test >actual && + test_cmp expect actual && + + git add test && + git commit -m "rerere solved conflict" && + git reset --hard HEAD~ && + test_must_fail git merge A && + cat test >actual && + test_cmp expect actual +' + test_expect_success 'setup simple stage 1 handling' ' test_create_repo stage_1_handling && ( diff --git a/t/t4256-am-format-flowed.sh b/t/t4256-am-format-flowed.sh new file mode 100755 index 0000000000..6340310e9a --- /dev/null +++ b/t/t4256-am-format-flowed.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +test_description='test format=flowed support of git am' + +. ./test-lib.sh + +test_expect_success 'setup' ' + cp "$TEST_DIRECTORY/t4256/1/mailinfo.c.orig" mailinfo.c && + git add mailinfo.c && + git commit -m initial +' + +test_expect_success 'am with format=flowed' ' + git am <"$TEST_DIRECTORY/t4256/1/patch" >stdout 2>stderr && + test_i18ngrep "warning: Patch sent with format=flowed" stderr && + test_cmp "$TEST_DIRECTORY/t4256/1/mailinfo.c" mailinfo.c +' + +test_done diff --git a/t/t4256/1/mailinfo.c b/t/t4256/1/mailinfo.c new file mode 100644 index 0000000000..b395adbdf2 --- /dev/null +++ b/t/t4256/1/mailinfo.c @@ -0,0 +1,1245 @@ +#include "cache.h" +#include "config.h" +#include "utf8.h" +#include "strbuf.h" +#include "mailinfo.h" + +static void cleanup_space(struct strbuf *sb) +{ + size_t pos, cnt; + for (pos = 0; pos < sb->len; pos++) { + if (isspace(sb->buf[pos])) { + sb->buf[pos] = ' '; + for (cnt = 0; isspace(sb->buf[pos + cnt + 1]); cnt++); + strbuf_remove(sb, pos + 1, cnt); + } + } +} + +static void get_sane_name(struct strbuf *out, struct strbuf *name, struct strbuf *email) +{ + struct strbuf *src = name; + if (name->len < 3 || 60 < name->len || strchr(name->buf, '@') || + strchr(name->buf, '<') || strchr(name->buf, '>')) + src = email; + else if (name == out) + return; + strbuf_reset(out); + strbuf_addbuf(out, src); +} + +static void parse_bogus_from(struct mailinfo *mi, const struct strbuf *line) +{ + /* John Doe <johndoe> */ + + char *bra, *ket; + /* This is fallback, so do not bother if we already have an + * e-mail address. 
+ */ + if (mi->email.len) + return; + + bra = strchr(line->buf, '<'); + if (!bra) + return; + ket = strchr(bra, '>'); + if (!ket) + return; + + strbuf_reset(&mi->email); + strbuf_add(&mi->email, bra + 1, ket - bra - 1); + + strbuf_reset(&mi->name); + strbuf_add(&mi->name, line->buf, bra - line->buf); + strbuf_trim(&mi->name); + get_sane_name(&mi->name, &mi->name, &mi->email); +} + +static const char *unquote_comment(struct strbuf *outbuf, const char *in) +{ + int c; + int take_next_literally = 0; + + strbuf_addch(outbuf, '('); + + while ((c = *in++) != 0) { + if (take_next_literally == 1) { + take_next_literally = 0; + } else { + switch (c) { + case '\\': + take_next_literally = 1; + continue; + case '(': + in = unquote_comment(outbuf, in); + continue; + case ')': + strbuf_addch(outbuf, ')'); + return in; + } + } + + strbuf_addch(outbuf, c); + } + + return in; +} + +static const char *unquote_quoted_string(struct strbuf *outbuf, const char *in) +{ + int c; + int take_next_literally = 0; + + while ((c = *in++) != 0) { + if (take_next_literally == 1) { + take_next_literally = 0; + } else { + switch (c) { + case '\\': + take_next_literally = 1; + continue; + case '"': + return in; + } + } + + strbuf_addch(outbuf, c); + } + + return in; +} + +static void unquote_quoted_pair(struct strbuf *line) +{ + struct strbuf outbuf; + const char *in = line->buf; + int c; + + strbuf_init(&outbuf, line->len); + + while ((c = *in++) != 0) { + switch (c) { + case '"': + in = unquote_quoted_string(&outbuf, in); + continue; + case '(': + in = unquote_comment(&outbuf, in); + continue; + } + + strbuf_addch(&outbuf, c); + } + + strbuf_swap(&outbuf, line); + strbuf_release(&outbuf); + +} + +static void handle_from(struct mailinfo *mi, const struct strbuf *from) +{ + char *at; + size_t el; + struct strbuf f; + + strbuf_init(&f, from->len); + strbuf_addbuf(&f, from); + + unquote_quoted_pair(&f); + + at = strchr(f.buf, '@'); + if (!at) { + parse_bogus_from(mi, from); + goto out; + } + + /* + * If we already have one email, don't take any confusing lines + */ + if (mi->email.len && strchr(at + 1, '@')) + goto out; + + /* Pick up the string around '@', possibly delimited with <> + * pair; that is the email part. + */ + while (at > f.buf) { + char c = at[-1]; + if (isspace(c)) + break; + if (c == '<') { + at[-1] = ' '; + break; + } + at--; + } + el = strcspn(at, " \n\t\r\v\f>"); + strbuf_reset(&mi->email); + strbuf_add(&mi->email, at, el); + strbuf_remove(&f, at - f.buf, el + (at[el] ? 1 : 0)); + + /* The remainder is name. It could be + * + * - "John Doe <john.doe@xz>" (a), or + * - "john.doe@xz (John Doe)" (b), or + * - "John (zzz) Doe <john.doe@xz> (Comment)" (c) + * + * but we have removed the email part, so + * + * - remove extra spaces which could stay after email (case 'c'), and + * - trim from both ends, possibly removing the () pair at the end + * (cases 'a' and 'b'). + */ + cleanup_space(&f); + strbuf_trim(&f); + if (f.buf[0] == '(' && f.len && f.buf[f.len - 1] == ')') { + strbuf_remove(&f, 0, 1); + strbuf_setlen(&f, f.len - 1); + } + + get_sane_name(&mi->name, &f, &mi->email); +out: + strbuf_release(&f); +} + +static void handle_header(struct strbuf **out, const struct strbuf *line) +{ + if (!*out) { + *out = xmalloc(sizeof(struct strbuf)); + strbuf_init(*out, line->len); + } else + strbuf_reset(*out); + + strbuf_addbuf(*out, line); +} + +/* NOTE NOTE NOTE. We do not claim we do full MIME. We just attempt + * to have enough heuristics to grok MIME encoded patches often found + * on our mailing lists. 
For example, we do not even treat header lines + * case insensitively. + */ + +static int slurp_attr(const char *line, const char *name, struct strbuf *attr) +{ + const char *ends, *ap = strcasestr(line, name); + size_t sz; + + strbuf_setlen(attr, 0); + if (!ap) + return 0; + ap += strlen(name); + if (*ap == '"') { + ap++; + ends = "\""; + } + else + ends = "; \t"; + sz = strcspn(ap, ends); + strbuf_add(attr, ap, sz); + return 1; +} + +static int has_attr_value(const char *line, const char *name, const char *value) +{ + struct strbuf sb = STRBUF_INIT; + int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value); + strbuf_release(&sb); + return rc; +} + +static void handle_content_type(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf *boundary = xmalloc(sizeof(struct strbuf)); + strbuf_init(boundary, line->len); + + mi->format_flowed = has_attr_value(line->buf, "format=", "flowed"); + mi->delsp = has_attr_value(line->buf, "delsp=", "yes"); + + if (slurp_attr(line->buf, "boundary=", boundary)) { + strbuf_insert(boundary, 0, "--", 2); + if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) { + error("Too many boundaries to handle"); + mi->input_error = -1; + mi->content_top = &mi->content[MAX_BOUNDARIES] - 1; + return; + } + *(mi->content_top) = boundary; + boundary = NULL; + } + slurp_attr(line->buf, "charset=", &mi->charset); + + if (boundary) { + strbuf_release(boundary); + free(boundary); + } +} + +static void handle_content_transfer_encoding(struct mailinfo *mi, + const struct strbuf *line) +{ + if (strcasestr(line->buf, "base64")) + mi->transfer_encoding = TE_BASE64; + else if (strcasestr(line->buf, "quoted-printable")) + mi->transfer_encoding = TE_QP; + else + mi->transfer_encoding = TE_DONTCARE; +} + +static int is_multipart_boundary(struct mailinfo *mi, const struct strbuf *line) +{ + struct strbuf *content_top = *(mi->content_top); + + return ((content_top->len <= line->len) && + !memcmp(line->buf, content_top->buf, content_top->len)); +} + +static void cleanup_subject(struct mailinfo *mi, struct strbuf *subject) +{ + size_t at = 0; + + while (at < subject->len) { + char *pos; + size_t remove; + + switch (subject->buf[at]) { + case 'r': case 'R': + if (subject->len <= at + 3) + break; + if ((subject->buf[at + 1] == 'e' || + subject->buf[at + 1] == 'E') && + subject->buf[at + 2] == ':') { + strbuf_remove(subject, at, 3); + continue; + } + at++; + break; + case ' ': case '\t': case ':': + strbuf_remove(subject, at, 1); + continue; + case '[': + pos = strchr(subject->buf + at, ']'); + if (!pos) + break; + remove = pos - subject->buf + at + 1; + if (!mi->keep_non_patch_brackets_in_subject || + (7 <= remove && + memmem(subject->buf + at, remove, "PATCH", 5))) + strbuf_remove(subject, at, remove); + else { + at += remove; + /* + * If the input had a space after the ], keep + * it. We don't bother with finding the end of + * the space, since we later normalize it + * anyway. 
+ */ + if (isspace(subject->buf[at])) + at += 1; + } + continue; + } + break; + } + strbuf_trim(subject); +} + +#define MAX_HDR_PARSED 10 +static const char *header[MAX_HDR_PARSED] = { + "From","Subject","Date", +}; + +static inline int cmp_header(const struct strbuf *line, const char *hdr) +{ + int len = strlen(hdr); + return !strncasecmp(line->buf, hdr, len) && line->len > len && + line->buf[len] == ':' && isspace(line->buf[len + 1]); +} + +static int is_format_patch_separator(const char *line, int len) +{ + static const char SAMPLE[] = + "From e6807f3efca28b30decfecb1732a56c7db1137ee Mon Sep 17 00:00:00 2001\n"; + const char *cp; + + if (len != strlen(SAMPLE)) + return 0; + if (!skip_prefix(line, "From ", &cp)) + return 0; + if (strspn(cp, "0123456789abcdef") != 40) + return 0; + cp += 40; + return !memcmp(SAMPLE + (cp - line), cp, strlen(SAMPLE) - (cp - line)); +} + +static struct strbuf *decode_q_segment(const struct strbuf *q_seg, int rfc2047) +{ + const char *in = q_seg->buf; + int c; + struct strbuf *out = xmalloc(sizeof(struct strbuf)); + strbuf_init(out, q_seg->len); + + while ((c = *in++) != 0) { + if (c == '=') { + int ch, d = *in; + if (d == '\n' || !d) + break; /* drop trailing newline */ + ch = hex2chr(in); + if (ch >= 0) { + strbuf_addch(out, ch); + in += 2; + continue; + } + /* garbage -- fall through */ + } + if (rfc2047 && c == '_') /* rfc2047 4.2 (2) */ + c = 0x20; + strbuf_addch(out, c); + } + return out; +} + +static struct strbuf *decode_b_segment(const struct strbuf *b_seg) +{ + /* Decode in..ep, possibly in-place to ot */ + int c, pos = 0, acc = 0; + const char *in = b_seg->buf; + struct strbuf *out = xmalloc(sizeof(struct strbuf)); + strbuf_init(out, b_seg->len); + + while ((c = *in++) != 0) { + if (c == '+') + c = 62; + else if (c == '/') + c = 63; + else if ('A' <= c && c <= 'Z') + c -= 'A'; + else if ('a' <= c && c <= 'z') + c -= 'a' - 26; + else if ('0' <= c && c <= '9') + c -= '0' - 52; + else + continue; /* garbage */ + switch (pos++) { + case 0: + acc = (c << 2); + break; + case 1: + strbuf_addch(out, (acc | (c >> 4))); + acc = (c & 15) << 4; + break; + case 2: + strbuf_addch(out, (acc | (c >> 2))); + acc = (c & 3) << 6; + break; + case 3: + strbuf_addch(out, (acc | c)); + acc = pos = 0; + break; + } + } + return out; +} + +static int convert_to_utf8(struct mailinfo *mi, + struct strbuf *line, const char *charset) +{ + char *out; + + if (!mi->metainfo_charset || !charset || !*charset) + return 0; + + if (same_encoding(mi->metainfo_charset, charset)) + return 0; + out = reencode_string(line->buf, mi->metainfo_charset, charset); + if (!out) { + mi->input_error = -1; + return error("cannot convert from %s to %s", + charset, mi->metainfo_charset); + } + strbuf_attach(line, out, strlen(out), strlen(out)); + return 0; +} + +static void decode_header(struct mailinfo *mi, struct strbuf *it) +{ + char *in, *ep, *cp; + struct strbuf outbuf = STRBUF_INIT, *dec; + struct strbuf charset_q = STRBUF_INIT, piecebuf = STRBUF_INIT; + int found_error = 1; /* pessimism */ + + in = it->buf; + while (in - it->buf <= it->len && (ep = strstr(in, "=?")) != NULL) { + int encoding; + strbuf_reset(&charset_q); + strbuf_reset(&piecebuf); + + if (in != ep) { + /* + * We are about to process an encoded-word + * that begins at ep, but there is something + * before the encoded word. 
+ */ + char *scan; + for (scan = in; scan < ep; scan++) + if (!isspace(*scan)) + break; + + if (scan != ep || in == it->buf) { + /* + * We should not lose that "something", + * unless we have just processed an + * encoded-word, and there is only LWS + * before the one we are about to process. + */ + strbuf_add(&outbuf, in, ep - in); + } + } + /* E.g. + * ep : "=?iso-2022-jp?B?GyR...?= foo" + * ep : "=?ISO-8859-1?Q?Foo=FCbar?= baz" + */ + ep += 2; + + if (ep - it->buf >= it->len || !(cp = strchr(ep, '?'))) + goto release_return; + + if (cp + 3 - it->buf > it->len) + goto release_return; + strbuf_add(&charset_q, ep, cp - ep); + + encoding = cp[1]; + if (!encoding || cp[2] != '?') + goto release_return; + ep = strstr(cp + 3, "?="); + if (!ep) + goto release_return; + strbuf_add(&piecebuf, cp + 3, ep - cp - 3); + switch (tolower(encoding)) { + default: + goto release_return; + case 'b': + dec = decode_b_segment(&piecebuf); + break; + case 'q': + dec = decode_q_segment(&piecebuf, 1); + break; + } + if (convert_to_utf8(mi, dec, charset_q.buf)) + goto release_return; + + strbuf_addbuf(&outbuf, dec); + strbuf_release(dec); + free(dec); + in = ep + 2; + } + strbuf_addstr(&outbuf, in); + strbuf_reset(it); + strbuf_addbuf(it, &outbuf); + found_error = 0; +release_return: + strbuf_release(&outbuf); + strbuf_release(&charset_q); + strbuf_release(&piecebuf); + + if (found_error) + mi->input_error = -1; +} + +static int check_header(struct mailinfo *mi, + const struct strbuf *line, + struct strbuf *hdr_data[], int overwrite) +{ + int i, ret = 0, len; + struct strbuf sb = STRBUF_INIT; + + /* search for the interesting parts */ + for (i = 0; header[i]; i++) { + int len = strlen(header[i]); + if ((!hdr_data[i] || overwrite) && cmp_header(line, header[i])) { + /* Unwrap inline B and Q encoding, and optionally + * normalize the meta information to utf8. + */ + strbuf_add(&sb, line->buf + len + 2, line->len - len - 2); + decode_header(mi, &sb); + handle_header(&hdr_data[i], &sb); + ret = 1; + goto check_header_out; + } + } + + /* Content stuff */ + if (cmp_header(line, "Content-Type")) { + len = strlen("Content-Type: "); + strbuf_add(&sb, line->buf + len, line->len - len); + decode_header(mi, &sb); + strbuf_insert(&sb, 0, "Content-Type: ", len); + handle_content_type(mi, &sb); + ret = 1; + goto check_header_out; + } + if (cmp_header(line, "Content-Transfer-Encoding")) { + len = strlen("Content-Transfer-Encoding: "); + strbuf_add(&sb, line->buf + len, line->len - len); + decode_header(mi, &sb); + handle_content_transfer_encoding(mi, &sb); + ret = 1; + goto check_header_out; + } + if (cmp_header(line, "Message-Id")) { + len = strlen("Message-Id: "); + strbuf_add(&sb, line->buf + len, line->len - len); + decode_header(mi, &sb); + if (mi->add_message_id) + mi->message_id = strbuf_detach(&sb, NULL); + ret = 1; + goto check_header_out; + } + +check_header_out: + strbuf_release(&sb); + return ret; +} + +/* + * Returns 1 if the given line or any line beginning with the given line is an + * in-body header (that is, check_header will succeed when passed + * mi->s_hdr_data). 
+ */ +static int is_inbody_header(const struct mailinfo *mi, + const struct strbuf *line) +{ + int i; + for (i = 0; header[i]; i++) + if (!mi->s_hdr_data[i] && cmp_header(line, header[i])) + return 1; + return 0; +} + +static void decode_transfer_encoding(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf *ret; + + switch (mi->transfer_encoding) { + case TE_QP: + ret = decode_q_segment(line, 0); + break; + case TE_BASE64: + ret = decode_b_segment(line); + break; + case TE_DONTCARE: + default: + return; + } + strbuf_reset(line); + strbuf_addbuf(line, ret); + strbuf_release(ret); + free(ret); +} + +static inline int patchbreak(const struct strbuf *line) +{ + size_t i; + + /* Beginning of a "diff -" header? */ + if (starts_with(line->buf, "diff -")) + return 1; + + /* CVS "Index: " line? */ + if (starts_with(line->buf, "Index: ")) + return 1; + + /* + * "--- <filename>" starts patches without headers + * "---<sp>*" is a manual separator + */ + if (line->len < 4) + return 0; + + if (starts_with(line->buf, "---")) { + /* space followed by a filename? */ + if (line->buf[3] == ' ' && !isspace(line->buf[4])) + return 1; + /* Just whitespace? */ + for (i = 3; i < line->len; i++) { + unsigned char c = line->buf[i]; + if (c == '\n') + return 1; + if (!isspace(c)) + break; + } + return 0; + } + return 0; +} + +static int is_scissors_line(const char *line) +{ + const char *c; + int scissors = 0, gap = 0; + const char *first_nonblank = NULL, *last_nonblank = NULL; + int visible, perforation = 0, in_perforation = 0; + + for (c = line; *c; c++) { + if (isspace(*c)) { + if (in_perforation) { + perforation++; + gap++; + } + continue; + } + last_nonblank = c; + if (first_nonblank == NULL) + first_nonblank = c; + if (*c == '-') { + in_perforation = 1; + perforation++; + continue; + } + if ((!memcmp(c, ">8", 2) || !memcmp(c, "8<", 2) || + !memcmp(c, ">%", 2) || !memcmp(c, "%<", 2))) { + in_perforation = 1; + perforation += 2; + scissors += 2; + c++; + continue; + } + in_perforation = 0; + } + + /* + * The mark must be at least 8 bytes long (e.g. "-- >8 --"). + * Even though there can be arbitrary cruft on the same line + * (e.g. "cut here"), in order to avoid misidentification, the + * perforation must occupy more than a third of the visible + * width of the line, and dashes and scissors must occupy more + * than half of the perforation. + */ + + if (first_nonblank && last_nonblank) + visible = last_nonblank - first_nonblank + 1; + else + visible = 0; + return (scissors && 8 <= visible && + visible < perforation * 3 && + gap * 2 < perforation); +} + +static void flush_inbody_header_accum(struct mailinfo *mi) +{ + if (!mi->inbody_header_accum.len) + return; + if (!check_header(mi, &mi->inbody_header_accum, mi->s_hdr_data, 0)) + BUG("inbody_header_accum, if not empty, must always contain a valid in-body header"); + strbuf_reset(&mi->inbody_header_accum); +} + +static int check_inbody_header(struct mailinfo *mi, const struct strbuf *line) +{ + if (mi->inbody_header_accum.len && + (line->buf[0] == ' ' || line->buf[0] == '\t')) { + if (mi->use_scissors && is_scissors_line(line->buf)) { + /* + * This is a scissors line; do not consider this line + * as a header continuation line. 
+ */ + flush_inbody_header_accum(mi); + return 0; + } + strbuf_strip_suffix(&mi->inbody_header_accum, "\n"); + strbuf_addbuf(&mi->inbody_header_accum, line); + return 1; + } + + flush_inbody_header_accum(mi); + + if (starts_with(line->buf, ">From") && isspace(line->buf[5])) + return is_format_patch_separator(line->buf + 1, line->len - 1); + if (starts_with(line->buf, "[PATCH]") && isspace(line->buf[7])) { + int i; + for (i = 0; header[i]; i++) + if (!strcmp("Subject", header[i])) { + handle_header(&mi->s_hdr_data[i], line); + return 1; + } + return 0; + } + if (is_inbody_header(mi, line)) { + strbuf_addbuf(&mi->inbody_header_accum, line); + return 1; + } + return 0; +} + +static int handle_commit_msg(struct mailinfo *mi, struct strbuf *line) +{ + assert(!mi->filter_stage); + + if (mi->header_stage) { + if (!line->len || (line->len == 1 && line->buf[0] == '\n')) { + if (mi->inbody_header_accum.len) { + flush_inbody_header_accum(mi); + mi->header_stage = 0; + } + return 0; + } + } + + if (mi->use_inbody_headers && mi->header_stage) { + mi->header_stage = check_inbody_header(mi, line); + if (mi->header_stage) + return 0; + } else + /* Only trim the first (blank) line of the commit message + * when ignoring in-body headers. + */ + mi->header_stage = 0; + + /* normalize the log message to UTF-8. */ + if (convert_to_utf8(mi, line, mi->charset.buf)) + return 0; /* mi->input_error already set */ + + if (mi->use_scissors && is_scissors_line(line->buf)) { + int i; + + strbuf_setlen(&mi->log_message, 0); + mi->header_stage = 1; + + /* + * We may have already read "secondary headers"; purge + * them to give ourselves a clean restart. + */ + for (i = 0; header[i]; i++) { + if (mi->s_hdr_data[i]) + strbuf_release(mi->s_hdr_data[i]); + mi->s_hdr_data[i] = NULL; + } + return 0; + } + + if (patchbreak(line)) { + if (mi->message_id) + strbuf_addf(&mi->log_message, + "Message-Id: %s\n", mi->message_id); + return 1; + } + + strbuf_addbuf(&mi->log_message, line); + return 0; +} + +static void handle_patch(struct mailinfo *mi, const struct strbuf *line) +{ + fwrite(line->buf, 1, line->len, mi->patchfile); + mi->patch_lines++; +} + +static void handle_filter(struct mailinfo *mi, struct strbuf *line) +{ + switch (mi->filter_stage) { + case 0: + if (!handle_commit_msg(mi, line)) + break; + mi->filter_stage++; + /* fallthrough */ + case 1: + handle_patch(mi, line); + break; + } +} + +static int is_rfc2822_header(const struct strbuf *line) +{ + /* + * The section that defines the loosest possible + * field name is "3.6.8 Optional fields". + * + * optional-field = field-name ":" unstructured CRLF + * field-name = 1*ftext + * ftext = %d33-57 / %59-126 + */ + int ch; + char *cp = line->buf; + + /* Count mbox From headers as headers */ + if (starts_with(cp, "From ") || starts_with(cp, ">From ")) + return 1; + + while ((ch = *cp++)) { + if (ch == ':') + return 1; + if ((33 <= ch && ch <= 57) || + (59 <= ch && ch <= 126)) + continue; + break; + } + return 0; +} + +static int read_one_header_line(struct strbuf *line, FILE *in) +{ + struct strbuf continuation = STRBUF_INIT; + + /* Get the first part of the line. */ + if (strbuf_getline_lf(line, in)) + return 0; + + /* + * Is it an empty line or not a valid rfc2822 header? + * If so, stop here, and return false ("not a header") + */ + strbuf_rtrim(line); + if (!line->len || !is_rfc2822_header(line)) { + /* Re-add the newline */ + strbuf_addch(line, '\n'); + return 0; + } + + /* + * Now we need to eat all the continuation lines.. 
+ * Yuck, 2822 header "folding" + */ + for (;;) { + int peek; + + peek = fgetc(in); + if (peek == EOF) + break; + ungetc(peek, in); + if (peek != ' ' && peek != '\t') + break; + if (strbuf_getline_lf(&continuation, in)) + break; + continuation.buf[0] = ' '; + strbuf_rtrim(&continuation); + strbuf_addbuf(line, &continuation); + } + strbuf_release(&continuation); + + return 1; +} + +static int find_boundary(struct mailinfo *mi, struct strbuf *line) +{ + while (!strbuf_getline_lf(line, mi->input)) { + if (*(mi->content_top) && is_multipart_boundary(mi, line)) + return 1; + } + return 0; +} + +static int handle_boundary(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf newline = STRBUF_INIT; + + strbuf_addch(&newline, '\n'); +again: + if (line->len >= (*(mi->content_top))->len + 2 && + !memcmp(line->buf + (*(mi->content_top))->len, "--", 2)) { + /* we hit an end boundary */ + /* pop the current boundary off the stack */ + strbuf_release(*(mi->content_top)); + FREE_AND_NULL(*(mi->content_top)); + + /* technically won't happen as is_multipart_boundary() + will fail first. But just in case.. + */ + if (--mi->content_top < mi->content) { + error("Detected mismatched boundaries, can't recover"); + mi->input_error = -1; + mi->content_top = mi->content; + strbuf_release(&newline); + return 0; + } + handle_filter(mi, &newline); + strbuf_release(&newline); + if (mi->input_error) + return 0; + + /* skip to the next boundary */ + if (!find_boundary(mi, line)) + return 0; + goto again; + } + + /* set some defaults */ + mi->transfer_encoding = TE_DONTCARE; + strbuf_reset(&mi->charset); + + /* slurp in this section's info */ + while (read_one_header_line(line, mi->input)) + check_header(mi, line, mi->p_hdr_data, 0); + + strbuf_release(&newline); + /* replenish line */ + if (strbuf_getline_lf(line, mi->input)) + return 0; + strbuf_addch(line, '\n'); + return 1; +} + +static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line, + struct strbuf *prev) +{ + size_t len = line->len; + const char *rest; + + if (!mi->format_flowed) { + handle_filter(mi, line); + return; + } + + if (line->buf[len - 1] == '\n') { + len--; + if (len && line->buf[len - 1] == '\r') + len--; + } + + /* Keep signature separator as-is. */ + if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) { + if (prev->len) { + handle_filter(mi, prev); + strbuf_reset(prev); + } + handle_filter(mi, line); + return; + } + + /* Unstuff space-stuffed line. */ + if (len && line->buf[0] == ' ') { + strbuf_remove(line, 0, 1); + len--; + } + + /* Save flowed line for later, but without the soft line break. 
*/ + if (len && line->buf[len - 1] == ' ') { + strbuf_add(prev, line->buf, len - !!mi->delsp); + return; + } + + /* Prepend any previous partial lines */ + strbuf_insert(line, 0, prev->buf, prev->len); + strbuf_reset(prev); + + handle_filter(mi, line); +} + +static void handle_body(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf prev = STRBUF_INIT; + + /* Skip up to the first boundary */ + if (*(mi->content_top)) { + if (!find_boundary(mi, line)) + goto handle_body_out; + } + + do { + /* process any boundary lines */ + if (*(mi->content_top) && is_multipart_boundary(mi, line)) { + /* flush any leftover */ + if (prev.len) { + handle_filter(mi, &prev); + strbuf_reset(&prev); + } + if (!handle_boundary(mi, line)) + goto handle_body_out; + } + + /* Unwrap transfer encoding */ + decode_transfer_encoding(mi, line); + + switch (mi->transfer_encoding) { + case TE_BASE64: + case TE_QP: + { + struct strbuf **lines, **it, *sb; + + /* Prepend any previous partial lines */ + strbuf_insert(line, 0, prev.buf, prev.len); + strbuf_reset(&prev); + + /* + * This is a decoded line that may contain + * multiple new lines. Pass only one chunk + * at a time to handle_filter() + */ + lines = strbuf_split(line, '\n'); + for (it = lines; (sb = *it); it++) { + if (*(it + 1) == NULL) /* The last line */ + if (sb->buf[sb->len - 1] != '\n') { + /* Partial line, save it for later. */ + strbuf_addbuf(&prev, sb); + break; + } + handle_filter_flowed(mi, sb, &prev); + } + /* + * The partial chunk is saved in "prev" and will be + * appended by the next iteration of read_line_with_nul(). + */ + strbuf_list_free(lines); + break; + } + default: + handle_filter_flowed(mi, line, &prev); + } + + if (mi->input_error) + break; + } while (!strbuf_getwholeline(line, mi->input, '\n')); + + if (prev.len) + handle_filter(mi, &prev); + + flush_inbody_header_accum(mi); + +handle_body_out: + strbuf_release(&prev); +} + +static void output_header_lines(FILE *fout, const char *hdr, const struct strbuf *data) +{ + const char *sp = data->buf; + while (1) { + char *ep = strchr(sp, '\n'); + int len; + if (!ep) + len = strlen(sp); + else + len = ep - sp; + fprintf(fout, "%s: %.*s\n", hdr, len, sp); + if (!ep) + break; + sp = ep + 1; + } +} + +static void handle_info(struct mailinfo *mi) +{ + struct strbuf *hdr; + int i; + + for (i = 0; header[i]; i++) { + /* only print inbody headers if we output a patch file */ + if (mi->patch_lines && mi->s_hdr_data[i]) + hdr = mi->s_hdr_data[i]; + else if (mi->p_hdr_data[i]) + hdr = mi->p_hdr_data[i]; + else + continue; + + if (!strcmp(header[i], "Subject")) { + if (!mi->keep_subject) { + cleanup_subject(mi, hdr); + cleanup_space(hdr); + } + output_header_lines(mi->output, "Subject", hdr); + } else if (!strcmp(header[i], "From")) { + cleanup_space(hdr); + handle_from(mi, hdr); + fprintf(mi->output, "Author: %s\n", mi->name.buf); + fprintf(mi->output, "Email: %s\n", mi->email.buf); + } else { + cleanup_space(hdr); + fprintf(mi->output, "%s: %s\n", header[i], hdr->buf); + } + } + fprintf(mi->output, "\n"); +} + +int mailinfo(struct mailinfo *mi, const char *msg, const char *patch) +{ + FILE *cmitmsg; + int peek; + struct strbuf line = STRBUF_INIT; + + cmitmsg = fopen(msg, "w"); + if (!cmitmsg) { + perror(msg); + return -1; + } + mi->patchfile = fopen(patch, "w"); + if (!mi->patchfile) { + perror(patch); + fclose(cmitmsg); + return -1; + } + + mi->p_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->p_hdr_data))); + mi->s_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->s_hdr_data))); + + do { + peek = 
fgetc(mi->input); + if (peek == EOF) { + fclose(cmitmsg); + return error("empty patch: '%s'", patch); + } + } while (isspace(peek)); + ungetc(peek, mi->input); + + /* process the email header */ + while (read_one_header_line(&line, mi->input)) + check_header(mi, &line, mi->p_hdr_data, 1); + + handle_body(mi, &line); + fwrite(mi->log_message.buf, 1, mi->log_message.len, cmitmsg); + fclose(cmitmsg); + fclose(mi->patchfile); + + handle_info(mi); + strbuf_release(&line); + return mi->input_error; +} + +static int git_mailinfo_config(const char *var, const char *value, void *mi_) +{ + struct mailinfo *mi = mi_; + + if (!starts_with(var, "mailinfo.")) + return git_default_config(var, value, NULL); + if (!strcmp(var, "mailinfo.scissors")) { + mi->use_scissors = git_config_bool(var, value); + return 0; + } + /* perhaps others here */ + return 0; +} + +void setup_mailinfo(struct mailinfo *mi) +{ + memset(mi, 0, sizeof(*mi)); + strbuf_init(&mi->name, 0); + strbuf_init(&mi->email, 0); + strbuf_init(&mi->charset, 0); + strbuf_init(&mi->log_message, 0); + strbuf_init(&mi->inbody_header_accum, 0); + mi->header_stage = 1; + mi->use_inbody_headers = 1; + mi->content_top = mi->content; + git_config(git_mailinfo_config, mi); +} + +void clear_mailinfo(struct mailinfo *mi) +{ + int i; + + strbuf_release(&mi->name); + strbuf_release(&mi->email); + strbuf_release(&mi->charset); + strbuf_release(&mi->inbody_header_accum); + free(mi->message_id); + + if (mi->p_hdr_data) + for (i = 0; mi->p_hdr_data[i]; i++) + strbuf_release(mi->p_hdr_data[i]); + free(mi->p_hdr_data); + if (mi->s_hdr_data) + for (i = 0; mi->s_hdr_data[i]; i++) + strbuf_release(mi->s_hdr_data[i]); + free(mi->s_hdr_data); + + while (mi->content < mi->content_top) { + free(*(mi->content_top)); + mi->content_top--; + } + + strbuf_release(&mi->log_message); +} diff --git a/t/t4256/1/mailinfo.c.orig b/t/t4256/1/mailinfo.c.orig new file mode 100644 index 0000000000..3281a37d51 --- /dev/null +++ b/t/t4256/1/mailinfo.c.orig @@ -0,0 +1,1185 @@ +#include "cache.h" +#include "config.h" +#include "utf8.h" +#include "strbuf.h" +#include "mailinfo.h" + +static void cleanup_space(struct strbuf *sb) +{ + size_t pos, cnt; + for (pos = 0; pos < sb->len; pos++) { + if (isspace(sb->buf[pos])) { + sb->buf[pos] = ' '; + for (cnt = 0; isspace(sb->buf[pos + cnt + 1]); cnt++); + strbuf_remove(sb, pos + 1, cnt); + } + } +} + +static void get_sane_name(struct strbuf *out, struct strbuf *name, struct strbuf *email) +{ + struct strbuf *src = name; + if (name->len < 3 || 60 < name->len || strchr(name->buf, '@') || + strchr(name->buf, '<') || strchr(name->buf, '>')) + src = email; + else if (name == out) + return; + strbuf_reset(out); + strbuf_addbuf(out, src); +} + +static void parse_bogus_from(struct mailinfo *mi, const struct strbuf *line) +{ + /* John Doe <johndoe> */ + + char *bra, *ket; + /* This is fallback, so do not bother if we already have an + * e-mail address. 
+ */ + if (mi->email.len) + return; + + bra = strchr(line->buf, '<'); + if (!bra) + return; + ket = strchr(bra, '>'); + if (!ket) + return; + + strbuf_reset(&mi->email); + strbuf_add(&mi->email, bra + 1, ket - bra - 1); + + strbuf_reset(&mi->name); + strbuf_add(&mi->name, line->buf, bra - line->buf); + strbuf_trim(&mi->name); + get_sane_name(&mi->name, &mi->name, &mi->email); +} + +static const char *unquote_comment(struct strbuf *outbuf, const char *in) +{ + int c; + int take_next_literally = 0; + + strbuf_addch(outbuf, '('); + + while ((c = *in++) != 0) { + if (take_next_literally == 1) { + take_next_literally = 0; + } else { + switch (c) { + case '\\': + take_next_literally = 1; + continue; + case '(': + in = unquote_comment(outbuf, in); + continue; + case ')': + strbuf_addch(outbuf, ')'); + return in; + } + } + + strbuf_addch(outbuf, c); + } + + return in; +} + +static const char *unquote_quoted_string(struct strbuf *outbuf, const char *in) +{ + int c; + int take_next_literally = 0; + + while ((c = *in++) != 0) { + if (take_next_literally == 1) { + take_next_literally = 0; + } else { + switch (c) { + case '\\': + take_next_literally = 1; + continue; + case '"': + return in; + } + } + + strbuf_addch(outbuf, c); + } + + return in; +} + +static void unquote_quoted_pair(struct strbuf *line) +{ + struct strbuf outbuf; + const char *in = line->buf; + int c; + + strbuf_init(&outbuf, line->len); + + while ((c = *in++) != 0) { + switch (c) { + case '"': + in = unquote_quoted_string(&outbuf, in); + continue; + case '(': + in = unquote_comment(&outbuf, in); + continue; + } + + strbuf_addch(&outbuf, c); + } + + strbuf_swap(&outbuf, line); + strbuf_release(&outbuf); + +} + +static void handle_from(struct mailinfo *mi, const struct strbuf *from) +{ + char *at; + size_t el; + struct strbuf f; + + strbuf_init(&f, from->len); + strbuf_addbuf(&f, from); + + unquote_quoted_pair(&f); + + at = strchr(f.buf, '@'); + if (!at) { + parse_bogus_from(mi, from); + goto out; + } + + /* + * If we already have one email, don't take any confusing lines + */ + if (mi->email.len && strchr(at + 1, '@')) + goto out; + + /* Pick up the string around '@', possibly delimited with <> + * pair; that is the email part. + */ + while (at > f.buf) { + char c = at[-1]; + if (isspace(c)) + break; + if (c == '<') { + at[-1] = ' '; + break; + } + at--; + } + el = strcspn(at, " \n\t\r\v\f>"); + strbuf_reset(&mi->email); + strbuf_add(&mi->email, at, el); + strbuf_remove(&f, at - f.buf, el + (at[el] ? 1 : 0)); + + /* The remainder is name. It could be + * + * - "John Doe <john.doe@xz>" (a), or + * - "john.doe@xz (John Doe)" (b), or + * - "John (zzz) Doe <john.doe@xz> (Comment)" (c) + * + * but we have removed the email part, so + * + * - remove extra spaces which could stay after email (case 'c'), and + * - trim from both ends, possibly removing the () pair at the end + * (cases 'a' and 'b'). + */ + cleanup_space(&f); + strbuf_trim(&f); + if (f.buf[0] == '(' && f.len && f.buf[f.len - 1] == ')') { + strbuf_remove(&f, 0, 1); + strbuf_setlen(&f, f.len - 1); + } + + get_sane_name(&mi->name, &f, &mi->email); +out: + strbuf_release(&f); +} + +static void handle_header(struct strbuf **out, const struct strbuf *line) +{ + if (!*out) { + *out = xmalloc(sizeof(struct strbuf)); + strbuf_init(*out, line->len); + } else + strbuf_reset(*out); + + strbuf_addbuf(*out, line); +} + +/* NOTE NOTE NOTE. We do not claim we do full MIME. We just attempt + * to have enough heuristics to grok MIME encoded patches often found + * on our mailing lists. 
For example, we do not even treat header lines + * case insensitively. + */ + +static int slurp_attr(const char *line, const char *name, struct strbuf *attr) +{ + const char *ends, *ap = strcasestr(line, name); + size_t sz; + + strbuf_setlen(attr, 0); + if (!ap) + return 0; + ap += strlen(name); + if (*ap == '"') { + ap++; + ends = "\""; + } + else + ends = "; \t"; + sz = strcspn(ap, ends); + strbuf_add(attr, ap, sz); + return 1; +} + +static void handle_content_type(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf *boundary = xmalloc(sizeof(struct strbuf)); + strbuf_init(boundary, line->len); + + if (slurp_attr(line->buf, "boundary=", boundary)) { + strbuf_insert(boundary, 0, "--", 2); + if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) { + error("Too many boundaries to handle"); + mi->input_error = -1; + mi->content_top = &mi->content[MAX_BOUNDARIES] - 1; + return; + } + *(mi->content_top) = boundary; + boundary = NULL; + } + slurp_attr(line->buf, "charset=", &mi->charset); + + if (boundary) { + strbuf_release(boundary); + free(boundary); + } +} + +static void handle_content_transfer_encoding(struct mailinfo *mi, + const struct strbuf *line) +{ + if (strcasestr(line->buf, "base64")) + mi->transfer_encoding = TE_BASE64; + else if (strcasestr(line->buf, "quoted-printable")) + mi->transfer_encoding = TE_QP; + else + mi->transfer_encoding = TE_DONTCARE; +} + +static int is_multipart_boundary(struct mailinfo *mi, const struct strbuf *line) +{ + struct strbuf *content_top = *(mi->content_top); + + return ((content_top->len <= line->len) && + !memcmp(line->buf, content_top->buf, content_top->len)); +} + +static void cleanup_subject(struct mailinfo *mi, struct strbuf *subject) +{ + size_t at = 0; + + while (at < subject->len) { + char *pos; + size_t remove; + + switch (subject->buf[at]) { + case 'r': case 'R': + if (subject->len <= at + 3) + break; + if ((subject->buf[at + 1] == 'e' || + subject->buf[at + 1] == 'E') && + subject->buf[at + 2] == ':') { + strbuf_remove(subject, at, 3); + continue; + } + at++; + break; + case ' ': case '\t': case ':': + strbuf_remove(subject, at, 1); + continue; + case '[': + pos = strchr(subject->buf + at, ']'); + if (!pos) + break; + remove = pos - subject->buf + at + 1; + if (!mi->keep_non_patch_brackets_in_subject || + (7 <= remove && + memmem(subject->buf + at, remove, "PATCH", 5))) + strbuf_remove(subject, at, remove); + else { + at += remove; + /* + * If the input had a space after the ], keep + * it. We don't bother with finding the end of + * the space, since we later normalize it + * anyway. 
+ */ + if (isspace(subject->buf[at])) + at += 1; + } + continue; + } + break; + } + strbuf_trim(subject); +} + +#define MAX_HDR_PARSED 10 +static const char *header[MAX_HDR_PARSED] = { + "From","Subject","Date", +}; + +static inline int cmp_header(const struct strbuf *line, const char *hdr) +{ + int len = strlen(hdr); + return !strncasecmp(line->buf, hdr, len) && line->len > len && + line->buf[len] == ':' && isspace(line->buf[len + 1]); +} + +static int is_format_patch_separator(const char *line, int len) +{ + static const char SAMPLE[] = + "From e6807f3efca28b30decfecb1732a56c7db1137ee Mon Sep 17 00:00:00 2001\n"; + const char *cp; + + if (len != strlen(SAMPLE)) + return 0; + if (!skip_prefix(line, "From ", &cp)) + return 0; + if (strspn(cp, "0123456789abcdef") != 40) + return 0; + cp += 40; + return !memcmp(SAMPLE + (cp - line), cp, strlen(SAMPLE) - (cp - line)); +} + +static struct strbuf *decode_q_segment(const struct strbuf *q_seg, int rfc2047) +{ + const char *in = q_seg->buf; + int c; + struct strbuf *out = xmalloc(sizeof(struct strbuf)); + strbuf_init(out, q_seg->len); + + while ((c = *in++) != 0) { + if (c == '=') { + int ch, d = *in; + if (d == '\n' || !d) + break; /* drop trailing newline */ + ch = hex2chr(in); + if (ch >= 0) { + strbuf_addch(out, ch); + in += 2; + continue; + } + /* garbage -- fall through */ + } + if (rfc2047 && c == '_') /* rfc2047 4.2 (2) */ + c = 0x20; + strbuf_addch(out, c); + } + return out; +} + +static struct strbuf *decode_b_segment(const struct strbuf *b_seg) +{ + /* Decode in..ep, possibly in-place to ot */ + int c, pos = 0, acc = 0; + const char *in = b_seg->buf; + struct strbuf *out = xmalloc(sizeof(struct strbuf)); + strbuf_init(out, b_seg->len); + + while ((c = *in++) != 0) { + if (c == '+') + c = 62; + else if (c == '/') + c = 63; + else if ('A' <= c && c <= 'Z') + c -= 'A'; + else if ('a' <= c && c <= 'z') + c -= 'a' - 26; + else if ('0' <= c && c <= '9') + c -= '0' - 52; + else + continue; /* garbage */ + switch (pos++) { + case 0: + acc = (c << 2); + break; + case 1: + strbuf_addch(out, (acc | (c >> 4))); + acc = (c & 15) << 4; + break; + case 2: + strbuf_addch(out, (acc | (c >> 2))); + acc = (c & 3) << 6; + break; + case 3: + strbuf_addch(out, (acc | c)); + acc = pos = 0; + break; + } + } + return out; +} + +static int convert_to_utf8(struct mailinfo *mi, + struct strbuf *line, const char *charset) +{ + char *out; + + if (!mi->metainfo_charset || !charset || !*charset) + return 0; + + if (same_encoding(mi->metainfo_charset, charset)) + return 0; + out = reencode_string(line->buf, mi->metainfo_charset, charset); + if (!out) { + mi->input_error = -1; + return error("cannot convert from %s to %s", + charset, mi->metainfo_charset); + } + strbuf_attach(line, out, strlen(out), strlen(out)); + return 0; +} + +static void decode_header(struct mailinfo *mi, struct strbuf *it) +{ + char *in, *ep, *cp; + struct strbuf outbuf = STRBUF_INIT, *dec; + struct strbuf charset_q = STRBUF_INIT, piecebuf = STRBUF_INIT; + int found_error = 1; /* pessimism */ + + in = it->buf; + while (in - it->buf <= it->len && (ep = strstr(in, "=?")) != NULL) { + int encoding; + strbuf_reset(&charset_q); + strbuf_reset(&piecebuf); + + if (in != ep) { + /* + * We are about to process an encoded-word + * that begins at ep, but there is something + * before the encoded word. 
+ */ + char *scan; + for (scan = in; scan < ep; scan++) + if (!isspace(*scan)) + break; + + if (scan != ep || in == it->buf) { + /* + * We should not lose that "something", + * unless we have just processed an + * encoded-word, and there is only LWS + * before the one we are about to process. + */ + strbuf_add(&outbuf, in, ep - in); + } + } + /* E.g. + * ep : "=?iso-2022-jp?B?GyR...?= foo" + * ep : "=?ISO-8859-1?Q?Foo=FCbar?= baz" + */ + ep += 2; + + if (ep - it->buf >= it->len || !(cp = strchr(ep, '?'))) + goto release_return; + + if (cp + 3 - it->buf > it->len) + goto release_return; + strbuf_add(&charset_q, ep, cp - ep); + + encoding = cp[1]; + if (!encoding || cp[2] != '?') + goto release_return; + ep = strstr(cp + 3, "?="); + if (!ep) + goto release_return; + strbuf_add(&piecebuf, cp + 3, ep - cp - 3); + switch (tolower(encoding)) { + default: + goto release_return; + case 'b': + dec = decode_b_segment(&piecebuf); + break; + case 'q': + dec = decode_q_segment(&piecebuf, 1); + break; + } + if (convert_to_utf8(mi, dec, charset_q.buf)) + goto release_return; + + strbuf_addbuf(&outbuf, dec); + strbuf_release(dec); + free(dec); + in = ep + 2; + } + strbuf_addstr(&outbuf, in); + strbuf_reset(it); + strbuf_addbuf(it, &outbuf); + found_error = 0; +release_return: + strbuf_release(&outbuf); + strbuf_release(&charset_q); + strbuf_release(&piecebuf); + + if (found_error) + mi->input_error = -1; +} + +static int check_header(struct mailinfo *mi, + const struct strbuf *line, + struct strbuf *hdr_data[], int overwrite) +{ + int i, ret = 0, len; + struct strbuf sb = STRBUF_INIT; + + /* search for the interesting parts */ + for (i = 0; header[i]; i++) { + int len = strlen(header[i]); + if ((!hdr_data[i] || overwrite) && cmp_header(line, header[i])) { + /* Unwrap inline B and Q encoding, and optionally + * normalize the meta information to utf8. + */ + strbuf_add(&sb, line->buf + len + 2, line->len - len - 2); + decode_header(mi, &sb); + handle_header(&hdr_data[i], &sb); + ret = 1; + goto check_header_out; + } + } + + /* Content stuff */ + if (cmp_header(line, "Content-Type")) { + len = strlen("Content-Type: "); + strbuf_add(&sb, line->buf + len, line->len - len); + decode_header(mi, &sb); + strbuf_insert(&sb, 0, "Content-Type: ", len); + handle_content_type(mi, &sb); + ret = 1; + goto check_header_out; + } + if (cmp_header(line, "Content-Transfer-Encoding")) { + len = strlen("Content-Transfer-Encoding: "); + strbuf_add(&sb, line->buf + len, line->len - len); + decode_header(mi, &sb); + handle_content_transfer_encoding(mi, &sb); + ret = 1; + goto check_header_out; + } + if (cmp_header(line, "Message-Id")) { + len = strlen("Message-Id: "); + strbuf_add(&sb, line->buf + len, line->len - len); + decode_header(mi, &sb); + if (mi->add_message_id) + mi->message_id = strbuf_detach(&sb, NULL); + ret = 1; + goto check_header_out; + } + +check_header_out: + strbuf_release(&sb); + return ret; +} + +/* + * Returns 1 if the given line or any line beginning with the given line is an + * in-body header (that is, check_header will succeed when passed + * mi->s_hdr_data). 
+ */ +static int is_inbody_header(const struct mailinfo *mi, + const struct strbuf *line) +{ + int i; + for (i = 0; header[i]; i++) + if (!mi->s_hdr_data[i] && cmp_header(line, header[i])) + return 1; + return 0; +} + +static void decode_transfer_encoding(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf *ret; + + switch (mi->transfer_encoding) { + case TE_QP: + ret = decode_q_segment(line, 0); + break; + case TE_BASE64: + ret = decode_b_segment(line); + break; + case TE_DONTCARE: + default: + return; + } + strbuf_reset(line); + strbuf_addbuf(line, ret); + strbuf_release(ret); + free(ret); +} + +static inline int patchbreak(const struct strbuf *line) +{ + size_t i; + + /* Beginning of a "diff -" header? */ + if (starts_with(line->buf, "diff -")) + return 1; + + /* CVS "Index: " line? */ + if (starts_with(line->buf, "Index: ")) + return 1; + + /* + * "--- <filename>" starts patches without headers + * "---<sp>*" is a manual separator + */ + if (line->len < 4) + return 0; + + if (starts_with(line->buf, "---")) { + /* space followed by a filename? */ + if (line->buf[3] == ' ' && !isspace(line->buf[4])) + return 1; + /* Just whitespace? */ + for (i = 3; i < line->len; i++) { + unsigned char c = line->buf[i]; + if (c == '\n') + return 1; + if (!isspace(c)) + break; + } + return 0; + } + return 0; +} + +static int is_scissors_line(const char *line) +{ + const char *c; + int scissors = 0, gap = 0; + const char *first_nonblank = NULL, *last_nonblank = NULL; + int visible, perforation = 0, in_perforation = 0; + + for (c = line; *c; c++) { + if (isspace(*c)) { + if (in_perforation) { + perforation++; + gap++; + } + continue; + } + last_nonblank = c; + if (first_nonblank == NULL) + first_nonblank = c; + if (*c == '-') { + in_perforation = 1; + perforation++; + continue; + } + if ((!memcmp(c, ">8", 2) || !memcmp(c, "8<", 2) || + !memcmp(c, ">%", 2) || !memcmp(c, "%<", 2))) { + in_perforation = 1; + perforation += 2; + scissors += 2; + c++; + continue; + } + in_perforation = 0; + } + + /* + * The mark must be at least 8 bytes long (e.g. "-- >8 --"). + * Even though there can be arbitrary cruft on the same line + * (e.g. "cut here"), in order to avoid misidentification, the + * perforation must occupy more than a third of the visible + * width of the line, and dashes and scissors must occupy more + * than half of the perforation. + */ + + if (first_nonblank && last_nonblank) + visible = last_nonblank - first_nonblank + 1; + else + visible = 0; + return (scissors && 8 <= visible && + visible < perforation * 3 && + gap * 2 < perforation); +} + +static void flush_inbody_header_accum(struct mailinfo *mi) +{ + if (!mi->inbody_header_accum.len) + return; + if (!check_header(mi, &mi->inbody_header_accum, mi->s_hdr_data, 0)) + BUG("inbody_header_accum, if not empty, must always contain a valid in-body header"); + strbuf_reset(&mi->inbody_header_accum); +} + +static int check_inbody_header(struct mailinfo *mi, const struct strbuf *line) +{ + if (mi->inbody_header_accum.len && + (line->buf[0] == ' ' || line->buf[0] == '\t')) { + if (mi->use_scissors && is_scissors_line(line->buf)) { + /* + * This is a scissors line; do not consider this line + * as a header continuation line. 
+ */ + flush_inbody_header_accum(mi); + return 0; + } + strbuf_strip_suffix(&mi->inbody_header_accum, "\n"); + strbuf_addbuf(&mi->inbody_header_accum, line); + return 1; + } + + flush_inbody_header_accum(mi); + + if (starts_with(line->buf, ">From") && isspace(line->buf[5])) + return is_format_patch_separator(line->buf + 1, line->len - 1); + if (starts_with(line->buf, "[PATCH]") && isspace(line->buf[7])) { + int i; + for (i = 0; header[i]; i++) + if (!strcmp("Subject", header[i])) { + handle_header(&mi->s_hdr_data[i], line); + return 1; + } + return 0; + } + if (is_inbody_header(mi, line)) { + strbuf_addbuf(&mi->inbody_header_accum, line); + return 1; + } + return 0; +} + +static int handle_commit_msg(struct mailinfo *mi, struct strbuf *line) +{ + assert(!mi->filter_stage); + + if (mi->header_stage) { + if (!line->len || (line->len == 1 && line->buf[0] == '\n')) { + if (mi->inbody_header_accum.len) { + flush_inbody_header_accum(mi); + mi->header_stage = 0; + } + return 0; + } + } + + if (mi->use_inbody_headers && mi->header_stage) { + mi->header_stage = check_inbody_header(mi, line); + if (mi->header_stage) + return 0; + } else + /* Only trim the first (blank) line of the commit message + * when ignoring in-body headers. + */ + mi->header_stage = 0; + + /* normalize the log message to UTF-8. */ + if (convert_to_utf8(mi, line, mi->charset.buf)) + return 0; /* mi->input_error already set */ + + if (mi->use_scissors && is_scissors_line(line->buf)) { + int i; + + strbuf_setlen(&mi->log_message, 0); + mi->header_stage = 1; + + /* + * We may have already read "secondary headers"; purge + * them to give ourselves a clean restart. + */ + for (i = 0; header[i]; i++) { + if (mi->s_hdr_data[i]) + strbuf_release(mi->s_hdr_data[i]); + mi->s_hdr_data[i] = NULL; + } + return 0; + } + + if (patchbreak(line)) { + if (mi->message_id) + strbuf_addf(&mi->log_message, + "Message-Id: %s\n", mi->message_id); + return 1; + } + + strbuf_addbuf(&mi->log_message, line); + return 0; +} + +static void handle_patch(struct mailinfo *mi, const struct strbuf *line) +{ + fwrite(line->buf, 1, line->len, mi->patchfile); + mi->patch_lines++; +} + +static void handle_filter(struct mailinfo *mi, struct strbuf *line) +{ + switch (mi->filter_stage) { + case 0: + if (!handle_commit_msg(mi, line)) + break; + mi->filter_stage++; + /* fallthrough */ + case 1: + handle_patch(mi, line); + break; + } +} + +static int is_rfc2822_header(const struct strbuf *line) +{ + /* + * The section that defines the loosest possible + * field name is "3.6.8 Optional fields". + * + * optional-field = field-name ":" unstructured CRLF + * field-name = 1*ftext + * ftext = %d33-57 / %59-126 + */ + int ch; + char *cp = line->buf; + + /* Count mbox From headers as headers */ + if (starts_with(cp, "From ") || starts_with(cp, ">From ")) + return 1; + + while ((ch = *cp++)) { + if (ch == ':') + return 1; + if ((33 <= ch && ch <= 57) || + (59 <= ch && ch <= 126)) + continue; + break; + } + return 0; +} + +static int read_one_header_line(struct strbuf *line, FILE *in) +{ + struct strbuf continuation = STRBUF_INIT; + + /* Get the first part of the line. */ + if (strbuf_getline_lf(line, in)) + return 0; + + /* + * Is it an empty line or not a valid rfc2822 header? + * If so, stop here, and return false ("not a header") + */ + strbuf_rtrim(line); + if (!line->len || !is_rfc2822_header(line)) { + /* Re-add the newline */ + strbuf_addch(line, '\n'); + return 0; + } + + /* + * Now we need to eat all the continuation lines.. 
+ * Yuck, 2822 header "folding" + */ + for (;;) { + int peek; + + peek = fgetc(in); + if (peek == EOF) + break; + ungetc(peek, in); + if (peek != ' ' && peek != '\t') + break; + if (strbuf_getline_lf(&continuation, in)) + break; + continuation.buf[0] = ' '; + strbuf_rtrim(&continuation); + strbuf_addbuf(line, &continuation); + } + strbuf_release(&continuation); + + return 1; +} + +static int find_boundary(struct mailinfo *mi, struct strbuf *line) +{ + while (!strbuf_getline_lf(line, mi->input)) { + if (*(mi->content_top) && is_multipart_boundary(mi, line)) + return 1; + } + return 0; +} + +static int handle_boundary(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf newline = STRBUF_INIT; + + strbuf_addch(&newline, '\n'); +again: + if (line->len >= (*(mi->content_top))->len + 2 && + !memcmp(line->buf + (*(mi->content_top))->len, "--", 2)) { + /* we hit an end boundary */ + /* pop the current boundary off the stack */ + strbuf_release(*(mi->content_top)); + FREE_AND_NULL(*(mi->content_top)); + + /* technically won't happen as is_multipart_boundary() + will fail first. But just in case.. + */ + if (--mi->content_top < mi->content) { + error("Detected mismatched boundaries, can't recover"); + mi->input_error = -1; + mi->content_top = mi->content; + strbuf_release(&newline); + return 0; + } + handle_filter(mi, &newline); + strbuf_release(&newline); + if (mi->input_error) + return 0; + + /* skip to the next boundary */ + if (!find_boundary(mi, line)) + return 0; + goto again; + } + + /* set some defaults */ + mi->transfer_encoding = TE_DONTCARE; + strbuf_reset(&mi->charset); + + /* slurp in this section's info */ + while (read_one_header_line(line, mi->input)) + check_header(mi, line, mi->p_hdr_data, 0); + + strbuf_release(&newline); + /* replenish line */ + if (strbuf_getline_lf(line, mi->input)) + return 0; + strbuf_addch(line, '\n'); + return 1; +} + +static void handle_body(struct mailinfo *mi, struct strbuf *line) +{ + struct strbuf prev = STRBUF_INIT; + + /* Skip up to the first boundary */ + if (*(mi->content_top)) { + if (!find_boundary(mi, line)) + goto handle_body_out; + } + + do { + /* process any boundary lines */ + if (*(mi->content_top) && is_multipart_boundary(mi, line)) { + /* flush any leftover */ + if (prev.len) { + handle_filter(mi, &prev); + strbuf_reset(&prev); + } + if (!handle_boundary(mi, line)) + goto handle_body_out; + } + + /* Unwrap transfer encoding */ + decode_transfer_encoding(mi, line); + + switch (mi->transfer_encoding) { + case TE_BASE64: + case TE_QP: + { + struct strbuf **lines, **it, *sb; + + /* Prepend any previous partial lines */ + strbuf_insert(line, 0, prev.buf, prev.len); + strbuf_reset(&prev); + + /* + * This is a decoded line that may contain + * multiple new lines. Pass only one chunk + * at a time to handle_filter() + */ + lines = strbuf_split(line, '\n'); + for (it = lines; (sb = *it); it++) { + if (*(it + 1) == NULL) /* The last line */ + if (sb->buf[sb->len - 1] != '\n') { + /* Partial line, save it for later. */ + strbuf_addbuf(&prev, sb); + break; + } + handle_filter(mi, sb); + } + /* + * The partial chunk is saved in "prev" and will be + * appended by the next iteration of read_line_with_nul(). 
+ */ + strbuf_list_free(lines); + break; + } + default: + handle_filter(mi, line); + } + + if (mi->input_error) + break; + } while (!strbuf_getwholeline(line, mi->input, '\n')); + + flush_inbody_header_accum(mi); + +handle_body_out: + strbuf_release(&prev); +} + +static void output_header_lines(FILE *fout, const char *hdr, const struct strbuf *data) +{ + const char *sp = data->buf; + while (1) { + char *ep = strchr(sp, '\n'); + int len; + if (!ep) + len = strlen(sp); + else + len = ep - sp; + fprintf(fout, "%s: %.*s\n", hdr, len, sp); + if (!ep) + break; + sp = ep + 1; + } +} + +static void handle_info(struct mailinfo *mi) +{ + struct strbuf *hdr; + int i; + + for (i = 0; header[i]; i++) { + /* only print inbody headers if we output a patch file */ + if (mi->patch_lines && mi->s_hdr_data[i]) + hdr = mi->s_hdr_data[i]; + else if (mi->p_hdr_data[i]) + hdr = mi->p_hdr_data[i]; + else + continue; + + if (!strcmp(header[i], "Subject")) { + if (!mi->keep_subject) { + cleanup_subject(mi, hdr); + cleanup_space(hdr); + } + output_header_lines(mi->output, "Subject", hdr); + } else if (!strcmp(header[i], "From")) { + cleanup_space(hdr); + handle_from(mi, hdr); + fprintf(mi->output, "Author: %s\n", mi->name.buf); + fprintf(mi->output, "Email: %s\n", mi->email.buf); + } else { + cleanup_space(hdr); + fprintf(mi->output, "%s: %s\n", header[i], hdr->buf); + } + } + fprintf(mi->output, "\n"); +} + +int mailinfo(struct mailinfo *mi, const char *msg, const char *patch) +{ + FILE *cmitmsg; + int peek; + struct strbuf line = STRBUF_INIT; + + cmitmsg = fopen(msg, "w"); + if (!cmitmsg) { + perror(msg); + return -1; + } + mi->patchfile = fopen(patch, "w"); + if (!mi->patchfile) { + perror(patch); + fclose(cmitmsg); + return -1; + } + + mi->p_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->p_hdr_data))); + mi->s_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->s_hdr_data))); + + do { + peek = fgetc(mi->input); + if (peek == EOF) { + fclose(cmitmsg); + return error("empty patch: '%s'", patch); + } + } while (isspace(peek)); + ungetc(peek, mi->input); + + /* process the email header */ + while (read_one_header_line(&line, mi->input)) + check_header(mi, &line, mi->p_hdr_data, 1); + + handle_body(mi, &line); + fwrite(mi->log_message.buf, 1, mi->log_message.len, cmitmsg); + fclose(cmitmsg); + fclose(mi->patchfile); + + handle_info(mi); + strbuf_release(&line); + return mi->input_error; +} + +static int git_mailinfo_config(const char *var, const char *value, void *mi_) +{ + struct mailinfo *mi = mi_; + + if (!starts_with(var, "mailinfo.")) + return git_default_config(var, value, NULL); + if (!strcmp(var, "mailinfo.scissors")) { + mi->use_scissors = git_config_bool(var, value); + return 0; + } + /* perhaps others here */ + return 0; +} + +void setup_mailinfo(struct mailinfo *mi) +{ + memset(mi, 0, sizeof(*mi)); + strbuf_init(&mi->name, 0); + strbuf_init(&mi->email, 0); + strbuf_init(&mi->charset, 0); + strbuf_init(&mi->log_message, 0); + strbuf_init(&mi->inbody_header_accum, 0); + mi->header_stage = 1; + mi->use_inbody_headers = 1; + mi->content_top = mi->content; + git_config(git_mailinfo_config, mi); +} + +void clear_mailinfo(struct mailinfo *mi) +{ + int i; + + strbuf_release(&mi->name); + strbuf_release(&mi->email); + strbuf_release(&mi->charset); + strbuf_release(&mi->inbody_header_accum); + free(mi->message_id); + + if (mi->p_hdr_data) + for (i = 0; mi->p_hdr_data[i]; i++) + strbuf_release(mi->p_hdr_data[i]); + free(mi->p_hdr_data); + if (mi->s_hdr_data) + for (i = 0; mi->s_hdr_data[i]; i++) + 
strbuf_release(mi->s_hdr_data[i]); + free(mi->s_hdr_data); + + while (mi->content < mi->content_top) { + free(*(mi->content_top)); + mi->content_top--; + } + + strbuf_release(&mi->log_message); +} diff --git a/t/t4256/1/patch b/t/t4256/1/patch new file mode 100644 index 0000000000..bd0d8b0251 --- /dev/null +++ b/t/t4256/1/patch @@ -0,0 +1,129 @@ +From: A <author@example.com> +Subject: [PATCH] mailinfo: support format=flowed +Message-ID: <aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa@example.com> +Date: Sat, 25 Aug 2018 22:04:50 +0200 +User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:60.0) Gecko/20100101 + Thunderbird/60.0 +MIME-Version: 1.0 +Content-Type: text/plain; charset=utf-8; format=flowed +Content-Language: en-US +Content-Transfer-Encoding: 7bit + +--- + mailinfo.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 62 insertions(+), 2 deletions(-) + +diff --git a/mailinfo.c b/mailinfo.c +index 3281a37d51..b395adbdf2 100644 +--- a/mailinfo.c ++++ b/mailinfo.c +@@ -237,11 +237,22 @@ static int slurp_attr(const char *line, const char +*name, struct strbuf *attr) + return 1; + } + ++static int has_attr_value(const char *line, const char *name, const +char *value) ++{ ++ struct strbuf sb = STRBUF_INIT; ++ int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value); ++ strbuf_release(&sb); ++ return rc; ++} ++ + static void handle_content_type(struct mailinfo *mi, struct strbuf *line) + { + struct strbuf *boundary = xmalloc(sizeof(struct strbuf)); + strbuf_init(boundary, line->len); + ++ mi->format_flowed = has_attr_value(line->buf, "format=", "flowed"); ++ mi->delsp = has_attr_value(line->buf, "delsp=", "yes"); ++ + if (slurp_attr(line->buf, "boundary=", boundary)) { + strbuf_insert(boundary, 0, "--", 2); + if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) { +@@ -964,6 +975,52 @@ static int handle_boundary(struct mailinfo *mi, +struct strbuf *line) + return 1; + } + ++static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line, ++ struct strbuf *prev) ++{ ++ size_t len = line->len; ++ const char *rest; ++ ++ if (!mi->format_flowed) { ++ handle_filter(mi, line); ++ return; ++ } ++ ++ if (line->buf[len - 1] == '\n') { ++ len--; ++ if (len && line->buf[len - 1] == '\r') ++ len--; ++ } ++ ++ /* Keep signature separator as-is. */ ++ if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) { ++ if (prev->len) { ++ handle_filter(mi, prev); ++ strbuf_reset(prev); ++ } ++ handle_filter(mi, line); ++ return; ++ } ++ ++ /* Unstuff space-stuffed line. */ ++ if (len && line->buf[0] == ' ') { ++ strbuf_remove(line, 0, 1); ++ len--; ++ } ++ ++ /* Save flowed line for later, but without the soft line break. 
*/ ++ if (len && line->buf[len - 1] == ' ') { ++ strbuf_add(prev, line->buf, len - !!mi->delsp); ++ return; ++ } ++ ++ /* Prepend any previous partial lines */ ++ strbuf_insert(line, 0, prev->buf, prev->len); ++ strbuf_reset(prev); ++ ++ handle_filter(mi, line); ++} ++ + static void handle_body(struct mailinfo *mi, struct strbuf *line) + { + struct strbuf prev = STRBUF_INIT; +@@ -1012,7 +1069,7 @@ static void handle_body(struct mailinfo *mi, +struct strbuf *line) + strbuf_addbuf(&prev, sb); + break; + } +- handle_filter(mi, sb); ++ handle_filter_flowed(mi, sb, &prev); + } + /* + * The partial chunk is saved in "prev" and will be +@@ -1022,13 +1079,16 @@ static void handle_body(struct mailinfo *mi, +struct strbuf *line) + break; + } + default: +- handle_filter(mi, line); ++ handle_filter_flowed(mi, line, &prev); + } + + if (mi->input_error) + break; + } while (!strbuf_getwholeline(line, mi->input, '\n')); + ++ if (prev.len) ++ handle_filter(mi, &prev); ++ + flush_inbody_header_accum(mi); + + handle_body_out: +-- +2.18.0 diff --git a/t/t5000-tar-tree.sh b/t/t5000-tar-tree.sh index 2a97b27b0a..602bfd9574 100755 --- a/t/t5000-tar-tree.sh +++ b/t/t5000-tar-tree.sh @@ -206,6 +206,12 @@ test_expect_success 'git archive with --output, override inferred format' ' test_cmp_bin b.tar d4.zip ' +test_expect_success GZIP 'git archive with --output and --remote creates .tgz' ' + git archive --output=d5.tgz --remote=. HEAD && + gzip -d -c <d5.tgz >d5.tar && + test_cmp_bin b.tar d5.tar +' + test_expect_success 'git archive --list outside of a git repo' ' nongit git archive --list ' diff --git a/t/t5003-archive-zip.sh b/t/t5003-archive-zip.sh index 55c7870997..106eddbd85 100755 --- a/t/t5003-archive-zip.sh +++ b/t/t5003-archive-zip.sh @@ -158,11 +158,16 @@ test_expect_success 'git archive --format=zip with --output' \ 'git archive --format=zip --output=d2.zip HEAD && test_cmp_bin d.zip d2.zip' -test_expect_success 'git archive with --output, inferring format' ' +test_expect_success 'git archive with --output, inferring format (local)' ' git archive --output=d3.zip HEAD && test_cmp_bin d.zip d3.zip ' +test_expect_success 'git archive with --output, inferring format (remote)' ' + git archive --remote=. --output=d4.zip HEAD && + test_cmp_bin d.zip d4.zip +' + test_expect_success \ 'git archive --format=zip with prefix' \ 'git archive --format=zip --prefix=prefix/ HEAD >e.zip' diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh index 6c620cd540..410a09b0dd 100755 --- a/t/t5300-pack-object.sh +++ b/t/t5300-pack-object.sh @@ -468,29 +468,32 @@ test_expect_success 'pack-objects in too-many-packs mode' ' git fsck ' -# -# WARNING! -# -# The following test is destructive. Please keep the next -# two tests at the end of this file. 
-# - -test_expect_success \ - 'fake a SHA1 hash collision' \ - 'long_a=$(git hash-object a | sed -e "s!^..!&/!") && - long_b=$(git hash-object b | sed -e "s!^..!&/!") && - test -f .git/objects/$long_b && - cp -f .git/objects/$long_a \ - .git/objects/$long_b' +test_expect_success 'setup: fake a SHA1 hash collision' ' + git init corrupt && + ( + cd corrupt && + long_a=$(git hash-object -w ../a | sed -e "s!^..!&/!") && + long_b=$(git hash-object -w ../b | sed -e "s!^..!&/!") && + test -f .git/objects/$long_b && + cp -f .git/objects/$long_a \ + .git/objects/$long_b + ) +' -test_expect_success \ - 'make sure index-pack detects the SHA1 collision' \ - 'test_must_fail git index-pack -o bad.idx test-3.pack 2>msg && - test_i18ngrep "SHA1 COLLISION FOUND" msg' +test_expect_success 'make sure index-pack detects the SHA1 collision' ' + ( + cd corrupt && + test_must_fail git index-pack -o ../bad.idx ../test-3.pack 2>msg && + test_i18ngrep "SHA1 COLLISION FOUND" msg + ) +' -test_expect_success \ - 'make sure index-pack detects the SHA1 collision (large blobs)' \ - 'test_must_fail git -c core.bigfilethreshold=1 index-pack -o bad.idx test-3.pack 2>msg && - test_i18ngrep "SHA1 COLLISION FOUND" msg' +test_expect_success 'make sure index-pack detects the SHA1 collision (large blobs)' ' + ( + cd corrupt && + test_must_fail git -c core.bigfilethreshold=1 index-pack -o ../bad.idx ../test-3.pack 2>msg && + test_i18ngrep "SHA1 COLLISION FOUND" msg + ) +' test_done diff --git a/t/t5303-pack-corruption-resilience.sh b/t/t5303-pack-corruption-resilience.sh index 3634e258f8..41e6dc4dcf 100755 --- a/t/t5303-pack-corruption-resilience.sh +++ b/t/t5303-pack-corruption-resilience.sh @@ -311,4 +311,93 @@ test_expect_success \ test_must_fail git cat-file blob $blob_2 > /dev/null && test_must_fail git cat-file blob $blob_3 > /dev/null' +# \0 - empty base +# \1 - one byte in result +# \1 - one literal byte (X) +test_expect_success \ + 'apply good minimal delta' \ + 'printf "\0\1\1X" > minimal_delta && + test-tool delta -p /dev/null minimal_delta /dev/null' + +# \0 - empty base +# \1 - 1 byte in result +# \2 - two literal bytes (one too many) +test_expect_success \ + 'apply delta with too many literal bytes' \ + 'printf "\0\1\2XX" > too_big_literal && + test_must_fail test-tool delta -p /dev/null too_big_literal /dev/null' + +# \4 - four bytes in base +# \1 - one byte in result +# \221 - copy, one byte offset, one byte size +# \0 - copy from offset 0 +# \2 - copy two bytes (one too many) +test_expect_success \ + 'apply delta with too many copied bytes' \ + 'printf "\4\1\221\0\2" > too_big_copy && + printf base >base && + test_must_fail test-tool delta -p base too_big_copy /dev/null' + +# \0 - empty base +# \2 - two bytes in result +# \2 - two literal bytes (we are short one) +test_expect_success \ + 'apply delta with too few literal bytes' \ + 'printf "\0\2\2X" > truncated_delta && + test_must_fail test-tool delta -p /dev/null truncated_delta /dev/null' + +# \0 - empty base +# \1 - one byte in result +# \221 - copy, one byte offset, one byte size +# \0 - copy from offset 0 +# \1 - copy one byte (we are short one) +test_expect_success \ + 'apply delta with too few bytes in base' \ + 'printf "\0\1\221\0\1" > truncated_base && + test_must_fail test-tool delta -p /dev/null truncated_base /dev/null' + +# \4 - four bytes in base +# \2 - two bytes in result +# \1 - one literal byte (X) +# \221 - copy, one byte offset, one byte size +# (offset/size missing) +# +# Note that the literal byte is necessary to get past the uninteresting 
minimum +# delta size check. +test_expect_success \ + 'apply delta with truncated copy parameters' \ + 'printf "\4\2\1X\221" > truncated_copy_delta && + printf base >base && + test_must_fail test-tool delta -p base truncated_copy_delta /dev/null' + +# \0 - empty base +# \1 - one byte in result +# \1 - one literal byte (X) +# \1 - trailing garbage command +test_expect_success \ + 'apply delta with trailing garbage literal' \ + 'printf "\0\1\1X\1" > tail_garbage_literal && + test_must_fail test-tool delta -p /dev/null tail_garbage_literal /dev/null' + +# \4 - four bytes in base +# \1 - one byte in result +# \1 - one literal byte (X) +# \221 - copy, one byte offset, one byte size +# \0 - copy from offset 0 +# \1 - copy 1 byte +test_expect_success \ + 'apply delta with trailing garbage copy' \ + 'printf "\4\1\1X\221\0\1" > tail_garbage_copy && + printf base >base && + test_must_fail test-tool delta -p /dev/null tail_garbage_copy /dev/null' + +# \0 - empty base +# \1 - one byte in result +# \1 - one literal byte (X) +# \0 - bogus opcode +test_expect_success \ + 'apply delta with trailing garbage opcode' \ + 'printf "\0\1\1X\0" > tail_garbage_opcode && + test_must_fail test-tool delta -p /dev/null tail_garbage_opcode /dev/null' + test_done diff --git a/t/t5307-pack-missing-commit.sh b/t/t5307-pack-missing-commit.sh index ae52a1882d..dacb440b27 100755 --- a/t/t5307-pack-missing-commit.sh +++ b/t/t5307-pack-missing-commit.sh @@ -24,11 +24,11 @@ test_expect_success 'check corruption' ' ' test_expect_success 'rev-list notices corruption (1)' ' - test_must_fail git rev-list HEAD + test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list HEAD ' test_expect_success 'rev-list notices corruption (2)' ' - test_must_fail git rev-list --objects HEAD + test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list --objects HEAD ' test_expect_success 'pack-objects notices corruption' ' diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh index 7bff7923f2..82d7f7f6a5 100755 --- a/t/t5310-pack-bitmaps.sh +++ b/t/t5310-pack-bitmaps.sh @@ -191,6 +191,7 @@ test_expect_success 'pack-objects respects --honor-pack-keep (local bitmapped pa test_expect_success 'pack-objects respects --local (non-local bitmapped pack)' ' mv .git/objects/pack/$packbitmap.* alt.git/objects/pack/ && + rm -f .git/objects/pack/multi-pack-index && test_when_finished "mv alt.git/objects/pack/$packbitmap.* .git/objects/pack/" && echo HEAD | git pack-objects --local --stdout --revs >3b.pack && git index-pack 3b.pack && @@ -342,4 +343,97 @@ test_expect_success 'truncated bitmap fails gracefully' ' test_i18ngrep corrupt stderr ' +# have_delta <obj> <expected_base> +# +# Note that because this relies on cat-file, it might find _any_ copy of an +# object in the repository. The caller is responsible for making sure +# there's only one (e.g., via "repack -ad", or having just fetched a copy). 
+have_delta () { + echo $2 >expect && + echo $1 | git cat-file --batch-check="%(deltabase)" >actual && + test_cmp expect actual +} + +# Create a state of history with these properties: +# +# - refs that allow a client to fetch some new history, while sharing some old +# history with the server; we use branches delta-reuse-old and +# delta-reuse-new here +# +# - the new history contains an object that is stored on the server as a delta +# against a base that is in the old history +# +# - the base object is not immediately reachable from the tip of the old +# history; finding it would involve digging down through history we know the +# other side has +# +# This should result in a state where fetching from old->new would not +# traditionally reuse the on-disk delta (because we'd have to dig to realize +# that the client has it), but we will do so if bitmaps can tell us cheaply +# that the other side has it. +test_expect_success 'set up thin delta-reuse parent' ' + # This first commit contains the buried base object. + test-tool genrandom delta 16384 >file && + git add file && + git commit -m "delta base" && + base=$(git rev-parse --verify HEAD:file) && + + # These intermediate commits bury the base back in history. + # This becomes the "old" state. + for i in 1 2 3 4 5 + do + echo $i >file && + git commit -am "intermediate $i" || return 1 + done && + git branch delta-reuse-old && + + # And now our new history has a delta against the buried base. Note + # that this must be smaller than the original file, since pack-objects + # prefers to create deltas from smaller objects to larger. + test-tool genrandom delta 16300 >file && + git commit -am "delta result" && + delta=$(git rev-parse --verify HEAD:file) && + git branch delta-reuse-new && + + # Repack with bitmaps and double check that we have the expected delta + # relationship. + git repack -adb && + have_delta $delta $base +' + +# Now we can sanity-check the non-bitmap behavior (that the server is not able +# to reuse the delta). This isn't strictly something we care about, so this +# test could be scrapped in the future. But it makes sure that the next test is +# actually triggering the feature we want. +# +# Note that our tools for working with on-the-wire "thin" packs are limited. So +# we actually perform the fetch, retain the resulting pack, and inspect the +# result. +test_expect_success 'fetch without bitmaps ignores delta against old base' ' + test_config pack.usebitmaps false && + test_when_finished "rm -rf client.git" && + git init --bare client.git && + ( + cd client.git && + git config transfer.unpackLimit 1 && + git fetch .. delta-reuse-old:delta-reuse-old && + git fetch .. delta-reuse-new:delta-reuse-new && + have_delta $delta $ZERO_OID + ) +' + +# And do the same for the bitmap case, where we do expect to find the delta. +test_expect_success 'fetch with bitmaps can reuse old base' ' + test_config pack.usebitmaps true && + test_when_finished "rm -rf client.git" && + git init --bare client.git && + ( + cd client.git && + git config transfer.unpackLimit 1 && + git fetch .. delta-reuse-old:delta-reuse-old && + git fetch .. 
delta-reuse-new:delta-reuse-new && + have_delta $delta $base + ) +' + test_done diff --git a/t/t5317-pack-objects-filter-objects.sh b/t/t5317-pack-objects-filter-objects.sh index 6710c8bc8c..24541ea137 100755 --- a/t/t5317-pack-objects-filter-objects.sh +++ b/t/t5317-pack-objects-filter-objects.sh @@ -21,17 +21,21 @@ test_expect_success 'setup r1' ' test_expect_success 'verify blob count in normal packfile' ' git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \ - | awk -f print_2.awk \ - | sort >expected && + >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r1 pack-objects --rev --stdout >all.pack <<-EOF && HEAD EOF git -C r1 index-pack ../all.pack && - git -C r1 verify-pack -v ../all.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r1 verify-pack -v ../all.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:none packfile has no blobs' ' @@ -39,24 +43,69 @@ test_expect_success 'verify blob:none packfile has no blobs' ' HEAD EOF git -C r1 index-pack ../filter.pack && - git -C r1 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && + + git -C r1 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + nr=$(wc -l <observed) && test 0 -eq $nr ' test_expect_success 'verify normal and blob:none packfiles have same commits/trees' ' - git -C r1 verify-pack -v ../all.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >expected && - git -C r1 verify-pack -v ../filter.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + git -C r1 verify-pack -v ../all.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >expected && + + git -C r1 verify-pack -v ../filter.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed +' + +test_expect_success 'get an error for missing tree object' ' + git init r5 && + echo foo >r5/foo && + git -C r5 add foo && + git -C r5 commit -m "foo" && + del=$(git -C r5 rev-parse HEAD^{tree} | sed "s|..|&/|") && + rm r5/.git/objects/$del && + test_must_fail git -C r5 pack-objects --rev --stdout 2>bad_tree <<-EOF && + HEAD + EOF + grep "bad tree object" bad_tree +' + +test_expect_success 'setup for tests of tree:0' ' + mkdir r1/subtree && + echo "This is a file in a subtree" >r1/subtree/file && + git -C r1 add subtree/file && + git -C r1 commit -m subtree +' + +test_expect_success 'verify tree:0 packfile has no blobs or trees' ' + git -C r1 pack-objects --rev --stdout --filter=tree:0 >commitsonly.pack <<-EOF && + HEAD + EOF + git -C r1 index-pack ../commitsonly.pack && + git -C r1 verify-pack -v ../commitsonly.pack >objs && + ! grep -E "tree|blob" objs +' + +test_expect_success 'grab tree directly when using tree:0' ' + # We should get the tree specified directly but not its blobs or subtrees. + git -C r1 pack-objects --rev --stdout --filter=tree:0 >commitsonly.pack <<-EOF && + HEAD: + EOF + git -C r1 index-pack ../commitsonly.pack && + git -C r1 verify-pack -v ../commitsonly.pack >objs && + awk "/tree|blob/{print \$1}" objs >trees_and_blobs && + git -C r1 rev-parse HEAD: >expected && + test_cmp expected trees_and_blobs ' # Test blob:limit=<n>[kmg] filter. 
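# A rough illustration, not from the patch, of the filter spec the r2 tests
# below exercise: blob:limit=<n> omits blobs larger than <n> bytes, and <n>
# may carry a k, m, or g suffix (blob:limit=1k is the same as blob:limit=1024).
# The same spec works for client-side traversals as well:
git -C r2 rev-list --objects --filter=blob:limit=1k HEAD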
@@ -75,18 +124,21 @@ test_expect_success 'setup r2' ' ' test_expect_success 'verify blob count in normal packfile' ' - git -C r2 ls-files -s large.1000 large.10000 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r2 ls-files -s large.1000 large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r2 pack-objects --rev --stdout >all.pack <<-EOF && HEAD EOF git -C r2 index-pack ../all.pack && - git -C r2 verify-pack -v ../all.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r2 verify-pack -v ../all.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=500 omits all blobs' ' @@ -94,10 +146,12 @@ test_expect_success 'verify blob:limit=500 omits all blobs' ' HEAD EOF git -C r2 index-pack ../filter.pack && - git -C r2 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && + + git -C r2 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + nr=$(wc -l <observed) && test 0 -eq $nr ' @@ -107,100 +161,119 @@ test_expect_success 'verify blob:limit=1000' ' HEAD EOF git -C r2 index-pack ../filter.pack && - git -C r2 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && + + git -C r2 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + nr=$(wc -l <observed) && test 0 -eq $nr ' test_expect_success 'verify blob:limit=1001' ' - git -C r2 ls-files -s large.1000 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r2 ls-files -s large.1000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r2 pack-objects --rev --stdout --filter=blob:limit=1001 >filter.pack <<-EOF && HEAD EOF git -C r2 index-pack ../filter.pack && - git -C r2 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r2 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=10001' ' - git -C r2 ls-files -s large.1000 large.10000 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r2 ls-files -s large.1000 large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r2 pack-objects --rev --stdout --filter=blob:limit=10001 >filter.pack <<-EOF && HEAD EOF git -C r2 index-pack ../filter.pack && - git -C r2 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r2 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=1k' ' - git -C r2 ls-files -s large.1000 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r2 ls-files -s large.1000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF && HEAD EOF git -C r2 index-pack ../filter.pack && - git -C r2 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r2 verify-pack -v 
../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify explicitly specifying oversized blob in input' ' - git -C r2 ls-files -s large.1000 large.10000 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r2 ls-files -s large.1000 large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF && HEAD $(git -C r2 rev-parse HEAD:large.10000) EOF git -C r2 index-pack ../filter.pack && - git -C r2 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r2 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=1m' ' - git -C r2 ls-files -s large.1000 large.10000 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r2 ls-files -s large.1000 large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r2 pack-objects --rev --stdout --filter=blob:limit=1m >filter.pack <<-EOF && HEAD EOF git -C r2 index-pack ../filter.pack && - git -C r2 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r2 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify normal and blob:limit packfiles have same commits/trees' ' - git -C r2 verify-pack -v ../all.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >expected && - git -C r2 verify-pack -v ../filter.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + git -C r2 verify-pack -v ../all.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >expected && + + git -C r2 verify-pack -v ../filter.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' # Test sparse:path=<path> filter. 
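# Sketch only, not part of the patch: sparse:path=<path> reads a
# sparse-checkout style pattern file from the filesystem and keeps only the
# blobs those patterns select. Assuming a pattern file named "patterns"
# sitting next to the r3 repository:
git -C r3 pack-objects --rev --stdout --filter=sparse:path=../patterns >sparse.pack <<-EOF
HEAD
EOF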
@@ -225,71 +298,85 @@ test_expect_success 'setup r3' ' test_expect_success 'verify blob count in normal packfile' ' git -C r3 ls-files -s sparse1 sparse2 dir1/sparse1 dir1/sparse2 \ - | awk -f print_2.awk \ - | sort >expected && + >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r3 pack-objects --rev --stdout >all.pack <<-EOF && HEAD EOF git -C r3 index-pack ../all.pack && - git -C r3 verify-pack -v ../all.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r3 verify-pack -v ../all.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify sparse:path=pattern1' ' - git -C r3 ls-files -s dir1/sparse1 dir1/sparse2 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r3 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern1 >filter.pack <<-EOF && HEAD EOF git -C r3 index-pack ../filter.pack && - git -C r3 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r3 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify normal and sparse:path=pattern1 packfiles have same commits/trees' ' - git -C r3 verify-pack -v ../all.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >expected && - git -C r3 verify-pack -v ../filter.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + git -C r3 verify-pack -v ../all.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >expected && + + git -C r3 verify-pack -v ../filter.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify sparse:path=pattern2' ' - git -C r3 ls-files -s sparse1 dir1/sparse1 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r3 ls-files -s sparse1 dir1/sparse1 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern2 >filter.pack <<-EOF && HEAD EOF git -C r3 index-pack ../filter.pack && - git -C r3 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r3 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify normal and sparse:path=pattern2 packfiles have same commits/trees' ' - git -C r3 verify-pack -v ../all.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >expected && - git -C r3 verify-pack -v ../filter.pack \ - | grep -E "commit|tree" \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + git -C r3 verify-pack -v ../all.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >expected && + + git -C r3 verify-pack -v ../filter.pack >verify_result && + grep -E "commit|tree" verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' # Test sparse:oid=<oid-ish> filter. 
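# Sketch, not from the patch: sparse:oid=<oid-ish> is the in-repository
# variant of the sparse filter; the pattern list is read from a blob named
# either by its OID or by an oid-ish expression such as <branch>:<path>.
# Both spellings are exercised by the r4 tests below:
oid=$(git -C r4 rev-parse master:pattern) &&
git -C r4 pack-objects --rev --stdout --filter=sparse:oid=$oid >byoid.pack <<-EOF
HEAD
EOF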
@@ -313,48 +400,58 @@ test_expect_success 'setup r4' ' test_expect_success 'verify blob count in normal packfile' ' git -C r4 ls-files -s pattern sparse1 sparse2 dir1/sparse1 dir1/sparse2 \ - | awk -f print_2.awk \ - | sort >expected && + >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r4 pack-objects --rev --stdout >all.pack <<-EOF && HEAD EOF git -C r4 index-pack ../all.pack && - git -C r4 verify-pack -v ../all.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r4 verify-pack -v ../all.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify sparse:oid=OID' ' - git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + oid=$(git -C r4 ls-files -s pattern | awk -f print_2.awk) && git -C r4 pack-objects --rev --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF && HEAD EOF git -C r4 index-pack ../filter.pack && - git -C r4 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r4 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify sparse:oid=oid-ish' ' - git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + git -C r4 pack-objects --rev --stdout --filter=sparse:oid=master:pattern >filter.pack <<-EOF && HEAD EOF git -C r4 index-pack ../filter.pack && - git -C r4 verify-pack -v ../filter.pack \ - | grep blob \ - | awk -f print_1.awk \ - | sort >observed && - test_cmp observed expected + + git -C r4 verify-pack -v ../filter.pack >verify_result && + grep blob verify_result | + awk -f print_1.awk | + sort >observed && + + test_cmp expected observed ' # Delete some loose objects and use pack-objects, but WITHOUT any filtering. 
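# A small sketch, not from the patch, of the loose-object layout the next
# test leans on: an object whose OID starts with "ab" lives at
# .git/objects/ab/<rest>, so splicing a "/" after the first two hex digits
# of an OID yields its path relative to .git/objects. Assuming $oid holds a
# full object ID:
echo $oid | sed "s|..|&/|"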
@@ -362,8 +459,10 @@ test_expect_success 'verify sparse:oid=oid-ish' ' test_expect_success 'setup r1 - delete loose blobs' ' git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \ - | awk -f print_2.awk \ - | sort >expected && + >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + for id in `cat expected | sed "s|..|&/|"` do rm r1/.git/objects/$id diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh index 2799969f3e..5fe21db99f 100755 --- a/t/t5318-commit-graph.sh +++ b/t/t5318-commit-graph.sh @@ -8,7 +8,8 @@ test_expect_success 'setup full repo' ' cd "$TRASH_DIRECTORY/full" && git init && git config core.commitGraph true && - objdir=".git/objects" + objdir=".git/objects" && + test_oid_init ' test_expect_success 'verify graph with no graph file' ' @@ -35,7 +36,7 @@ test_expect_success 'create commits and repack' ' graph_git_two_modes() { git -c core.commitGraph=true $1 >output git -c core.commitGraph=false $1 >expect - test_cmp output expect + test_cmp expect output } graph_git_behavior() { @@ -333,7 +334,7 @@ test_expect_success 'git commit-graph verify' ' NUM_COMMITS=9 NUM_OCTOPUS_EDGES=2 -HASH_LEN=20 +HASH_LEN="$(test_oid rawsz)" GRAPH_BYTE_VERSION=4 GRAPH_BYTE_HASH=5 GRAPH_BYTE_CHUNK_COUNT=6 diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh new file mode 100755 index 0000000000..70926b5bc0 --- /dev/null +++ b/t/t5319-multi-pack-index.sh @@ -0,0 +1,351 @@ +#!/bin/sh + +test_description='multi-pack-indexes' +. ./test-lib.sh + +objdir=.git/objects + +midx_read_expect () { + NUM_PACKS=$1 + NUM_OBJECTS=$2 + NUM_CHUNKS=$3 + OBJECT_DIR=$4 + EXTRA_CHUNKS="$5" + { + cat <<-EOF && + header: 4d494458 1 $NUM_CHUNKS $NUM_PACKS + chunks: pack-names oid-fanout oid-lookup object-offsets$EXTRA_CHUNKS + num_objects: $NUM_OBJECTS + packs: + EOF + if test $NUM_PACKS -ge 1 + then + ls $OBJECT_DIR/pack/ | grep idx | sort + fi && + printf "object-dir: $OBJECT_DIR\n" + } >expect && + test-tool read-midx $OBJECT_DIR >actual && + test_cmp expect actual +} + +test_expect_success 'write midx with no packs' ' + test_when_finished rm -f pack/multi-pack-index && + git multi-pack-index --object-dir=. write && + midx_read_expect 0 0 4 . 
+' + +generate_objects () { + i=$1 + iii=$(printf '%03i' $i) + { + test-tool genrandom "bar" 200 && + test-tool genrandom "baz $iii" 50 + } >wide_delta_$iii && + { + test-tool genrandom "foo"$i 100 && + test-tool genrandom "foo"$(( $i + 1 )) 100 && + test-tool genrandom "foo"$(( $i + 2 )) 100 + } >deep_delta_$iii && + { + echo $iii && + test-tool genrandom "$iii" 8192 + } >file_$iii && + git update-index --add file_$iii deep_delta_$iii wide_delta_$iii +} + +commit_and_list_objects () { + { + echo 101 && + test-tool genrandom 100 8192; + } >file_101 && + git update-index --add file_101 && + tree=$(git write-tree) && + commit=$(git commit-tree $tree -p HEAD</dev/null) && + { + echo $tree && + git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\) .*/\\1/" + } >obj-list && + git reset --hard $commit +} + +test_expect_success 'create objects' ' + test_commit initial && + for i in $(test_seq 1 5) + do + generate_objects $i + done && + commit_and_list_objects +' + +test_expect_success 'write midx with one v1 pack' ' + pack=$(git pack-objects --index-version=1 $objdir/pack/test <obj-list) && + test_when_finished rm $objdir/pack/test-$pack.pack \ + $objdir/pack/test-$pack.idx $objdir/pack/multi-pack-index && + git multi-pack-index --object-dir=$objdir write && + midx_read_expect 1 18 4 $objdir +' + +midx_git_two_modes () { + if [ "$2" = "sorted" ] + then + git -c core.multiPackIndex=false $1 | sort >expect && + git -c core.multiPackIndex=true $1 | sort >actual + else + git -c core.multiPackIndex=false $1 >expect && + git -c core.multiPackIndex=true $1 >actual + fi && + test_cmp expect actual +} + +compare_results_with_midx () { + MSG=$1 + test_expect_success "check normal git operations: $MSG" ' + midx_git_two_modes "rev-list --objects --all" && + midx_git_two_modes "log --raw" && + midx_git_two_modes "count-objects --verbose" && + midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check" && + midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check --unsorted" sorted + ' +} + +test_expect_success 'write midx with one v2 pack' ' + git pack-objects --index-version=2,0x40 $objdir/pack/test <obj-list && + git multi-pack-index --object-dir=$objdir write && + midx_read_expect 1 18 4 $objdir +' + +compare_results_with_midx "one v2 pack" + +test_expect_success 'add more objects' ' + for i in $(test_seq 6 10) + do + generate_objects $i + done && + commit_and_list_objects +' + +test_expect_success 'write midx with two packs' ' + git pack-objects --index-version=1 $objdir/pack/test-2 <obj-list && + git multi-pack-index --object-dir=$objdir write && + midx_read_expect 2 34 4 $objdir +' + +compare_results_with_midx "two packs" + +test_expect_success 'add more packs' ' + for j in $(test_seq 11 20) + do + generate_objects $j && + commit_and_list_objects && + git pack-objects --index-version=2 $objdir/pack/test-pack <obj-list + done +' + +compare_results_with_midx "mixed mode (two packs + extra)" + +test_expect_success 'write midx with twelve packs' ' + git multi-pack-index --object-dir=$objdir write && + midx_read_expect 12 74 4 $objdir +' + +compare_results_with_midx "twelve packs" + +test_expect_success 'verify multi-pack-index success' ' + git multi-pack-index verify --object-dir=$objdir +' + +# usage: corrupt_midx_and_verify <pos> <data> <objdir> <string> +corrupt_midx_and_verify() { + POS=$1 && + DATA="${2:-\0}" && + OBJDIR=$3 && + GREPSTR="$4" && + COMMAND="$5" && + if test -z "$COMMAND" + then + COMMAND="git multi-pack-index verify --object-dir=$OBJDIR" + fi && + 
FILE=$OBJDIR/pack/multi-pack-index && + chmod a+w $FILE && + test_when_finished mv midx-backup $FILE && + cp $FILE midx-backup && + printf "$DATA" | dd of="$FILE" bs=1 seek="$POS" conv=notrunc && + test_must_fail $COMMAND 2>test_err && + grep -v "^+" test_err >err && + test_i18ngrep "$GREPSTR" err +} + +test_expect_success 'verify bad signature' ' + corrupt_midx_and_verify 0 "\00" $objdir \ + "multi-pack-index signature" +' + +HASH_LEN=20 +NUM_OBJECTS=74 +MIDX_BYTE_VERSION=4 +MIDX_BYTE_OID_VERSION=5 +MIDX_BYTE_CHUNK_COUNT=6 +MIDX_HEADER_SIZE=12 +MIDX_BYTE_CHUNK_ID=$MIDX_HEADER_SIZE +MIDX_BYTE_CHUNK_OFFSET=$(($MIDX_HEADER_SIZE + 4)) +MIDX_NUM_CHUNKS=5 +MIDX_CHUNK_LOOKUP_WIDTH=12 +MIDX_OFFSET_PACKNAMES=$(($MIDX_HEADER_SIZE + \ + $MIDX_NUM_CHUNKS * $MIDX_CHUNK_LOOKUP_WIDTH)) +MIDX_BYTE_PACKNAME_ORDER=$(($MIDX_OFFSET_PACKNAMES + 2)) +MIDX_OFFSET_OID_FANOUT=$(($MIDX_OFFSET_PACKNAMES + 652)) +MIDX_OID_FANOUT_WIDTH=4 +MIDX_BYTE_OID_FANOUT_ORDER=$((MIDX_OFFSET_OID_FANOUT + 250 * $MIDX_OID_FANOUT_WIDTH + 1)) +MIDX_OFFSET_OID_LOOKUP=$(($MIDX_OFFSET_OID_FANOUT + 256 * $MIDX_OID_FANOUT_WIDTH)) +MIDX_BYTE_OID_LOOKUP=$(($MIDX_OFFSET_OID_LOOKUP + 16 * $HASH_LEN)) +MIDX_OFFSET_OBJECT_OFFSETS=$(($MIDX_OFFSET_OID_LOOKUP + $NUM_OBJECTS * $HASH_LEN)) +MIDX_OFFSET_WIDTH=8 +MIDX_BYTE_PACK_INT_ID=$(($MIDX_OFFSET_OBJECT_OFFSETS + 16 * $MIDX_OFFSET_WIDTH + 2)) +MIDX_BYTE_OFFSET=$(($MIDX_OFFSET_OBJECT_OFFSETS + 16 * $MIDX_OFFSET_WIDTH + 6)) + +test_expect_success 'verify bad version' ' + corrupt_midx_and_verify $MIDX_BYTE_VERSION "\00" $objdir \ + "multi-pack-index version" +' + +test_expect_success 'verify bad OID version' ' + corrupt_midx_and_verify $MIDX_BYTE_OID_VERSION "\02" $objdir \ + "hash version" +' + +test_expect_success 'verify truncated chunk count' ' + corrupt_midx_and_verify $MIDX_BYTE_CHUNK_COUNT "\01" $objdir \ + "missing required" +' + +test_expect_success 'verify extended chunk count' ' + corrupt_midx_and_verify $MIDX_BYTE_CHUNK_COUNT "\07" $objdir \ + "terminating multi-pack-index chunk id appears earlier than expected" +' + +test_expect_success 'verify missing required chunk' ' + corrupt_midx_and_verify $MIDX_BYTE_CHUNK_ID "\01" $objdir \ + "missing required" +' + +test_expect_success 'verify invalid chunk offset' ' + corrupt_midx_and_verify $MIDX_BYTE_CHUNK_OFFSET "\01" $objdir \ + "invalid chunk offset (too large)" +' + +test_expect_success 'verify packnames out of order' ' + corrupt_midx_and_verify $MIDX_BYTE_PACKNAME_ORDER "z" $objdir \ + "pack names out of order" +' + +test_expect_success 'verify packnames out of order' ' + corrupt_midx_and_verify $MIDX_BYTE_PACKNAME_ORDER "a" $objdir \ + "failed to load pack" +' + +test_expect_success 'verify oid fanout out of order' ' + corrupt_midx_and_verify $MIDX_BYTE_OID_FANOUT_ORDER "\01" $objdir \ + "oid fanout out of order" +' + +test_expect_success 'verify oid lookup out of order' ' + corrupt_midx_and_verify $MIDX_BYTE_OID_LOOKUP "\00" $objdir \ + "oid lookup out of order" +' + +test_expect_success 'verify incorrect pack-int-id' ' + corrupt_midx_and_verify $MIDX_BYTE_PACK_INT_ID "\07" $objdir \ + "bad pack-int-id" +' + +test_expect_success 'verify incorrect offset' ' + corrupt_midx_and_verify $MIDX_BYTE_OFFSET "\07" $objdir \ + "incorrect object offset" +' + +test_expect_success 'git-fsck incorrect offset' ' + corrupt_midx_and_verify $MIDX_BYTE_OFFSET "\07" $objdir \ + "incorrect object offset" \ + "git -c core.multipackindex=true fsck" +' + +test_expect_success 'repack removes multi-pack-index' ' + test_path_is_file $objdir/pack/multi-pack-index 
&& + GIT_TEST_MULTI_PACK_INDEX=0 git repack -adf && + test_path_is_missing $objdir/pack/multi-pack-index +' + +compare_results_with_midx "after repack" + +test_expect_success 'multi-pack-index and pack-bitmap' ' + git -c repack.writeBitmaps=true repack -ad && + git multi-pack-index write && + git rev-list --test-bitmap HEAD +' + +test_expect_success 'multi-pack-index and alternates' ' + git init --bare alt.git && + echo $(pwd)/alt.git/objects >.git/objects/info/alternates && + echo content1 >file1 && + altblob=$(GIT_DIR=alt.git git hash-object -w file1) && + git cat-file blob $altblob && + git rev-list --all +' + +compare_results_with_midx "with alternate (local midx)" + +test_expect_success 'multi-pack-index in an alternate' ' + mv .git/objects/pack/* alt.git/objects/pack && + test_commit add_local_objects && + git repack --local && + git multi-pack-index write && + midx_read_expect 1 3 4 $objdir && + git reset --hard HEAD~1 && + rm -f .git/objects/pack/* +' + +compare_results_with_midx "with alternate (remote midx)" + +# usage: corrupt_data <file> <pos> [<data>] +corrupt_data () { + file=$1 + pos=$2 + data="${3:-\0}" + printf "$data" | dd of="$file" bs=1 seek="$pos" conv=notrunc +} + +# Force 64-bit offsets by manipulating the idx file. +# This makes the IDX file _incorrect_ so be careful to clean up after! +test_expect_success 'force some 64-bit offsets with pack-objects' ' + mkdir objects64 && + mkdir objects64/pack && + for i in $(test_seq 1 11) + do + generate_objects 11 + done && + commit_and_list_objects && + pack64=$(git pack-objects --index-version=2,0x40 objects64/pack/test-64 <obj-list) && + idx64=objects64/pack/test-64-$pack64.idx && + chmod u+w $idx64 && + corrupt_data $idx64 2999 "\02" && + midx64=$(git multi-pack-index --object-dir=objects64 write) && + midx_read_expect 1 63 5 objects64 " large-offsets" +' + +test_expect_success 'verify multi-pack-index with 64-bit offsets' ' + git multi-pack-index verify --object-dir=objects64 +' + +NUM_OBJECTS=63 +MIDX_OFFSET_OID_FANOUT=$((MIDX_OFFSET_PACKNAMES + 54)) +MIDX_OFFSET_OID_LOOKUP=$((MIDX_OFFSET_OID_FANOUT + 256 * $MIDX_OID_FANOUT_WIDTH)) +MIDX_OFFSET_OBJECT_OFFSETS=$(($MIDX_OFFSET_OID_LOOKUP + $NUM_OBJECTS * $HASH_LEN)) +MIDX_OFFSET_LARGE_OFFSETS=$(($MIDX_OFFSET_OBJECT_OFFSETS + $NUM_OBJECTS * $MIDX_OFFSET_WIDTH)) +MIDX_BYTE_LARGE_OFFSET=$(($MIDX_OFFSET_LARGE_OFFSETS + 3)) + +test_expect_success 'verify incorrect 64-bit offset' ' + corrupt_midx_and_verify $MIDX_BYTE_LARGE_OFFSET "\07" objects64 \ + "incorrect object offset" +' + +test_done diff --git a/t/t5320-delta-islands.sh b/t/t5320-delta-islands.sh new file mode 100755 index 0000000000..fea92a5777 --- /dev/null +++ b/t/t5320-delta-islands.sh @@ -0,0 +1,143 @@ +#!/bin/sh + +test_description='exercise delta islands' +. ./test-lib.sh + +# returns true iff $1 is a delta based on $2 +is_delta_base () { + delta_base=$(echo "$1" | git cat-file --batch-check='%(deltabase)') && + echo >&2 "$1 has base $delta_base" && + test "$delta_base" = "$2" +} + +# generate a commit on branch $1 with a single file, "file", whose +# content is mostly based on the seed $2, but with a unique bit +# of content $3 appended. This should allow us to see whether +# blobs of different refs delta against each other. 
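# A minimal sketch, not taken from the patch, of the configuration these
# tests drive: each pack.island value is a left-anchored regex over ref
# names, the text matched by any capture group names the island a ref
# belongs to, and repacking with -i (--delta-islands) allows an object to be
# stored as a delta only against a base that is present in every island the
# object belongs to. The commit helper defined next creates branches whose
# blobs would otherwise delta against each other freely:
git -c "pack.island=refs/heads/(.*)" repack -adfi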
+commit() { + blob=$({ test-tool genrandom "$2" 10240 && echo "$3"; } | + git hash-object -w --stdin) && + tree=$(printf '100644 blob %s\tfile\n' "$blob" | git mktree) && + commit=$(echo "$2-$3" | git commit-tree "$tree" ${4:+-p "$4"}) && + git update-ref "refs/heads/$1" "$commit" && + eval "$1"'=$(git rev-parse $1:file)' && + eval "echo >&2 $1=\$$1" +} + +test_expect_success 'setup commits' ' + commit one seed 1 && + commit two seed 12 +' + +# Note: This is heavily dependent on the "prefer larger objects as base" +# heuristic. +test_expect_success 'vanilla repack deltas one against two' ' + git repack -adf && + is_delta_base $one $two +' + +test_expect_success 'island repack with no island definition is vanilla' ' + git repack -adfi && + is_delta_base $one $two +' + +test_expect_success 'island repack with no matches is vanilla' ' + git -c "pack.island=refs/foo" repack -adfi && + is_delta_base $one $two +' + +test_expect_success 'separate islands disallows delta' ' + git -c "pack.island=refs/heads/(.*)" repack -adfi && + ! is_delta_base $one $two && + ! is_delta_base $two $one +' + +test_expect_success 'same island allows delta' ' + git -c "pack.island=refs/heads" repack -adfi && + is_delta_base $one $two +' + +test_expect_success 'coalesce same-named islands' ' + git \ + -c "pack.island=refs/(.*)/one" \ + -c "pack.island=refs/(.*)/two" \ + repack -adfi && + is_delta_base $one $two +' + +test_expect_success 'island restrictions drop reused deltas' ' + git repack -adfi && + is_delta_base $one $two && + git -c "pack.island=refs/heads/(.*)" repack -adi && + ! is_delta_base $one $two && + ! is_delta_base $two $one +' + +test_expect_success 'island regexes are left-anchored' ' + git -c "pack.island=heads/(.*)" repack -adfi && + is_delta_base $one $two +' + +test_expect_success 'island regexes follow last-one-wins scheme' ' + git \ + -c "pack.island=refs/heads/(.*)" \ + -c "pack.island=refs/heads/" \ + repack -adfi && + is_delta_base $one $two +' + +test_expect_success 'setup shared history' ' + commit root shared root && + commit one shared 1 root && + commit two shared 12-long root +' + +# We know that $two will be preferred as a base from $one, +# because we can transform it with a pure deletion. +# +# We also expect $root as a delta against $two by the "longest is base" rule. +test_expect_success 'vanilla delta goes between branches' ' + git repack -adf && + is_delta_base $one $two && + is_delta_base $root $two +' + +# Here we should allow $one to base itself on $root; even though +# they are in different islands, the objects in $root are in a superset +# of islands compared to those in $one. +# +# Similarly, $two can delta against $root by our rules. And unlike $one, +# in which we are just allowing it, the island rules actually put $root +# as a possible base for $two, which it would not otherwise be (due to the size +# sorting). +test_expect_success 'deltas allowed against superset islands' ' + git -c "pack.island=refs/heads/(.*)" repack -adfi && + is_delta_base $one $root && + is_delta_base $two $root +' + +# We are going to test the packfile order here, so we again have to make some +# assumptions. We assume that "$root", as part of our core "one", must come +# before "$two". This should be guaranteed by the island code. However, for +# this test to fail without islands, we are also assuming that it would not +# otherwise do so. This is true by the current write order, which will put +# commits (and their contents) before their parents. 
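# Sketch, not from the patch itself, of the knob the next test exercises:
# pack.islandCore names one island whose objects pack-objects writes at the
# front of the pack, which is what the expected ordering of $root before
# $two checks below:
git -c "pack.island=refs/heads/(.*)" -c "pack.islandcore=one" repack -adfi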
+test_expect_success 'island core places core objects first' ' + cat >expect <<-EOF && + $root + $two + EOF + git -c "pack.island=refs/heads/(.*)" \ + -c "pack.islandcore=one" \ + repack -adfi && + git verify-pack -v .git/objects/pack/*.pack | + cut -d" " -f1 | + egrep "$root|$two" >actual && + test_cmp expect actual +' + +test_expect_success 'unmatched island core is not fatal' ' + git -c "pack.islandcore=one" repack -adfi +' + +test_done diff --git a/t/t5410-receive-pack-alternates.sh b/t/t5410-receive-pack-alternates.sh new file mode 100755 index 0000000000..f00d0da860 --- /dev/null +++ b/t/t5410-receive-pack-alternates.sh @@ -0,0 +1,41 @@ +#!/bin/sh + +test_description='git receive-pack with alternate ref filtering' + +. ./test-lib.sh + +test_expect_success 'setup' ' + test_commit base && + git clone -s --bare . fork && + git checkout -b public/branch master && + test_commit public && + git checkout -b private/branch master && + test_commit private +' + +extract_haves () { + depacketize | perl -lne '/^(\S+) \.have/ and print $1' +} + +test_expect_success 'with core.alternateRefsCommand' ' + write_script fork/alternate-refs <<-\EOF && + git --git-dir="$1" for-each-ref \ + --format="%(objectname)" \ + refs/heads/public/ + EOF + test_config -C fork core.alternateRefsCommand ./alternate-refs && + git rev-parse public/branch >expect && + printf "0000" | git receive-pack fork >actual && + extract_haves <actual >actual.haves && + test_cmp expect actual.haves +' + +test_expect_success 'with core.alternateRefsPrefixes' ' + test_config -C fork core.alternateRefsPrefixes "refs/heads/private" && + git rev-parse private/branch >expect && + printf "0000" | git receive-pack fork >actual && + extract_haves <actual >actual.haves && + test_cmp expect actual.haves +' + +test_done diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh index 1b5a4a6d38..086f2c40f6 100755 --- a/t/t5500-fetch-pack.sh +++ b/t/t5500-fetch-pack.sh @@ -50,8 +50,11 @@ pull_to_client () { case "$heads" in *B*) git update-ref refs/heads/B "$BTIP";; esac && - git symbolic-ref HEAD refs/heads/$(echo $heads \ - | sed -e "s/^\(.\).*$/\1/") && + + git symbolic-ref HEAD refs/heads/$( + echo $heads | + sed -e "s/^\(.\).*$/\1/" + ) && git fsck --full && diff --git a/t/t5504-fetch-receive-strict.sh b/t/t5504-fetch-receive-strict.sh index 62f3569891..7bc706873c 100755 --- a/t/t5504-fetch-receive-strict.sh +++ b/t/t5504-fetch-receive-strict.sh @@ -133,6 +133,34 @@ committer Bugs Bunny <bugs@bun.ni> 1234567890 +0000 This commit object intentionally broken EOF +test_expect_success 'setup bogus commit' ' + commit="$(git hash-object -t commit -w --stdin <bogus-commit)" +' + +test_expect_success 'fsck with no skipList input' ' + test_must_fail git fsck 2>err && + test_i18ngrep "missingEmail" err +' + +test_expect_success 'setup sorted and unsorted skipLists' ' + cat >SKIP.unsorted <<-EOF && + 0000000000000000000000000000000000000004 + 0000000000000000000000000000000000000002 + $commit + 0000000000000000000000000000000000000001 + 0000000000000000000000000000000000000003 + EOF + sort SKIP.unsorted >SKIP.sorted +' + +test_expect_success 'fsck with sorted skipList' ' + git -c fsck.skipList=SKIP.sorted fsck +' + +test_expect_success 'fsck with unsorted skipList' ' + git -c fsck.skipList=SKIP.unsorted fsck +' + test_expect_success 'fsck with invalid or bogus skipList input' ' git -c fsck.skipList=/dev/null -c fsck.missingEmail=ignore fsck && test_must_fail git -c fsck.skipList=does-not-exist -c fsck.missingEmail=ignore fsck 2>err && @@ -141,8 
+169,47 @@ test_expect_success 'fsck with invalid or bogus skipList input' ' test_i18ngrep "Invalid SHA-1: \[core\]" err ' +test_expect_success 'fsck with other accepted skipList input (comments & empty lines)' ' + cat >SKIP.with-comment <<-EOF && + # Some bad commit + 0000000000000000000000000000000000000001 + EOF + test_must_fail git -c fsck.skipList=SKIP.with-comment fsck 2>err-with-comment && + test_i18ngrep "missingEmail" err-with-comment && + cat >SKIP.with-empty-line <<-EOF && + 0000000000000000000000000000000000000001 + + 0000000000000000000000000000000000000002 + EOF + test_must_fail git -c fsck.skipList=SKIP.with-empty-line fsck 2>err-with-empty-line && + test_i18ngrep "missingEmail" err-with-empty-line +' + +test_expect_success 'fsck no garbage output from comments & empty lines errors' ' + test_line_count = 1 err-with-comment && + test_line_count = 1 err-with-empty-line +' + +test_expect_success 'fsck with invalid abbreviated skipList input' ' + echo $commit | test_copy_bytes 20 >SKIP.abbreviated && + test_must_fail git -c fsck.skipList=SKIP.abbreviated fsck 2>err-abbreviated && + test_i18ngrep "^fatal: Invalid SHA-1: " err-abbreviated +' + +test_expect_success 'fsck with exhaustive accepted skipList input (various types of comments etc.)' ' + >SKIP.exhaustive && + echo "# A commented line" >>SKIP.exhaustive && + echo "" >>SKIP.exhaustive && + echo " " >>SKIP.exhaustive && + echo " # Comment after whitespace" >>SKIP.exhaustive && + echo "$commit # Our bad commit (with leading whitespace and trailing comment)" >>SKIP.exhaustive && + echo "# Some bad commit (leading whitespace)" >>SKIP.exhaustive && + echo " 0000000000000000000000000000000000000001" >>SKIP.exhaustive && + git -c fsck.skipList=SKIP.exhaustive fsck 2>err && + test_must_be_empty err +' + test_expect_success 'push with receive.fsck.skipList' ' - commit="$(git hash-object -t commit -w --stdin <bogus-commit)" && git push . $commit:refs/heads/bogus && rm -rf dst && git init dst && @@ -169,7 +236,6 @@ test_expect_success 'push with receive.fsck.skipList' ' ' test_expect_success 'fetch with fetch.fsck.skipList' ' - commit="$(git hash-object -t commit -w --stdin <bogus-commit)" && refspec=refs/heads/bogus:refs/heads/bogus && git push . $commit:refs/heads/bogus && rm -rf dst && @@ -204,7 +270,6 @@ test_expect_success 'fsck.<unknownmsg-id> dies' ' ' test_expect_success 'push with receive.fsck.missingEmail=warn' ' - commit="$(git hash-object -t commit -w --stdin <bogus-commit)" && git push . $commit:refs/heads/bogus && rm -rf dst && git init dst && @@ -232,7 +297,6 @@ test_expect_success 'push with receive.fsck.missingEmail=warn' ' ' test_expect_success 'fetch with fetch.fsck.missingEmail=warn' ' - commit="$(git hash-object -t commit -w --stdin <bogus-commit)" && refspec=refs/heads/bogus:refs/heads/bogus && git push . $commit:refs/heads/bogus && rm -rf dst && diff --git a/t/t5512-ls-remote.sh b/t/t5512-ls-remote.sh index bc5703ff9b..32e722db2e 100755 --- a/t/t5512-ls-remote.sh +++ b/t/t5512-ls-remote.sh @@ -302,4 +302,28 @@ test_expect_success 'ls-remote works outside repository' ' nongit git ls-remote dst.git ' +test_expect_success 'ls-remote --sort fails gracefully outside repository' ' + # Use a sort key that requires access to the referenced objects. 
+ nongit test_must_fail git ls-remote --sort=authordate "$TRASH_DIRECTORY" 2>err && + test_i18ngrep "^fatal: not a git repository, but the field '\''authordate'\'' requires access to object data" err +' + +test_expect_success 'ls-remote patterns work with all protocol versions' ' + git for-each-ref --format="%(objectname) %(refname)" \ + refs/heads/master refs/remotes/origin/master >expect && + git -c protocol.version=1 ls-remote . master >actual.v1 && + test_cmp expect actual.v1 && + git -c protocol.version=2 ls-remote . master >actual.v2 && + test_cmp expect actual.v2 +' + +test_expect_success 'ls-remote prefixes work with all protocol versions' ' + git for-each-ref --format="%(objectname) %(refname)" \ + refs/heads/ refs/tags/ >expect && + git -c protocol.version=1 ls-remote --heads --tags . >actual.v1 && + test_cmp expect actual.v1 && + git -c protocol.version=2 ls-remote --heads --tags . >actual.v2 && + test_cmp expect actual.v2 +' + test_done diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh index bb0a36535c..7316365a24 100755 --- a/t/t5516-fetch-push.sh +++ b/t/t5516-fetch-push.sh @@ -969,7 +969,7 @@ test_force_push_tag () { tag_type_description=$1 tag_args=$2 - test_expect_success 'force pushing required to update lightweight tag' " + test_expect_success "force pushing required to update $tag_type_description" " mk_test testrepo heads/master && mk_child testrepo child1 && mk_child testrepo child2 && @@ -1009,7 +1009,32 @@ test_force_push_tag () { } test_force_push_tag "lightweight tag" "-f" -test_force_push_tag "annotated tag" "-f -a -m'msg'" +test_force_push_tag "annotated tag" "-f -a -m'tag message'" + +test_force_fetch_tag () { + tag_type_description=$1 + tag_args=$2 + + test_expect_success "fetch will not clobber an existing $tag_type_description without --force" " + mk_test testrepo heads/master && + mk_child testrepo child1 && + mk_child testrepo child2 && + ( + cd testrepo && + git tag testTag && + git -C ../child1 fetch origin tag testTag && + >file1 && + git add file1 && + git commit -m 'file1' && + git tag $tag_args testTag && + test_must_fail git -C ../child1 fetch origin tag testTag && + git -C ../child1 fetch origin '+refs/tags/*:refs/tags/*' + ) + " +} + +test_force_fetch_tag "lightweight tag" "-f" +test_force_fetch_tag "annotated tag" "-f -a -m'tag message'" test_expect_success 'push --porcelain' ' mk_empty testrepo && diff --git a/t/t5520-pull.sh b/t/t5520-pull.sh index 5e501c8b08..cf4cc32fd0 100755 --- a/t/t5520-pull.sh +++ b/t/t5520-pull.sh @@ -461,7 +461,8 @@ test_expect_success 'pull.rebase=1 is treated as true and flattens keep-merge' ' test file3 = "$(git show HEAD:file3.t)" ' -test_expect_success 'pull.rebase=preserve rebases and merges keep-merge' ' +test_expect_success REBASE_P \ + 'pull.rebase=preserve rebases and merges keep-merge' ' git reset --hard before-preserve-rebase && test_config pull.rebase preserve && git pull . copy && @@ -514,7 +515,8 @@ test_expect_success '--rebase=true rebases and flattens keep-merge' ' test file3 = "$(git show HEAD:file3.t)" ' -test_expect_success '--rebase=preserve rebases and merges keep-merge' ' +test_expect_success REBASE_P \ + '--rebase=preserve rebases and merges keep-merge' ' git reset --hard before-preserve-rebase && test_config pull.rebase true && git pull --rebase=preserve . 
copy && diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh index 3dc8f8ecec..8630b0cc39 100755 --- a/t/t5551-http-fetch-smart.sh +++ b/t/t5551-http-fetch-smart.sh @@ -381,6 +381,21 @@ test_expect_success 'using fetch command in remote-curl updates refs' ' test_cmp expect actual ' +test_expect_success 'fetch by SHA-1 without tag following' ' + SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" && + rm -rf "$SERVER" client && + + git init "$SERVER" && + test_commit -C "$SERVER" foo && + + git clone $HTTPD_URL/smart/server client && + + test_commit -C "$SERVER" bar && + git -C "$SERVER" rev-parse bar >bar_hash && + git -C client -c protocol.version=0 fetch \ + --no-tags origin $(cat bar_hash) +' + test_expect_success 'GIT_REDACT_COOKIES redacts cookies' ' rm -rf clone && echo "Set-Cookie: Foo=1" >cookies && diff --git a/t/t5562-http-backend-content-length.sh b/t/t5562-http-backend-content-length.sh index f94d01f69e..b24d8b05a4 100755 --- a/t/t5562-http-backend-content-length.sh +++ b/t/t5562-http-backend-content-length.sh @@ -155,8 +155,8 @@ test_expect_success 'CONTENT_LENGTH overflow ssite_t' ' test_expect_success 'empty CONTENT_LENGTH' ' env \ - QUERY_STRING=/repo.git/HEAD \ - PATH_TRANSLATED="$PWD"/.git/HEAD \ + QUERY_STRING="service=git-receive-pack" \ + PATH_TRANSLATED="$PWD"/.git/info/refs \ GIT_HTTP_EXPORT_ALL=TRUE \ REQUEST_METHOD=GET \ CONTENT_LENGTH="" \ diff --git a/t/t5573-pull-verify-signatures.sh b/t/t5573-pull-verify-signatures.sh index 747775c147..3e9876e197 100755 --- a/t/t5573-pull-verify-signatures.sh +++ b/t/t5573-pull-verify-signatures.sh @@ -78,4 +78,11 @@ test_expect_success GPG 'pull commit with bad signature with --no-verify-signatu git pull --ff-only --no-verify-signatures bad 2>pullerror ' +test_expect_success GPG 'pull unsigned commit into unborn branch' ' + git init empty-repo && + test_must_fail \ + git -C empty-repo pull --verify-signatures .. 2>pullerror && + test_i18ngrep "does not have a GPG signature" pullerror +' + test_done diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh index ddaa96ac4f..c28d51bd59 100755 --- a/t/t5601-clone.sh +++ b/t/t5601-clone.sh @@ -624,10 +624,16 @@ test_expect_success 'clone on case-insensitive fs' ' git hash-object -w -t tree --stdin) && c=$(git commit-tree -m bogus $t) && git update-ref refs/heads/bogus $c && - git clone -b bogus . bogus + git clone -b bogus . bogus 2>warning ) ' +test_expect_success !MINGW,CASE_INSENSITIVE_FS 'colliding file detection' ' + grep X icasefs/warning && + grep x icasefs/warning && + test_i18ngrep "the following paths have collided" icasefs/warning +' + partial_clone () { SERVER="$1" && URL="$2" && diff --git a/t/t5607-clone-bundle.sh b/t/t5607-clone-bundle.sh index 348d9b3bc7..cf39e9e243 100755 --- a/t/t5607-clone-bundle.sh +++ b/t/t5607-clone-bundle.sh @@ -71,4 +71,10 @@ test_expect_success 'prerequisites with an empty commit message' ' git bundle verify bundle ' +test_expect_success 'failed bundle creation does not leave cruft' ' + # This fails because the bundle would be empty. 
+ test_must_fail git bundle create fail.bundle master..master && + test_path_is_missing fail.bundle.lock +' + test_done diff --git a/t/t5612-clone-refspec.sh b/t/t5612-clone-refspec.sh index 5582b3d5fd..e36ac01661 100755 --- a/t/t5612-clone-refspec.sh +++ b/t/t5612-clone-refspec.sh @@ -103,7 +103,7 @@ test_expect_success 'clone with --no-tags' ' test_expect_success '--single-branch while HEAD pointing at master' ' ( cd dir_master && - git fetch && + git fetch --force && git for-each-ref refs/remotes/origin | sed -e "/HEAD$/d" \ -e "s|/remotes/origin/|/heads/|" >../actual @@ -114,7 +114,7 @@ test_expect_success '--single-branch while HEAD pointing at master' ' test_cmp expect actual && ( cd dir_master && - git fetch --tags && + git fetch --tags --force && git for-each-ref refs/tags >../actual ) && git for-each-ref refs/tags >expect && diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh index bbbe7537df..336f02a41a 100755 --- a/t/t5616-partial-clone.sh +++ b/t/t5616-partial-clone.sh @@ -34,10 +34,12 @@ test_expect_success 'setup bare clone for server' ' # confirm partial clone was registered in the local config. test_expect_success 'do partial clone 1' ' git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 && - git -C pc1 rev-list HEAD --quiet --objects --missing=print \ - | awk -f print_1.awk \ - | sed "s/?//" \ - | sort >observed.oids && + + git -C pc1 rev-list --quiet --objects --missing=print HEAD >revs && + awk -f print_1.awk revs | + sed "s/?//" | + sort >observed.oids && + test_cmp expect_1.oids observed.oids && test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" && test "$(git -C pc1 config --local extensions.partialclone)" = "origin" && @@ -46,10 +48,10 @@ test_expect_success 'do partial clone 1' ' # checkout master to force dynamic object fetch of blobs at HEAD. test_expect_success 'verify checkout with dynamic object fetch' ' - git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed && + git -C pc1 rev-list --quiet --objects --missing=print HEAD >observed && test_line_count = 4 observed && git -C pc1 checkout master && - git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed && + git -C pc1 rev-list --quiet --objects --missing=print HEAD >observed && test_line_count = 0 observed ' @@ -72,7 +74,8 @@ test_expect_success 'push new commits to server' ' # have the new blobs. test_expect_success 'partial fetch inherits filter settings' ' git -C pc1 fetch origin && - git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + git -C pc1 rev-list --quiet --objects --missing=print \ + master..origin/master >observed && test_line_count = 5 observed ' @@ -80,7 +83,8 @@ test_expect_success 'partial fetch inherits filter settings' ' # we should only get 1 new blob (for the file in origin/master). 
test_expect_success 'verify diff causes dynamic object fetch' ' git -C pc1 diff master..origin/master -- file.1.txt && - git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + git -C pc1 rev-list --quiet --objects --missing=print \ + master..origin/master >observed && test_line_count = 4 observed ' @@ -89,7 +93,8 @@ test_expect_success 'verify diff causes dynamic object fetch' ' test_expect_success 'verify blame causes dynamic object fetch' ' git -C pc1 blame origin/master -- file.1.txt >observed.blame && test_cmp expect.blame observed.blame && - git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + git -C pc1 rev-list --quiet --objects --missing=print \ + master..origin/master >observed && test_line_count = 0 observed ' @@ -109,7 +114,8 @@ test_expect_success 'push new commits to server for file.2.txt' ' # Verify we have all the new blobs. test_expect_success 'override inherited filter-spec using --no-filter' ' git -C pc1 fetch --no-filter origin && - git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + git -C pc1 rev-list --quiet --objects --missing=print \ + master..origin/master >observed && test_line_count = 0 observed ' @@ -130,16 +136,22 @@ test_expect_success 'push new commits to server for file.3.txt' ' # perhaps combined with a command in dry-run mode. test_expect_success 'manual prefetch of missing objects' ' git -C pc1 fetch --filter=blob:none origin && - git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \ - | awk -f print_1.awk \ - | sed "s/?//" \ - | sort >observed.oids && + + git -C pc1 rev-list --quiet --objects --missing=print \ + master..origin/master >revs && + awk -f print_1.awk revs | + sed "s/?//" | + sort >observed.oids && + test_line_count = 6 observed.oids && git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids && - git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \ - | awk -f print_1.awk \ - | sed "s/?//" \ - | sort >observed.oids && + + git -C pc1 rev-list --quiet --objects --missing=print \ + master..origin/master >revs && + awk -f print_1.awk revs | + sed "s/?//" | + sort >observed.oids && + test_line_count = 0 observed.oids ' @@ -154,6 +166,48 @@ test_expect_success 'partial clone with transfer.fsckobjects=1 uses index-pack - grep "git index-pack.*--fsck-objects" trace ' +test_expect_success 'use fsck before and after manually fetching a missing subtree' ' + # push new commit so server has a subtree + mkdir src/dir && + echo "in dir" >src/dir/file.txt && + git -C src add dir/file.txt && + git -C src commit -m "file in dir" && + git -C src push -u srv master && + SUBTREE=$(git -C src rev-parse HEAD:dir) && + + rm -rf dst && + git clone --no-checkout --filter=tree:0 "file://$(pwd)/srv.bare" dst && + git -C dst fsck && + + # Make sure we only have commits, and all trees and blobs are missing. + git -C dst rev-list --missing=allow-any --objects master \ + >fetched_objects && + awk -f print_1.awk fetched_objects | + xargs -n1 git -C dst cat-file -t >fetched_types && + + sort -u fetched_types >unique_types.observed && + echo commit >unique_types.expected && + test_cmp unique_types.expected unique_types.observed && + + # Auto-fetch a tree with cat-file. + git -C dst cat-file -p $SUBTREE >tree_contents && + grep file.txt tree_contents && + + # fsck still works after an auto-fetch of a tree. 
+ git -C dst fsck && + + # Auto-fetch all remaining trees and blobs with --missing=error + git -C dst rev-list --missing=error --objects master >fetched_objects && + test_line_count = 70 fetched_objects && + + awk -f print_1.awk fetched_objects | + xargs -n1 git -C dst cat-file -t >fetched_types && + + sort -u fetched_types >unique_types.observed && + test_write_lines blob commit tree >unique_types.expected && + test_cmp unique_types.expected unique_types.observed +' + test_expect_success 'partial clone fetches blobs pointed to by refs even if normally filtered out' ' rm -rf src dst && git init src && @@ -170,6 +224,23 @@ test_expect_success 'partial clone fetches blobs pointed to by refs even if norm git -C dst fsck ' +test_expect_success 'fetch what is specified on CLI even if already promised' ' + rm -rf src dst.git && + git init src && + test_commit -C src foo && + test_config -C src uploadpack.allowfilter 1 && + test_config -C src uploadpack.allowanysha1inwant 1 && + + git hash-object --stdin <src/foo.t >blob && + + git clone --bare --filter=blob:none "file://$(pwd)/src" dst.git && + git -C dst.git rev-list --objects --quiet --missing=print HEAD >missing_before && + grep "?$(cat blob)" missing_before && + git -C dst.git fetch origin $(cat blob) && + git -C dst.git rev-list --objects --quiet --missing=print HEAD >missing_after && + ! grep "?$(cat blob)" missing_after +' + . "$TEST_DIRECTORY"/lib-httpd.sh start_httpd @@ -194,7 +265,7 @@ test_expect_success 'upon cloning, check that all refs point to objects' ' # Craft a packfile not including that blob. git -C "$SERVER" rev-parse HEAD | - git -C "$SERVER" pack-objects --stdout >incomplete.pack && + git -C "$SERVER" pack-objects --stdout >incomplete.pack && # Replace the existing packfile with the crafted one. 
The protocol # requires that the packfile be sent in sideband 1, hence the extra diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh index 75ec79e6cb..ae79c6bbc0 100755 --- a/t/t5701-git-serve.sh +++ b/t/t5701-git-serve.sh @@ -15,13 +15,13 @@ test_expect_success 'test capability advertisement' ' EOF git serve --advertise-capabilities >out && - test-pkt-line unpack <out >actual && - test_cmp actual expect + test-tool pkt-line unpack <out >actual && + test_cmp expect actual ' test_expect_success 'stateless-rpc flag does not list capabilities' ' # Empty request - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && 0000 EOF git serve --stateless-rpc >out <in && @@ -33,7 +33,7 @@ test_expect_success 'stateless-rpc flag does not list capabilities' ' ' test_expect_success 'request invalid capability' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && foobar 0000 EOF @@ -42,7 +42,7 @@ test_expect_success 'request invalid capability' ' ' test_expect_success 'request with no command' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && agent=git/test 0000 EOF @@ -51,7 +51,7 @@ test_expect_success 'request with no command' ' ' test_expect_success 'request invalid command' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=foo agent=git/test 0000 @@ -71,7 +71,7 @@ test_expect_success 'setup some refs and tags' ' ' test_expect_success 'basics of ls-refs' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=ls-refs 0000 EOF @@ -88,12 +88,12 @@ test_expect_success 'basics of ls-refs' ' EOF git serve --stateless-rpc <in >out && - test-pkt-line unpack <out >actual && - test_cmp actual expect + test-tool pkt-line unpack <out >actual && + test_cmp expect actual ' test_expect_success 'basic ref-prefixes' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=ls-refs 0001 ref-prefix refs/heads/master @@ -108,12 +108,12 @@ test_expect_success 'basic ref-prefixes' ' EOF git serve --stateless-rpc <in >out && - test-pkt-line unpack <out >actual && - test_cmp actual expect + test-tool pkt-line unpack <out >actual && + test_cmp expect actual ' test_expect_success 'refs/heads prefix' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=ls-refs 0001 ref-prefix refs/heads/ @@ -128,12 +128,12 @@ test_expect_success 'refs/heads prefix' ' EOF git serve --stateless-rpc <in >out && - test-pkt-line unpack <out >actual && - test_cmp actual expect + test-tool pkt-line unpack <out >actual && + test_cmp expect actual ' test_expect_success 'peel parameter' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=ls-refs 0001 peel @@ -149,12 +149,12 @@ test_expect_success 'peel parameter' ' EOF git serve --stateless-rpc <in >out && - test-pkt-line unpack <out >actual && - test_cmp actual expect + test-tool pkt-line unpack <out >actual && + test_cmp expect actual ' test_expect_success 'symrefs parameter' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=ls-refs 0001 symrefs @@ -170,12 +170,12 @@ test_expect_success 'symrefs parameter' ' EOF git serve --stateless-rpc <in >out && - test-pkt-line unpack <out >actual && - test_cmp actual expect + test-tool pkt-line unpack <out >actual && + test_cmp expect actual ' test_expect_success 'sending server-options' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=ls-refs server-option=hello 
server-option=world @@ -190,14 +190,14 @@ test_expect_success 'sending server-options' ' EOF git serve --stateless-rpc <in >out && - test-pkt-line unpack <out >actual && - test_cmp actual expect + test-tool pkt-line unpack <out >actual && + test_cmp expect actual ' test_expect_success 'unexpected lines are not allowed in fetch request' ' git init server && - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=fetch 0001 this-is-not-a-command diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh index 3beeed4546..0f2b09ebb8 100755 --- a/t/t5702-protocol-v2.sh +++ b/t/t5702-protocol-v2.sh @@ -29,7 +29,7 @@ test_expect_success 'list refs with git:// using protocol v2' ' grep "git< version 2" log && git ls-remote --symref "$GIT_DAEMON_URL/parent" >expect && - test_cmp actual expect + test_cmp expect actual ' test_expect_success 'ref advertisment is filtered with ls-remote using protocol v2' ' @@ -42,7 +42,7 @@ test_expect_success 'ref advertisment is filtered with ls-remote using protocol $(git -C "$daemon_parent" rev-parse refs/heads/master)$(printf "\t")refs/heads/master EOF - test_cmp actual expect + test_cmp expect actual ' test_expect_success 'clone with git:// using protocol v2' ' @@ -79,6 +79,19 @@ test_expect_success 'fetch with git:// using protocol v2' ' grep "fetch< version 2" log ' +test_expect_success 'fetch by hash without tag following with protocol v2 does not list refs' ' + test_when_finished "rm -f log" && + + test_commit -C "$daemon_parent" two_a && + git -C "$daemon_parent" rev-parse two_a >two_a_hash && + + GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \ + fetch --no-tags origin $(cat two_a_hash) && + + grep "fetch< version 2" log && + ! grep "fetch> command=ls-refs" log +' + test_expect_success 'pull with git:// using protocol v2' ' test_when_finished "rm -f log" && @@ -138,7 +151,7 @@ test_expect_success 'list refs with file:// using protocol v2' ' grep "git< version 2" log && git ls-remote --symref "file://$(pwd)/file_parent" >expect && - test_cmp actual expect + test_cmp expect actual ' test_expect_success 'ref advertisment is filtered with ls-remote using protocol v2' ' @@ -151,7 +164,7 @@ test_expect_success 'ref advertisment is filtered with ls-remote using protocol $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master EOF - test_cmp actual expect + test_cmp expect actual ' test_expect_success 'server-options are sent when using ls-remote' ' @@ -164,7 +177,7 @@ test_expect_success 'server-options are sent when using ls-remote' ' $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master EOF - test_cmp actual expect && + test_cmp expect actual && grep "server-option=hello" log && grep "server-option=world" log ' @@ -271,7 +284,7 @@ test_expect_success 'partial clone' ' grep "version 2" trace && # Ensure that the old version of the file is missing - git -C client rev-list master --quiet --objects --missing=print \ + git -C client rev-list --quiet --objects --missing=print master \ >observed.oids && grep "$(git -C server rev-parse message1:a.txt)" observed.oids && @@ -286,6 +299,10 @@ test_expect_success 'dynamically fetch missing object' ' grep "version 2" trace ' +test_expect_success 'when dynamically fetching missing object, do not list refs' ' + ! 
grep "git> command=ls-refs" trace +' + test_expect_success 'partial fetch' ' rm -rf client "$(pwd)/trace" && git init client && @@ -297,7 +314,7 @@ test_expect_success 'partial fetch' ' grep "version 2" trace && # Ensure that the old version of the file is missing - git -C client rev-list other --quiet --objects --missing=print \ + git -C client rev-list --quiet --objects --missing=print other \ >observed.oids && grep "$(git -C server rev-parse message1:a.txt)" observed.oids && @@ -334,7 +351,7 @@ test_expect_success 'even with handcrafted request, filter does not work if not git -C server config uploadpack.allowfilter 0 && # Custom request that tries to filter even though it is not advertised. - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=fetch 0001 want $(git -C server rev-parse master) @@ -429,6 +446,31 @@ test_expect_success 'fetch supports include-tag and tag following' ' git -C client cat-file -e $(git -C client rev-parse annotated_tag) ' +test_expect_success 'upload-pack respects client shallows' ' + rm -rf server client trace && + + git init server && + test_commit -C server base && + test_commit -C server client_has && + + git clone --depth=1 "file://$(pwd)/server" client && + + # Add extra commits to the client so that the whole fetch takes more + # than 1 request (due to negotiation) + for i in $(test_seq 1 32) + do + test_commit -C client c$i + done && + + git -C server checkout -b newbranch base && + test_commit -C server client_wants && + + GIT_TRACE_PACKET="$(pwd)/trace" git -C client -c protocol.version=2 \ + fetch origin newbranch && + # Ensure that protocol v2 is used + grep "fetch< version 2" trace +' + # Test protocol v2 with 'http://' transport # . "$TEST_DIRECTORY"/lib-httpd.sh @@ -495,6 +537,56 @@ test_expect_success 'push with http:// and a config of v2 does not request v2' ' ! grep "git< version 2" log ' +test_expect_success 'when server sends "ready", expect DELIM' ' + rm -rf "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" http_child && + + git init "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" && + test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" one && + + git clone "$HTTPD_URL/smart/http_parent" http_child && + + test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" two && + + # After "ready" in the acknowledgments section, pretend that a FLUSH + # (0000) was sent instead of a DELIM (0001). + printf "/ready/,$ s/0001/0000/" \ + >"$HTTPD_ROOT_PATH/one-time-sed" && + + test_must_fail git -C http_child -c protocol.version=2 \ + fetch "$HTTPD_URL/one_time_sed/http_parent" 2> err && + test_i18ngrep "expected packfile to be sent after .ready." err +' + +test_expect_success 'when server does not send "ready", expect FLUSH' ' + rm -rf "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" http_child log && + + git init "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" && + test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" one && + + git clone "$HTTPD_URL/smart/http_parent" http_child && + + test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" two && + + # Create many commits to extend the negotiation phase across multiple + # requests, so that the server does not send "ready" in the first + # request. + for i in $(test_seq 1 32) + do + test_commit -C http_child c$i + done && + + # After the acknowledgments section, pretend that a DELIM + # (0001) was sent instead of a FLUSH (0000). 
+ printf "/acknowledgments/,$ s/0000/0001/" \ + >"$HTTPD_ROOT_PATH/one-time-sed" && + + test_must_fail env GIT_TRACE_PACKET="$(pwd)/log" git -C http_child \ + -c protocol.version=2 \ + fetch "$HTTPD_URL/one_time_sed/http_parent" 2> err && + grep "fetch< acknowledgments" log && + ! grep "fetch< ready" log && + test_i18ngrep "expected no other sections to be sent after no .ready." err +' stop_httpd diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh index d1ccc22331..3f58f05cbb 100755 --- a/t/t5703-upload-pack-ref-in-want.sh +++ b/t/t5703-upload-pack-ref-in-want.sh @@ -9,14 +9,14 @@ get_actual_refs () { /wanted-refs/d /0001/d p - }' <out | test-pkt-line unpack >actual_refs + }' <out | test-tool pkt-line unpack >actual_refs } get_actual_commits () { sed -n -e '/packfile/,/0000/{ /packfile/d p - }' <out | test-pkt-line unpack-sideband >o.pack && + }' <out | test-tool pkt-line unpack-sideband >o.pack && git index-pack o.pack && git verify-pack -v o.idx | grep commit | cut -c-40 | sort >actual_commits } @@ -61,7 +61,7 @@ test_expect_success 'config controls ref-in-want advertisement' ' ' test_expect_success 'invalid want-ref line' ' - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=fetch 0001 no-progress @@ -80,7 +80,7 @@ test_expect_success 'basic want-ref' ' EOF git rev-parse f | sort >expected_commits && - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=fetch 0001 no-progress @@ -101,7 +101,7 @@ test_expect_success 'multiple want-ref lines' ' EOF git rev-parse c d | sort >expected_commits && - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=fetch 0001 no-progress @@ -122,7 +122,7 @@ test_expect_success 'mix want and want-ref' ' EOF git rev-parse e f | sort >expected_commits && - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=fetch 0001 no-progress @@ -143,7 +143,7 @@ test_expect_success 'want-ref with ref we already have commit for' ' EOF >expected_commits && - test-pkt-line pack >in <<-EOF && + test-tool pkt-line pack >in <<-EOF && command=fetch 0001 no-progress diff --git a/t/t6000-rev-list-misc.sh b/t/t6000-rev-list-misc.sh index fb4d295aa0..0507999729 100755 --- a/t/t6000-rev-list-misc.sh +++ b/t/t6000-rev-list-misc.sh @@ -90,11 +90,18 @@ test_expect_success 'rev-list can show index objects' ' 9200b628cf9dc883a85a7abc8d6e6730baee589c two EOF echo only-in-index >only-in-index && + test_when_finished "git reset --hard" && git add only-in-index && git rev-list --objects --indexed-objects >actual && test_cmp expect actual ' +test_expect_success 'rev-list can negate index objects' ' + git rev-parse HEAD >expect && + git rev-list -1 --objects HEAD --not --indexed-objects >actual && + test_cmp expect actual +' + test_expect_success '--bisect and --first-parent can not be combined' ' test_must_fail git rev-list --bisect --first-parent HEAD ' diff --git a/t/t6011-rev-list-with-bad-commit.sh b/t/t6011-rev-list-with-bad-commit.sh index e51eb41f4b..545b461e51 100755 --- a/t/t6011-rev-list-with-bad-commit.sh +++ b/t/t6011-rev-list-with-bad-commit.sh @@ -41,10 +41,9 @@ test_expect_success 'corrupt second commit object' \ test_must_fail git fsck --full ' -test_expect_success 'rev-list should fail' \ - ' - test_must_fail git rev-list --all > /dev/null - ' +test_expect_success 'rev-list should fail' ' + test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list --all > /dev/null +' test_expect_success 'git repack _MUST_ fail' \ ' diff --git 
a/t/t6012-rev-list-simplify.sh b/t/t6012-rev-list-simplify.sh index b5a1190ffe..a10f0df02b 100755 --- a/t/t6012-rev-list-simplify.sh +++ b/t/t6012-rev-list-simplify.sh @@ -12,6 +12,22 @@ unnote () { git name-rev --tags --stdin | sed -e "s|$OID_REGEX (tags/\([^)]*\)) |\1 |g" } +# +# Create a test repo with interesting commit graph: +# +# A--B----------G--H--I--K--L +# \ \ / / +# \ \ / / +# C------E---F J +# \_/ +# +# The commits are laid out from left-to-right starting with +# the root commit A and terminating at the tip commit L. +# +# There are a few places where we adjust the commit date or +# author date to make the --topo-order, --date-order, and +# --author-date-order flags produce different output. + test_expect_success setup ' echo "Hi there" >file && echo "initial" >lost && @@ -21,10 +37,18 @@ test_expect_success setup ' git branch other-branch && + git symbolic-ref HEAD refs/heads/unrelated && + git rm -f "*" && + echo "Unrelated branch" >side && + git add side && + test_tick && git commit -m "Side root" && + note J && + git checkout master && + echo "Hello" >file && echo "second" >lost && git add file lost && - test_tick && git commit -m "Modified file and lost" && + test_tick && GIT_AUTHOR_DATE=$(($test_tick + 120)) git commit -m "Modified file and lost" && note B && git checkout other-branch && @@ -63,13 +87,6 @@ test_expect_success setup ' test_tick && git commit -a -m "Final change" && note I && - git symbolic-ref HEAD refs/heads/unrelated && - git rm -f "*" && - echo "Unrelated branch" >side && - git add side && - test_tick && git commit -m "Side root" && - note J && - git checkout master && test_tick && git merge --allow-unrelated-histories -m "Coolest" unrelated && note K && @@ -103,14 +120,24 @@ check_result () { check_outcome success "$@" } -check_result 'L K J I H G F E D C B A' --full-history +check_result 'L K J I H F E D C G B A' --full-history --topo-order +check_result 'L K I H G F E D C B J A' --full-history +check_result 'L K I H G F E D C B J A' --full-history --date-order +check_result 'L K I H G F E D B C J A' --full-history --author-date-order check_result 'K I H E C B A' --full-history -- file check_result 'K I H E C B A' --full-history --topo-order -- file check_result 'K I H E C B A' --full-history --date-order -- file +check_result 'K I H E B C A' --full-history --author-date-order -- file check_result 'I E C B A' --simplify-merges -- file +check_result 'I E C B A' --simplify-merges --topo-order -- file +check_result 'I E C B A' --simplify-merges --date-order -- file +check_result 'I E B C A' --simplify-merges --author-date-order -- file check_result 'I B A' -- file check_result 'I B A' --topo-order -- file +check_result 'I B A' --date-order -- file +check_result 'I B A' --author-date-order -- file check_result 'H' --first-parent -- another-file +check_result 'H' --first-parent --topo-order -- another-file check_result 'E C B A' --full-history E -- lost test_expect_success 'full history simplification without parent' ' diff --git a/t/t6018-rev-list-glob.sh b/t/t6018-rev-list-glob.sh index 0bf10d0686..bb5aeac07f 100755 --- a/t/t6018-rev-list-glob.sh +++ b/t/t6018-rev-list-glob.sh @@ -36,7 +36,13 @@ test_expect_success 'setup' ' git tag foo/bar master && commit master3 && git update-ref refs/remotes/foo/baz master && - commit master4 + commit master4 && + git update-ref refs/remotes/upstream/one subspace/one && + git update-ref refs/remotes/upstream/two subspace/two && + git update-ref refs/remotes/upstream/x subspace-x && + git tag qux/one subspace/one && 
+ git tag qux/two subspace/two && + git tag qux/x subspace-x ' test_expect_success 'rev-parse --glob=refs/heads/subspace/*' ' @@ -141,6 +147,66 @@ test_expect_success 'rev-parse accumulates multiple --exclude' ' compare rev-parse "--exclude=refs/remotes/* --exclude=refs/tags/* --all" --branches ' +test_expect_success 'rev-parse --branches clears --exclude' ' + compare rev-parse "--exclude=* --branches --branches" "--branches" +' + +test_expect_success 'rev-parse --tags clears --exclude' ' + compare rev-parse "--exclude=* --tags --tags" "--tags" +' + +test_expect_success 'rev-parse --all clears --exclude' ' + compare rev-parse "--exclude=* --all --all" "--all" +' + +test_expect_success 'rev-parse --exclude=glob with --branches=glob' ' + compare rev-parse "--exclude=subspace-* --branches=sub*" "subspace/one subspace/two" +' + +test_expect_success 'rev-parse --exclude=glob with --tags=glob' ' + compare rev-parse "--exclude=qux/? --tags=qux/*" "qux/one qux/two" +' + +test_expect_success 'rev-parse --exclude=glob with --remotes=glob' ' + compare rev-parse "--exclude=upstream/? --remotes=upstream/*" "upstream/one upstream/two" +' + +test_expect_success 'rev-parse --exclude=ref with --branches=glob' ' + compare rev-parse "--exclude=subspace-x --branches=sub*" "subspace/one subspace/two" +' + +test_expect_success 'rev-parse --exclude=ref with --tags=glob' ' + compare rev-parse "--exclude=qux/x --tags=qux/*" "qux/one qux/two" +' + +test_expect_success 'rev-parse --exclude=ref with --remotes=glob' ' + compare rev-parse "--exclude=upstream/x --remotes=upstream/*" "upstream/one upstream/two" +' + +test_expect_success 'rev-list --exclude=glob with --branches=glob' ' + compare rev-list "--exclude=subspace-* --branches=sub*" "subspace/one subspace/two" +' + +test_expect_success 'rev-list --exclude=glob with --tags=glob' ' + compare rev-list "--exclude=qux/? --tags=qux/*" "qux/one qux/two" +' + +test_expect_success 'rev-list --exclude=glob with --remotes=glob' ' + compare rev-list "--exclude=upstream/? 
--remotes=upstream/*" "upstream/one upstream/two" +' + +test_expect_success 'rev-list --exclude=ref with --branches=glob' ' + compare rev-list "--exclude=subspace-x --branches=sub*" "subspace/one subspace/two" +' + +test_expect_success 'rev-list --exclude=ref with --tags=glob' ' + compare rev-list "--exclude=qux/x --tags=qux/*" "qux/one qux/two" +' + +test_expect_success 'rev-list --exclude=ref with --remotes=glob' ' + compare rev-list "--exclude=upstream/x --remotes=upstream/*" "upstream/one upstream/two" +' + test_expect_success 'rev-list --glob=refs/heads/subspace/*' ' compare rev-list "subspace/one subspace/two" "--glob=refs/heads/subspace/*" @@ -233,7 +299,7 @@ test_expect_success 'rev-list --tags=foo' ' test_expect_success 'rev-list --tags' ' - compare rev-list "foo/bar" "--tags" + compare rev-list "foo/bar qux/x qux/two qux/one" "--tags" ' @@ -255,7 +321,7 @@ test_expect_success 'rev-list accumulates multiple --exclude' ' compare rev-list "--exclude=refs/remotes/* --exclude=refs/tags/* --all" --branches ' -test_expect_failure 'rev-list should succeed with empty output on empty stdin' ' +test_expect_success 'rev-list should succeed with empty output on empty stdin' ' git rev-list --stdin </dev/null >actual && test_must_be_empty actual ' @@ -292,7 +358,7 @@ test_expect_success 'shortlog accepts --glob/--tags/--remotes' ' "master other/three someref subspace-x subspace/one subspace/two" \ "--glob=heads/*" && compare shortlog foo/bar --tags=foo && - compare shortlog foo/bar --tags && + compare shortlog "foo/bar qux/one qux/two qux/x" --tags && compare shortlog foo/baz --remotes=foo ' diff --git a/t/t6023-merge-file.sh b/t/t6023-merge-file.sh index 20aee43f95..51ee887a77 100755 --- a/t/t6023-merge-file.sh +++ b/t/t6023-merge-file.sh @@ -99,7 +99,7 @@ EOF printf "propter nomen suum." >> expect.txt test_expect_success "merge does not add LF away of change" \ - "test_cmp test3.txt expect.txt" + "test_cmp expect.txt test3.txt" cp test.txt backup.txt test_expect_success "merge with conflicts" \ @@ -122,7 +122,7 @@ non timebo mala, quoniam tu mecum es: virga tua et baculus tuus ipsa me consolata sunt. EOF -test_expect_success "expected conflict markers" "test_cmp test.txt expect.txt" +test_expect_success "expected conflict markers" "test_cmp expect.txt test.txt" cp backup.txt test.txt @@ -138,7 +138,7 @@ non timebo mala, quoniam tu mecum es: virga tua et baculus tuus ipsa me consolata sunt. EOF test_expect_success "merge conflicting with --ours" \ - "git merge-file --ours test.txt orig.txt new3.txt && test_cmp test.txt expect.txt" + "git merge-file --ours test.txt orig.txt new3.txt && test_cmp expect.txt test.txt" cp backup.txt test.txt cat > expect.txt << EOF @@ -154,7 +154,7 @@ non timebo mala, quoniam tu mecum es: virga tua et baculus tuus ipsa me consolata sunt. EOF test_expect_success "merge conflicting with --theirs" \ - "git merge-file --theirs test.txt orig.txt new3.txt && test_cmp test.txt expect.txt" + "git merge-file --theirs test.txt orig.txt new3.txt && test_cmp expect.txt test.txt" cp backup.txt test.txt cat > expect.txt << EOF @@ -171,7 +171,7 @@ non timebo mala, quoniam tu mecum es: virga tua et baculus tuus ipsa me consolata sunt. 
EOF test_expect_success "merge conflicting with --union" \ - "git merge-file --union test.txt orig.txt new3.txt && test_cmp test.txt expect.txt" + "git merge-file --union test.txt orig.txt new3.txt && test_cmp expect.txt test.txt" cp backup.txt test.txt test_expect_success "merge with conflicts, using -L" \ @@ -195,7 +195,7 @@ virga tua et baculus tuus ipsa me consolata sunt. EOF test_expect_success "expected conflict markers, with -L" \ - "test_cmp test.txt expect.txt" + "test_cmp expect.txt test.txt" sed "s/ tu / TU /" < new1.txt > new5.txt test_expect_success "conflict in removed tail" \ diff --git a/t/t6024-recursive-merge.sh b/t/t6024-recursive-merge.sh index 3f59e58dfb..27c7de90ce 100755 --- a/t/t6024-recursive-merge.sh +++ b/t/t6024-recursive-merge.sh @@ -60,9 +60,9 @@ git update-index a1 && GIT_AUTHOR_DATE="2006-12-12 23:00:08" git commit -m F ' -test_expect_success "combined merge conflicts" " - test_must_fail git merge -m final G -" +test_expect_success 'combined merge conflicts' ' + test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git merge -m final G +' cat > expect << EOF <<<<<<< HEAD diff --git a/t/t6027-merge-binary.sh b/t/t6027-merge-binary.sh index 07735410b9..4e6c7cb77e 100755 --- a/t/t6027-merge-binary.sh +++ b/t/t6027-merge-binary.sh @@ -45,7 +45,7 @@ test_expect_success resolve ' false else git ls-files -s >current - test_cmp current expect + test_cmp expect current fi ' @@ -60,7 +60,7 @@ test_expect_success recursive ' false else git ls-files -s >current - test_cmp current expect + test_cmp expect current fi ' diff --git a/t/t6031-merge-filemode.sh b/t/t6031-merge-filemode.sh index 7d06461f13..87741efad3 100755 --- a/t/t6031-merge-filemode.sh +++ b/t/t6031-merge-filemode.sh @@ -61,7 +61,7 @@ do_both_modes () { git checkout -f a2 && test_must_fail git merge -s $strategy b2 && git ls-files -u >actual && - test_cmp actual expect && + test_cmp expect actual && git ls-files -s file2 | grep ^100755 ' diff --git a/t/t6036-recursive-corner-cases.sh b/t/t6036-recursive-corner-cases.sh index 59e52c5a09..e1cef58f2a 100755 --- a/t/t6036-recursive-corner-cases.sh +++ b/t/t6036-recursive-corner-cases.sh @@ -230,13 +230,13 @@ test_expect_success 'git detects differently handled merges conflict' ' :2:new_a :3:new_a && test_cmp expect actual && - git cat-file -p B:new_a >ours && - git cat-file -p C:new_a >theirs && + git cat-file -p C:new_a >ours && + git cat-file -p B:new_a >theirs && >empty && test_must_fail git merge-file \ - -L "Temporary merge branch 2" \ - -L "" \ -L "Temporary merge branch 1" \ + -L "" \ + -L "Temporary merge branch 2" \ ours empty theirs && sed -e "s/^\([<=>]\)/\1\1\1/" ours >expect && git cat-file -p :1:new_a >actual && diff --git a/t/t6112-rev-list-filters-objects.sh b/t/t6112-rev-list-filters-objects.sh index d4ff0b3bef..eb32505a6e 100755 --- a/t/t6112-rev-list-filters-objects.sh +++ b/t/t6112-rev-list-filters-objects.sh @@ -21,24 +21,43 @@ test_expect_success 'setup r1' ' test_expect_success 'verify blob:none omits all 5 blobs' ' git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r1 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:none \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r1 rev-list --quiet --objects --filter-print-omitted \ + --filter=blob:none HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + 
test_cmp expected observed +' + +test_expect_success 'specify blob explicitly prevents filtering' ' + file_3=$(git -C r1 ls-files -s file.3 | + awk -f print_2.awk) && + + file_4=$(git -C r1 ls-files -s file.4 | + awk -f print_2.awk) && + + git -C r1 rev-list --objects --filter=blob:none HEAD $file_3 >observed && + grep "$file_3" observed && + ! grep "$file_4" observed ' test_expect_success 'verify emitted+omitted == all' ' - git -C r1 rev-list HEAD --objects \ - | awk -f print_1.awk \ - | sort >expected && - git -C r1 rev-list HEAD --objects --filter-print-omitted --filter=blob:none \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r1 rev-list --objects HEAD >revs && + awk -f print_1.awk revs | + sort >expected && + + git -C r1 rev-list --objects --filter-print-omitted --filter=blob:none \ + HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' @@ -58,65 +77,82 @@ test_expect_success 'setup r2' ' ' test_expect_success 'verify blob:limit=500 omits all blobs' ' - git -C r2 ls-files -s large.1000 large.10000 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=500 \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r2 ls-files -s large.1000 large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r2 rev-list --quiet --objects --filter-print-omitted \ + --filter=blob:limit=500 HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify emitted+omitted == all' ' - git -C r2 rev-list HEAD --objects \ - | awk -f print_1.awk \ - | sort >expected && - git -C r2 rev-list HEAD --objects --filter-print-omitted --filter=blob:limit=500 \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r2 rev-list --objects HEAD >revs && + awk -f print_1.awk revs | + sort >expected && + + git -C r2 rev-list --objects --filter-print-omitted \ + --filter=blob:limit=500 HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=1000' ' - git -C r2 ls-files -s large.1000 large.10000 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1000 \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r2 ls-files -s large.1000 large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r2 rev-list --quiet --objects --filter-print-omitted \ + --filter=blob:limit=1000 HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=1001' ' - git -C r2 ls-files -s large.10000 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1001 \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r2 ls-files -s large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r2 rev-list --quiet --objects --filter-print-omitted \ + --filter=blob:limit=1001 HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + 
sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=1k' ' - git -C r2 ls-files -s large.10000 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1k \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r2 ls-files -s large.10000 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r2 rev-list --quiet --objects --filter-print-omitted \ + --filter=blob:limit=1k HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify blob:limit=1m' ' - git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1m \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && + git -C r2 rev-list --quiet --objects --filter-print-omitted \ + --filter=blob:limit=1m HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + test_must_be_empty observed ' @@ -141,25 +177,31 @@ test_expect_success 'setup r3' ' ' test_expect_success 'verify sparse:path=pattern1 omits top-level files' ' - git -C r3 ls-files -s sparse1 sparse2 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:path=../pattern1 \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r3 ls-files -s sparse1 sparse2 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r3 rev-list --quiet --objects --filter-print-omitted \ + --filter=sparse:path=../pattern1 HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify sparse:path=pattern2 omits both sparse2 files' ' - git -C r3 ls-files -s sparse2 dir1/sparse2 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:path=../pattern2 \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r3 ls-files -s sparse2 dir1/sparse2 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r3 rev-list --quiet --objects --filter-print-omitted \ + --filter=sparse:path=../pattern2 HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' # Test sparse:oid=<oid-ish> filter. 
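For orientation, the object filters exercised throughout this file all hang off the same rev-list option. Roughly (run inside one of the r1/r2/r3 fixture repos; --filter-print-omitted additionally lists the omitted objects prefixed with "~"):

	git rev-list --objects --filter=blob:none HEAD                  # omit every blob
	git rev-list --objects --filter=blob:limit=1k HEAD              # omit large blobs (the limit
	                                                                # tests above pin the boundary)
	git rev-list --objects --filter=sparse:path=../pattern1 HEAD    # sparse spec read from a file
	git rev-list --objects --filter=sparse:oid=master:pattern HEAD  # sparse spec stored as a blob
	git rev-list --objects --filter=tree:0 HEAD                     # omit all trees and blobs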
@@ -173,26 +215,83 @@ test_expect_success 'setup r3 part 2' ' ' test_expect_success 'verify sparse:oid=OID omits top-level files' ' - git -C r3 ls-files -s pattern sparse1 sparse2 \ - | awk -f print_2.awk \ - | sort >expected && + git -C r3 ls-files -s pattern sparse1 sparse2 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + oid=$(git -C r3 ls-files -s pattern | awk -f print_2.awk) && - git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:oid=$oid \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + + git -C r3 rev-list --quiet --objects --filter-print-omitted \ + --filter=sparse:oid=$oid HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed ' test_expect_success 'verify sparse:oid=oid-ish omits top-level files' ' - git -C r3 ls-files -s pattern sparse1 sparse2 \ - | awk -f print_2.awk \ - | sort >expected && - git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:oid=master:pattern \ - | awk -f print_1.awk \ - | sed "s/~//" \ - | sort >observed && - test_cmp observed expected + git -C r3 ls-files -s pattern sparse1 sparse2 >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + + git -C r3 rev-list --quiet --objects --filter-print-omitted \ + --filter=sparse:oid=master:pattern HEAD >revs && + awk -f print_1.awk revs | + sed "s/~//" | + sort >observed && + + test_cmp expected observed +' + +test_expect_success 'rev-list W/ --missing=print and --missing=allow-any for trees' ' + TREE=$(git -C r3 rev-parse HEAD:dir1) && + + # Create a spare repo because we will be deleting objects from this one. + git clone r3 r3.b && + + rm r3.b/.git/objects/$(echo $TREE | sed "s|^..|&/|") && + + git -C r3.b rev-list --quiet --missing=print --objects HEAD \ + >missing_objs 2>rev_list_err && + echo "?$TREE" >expected && + test_cmp expected missing_objs && + + # do not complain when a missing tree cannot be parsed + test_must_be_empty rev_list_err && + + git -C r3.b rev-list --missing=allow-any --objects HEAD \ + >objs 2>rev_list_err && + ! grep $TREE objs && + test_must_be_empty rev_list_err +' + +# Test tree:0 filter. + +test_expect_success 'verify tree:0 includes trees in "filtered" output' ' + git -C r3 rev-list --quiet --objects --filter-print-omitted \ + --filter=tree:0 HEAD >revs && + + awk -f print_1.awk revs | + sed s/~// | + xargs -n1 git -C r3 cat-file -t >unsorted_filtered_types && + + sort -u unsorted_filtered_types >filtered_types && + test_write_lines blob tree >expected && + test_cmp expected filtered_types +' + +# Make sure tree:0 does not iterate through any trees. + +test_expect_success 'filter a GIANT tree through tree:0' ' + GIT_TRACE=1 git -C r3 rev-list \ + --objects --filter=tree:0 HEAD 2>filter_trace && + grep "Skipping contents of tree [.][.][.]" filter_trace >actual && + # One line for each commit traversed. + test_line_count = 2 actual && + + # Make sure no other trees were considered besides the root. + ! grep "Skipping contents of tree [^.]" filter_trace ' # Delete some loose objects and use rev-list, but WITHOUT any filtering. 
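The hunks that follow delete loose objects outright, so for reference the rev-list knob they exercise distinguishes, roughly, these modes:

	git rev-list --objects --missing=error HEAD      # the default: die on a missing object
	git rev-list --objects --missing=allow-any HEAD  # silently tolerate missing objects
	git rev-list --objects --missing=print HEAD      # keep going and list them, prefixed with "?"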
@@ -200,17 +299,21 @@ test_expect_success 'verify sparse:oid=oid-ish omits top-level files' ' test_expect_success 'rev-list W/ --missing=print' ' git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \ - | awk -f print_2.awk \ - | sort >expected && + >ls_files_result && + awk -f print_2.awk ls_files_result | + sort >expected && + for id in `cat expected | sed "s|..|&/|"` do rm r1/.git/objects/$id done && - git -C r1 rev-list --quiet HEAD --missing=print --objects \ - | awk -f print_1.awk \ - | sed "s/?//" \ - | sort >observed && - test_cmp observed expected + + git -C r1 rev-list --quiet --missing=print --objects HEAD >revs && + awk -f print_1.awk revs | + sed "s/?//" | + sort >observed && + + test_cmp expected observed ' test_expect_success 'rev-list W/O --missing fails' ' diff --git a/t/t6132-pathspec-exclude.sh b/t/t6132-pathspec-exclude.sh index eb829fce97..2462b19ddd 100755 --- a/t/t6132-pathspec-exclude.sh +++ b/t/t6132-pathspec-exclude.sh @@ -194,4 +194,21 @@ test_expect_success 'multiple exclusions' ' test_cmp expect actual ' +test_expect_success 't_e_i() exclude case #8' ' + git init case8 && + ( + cd case8 && + echo file >file1 && + echo file >file2 && + git add file1 file2 && + git commit -m twofiles && + git grep -l file HEAD :^file2 >actual && + echo HEAD:file1 >expected && + test_cmp expected actual && + git grep -l file HEAD :^file1 >actual && + echo HEAD:file2 >expected && + test_cmp expected actual + ) +' + test_done diff --git a/t/t6500-gc.sh b/t/t6500-gc.sh index 818435f04e..4684d06552 100755 --- a/t/t6500-gc.sh +++ b/t/t6500-gc.sh @@ -4,6 +4,7 @@ test_description='basic git gc tests ' . ./test-lib.sh +. "$TEST_DIRECTORY"/lib-terminal.sh test_expect_success 'setup' ' # do not let the amount of physical memory affects gc @@ -99,6 +100,26 @@ test_expect_success 'auto gc with too many loose objects does not attempt to cre test_line_count = 2 new # There is one new pack and its .idx ' +test_expect_success 'gc --no-quiet' ' + git -c gc.writeCommitGraph=true gc --no-quiet >stdout 2>stderr && + test_must_be_empty stdout && + test_line_count = 1 stderr && + test_i18ngrep "Computing commit graph generation numbers" stderr +' + +test_expect_success TTY 'with TTY: gc --no-quiet' ' + test_terminal git -c gc.writeCommitGraph=true gc --no-quiet >stdout 2>stderr && + test_must_be_empty stdout && + test_i18ngrep "Enumerating objects" stderr && + test_i18ngrep "Computing commit graph generation numbers" stderr +' + +test_expect_success 'gc --quiet' ' + git -c gc.writeCommitGraph=true gc --quiet >stdout 2>stderr && + test_must_be_empty stdout && + test_must_be_empty stderr +' + run_and_wait_for_auto_gc () { # We read stdout from gc for the side effect of waiting until the # background gc process exits, closing its fd 9. 
Furthermore, the @@ -116,11 +137,11 @@ test_expect_success 'background auto gc does not run if gc.log is present and re test_config gc.autopacklimit 1 && test_config gc.autodetach true && echo fleem >.git/gc.log && - test_must_fail git gc --auto 2>err && - test_i18ngrep "^error:" err && + git gc --auto 2>err && + test_i18ngrep "^warning:" err && test_config gc.logexpiry 5.days && test-tool chmtime =-345600 .git/gc.log && - test_must_fail git gc --auto && + git gc --auto && test_config gc.logexpiry 2.days && run_and_wait_for_auto_gc && ls .git/objects/pack/pack-*.pack >packs && diff --git a/t/t6600-test-reach.sh b/t/t6600-test-reach.sh new file mode 100755 index 0000000000..b24d850036 --- /dev/null +++ b/t/t6600-test-reach.sh @@ -0,0 +1,408 @@ +#!/bin/sh + +test_description='basic commit reachability tests' + +. ./test-lib.sh + +# Construct a grid-like commit graph with points (x,y) +# with 1 <= x <= 10, 1 <= y <= 10, where (x,y) has +# parents (x-1, y) and (x, y-1), keeping in mind that +# we drop a parent if a coordinate is nonpositive. +# +# (10,10) +# / \ +# (10,9) (9,10) +# / \ / \ +# (10,8) (9,9) (8,10) +# / \ / \ / \ +# ( continued...) +# \ / \ / \ / +# (3,1) (2,2) (1,3) +# \ / \ / +# (2,1) (2,1) +# \ / +# (1,1) +# +# We use branch 'commit-x-y' to refer to (x,y). +# This grid allows interesting reachability and +# non-reachability queries: (x,y) can reach (x',y') +# if and only if x' <= x and y' <= y. +test_expect_success 'setup' ' + for i in $(test_seq 1 10) + do + test_commit "1-$i" && + git branch -f commit-1-$i && + git tag -a -m "1-$i" tag-1-$i commit-1-$i + done && + for j in $(test_seq 1 9) + do + git reset --hard commit-$j-1 && + x=$(($j + 1)) && + test_commit "$x-1" && + git branch -f commit-$x-1 && + git tag -a -m "$x-1" tag-$x-1 commit-$x-1 && + + for i in $(test_seq 2 10) + do + git merge commit-$j-$i -m "$x-$i" && + git branch -f commit-$x-$i && + git tag -a -m "$x-$i" tag-$x-$i commit-$x-$i + done + done && + git commit-graph write --reachable && + mv .git/objects/info/commit-graph commit-graph-full && + git show-ref -s commit-5-5 | git commit-graph write --stdin-commits && + mv .git/objects/info/commit-graph commit-graph-half && + git config core.commitGraph true +' + +run_three_modes () { + test_when_finished rm -rf .git/objects/info/commit-graph && + "$@" <input >actual && + test_cmp expect actual && + cp commit-graph-full .git/objects/info/commit-graph && + "$@" <input >actual && + test_cmp expect actual && + cp commit-graph-half .git/objects/info/commit-graph && + "$@" <input >actual && + test_cmp expect actual +} + +test_three_modes () { + run_three_modes test-tool reach "$@" +} + +test_expect_success 'ref_newer:miss' ' + cat >input <<-\EOF && + A:commit-5-7 + B:commit-4-9 + EOF + echo "ref_newer(A,B):0" >expect && + test_three_modes ref_newer +' + +test_expect_success 'ref_newer:hit' ' + cat >input <<-\EOF && + A:commit-5-7 + B:commit-2-3 + EOF + echo "ref_newer(A,B):1" >expect && + test_three_modes ref_newer +' + +test_expect_success 'in_merge_bases:hit' ' + cat >input <<-\EOF && + A:commit-5-7 + B:commit-8-8 + EOF + echo "in_merge_bases(A,B):1" >expect && + test_three_modes in_merge_bases +' + +test_expect_success 'in_merge_bases:miss' ' + cat >input <<-\EOF && + A:commit-6-8 + B:commit-5-9 + EOF + echo "in_merge_bases(A,B):0" >expect && + test_three_modes in_merge_bases +' + +test_expect_success 'is_descendant_of:hit' ' + cat >input <<-\EOF && + A:commit-5-7 + X:commit-4-8 + X:commit-6-6 + X:commit-1-1 + EOF + echo "is_descendant_of(A,X):1" >expect && + 
test_three_modes is_descendant_of +' + +test_expect_success 'is_descendant_of:miss' ' + cat >input <<-\EOF && + A:commit-6-8 + X:commit-5-9 + X:commit-4-10 + X:commit-7-6 + EOF + echo "is_descendant_of(A,X):0" >expect && + test_three_modes is_descendant_of +' + +test_expect_success 'get_merge_bases_many' ' + cat >input <<-\EOF && + A:commit-5-7 + X:commit-4-8 + X:commit-6-6 + X:commit-8-3 + EOF + { + echo "get_merge_bases_many(A,X):" && + git rev-parse commit-5-6 \ + commit-4-7 | sort + } >expect && + test_three_modes get_merge_bases_many +' + +test_expect_success 'reduce_heads' ' + cat >input <<-\EOF && + X:commit-1-10 + X:commit-2-8 + X:commit-3-6 + X:commit-4-4 + X:commit-1-7 + X:commit-2-5 + X:commit-3-3 + X:commit-5-1 + EOF + { + echo "reduce_heads(X):" && + git rev-parse commit-5-1 \ + commit-4-4 \ + commit-3-6 \ + commit-2-8 \ + commit-1-10 | sort + } >expect && + test_three_modes reduce_heads +' + +test_expect_success 'can_all_from_reach:hit' ' + cat >input <<-\EOF && + X:commit-2-10 + X:commit-3-9 + X:commit-4-8 + X:commit-5-7 + X:commit-6-6 + X:commit-7-5 + X:commit-8-4 + X:commit-9-3 + Y:commit-1-9 + Y:commit-2-8 + Y:commit-3-7 + Y:commit-4-6 + Y:commit-5-5 + Y:commit-6-4 + Y:commit-7-3 + Y:commit-8-1 + EOF + echo "can_all_from_reach(X,Y):1" >expect && + test_three_modes can_all_from_reach +' + +test_expect_success 'can_all_from_reach:miss' ' + cat >input <<-\EOF && + X:commit-2-10 + X:commit-3-9 + X:commit-4-8 + X:commit-5-7 + X:commit-6-6 + X:commit-7-5 + X:commit-8-4 + X:commit-9-3 + Y:commit-1-9 + Y:commit-2-8 + Y:commit-3-7 + Y:commit-4-6 + Y:commit-5-5 + Y:commit-6-4 + Y:commit-8-5 + EOF + echo "can_all_from_reach(X,Y):0" >expect && + test_three_modes can_all_from_reach +' + +test_expect_success 'can_all_from_reach_with_flag: tags case' ' + cat >input <<-\EOF && + X:tag-2-10 + X:tag-3-9 + X:tag-4-8 + X:commit-5-7 + X:commit-6-6 + X:commit-7-5 + X:commit-8-4 + X:commit-9-3 + Y:tag-1-9 + Y:tag-2-8 + Y:tag-3-7 + Y:commit-4-6 + Y:commit-5-5 + Y:commit-6-4 + Y:commit-7-3 + Y:commit-8-1 + EOF + echo "can_all_from_reach_with_flag(X,_,_,0,0):1" >expect && + test_three_modes can_all_from_reach_with_flag +' + +test_expect_success 'commit_contains:hit' ' + cat >input <<-\EOF && + A:commit-7-7 + X:commit-2-10 + X:commit-3-9 + X:commit-4-8 + X:commit-5-7 + X:commit-6-6 + X:commit-7-5 + X:commit-8-4 + X:commit-9-3 + EOF + echo "commit_contains(_,A,X,_):1" >expect && + test_three_modes commit_contains && + test_three_modes commit_contains --tag +' + +test_expect_success 'commit_contains:miss' ' + cat >input <<-\EOF && + A:commit-6-5 + X:commit-2-10 + X:commit-3-9 + X:commit-4-8 + X:commit-5-7 + X:commit-6-6 + X:commit-7-5 + X:commit-8-4 + X:commit-9-3 + EOF + echo "commit_contains(_,A,X,_):0" >expect && + test_three_modes commit_contains && + test_three_modes commit_contains --tag +' + +test_expect_success 'rev-list: basic topo-order' ' + git rev-parse \ + commit-6-6 commit-5-6 commit-4-6 commit-3-6 commit-2-6 commit-1-6 \ + commit-6-5 commit-5-5 commit-4-5 commit-3-5 commit-2-5 commit-1-5 \ + commit-6-4 commit-5-4 commit-4-4 commit-3-4 commit-2-4 commit-1-4 \ + commit-6-3 commit-5-3 commit-4-3 commit-3-3 commit-2-3 commit-1-3 \ + commit-6-2 commit-5-2 commit-4-2 commit-3-2 commit-2-2 commit-1-2 \ + commit-6-1 commit-5-1 commit-4-1 commit-3-1 commit-2-1 commit-1-1 \ + >expect && + run_three_modes git rev-list --topo-order commit-6-6 +' + +test_expect_success 'rev-list: first-parent topo-order' ' + git rev-parse \ + commit-6-6 \ + commit-6-5 \ + commit-6-4 \ + commit-6-3 \ + commit-6-2 \ 
+ commit-6-1 commit-5-1 commit-4-1 commit-3-1 commit-2-1 commit-1-1 \ + >expect && + run_three_modes git rev-list --first-parent --topo-order commit-6-6 +' + +test_expect_success 'rev-list: range topo-order' ' + git rev-parse \ + commit-6-6 commit-5-6 commit-4-6 commit-3-6 commit-2-6 commit-1-6 \ + commit-6-5 commit-5-5 commit-4-5 commit-3-5 commit-2-5 commit-1-5 \ + commit-6-4 commit-5-4 commit-4-4 commit-3-4 commit-2-4 commit-1-4 \ + commit-6-3 commit-5-3 commit-4-3 \ + commit-6-2 commit-5-2 commit-4-2 \ + commit-6-1 commit-5-1 commit-4-1 \ + >expect && + run_three_modes git rev-list --topo-order commit-3-3..commit-6-6 +' + +test_expect_success 'rev-list: range topo-order' ' + git rev-parse \ + commit-6-6 commit-5-6 commit-4-6 \ + commit-6-5 commit-5-5 commit-4-5 \ + commit-6-4 commit-5-4 commit-4-4 \ + commit-6-3 commit-5-3 commit-4-3 \ + commit-6-2 commit-5-2 commit-4-2 \ + commit-6-1 commit-5-1 commit-4-1 \ + >expect && + run_three_modes git rev-list --topo-order commit-3-8..commit-6-6 +' + +test_expect_success 'rev-list: first-parent range topo-order' ' + git rev-parse \ + commit-6-6 \ + commit-6-5 \ + commit-6-4 \ + commit-6-3 \ + commit-6-2 \ + commit-6-1 commit-5-1 commit-4-1 \ + >expect && + run_three_modes git rev-list --first-parent --topo-order commit-3-8..commit-6-6 +' + +test_expect_success 'rev-list: ancestry-path topo-order' ' + git rev-parse \ + commit-6-6 commit-5-6 commit-4-6 commit-3-6 \ + commit-6-5 commit-5-5 commit-4-5 commit-3-5 \ + commit-6-4 commit-5-4 commit-4-4 commit-3-4 \ + commit-6-3 commit-5-3 commit-4-3 \ + >expect && + run_three_modes git rev-list --topo-order --ancestry-path commit-3-3..commit-6-6 +' + +test_expect_success 'rev-list: symmetric difference topo-order' ' + git rev-parse \ + commit-6-6 commit-5-6 commit-4-6 \ + commit-6-5 commit-5-5 commit-4-5 \ + commit-6-4 commit-5-4 commit-4-4 \ + commit-6-3 commit-5-3 commit-4-3 \ + commit-6-2 commit-5-2 commit-4-2 \ + commit-6-1 commit-5-1 commit-4-1 \ + commit-3-8 commit-2-8 commit-1-8 \ + commit-3-7 commit-2-7 commit-1-7 \ + >expect && + run_three_modes git rev-list --topo-order commit-3-8...commit-6-6 +' + +test_expect_success 'get_reachable_subset:all' ' + cat >input <<-\EOF && + X:commit-9-1 + X:commit-8-3 + X:commit-7-5 + X:commit-6-6 + X:commit-1-7 + Y:commit-3-3 + Y:commit-1-7 + Y:commit-5-6 + EOF + ( + echo "get_reachable_subset(X,Y)" && + git rev-parse commit-3-3 \ + commit-1-7 \ + commit-5-6 | sort + ) >expect && + test_three_modes get_reachable_subset +' + +test_expect_success 'get_reachable_subset:some' ' + cat >input <<-\EOF && + X:commit-9-1 + X:commit-8-3 + X:commit-7-5 + X:commit-1-7 + Y:commit-3-3 + Y:commit-1-7 + Y:commit-5-6 + EOF + ( + echo "get_reachable_subset(X,Y)" && + git rev-parse commit-3-3 \ + commit-1-7 | sort + ) >expect && + test_three_modes get_reachable_subset +' + +test_expect_success 'get_reachable_subset:none' ' + cat >input <<-\EOF && + X:commit-9-1 + X:commit-8-3 + X:commit-7-5 + X:commit-1-7 + Y:commit-9-3 + Y:commit-7-6 + Y:commit-2-8 + EOF + echo "get_reachable_subset(X,Y)" >expect && + test_three_modes get_reachable_subset +' + +test_done diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh index 2da57fce7b..190ae149cf 100755 --- a/t/t7063-status-untracked-cache.sh +++ b/t/t7063-status-untracked-cache.sh @@ -55,7 +55,7 @@ test_expect_success 'setup' ' ' test_expect_success 'untracked cache is empty' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect-empty <<EOF && info/exclude 
0000000000000000000000000000000000000000 core.excludesfile 0000000000000000000000000000000000000000 @@ -106,7 +106,7 @@ EOF ' test_expect_success 'untracked cache after first status' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../dump.expect ../actual ' @@ -126,7 +126,7 @@ EOF ' test_expect_success 'untracked cache after second status' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../dump.expect ../actual ' @@ -157,7 +157,7 @@ EOF ' test_expect_success 'verify untracked cache dump' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude $EMPTY_BLOB core.excludesfile 0000000000000000000000000000000000000000 @@ -204,7 +204,7 @@ EOF ' test_expect_success 'verify untracked cache dump' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude $EMPTY_BLOB core.excludesfile 0000000000000000000000000000000000000000 @@ -248,7 +248,7 @@ EOF ' test_expect_success 'verify untracked cache dump' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 core.excludesfile 0000000000000000000000000000000000000000 @@ -267,7 +267,7 @@ EOF test_expect_success 'move two from tracked to untracked' ' git rm --cached two && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 core.excludesfile 0000000000000000000000000000000000000000 @@ -304,7 +304,7 @@ EOF ' test_expect_success 'verify untracked cache dump' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 core.excludesfile 0000000000000000000000000000000000000000 @@ -324,7 +324,7 @@ EOF test_expect_success 'move two from untracked to tracked' ' git add two && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 core.excludesfile 0000000000000000000000000000000000000000 @@ -361,7 +361,7 @@ EOF ' test_expect_success 'verify untracked cache dump' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 core.excludesfile 0000000000000000000000000000000000000000 @@ -405,7 +405,7 @@ EOF ' test_expect_success 'untracked cache correct after commit' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 core.excludesfile 0000000000000000000000000000000000000000 @@ -464,7 +464,7 @@ EOF ' test_expect_success 'untracked cache correct after status' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 core.excludesfile 0000000000000000000000000000000000000000 @@ -532,7 +532,7 @@ EOF ' test_expect_success 'verify untracked cache dump (sparse/subdirs)' ' - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && cat >../expect-from-test-dump <<EOF && info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0 
core.excludesfile 0000000000000000000000000000000000000000 @@ -598,66 +598,66 @@ EOF test_expect_success '--no-untracked-cache removes the cache' ' git update-index --no-untracked-cache && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && echo "no untracked cache" >../expect-no-uc && test_cmp ../expect-no-uc ../actual ' test_expect_success 'git status does not change anything' ' git status && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-no-uc ../actual ' test_expect_success 'setting core.untrackedCache to true and using git status creates the cache' ' git config core.untrackedCache true && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-no-uc ../actual && git status && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-from-test-dump ../actual ' test_expect_success 'using --no-untracked-cache does not fail when core.untrackedCache is true' ' git update-index --no-untracked-cache && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-no-uc ../actual && git update-index --untracked-cache && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-empty ../actual ' test_expect_success 'setting core.untrackedCache to false and using git status removes the cache' ' git config core.untrackedCache false && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-empty ../actual && git status && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-no-uc ../actual ' test_expect_success 'using --untracked-cache does not fail when core.untrackedCache is false' ' git update-index --untracked-cache && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-empty ../actual ' test_expect_success 'setting core.untrackedCache to keep' ' git config core.untrackedCache keep && git update-index --untracked-cache && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-empty ../actual && git status && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-from-test-dump ../actual && git update-index --no-untracked-cache && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-no-uc ../actual && git update-index --force-untracked-cache && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-empty ../actual && git status && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && test_cmp ../expect-from-test-dump ../actual ' @@ -671,23 +671,23 @@ test_expect_success 'test ident field is working' ' test_expect_success 'untracked cache survives a checkout' ' git commit --allow-empty -m empty && - test-dump-untracked-cache >../before && + test-tool dump-untracked-cache >../before && test_when_finished "git checkout master" && git checkout -b other_branch && - test-dump-untracked-cache >../after && + test-tool dump-untracked-cache >../after && test_cmp ../before ../after && test_commit test && - test-dump-untracked-cache >../before && + test-tool dump-untracked-cache >../before && git checkout 
master && - test-dump-untracked-cache >../after && + test-tool dump-untracked-cache >../after && test_cmp ../before ../after ' test_expect_success 'untracked cache survives a commit' ' - test-dump-untracked-cache >../before && + test-tool dump-untracked-cache >../before && git add done/two && git commit -m commit && - test-dump-untracked-cache >../after && + test-tool dump-untracked-cache >../after && test_cmp ../before ../after ' @@ -751,7 +751,7 @@ test_expect_success '"status" after file replacement should be clean with UC=tru git checkout master && avoid_racy && status_is_clean && - test-dump-untracked-cache >../actual && + test-tool dump-untracked-cache >../actual && grep -F "recurse valid" ../actual >../actual.grep && cat >../expect.grep <<EOF && / 0000000000000000000000000000000000000000 recurse valid diff --git a/t/t7201-co.sh b/t/t7201-co.sh index 324933acfe..72b9b375ba 100755 --- a/t/t7201-co.sh +++ b/t/t7201-co.sh @@ -160,7 +160,7 @@ test_expect_success 'checkout -m with merge conflict' ' git diff master:one :3:uno | sed -e "1,/^@@/d" -e "/^ /d" -e "s/^-/d/" -e "s/^+/a/" >current && fill d2 aT d7 aS >expect && - test_cmp current expect && + test_cmp expect current && git diff --cached two >current && test_must_be_empty current ' @@ -174,7 +174,7 @@ test_expect_success 'format of merge conflict from checkout -m' ' git ls-files >current && fill same two two two >expect && - test_cmp current expect && + test_cmp expect current && cat <<-EOF >expect && <<<<<<< simple @@ -254,9 +254,9 @@ test_expect_success 'checkout to detach HEAD (with advice declined)' ' test_expect_success 'checkout to detach HEAD' ' git config advice.detachedHead true && git checkout -f renamer && git clean -f && - git checkout renamer^ 2>messages && - test_i18ngrep "HEAD is now at 7329388" messages && - (test_line_count -gt 1 messages || test -n "$GETTEXT_POISON") && + GIT_TEST_GETTEXT_POISON= git checkout renamer^ 2>messages && + grep "HEAD is now at 7329388" messages && + test_line_count -gt 1 messages && H=$(git rev-parse --verify HEAD) && M=$(git show-ref -s --verify refs/heads/master) && test "z$H" = "z$M" && diff --git a/t/t7400-submodule-basic.sh b/t/t7400-submodule-basic.sh index c0ffc1022a..76a7cb0af7 100755 --- a/t/t7400-submodule-basic.sh +++ b/t/t7400-submodule-basic.sh @@ -1224,6 +1224,30 @@ test_expect_success 'submodule update and setting submodule.<name>.active' ' test_cmp expect actual ' +test_expect_success 'clone active submodule without submodule url set' ' + test_when_finished "rm -rf test/test" && + mkdir test && + # another dir breaks accidental relative paths still being correct + git clone file://"$pwd"/multisuper test/test && + ( + cd test/test && + git config submodule.active "." 
&& + + # do not pass --init flag, as the submodule is already active: + git submodule update && + git submodule status >actual_raw && + + cut -c 1,43- actual_raw >actual && + cat >expect <<-\EOF && + sub0 (test2) + sub1 (test2) + sub2 (test2) + sub3 (test2) + EOF + test_cmp expect actual + ) +' + test_expect_success 'clone --recurse-submodules with a pathspec works' ' test_when_finished "rm -rf multisuper_clone" && cat >expected <<-\EOF && diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh index 10dc91620a..e87164aa8f 100755 --- a/t/t7406-submodule-update.sh +++ b/t/t7406-submodule-update.sh @@ -789,7 +789,7 @@ test_expect_success 'submodule add places git-dir in superprojects git-dir' ' (cd .git/modules/deeper/submodule && git log > ../../../../actual ) && - test_cmp actual expected + test_cmp expected actual ) ' @@ -807,7 +807,7 @@ test_expect_success 'submodule update places git-dir in superprojects git-dir' ' (cd .git/modules/deeper/submodule && git log > ../../../../actual ) && - test_cmp actual expected + test_cmp expected actual ) ' @@ -827,7 +827,7 @@ test_expect_success 'submodule add places git-dir in superprojects git-dir recur git add deeper/submodule && git commit -m "update submodule" && git push origin : && - test_cmp actual expected + test_cmp expected actual ) ' @@ -874,7 +874,7 @@ test_expect_success 'submodule update places git-dir in superprojects git-dir re (cd .git/modules/submodule/modules/subsubmodule && git log > ../../../../../actual ) && - test_cmp actual expected + test_cmp expected actual ) ' diff --git a/t/t7411-submodule-config.sh b/t/t7411-submodule-config.sh index 0bde5850ac..89690b7adb 100755 --- a/t/t7411-submodule-config.sh +++ b/t/t7411-submodule-config.sh @@ -82,29 +82,23 @@ Submodule name: 'a' for path 'b' Submodule name: 'submodule' for path 'submodule' EOF -test_expect_success 'error in one submodule config lets continue' ' +test_expect_success 'error in history of one submodule config lets continue, stderr message contains blob ref' ' + ORIG=$(git -C super rev-parse HEAD) && + test_when_finished "git -C super reset --hard $ORIG" && (cd super && cp .gitmodules .gitmodules.bak && echo " value = \"" >>.gitmodules && git add .gitmodules && mv .gitmodules.bak .gitmodules && git commit -m "add error" && - test-tool submodule-config \ - HEAD b \ - HEAD submodule \ - >actual && - test_cmp expect_error actual - ) -' - -test_expect_success 'error message contains blob reference' ' - (cd super && sha1=$(git rev-parse HEAD) && test-tool submodule-config \ HEAD b \ HEAD submodule \ - 2>actual_err && - test_i18ngrep "submodule-blob $sha1:.gitmodules" actual_err >/dev/null + >actual \ + 2>actual_stderr && + test_cmp expect_error actual && + test_i18ngrep "submodule-blob $sha1:.gitmodules" actual_stderr >/dev/null ) ' @@ -123,6 +117,8 @@ test_expect_success 'using different treeishs works' ' ' test_expect_success 'error in history in fetchrecursesubmodule lets continue' ' + ORIG=$(git -C super rev-parse HEAD) && + test_when_finished "git -C super reset --hard $ORIG" && (cd super && git config -f .gitmodules \ submodule.submodule.fetchrecursesubmodules blabla && @@ -134,8 +130,123 @@ test_expect_success 'error in history in fetchrecursesubmodule lets continue' ' HEAD b \ HEAD submodule \ >actual && - test_cmp expect_error actual && - git reset --hard HEAD^ + test_cmp expect_error actual + ) +' + +test_expect_success 'reading submodules config from the working tree with "submodule--helper config"' ' + (cd super && + echo "../submodule" >expect && 
+ git submodule--helper config submodule.submodule.url >actual && + test_cmp expect actual + ) +' + +test_expect_success 'writing submodules config with "submodule--helper config"' ' + (cd super && + echo "new_url" >expect && + git submodule--helper config submodule.submodule.url "new_url" && + git submodule--helper config submodule.submodule.url >actual && + test_cmp expect actual + ) +' + +test_expect_success 'overwriting unstaged submodules config with "submodule--helper config"' ' + test_when_finished "git -C super checkout .gitmodules" && + (cd super && + echo "newer_url" >expect && + git submodule--helper config submodule.submodule.url "newer_url" && + git submodule--helper config submodule.submodule.url >actual && + test_cmp expect actual + ) +' + +test_expect_success 'writeable .gitmodules when it is in the working tree' ' + git -C super submodule--helper config --check-writeable +' + +test_expect_success 'writeable .gitmodules when it is nowhere in the repository' ' + ORIG=$(git -C super rev-parse HEAD) && + test_when_finished "git -C super reset --hard $ORIG" && + (cd super && + git rm .gitmodules && + git commit -m "remove .gitmodules from the current branch" && + git submodule--helper config --check-writeable + ) +' + +test_expect_success 'non-writeable .gitmodules when it is in the index but not in the working tree' ' + test_when_finished "git -C super checkout .gitmodules" && + (cd super && + rm -f .gitmodules && + test_must_fail git submodule--helper config --check-writeable + ) +' + +test_expect_success 'non-writeable .gitmodules when it is in the current branch but not in the index' ' + ORIG=$(git -C super rev-parse HEAD) && + test_when_finished "git -C super reset --hard $ORIG" && + (cd super && + git rm .gitmodules && + test_must_fail git submodule--helper config --check-writeable + ) +' + +test_expect_success 'reading submodules config from the index when .gitmodules is not in the working tree' ' + ORIG=$(git -C super rev-parse HEAD) && + test_when_finished "git -C super reset --hard $ORIG" && + (cd super && + git submodule--helper config submodule.submodule.url "staged_url" && + git add .gitmodules && + rm -f .gitmodules && + echo "staged_url" >expect && + git submodule--helper config submodule.submodule.url >actual && + test_cmp expect actual + ) +' + +test_expect_success 'reading submodules config from the current branch when .gitmodules is not in the index' ' + ORIG=$(git -C super rev-parse HEAD) && + test_when_finished "git -C super reset --hard $ORIG" && + (cd super && + git rm .gitmodules && + echo "../submodule" >expect && + git submodule--helper config submodule.submodule.url >actual && + test_cmp expect actual + ) +' + +test_expect_success 'reading nested submodules config' ' + (cd super && + git init submodule/nested_submodule && + echo "a" >submodule/nested_submodule/a && + git -C submodule/nested_submodule add a && + git -C submodule/nested_submodule commit -m "add a" && + git -C submodule submodule add ./nested_submodule && + git -C submodule add nested_submodule && + git -C submodule commit -m "added nested_submodule" && + git add submodule && + git commit -m "updated submodule" && + echo "./nested_submodule" >expect && + test-tool submodule-nested-repo-config \ + submodule submodule.nested_submodule.url >actual && + test_cmp expect actual + ) +' + +# When this test eventually passes, before turning it into +# test_expect_success, remember to replace the test_i18ngrep below with +# a "test_must_be_empty warning" to be sure that the warning is actually +# 
removed from the code. +test_expect_failure 'reading nested submodules config when .gitmodules is not in the working tree' ' + test_when_finished "git -C super/submodule checkout .gitmodules" && + (cd super && + echo "./nested_submodule" >expect && + rm submodule/.gitmodules && + test-tool submodule-nested-repo-config \ + submodule submodule.nested_submodule.url >actual 2>warning && + test_i18ngrep "nested submodules without %s in the working tree are not supported yet" warning && + test_cmp expect actual ) ' diff --git a/t/t7418-submodule-sparse-gitmodules.sh b/t/t7418-submodule-sparse-gitmodules.sh new file mode 100755 index 0000000000..3f7f271883 --- /dev/null +++ b/t/t7418-submodule-sparse-gitmodules.sh @@ -0,0 +1,122 @@ +#!/bin/sh +# +# Copyright (C) 2018 Antonio Ospite <ao2@ao2.it> +# + +test_description='Test reading/writing .gitmodules when not in the working tree + +This test verifies that, when .gitmodules is in the current branch but is not +in the working tree reading from it still works but writing to it does not. + +The test setup uses a sparse checkout, however the same scenario can be set up +also by committing .gitmodules and then just removing it from the filesystem. +' + +. ./test-lib.sh + +test_expect_success 'sparse checkout setup which hides .gitmodules' ' + git init upstream && + git init submodule && + (cd submodule && + echo file >file && + git add file && + test_tick && + git commit -m "Add file" + ) && + (cd upstream && + git submodule add ../submodule && + test_tick && + git commit -m "Add submodule" + ) && + git clone upstream super && + (cd super && + cat >.git/info/sparse-checkout <<-\EOF && + /* + !/.gitmodules + EOF + git config core.sparsecheckout true && + git read-tree -m -u HEAD && + test_path_is_missing .gitmodules + ) +' + +test_expect_success 'reading gitmodules config file when it is not checked out' ' + echo "../submodule" >expect && + git -C super submodule--helper config submodule.submodule.url >actual && + test_cmp expect actual +' + +test_expect_success 'not writing gitmodules config file when it is not checked out' ' + test_must_fail git -C super submodule--helper config submodule.submodule.url newurl && + test_path_is_missing super/.gitmodules +' + +test_expect_success 'initialising submodule when the gitmodules config is not checked out' ' + test_must_fail git -C super config submodule.submodule.url && + git -C super submodule init && + git -C super config submodule.submodule.url >actual && + echo "$(pwd)/submodule" >expect && + test_cmp expect actual +' + +test_expect_success 'updating submodule when the gitmodules config is not checked out' ' + test_path_is_missing super/submodule/file && + git -C super submodule update && + test_cmp submodule/file super/submodule/file +' + +ORIG_SUBMODULE=$(git -C submodule rev-parse HEAD) +ORIG_UPSTREAM=$(git -C upstream rev-parse HEAD) +ORIG_SUPER=$(git -C super rev-parse HEAD) + +test_expect_success 're-updating submodule when the gitmodules config is not checked out' ' + test_when_finished "git -C submodule reset --hard $ORIG_SUBMODULE; + git -C upstream reset --hard $ORIG_UPSTREAM; + git -C super reset --hard $ORIG_SUPER; + git -C upstream submodule update --remote; + git -C super pull; + git -C super submodule update --remote" && + (cd submodule && + echo file2 >file2 && + git add file2 && + test_tick && + git commit -m "Add file2 to submodule" + ) && + (cd upstream && + git submodule update --remote && + git add submodule && + test_tick && + git commit -m "Update submodule" + ) && + git -C super 
pull && + # The --for-status options reads the gitmodules config + git -C super submodule summary --for-status >actual && + rev1=$(git -C submodule rev-parse --short HEAD) && + rev2=$(git -C submodule rev-parse --short HEAD^) && + cat >expect <<-EOF && + * submodule ${rev1}...${rev2} (1): + < Add file2 to submodule + + EOF + test_cmp expect actual && + # Test that the update actually succeeds + test_path_is_missing super/submodule/file2 && + git -C super submodule update && + test_cmp submodule/file2 super/submodule/file2 && + git -C super status --short >output && + test_must_be_empty output +' + +test_expect_success 'not adding submodules when the gitmodules config is not checked out' ' + git clone submodule new_submodule && + test_must_fail git -C super submodule add ../new_submodule && + test_path_is_missing .gitmodules +' + +# This test checks that the previous "git submodule add" did not leave the +# repository in a spurious state when it failed. +test_expect_success 'init submodule still works even after the previous add failed' ' + git -C super submodule init +' + +test_done diff --git a/t/t7500-commit.sh b/t/t7500-commit-template-squash-signoff.sh index 31ab608b67..46a5cd4b73 100755 --- a/t/t7500-commit.sh +++ b/t/t7500-commit-template-squash-signoff.sh @@ -5,7 +5,7 @@ test_description='git commit -Tests for selected commit options.' +Tests for template, signoff, squash and -F functions.' . ./test-lib.sh diff --git a/t/t7501-commit.sh b/t/t7501-commit-basic-functionality.sh index 1a6773ee68..f1349af56e 100755 --- a/t/t7501-commit.sh +++ b/t/t7501-commit-basic-functionality.sh @@ -99,12 +99,12 @@ test_expect_success '--dry-run with stuff to commit returns ok' ' git commit -m next -a --dry-run ' -test_expect_failure '--short with stuff to commit returns ok' ' +test_expect_success '--short with stuff to commit returns ok' ' echo bongo bongo bongo >>file && git commit -m next -a --short ' -test_expect_failure '--porcelain with stuff to commit returns ok' ' +test_expect_success '--porcelain with stuff to commit returns ok' ' echo bongo bongo bongo >>file && git commit -m next -a --porcelain ' @@ -698,4 +698,10 @@ test_expect_success '--dry-run with conflicts fixed from a merge' ' git commit -m "conflicts fixed from merge." 
' +test_expect_success '--dry-run --short' ' + >test-file && + git add test-file && + git commit --dry-run --short +' + test_done diff --git a/t/t7502-commit.sh b/t/t7502-commit-porcelain.sh index ca4a740da0..ca4a740da0 100755 --- a/t/t7502-commit.sh +++ b/t/t7502-commit-porcelain.sh diff --git a/t/t7505-prepare-commit-msg-hook.sh b/t/t7505-prepare-commit-msg-hook.sh index 1f43b3cd4c..ebfcad9c4c 100755 --- a/t/t7505-prepare-commit-msg-hook.sh +++ b/t/t7505-prepare-commit-msg-hook.sh @@ -253,7 +253,7 @@ test_rebase () { } test_rebase success -i -test_rebase success -p +test_have_prereq !REBASE_P || test_rebase success -p test_expect_success 'with hook (cherry-pick)' ' test_when_finished "git checkout -f master" && diff --git a/t/t7506-status-submodule.sh b/t/t7506-status-submodule.sh index 943708fb04..08629a6e70 100755 --- a/t/t7506-status-submodule.sh +++ b/t/t7506-status-submodule.sh @@ -325,7 +325,8 @@ test_expect_success 'setup superproject with untracked file in nested submodule' ( cd super && git clean -dfx && - rm .gitmodules && + git rm .gitmodules && + git commit -m "remove .gitmodules" && git submodule add -f ./sub1 && git submodule add -f ./sub2 && git submodule add -f ./sub1 sub3 && diff --git a/t/t7509-commit.sh b/t/t7509-commit-authorship.sh index ddef7ea6b0..500ab2fe72 100755 --- a/t/t7509-commit.sh +++ b/t/t7509-commit-authorship.sh @@ -3,7 +3,7 @@ # Copyright (c) 2009 Erick Mattos # -test_description='git commit --reset-author' +test_description='commit tests of various authorhip options. ' . ./test-lib.sh diff --git a/t/t7510-signed-commit.sh b/t/t7510-signed-commit.sh index 4e37ff8f16..86d3f93fa2 100755 --- a/t/t7510-signed-commit.sh +++ b/t/t7510-signed-commit.sh @@ -175,8 +175,10 @@ test_expect_success GPG 'show good signature with custom format' ' G 13B6F51ECDDE430D C O Mitter <committer@example.com> + 73D758744BE721698EC54E8713B6F51ECDDE430D + 73D758744BE721698EC54E8713B6F51ECDDE430D EOF - git log -1 --format="%G?%n%GK%n%GS" sixth-signed >actual && + git log -1 --format="%G?%n%GK%n%GS%n%GF%n%GP" sixth-signed >actual && test_cmp expect actual ' @@ -185,28 +187,34 @@ test_expect_success GPG 'show bad signature with custom format' ' B 13B6F51ECDDE430D C O Mitter <committer@example.com> + + EOF - git log -1 --format="%G?%n%GK%n%GS" $(cat forged1.commit) >actual && + git log -1 --format="%G?%n%GK%n%GS%n%GF%n%GP" $(cat forged1.commit) >actual && test_cmp expect actual ' test_expect_success GPG 'show untrusted signature with custom format' ' cat >expect <<-\EOF && U - 61092E85B7227189 + 65A0EEA02E30CAD7 Eris Discordia <discord@example.net> + F8364A59E07FFE9F4D63005A65A0EEA02E30CAD7 + D4BE22311AD3131E5EDA29A461092E85B7227189 EOF - git log -1 --format="%G?%n%GK%n%GS" eighth-signed-alt >actual && + git log -1 --format="%G?%n%GK%n%GS%n%GF%n%GP" eighth-signed-alt >actual && test_cmp expect actual ' test_expect_success GPG 'show unknown signature with custom format' ' cat >expect <<-\EOF && E - 61092E85B7227189 + 65A0EEA02E30CAD7 + + EOF - GNUPGHOME="$GNUPGHOME_NOT_USED" git log -1 --format="%G?%n%GK%n%GS" eighth-signed-alt >actual && + GNUPGHOME="$GNUPGHOME_NOT_USED" git log -1 --format="%G?%n%GK%n%GS%n%GF%n%GP" eighth-signed-alt >actual && test_cmp expect actual ' @@ -215,8 +223,10 @@ test_expect_success GPG 'show lack of signature with custom format' ' N + + EOF - git log -1 --format="%G?%n%GK%n%GS" seventh-unsigned >actual && + git log -1 --format="%G?%n%GK%n%GS%n%GF%n%GP" seventh-unsigned >actual && test_cmp expect actual ' @@ -234,4 +244,32 @@ test_expect_success GPG 'check 
config gpg.format values' ' test_must_fail git commit -S --amend -m "fail" ' +test_expect_success GPG 'detect fudged commit with double signature' ' + sed -e "/gpgsig/,/END PGP/d" forged1 >double-base && + sed -n -e "/gpgsig/,/END PGP/p" forged1 | \ + sed -e "s/^gpgsig//;s/^ //" | gpg --dearmor >double-sig1.sig && + gpg -o double-sig2.sig -u 29472784 --detach-sign double-base && + cat double-sig1.sig double-sig2.sig | gpg --enarmor >double-combined.asc && + sed -e "s/^\(-.*\)ARMORED FILE/\1SIGNATURE/;1s/^/gpgsig /;2,\$s/^/ /" \ + double-combined.asc > double-gpgsig && + sed -e "/committer/r double-gpgsig" double-base >double-commit && + git hash-object -w -t commit double-commit >double-commit.commit && + test_must_fail git verify-commit $(cat double-commit.commit) && + git show --pretty=short --show-signature $(cat double-commit.commit) >double-actual && + grep "BAD signature from" double-actual && + grep "Good signature from" double-actual +' + +test_expect_success GPG 'show double signature with custom format' ' + cat >expect <<-\EOF && + E + + + + + EOF + git log -1 --format="%G?%n%GK%n%GS%n%GF%n%GP" $(cat double-commit.commit) >actual && + test_cmp expect actual +' + test_done diff --git a/t/t7517-per-repo-email.sh b/t/t7517-per-repo-email.sh index 2a22fa7588..231b8cc19d 100755 --- a/t/t7517-per-repo-email.sh +++ b/t/t7517-per-repo-email.sh @@ -72,12 +72,14 @@ test_expect_success 'noop interactive rebase does not care about ident' ' git rebase -i HEAD^ ' -test_expect_success 'fast-forward rebase does not care about ident (preserve)' ' +test_expect_success REBASE_P \ + 'fast-forward rebase does not care about ident (preserve)' ' git checkout -B tmp side-without-commit && git rebase -p master ' -test_expect_success 'non-fast-forward rebase refuses to write commits (preserve)' ' +test_expect_success REBASE_P \ + 'non-fast-forward rebase refuses to write commits (preserve)' ' test_when_finished "git rebase --abort || true" && git checkout -B tmp side-with-commit && test_must_fail git rebase -p master diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh index 756beb0d8e..3e0a61db23 100755 --- a/t/t7519-status-fsmonitor.sh +++ b/t/t7519-status-fsmonitor.sh @@ -4,13 +4,6 @@ test_description='git status with file system watcher' . ./test-lib.sh -# -# To run the entire git test suite using fsmonitor: -# -# copy t/t7519/fsmonitor-all to a location in your path and then set -# GIT_FSMONITOR_TEST=fsmonitor-all and run your tests. -# - # Note, after "git reset --hard HEAD" no extensions exist other than 'TREE' # "git update-index --fsmonitor" can be used to get the extension written # before testing the results. 
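The removed comment above described running the whole suite with the fsmonitor hook via GIT_FSMONITOR_TEST; with the variable rename handled by check_var_migration() later in this patch, the equivalent invocation would look roughly like the sketch below (not taken from the patch; it assumes the sample hook t/t7519/fsmonitor-all is still shipped and that ~/bin is on PATH).

# Rough sketch: run the test suite with the fsmonitor hook enabled,
# using the new-style variable name; the old GIT_FSMONITOR_TEST is
# still honoured during the transition but prints a warning.
cp t/t7519/fsmonitor-all "$HOME/bin/fsmonitor-all" &&
GIT_TEST_FSMONITOR=fsmonitor-all make -C t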
@@ -84,21 +77,21 @@ test_expect_success 'setup' ' # test that the fsmonitor extension is off by default test_expect_success 'fsmonitor extension is off by default' ' - test-dump-fsmonitor >actual && + test-tool dump-fsmonitor >actual && grep "^no fsmonitor" actual ' # test that "update-index --fsmonitor" adds the fsmonitor extension test_expect_success 'update-index --fsmonitor" adds the fsmonitor extension' ' git update-index --fsmonitor && - test-dump-fsmonitor >actual && + test-tool dump-fsmonitor >actual && grep "^fsmonitor last update" actual ' # test that "update-index --no-fsmonitor" removes the fsmonitor extension test_expect_success 'update-index --no-fsmonitor" removes the fsmonitor extension' ' git update-index --no-fsmonitor && - test-dump-fsmonitor >actual && + test-tool dump-fsmonitor >actual && grep "^no fsmonitor" actual ' @@ -245,9 +238,9 @@ do git config core.preloadIndex $preload_val && if test $preload_val = true then - GIT_FORCE_PRELOAD_TEST=$preload_val; export GIT_FORCE_PRELOAD_TEST + GIT_TEST_PRELOAD_INDEX=$preload_val; export GIT_TEST_PRELOAD_INDEX else - unset GIT_FORCE_PRELOAD_TEST + sane_unset GIT_TEST_PRELOAD_INDEX fi ' @@ -307,9 +300,9 @@ test_expect_success 'splitting the index results in the same state' ' dirty_repo && git update-index --fsmonitor && git ls-files -f >expect && - test-dump-fsmonitor >&2 && echo && + test-tool dump-fsmonitor >&2 && echo && git update-index --fsmonitor --split-index && - test-dump-fsmonitor >&2 && echo && + test-tool dump-fsmonitor >&2 && echo && git ls-files -f >actual && test_cmp expect actual ' @@ -333,7 +326,7 @@ test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' git update-index --fsmonitor && GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \ git status && - test-dump-untracked-cache >../before + test-tool dump-untracked-cache >../before ) && cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF && printf ".git\0" @@ -345,7 +338,7 @@ test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' cd dot-git && GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \ git status && - test-dump-untracked-cache >../after + test-tool dump-untracked-cache >../after ) && grep "directory invalidation" trace-before >>before && grep "directory invalidation" trace-after >>after && diff --git a/t/t7612-merge-verify-signatures.sh b/t/t7612-merge-verify-signatures.sh index e2b1df817a..d99218a725 100755 --- a/t/t7612-merge-verify-signatures.sh +++ b/t/t7612-merge-verify-signatures.sh @@ -103,4 +103,11 @@ test_expect_success GPG 'merge commit with bad signature with merge.verifySignat git merge --no-verify-signatures $(cat forged.commit) ' +test_expect_success GPG 'merge unsigned commit into unborn branch' ' + test_when_finished "git checkout initial" && + git checkout --orphan unborn && + test_must_fail git merge --verify-signatures side-unsigned 2>mergeerror && + test_i18ngrep "does not have a GPG signature" mergeerror +' + test_done diff --git a/t/t7800-difftool.sh b/t/t7800-difftool.sh index 668bbee73c..22b9199d59 100755 --- a/t/t7800-difftool.sh +++ b/t/t7800-difftool.sh @@ -332,7 +332,7 @@ test_expect_success 'difftool --extcmd cat arg1' ' test_expect_success 'difftool --extcmd cat arg2' ' echo branch >expect && git difftool --no-prompt \ - --extcmd sh\ -c\ \"cat\ \$2\" branch >actual && + --extcmd sh\ -c\ \"cat\ \\\"\$2\\\"\" branch >actual && test_cmp expect actual ' @@ -557,7 +557,7 @@ test_expect_success SYMLINKS 'difftool --dir-diff --symlink without unstaged cha EOF git 
difftool --dir-diff --symlink \ --extcmd "./.git/CHECK_SYMLINKS" branch HEAD && - test_cmp actual expect + test_cmp expect actual ' write_script modify-right-file <<\EOF diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh index be5c1bd553..43aa4161cf 100755 --- a/t/t7810-grep.sh +++ b/t/t7810-grep.sh @@ -309,6 +309,8 @@ do echo ${HC}v:1:vvv } >expected && git grep --max-depth -1 -n -e vvv $H >actual && + test_cmp expected actual && + git grep --recursive -n -e vvv $H >actual && test_cmp expected actual ' @@ -317,6 +319,8 @@ do echo ${HC}v:1:vvv } >expected && git grep --max-depth 0 -n -e vvv $H >actual && + test_cmp expected actual && + git grep --no-recursive -n -e vvv $H >actual && test_cmp expected actual ' @@ -327,6 +331,8 @@ do echo ${HC}v:1:vvv } >expected && git grep --max-depth 0 -n -e vvv $H -- "*" >actual && + test_cmp expected actual && + git grep --no-recursive -n -e vvv $H -- "*" >actual && test_cmp expected actual ' @@ -344,6 +350,8 @@ do echo ${HC}t/v:1:vvv } >expected && git grep --max-depth 0 -n -e vvv $H -- t >actual && + test_cmp expected actual && + git grep --no-recursive -n -e vvv $H -- t >actual && test_cmp expected actual ' @@ -353,6 +361,8 @@ do echo ${HC}v:1:vvv } >expected && git grep --max-depth 0 -n -e vvv $H -- . t >actual && + test_cmp expected actual && + git grep --no-recursive -n -e vvv $H -- . t >actual && test_cmp expected actual ' @@ -362,6 +372,8 @@ do echo ${HC}v:1:vvv } >expected && git grep --max-depth 0 -n -e vvv $H -- t . >actual && + test_cmp expected actual && + git grep --no-recursive -n -e vvv $H -- t . >actual && test_cmp expected actual ' test_expect_success "grep $L with grep.extendedRegexp=false" ' diff --git a/t/t7814-grep-recurse-submodules.sh b/t/t7814-grep-recurse-submodules.sh index 7184113b9b..fa475d52fa 100755 --- a/t/t7814-grep-recurse-submodules.sh +++ b/t/t7814-grep-recurse-submodules.sh @@ -380,4 +380,20 @@ test_expect_success 'grep --recurse-submodules should pass the pattern type alon fi ' +# Recursing down into nested submodules which do not have .gitmodules in their +# working tree does not work yet. This is because config_from_gitmodules() +# uses get_oid() and the latter is still not able to get objects from an +# arbitrary repository (the nested submodule, in this case). 
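Before the nested-submodule test that follows, a quick sketch of the new git grep spellings exercised in the t7810 hunks above (not taken from the patch; run inside any repository):

# --recursive matches the default behaviour and --no-recursive limits
# matches to the current directory, i.e. they act like --max-depth -1
# and --max-depth 0 respectively, as the paired assertions above show.
git grep --recursive -n -e vvv HEAD
git grep --no-recursive -n -e vvv HEAD -- .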
+test_expect_failure 'grep --recurse-submodules with submodules without .gitmodules in the working tree' ' + test_when_finished "git -C submodule checkout .gitmodules" && + rm submodule/.gitmodules && + git grep --recurse-submodules -e "(.|.)[\d]" >actual && + cat >expect <<-\EOF && + a:(1|2)d(3|4) + submodule/a:(1|2)d(3|4) + submodule/sub/a:(1|2)d(3|4) + EOF + test_cmp expect actual +' + test_done diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh index 1ef1a19003..ee1efcc59d 100755 --- a/t/t9001-send-email.sh +++ b/t/t9001-send-email.sh @@ -492,6 +492,21 @@ do --validate \ $patches longline.patch ' + +done + +for enc in 7bit 8bit quoted-printable base64 +do + test_expect_success $PREREQ "--transfer-encoding=$enc produces correct header" ' + clean_fake_sendmail && + git send-email \ + --from="Example <nobody@example.com>" \ + --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + --transfer-encoding=$enc \ + $patches && + grep "Content-Transfer-Encoding: $enc" msgtxt1 + ' done test_expect_success $PREREQ 'Invalid In-Reply-To' ' diff --git a/t/t9100-git-svn-basic.sh b/t/t9100-git-svn-basic.sh index 9af6078844..2c309a57d9 100755 --- a/t/t9100-git-svn-basic.sh +++ b/t/t9100-git-svn-basic.sh @@ -221,7 +221,7 @@ tree d667270a1f7b109f5eb3aaea21ede14b56bfdd6e tree 8f51f74cf0163afc9ad68a4b1537288c4558b5a4 EOF -test_expect_success POSIXPERM,SYMLINKS "$name" "test_cmp a expected" +test_expect_success POSIXPERM,SYMLINKS "$name" "test_cmp expected a" test_expect_success 'exit if remote refs are ambigious' ' git config --add svn-remote.svn.fetch \ diff --git a/t/t9101-git-svn-props.sh b/t/t9101-git-svn-props.sh index 8a5c8dc1aa..c26c4b0927 100755 --- a/t/t9101-git-svn-props.sh +++ b/t/t9101-git-svn-props.sh @@ -174,7 +174,8 @@ test_expect_success 'test create-ignore' " cmp ./deeply/.gitignore create-ignore.expect && cmp ./deeply/nested/.gitignore create-ignore.expect && cmp ./deeply/nested/directory/.gitignore create-ignore.expect && - git ls-files -s | grep gitignore | cmp - create-ignore-index.expect + git ls-files -s >ls_files_result && + grep gitignore ls_files_result | cmp - create-ignore-index.expect " cat >prop.expect <<\EOF @@ -189,17 +190,21 @@ EOF # This test can be improved: since all the svn:ignore contain the same # pattern, it can pass even though the propget did not execute on the # right directory. -test_expect_success 'test propget' " - git svn propget svn:ignore . | cmp - prop.expect && +test_expect_success 'test propget' ' + test_propget () { + git svn propget $1 $2 >actual && + cmp $3 actual + } && + test_propget svn:ignore . prop.expect && cd deeply && - git svn propget svn:ignore . | cmp - ../prop.expect && - git svn propget svn:entry:committed-rev nested/directory/.keep \ - | cmp - ../prop2.expect && - git svn propget svn:ignore .. | cmp - ../prop.expect && - git svn propget svn:ignore nested/ | cmp - ../prop.expect && - git svn propget svn:ignore ./nested | cmp - ../prop.expect && - git svn propget svn:ignore .././deeply/nested | cmp - ../prop.expect - " + test_propget svn:ignore . ../prop.expect && + test_propget svn:entry:committed-rev nested/directory/.keep \ + ../prop2.expect && + test_propget svn:ignore .. 
../prop.expect && + test_propget svn:ignore nested/ ../prop.expect && + test_propget svn:ignore ./nested ../prop.expect && + test_propget svn:ignore .././deeply/nested ../prop.expect + ' cat >prop.expect <<\EOF Properties on '.': @@ -218,8 +223,11 @@ Properties on 'nested/directory/.keep': EOF test_expect_success 'test proplist' " - git svn proplist . | cmp - prop.expect && - git svn proplist nested/directory/.keep | cmp - prop2.expect + git svn proplist . >actual && + cmp prop.expect actual && + + git svn proplist nested/directory/.keep >actual && + cmp prop2.expect actual " test_done diff --git a/t/t9133-git-svn-nested-git-repo.sh b/t/t9133-git-svn-nested-git-repo.sh index f3c30e63b7..f894860867 100755 --- a/t/t9133-git-svn-nested-git-repo.sh +++ b/t/t9133-git-svn-nested-git-repo.sh @@ -45,7 +45,7 @@ test_expect_success 'update git svn-cloned repo' ' git svn rebase && echo a > expect && echo b >> expect && - test_cmp a expect && + test_cmp expect a && rm expect ) ' @@ -69,7 +69,7 @@ test_expect_success 'update git svn-cloned repo' ' git svn rebase && echo a > expect && echo b >> expect && - test_cmp a expect && + test_cmp expect a && rm expect ) ' @@ -93,7 +93,7 @@ test_expect_success 'update git svn-cloned repo again' ' echo a > expect && echo b >> expect && echo c >> expect && - test_cmp a expect && + test_cmp expect a && rm expect ) ' diff --git a/t/t9300-fast-import.sh b/t/t9300-fast-import.sh index 40fe7e4976..59a13b6a77 100755 --- a/t/t9300-fast-import.sh +++ b/t/t9300-fast-import.sh @@ -1558,7 +1558,7 @@ test_expect_success 'O: blank lines not necessary after other commands' ' INPUT_END git fast-import <input && - test 8 = $(find .git/objects/pack -type f | wc -l) && + test 8 = $(find .git/objects/pack -type f | grep -v multi-pack-index | wc -l) && test $(git rev-parse refs/tags/O3-2nd) = $(git rev-parse O3^) && git log --reverse --pretty=oneline O3 | sed s/^.*z// >actual && test_cmp expect actual diff --git a/t/t9600-cvsimport.sh b/t/t9600-cvsimport.sh index 5dfee07d9a..251fdd66c4 100755 --- a/t/t9600-cvsimport.sh +++ b/t/t9600-cvsimport.sh @@ -148,7 +148,7 @@ test_expect_success PERL 'import from a CVS working tree' ' git cvsimport -a -z0 && echo 1 >expect && git log -1 --pretty=format:%s%n >actual && - test_cmp actual expect + test_cmp expect actual ) ' diff --git a/t/t9603-cvsimport-patchsets.sh b/t/t9603-cvsimport-patchsets.sh index c4c3c49546..3e64b11eac 100755 --- a/t/t9603-cvsimport-patchsets.sh +++ b/t/t9603-cvsimport-patchsets.sh @@ -29,11 +29,11 @@ test_expect_failure PERL 'import with criss cross times on revisions' ' Rev 3 Rev 2 Rev 1" > expect-master && - test_cmp actual-master expect-master && + test_cmp expect-master actual-master && echo "Rev 5 Branch A Wed Mar 11 19:09:10 2009 +0000 Rev 4 Branch A Wed Mar 11 19:03:52 2009 +0000" > expect-A && - test_cmp actual-A expect-A + test_cmp expect-A actual-A ' test_done diff --git a/t/t9604-cvsimport-timestamps.sh b/t/t9604-cvsimport-timestamps.sh index a4b3db24bd..2ff4aa932d 100755 --- a/t/t9604-cvsimport-timestamps.sh +++ b/t/t9604-cvsimport-timestamps.sh @@ -31,7 +31,7 @@ test_expect_success PERL 'check timestamps are UTC (TZ=CST6CDT)' ' Rev 2 2005-02-01 00:00:00 +0000 Rev 1 2005-01-01 00:00:00 +0000 EOF - test_cmp actual-1 expect-1 + test_cmp expect-1 actual-1 ' test_expect_success PERL 'check timestamps with author-specific timezones' ' @@ -65,7 +65,7 @@ test_expect_success PERL 'check timestamps with author-specific timezones' ' Rev 2 2005-01-31 18:00:00 -0600 User Two Rev 1 2005-01-01 00:00:00 +0000 User One EOF - 
test_cmp actual-2 expect-2 + test_cmp expect-2 actual-2 ' test_done diff --git a/t/t9832-unshelve.sh b/t/t9832-unshelve.sh index 48ec7679b8..41c09f11f4 100755 --- a/t/t9832-unshelve.sh +++ b/t/t9832-unshelve.sh @@ -19,8 +19,10 @@ test_expect_success 'init depot' ' p4 add file1 && p4 submit -d "change 1" && : >file_to_delete && + : >file_to_move && p4 add file_to_delete && - p4 submit -d "file to delete" + p4 add file_to_move && + p4 submit -d "add files to delete" ) ' @@ -36,6 +38,8 @@ test_expect_success 'create shelved changelist' ' echo "new file" >file2 && p4 add file2 && p4 delete file_to_delete && + p4 edit file_to_move && + p4 move file_to_move moved_file && p4 opened && p4 shelve -i <<EOF Change: new @@ -47,6 +51,8 @@ Files: //depot/file1 //depot/file2 //depot/file_to_delete + //depot/file_to_move + //depot/moved_file EOF ) && @@ -54,12 +60,14 @@ EOF cd "$git" && change=$(last_shelved_change) && git p4 unshelve $change && - git show refs/remotes/p4/unshelved/$change | grep -q "Further description" && - git cherry-pick refs/remotes/p4/unshelved/$change && + git show refs/remotes/p4-unshelved/$change | grep -q "Further description" && + git cherry-pick refs/remotes/p4-unshelved/$change && test_path_is_file file2 && test_cmp file1 "$cli"/file1 && test_cmp file2 "$cli"/file2 && - test_path_is_missing file_to_delete + test_path_is_missing file_to_delete && + test_path_is_missing file_to_move && + test_path_is_file moved_file ) ' @@ -88,10 +96,22 @@ EOF cd "$git" && change=$(last_shelved_change) && git p4 unshelve $change && - git diff refs/remotes/p4/unshelved/$change.0 refs/remotes/p4/unshelved/$change | grep -q file3 + git diff refs/remotes/p4-unshelved/$change.0 refs/remotes/p4-unshelved/$change | grep -q file3 ) ' +shelve_one_file () { + description="Change to be unshelved" && + file="$1" && + p4 shelve -i <<EOF +Change: new +Description: + $description +Files: + $file +EOF +} + # This is the tricky case where the shelved changelist base revision doesn't # match git-p4's idea of the base revision # @@ -108,29 +128,52 @@ test_expect_success 'create shelved changelist based on p4 change ahead of p4/ma p4 submit -d "change:foo" && p4 edit file1 && echo "bar" >>file1 && - p4 shelve -i <<EOF && -Change: new -Description: - Change to be unshelved -Files: - //depot/file1 -EOF + shelve_one_file //depot/file1 && change=$(last_shelved_change) && - p4 describe -S $change | grep -q "Change to be unshelved" + p4 describe -S $change >out.txt && + grep -q "Change to be unshelved" out.txt ) ' -# Now try to unshelve it. git-p4 should refuse to do so. +# Now try to unshelve it. test_expect_success 'try to unshelve the change' ' test_when_finished cleanup_git && ( change=$(last_shelved_change) && cd "$git" && - test_must_fail git p4 unshelve $change 2>out.txt && - grep -q "cannot unshelve" out.txt + git p4 unshelve $change >out.txt && + grep -q "unshelved changelist $change" out.txt ) ' +# Specify the origin. Create 2 unrelated files, and check that +# we only get the one in HEAD~, not the one in HEAD. 
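Before the --origin test that follows, a minimal sketch of the flow the unshelve tests above drive, with the imported change now living under refs/remotes/p4-unshelved/ instead of refs/remotes/p4/unshelved/ (not taken from the patch; it assumes a git-p4 clone and the last_shelved_change helper from this script):

# Import shelved changelist $change and apply it to the current branch.
change=$(last_shelved_change) &&
git p4 unshelve "$change" &&
git show refs/remotes/p4-unshelved/"$change" &&
git cherry-pick refs/remotes/p4-unshelved/"$change"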
+ +test_expect_success 'unshelve specifying the origin' ' + ( + cd "$cli" && + : >unrelated_file0 && + p4 add unrelated_file0 && + p4 submit -d "unrelated" && + : >unrelated_file1 && + p4 add unrelated_file1 && + p4 submit -d "unrelated" && + : >file_to_shelve && + p4 add file_to_shelve && + shelve_one_file //depot/file_to_shelve + ) && + test_when_finished cleanup_git && + git p4 clone --dest="$git" //depot/@all && + ( + cd "$git" && + change=$(last_shelved_change) && + git p4 unshelve --origin HEAD~ $change && + git checkout refs/remotes/p4-unshelved/$change && + test_path_is_file unrelated_file0 && + test_path_is_missing unrelated_file1 && + test_path_is_file file_to_shelve + ) +' test_expect_success 'kill p4d' ' kill_p4d ' diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh index 175f83d704..3c6b185b60 100755 --- a/t/t9902-completion.sh +++ b/t/t9902-completion.sh @@ -1697,7 +1697,8 @@ test_expect_success 'sourcing the completion script clears cached commands' ' verbose test -z "$__git_all_commands" ' -test_expect_success !GETTEXT_POISON 'sourcing the completion script clears cached merge strategies' ' +test_expect_success 'sourcing the completion script clears cached merge strategies' ' + GIT_TEST_GETTEXT_POISON= && __git_compute_merge_strategies && verbose test -n "$__git_merge_strategies" && . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" && diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh index 4207af4077..b4e391526a 100644 --- a/t/test-lib-functions.sh +++ b/t/test-lib-functions.sh @@ -42,6 +42,8 @@ test_decode_color () { function name(n) { if (n == 0) return "RESET"; if (n == 1) return "BOLD"; + if (n == 2) return "FAINT"; + if (n == 3) return "ITALIC"; if (n == 7) return "REVERSE"; if (n == 30) return "BLACK"; if (n == 31) return "RED"; @@ -745,6 +747,29 @@ test_cmp() { $GIT_TEST_CMP "$@" } +# Check that the given config key has the expected value. +# +# test_cmp_config [-C <dir>] <expected-value> +# [<git-config-options>...] <config-key> +# +# for example to check that the value of core.bar is foo +# +# test_cmp_config foo core.bar +# +test_cmp_config() { + local GD && + if test "$1" = "-C" + then + shift && + GD="-C $1" && + shift + fi && + printf "%s\n" "$1" >expect.config && + shift && + git $GD config "$@" >actual.config && + test_cmp expect.config actual.config +} + # test_cmp_bin - helper to compare binary files test_cmp_bin() { @@ -753,16 +778,16 @@ test_cmp_bin() { # Use this instead of test_cmp to compare files that contain expected and # actual output from git commands that can be translated. When running -# under GETTEXT_POISON this pretends that the command produced expected +# under GIT_TEST_GETTEXT_POISON this pretends that the command produced expected # results. test_i18ncmp () { - test -n "$GETTEXT_POISON" || test_cmp "$@" + ! test_have_prereq C_LOCALE_OUTPUT || test_cmp "$@" } # Use this instead of "grep expected-string actual" to see if the # output from a git command that can be translated either contains an # expected string, or does not contain an unwanted one. When running -# under GETTEXT_POISON this pretends that the command produced expected +# under GIT_TEST_GETTEXT_POISON this pretends that the command produced expected # results. 
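A usage sketch for the test_cmp_config() helper introduced a few hunks above (hypothetical keys and values; assumes test-lib.sh has been sourced):

# Compare a config value directly, without the usual expect/actual dance;
# -C runs the lookup in another repository.
test_cmp_config bar core.foo &&
test_cmp_config -C sub ../upstream remote.origin.url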
test_i18ngrep () { eval "last_arg=\${$#}" @@ -777,7 +802,7 @@ test_i18ngrep () { error "bug in the test script: too few parameters to test_i18ngrep" fi - if test -n "$GETTEXT_POISON" + if test_have_prereq !C_LOCALE_OUTPUT then # pretend success return 0 @@ -898,7 +923,8 @@ test_create_repo () { mkdir -p "$repo" ( cd "$repo" || error "Cannot setup test environment" - "$GIT_EXEC_PATH/git-init" "--template=$GIT_BUILD_DIR/templates/blt/" >&3 2>&4 || + "${GIT_TEST_INSTALLED:-$GIT_EXEC_PATH}/git$X" init \ + "--template=$GIT_BUILD_DIR/templates/blt/" >&3 2>&4 || error "cannot run git init -- have you built things yet?" mv .git/hooks .git/hooks-disabled ) || exit @@ -1155,3 +1181,72 @@ depacketize () { } ' } + +# Set the hash algorithm in use to $1. Only useful when testing the testsuite. +test_set_hash () { + test_hash_algo="$1" +} + +# Detect the hash algorithm in use. +test_detect_hash () { + # Currently we only support SHA-1, but in the future this function will + # actually detect the algorithm in use. + test_hash_algo='sha1' +} + +# Load common hash metadata and common placeholder object IDs for use with +# test_oid. +test_oid_init () { + test -n "$test_hash_algo" || test_detect_hash && + test_oid_cache <"$TEST_DIRECTORY/oid-info/hash-info" && + test_oid_cache <"$TEST_DIRECTORY/oid-info/oid" +} + +# Load key-value pairs from stdin suitable for use with test_oid. Blank lines +# and lines starting with "#" are ignored. Keys must be shell identifier +# characters. +# +# Examples: +# rawsz sha1:20 +# rawsz sha256:32 +test_oid_cache () { + local tag rest k v && + + { test -n "$test_hash_algo" || test_detect_hash; } && + while read tag rest + do + case $tag in + \#*) + continue;; + ?*) + # non-empty + ;; + *) + # blank line + continue;; + esac && + + k="${rest%:*}" && + v="${rest#*:}" && + + if ! expr "$k" : '[a-z0-9][a-z0-9]*$' >/dev/null + then + error 'bug in the test script: bad hash algorithm' + fi && + eval "test_oid_${k}_$tag=\"\$v\"" + done +} + +# Look up a per-hash value based on a key ($1). The value must have been loaded +# by test_oid_init or test_oid_cache. +test_oid () { + local var="test_oid_${test_hash_algo}_$1" && + + # If the variable is unset, we must be missing an entry for this + # key-hash pair, so exit with an error. + if eval "test -z \"\${$var+set}\"" + then + error "bug in the test script: undefined key '$1'" >&2 + fi && + eval "printf '%s' \"\${$var}\"" +} diff --git a/t/test-lib.sh b/t/test-lib.sh index 1373fd4592..6c6c0af7a1 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -49,18 +49,28 @@ export ASAN_OPTIONS : ${LSAN_OPTIONS=abort_on_error=1} export LSAN_OPTIONS +if test ! -f "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS +then + echo >&2 'error: GIT-BUILD-OPTIONS missing (has Git been built?).' + exit 1 +fi +. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS +export PERL_PATH SHELL_PATH + ################################################################ # It appears that people try to run tests without building... -"$GIT_BUILD_DIR/git" >/dev/null +"${GIT_TEST_INSTALLED:-$GIT_BUILD_DIR}/git$X" >/dev/null if test $? != 1 then - echo >&2 'error: you do not seem to have built git yet.' + if test -n "$GIT_TEST_INSTALLED" + then + echo >&2 "error: there is no working Git at '$GIT_TEST_INSTALLED'" + else + echo >&2 'error: you do not seem to have built git yet.' + fi exit 1 fi -. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS -export PERL_PATH SHELL_PATH - # if --tee was passed, write the output not only to the terminal, but # additionally to the file test-results/$BASENAME.out, too. 
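A usage sketch for the test_oid_cache()/test_oid() helpers added above, reusing the rawsz example from their own comment (assumes test-lib.sh has been sourced and the hash algorithm has been detected):

# Cache per-algorithm constants once, then look them up by key.
test_oid_cache <<-EOF
rawsz sha1:20
rawsz sha256:32
EOF
test_oid rawsz    # prints 20 while test_hash_algo is sha1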
case "$GIT_TEST_TEE_STARTED, $* " in @@ -95,6 +105,16 @@ PAGER=cat TZ=UTC export LANG LC_ALL PAGER TZ EDITOR=: + +# GIT_TEST_GETTEXT_POISON should not influence git commands executed +# during initialization of test-lib and the test repo. Back it up, +# unset and then restore after initialization is finished. +if test -n "$GIT_TEST_GETTEXT_POISON" +then + GIT_TEST_GETTEXT_POISON_ORIG=$GIT_TEST_GETTEXT_POISON + unset GIT_TEST_GETTEXT_POISON +fi + # A call to "unset" with no arguments causes at least Solaris 10 # /usr/xpg4/bin/sh and /bin/ksh to bail out. So keep the unsets # deriving from the command substitution clustered with the other @@ -134,9 +154,40 @@ export EDITOR GIT_TRACE_BARE=1 export GIT_TRACE_BARE -if test -n "${TEST_GIT_INDEX_VERSION:+isset}" +check_var_migration () { + # the warnings and hints given from this helper depends + # on end-user settings, which will disrupt the self-test + # done on the test framework itself. + case "$GIT_TEST_FRAMEWORK_SELFTEST" in + t) return ;; + esac + + old_name=$1 new_name=$2 + eval "old_isset=\${${old_name}:+isset}" + eval "new_isset=\${${new_name}:+isset}" + + case "$old_isset,$new_isset" in + isset,) + echo >&2 "warning: $old_name is now $new_name" + echo >&2 "hint: set $new_name too during the transition period" + eval "$new_name=\$$old_name" + ;; + isset,isset) + # do this later + # echo >&2 "warning: $old_name is now $new_name" + # echo >&2 "hint: remove $old_name" + ;; + esac +} + +check_var_migration GIT_FSMONITOR_TEST GIT_TEST_FSMONITOR +check_var_migration TEST_GIT_INDEX_VERSION GIT_TEST_INDEX_VERSION +check_var_migration GIT_FORCE_PRELOAD_TEST GIT_TEST_PRELOAD_INDEX + +# Use specific version of the index file format +if test -n "${GIT_TEST_INDEX_VERSION:+isset}" then - GIT_INDEX_VERSION="$TEST_GIT_INDEX_VERSION" + GIT_INDEX_VERSION="$GIT_TEST_INDEX_VERSION" export GIT_INDEX_VERSION fi @@ -926,7 +977,7 @@ elif test -n "$GIT_TEST_INSTALLED" then GIT_EXEC_PATH=$($GIT_TEST_INSTALLED/git --exec-path) || error "Cannot run git from $GIT_TEST_INSTALLED." - PATH=$GIT_TEST_INSTALLED:$GIT_BUILD_DIR:$PATH + PATH=$GIT_TEST_INSTALLED:$GIT_BUILD_DIR/t/helper:$PATH GIT_EXEC_PATH=${GIT_TEST_EXEC_PATH:-$GIT_EXEC_PATH} else # normal case, use ../bin-wrappers only unless $with_dashes: git_bin_dir="$GIT_BUILD_DIR/bin-wrappers" @@ -1073,16 +1124,24 @@ test -n "$USE_LIBPCRE1" && test_set_prereq LIBPCRE1 test -n "$USE_LIBPCRE2" && test_set_prereq LIBPCRE2 test -z "$NO_GETTEXT" && test_set_prereq GETTEXT +if test -n "$GIT_TEST_GETTEXT_POISON_ORIG" +then + GIT_TEST_GETTEXT_POISON=$GIT_TEST_GETTEXT_POISON_ORIG + unset GIT_TEST_GETTEXT_POISON_ORIG +fi + # Can we rely on git's output in the C locale? 
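A sketch of the transition behaviour provided by check_var_migration() above (illustrative invocations from within t/; any test script would do):

# The old spelling still works but warns
# "warning: TEST_GIT_INDEX_VERSION is now GIT_TEST_INDEX_VERSION".
TEST_GIT_INDEX_VERSION=4 ./t0000-basic.sh
# Preferred new spelling.
GIT_TEST_INDEX_VERSION=4 ./t0000-basic.sh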
-if test -n "$GETTEXT_POISON" +if test -z "$GIT_TEST_GETTEXT_POISON" then - GIT_GETTEXT_POISON=YesPlease - export GIT_GETTEXT_POISON - test_set_prereq GETTEXT_POISON -else test_set_prereq C_LOCALE_OUTPUT fi +if test -z "$GIT_TEST_CHECK_CACHE_TREE" +then + GIT_TEST_CHECK_CACHE_TREE=true + export GIT_TEST_CHECK_CACHE_TREE +fi + test_lazy_prereq PIPE ' # test whether the filesystem supports FIFOs test_have_prereq !MINGW,!CYGWIN && @@ -1231,3 +1290,7 @@ test_lazy_prereq CURL ' test_lazy_prereq SHA1 ' test $(git hash-object /dev/null) = e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 ' + +test_lazy_prereq REBASE_P ' + test -z "$GIT_TEST_SKIP_REBASE_P" +' @@ -144,7 +144,7 @@ int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, u return 0; item->object.parsed = 1; - if (size < GIT_SHA1_HEXSZ + 24) + if (size < the_hash_algo->hexsz + 24) return -1; if (memcmp("object ", bufptr, 7) || parse_oid_hex(bufptr + 7, &oid, &bufptr) || *bufptr++ != '\n') return -1; diff --git a/thread-utils.c b/thread-utils.c index a2135e0743..5329845691 100644 --- a/thread-utils.c +++ b/thread-utils.c @@ -20,6 +20,9 @@ int online_cpus(void) { +#ifdef NO_PTHREADS + return 1; +#else #ifdef _SC_NPROCESSORS_ONLN long ncpus; #endif @@ -59,10 +62,12 @@ int online_cpus(void) #endif return 1; +#endif } int init_recursive_mutex(pthread_mutex_t *m) { +#ifndef NO_PTHREADS pthread_mutexattr_t a; int ret; @@ -74,4 +79,47 @@ int init_recursive_mutex(pthread_mutex_t *m) pthread_mutexattr_destroy(&a); } return ret; +#else + return 0; +#endif +} + +#ifdef NO_PTHREADS +int dummy_pthread_create(pthread_t *pthread, const void *attr, + void *(*fn)(void *), void *data) +{ + /* + * Do nothing. + * + * The main purpose of this function is to break compiler's + * flow analysis and avoid -Wunused-variable false warnings. + */ + return ENOSYS; +} + +int dummy_pthread_init(void *data) +{ + /* + * Do nothing. + * + * The main purpose of this function is to break compiler's + * flow analysis or it may realize that functions like + * pthread_mutex_init() is no-op, which means the (static) + * variable is not used/initialized at all and trigger + * -Wunused-variable + */ + return ENOSYS; } + +int dummy_pthread_join(pthread_t pthread, void **retval) +{ + /* + * Do nothing. + * + * The main purpose of this function is to break compiler's + * flow analysis and avoid -Wunused-variable false warnings. + */ + return ENOSYS; +} + +#endif diff --git a/thread-utils.h b/thread-utils.h index d9a769d190..4961487ed9 100644 --- a/thread-utils.h +++ b/thread-utils.h @@ -4,12 +4,54 @@ #ifndef NO_PTHREADS #include <pthread.h> -extern int online_cpus(void); -extern int init_recursive_mutex(pthread_mutex_t*); +#define HAVE_THREADS 1 #else -#define online_cpus() 1 +#define HAVE_THREADS 0 + +/* + * macros instead of typedefs because pthread definitions may have + * been pulled in by some system dependencies even though the user + * wants to disable pthread. 
+ */ +#define pthread_t int +#define pthread_mutex_t int +#define pthread_cond_t int +#define pthread_key_t int + +#define pthread_mutex_init(mutex, attr) dummy_pthread_init(mutex) +#define pthread_mutex_lock(mutex) +#define pthread_mutex_unlock(mutex) +#define pthread_mutex_destroy(mutex) + +#define pthread_cond_init(cond, attr) dummy_pthread_init(cond) +#define pthread_cond_wait(cond, mutex) +#define pthread_cond_signal(cond) +#define pthread_cond_broadcast(cond) +#define pthread_cond_destroy(cond) + +#define pthread_key_create(key, attr) dummy_pthread_init(key) +#define pthread_key_delete(key) + +#define pthread_create(thread, attr, fn, data) \ + dummy_pthread_create(thread, attr, fn, data) +#define pthread_join(thread, retval) \ + dummy_pthread_join(thread, retval) + +#define pthread_setspecific(key, data) +#define pthread_getspecific(key) NULL + +int dummy_pthread_create(pthread_t *pthread, const void *attr, + void *(*fn)(void *), void *data); +int dummy_pthread_join(pthread_t pthread, void **retval); + +int dummy_pthread_init(void *); #endif + +int online_cpus(void); +int init_recursive_mutex(pthread_mutex_t*); + + #endif /* THREAD_COMPAT_H */ @@ -176,10 +176,30 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key, strbuf_release(&buf); } +static uint64_t perf_start_times[10]; +static int perf_indent; + +uint64_t trace_performance_enter(void) +{ + uint64_t now; + + if (!trace_want(&trace_perf_key)) + return 0; + + now = getnanotime(); + perf_start_times[perf_indent] = now; + if (perf_indent + 1 < ARRAY_SIZE(perf_start_times)) + perf_indent++; + else + BUG("Too deep indentation"); + return now; +} + static void trace_performance_vprintf_fl(const char *file, int line, uint64_t nanos, const char *format, va_list ap) { + static const char space[] = " "; struct strbuf buf = STRBUF_INIT; if (!prepare_trace_line(file, line, &trace_perf_key, &buf)) @@ -188,7 +208,10 @@ static void trace_performance_vprintf_fl(const char *file, int line, strbuf_addf(&buf, "performance: %.9f s", (double) nanos / 1000000000); if (format && *format) { - strbuf_addstr(&buf, ": "); + if (perf_indent >= strlen(space)) + BUG("Too deep indentation"); + + strbuf_addf(&buf, ":%.*s ", perf_indent, space); strbuf_vaddf(&buf, format, ap); } @@ -244,6 +267,24 @@ void trace_performance_since(uint64_t start, const char *format, ...) va_end(ap); } +void trace_performance_leave(const char *format, ...) +{ + va_list ap; + uint64_t since; + + if (perf_indent) + perf_indent--; + + if (!format) /* Allow callers to leave without tracing anything */ + return; + + since = perf_start_times[perf_indent]; + va_start(ap, format); + trace_performance_vprintf_fl(NULL, 0, getnanotime() - since, + format, ap); + va_end(ap); +} + #else void trace_printf_key_fl(const char *file, int line, struct trace_key *key, @@ -273,6 +314,24 @@ void trace_performance_fl(const char *file, int line, uint64_t nanos, va_end(ap); } +void trace_performance_leave_fl(const char *file, int line, + uint64_t nanos, const char *format, ...) 
+{ + va_list ap; + uint64_t since; + + if (perf_indent) + perf_indent--; + + if (!format) /* Allow callers to leave without tracing anything */ + return; + + since = perf_start_times[perf_indent]; + va_start(ap, format); + trace_performance_vprintf_fl(file, line, nanos - since, format, ap); + va_end(ap); +} + #endif /* HAVE_VARIADIC_MACROS */ @@ -411,13 +470,11 @@ uint64_t getnanotime(void) } } -static uint64_t command_start_time; static struct strbuf command_line = STRBUF_INIT; static void print_command_performance_atexit(void) { - trace_performance_since(command_start_time, "git command:%s", - command_line.buf); + trace_performance_leave("git command:%s", command_line.buf); } void trace_command_performance(const char **argv) @@ -425,10 +482,10 @@ void trace_command_performance(const char **argv) if (!trace_want(&trace_perf_key)) return; - if (!command_start_time) + if (!command_line.len) atexit(print_command_performance_atexit); strbuf_reset(&command_line); sq_quote_argv_pretty(&command_line, argv); - command_start_time = getnanotime(); + trace_performance_enter(); } @@ -23,6 +23,7 @@ extern void trace_disable(struct trace_key *key); extern uint64_t getnanotime(void); extern void trace_command_performance(const char **argv); extern void trace_verbatim(struct trace_key *key, const void *buf, unsigned len); +uint64_t trace_performance_enter(void); #ifndef HAVE_VARIADIC_MACROS @@ -45,6 +46,9 @@ extern void trace_performance(uint64_t nanos, const char *format, ...); __attribute__((format (printf, 2, 3))) extern void trace_performance_since(uint64_t start, const char *format, ...); +__attribute__((format (printf, 1, 2))) +void trace_performance_leave(const char *format, ...); + #else /* @@ -118,6 +122,14 @@ extern void trace_performance_since(uint64_t start, const char *format, ...); __VA_ARGS__); \ } while (0) +#define trace_performance_leave(...) \ + do { \ + if (trace_pass_fl(&trace_perf_key)) \ + trace_performance_leave_fl(TRACE_CONTEXT, __LINE__, \ + getnanotime(), \ + __VA_ARGS__); \ + } while (0) + /* backend functions, use non-*fl macros instead */ __attribute__((format (printf, 4, 5))) extern void trace_printf_key_fl(const char *file, int line, struct trace_key *key, @@ -130,6 +142,9 @@ extern void trace_strbuf_fl(const char *file, int line, struct trace_key *key, __attribute__((format (printf, 4, 5))) extern void trace_performance_fl(const char *file, int line, uint64_t nanos, const char *fmt, ...); +__attribute__((format (printf, 4, 5))) +extern void trace_performance_leave_fl(const char *file, int line, + uint64_t nanos, const char *fmt, ...); static inline int trace_pass_fl(struct trace_key *key) { return key->fd || !key->initialized; diff --git a/transport-helper.c b/transport-helper.c index 143ca008c8..7213fa0d32 100644 --- a/transport-helper.c +++ b/transport-helper.c @@ -1105,6 +1105,7 @@ static struct ref *get_refs_list(struct transport *transport, int for_push, } static struct transport_vtable vtable = { + 0, set_helper_option, get_refs_list, fetch, diff --git a/transport-internal.h b/transport-internal.h index 1cde6258a7..004bee5e36 100644 --- a/transport-internal.h +++ b/transport-internal.h @@ -7,6 +7,12 @@ struct argv_array; struct transport_vtable { /** + * This transport supports the fetch() function being called + * without get_refs_list() first being called. + */ + unsigned fetch_without_list : 1; + + /** * Returns 0 if successful, positive if the option is not * recognized or is inapplicable, and negative if the option * is applicable but the value is invalid. 
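The trace_performance_enter()/trace_performance_leave() pairs added in trace.c above feed the existing GIT_TRACE_PERFORMANCE output, now with one extra column of indentation per nested region; a quick sketch of how to capture it (any git command works):

# Write the performance trace, including the indented per-region timings
# and the final "git command:" summary line, to perf.log.
GIT_TRACE_PERFORMANCE=1 git status 2>perf.log
grep "performance:" perf.log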
diff --git a/transport.c b/transport.c index 06ffea2774..5a74b609ff 100644 --- a/transport.c +++ b/transport.c @@ -252,8 +252,18 @@ static int connect_setup(struct transport *transport, int for_push) return 0; } -static struct ref *get_refs_via_connect(struct transport *transport, int for_push, - const struct argv_array *ref_prefixes) +/* + * Obtains the protocol version from the transport and writes it to + * transport->data->version, first connecting if not already connected. + * + * If the protocol version is one that allows skipping the listing of remote + * refs, and must_list_refs is 0, the listing of remote refs is skipped and + * this function returns NULL. Otherwise, this function returns the list of + * remote refs. + */ +static struct ref *handshake(struct transport *transport, int for_push, + const struct argv_array *ref_prefixes, + int must_list_refs) { struct git_transport_data *data = transport->data; struct ref *refs = NULL; @@ -268,8 +278,10 @@ static struct ref *get_refs_via_connect(struct transport *transport, int for_pus data->version = discover_version(&reader); switch (data->version) { case protocol_v2: - get_remote_refs(data->fd[1], &reader, &refs, for_push, - ref_prefixes, transport->server_options); + if (must_list_refs) + get_remote_refs(data->fd[1], &reader, &refs, for_push, + ref_prefixes, + transport->server_options); break; case protocol_v1: case protocol_v0: @@ -283,9 +295,18 @@ static struct ref *get_refs_via_connect(struct transport *transport, int for_pus } data->got_remote_heads = 1; + if (reader.line_peeked) + BUG("buffer must be empty at the end of handshake()"); + return refs; } +static struct ref *get_refs_via_connect(struct transport *transport, int for_push, + const struct argv_array *ref_prefixes) +{ + return handshake(transport, for_push, ref_prefixes, 1); +} + static int fetch_refs_via_pack(struct transport *transport, int nr_heads, struct ref **to_fetch) { @@ -320,8 +341,17 @@ static int fetch_refs_via_pack(struct transport *transport, args.server_options = transport->server_options; args.negotiation_tips = data->options.negotiation_tips; - if (!data->got_remote_heads) - refs_tmp = get_refs_via_connect(transport, 0, NULL); + if (!data->got_remote_heads) { + int i; + int must_list_refs = 0; + for (i = 0; i < nr_heads; i++) { + if (!to_fetch[i]->exact_oid) { + must_list_refs = 1; + break; + } + } + refs_tmp = handshake(transport, 0, NULL, must_list_refs); + } switch (data->version) { case protocol_v2: @@ -703,6 +733,7 @@ static int disconnect_git(struct transport *transport) } static struct transport_vtable taken_over_vtable = { + 1, NULL, get_refs_via_connect, fetch_refs_via_pack, @@ -852,6 +883,7 @@ void transport_check_allowed(const char *type) } static struct transport_vtable bundle_vtable = { + 0, NULL, get_refs_from_bundle, fetch_refs_from_bundle, @@ -861,6 +893,7 @@ static struct transport_vtable bundle_vtable = { }; static struct transport_vtable builtin_smart_vtable = { + 1, NULL, get_refs_via_connect, fetch_refs_via_pack, @@ -1139,7 +1172,8 @@ int transport_push(struct transport *transport, oid_array_append(&commits, &ref->new_oid); - if (!push_unpushed_submodules(&commits, + if (!push_unpushed_submodules(the_repository, + &commits, transport->remote, rs, transport->push_options, @@ -1163,8 +1197,10 @@ int transport_push(struct transport *transport, oid_array_append(&commits, &ref->new_oid); - if (find_unpushed_submodules(&commits, transport->remote->name, - &needs_pushing)) { + if (find_unpushed_submodules(the_repository, + &commits, + 
transport->remote->name, + &needs_pushing)) { oid_array_clear(&commits); die_with_unpushed_submodules(&needs_pushing); } @@ -1224,11 +1260,20 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs) struct ref **heads = NULL; struct ref *rm; + if (!transport->vtable->fetch_without_list) + /* + * Some transports (e.g. the built-in bundle transport and the + * transport helper interface) do not work when fetching is + * done immediately after transport creation. List the remote + * refs anyway (if not already listed) as a workaround. + */ + transport_get_remote_refs(transport, NULL); + for (rm = refs; rm; rm = rm->next) { nr_refs++; if (rm->peer_ref && !is_null_oid(&rm->old_oid) && - !oidcmp(&rm->peer_ref->old_oid, &rm->old_oid)) + oideq(&rm->peer_ref->old_oid, &rm->old_oid)) continue; ALLOC_GROW(heads, nr_heads + 1, nr_alloc); heads[nr_heads++] = rm; @@ -1325,6 +1370,33 @@ literal_copy: return xstrdup(url); } +static void fill_alternate_refs_command(struct child_process *cmd, + const char *repo_path) +{ + const char *value; + + if (!git_config_get_value("core.alternateRefsCommand", &value)) { + cmd->use_shell = 1; + + argv_array_push(&cmd->args, value); + argv_array_push(&cmd->args, repo_path); + } else { + cmd->git_cmd = 1; + + argv_array_pushf(&cmd->args, "--git-dir=%s", repo_path); + argv_array_push(&cmd->args, "for-each-ref"); + argv_array_push(&cmd->args, "--format=%(objectname)"); + + if (!git_config_get_value("core.alternateRefsPrefixes", &value)) { + argv_array_push(&cmd->args, "--"); + argv_array_split(&cmd->args, value); + } + } + + cmd->env = local_repo_env; + cmd->out = -1; +} + static void read_alternate_refs(const char *path, alternate_ref_fn *cb, void *data) @@ -1333,12 +1405,7 @@ static void read_alternate_refs(const char *path, struct strbuf line = STRBUF_INIT; FILE *fh; - cmd.git_cmd = 1; - argv_array_pushf(&cmd.args, "--git-dir=%s", path); - argv_array_push(&cmd.args, "for-each-ref"); - argv_array_push(&cmd.args, "--format=%(objectname) %(refname)"); - cmd.env = local_repo_env; - cmd.out = -1; + fill_alternate_refs_command(&cmd, path); if (start_command(&cmd)) return; @@ -1346,15 +1413,15 @@ static void read_alternate_refs(const char *path, fh = xfdopen(cmd.out, "r"); while (strbuf_getline_lf(&line, fh) != EOF) { struct object_id oid; + const char *p; - if (get_oid_hex(line.buf, &oid) || - line.buf[GIT_SHA1_HEXSZ] != ' ') { + if (parse_oid_hex(line.buf, &oid, &p) || *p) { warning(_("invalid line while parsing alternate refs: %s"), line.buf); break; } - cb(line.buf + GIT_SHA1_HEXSZ + 1, &oid, data); + cb(&oid, data); } fclose(fh); diff --git a/transport.h b/transport.h index 01e717c29e..9baeca2d7a 100644 --- a/transport.h +++ b/transport.h @@ -261,6 +261,6 @@ int transport_refs_pushed(struct ref *ref); void transport_print_push_status(const char *dest, struct ref *refs, int verbose, int porcelain, unsigned int *reject_reasons); -typedef void alternate_ref_fn(const char *refname, const struct object_id *oid, void *); +typedef void alternate_ref_fn(const struct object_id *oid, void *); extern void for_each_alternate_ref(alternate_ref_fn, void *); #endif diff --git a/tree-diff.c b/tree-diff.c index 553bc0e63a..0e54324610 100644 --- a/tree-diff.c +++ b/tree-diff.c @@ -491,7 +491,7 @@ static struct combine_diff_path *ll_diff_tree_paths( continue; /* diff(t,pi) != ø */ - if (oidcmp(t.entry.oid, tp[i].entry.oid) || + if (!oideq(t.entry.oid, tp[i].entry.oid) || (t.entry.mode != tp[i].entry.mode)) continue; @@ -605,7 +605,7 @@ static void 
try_to_follow_renames(const struct object_id *old_oid, choice = q->queue[0]; q->nr = 0; - diff_setup(&diff_opts); + repo_diff_setup(opt->repo, &diff_opts); diff_opts.flags.recursive = 1; diff_opts.flags.find_copies_harder = 1; diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; diff --git a/tree-walk.c b/tree-walk.c index 77b37f36fa..79bafbd1a2 100644 --- a/tree-walk.c +++ b/tree-walk.c @@ -1107,7 +1107,7 @@ enum interesting tree_entry_interesting(const struct name_entry *entry, * 5 | file | 1 | 1 | 0 * 6 | file | 1 | 2 | 0 * 7 | file | 2 | -1 | 2 - * 8 | file | 2 | 0 | 2 + * 8 | file | 2 | 0 | 1 * 9 | file | 2 | 1 | 0 * 10 | file | 2 | 2 | -1 * -----+-------+----------+----------+------- @@ -1118,7 +1118,7 @@ enum interesting tree_entry_interesting(const struct name_entry *entry, * 15 | dir | 1 | 1 | 1 (*) * 16 | dir | 1 | 2 | 0 * 17 | dir | 2 | -1 | 2 - * 18 | dir | 2 | 0 | 2 + * 18 | dir | 2 | 0 | 1 * 19 | dir | 2 | 1 | 1 (*) * 20 | dir | 2 | 2 | -1 * @@ -1134,7 +1134,12 @@ enum interesting tree_entry_interesting(const struct name_entry *entry, negative = do_match(entry, base, base_offset, ps, 1); - /* #3, #4, #7, #8, #13, #14, #17, #18 */ + /* #8, #18 */ + if (positive == all_entries_interesting && + negative == entry_not_interesting) + return entry_interesting; + + /* #3, #4, #7, #13, #14, #17 */ if (negative <= entry_not_interesting) return positive; diff --git a/unpack-trees.c b/unpack-trees.c index 82a83dbc67..7570df481b 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -336,20 +336,64 @@ static struct progress *get_progress(struct unpack_trees_options *o) return start_delayed_progress(_("Checking out files"), total); } +static void setup_collided_checkout_detection(struct checkout *state, + struct index_state *index) +{ + int i; + + state->clone = 1; + for (i = 0; i < index->cache_nr; i++) + index->cache[i]->ce_flags &= ~CE_MATCHED; +} + +static void report_collided_checkout(struct index_state *index) +{ + struct string_list list = STRING_LIST_INIT_NODUP; + int i; + + for (i = 0; i < index->cache_nr; i++) { + struct cache_entry *ce = index->cache[i]; + + if (!(ce->ce_flags & CE_MATCHED)) + continue; + + string_list_append(&list, ce->name); + ce->ce_flags &= ~CE_MATCHED; + } + + list.cmp = fspathcmp; + string_list_sort(&list); + + if (list.nr) { + warning(_("the following paths have collided (e.g. 
case-sensitive paths\n" + "on a case-insensitive filesystem) and only one from the same\n" + "colliding group is in the working tree:\n")); + + for (i = 0; i < list.nr; i++) + fprintf(stderr, " '%s'\n", list.items[i].string); + } + + string_list_clear(&list, 0); +} + static int check_updates(struct unpack_trees_options *o) { unsigned cnt = 0; int errs = 0; - struct progress *progress = NULL; + struct progress *progress; struct index_state *index = &o->result; struct checkout state = CHECKOUT_INIT; int i; + trace_performance_enter(); state.force = 1; state.quiet = 1; state.refresh_cache = 1; state.istate = index; + if (o->clone) + setup_collided_checkout_detection(&state, index); + progress = get_progress(o); if (o->update) @@ -414,6 +458,11 @@ static int check_updates(struct unpack_trees_options *o) errs |= finish_delayed_checkout(&state); if (o->update) git_attr_set_direction(GIT_ATTR_CHECKIN); + + if (o->clone) + report_collided_checkout(index); + + trace_performance_leave("check_updates"); return errs != 0; } @@ -630,7 +679,114 @@ static int switch_cache_bottom(struct traverse_info *info) static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k) { - return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid); + return name_j->oid && name_k->oid && oideq(name_j->oid, name_k->oid); +} + +static int all_trees_same_as_cache_tree(int n, unsigned long dirmask, + struct name_entry *names, + struct traverse_info *info) +{ + struct unpack_trees_options *o = info->data; + int i; + + if (!o->merge || dirmask != ((1 << n) - 1)) + return 0; + + for (i = 1; i < n; i++) + if (!are_same_oid(names, names + i)) + return 0; + + return cache_tree_matches_traversal(o->src_index->cache_tree, names, info); +} + +static int index_pos_by_traverse_info(struct name_entry *names, + struct traverse_info *info) +{ + struct unpack_trees_options *o = info->data; + int len = traverse_path_len(info, names); + char *name = xmalloc(len + 1 /* slash */ + 1 /* NUL */); + int pos; + + make_traverse_path(name, info, names); + name[len++] = '/'; + name[len] = '\0'; + pos = index_name_pos(o->src_index, name, len); + if (pos >= 0) + BUG("This is a directory and should not exist in index"); + pos = -pos - 1; + if (!starts_with(o->src_index->cache[pos]->name, name) || + (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name))) + BUG("pos must point at the first entry in this directory"); + free(name); + return pos; +} + +/* + * Fast path if we detect that all trees are the same as cache-tree at this + * path. We'll walk these trees in an iterative loop using cache-tree/index + * instead of ODB since we already know what these trees contain. + */ +static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names, + struct name_entry *names, + struct traverse_info *info) +{ + struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; + struct unpack_trees_options *o = info->data; + struct cache_entry *tree_ce = NULL; + int ce_len = 0; + int i, d; + + if (!o->merge) + BUG("We need cache-tree to do this optimization"); + + /* + * Do what unpack_callback() and unpack_nondirectories() normally + * do. But we walk all paths in an iterative loop instead. + * + * D/F conflicts and higher stage entries are not a concern + * because cache-tree would be invalidated and we would never + * get here in the first place. 
+ */ + for (i = 0; i < nr_entries; i++) { + int new_ce_len, len, rc; + + src[0] = o->src_index->cache[pos + i]; + + len = ce_namelen(src[0]); + new_ce_len = cache_entry_size(len); + + if (new_ce_len > ce_len) { + new_ce_len <<= 1; + tree_ce = xrealloc(tree_ce, new_ce_len); + memset(tree_ce, 0, new_ce_len); + ce_len = new_ce_len; + + tree_ce->ce_flags = create_ce_flags(0); + + for (d = 1; d <= nr_names; d++) + src[d] = tree_ce; + } + + tree_ce->ce_mode = src[0]->ce_mode; + tree_ce->ce_namelen = len; + oidcpy(&tree_ce->oid, &src[0]->oid); + memcpy(tree_ce->name, src[0]->name, len + 1); + + rc = call_unpack_fn((const struct cache_entry * const *)src, o); + if (rc < 0) { + free(tree_ce); + return rc; + } + + mark_ce_used(src[0], o); + } + free(tree_ce); + if (o->debug_unpack) + printf("Unpacked %d entries from %s to %s using cache-tree\n", + nr_entries, + o->src_index->cache[pos]->name, + o->src_index->cache[pos + nr_entries - 1]->name); + return 0; } static int traverse_trees_recursive(int n, unsigned long dirmask, @@ -644,6 +800,27 @@ static int traverse_trees_recursive(int n, unsigned long dirmask, void *buf[MAX_UNPACK_TREES]; struct traverse_info newinfo; struct name_entry *p; + int nr_entries; + + nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info); + if (nr_entries > 0) { + struct unpack_trees_options *o = info->data; + int pos = index_pos_by_traverse_info(names, info); + + if (!o->merge || df_conflicts) + BUG("Wrong condition to get here buddy"); + + /* + * All entries up to 'pos' must have been processed + * (i.e. marked CE_UNPACKED) at this point. But to be safe, + * save and restore cache_bottom anyway to not miss + * unprocessed entries before 'pos'. + */ + bottom = o->cache_bottom; + ret = traverse_by_cache_tree(pos, nr_entries, n, names, info); + o->cache_bottom = bottom; + return ret; + } p = names; while (!p->mode) @@ -810,6 +987,11 @@ static struct cache_entry *create_ce_entry(const struct traverse_info *info, return ce; } +/* + * Note that traverse_by_cache_tree() duplicates some logic in this function + * without actually calling it. If you change the logic here you may need to + * check and change there as well. + */ static int unpack_nondirectories(int n, unsigned long mask, unsigned long dirmask, struct cache_entry **src, @@ -1002,6 +1184,11 @@ static void debug_unpack_callback(int n, debug_name_entry(i, names + i); } +/* + * Note that traverse_by_cache_tree() duplicates some logic in this function + * without actually calling it. If you change the logic here you may need to + * check and change there as well. + */ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info) { struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; @@ -1289,6 +1476,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options if (len > MAX_UNPACK_TREES) die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES); + trace_performance_enter(); memset(&el, 0, sizeof(el)); if (!core_apply_sparse_checkout || !o->update) o->skip_sparse_checkout = 1; @@ -1361,7 +1549,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options } } - if (traverse_trees(len, t, &info) < 0) + trace_performance_enter(); + ret = traverse_trees(len, t, &info); + trace_performance_leave("traverse_trees"); + if (ret < 0) goto return_failed; } @@ -1436,7 +1627,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options ret = check_updates(o) ? 
(-2) : 0; if (o->dst_index) { + move_index_extensions(&o->result, o->src_index); if (!ret) { + if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0)) + cache_tree_verify(&o->result); if (!o->result.cache_tree) o->result.cache_tree = cache_tree(); if (!cache_tree_fully_valid(o->result.cache_tree)) @@ -1444,7 +1638,6 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options WRITE_TREE_SILENT | WRITE_TREE_REPAIR); } - move_index_extensions(&o->result, o->src_index); discard_index(o->dst_index); *o->dst_index = o->result; } else { @@ -1453,6 +1646,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options o->src_index = NULL; done: + trace_performance_leave("unpack_trees"); clear_exclude_list(&el); return ret; @@ -1484,7 +1678,7 @@ static int same(const struct cache_entry *a, const struct cache_entry *b) if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED) return 0; return a->ce_mode == b->ce_mode && - !oidcmp(&a->oid, &b->oid); + oideq(&a->oid, &b->oid); } @@ -1616,7 +1810,7 @@ static int verify_clean_subdirectory(const struct cache_entry *ce, * If we are not going to update the submodule, then * we don't care. */ - if (!sub_head && !oidcmp(&oid, &ce->oid)) + if (!sub_head && oideq(&oid, &ce->oid)) return 0; return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid), ce, error_type, o); @@ -1644,6 +1838,7 @@ static int verify_clean_subdirectory(const struct cache_entry *ce, if (verify_uptodate(ce2, o)) return -1; add_entry(o, ce2, CE_REMOVE, 0); + invalidate_ce_path(ce, o); mark_ce_used(ce2, o); } cnt++; @@ -1903,6 +2098,8 @@ static int keep_entry(const struct cache_entry *ce, struct unpack_trees_options *o) { add_entry(o, ce, 0, 0); + if (ce_stage(ce)) + invalidate_ce_path(ce, o); return 1; } diff --git a/unpack-trees.h b/unpack-trees.h index 847f217dba..0135080a7b 100644 --- a/unpack-trees.h +++ b/unpack-trees.h @@ -45,6 +45,7 @@ struct unpack_trees_options { unsigned int reset, merge, update, + clone, index_only, nontrivial_merge, trivial_merges_only, diff --git a/upload-pack.c b/upload-pack.c index 9d4d6ad82f..5e81f1ff24 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -25,19 +25,22 @@ #include "upload-pack.h" #include "serve.h" #include "commit-graph.h" +#include "commit-reach.h" /* Remember to update object flag allocation in object.h */ #define THEY_HAVE (1u << 11) #define OUR_REF (1u << 12) #define WANTED (1u << 13) #define COMMON_KNOWN (1u << 14) -#define REACHABLE (1u << 15) #define SHALLOW (1u << 16) #define NOT_SHALLOW (1u << 17) #define CLIENT_SHALLOW (1u << 18) #define HIDDEN_REF (1u << 19) +#define ALL_FLAGS (THEY_HAVE | OUR_REF | WANTED | COMMON_KNOWN | SHALLOW | \ + NOT_SHALLOW | CLIENT_SHALLOW | HIDDEN_REF) + static timestamp_t oldest_have; static int deepen_relative; @@ -53,8 +56,6 @@ static int no_progress, daemon_mode; #define ALLOW_ANY_SHA1 07 static unsigned int allow_unadvertised_object_request; static int shallow_nr; -static struct object_array have_obj; -static struct object_array want_obj; static struct object_array extra_edge_obj; static unsigned int timeout; static int keepalive = 5; @@ -100,7 +101,8 @@ static int write_one_shallow(const struct commit_graft *graft, void *cb_data) return 0; } -static void create_pack_file(void) +static void create_pack_file(const struct object_array *have_obj, + const struct object_array *want_obj) { struct child_process pack_objects = CHILD_PROCESS_INIT; char data[8193], progress[128]; @@ -161,13 +163,13 @@ static void create_pack_file(void) if (shallow_nr) 
for_each_commit_graft(write_one_shallow, pipe_fd); - for (i = 0; i < want_obj.nr; i++) + for (i = 0; i < want_obj->nr; i++) fprintf(pipe_fd, "%s\n", - oid_to_hex(&want_obj.objects[i].item->oid)); + oid_to_hex(&want_obj->objects[i].item->oid)); fprintf(pipe_fd, "--not\n"); - for (i = 0; i < have_obj.nr; i++) + for (i = 0; i < have_obj->nr; i++) fprintf(pipe_fd, "%s\n", - oid_to_hex(&have_obj.objects[i].item->oid)); + oid_to_hex(&have_obj->objects[i].item->oid)); for (i = 0; i < extra_edge_obj.nr; i++) fprintf(pipe_fd, "%s\n", oid_to_hex(&extra_edge_obj.objects[i].item->oid)); @@ -304,7 +306,8 @@ static void create_pack_file(void) die("git upload-pack: %s", abort_msg); } -static int got_oid(const char *hex, struct object_id *oid) +static int got_oid(const char *hex, struct object_id *oid, + struct object_array *have_obj) { struct object *o; int we_knew_they_have = 0; @@ -332,73 +335,27 @@ static int got_oid(const char *hex, struct object_id *oid) parents->item->object.flags |= THEY_HAVE; } if (!we_knew_they_have) { - add_object_array(o, NULL, &have_obj); + add_object_array(o, NULL, have_obj); return 1; } return 0; } -static int reachable(struct commit *want) -{ - struct prio_queue work = { compare_commits_by_commit_date }; - - prio_queue_put(&work, want); - while (work.nr) { - struct commit_list *list; - struct commit *commit = prio_queue_get(&work); - - if (commit->object.flags & THEY_HAVE) { - want->object.flags |= COMMON_KNOWN; - break; - } - if (!commit->object.parsed) - parse_object(the_repository, &commit->object.oid); - if (commit->object.flags & REACHABLE) - continue; - commit->object.flags |= REACHABLE; - if (commit->date < oldest_have) - continue; - for (list = commit->parents; list; list = list->next) { - struct commit *parent = list->item; - if (!(parent->object.flags & REACHABLE)) - prio_queue_put(&work, parent); - } - } - want->object.flags |= REACHABLE; - clear_commit_marks(want, REACHABLE); - clear_prio_queue(&work); - return (want->object.flags & COMMON_KNOWN); -} - -static int ok_to_give_up(void) +static int ok_to_give_up(const struct object_array *have_obj, + struct object_array *want_obj) { - int i; + uint32_t min_generation = GENERATION_NUMBER_ZERO; - if (!have_obj.nr) + if (!have_obj->nr) return 0; - for (i = 0; i < want_obj.nr; i++) { - struct object *want = want_obj.objects[i].item; - - if (want->flags & COMMON_KNOWN) - continue; - want = deref_tag(the_repository, want, "a want line", 0); - if (!want || want->type != OBJ_COMMIT) { - /* no way to tell if this is reachable by - * looking at the ancestry chain alone, so - * leave a note to ourselves not to worry about - * this object anymore. 
- */ - want_obj.objects[i].item->flags |= COMMON_KNOWN; - continue; - } - if (!reachable((struct commit *)want)) - return 0; - } - return 1; + return can_all_from_reach_with_flag(want_obj, THEY_HAVE, + COMMON_KNOWN, oldest_have, + min_generation); } -static int get_common_commits(void) +static int get_common_commits(struct object_array *have_obj, + struct object_array *want_obj) { struct object_id oid; char last_hex[GIT_MAX_HEXSZ + 1]; @@ -416,11 +373,11 @@ static int get_common_commits(void) if (!line) { if (multi_ack == 2 && got_common - && !got_other && ok_to_give_up()) { + && !got_other && ok_to_give_up(have_obj, want_obj)) { sent_ready = 1; packet_write_fmt(1, "ACK %s ready\n", last_hex); } - if (have_obj.nr == 0 || multi_ack) + if (have_obj->nr == 0 || multi_ack) packet_write_fmt(1, "NAK\n"); if (no_done && sent_ready) { @@ -434,10 +391,10 @@ static int get_common_commits(void) continue; } if (skip_prefix(line, "have ", &arg)) { - switch (got_oid(arg, &oid)) { + switch (got_oid(arg, &oid, have_obj)) { case -1: /* they have what we do not */ got_other = 1; - if (multi_ack && ok_to_give_up()) { + if (multi_ack && ok_to_give_up(have_obj, want_obj)) { const char *hex = oid_to_hex(&oid); if (multi_ack == 2) { sent_ready = 1; @@ -453,14 +410,14 @@ static int get_common_commits(void) packet_write_fmt(1, "ACK %s common\n", last_hex); else if (multi_ack) packet_write_fmt(1, "ACK %s continue\n", last_hex); - else if (have_obj.nr == 1) + else if (have_obj->nr == 1) packet_write_fmt(1, "ACK %s\n", last_hex); break; } continue; } if (!strcmp(line, "done")) { - if (have_obj.nr > 0) { + if (have_obj->nr > 0) { if (multi_ack) packet_write_fmt(1, "ACK %s\n", last_hex); return 0; @@ -492,6 +449,7 @@ static int do_reachable_revlist(struct child_process *cmd, struct object *o; char namebuf[GIT_MAX_HEXSZ + 2]; /* ^ + hash + LF */ int i; + const unsigned hexsz = the_hash_algo->hexsz; cmd->argv = argv; cmd->git_cmd = 1; @@ -510,7 +468,7 @@ static int do_reachable_revlist(struct child_process *cmd, goto error; namebuf[0] = '^'; - namebuf[GIT_SHA1_HEXSZ + 1] = '\n'; + namebuf[hexsz + 1] = '\n'; for (i = get_max_object_index(); 0 < i; ) { o = get_indexed_object(--i); if (!o) @@ -519,11 +477,11 @@ static int do_reachable_revlist(struct child_process *cmd, o->flags &= ~TMP_MARK; if (!is_our_ref(o)) continue; - memcpy(namebuf + 1, oid_to_hex(&o->oid), GIT_SHA1_HEXSZ); - if (write_in_full(cmd->in, namebuf, GIT_SHA1_HEXSZ + 2) < 0) + memcpy(namebuf + 1, oid_to_hex(&o->oid), hexsz); + if (write_in_full(cmd->in, namebuf, hexsz + 2) < 0) goto error; } - namebuf[GIT_SHA1_HEXSZ] = '\n'; + namebuf[hexsz] = '\n'; for (i = 0; i < src->nr; i++) { o = src->objects[i].item; if (is_our_ref(o)) { @@ -533,8 +491,8 @@ static int do_reachable_revlist(struct child_process *cmd, } if (reachable && o->type == OBJ_COMMIT) o->flags |= TMP_MARK; - memcpy(namebuf, oid_to_hex(&o->oid), GIT_SHA1_HEXSZ); - if (write_in_full(cmd->in, namebuf, GIT_SHA1_HEXSZ + 1) < 0) + memcpy(namebuf, oid_to_hex(&o->oid), hexsz); + if (write_in_full(cmd->in, namebuf, hexsz + 1) < 0) goto error; } close(cmd->in); @@ -630,7 +588,7 @@ error: return 1; } -static void check_non_tip(void) +static void check_non_tip(struct object_array *want_obj) { int i; @@ -641,14 +599,14 @@ static void check_non_tip(void) */ if (!stateless_rpc && !(allow_unadvertised_object_request & ALLOW_REACHABLE_SHA1)) goto error; - if (!has_unreachable(&want_obj)) + if (!has_unreachable(want_obj)) /* All the non-tip ones are ancestors of what we advertised */ return; error: /* Pick one of 
them (we know there at least is one) */ - for (i = 0; i < want_obj.nr; i++) { - struct object *o = want_obj.objects[i].item; + for (i = 0; i < want_obj->nr; i++) { + struct object *o = want_obj->objects[i].item; if (!is_our_ref(o)) die("git upload-pack: not our ref %s", oid_to_hex(&o->oid)); @@ -669,7 +627,8 @@ static void send_shallow(struct commit_list *result) } } -static void send_unshallow(const struct object_array *shallows) +static void send_unshallow(const struct object_array *shallows, + struct object_array *want_obj) { int i; @@ -693,7 +652,7 @@ static void send_unshallow(const struct object_array *shallows) parents = ((struct commit *)object)->parents; while (parents) { add_object_array(&parents->item->object, - NULL, &want_obj); + NULL, want_obj); parents = parents->next; } add_object_array(object, NULL, &extra_edge_obj); @@ -704,7 +663,7 @@ static void send_unshallow(const struct object_array *shallows) } static void deepen(int depth, int deepen_relative, - struct object_array *shallows) + struct object_array *shallows, struct object_array *want_obj) { if (depth == INFINITE_DEPTH && !is_repository_shallow(the_repository)) { int i; @@ -727,17 +686,18 @@ static void deepen(int depth, int deepen_relative, } else { struct commit_list *result; - result = get_shallow_commits(&want_obj, depth, + result = get_shallow_commits(want_obj, depth, SHALLOW, NOT_SHALLOW); send_shallow(result); free_commit_list(result); } - send_unshallow(shallows); + send_unshallow(shallows, want_obj); } static void deepen_by_rev_list(int ac, const char **av, - struct object_array *shallows) + struct object_array *shallows, + struct object_array *want_obj) { struct commit_list *result; @@ -745,21 +705,22 @@ static void deepen_by_rev_list(int ac, const char **av, result = get_shallow_commits_by_rev_list(ac, av, SHALLOW, NOT_SHALLOW); send_shallow(result); free_commit_list(result); - send_unshallow(shallows); + send_unshallow(shallows, want_obj); } /* Returns 1 if a shallow list is sent or 0 otherwise */ static int send_shallow_list(int depth, int deepen_rev_list, timestamp_t deepen_since, struct string_list *deepen_not, - struct object_array *shallows) + struct object_array *shallows, + struct object_array *want_obj) { int ret = 0; if (depth > 0 && deepen_rev_list) die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together"); if (depth > 0) { - deepen(depth, deepen_relative, shallows); + deepen(depth, deepen_relative, shallows, want_obj); ret = 1; } else if (deepen_rev_list) { struct argv_array av = ARGV_ARRAY_INIT; @@ -776,11 +737,11 @@ static int send_shallow_list(int depth, int deepen_rev_list, } argv_array_push(&av, "--not"); } - for (i = 0; i < want_obj.nr; i++) { - struct object *o = want_obj.objects[i].item; + for (i = 0; i < want_obj->nr; i++) { + struct object *o = want_obj->objects[i].item; argv_array_push(&av, oid_to_hex(&o->oid)); } - deepen_by_rev_list(av.argc, av.argv, shallows); + deepen_by_rev_list(av.argc, av.argv, shallows, want_obj); argv_array_clear(&av); ret = 1; } else { @@ -865,7 +826,7 @@ static int process_deepen_not(const char *line, struct string_list *deepen_not, return 0; } -static void receive_needs(void) +static void receive_needs(struct object_array *want_obj) { struct object_array shallows = OBJECT_ARRAY_INIT; struct string_list deepen_not = STRING_LIST_INIT_DUP; @@ -943,7 +904,7 @@ static void receive_needs(void) if (!((allow_unadvertised_object_request & ALLOW_ANY_SHA1) == ALLOW_ANY_SHA1 || is_our_ref(o))) has_non_tip = 1; - add_object_array(o, 
NULL, &want_obj); + add_object_array(o, NULL, want_obj); } } @@ -955,7 +916,7 @@ static void receive_needs(void) * by another process that handled the initial request. */ if (has_non_tip) - check_non_tip(); + check_non_tip(want_obj); if (!use_sideband && daemon_mode) no_progress = 1; @@ -964,7 +925,7 @@ static void receive_needs(void) return; if (send_shallow_list(depth, deepen_rev_list, deepen_since, - &deepen_not, &shallows)) + &deepen_not, &shallows, want_obj)) packet_flush(1); object_array_clear(&shallows); } @@ -1093,6 +1054,7 @@ static int upload_pack_config(const char *var, const char *value, void *unused) void upload_pack(struct upload_pack_options *options) { struct string_list symref = STRING_LIST_INIT_DUP; + struct object_array want_obj = OBJECT_ARRAY_INIT; stateless_rpc = options->stateless_rpc; timeout = options->timeout; @@ -1116,10 +1078,11 @@ void upload_pack(struct upload_pack_options *options) if (options->advertise_refs) return; - receive_needs(); + receive_needs(&want_obj); if (want_obj.nr) { - get_common_commits(); - create_pack_file(); + struct object_array have_obj = OBJECT_ARRAY_INIT; + get_common_commits(&have_obj, &want_obj); + create_pack_file(&have_obj, &want_obj); } } @@ -1169,7 +1132,7 @@ static void upload_pack_data_clear(struct upload_pack_data *data) string_list_clear(&data->deepen_not, 0); } -static int parse_want(const char *line) +static int parse_want(const char *line, struct object_array *want_obj) { const char *arg; if (skip_prefix(line, "want ", &arg)) { @@ -1191,7 +1154,7 @@ static int parse_want(const char *line) if (!(o->flags & WANTED)) { o->flags |= WANTED; - add_object_array(o, NULL, &want_obj); + add_object_array(o, NULL, want_obj); } return 1; @@ -1200,7 +1163,8 @@ static int parse_want(const char *line) return 0; } -static int parse_want_ref(const char *line, struct string_list *wanted_refs) +static int parse_want_ref(const char *line, struct string_list *wanted_refs, + struct object_array *want_obj) { const char *arg; if (skip_prefix(line, "want-ref ", &arg)) { @@ -1219,7 +1183,7 @@ static int parse_want_ref(const char *line, struct string_list *wanted_refs) o = parse_object_or_die(&oid, arg); if (!(o->flags & WANTED)) { o->flags |= WANTED; - add_object_array(o, NULL, &want_obj); + add_object_array(o, NULL, want_obj); } return 1; @@ -1244,16 +1208,18 @@ static int parse_have(const char *line, struct oid_array *haves) } static void process_args(struct packet_reader *request, - struct upload_pack_data *data) + struct upload_pack_data *data, + struct object_array *want_obj) { while (packet_reader_read(request) != PACKET_READ_FLUSH) { const char *arg = request->line; const char *p; /* process want */ - if (parse_want(arg)) + if (parse_want(arg, want_obj)) continue; - if (allow_ref_in_want && parse_want_ref(arg, &data->wanted_refs)) + if (allow_ref_in_want && + parse_want_ref(arg, &data->wanted_refs, want_obj)) continue; /* process have line */ if (parse_have(arg, &data->haves)) @@ -1307,7 +1273,8 @@ static void process_args(struct packet_reader *request, } } -static int process_haves(struct oid_array *haves, struct oid_array *common) +static int process_haves(struct oid_array *haves, struct oid_array *common, + struct object_array *have_obj) { int i; @@ -1340,13 +1307,15 @@ static int process_haves(struct oid_array *haves, struct oid_array *common) parents->item->object.flags |= THEY_HAVE; } if (!we_knew_they_have) - add_object_array(o, NULL, &have_obj); + add_object_array(o, NULL, have_obj); } return 0; } -static int send_acks(struct oid_array 
*acks, struct strbuf *response) +static int send_acks(struct oid_array *acks, struct strbuf *response, + const struct object_array *have_obj, + struct object_array *want_obj) { int i; @@ -1361,7 +1330,7 @@ static int send_acks(struct oid_array *acks, struct strbuf *response) oid_to_hex(&acks->oid[i])); } - if (ok_to_give_up()) { + if (ok_to_give_up(have_obj, want_obj)) { /* Send Ready */ packet_buf_write(response, "ready\n"); return 1; @@ -1370,16 +1339,18 @@ static int send_acks(struct oid_array *acks, struct strbuf *response) return 0; } -static int process_haves_and_send_acks(struct upload_pack_data *data) +static int process_haves_and_send_acks(struct upload_pack_data *data, + struct object_array *have_obj, + struct object_array *want_obj) { struct oid_array common = OID_ARRAY_INIT; struct strbuf response = STRBUF_INIT; int ret = 0; - process_haves(&data->haves, &common); + process_haves(&data->haves, &common, have_obj); if (data->done) { ret = 1; - } else if (send_acks(&common, &response)) { + } else if (send_acks(&common, &response, have_obj, want_obj)) { packet_buf_delim(&response); ret = 1; } else { @@ -1415,7 +1386,8 @@ static void send_wanted_ref_info(struct upload_pack_data *data) packet_delim(1); } -static void send_shallow_info(struct upload_pack_data *data) +static void send_shallow_info(struct upload_pack_data *data, + struct object_array *want_obj) { /* No shallow info needs to be sent */ if (!data->depth && !data->deepen_rev_list && !data->shallows.nr && @@ -1426,9 +1398,10 @@ static void send_shallow_info(struct upload_pack_data *data) if (!send_shallow_list(data->depth, data->deepen_rev_list, data->deepen_since, &data->deepen_not, - &data->shallows) && + &data->shallows, want_obj) && is_repository_shallow(the_repository)) - deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows); + deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows, + want_obj); packet_delim(1); } @@ -1445,6 +1418,10 @@ int upload_pack_v2(struct repository *r, struct argv_array *keys, { enum fetch_state state = FETCH_PROCESS_ARGS; struct upload_pack_data data; + struct object_array have_obj = OBJECT_ARRAY_INIT; + struct object_array want_obj = OBJECT_ARRAY_INIT; + + clear_object_flags(ALL_FLAGS); git_config(upload_pack_config, NULL); @@ -1454,7 +1431,7 @@ int upload_pack_v2(struct repository *r, struct argv_array *keys, while (state != FETCH_DONE) { switch (state) { case FETCH_PROCESS_ARGS: - process_args(request, &data); + process_args(request, &data, &want_obj); if (!want_obj.nr) { /* @@ -1476,17 +1453,18 @@ int upload_pack_v2(struct repository *r, struct argv_array *keys, } break; case FETCH_SEND_ACKS: - if (process_haves_and_send_acks(&data)) + if (process_haves_and_send_acks(&data, &have_obj, + &want_obj)) state = FETCH_SEND_PACK; else state = FETCH_DONE; break; case FETCH_SEND_PACK: send_wanted_ref_info(&data); - send_shallow_info(&data); + send_shallow_info(&data, &want_obj); packet_write_fmt(1, "packfile\n"); - create_pack_file(); + create_pack_file(&have_obj, &want_obj); state = FETCH_DONE; break; case FETCH_DONE: @@ -1495,6 +1473,8 @@ int upload_pack_v2(struct repository *r, struct argv_array *keys, } upload_pack_data_clear(&data); + object_array_clear(&have_obj); + object_array_clear(&want_obj); return 0; } diff --git a/userdiff.c b/userdiff.c index f565f6731d..46d34cc2a4 100644 --- a/userdiff.c +++ b/userdiff.c @@ -270,7 +270,8 @@ struct userdiff_driver *userdiff_find_by_name(const char *name) { return userdiff_find_by_namelen(name, len); } -struct userdiff_driver 
*userdiff_find_by_path(const char *path) +struct userdiff_driver *userdiff_find_by_path(struct index_state *istate, + const char *path) { static struct attr_check *check; @@ -278,7 +279,7 @@ struct userdiff_driver *userdiff_find_by_path(const char *path) check = attr_check_initl("diff", NULL); if (!path) return NULL; - git_check_attr(&the_index, path, check); + git_check_attr(istate, path, check); if (ATTR_TRUE(check->items[0].value)) return &driver_true; diff --git a/userdiff.h b/userdiff.h index 2ef0ce5452..b072bfe89a 100644 --- a/userdiff.h +++ b/userdiff.h @@ -3,6 +3,8 @@ #include "notes-cache.h" +struct index_state; + struct userdiff_funcname { const char *pattern; int cflags; @@ -21,7 +23,8 @@ struct userdiff_driver { int userdiff_config(const char *k, const char *v); struct userdiff_driver *userdiff_find_by_name(const char *name); -struct userdiff_driver *userdiff_find_by_path(const char *path); +struct userdiff_driver *userdiff_find_by_path(struct index_state *istate, + const char *path); /* * Initialize any textconv-related fields in the driver and return it, or NULL diff --git a/vcs-svn/fast_export.h b/vcs-svn/fast_export.h index 60b79c35b9..9dcf9337c1 100644 --- a/vcs-svn/fast_export.h +++ b/vcs-svn/fast_export.h @@ -1,5 +1,5 @@ -#ifndef FAST_EXPORT_H_ -#define FAST_EXPORT_H_ +#ifndef FAST_EXPORT_H +#define FAST_EXPORT_H struct strbuf; struct line_buffer; diff --git a/vcs-svn/line_buffer.h b/vcs-svn/line_buffer.h index ee23b4f490..e192aedea2 100644 --- a/vcs-svn/line_buffer.h +++ b/vcs-svn/line_buffer.h @@ -1,5 +1,5 @@ -#ifndef LINE_BUFFER_H_ -#define LINE_BUFFER_H_ +#ifndef LINE_BUFFER_H +#define LINE_BUFFER_H #include "strbuf.h" diff --git a/vcs-svn/sliding_window.h b/vcs-svn/sliding_window.h index b43a825cba..189c32d84c 100644 --- a/vcs-svn/sliding_window.h +++ b/vcs-svn/sliding_window.h @@ -1,5 +1,5 @@ -#ifndef SLIDING_WINDOW_H_ -#define SLIDING_WINDOW_H_ +#ifndef SLIDING_WINDOW_H +#define SLIDING_WINDOW_H #include "strbuf.h" diff --git a/vcs-svn/svndiff.h b/vcs-svn/svndiff.h index 74eb464bab..10a2cbc40e 100644 --- a/vcs-svn/svndiff.h +++ b/vcs-svn/svndiff.h @@ -1,5 +1,5 @@ -#ifndef SVNDIFF_H_ -#define SVNDIFF_H_ +#ifndef SVNDIFF_H +#define SVNDIFF_H struct line_buffer; struct sliding_view; diff --git a/vcs-svn/svndump.h b/vcs-svn/svndump.h index b8eb12954e..26faed5968 100644 --- a/vcs-svn/svndump.h +++ b/vcs-svn/svndump.h @@ -1,5 +1,5 @@ -#ifndef SVNDUMP_H_ -#define SVNDUMP_H_ +#ifndef SVNDUMP_H +#define SVNDUMP_H int svndump_init(const char *filename); int svndump_init_fd(int in_fd, int back_fd); diff --git a/wildmatch.c b/wildmatch.c index d074c1be10..9e9e2a2f95 100644 --- a/wildmatch.c +++ b/wildmatch.c @@ -104,8 +104,8 @@ static int dowild(const uchar *p, const uchar *text, unsigned int flags) dowild(p + 1, text, flags) == WM_MATCH) return WM_MATCH; match_slash = 1; - } else - return WM_ABORT_MALFORMED; + } else /* WM_PATHNAME is set */ + match_slash = 0; } else /* without WM_PATHNAME, '*' == '**' */ match_slash = flags & WM_PATHNAME ? 
0 : 1; diff --git a/wildmatch.h b/wildmatch.h index b8c826aa68..5993696298 100644 --- a/wildmatch.h +++ b/wildmatch.h @@ -4,7 +4,6 @@ #define WM_CASEFOLD 1 #define WM_PATHNAME 2 -#define WM_ABORT_MALFORMED 2 #define WM_NOMATCH 1 #define WM_MATCH 0 #define WM_ABORT_ALL -1 diff --git a/worktree.c b/worktree.c index 97cda5f97b..d6a0ee7f73 100644 --- a/worktree.c +++ b/worktree.c @@ -217,7 +217,11 @@ struct worktree *find_worktree(struct worktree **list, if (prefix) arg = to_free = prefix_filename(prefix, arg); - path = real_pathdup(arg, 1); + path = real_pathdup(arg, 0); + if (!path) { + free(to_free); + return NULL; + } for (; *list; list++) if (!fspathcmp(path, real_path((*list)->path))) break; @@ -231,7 +235,7 @@ int is_main_worktree(const struct worktree *wt) return !wt->id; } -const char *is_worktree_locked(struct worktree *wt) +const char *worktree_lock_reason(struct worktree *wt) { assert(!is_main_worktree(wt)); @@ -483,6 +487,75 @@ int submodule_uses_worktrees(const char *path) return ret; } +int parse_worktree_ref(const char *worktree_ref, const char **name, + int *name_length, const char **ref) +{ + if (skip_prefix(worktree_ref, "main-worktree/", &worktree_ref)) { + if (!*worktree_ref) + return -1; + if (name) + *name = NULL; + if (name_length) + *name_length = 0; + if (ref) + *ref = worktree_ref; + return 0; + } + if (skip_prefix(worktree_ref, "worktrees/", &worktree_ref)) { + const char *slash = strchr(worktree_ref, '/'); + + if (!slash || slash == worktree_ref || !slash[1]) + return -1; + if (name) + *name = worktree_ref; + if (name_length) + *name_length = slash - worktree_ref; + if (ref) + *ref = slash + 1; + return 0; + } + return -1; +} + +void strbuf_worktree_ref(const struct worktree *wt, + struct strbuf *sb, + const char *refname) +{ + switch (ref_type(refname)) { + case REF_TYPE_PSEUDOREF: + case REF_TYPE_PER_WORKTREE: + if (wt && !wt->is_current) { + if (is_main_worktree(wt)) + strbuf_addstr(sb, "main-worktree/"); + else + strbuf_addf(sb, "worktrees/%s/", wt->id); + } + break; + + case REF_TYPE_MAIN_PSEUDOREF: + case REF_TYPE_OTHER_PSEUDOREF: + break; + + case REF_TYPE_NORMAL: + /* + * For shared refs, don't prefix worktrees/ or + * main-worktree/. It's not necessary and + * files-backend.c can't handle it anyway. 
+ */ + break; + } + strbuf_addstr(sb, refname); +} + +const char *worktree_ref(const struct worktree *wt, const char *refname) +{ + static struct strbuf sb = STRBUF_INIT; + + strbuf_reset(&sb); + strbuf_worktree_ref(wt, &sb, refname); + return sb.buf; +} + int other_head_refs(each_ref_fn fn, void *cb_data) { struct worktree **worktrees, **p; @@ -491,13 +564,17 @@ int other_head_refs(each_ref_fn fn, void *cb_data) worktrees = get_worktrees(0); for (p = worktrees; *p; p++) { struct worktree *wt = *p; - struct ref_store *refs; + struct object_id oid; + int flag; if (wt->is_current) continue; - refs = get_worktree_ref_store(wt); - ret = refs_head_ref(refs, fn, cb_data); + if (!refs_read_ref_full(get_main_ref_store(the_repository), + worktree_ref(wt, "HEAD"), + RESOLVE_REF_READING, + &oid, &flag)) + ret = fn(worktree_ref(wt, "HEAD"), &oid, flag, cb_data); if (ret) break; } diff --git a/worktree.h b/worktree.h index df3fc30f73..9e3b0b7b6f 100644 --- a/worktree.h +++ b/worktree.h @@ -10,12 +10,12 @@ struct worktree { char *path; char *id; char *head_ref; /* NULL if HEAD is broken or detached */ - char *lock_reason; /* internal use */ + char *lock_reason; /* private - use worktree_lock_reason */ struct object_id head_oid; int is_detached; int is_bare; int is_current; - int lock_reason_valid; + int lock_reason_valid; /* private */ }; /* Functions for acting on the information about worktrees. */ @@ -60,7 +60,7 @@ extern int is_main_worktree(const struct worktree *wt); * Return the reason string if the given worktree is locked or NULL * otherwise. */ -extern const char *is_worktree_locked(struct worktree *wt); +extern const char *worktree_lock_reason(struct worktree *wt); #define WT_VALIDATE_WORKTREE_MISSING_OK (1 << 0) @@ -108,4 +108,28 @@ extern const char *worktree_git_path(const struct worktree *wt, const char *fmt, ...) __attribute__((format (printf, 2, 3))); +/* + * Parse a worktree ref (i.e. with prefix main-worktree/ or + * worktrees/) and return the position of the worktree's name and + * length (or NULL and zero if it's main worktree), and ref. + * + * All name, name_length and ref arguments could be NULL. + */ +int parse_worktree_ref(const char *worktree_ref, const char **name, + int *name_length, const char **ref); + +/* + * Return a refname suitable for access from the current ref store. + */ +void strbuf_worktree_ref(const struct worktree *wt, + struct strbuf *sb, + const char *refname); + +/* + * Return a refname suitable for access from the current ref + * store. The result will be destroyed at the next call. 
+ */ +const char *worktree_ref(const struct worktree *wt, + const char *refname); + #endif @@ -3,7 +3,6 @@ * * Copyright (c) 2007 Junio C Hamano */ - #include "cache.h" #include "attr.h" @@ -71,7 +70,7 @@ unsigned parse_whitespace_rule(const char *string) return rule; } -unsigned whitespace_rule(const char *pathname) +unsigned whitespace_rule(struct index_state *istate, const char *pathname) { static struct attr_check *attr_whitespace_rule; const char *value; @@ -79,7 +78,7 @@ unsigned whitespace_rule(const char *pathname) if (!attr_whitespace_rule) attr_whitespace_rule = attr_check_initl("whitespace", NULL); - git_check_attr(&the_index, pathname, attr_whitespace_rule); + git_check_attr(istate, pathname, attr_whitespace_rule); value = attr_whitespace_rule->items[0].value; if (ATTR_TRUE(value)) { /* true (whitespace) */ diff --git a/wt-status.c b/wt-status.c index 5ffab61015..a24711374c 100644 --- a/wt-status.c +++ b/wt-status.c @@ -453,8 +453,8 @@ static void wt_status_collect_changed_cb(struct diff_queue_struct *q, d->worktree_status = p->status; if (S_ISGITLINK(p->two->mode)) { d->dirty_submodule = p->two->dirty_submodule; - d->new_submodule_commits = !!oidcmp(&p->one->oid, - &p->two->oid); + d->new_submodule_commits = !oideq(&p->one->oid, + &p->two->oid); if (s->status_format == STATUS_FORMAT_SHORT) d->worktree_status = short_submodule_status(d); } @@ -540,10 +540,12 @@ static void wt_status_collect_updated_cb(struct diff_queue_struct *q, /* Leave {mode,oid}_head zero for an add. */ d->mode_index = p->two->mode; oidcpy(&d->oid_index, &p->two->oid); + s->committable = 1; break; case DIFF_STATUS_DELETED: d->mode_head = p->one->mode; oidcpy(&d->oid_head, &p->one->oid); + s->committable = 1; /* Leave {mode,oid}_index zero for a delete. */ break; @@ -561,6 +563,7 @@ static void wt_status_collect_updated_cb(struct diff_queue_struct *q, d->mode_index = p->two->mode; oidcpy(&d->oid_head, &p->one->oid); oidcpy(&d->oid_index, &p->two->oid); + s->committable = 1; break; case DIFF_STATUS_UNMERGED: d->stagemask = unmerged_mask(p->two->path); @@ -582,7 +585,7 @@ static void wt_status_collect_changes_worktree(struct wt_status *s) { struct rev_info rev; - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); setup_revisions(0, NULL, &rev, NULL); rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK; rev.diffopt.flags.dirty_submodules = 1; @@ -607,7 +610,7 @@ static void wt_status_collect_changes_index(struct wt_status *s) struct rev_info rev; struct setup_revision_opt opt; - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); memset(&opt, 0, sizeof(opt)); opt.def = s->is_initial ? empty_tree_oid_hex() : s->reference; setup_revisions(0, NULL, &rev, &opt); @@ -665,11 +668,13 @@ static void wt_status_collect_changes_initial(struct wt_status *s) * code will output the stage values directly and not use the * values in these fields. */ + s->committable = 1; } else { d->index_status = DIFF_STATUS_ADDED; /* Leave {mode,oid}_head zero for adds. 
*/ d->mode_index = ce->ce_mode; oidcpy(&d->oid_index, &ce->oid); + s->committable = 1; } } } @@ -724,15 +729,38 @@ static void wt_status_collect_untracked(struct wt_status *s) s->untracked_in_ms = (getnanotime() - t_begin) / 1000000; } +static int has_unmerged(struct wt_status *s) +{ + int i; + + for (i = 0; i < s->change.nr; i++) { + struct wt_status_change_data *d; + d = s->change.items[i].util; + if (d->stagemask) + return 1; + } + return 0; +} + void wt_status_collect(struct wt_status *s) { wt_status_collect_changes_worktree(s); - if (s->is_initial) wt_status_collect_changes_initial(s); else wt_status_collect_changes_index(s); wt_status_collect_untracked(s); + + wt_status_get_state(&s->state, s->branch && !strcmp(s->branch, "HEAD")); + if (s->state.merge_in_progress && !has_unmerged(s)) + s->committable = 1; +} + +void wt_status_collect_free_buffers(struct wt_status *s) +{ + free(s->state.branch); + free(s->state.onto); + free(s->state.detached_from); } static void wt_longstatus_print_unmerged(struct wt_status *s) @@ -773,7 +801,6 @@ static void wt_longstatus_print_updated(struct wt_status *s) continue; if (!shown_header) { wt_longstatus_print_cached_header(s); - s->commitable = 1; shown_header = 1; } wt_longstatus_print_change_data(s, WT_STATUS_UPDATED, it); @@ -982,7 +1009,7 @@ static void wt_longstatus_print_verbose(struct wt_status *s) int dirty_submodules; const char *c = color(WT_STATUS_HEADER, s); - init_revisions(&rev, NULL); + repo_init_revisions(the_repository, &rev, NULL); rev.diffopt.flags.allow_textconv = 1; rev.diffopt.ita_invisible_in_index = 1; @@ -1008,7 +1035,7 @@ static void wt_longstatus_print_verbose(struct wt_status *s) rev.diffopt.use_color = 0; wt_status_add_cut_line(s->fp); } - if (s->verbose > 1 && s->commitable) { + if (s->verbose > 1 && s->committable) { /* print_updated() printed a header, so do we */ if (s->fp != stdout) wt_longstatus_print_trailer(s); @@ -1063,22 +1090,8 @@ static void wt_longstatus_print_tracking(struct wt_status *s) strbuf_release(&sb); } -static int has_unmerged(struct wt_status *s) -{ - int i; - - for (i = 0; i < s->change.nr; i++) { - struct wt_status_change_data *d; - d = s->change.items[i].util; - if (d->stagemask) - return 1; - } - return 0; -} - static void show_merge_in_progress(struct wt_status *s, - struct wt_status_state *state, - const char *color) + const char *color) { if (has_unmerged(s)) { status_printf_ln(s, color, _("You have unmerged paths.")); @@ -1089,7 +1102,6 @@ static void show_merge_in_progress(struct wt_status *s, _(" (use \"git merge --abort\" to abort the merge)")); } } else { - s-> commitable = 1; status_printf_ln(s, color, _("All conflicts fixed but you are still merging.")); if (s->hints) @@ -1100,16 +1112,15 @@ static void show_merge_in_progress(struct wt_status *s, } static void show_am_in_progress(struct wt_status *s, - struct wt_status_state *state, const char *color) { status_printf_ln(s, color, _("You are in the middle of an am session.")); - if (state->am_empty_patch) + if (s->state.am_empty_patch) status_printf_ln(s, color, _("The current patch is empty.")); if (s->hints) { - if (!state->am_empty_patch) + if (!s->state.am_empty_patch) status_printf_ln(s, color, _(" (fix conflicts and then run \"git am --continue\")")); status_printf_ln(s, color, @@ -1233,10 +1244,9 @@ static int read_rebase_todolist(const char *fname, struct string_list *lines) } static void show_rebase_information(struct wt_status *s, - struct wt_status_state *state, - const char *color) + const char *color) { - if 
(state->rebase_interactive_in_progress) { + if (s->state.rebase_interactive_in_progress) { int i; int nr_lines_to_show = 2; @@ -1287,28 +1297,26 @@ static void show_rebase_information(struct wt_status *s, } static void print_rebase_state(struct wt_status *s, - struct wt_status_state *state, - const char *color) + const char *color) { - if (state->branch) + if (s->state.branch) status_printf_ln(s, color, _("You are currently rebasing branch '%s' on '%s'."), - state->branch, - state->onto); + s->state.branch, + s->state.onto); else status_printf_ln(s, color, _("You are currently rebasing.")); } static void show_rebase_in_progress(struct wt_status *s, - struct wt_status_state *state, - const char *color) + const char *color) { struct stat st; - show_rebase_information(s, state, color); + show_rebase_information(s, color); if (has_unmerged(s)) { - print_rebase_state(s, state, color); + print_rebase_state(s, color); if (s->hints) { status_printf_ln(s, color, _(" (fix conflicts and then run \"git rebase --continue\")")); @@ -1317,17 +1325,18 @@ static void show_rebase_in_progress(struct wt_status *s, status_printf_ln(s, color, _(" (use \"git rebase --abort\" to check out the original branch)")); } - } else if (state->rebase_in_progress || !stat(git_path_merge_msg(the_repository), &st)) { - print_rebase_state(s, state, color); + } else if (s->state.rebase_in_progress || + !stat(git_path_merge_msg(the_repository), &st)) { + print_rebase_state(s, color); if (s->hints) status_printf_ln(s, color, _(" (all conflicts fixed: run \"git rebase --continue\")")); } else if (split_commit_in_progress(s)) { - if (state->branch) + if (s->state.branch) status_printf_ln(s, color, _("You are currently splitting a commit while rebasing branch '%s' on '%s'."), - state->branch, - state->onto); + s->state.branch, + s->state.onto); else status_printf_ln(s, color, _("You are currently splitting a commit during a rebase.")); @@ -1335,11 +1344,11 @@ static void show_rebase_in_progress(struct wt_status *s, status_printf_ln(s, color, _(" (Once your working directory is clean, run \"git rebase --continue\")")); } else { - if (state->branch) + if (s->state.branch) status_printf_ln(s, color, _("You are currently editing a commit while rebasing branch '%s' on '%s'."), - state->branch, - state->onto); + s->state.branch, + s->state.onto); else status_printf_ln(s, color, _("You are currently editing a commit during a rebase.")); @@ -1354,11 +1363,10 @@ static void show_rebase_in_progress(struct wt_status *s, } static void show_cherry_pick_in_progress(struct wt_status *s, - struct wt_status_state *state, - const char *color) + const char *color) { status_printf_ln(s, color, _("You are currently cherry-picking commit %s."), - find_unique_abbrev(&state->cherry_pick_head_oid, DEFAULT_ABBREV)); + find_unique_abbrev(&s->state.cherry_pick_head_oid, DEFAULT_ABBREV)); if (s->hints) { if (has_unmerged(s)) status_printf_ln(s, color, @@ -1373,11 +1381,10 @@ static void show_cherry_pick_in_progress(struct wt_status *s, } static void show_revert_in_progress(struct wt_status *s, - struct wt_status_state *state, - const char *color) + const char *color) { status_printf_ln(s, color, _("You are currently reverting commit %s."), - find_unique_abbrev(&state->revert_head_oid, DEFAULT_ABBREV)); + find_unique_abbrev(&s->state.revert_head_oid, DEFAULT_ABBREV)); if (s->hints) { if (has_unmerged(s)) status_printf_ln(s, color, @@ -1392,13 +1399,12 @@ static void show_revert_in_progress(struct wt_status *s, } static void show_bisect_in_progress(struct 
wt_status *s, - struct wt_status_state *state, - const char *color) + const char *color) { - if (state->branch) + if (s->state.branch) status_printf_ln(s, color, _("You are currently bisecting, started from branch '%s'."), - state->branch); + s->state.branch); else status_printf_ln(s, color, _("You are currently bisecting.")); @@ -1487,10 +1493,10 @@ static void wt_status_get_detached_from(struct wt_status_state *state) if (dwim_ref(cb.buf.buf, cb.buf.len, &oid, &ref) == 1 && /* sha1 is a commit? match without further lookup */ - (!oidcmp(&cb.noid, &oid) || + (oideq(&cb.noid, &oid) || /* perhaps sha1 is a tag, try to dereference to a commit */ ((commit = lookup_commit_reference_gently(the_repository, &oid, 1)) != NULL && - !oidcmp(&cb.noid, &commit->object.oid)))) { + oideq(&cb.noid, &commit->object.oid)))) { const char *from = ref; if (!skip_prefix(from, "refs/tags/", &from)) skip_prefix(from, "refs/remotes/", &from); @@ -1500,7 +1506,7 @@ static void wt_status_get_detached_from(struct wt_status_state *state) xstrdup(find_unique_abbrev(&cb.noid, DEFAULT_ABBREV)); oidcpy(&state->detached_oid, &cb.noid); state->detached_at = !get_oid("HEAD", &oid) && - !oidcmp(&oid, &state->detached_oid); + oideq(&oid, &state->detached_oid); free(ref); strbuf_release(&cb.buf); @@ -1553,6 +1559,7 @@ void wt_status_get_state(struct wt_status_state *state, struct object_id oid; if (!stat(git_path_merge_head(the_repository), &st)) { + wt_status_check_rebase(NULL, state); state->merge_in_progress = 1; } else if (wt_status_check_rebase(NULL, state)) { ; /* all set */ @@ -1572,48 +1579,49 @@ void wt_status_get_state(struct wt_status_state *state, wt_status_get_detached_from(state); } -static void wt_longstatus_print_state(struct wt_status *s, - struct wt_status_state *state) +static void wt_longstatus_print_state(struct wt_status *s) { const char *state_color = color(WT_STATUS_HEADER, s); - if (state->merge_in_progress) - show_merge_in_progress(s, state, state_color); - else if (state->am_in_progress) - show_am_in_progress(s, state, state_color); + struct wt_status_state *state = &s->state; + + if (state->merge_in_progress) { + if (state->rebase_interactive_in_progress) { + show_rebase_information(s, state_color); + fputs("\n", s->fp); + } + show_merge_in_progress(s, state_color); + } else if (state->am_in_progress) + show_am_in_progress(s, state_color); else if (state->rebase_in_progress || state->rebase_interactive_in_progress) - show_rebase_in_progress(s, state, state_color); + show_rebase_in_progress(s, state_color); else if (state->cherry_pick_in_progress) - show_cherry_pick_in_progress(s, state, state_color); + show_cherry_pick_in_progress(s, state_color); else if (state->revert_in_progress) - show_revert_in_progress(s, state, state_color); + show_revert_in_progress(s, state_color); if (state->bisect_in_progress) - show_bisect_in_progress(s, state, state_color); + show_bisect_in_progress(s, state_color); } static void wt_longstatus_print(struct wt_status *s) { const char *branch_color = color(WT_STATUS_ONBRANCH, s); const char *branch_status_color = color(WT_STATUS_HEADER, s); - struct wt_status_state state; - - memset(&state, 0, sizeof(state)); - wt_status_get_state(&state, - s->branch && !strcmp(s->branch, "HEAD")); if (s->branch) { const char *on_what = _("On branch "); const char *branch_name = s->branch; if (!strcmp(branch_name, "HEAD")) { branch_status_color = color(WT_STATUS_NOBRANCH, s); - if (state.rebase_in_progress || state.rebase_interactive_in_progress) { - if 
(state.rebase_interactive_in_progress) + if (s->state.rebase_in_progress || + s->state.rebase_interactive_in_progress) { + if (s->state.rebase_interactive_in_progress) on_what = _("interactive rebase in progress; onto "); else on_what = _("rebase in progress; onto "); - branch_name = state.onto; - } else if (state.detached_from) { - branch_name = state.detached_from; - if (state.detached_at) + branch_name = s->state.onto; + } else if (s->state.detached_from) { + branch_name = s->state.detached_from; + if (s->state.detached_at) on_what = _("HEAD detached at "); else on_what = _("HEAD detached from "); @@ -1630,10 +1638,7 @@ static void wt_longstatus_print(struct wt_status *s) wt_longstatus_print_tracking(s); } - wt_longstatus_print_state(s, &state); - free(state.branch); - free(state.onto); - free(state.detached_from); + wt_longstatus_print_state(s); if (s->is_initial) { status_printf_ln(s, color(WT_STATUS_HEADER, s), "%s", ""); @@ -1665,14 +1670,14 @@ static void wt_longstatus_print(struct wt_status *s) "new files yourself (see 'git help status')."), s->untracked_in_ms / 1000.0); } - } else if (s->commitable) + } else if (s->committable) status_printf_ln(s, GIT_COLOR_NORMAL, _("Untracked files not listed%s"), s->hints ? _(" (use -u option to show untracked files)") : ""); if (s->verbose) wt_longstatus_print_verbose(s); - if (!s->commitable) { + if (!s->committable) { if (s->amend) status_printf_ln(s, GIT_COLOR_NORMAL, _("No changes")); else if (s->nowarn) @@ -1937,13 +1942,9 @@ static void wt_porcelain_v2_print_tracking(struct wt_status *s) struct branch *branch; const char *base; const char *branch_name; - struct wt_status_state state; int ab_info, nr_ahead, nr_behind; char eol = s->null_termination ? '\0' : '\n'; - memset(&state, 0, sizeof(state)); - wt_status_get_state(&state, s->branch && !strcmp(s->branch, "HEAD")); - fprintf(s->fp, "# branch.oid %s%c", (s->is_initial ? 
"(initial)" : sha1_to_hex(s->sha1_commit)), eol); @@ -1954,10 +1955,11 @@ static void wt_porcelain_v2_print_tracking(struct wt_status *s) if (!strcmp(s->branch, "HEAD")) { fprintf(s->fp, "# branch.head %s%c", "(detached)", eol); - if (state.rebase_in_progress || state.rebase_interactive_in_progress) - branch_name = state.onto; - else if (state.detached_from) - branch_name = state.detached_from; + if (s->state.rebase_in_progress || + s->state.rebase_interactive_in_progress) + branch_name = s->state.onto; + else if (s->state.detached_from) + branch_name = s->state.detached_from; else branch_name = ""; } else { @@ -1991,10 +1993,6 @@ static void wt_porcelain_v2_print_tracking(struct wt_status *s) } } } - - free(state.branch); - free(state.onto); - free(state.detached_from); } /* @@ -2314,7 +2312,7 @@ int has_unstaged_changes(int ignore_submodules) struct rev_info rev_info; int result; - init_revisions(&rev_info, NULL); + repo_init_revisions(the_repository, &rev_info, NULL); if (ignore_submodules) { rev_info.diffopt.flags.ignore_submodules = 1; rev_info.diffopt.flags.override_submodule_config = 1; @@ -2336,7 +2334,7 @@ int has_uncommitted_changes(int ignore_submodules) if (is_cache_unborn()) return 0; - init_revisions(&rev_info, NULL); + repo_init_revisions(the_repository, &rev_info, NULL); if (ignore_submodules) rev_info.diffopt.flags.ignore_submodules = 1; rev_info.diffopt.flags.quick = 1; diff --git a/wt-status.h b/wt-status.h index 1673d146fa..1fcf93afbf 100644 --- a/wt-status.h +++ b/wt-status.h @@ -64,6 +64,24 @@ enum wt_status_format { STATUS_FORMAT_UNSPECIFIED }; +struct wt_status_state { + int merge_in_progress; + int am_in_progress; + int am_empty_patch; + int rebase_in_progress; + int rebase_interactive_in_progress; + int cherry_pick_in_progress; + int bisect_in_progress; + int revert_in_progress; + int detached_at; + char *branch; + char *onto; + char *detached_from; + struct object_id detached_oid; + struct object_id revert_head_oid; + struct object_id cherry_pick_head_oid; +}; + struct wt_status { int is_initial; char *branch; @@ -93,10 +111,11 @@ struct wt_status { int rename_score; int rename_limit; enum wt_status_format status_format; + struct wt_status_state state; unsigned char sha1_commit[GIT_MAX_RAWSZ]; /* when not Initial */ /* These are computed during processing of the individual sections */ - int commitable; + int committable; int workdir_dirty; const char *index_file; FILE *fp; @@ -107,29 +126,12 @@ struct wt_status { uint32_t untracked_in_ms; }; -struct wt_status_state { - int merge_in_progress; - int am_in_progress; - int am_empty_patch; - int rebase_in_progress; - int rebase_interactive_in_progress; - int cherry_pick_in_progress; - int bisect_in_progress; - int revert_in_progress; - int detached_at; - char *branch; - char *onto; - char *detached_from; - struct object_id detached_oid; - struct object_id revert_head_oid; - struct object_id cherry_pick_head_oid; -}; - size_t wt_status_locate_end(const char *s, size_t len); void wt_status_add_cut_line(FILE *fp); void wt_status_prepare(struct wt_status *s); void wt_status_print(struct wt_status *s); void wt_status_collect(struct wt_status *s); +void wt_status_collect_free_buffers(struct wt_status *s); void wt_status_get_state(struct wt_status_state *state, int get_detached_from); int wt_status_check_rebase(const struct worktree *wt, struct wt_status_state *state); diff --git a/xdiff-interface.c b/xdiff-interface.c index ec6e574e4a..80f060d278 100644 --- a/xdiff-interface.c +++ b/xdiff-interface.c @@ -9,54 +9,26 @@ 
#include "xdiff/xutils.h" struct xdiff_emit_state { - xdiff_emit_consume_fn consume; + xdiff_emit_hunk_fn hunk_fn; + xdiff_emit_line_fn line_fn; void *consume_callback_data; struct strbuf remainder; }; -static int parse_num(char **cp_p, int *num_p) +static int xdiff_out_hunk(void *priv_, + long old_begin, long old_nr, + long new_begin, long new_nr, + const char *func, long funclen) { - char *cp = *cp_p; - int num = 0; + struct xdiff_emit_state *priv = priv_; - while ('0' <= *cp && *cp <= '9') - num = num * 10 + *cp++ - '0'; - if (!(cp - *cp_p)) - return -1; - *cp_p = cp; - *num_p = num; - return 0; -} + if (priv->remainder.len) + BUG("xdiff emitted hunk in the middle of a line"); -int parse_hunk_header(char *line, int len, - int *ob, int *on, - int *nb, int *nn) -{ - char *cp; - cp = line + 4; - if (parse_num(&cp, ob)) { - bad_line: - return error("malformed diff output: %s", line); - } - if (*cp == ',') { - cp++; - if (parse_num(&cp, on)) - goto bad_line; - } - else - *on = 1; - if (*cp++ != ' ' || *cp++ != '+') - goto bad_line; - if (parse_num(&cp, nb)) - goto bad_line; - if (*cp == ',') { - cp++; - if (parse_num(&cp, nn)) - goto bad_line; - } - else - *nn = 1; - return -!!memcmp(cp, " @@", 3); + priv->hunk_fn(priv->consume_callback_data, + old_begin, old_nr, new_begin, new_nr, + func, funclen); + return 0; } static void consume_one(void *priv_, char *s, unsigned long size) @@ -67,7 +39,7 @@ static void consume_one(void *priv_, char *s, unsigned long size) unsigned long this_size; ep = memchr(s, '\n', size); this_size = (ep == NULL) ? size : (ep - s + 1); - priv->consume(priv->consume_callback_data, s, this_size); + priv->line_fn(priv->consume_callback_data, s, this_size); size -= this_size; s += this_size; } @@ -78,6 +50,9 @@ static int xdiff_outf(void *priv_, mmbuffer_t *mb, int nbuf) struct xdiff_emit_state *priv = priv_; int i; + if (!priv->line_fn) + return 0; + for (i = 0; i < nbuf; i++) { if (mb[i].ptr[mb[i].size-1] != '\n') { /* Incomplete line */ @@ -140,8 +115,16 @@ int xdi_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, xdemitconf_t co return xdl_diff(&a, &b, xpp, xecfg, xecb); } +void discard_hunk_line(void *priv, + long ob, long on, long nb, long nn, + const char *func, long funclen) +{ +} + int xdi_diff_outf(mmfile_t *mf1, mmfile_t *mf2, - xdiff_emit_consume_fn fn, void *consume_callback_data, + xdiff_emit_hunk_fn hunk_fn, + xdiff_emit_line_fn line_fn, + void *consume_callback_data, xpparam_t const *xpp, xdemitconf_t const *xecfg) { int ret; @@ -149,10 +132,13 @@ int xdi_diff_outf(mmfile_t *mf1, mmfile_t *mf2, xdemitcb_t ecb; memset(&state, 0, sizeof(state)); - state.consume = fn; + state.hunk_fn = hunk_fn; + state.line_fn = line_fn; state.consume_callback_data = consume_callback_data; memset(&ecb, 0, sizeof(ecb)); - ecb.outf = xdiff_outf; + if (hunk_fn) + ecb.out_hunk = xdiff_out_hunk; + ecb.out_line = xdiff_outf; ecb.priv = &state; strbuf_init(&state.remainder, 0); ret = xdi_diff(mf1, mf2, xpp, xecfg, &ecb); @@ -186,7 +172,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid) unsigned long size; enum object_type type; - if (!oidcmp(oid, &null_oid)) { + if (oideq(oid, &null_oid)) { ptr->ptr = xstrdup(""); ptr->size = 0; return; diff --git a/xdiff-interface.h b/xdiff-interface.h index 135fc05d72..2d41fffd4c 100644 --- a/xdiff-interface.h +++ b/xdiff-interface.h @@ -11,15 +11,18 @@ */ #define MAX_XDIFF_SIZE (1024UL * 1024 * 1023) -typedef void (*xdiff_emit_consume_fn)(void *, char *, unsigned long); +typedef void (*xdiff_emit_line_fn)(void *, char *, 
unsigned long); +typedef void (*xdiff_emit_hunk_fn)(void *data, + long old_begin, long old_nr, + long new_begin, long new_nr, + const char *func, long funclen); int xdi_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, xdemitconf_t const *xecfg, xdemitcb_t *ecb); int xdi_diff_outf(mmfile_t *mf1, mmfile_t *mf2, - xdiff_emit_consume_fn fn, void *consume_callback_data, + xdiff_emit_hunk_fn hunk_fn, + xdiff_emit_line_fn line_fn, + void *consume_callback_data, xpparam_t const *xpp, xdemitconf_t const *xecfg); -int parse_hunk_header(char *line, int len, - int *ob, int *on, - int *nb, int *nn); int read_mmfile(mmfile_t *ptr, const char *filename); void read_mmblob(mmfile_t *ptr, const struct object_id *oid); int buffer_is_binary(const char *ptr, unsigned long size); @@ -30,6 +33,14 @@ extern int git_xmerge_config(const char *var, const char *value, void *cb); extern int git_xmerge_style; /* + * Can be used as a no-op hunk_fn for xdi_diff_outf(), since a NULL + * one just sends the hunk line to the line_fn callback). + */ +void discard_hunk_line(void *priv, + long ob, long on, long nb, long nn, + const char *func, long funclen); + +/* * Compare the strings l1 with l2 which are of size s1 and s2 respectively. * Returns 1 if the strings are deemed equal, 0 otherwise. * The `flags` given as XDF_WHITESPACE_FLAGS determine how white spaces diff --git a/xdiff/xdiff.h b/xdiff/xdiff.h index 2356da5f78..b158369020 100644 --- a/xdiff/xdiff.h +++ b/xdiff/xdiff.h @@ -86,7 +86,11 @@ typedef struct s_xpparam { typedef struct s_xdemitcb { void *priv; - int (*outf)(void *, mmbuffer_t *, int); + int (*out_hunk)(void *, + long old_begin, long old_nr, + long new_begin, long new_nr, + const char *func, long funclen); + int (*out_line)(void *, mmbuffer_t *, int); } xdemitcb_t; typedef long (*find_func_t)(const char *line, long line_len, char *buffer, long buffer_size, void *priv); diff --git a/xdiff/xutils.c b/xdiff/xutils.c index 88e5995535..963e1c58b9 100644 --- a/xdiff/xutils.c +++ b/xdiff/xutils.c @@ -54,7 +54,7 @@ int xdl_emit_diffrec(char const *rec, long size, char const *pre, long psize, mb[2].size = strlen(mb[2].ptr); i++; } - if (ecb->outf(ecb->priv, mb, i) < 0) { + if (ecb->out_line(ecb->priv, mb, i) < 0) { return -1; } @@ -344,8 +344,9 @@ int xdl_num_out(char *out, long val) { return str - out; } -int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2, - const char *func, long funclen, xdemitcb_t *ecb) { +static int xdl_format_hunk_hdr(long s1, long c1, long s2, long c2, + const char *func, long funclen, + xdemitcb_t *ecb) { int nb = 0; mmbuffer_t mb; char buf[128]; @@ -387,9 +388,21 @@ int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2, mb.ptr = buf; mb.size = nb; - if (ecb->outf(ecb->priv, &mb, 1) < 0) + if (ecb->out_line(ecb->priv, &mb, 1) < 0) return -1; + return 0; +} +int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2, + const char *func, long funclen, + xdemitcb_t *ecb) { + if (!ecb->out_hunk) + return xdl_format_hunk_hdr(s1, c1, s2, c2, func, funclen, ecb); + if (ecb->out_hunk(ecb->priv, + c1 ? s1 : s1 - 1, c1, + c2 ? s2 : s2 - 1, c2, + func, funclen) < 0) + return -1; return 0; } |
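
The tree-walk.c hunk corrects rows #8 and #18 of the truth table and adds an early return for them: when the positive pathspec already said "all entries interesting" but the negative (":(exclude)") pathspec matched nothing at this entry, the entry is still interesting, it just cannot be taken wholesale because the exclude may match something deeper. Below is a minimal standalone sketch of that combination rule; the enum values mirror tree-walk.h, while the combine() helper and its final fallback are illustrative simplifications, not the full function.

/* Sketch of the combination rule changed in tree_entry_interesting():
 * a positive result of "all entries interesting" must not be
 * downgraded just because the exclude pattern said nothing about this
 * entry (rows #8 and #18 of the table in that hunk). */
#include <stdio.h>

enum interesting {
	all_entries_not_interesting = -1,
	entry_not_interesting = 0,
	entry_interesting = 1,
	all_entries_interesting = 2
};

static enum interesting combine(enum interesting positive,
				enum interesting negative)
{
	/* #8, #18: still interesting, but descend instead of taking
	 * everything, since the exclude may match deeper entries. */
	if (positive == all_entries_interesting &&
	    negative == entry_not_interesting)
		return entry_interesting;

	/* #3, #4, #7, #13, #14, #17: no useful negative match, the
	 * positive result stands. */
	if (negative <= entry_not_interesting)
		return positive;

	/* remaining rows of the table are elided in this sketch */
	return entry_not_interesting;
}

int main(void)
{
	printf("row #8: %d\n", combine(all_entries_interesting, entry_not_interesting));
	printf("row #7: %d\n", combine(all_entries_interesting, all_entries_not_interesting));
	return 0;
}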
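The unpack-trees.c changes add a clone mode (the new o->clone bit and state->clone): CE_MATCHED is cleared up front, the checkout code outside this hunk flags entries whose target file already existed, and report_collided_checkout() then sorts those names with fspathcmp so paths that differ only by case (or other filesystem folding) end up adjacent in the warning. Here is a small, self-contained sketch of that sort-then-group idea; strcasecmp stands in for fspathcmp, the path list is invented, and unlike the real code this sketch also compares adjacent entries instead of printing the whole list.

/* Minimal sketch: sort paths with a case-folding comparator so that
 * colliding names become neighbours, then report adjacent equals. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

static int icase_cmp(const void *a, const void *b)
{
	return strcasecmp(*(const char *const *)a, *(const char *const *)b);
}

int main(void)
{
	const char *paths[] = {
		"README", "readme", "src/Main.c", "SRC/main.c", "docs.txt"
	};
	size_t n = sizeof(paths) / sizeof(paths[0]), i;

	qsort(paths, n, sizeof(*paths), icase_cmp);
	for (i = 1; i < n; i++)
		if (!strcasecmp(paths[i - 1], paths[i]))
			printf("colliding pair: '%s' and '%s'\n",
			       paths[i - 1], paths[i]);
	return 0;
}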
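The traverse_by_cache_tree() fast path walks index entries directly when every input tree is identical to the cache-tree at the current path. To find where to start, index_pos_by_traverse_info() looks up "<dir>/" with index_name_pos(), turns the negative "not found" return into an insertion point (-pos - 1), and sanity-checks that the entry there really lives under the directory. A rough standalone analogue of that lookup over a sorted path array follows; the lower_bound() helper and the sample paths are illustrative, not Git's index code.

/* Find the insertion point of "<dir>/" in a sorted path list; that is
 * the position of the first entry inside the directory. */
#include <stdio.h>
#include <string.h>

static size_t lower_bound(const char **paths, size_t n, const char *key)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (strcmp(paths[mid], key) < 0)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;	/* first entry >= key, i.e. the insertion point */
}

int main(void)
{
	const char *cache[] = {
		"Makefile", "t/helper/test-tool.c", "t/t0001-init.sh", "zlib.c"
	};
	size_t n = sizeof(cache) / sizeof(cache[0]);
	const char *dir = "t/";	/* directory name with trailing slash */
	size_t pos = lower_bound(cache, n, dir);

	if (pos < n && !strncmp(cache[pos], dir, strlen(dir)))
		printf("first entry under %s: %s (pos %zu)\n", dir, cache[pos], pos);
	else
		printf("no entries under %s\n", dir);
	return 0;
}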
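In upload-pack.c the hand-rolled reachable() walk and its REACHABLE flag are removed; ok_to_give_up() now delegates to can_all_from_reach_with_flag() from commit-reach.c, asking whether every "want" can reach a commit flagged THEY_HAVE, bounded by the oldest "have" timestamp and a minimum generation number (zero here). The toy graph below sketches that question in isolation: the struct, the recursive walk and the dates are all made up for illustration, and it omits the SEEN-style marking and queueing the real traversal uses.

/* Toy reachability check: can this commit reach one flagged THEY_HAVE,
 * without walking past the timestamp cutoff? */
#include <stdio.h>

#define MAX_PARENTS 2
#define NCOMMITS 5
#define THEY_HAVE 1u

struct commit {
	unsigned flags;
	long date;
	int parents[MAX_PARENTS];	/* indices into the array, -1 = none */
};

static int reaches_flagged(struct commit *c, int idx, unsigned flag, long cutoff)
{
	int i;

	if (c[idx].flags & flag)
		return 1;
	if (c[idx].date < cutoff)
		return 0;
	for (i = 0; i < MAX_PARENTS; i++)
		if (c[idx].parents[i] >= 0 &&
		    reaches_flagged(c, c[idx].parents[i], flag, cutoff))
			return 1;
	return 0;
}

int main(void)
{
	/* 0 <- 1 <- 2 is the wanted history; 3 <- 4 is unrelated */
	struct commit c[NCOMMITS] = {
		{ THEY_HAVE, 100, { -1, -1 } },
		{ 0,         200, {  0, -1 } },
		{ 0,         300, {  1, -1 } },
		{ 0,         100, { -1, -1 } },
		{ 0,         200, {  3, -1 } },
	};

	printf("want 2 reaches a 'have'? %d\n", reaches_flagged(c, 2, THEY_HAVE, 50));
	printf("want 4 reaches a 'have'? %d\n", reaches_flagged(c, 4, THEY_HAVE, 50));
	return 0;
}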
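worktree.c gains parse_worktree_ref(), which splits "main-worktree/<ref>" and "worktrees/<name>/<ref>" into an optional worktree name and the per-worktree ref. The sketch below mirrors the function added in this diff, with a local skip_prefix() stand-in so it builds on its own; only the main() driver and its sample refs are invented.

/* Mirror of parse_worktree_ref() from the diff, made self-contained. */
#include <stdio.h>
#include <string.h>

static int skip_prefix(const char *str, const char *prefix, const char **out)
{
	size_t len = strlen(prefix);

	if (strncmp(str, prefix, len))
		return 0;
	*out = str + len;
	return 1;
}

static int parse_worktree_ref(const char *worktree_ref, const char **name,
			      int *name_length, const char **ref)
{
	if (skip_prefix(worktree_ref, "main-worktree/", &worktree_ref)) {
		if (!*worktree_ref)
			return -1;
		if (name)
			*name = NULL;
		if (name_length)
			*name_length = 0;
		if (ref)
			*ref = worktree_ref;
		return 0;
	}
	if (skip_prefix(worktree_ref, "worktrees/", &worktree_ref)) {
		const char *slash = strchr(worktree_ref, '/');

		if (!slash || slash == worktree_ref || !slash[1])
			return -1;
		if (name)
			*name = worktree_ref;
		if (name_length)
			*name_length = (int)(slash - worktree_ref);
		if (ref)
			*ref = slash + 1;
		return 0;
	}
	return -1;
}

int main(void)
{
	const char *name, *ref;
	int len;

	if (!parse_worktree_ref("worktrees/wt1/HEAD", &name, &len, &ref))
		printf("worktree '%.*s', ref '%s'\n", len, name, ref);
	if (!parse_worktree_ref("main-worktree/refs/bisect/bad", &name, &len, &ref))
		printf("main worktree ref '%s'\n", ref);
	return 0;
}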
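The xdiff changes split the old single outf callback into out_hunk and out_line, so consumers of xdi_diff_outf() receive hunk boundaries as numbers instead of re-parsing "@@ -a,b +c,d @@" headers (parse_hunk_header() is deleted outright), and xdl_emit_hunk_hdr() only formats a textual header when no out_hunk callback is registered. Here is a compact sketch of that optional-callback-with-textual-fallback pattern; the emitter struct and function names are illustrative rather than xdiff's actual types, and the real code additionally adjusts the start line when a count is zero.

/* Optional structured callback with a textual fallback: hand the hunk
 * numbers to out_hunk if present, otherwise format a header line and
 * push it through out_line. */
#include <stdio.h>

struct emitter {
	void *priv;
	int (*out_hunk)(void *priv, long old_begin, long old_nr,
			long new_begin, long new_nr);
	int (*out_line)(void *priv, const char *line);
};

static int emit_hunk_hdr(struct emitter *e, long s1, long c1, long s2, long c2)
{
	char buf[64];

	if (e->out_hunk)
		return e->out_hunk(e->priv, s1, c1, s2, c2);
	snprintf(buf, sizeof(buf), "@@ -%ld,%ld +%ld,%ld @@\n", s1, c1, s2, c2);
	return e->out_line(e->priv, buf);
}

static int print_line(void *priv, const char *line)
{
	fputs(line, stdout);
	return 0;
}

static int structured_hunk(void *priv, long ob, long on, long nb, long nn)
{
	printf("hunk: old %ld+%ld, new %ld+%ld\n", ob, on, nb, nn);
	return 0;
}

int main(void)
{
	struct emitter text = { NULL, NULL, print_line };
	struct emitter structured = { NULL, structured_hunk, print_line };

	emit_hunk_hdr(&text, 10, 3, 12, 4);		/* falls back to "@@ ... @@" */
	emit_hunk_hdr(&structured, 10, 3, 12, 4);	/* gets raw numbers */
	return 0;
}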