-rw-r--r--  .github/workflows/main.yml | 5
-rw-r--r--  Documentation/Makefile | 1
-rw-r--r--  Documentation/RelNotes/2.32.0.txt | 126
-rw-r--r--  Documentation/config/clone.txt | 4
-rw-r--r--  Documentation/config/pack.txt | 15
-rw-r--r--  Documentation/config/rebase.txt | 7
-rw-r--r--  Documentation/git-apply.txt | 11
-rw-r--r--  Documentation/git-clone.txt | 7
-rw-r--r--  Documentation/git-commit.txt | 14
-rw-r--r--  Documentation/git-format-patch.txt | 26
-rw-r--r--  Documentation/git-multi-pack-index.txt | 14
-rw-r--r--  Documentation/gitweb.conf.txt | 11
-rw-r--r--  Documentation/howto/coordinate-embargoed-releases.txt | 131
-rw-r--r--  Documentation/technical/api-simple-ipc.txt | 105
-rw-r--r--  Documentation/technical/multi-pack-index.txt | 5
-rw-r--r--  Documentation/technical/pack-format.txt | 83
-rw-r--r--  Documentation/user-manual.txt | 3
-rw-r--r--  INSTALL | 4
-rw-r--r--  Makefile | 20
-rw-r--r--  SECURITY.md | 51
-rw-r--r--  apply.c | 23
-rw-r--r--  archive.c | 30
-rw-r--r--  branch.c | 1
-rw-r--r--  builtin/checkout-index.c | 1
-rw-r--r--  builtin/checkout.c | 9
-rw-r--r--  builtin/clone.c | 35
-rw-r--r--  builtin/column.c | 8
-rw-r--r--  builtin/commit.c | 22
-rw-r--r--  builtin/credential-cache--daemon.c | 3
-rw-r--r--  builtin/credential-cache.c | 2
-rw-r--r--  builtin/difftool.c | 1
-rw-r--r--  builtin/fsck.c | 14
-rw-r--r--  builtin/index-pack.c | 30
-rw-r--r--  builtin/init-db.c | 32
-rw-r--r--  builtin/log.c | 33
-rw-r--r--  builtin/ls-files.c | 75
-rw-r--r--  builtin/ls-remote.c | 4
-rw-r--r--  builtin/ls-tree.c | 6
-rw-r--r--  builtin/mktag.c | 14
-rw-r--r--  builtin/multi-pack-index.c | 182
-rw-r--r--  builtin/pack-objects.c | 38
-rw-r--r--  builtin/range-diff.c | 2
-rw-r--r--  builtin/rebase.c | 12
-rw-r--r--  builtin/remote.c | 8
-rw-r--r--  builtin/repack.c | 2
-rw-r--r--  builtin/reset.c | 2
-rw-r--r--  builtin/revert.c | 4
-rw-r--r--  builtin/stash.c | 2
-rw-r--r--  builtin/symbolic-ref.c | 4
-rw-r--r--  builtin/unpack-objects.c | 3
-rw-r--r--  builtin/worktree.c | 10
-rw-r--r--  cache.h | 28
-rwxr-xr-x  ci/run-build-and-tests.sh | 1
-rw-r--r--  compat/precompose_utf8.c | 9
-rw-r--r--  compat/precompose_utf8.h | 1
-rw-r--r--  compat/simple-ipc/ipc-shared.c | 28
-rw-r--r--  compat/simple-ipc/ipc-unix-socket.c | 999
-rw-r--r--  compat/simple-ipc/ipc-win32.c | 751
-rw-r--r--  config.c | 16
-rw-r--r--  config.mak.uname | 2
-rw-r--r--  contrib/buildsystems/CMakeLists.txt | 34
-rw-r--r--  contrib/completion/git-completion.bash | 8
-rw-r--r--  convert.c | 154
-rw-r--r--  convert.h | 96
-rw-r--r--  csum-file.c | 33
-rw-r--r--  daemon.c | 8
-rw-r--r--  diffcore-rename.c | 271
-rw-r--r--  diffcore.h | 18
-rw-r--r--  entry.c | 87
-rw-r--r--  entry.h | 59
-rw-r--r--  fetch-pack.c | 43
-rw-r--r--  fetch-pack.h | 1
-rw-r--r--  fsck.c | 207
-rw-r--r--  fsck.h | 127
-rw-r--r--  git-compat-util.h | 5
-rwxr-xr-x  git-send-email.perl | 47
-rw-r--r--  git.c | 2
-rwxr-xr-x  gitweb/gitweb.perl | 34
-rw-r--r--  http.c | 15
-rw-r--r--  log-tree.c | 10
-rw-r--r--  merge-ort.c | 554
-rw-r--r--  merge-recursive.c | 43
-rw-r--r--  midx.c | 219
-rw-r--r--  midx.h | 11
-rw-r--r--  pack-bitmap.c | 25
-rw-r--r--  pack-bitmap.h | 4
-rw-r--r--  pack-revindex.c | 126
-rw-r--r--  pack-revindex.h | 53
-rw-r--r--  pack-write.c | 36
-rw-r--r--  pack.h | 1
-rw-r--r--  packfile.c | 3
-rw-r--r--  parse-options.c | 19
-rw-r--r--  parse-options.h | 35
-rw-r--r--  path.c | 1
-rw-r--r--  path.h | 2
-rw-r--r--  perl/Git.pm | 13
-rw-r--r--  pkt-line.c | 59
-rw-r--r--  pkt-line.h | 17
-rw-r--r--  ref-filter.c | 2
-rw-r--r--  revision.h | 2
-rw-r--r--  sequencer.c | 57
-rw-r--r--  sequencer.h | 6
-rw-r--r--  setup.c | 28
-rw-r--r--  simple-ipc.h | 239
-rw-r--r--  symlinks.c | 54
-rw-r--r--  t/helper/test-bitmap.c | 24
-rw-r--r--  t/helper/test-bloom.c | 2
-rw-r--r--  t/helper/test-read-midx.c | 24
-rw-r--r--  t/helper/test-simple-ipc.c | 787
-rw-r--r--  t/helper/test-tool.c | 2
-rw-r--r--  t/helper/test-tool.h | 2
-rwxr-xr-x  t/perf/p5310-pack-bitmaps.sh | 14
-rwxr-xr-x  t/t0052-simple-ipc.sh | 122
-rwxr-xr-x  t/t2021-checkout-overwrite.sh | 12
-rwxr-xr-x  t/t3060-ls-files-with-tree.sh | 41
-rwxr-xr-x  t/t3206-range-diff.sh | 24
-rwxr-xr-x  t/t3400-rebase.sh | 16
-rwxr-xr-x  t/t3510-cherry-pick-sequence.sh | 32
-rwxr-xr-x  t/t3512-cherry-pick-submodule.sh | 7
-rwxr-xr-x  t/t3513-revert-submodule.sh | 5
-rwxr-xr-x  t/t3800-mktag.sh | 2
-rwxr-xr-x  t/t4014-format-patch.sh | 34
-rwxr-xr-x  t/t4053-diff-no-index.sh | 60
-rwxr-xr-x  t/t4108-apply-threeway.sh | 70
-rwxr-xr-x  t/t5310-pack-bitmaps.sh | 38
-rwxr-xr-x  t/t5319-multi-pack-index.sh | 42
-rwxr-xr-x  t/t5572-pull-submodule.sh | 7
-rwxr-xr-x  t/t5601-clone.sh | 9
-rwxr-xr-x  t/t5606-clone-options.sh | 27
-rwxr-xr-x  t/t5611-clone-config.sh | 25
-rwxr-xr-x  t/t6300-for-each-ref.sh | 10
-rwxr-xr-x  t/t6423-merge-rename-directories.sh | 73
-rwxr-xr-x  t/t6428-merge-conflicts-sparse.sh | 158
-rwxr-xr-x  t/t6437-submodule-merge.sh | 5
-rwxr-xr-x  t/t6438-submodule-directory-file-conflicts.sh | 7
-rwxr-xr-x  t/t7007-show.sh | 39
-rwxr-xr-x  t/t7502-commit-porcelain.sh | 312
-rwxr-xr-x  t/t9001-send-email.sh | 57
-rw-r--r--  t/test-lib.sh | 2
-rw-r--r--  transport.c | 6
-rw-r--r--  transport.h | 4
-rw-r--r--  tree.c | 119
-rw-r--r--  tree.h | 18
-rw-r--r--  unix-socket.c | 53
-rw-r--r--  unix-socket.h | 12
-rw-r--r--  unix-stream-server.c | 125
-rw-r--r--  unix-stream-server.h | 33
-rw-r--r--  unpack-trees.c | 3
148 files changed, 7412 insertions, 1010 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 5f2f884b92..73856bafc9 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -186,6 +186,11 @@ jobs:
## Unzip and remove the artifact
unzip artifacts.zip
rm artifacts.zip
+ - name: initialize vcpkg
+ uses: actions/checkout@v2
+ with:
+ repository: 'microsoft/vcpkg'
+ path: 'compat/vcbuild/vcpkg'
- name: download vcpkg artifacts
shell: powershell
run: |
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 81d1bf7a04..874a01d7a8 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -76,6 +76,7 @@ SP_ARTICLES += howto/rebuild-from-update-hook
SP_ARTICLES += howto/rebase-from-internal-branch
SP_ARTICLES += howto/keep-canonical-history-correct
SP_ARTICLES += howto/maintain-git
+SP_ARTICLES += howto/coordinate-embargoed-releases
API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt)))
SP_ARTICLES += $(API_DOCS)
diff --git a/Documentation/RelNotes/2.32.0.txt b/Documentation/RelNotes/2.32.0.txt
index 6f69a521ac..c746ad11c9 100644
--- a/Documentation/RelNotes/2.32.0.txt
+++ b/Documentation/RelNotes/2.32.0.txt
@@ -40,6 +40,41 @@ UI, Workflows & Features
tweak both the message and the contents, and only the message,
respectively.
+ * When accessing a server with a URL like https://user:pass@site/, we
+   did not fall back to the basic authentication with the
+ credential material embedded in the URL after the "Negotiate"
+ authentication failed. Now we do.
+
+ * "git send-email" learned to honor the core.hooksPath configuration.
+
+ * "git format-patch -v<n>" learned to allow a reroll count that is
+ not an integer.
+
+ * "git commit" learned "--trailer <key>[=<value>]" option; together
+ with the interpret-trailers command, this will make it easier to
+ support custom trailers.
+
+ * "git clone --reject-shallow" option fails the clone as soon as we
+ notice that we are cloning from a shallow repository.
+
+ * A configuration variable has been added to force tips of certain
+ refs to be given a reachability bitmap.
+
+ * "gitweb" learned "e-mail privacy" feature to redact strings that
+ look like e-mail addresses on various pages.
+
+ * "git apply --3way" has always been "to fall back to 3-way merge
+ only when straight application fails". Swap the order of falling
+ back so that 3-way is always attempted first (only when the option
+ is given, of course) and then straight patch application is used as
+ a fallback when it fails.
+
+ * "git apply" now takes "--3way" and "--cached" at the same time, and
+   works and records results only in the index.
+
+ * The command line completion (in contrib/) has learned that
+ CHERRY_PICK_HEAD is a possible pseudo-ref.
+
Performance, Internal Implementation, Development Support etc.
@@ -58,6 +93,38 @@ Performance, Internal Implementation, Development Support etc.
* Reorganize Makefile to allow building git.o and other essential
objects without extra stuff needed only for testing.
+ * Preparatory API changes for parallel checkout.
+
+ * A simple IPC interface gets introduced to build services like
+ fsmonitor on top.
+
+ * Fsck API clean-up.
+
+ * SECURITY.md, aimed at individual contributors and end users,
+ has been introduced. Also a procedure to follow when preparing
+ embargoed releases has been spelled out.
+ (merge 09420b7648 js/security-md later to maint).
+
+ * Optimize "rev-list --use-bitmap-index --objects" corner case that
+ uses negative tags as the stopping points.
+
+ * CMake update for vsbuild.
+
+ * An on-disk reverse-index to map the in-pack location of an object
+ back to its object name across multiple packfiles is introduced.
+
+ * Generate [ec]tags under $(QUIET_GEN).
+
+ * Clean up the codepaths that implement the "git send-email --validate"
+   option and improve the message it produces.
+
+ * The last remnant of gettext-poison has been removed.
+
+ * The test framework has been taught to optionally turn the default
+ merge strategy to "ort" throughout the system where we use
+ three-way merges internally, like cherry-pick, rebase etc.,
+ primarily to enhance its test coverage (the strategy has been
+ available as an explicit "-s ort" choice).
Fixes since v2.31
@@ -66,29 +133,22 @@ Fixes since v2.31
* The fsmonitor interface read from its input without making sure
there is something to read from. This bug is new in 2.31
timeframe.
- (merge 097ea2c848 jh/fsmonitor-prework later to maint).
* The data structure used by fsmonitor interface was not properly
duplicated during an in-core merge, leading to use-after-free etc.
- (merge 4abc57848d js/fsmonitor-unpack-fix later to maint).
* "git bisect" reimplemented more in C during 2.30 timeframe did not
take an annotated tag as a good/bad endpoint well. This regression
has been corrected.
- (merge 7730f85594 jk/bisect-peel-tag-fix later to maint).
* Fix macros that can silently inject unintended null-statements.
- (merge 116affac3f rs/avoid-null-statement-after-macro-call later to maint).
* CALLOC_ARRAY() macro replaces many uses of xcalloc().
- (merge 1c57cc70ec rs/calloc-array later to maint).
* Update insn in Makefile comments to run fuzz-all target.
- (merge 68b5c3aa48 ah/make-fuzz-all-doc-update later to maint).
* Fix a corner case bug in "git mv" on case insensitive systems,
which was introduced in 2.29 timeframe.
- (merge 93c3d297b5 tb/git-mv-icase-fix later to maint).
* We had a code to diagnose and die cleanly when a required
clean/smudge filter is missing, but an assert before that
@@ -115,15 +175,57 @@ Fixes since v2.31
which has been corrected.
(merge 75555676ad bc/clone-bare-with-conflicting-config later to maint).
+ * When "git checkout" removes a path that does not exist in the
+ commit it is checking out, it wasn't careful enough not to follow
+ symbolic links, which has been corrected.
+ (merge fab78a0c3d mt/checkout-remove-nofollow later to maint).
+
+ * A few option description strings started with capital letters,
+ which were corrected.
+ (merge 5ee90326dc cc/downcase-opt-help later to maint).
+
+ * Plug or annotate remaining leaks that trigger while running the
+ very basic set of tests.
+ (merge 68ffe095a2 ah/plugleaks later to maint).
+
+ * The hashwrite() API uses a buffering mechanism to avoid calling
+ write(2) too frequently. This logic has been refactored to be
+ easier to understand.
+ (merge ddaf1f62e3 ds/clarify-hashwrite later to maint).
+
+ * "git cherry-pick/revert" with or without "--[no-]edit" did not spawn
+ the editor as expected (e.g. "revert --no-edit" after a conflict
+ still asked to edit the message), which has been corrected.
+ (merge 39edfd5cbc en/sequencer-edit-upon-conflict-fix later to maint).
+
+ * "git daemon" has been tightened against systems that take backslash
+ as directory separator.
+ (merge 9a7f1ce8b7 rs/daemon-sanitize-dir-sep later to maint).
+
+ * A NULL-dereference bug has been corrected in an error codepath in
+ "git for-each-ref", "git branch --list" etc.
+ (merge c685450880 jk/ref-filter-segfault-fix later to maint).
+
+ * Streamline the codepath to fix the UTF-8 encoding issues in the
+ argv[] and the prefix on macOS.
+ (merge c7d0e61016 tb/precompose-prefix-simplify later to maint).
+
+ * The command-line completion script (in contrib/) had a couple of
+ references that would have given a warning under the "-u" (nounset)
+ option.
+ (merge c5c0548d79 vs/completion-with-set-u later to maint).
+
* Other code cleanup, docfix, build fix, etc.
- (merge 486f4bd183 jc/calloc-fix later to maint).
- (merge 5f70859c15 jt/clone-unborn-head later to maint).
- (merge cfd409ed09 km/config-doc-typofix later to maint).
- (merge 8588aa8657 jk/slimmed-down later to maint).
- (merge 241b5d3ebe rs/xcalloc-takes-nelem-first later to maint).
(merge f451960708 dl/cat-file-doc-cleanup later to maint).
(merge 12604a8d0c sv/t9801-test-path-is-file-cleanup later to maint).
(merge ea7e63921c jr/doc-ignore-typofix later to maint).
(merge 23c781f173 ps/update-ref-trans-hook-doc later to maint).
(merge 42efa1231a jk/filter-branch-sha256 later to maint).
(merge 4c8e3dca6e tb/push-simple-uses-branch-merge-config later to maint).
+ (merge 6534d436a2 bs/asciidoctor-installation-hints later to maint).
+ (merge 47957485b3 ab/read-tree later to maint).
+ (merge 2be927f3d1 ab/diff-no-index-tests later to maint).
+ (merge 76593c09bb ab/detox-gettext-tests later to maint).
+ (merge 28e29ee38b jc/doc-format-patch-clarify later to maint).
+ (merge fc12b6fdde fm/user-manual-use-preface later to maint).
+ (merge dba94e3a85 cc/test-helper-bloom-usage-fix later to maint).
diff --git a/Documentation/config/clone.txt b/Documentation/config/clone.txt
index 47de36a5fe..7bcfbd18a5 100644
--- a/Documentation/config/clone.txt
+++ b/Documentation/config/clone.txt
@@ -2,3 +2,7 @@ clone.defaultRemoteName::
The name of the remote to create when cloning a repository. Defaults to
`origin`, and can be overridden by passing the `--origin` command-line
option to linkgit:git-clone[1].
+
+clone.rejectShallow::
+	Reject cloning a repository if it is a shallow one; this can be overridden by
+	passing the `--reject-shallow` option on the command line. See linkgit:git-clone[1].
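As an illustration of the new setting (the repository URL below is just a placeholder), the default can be set in configuration and overridden per invocation with the corresponding command-line option:

    git config --global clone.rejectShallow true
    # opt back in to accepting a shallow source for one particular clone:
    git clone --no-reject-shallow https://example.com/maybe-shallow.git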
diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt
index 3da4ea98e2..c0844d8d8e 100644
--- a/Documentation/config/pack.txt
+++ b/Documentation/config/pack.txt
@@ -122,6 +122,21 @@ pack.useSparse::
commits contain certain types of direct renames. Default is
`true`.
+pack.preferBitmapTips::
+ When selecting which commits will receive bitmaps, prefer a
+ commit at the tip of any reference that is a suffix of any value
+ of this configuration over any other commits in the "selection
+ window".
++
+Note that setting this configuration to `refs/foo` does not mean that
+the commits at the tips of `refs/foo/bar` and `refs/foo/baz` will
+necessarily be selected. This is because commits are selected for
+bitmaps from within a series of windows of variable length.
++
+If a commit at the tip of any reference which is a suffix of any value
+of this configuration is seen in a window, it is immediately given
+preference over any other commit in that window.
+
pack.writeBitmaps (deprecated)::
This is a deprecated synonym for `repack.writeBitmaps`.
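A hedged sketch of how the new pack.preferBitmapTips knob might be exercised (choosing `refs/tags` purely as an example); the bitmaps have to be rebuilt for the preference to take effect:

    git config pack.preferBitmapTips refs/tags
    git repack -a -d --write-bitmap-index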
diff --git a/Documentation/config/rebase.txt b/Documentation/config/rebase.txt
index 214f31b451..8c979cb20f 100644
--- a/Documentation/config/rebase.txt
+++ b/Documentation/config/rebase.txt
@@ -1,10 +1,3 @@
-rebase.useBuiltin::
- Unused configuration variable. Used in Git versions 2.20 and
- 2.21 as an escape hatch to enable the legacy shellscript
- implementation of rebase. Now the built-in rewrite of it in C
- is always used. Setting this will emit a warning, to alert any
- remaining users that setting this now does nothing.
-
rebase.backend::
Default backend to use for rebasing. Possible choices are
'apply' or 'merge'. In the future, if the merge backend gains
diff --git a/Documentation/git-apply.txt b/Documentation/git-apply.txt
index 91d9a8601c..aa1ae56a25 100644
--- a/Documentation/git-apply.txt
+++ b/Documentation/git-apply.txt
@@ -84,12 +84,13 @@ OPTIONS
-3::
--3way::
- When the patch does not apply cleanly, fall back on 3-way merge if
- the patch records the identity of blobs it is supposed to apply to,
- and we have those blobs available locally, possibly leaving the
+ Attempt 3-way merge if the patch records the identity of blobs it is supposed
+ to apply to and we have those blobs available locally, possibly leaving the
conflict markers in the files in the working tree for the user to
- resolve. This option implies the `--index` option, and is incompatible
- with the `--reject` and the `--cached` options.
+ resolve. This option implies the `--index` option unless the
+ `--cached` option is used, and is incompatible with the `--reject` option.
+ When used with the `--cached` option, any conflicts are left at higher stages
+ in the cache.
--build-fake-ancestor=<file>::
Newer 'git diff' output has embedded 'index information'
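A minimal sketch of the newly permitted combination (`fix.patch` is a placeholder); results, including any conflicts, are recorded only in the index:

    git apply --3way --cached fix.patch
    git ls-files -u    # list any unmerged (higher-stage) entries left in the index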
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index 02d9c19cec..3fe3810f1c 100644
--- a/Documentation/git-clone.txt
+++ b/Documentation/git-clone.txt
@@ -15,7 +15,7 @@ SYNOPSIS
[--dissociate] [--separate-git-dir <git dir>]
[--depth <depth>] [--[no-]single-branch] [--no-tags]
[--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules]
- [--[no-]remote-submodules] [--jobs <n>] [--sparse]
+ [--[no-]remote-submodules] [--jobs <n>] [--sparse] [--[no-]reject-shallow]
[--filter=<filter>] [--] <repository>
[<directory>]
@@ -149,6 +149,11 @@ objects from the source repository into a pack in the cloned repository.
--no-checkout::
No checkout of HEAD is performed after the clone is complete.
+--[no-]reject-shallow::
+ Fail if the source repository is a shallow repository.
+ The 'clone.rejectShallow' configuration variable can be used to
+ specify the default.
+
--bare::
Make a 'bare' Git repository. That is, instead of
creating `<directory>` and placing the administrative
diff --git a/Documentation/git-commit.txt b/Documentation/git-commit.txt
index 3c69f461c9..340c5fbb48 100644
--- a/Documentation/git-commit.txt
+++ b/Documentation/git-commit.txt
@@ -14,7 +14,8 @@ SYNOPSIS
[--allow-empty-message] [--no-verify] [-e] [--author=<author>]
[--date=<date>] [--cleanup=<mode>] [--[no-]status]
[-i | -o] [--pathspec-from-file=<file> [--pathspec-file-nul]]
- [-S[<keyid>]] [--] [<pathspec>...]
+ [(--trailer <token>[(=|:)<value>])...] [-S[<keyid>]]
+ [--] [<pathspec>...]
DESCRIPTION
-----------
@@ -199,6 +200,17 @@ The `-m` option is mutually exclusive with `-c`, `-C`, and `-F`.
include::signoff-option.txt[]
+--trailer <token>[(=|:)<value>]::
+ Specify a (<token>, <value>) pair that should be applied as a
+ trailer. (e.g. `git commit --trailer "Signed-off-by:C O Mitter \
+ <committer@example.com>" --trailer "Helped-by:C O Mitter \
+ <committer@example.com>"` will add the "Signed-off-by" trailer
+ and the "Helped-by" trailer to the commit message.)
+ The `trailer.*` configuration variables
+ (linkgit:git-interpret-trailers[1]) can be used to define if
+ a duplicated trailer is omitted, where in the run of trailers
+ each trailer would appear, and other details.
+
-n::
--no-verify::
This option bypasses the pre-commit and commit-msg hooks.
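A small usage sketch (names and addresses are placeholders); the `trailer.ifExists` setting from linkgit:git-interpret-trailers[1] is shown only to illustrate how the `trailer.*` configuration interacts with the new option:

    git -c trailer.ifExists=addIfDifferent commit \
        --trailer "Reviewed-by: A U Thor <author@example.com>" \
        --trailer "Helped-by: C O Mitter <committer@example.com>"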
diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt
index 3e49bf2210..911da181a1 100644
--- a/Documentation/git-format-patch.txt
+++ b/Documentation/git-format-patch.txt
@@ -36,11 +36,28 @@ SYNOPSIS
DESCRIPTION
-----------
-Prepare each commit with its patch in
-one file per commit, formatted to resemble UNIX mailbox format.
+Prepare each commit with its "patch" in
+one "message" per commit, formatted to resemble a UNIX mailbox.
The output of this command is convenient for e-mail submission or
for use with 'git am'.
+A "message" generated by the command consists of three parts:
+
+* A brief metadata header that begins with `From <commit>`
+ with a fixed `Mon Sep 17 00:00:00 2001` datestamp to help programs
+ like "file(1)" to recognize that the file is an output from this
+ command, fields that record the author identity, the author date,
+ and the title of the change (taken from the first paragraph of the
+ commit log message).
+
+* The second and subsequent paragraphs of the commit log message.
+
+* The "patch", which is the "diff -p --stat" output (see
+ linkgit:git-diff[1]) between the commit and its parent.
+
+The log message and the patch are separated by a three-dash line.
+
There are two ways to specify which commits to operate on.
1. A single commit, <since>, specifies that the commits leading
@@ -221,6 +238,11 @@ populated with placeholder text.
`--subject-prefix` option) has ` v<n>` appended to it. E.g.
`--reroll-count=4` may produce `v4-0001-add-makefile.patch`
file that has "Subject: [PATCH v4 1/20] Add makefile" in it.
+ `<n>` does not have to be an integer (e.g. "--reroll-count=4.4",
+ or "--reroll-count=4rev2" are allowed), but the downside of
+ using such a reroll-count is that the range-diff/interdiff
+ with the previous version does not state exactly which
+	version the new iteration is compared against.
--to=<email>::
Add a `To:` header to the email headers. This is in addition
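For example (the commit range is illustrative), a fractional reroll count simply flows into the subject prefix and the generated file names:

    git format-patch -v2.1 --cover-letter -3
    # emits v2.1-0000-cover-letter.patch, v2.1-0001-....patch, ...
    # with "[PATCH v2.1 n/3]" subjects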
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
index eb0caa0439..ffd601bc17 100644
--- a/Documentation/git-multi-pack-index.txt
+++ b/Documentation/git-multi-pack-index.txt
@@ -9,7 +9,8 @@ git-multi-pack-index - Write and verify multi-pack-indexes
SYNOPSIS
--------
[verse]
-'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress] <subcommand>
+'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress]
+ [--preferred-pack=<pack>] <subcommand>
DESCRIPTION
-----------
@@ -30,7 +31,16 @@ OPTIONS
The following subcommands are available:
write::
- Write a new MIDX file.
+ Write a new MIDX file. The following options are available for
+ the `write` sub-command:
++
+--
+ --preferred-pack=<pack>::
+ Optionally specify the tie-breaking pack used when
+ multiple packs contain the same object. If not given,
+ ties are broken in favor of the pack with the lowest
+ mtime.
+--
verify::
Verify the contents of the MIDX file.
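A hedged sketch of the new option (the pack name is a placeholder); the preferred pack breaks ties for objects that appear in more than one pack, and the result can be checked with the existing `verify` subcommand:

    git multi-pack-index write --preferred-pack=pack-1234abcd.pack
    git multi-pack-index verify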
diff --git a/Documentation/gitweb.conf.txt b/Documentation/gitweb.conf.txt
index 7963a79ba9..34b1d6e224 100644
--- a/Documentation/gitweb.conf.txt
+++ b/Documentation/gitweb.conf.txt
@@ -751,6 +751,17 @@ default font sizes or lineheights are changed (e.g. via adding extra
CSS stylesheet in `@stylesheets`), it may be appropriate to change
these values.
+email-privacy::
+	Redact e-mail addresses from the generated HTML and other content.
+ This obscures e-mail addresses retrieved from the author/committer
+ and comment sections of the Git log.
+ It is meant to hinder web crawlers that harvest and abuse addresses.
+ Such crawlers may not respect robots.txt.
+ Note that users and user tools also see the addresses as redacted.
+ If Gitweb is not the final step in a workflow then subsequent steps
+ may misbehave because of the redacted information they receive.
+ Disabled by default.
+
highlight::
Server-side syntax highlight support in "blob" view. It requires
`$highlight_bin` program to be available (see the description of
diff --git a/Documentation/howto/coordinate-embargoed-releases.txt b/Documentation/howto/coordinate-embargoed-releases.txt
new file mode 100644
index 0000000000..601aae88e9
--- /dev/null
+++ b/Documentation/howto/coordinate-embargoed-releases.txt
@@ -0,0 +1,131 @@
+Content-type: text/asciidoc
+Abstract: When a critical vulnerability is discovered and fixed, we follow this
+ script to coordinate a public release.
+
+How we coordinate embargoed releases
+====================================
+
+To protect Git users from critical vulnerabilities, we do not just release
+fixed versions like regular maintenance releases. Instead, we coordinate
+releases with packagers, keeping the fixes under an embargo until the release
+date. That way, users will have a chance to upgrade on that date, no matter
+what Operating System or distribution they run.
+
+Open a Security Advisory draft
+------------------------------
+
+The first step is to https://github.com/git/git/security/advisories/new[open an
+advisory]. Technically, it is not necessary, but it is convenient and saves a
+bit of hassle. This advisory can also be used to obtain the CVE number and it
+will give us a private fork associated with it that can be used to collaborate
+on a fix.
+
+Release date of the embargoed version
+-------------------------------------
+
+If the vulnerability affects Windows users, we want to have our friends over at
+Visual Studio on board. This means we need to target a "Patch Tuesday" (i.e. a
+second Tuesday of the month), at the minimum three weeks from heads-up to
+coordinated release.
+
+If the vulnerability affects the server side, or can benefit from scans on the
+server side (i.e. if `git fsck` can detect an attack), it is important to give
+all involved Git repository hosting sites enough time to scan all of those
+repositories.
+
+Notifying the Linux distributions
+---------------------------------
+
+At most two weeks before the release date, we need to send a notification to
+distros@vs.openwall.org, preferably less than 7 days before the release date.
+This will reach most (all?) Linux distributions. See an example below, and the
+guidelines for this mailing list at
+https://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists[here].
+
+Once the version has been published, we send a note about that to oss-security.
+As an example, see https://www.openwall.com/lists/oss-security/2019/12/13/1[the
+v2.24.1 mail];
+https://oss-security.openwall.org/wiki/mailing-lists/oss-security[Here] are
+their guidelines.
+
+The mail to oss-security should also describe the exploit, and give credit to
+the reporter(s): security researchers still receive too little respect for the
+invaluable service they provide, and public credit goes a long way to keep them
+paid by their respective organizations.
+
+Technically, describing any exploit can be delayed up to 7 days, but we usually
+refrain from delaying and include it right away.
+
+As a courtesy we typically attach a Git bundle (as `.tar.xz` because the list
+will drop `.bundle` attachments) in the mail to distros@ so that the involved
+parties can take care of integrating/backporting them. This bundle is typically
+created using a command like this:
+
+ git bundle create cve-xxx.bundle ^origin/master vA.B.C vD.E.F
+ tar cJvf cve-xxx.bundle.tar.xz cve-xxx.bundle
+
+Example mail to distros@vs.openwall.org
+---------------------------------------
+
+....
+To: distros@vs.openwall.org
+Cc: git-security@googlegroups.com, <other people involved in the report/fix>
+Subject: [vs] Upcoming Git security fix release
+
+Team,
+
+The Git project will release new versions on <date> at 10am Pacific Time or
+soon thereafter. I have attached a Git bundle (embedded in a `.tar.xz` to avoid
+it being dropped) which you can fetch into a clone of
+https://github.com/git/git via `git fetch --tags /path/to/cve-xxx.bundle`,
+containing the tags for versions <versions>.
+
+You can verify with `git tag -v <tag>` that the versions were signed by
+the Git maintainer, using the same GPG key as e.g. v2.24.0.
+
+Please use these tags to prepare `git` packages for your various
+distributions, using the appropriate tagged versions. The added test cases
+help verify the correctness.
+
+The addressed issues are:
+
+<list of CVEs with a short description, typically copy/pasted from Git's
+release notes, usually demo exploit(s), too>
+
+Credit for finding the vulnerability goes to <reporter>, credit for fixing
+it goes to <developer>.
+
+Thanks,
+<name>
+
+....
+
+Example mail to oss-security@lists.openwall.com
+-----------------------------------------------
+
+....
+To: oss-security@lists.openwall.com
+Cc: git-security@googlegroups.com, <other people involved in the report/fix>
+Subject: git: <copy from security advisory>
+
+Team,
+
+The Git project released new versions on <date>, addressing <CVE>.
+
+All supported platforms are affected in one way or another, and all Git
+versions all the way back to <version> are affected. The fixed versions are:
+<versions>.
+
+Link to the announcement: <link to lore.kernel.org/git>
+
+We highly recommend upgrading.
+
+The addressed issues are:
+* <list of CVEs and their explanations, along with demo exploits>
+
+Credit for finding the vulnerability goes to <reporter>, credit for fixing
+it goes to <developer>.
+
+Thanks,
+<name>
+....
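A minimal sketch of how a recipient might consume the attached bundle (the path, tag name, and clone URL mirror the placeholders in the template above):

    git clone https://github.com/git/git.git && cd git
    git bundle verify /path/to/cve-xxx.bundle
    git fetch --tags /path/to/cve-xxx.bundle
    git tag -v vA.B.C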
diff --git a/Documentation/technical/api-simple-ipc.txt b/Documentation/technical/api-simple-ipc.txt
new file mode 100644
index 0000000000..d79ad323e6
--- /dev/null
+++ b/Documentation/technical/api-simple-ipc.txt
@@ -0,0 +1,105 @@
+Simple-IPC API
+==============
+
+The Simple-IPC API is a collection of `ipc_` prefixed library routines
+and a basic communication protocol that allow an IPC-client process to
+send an application-specific IPC-request message to an IPC-server
+process and receive an application-specific IPC-response message.
+
+Communication occurs over a named pipe on Windows and a Unix domain
+socket on other platforms. IPC-clients and IPC-servers rendezvous at
+a previously agreed-to application-specific pathname (which is outside
+the scope of this design) that is local to the computer system.
+
+The IPC-server routines within the server application process create a
+thread pool to listen for connections and receive request messages
+from multiple concurrent IPC-clients. When received, these messages
+are dispatched up to the server application callbacks for handling.
+IPC-server routines then incrementally relay responses back to the
+IPC-client.
+
+The IPC-client routines within a client application process connect
+to the IPC-server, send a request message, and wait for a response.
+When received, the response is returned to the caller.
+
+For example, the `fsmonitor--daemon` feature will be built as a server
+application on top of the IPC-server library routines. It will have
+threads watching for file system events and a thread pool waiting for
+client connections. Clients, such as `git status`, will request a list
+of file system events since a point in time and the server will
+respond with a list of changed files and directories. The formats of
+the request and response are application-specific; the IPC-client and
+IPC-server routines treat them as opaque byte streams.
+
+
+Comparison with sub-process model
+---------------------------------
+
+The Simple-IPC mechanism differs from the existing `sub-process.c`
+model (Documentation/technical/long-running-process-protocol.txt)
+used by applications like Git-LFS. In the LFS-style sub-process model
+the helper is started by the foreground process, communication happens
+via a pair of file descriptors bound to the stdin/stdout of the
+sub-process, the sub-process only serves the current foreground
+process, and the sub-process exits when the foreground process
+terminates.
+
+In the Simple-IPC model the server is a very long-running service. It
+can service many clients at the same time and has a private socket or
+named pipe connection to each active client. It might be started
+(on-demand) by the current client process or it might have been
+started by a previous client or by the OS at boot time. The server
+process is not associated with a terminal and it persists after
+clients terminate. Clients do not have access to the stdin/stdout of
+the server process and therefore must communicate over sockets or
+named pipes.
+
+
+Server startup and shutdown
+---------------------------
+
+How an application server based upon IPC-server is started is also
+outside the scope of the Simple-IPC design and is a property of the
+application using it. For example, the server might be started or
+restarted during routine maintenance operations, or it might be
+started as a system service during the system boot-up sequence, or it
+might be started on-demand by a foreground Git command when needed.
+
+Similarly, server shutdown is a property of the application using
+the simple-ipc routines. For example, the server might decide to
+shut down when idle or only upon explicit request.
+
+
+Simple-IPC protocol
+-------------------
+
+The Simple-IPC protocol consists of a single request message from the
+client and an optional response message from the server. Both the
+client and server messages are unlimited in length and are terminated
+with a flush packet.
+
+The pkt-line routines (Documentation/technical/protocol-common.txt)
+are used to simplify buffer management during message generation,
+transmission, and reception. A flush packet is used to mark the end
+of the message. This allows the sender to incrementally generate and
+transmit the message. It allows the receiver to incrementally receive
+the message in chunks and to know when they have received the entire
+message.
+
+The actual byte format of the client request and server response
+messages is application-specific. The IPC layer transmits and
+receives them as opaque byte buffers without any concern for the
+content within. It is the job of the calling application layer to
+understand the contents of the request and response messages.
+
+
+Summary
+-------
+
+Conceptually, the Simple-IPC protocol is similar to an HTTP REST
+request. Clients connect, make an application-specific and
+stateless request, receive an application-specific
+response, and disconnect. It is a one round trip facility for
+querying the server. The Simple-IPC routines hide the socket,
+named pipe, and thread pool details and allow the application
+layer to focus on the application at hand.
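To make the framing concrete, here is an illustrative shell sketch of a request as it would appear on the wire under the pkt-line rules referenced above (a four-hex-digit length prefix that counts itself, followed by the payload, with `0000` as the terminating flush packet); the payload text is an arbitrary example, not part of the Simple-IPC design:

    payload='query token=123'
    printf '%04x%s' $(( ${#payload} + 4 )) "$payload"   # one pkt-line carrying the payload
    printf '0000'                                       # flush packet marks the end of the message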
diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt
index e8e377a59f..fb688976c4 100644
--- a/Documentation/technical/multi-pack-index.txt
+++ b/Documentation/technical/multi-pack-index.txt
@@ -43,8 +43,9 @@ Design Details
a change in format.
- The MIDX keeps only one record per object ID. If an object appears
- in multiple packfiles, then the MIDX selects the copy in the most-
- recently modified packfile.
+ in multiple packfiles, then the MIDX selects the copy in the
+ preferred packfile, otherwise selecting from the most-recently
+ modified packfile.
- If there exist packfiles in the pack directory not registered in
the MIDX, then those packfiles are loaded into the `packed_git`
diff --git a/Documentation/technical/pack-format.txt b/Documentation/technical/pack-format.txt
index 1faa949bf6..8d2f42f29e 100644
--- a/Documentation/technical/pack-format.txt
+++ b/Documentation/technical/pack-format.txt
@@ -379,3 +379,86 @@ CHUNK DATA:
TRAILER:
Index checksum of the above contents.
+
+== multi-pack-index reverse indexes
+
+Similar to the pack-based reverse index, the multi-pack index can also
+be used to generate a reverse index.
+
+Instead of mapping between offset, pack-, and index position, this
+reverse index maps between an object's position within the MIDX, and
+that object's position within a pseudo-pack that the MIDX describes
+(i.e., the ith entry of the multi-pack reverse index holds the MIDX
+position of the ith object in pseudo-pack order).
+
+To clarify the difference between these orderings, consider a multi-pack
+reachability bitmap (which does not yet exist, but is what we are
+building towards here). Each bit needs to correspond to an object in the
+MIDX, and so we need an efficient mapping from bit position to MIDX
+position.
+
+One solution is to let bits occupy the same position in the oid-sorted
+index stored by the MIDX. But because oids are effectively random, their
+resulting reachability bitmaps would have no locality, and thus compress
+poorly. (This is the reason that single-pack bitmaps use the pack
+ordering, and not the .idx ordering, for the same purpose.)
+
+So we'd like to define an ordering for the whole MIDX based around
+pack ordering, which has far better locality (and thus compresses more
+efficiently). We can think of a pseudo-pack created by the concatenation
+of all of the packs in the MIDX. E.g., if we had a MIDX with three packs
+(a, b, c), with 10, 15, and 20 objects respectively, we can imagine an
+ordering of the objects like:
+
+ |a,0|a,1|...|a,9|b,0|b,1|...|b,14|c,0|c,1|...|c,19|
+
+where the ordering of the packs is defined by the MIDX's pack list,
+and then the ordering of objects within each pack is the same as the
+order in the actual packfile.
+
+Given the list of packs and their counts of objects, you can
+naïvely reconstruct that pseudo-pack ordering (e.g., the object at
+position 27 must be (c,1) because packs "a" and "b" consumed 25 of the
+slots). But there's a catch. Objects may be duplicated between packs, in
+which case the MIDX only stores one pointer to the object (and thus we'd
+want only one slot in the bitmap).
+
+Callers could handle duplicates themselves by reading objects in order
+of their bit-position, but that's linear in the number of objects, and
+much too expensive for ordinary bitmap lookups. Building a reverse index
+solves this, since it is the logical inverse of the index, and that
+index has already removed duplicates. But, building a reverse index on
+the fly can be expensive. Since we already have an on-disk format for
+pack-based reverse indexes, let's reuse it for the MIDX's pseudo-pack,
+too.
+
+Objects from the MIDX are ordered as follows to string together the
+pseudo-pack. Let `pack(o)` return the pack from which `o` was selected
+by the MIDX, and define an ordering of packs based on their numeric ID
+(as stored by the MIDX). Let `offset(o)` return the object offset of `o`
+within `pack(o)`. Then, compare `o1` and `o2` as follows:
+
+ - If one of `pack(o1)` and `pack(o2)` is preferred and the other
+ is not, then the preferred one sorts first.
++
+(This is a detail that allows the MIDX bitmap to determine which
+pack should be used by the pack-reuse mechanism, since it can ask
+the MIDX for the pack containing the object at bit position 0).
+
+ - If `pack(o1) ≠ pack(o2)`, then sort the two objects in descending
+ order based on the pack ID.
+
+ - Otherwise, `pack(o1) = pack(o2)`, and the objects are sorted in
+ pack-order (i.e., `o1` sorts ahead of `o2` exactly when `offset(o1)
+ < offset(o2)`).
+
+In short, a MIDX's pseudo-pack is the de-duplicated concatenation of
+objects in packs stored by the MIDX, laid out in pack order, and the
+packs arranged in MIDX order (with the preferred pack coming first).
+
+Finally, note that the MIDX's reverse index is not stored as a chunk in
+the multi-pack-index itself. This is done because the reverse index
+includes the checksum of the pack or MIDX to which it belongs, which
+makes it impossible to write in the MIDX. To avoid races when rewriting
+the MIDX, a MIDX reverse index includes the MIDX's checksum in its
+filename (e.g., `multi-pack-index-xyz.rev`).
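A tiny shell sketch of the naïve reconstruction described above, using the example pack sizes (10, 15, 20) and treating the input as "the Nth object of the pseudo-pack" counted from 1; with N=27 it reports the (c,1) pairing mentioned in the text (duplicates are ignored here):

    n=27    # the Nth object in the pseudo-pack, counting from 1
    for pack in a:10 b:15 c:20; do
        name=${pack%%:*} count=${pack##*:}
        if [ "$n" -le "$count" ]; then
            echo "($name,$(( n - 1 )))"   # 0-based offset within that pack: prints (c,1)
            break
        fi
        n=$(( n - count ))
    done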
diff --git a/Documentation/user-manual.txt b/Documentation/user-manual.txt
index fd480b8645..f9e54b8674 100644
--- a/Documentation/user-manual.txt
+++ b/Documentation/user-manual.txt
@@ -1,5 +1,8 @@
= Git User Manual
+[preface]
+== Introduction
+
Git is a fast distributed revision control system.
This manual is designed to be readable by someone with basic UNIX
diff --git a/INSTALL b/INSTALL
index 8474ad01bf..66389ce059 100644
--- a/INSTALL
+++ b/INSTALL
@@ -197,7 +197,9 @@ Issues of note:
Building and installing the pdf file additionally requires
dblatex. Version >= 0.2.7 is known to work.
- All formats require at least asciidoc 8.4.1.
+ All formats require at least asciidoc 8.4.1. Alternatively, you can
+ use Asciidoctor (requires Ruby) by passing USE_ASCIIDOCTOR=YesPlease
+ to make. You need at least Asciidoctor version 1.5.
There are also "make quick-install-doc", "make quick-install-man"
and "make quick-install-html" which install preformatted man pages
diff --git a/Makefile b/Makefile
index 55c8035fa8..21c0bf1667 100644
--- a/Makefile
+++ b/Makefile
@@ -693,6 +693,7 @@ X =
PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
TEST_BUILTINS_OBJS += test-advise.o
+TEST_BUILTINS_OBJS += test-bitmap.o
TEST_BUILTINS_OBJS += test-bloom.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
@@ -744,6 +745,7 @@ TEST_BUILTINS_OBJS += test-serve-v2.o
TEST_BUILTINS_OBJS += test-sha1.o
TEST_BUILTINS_OBJS += test-sha256.o
TEST_BUILTINS_OBJS += test-sigchain.o
+TEST_BUILTINS_OBJS += test-simple-ipc.o
TEST_BUILTINS_OBJS += test-strcmp-offset.o
TEST_BUILTINS_OBJS += test-string-list.o
TEST_BUILTINS_OBJS += test-submodule-config.o
@@ -1679,6 +1681,14 @@ ifdef NO_UNIX_SOCKETS
BASIC_CFLAGS += -DNO_UNIX_SOCKETS
else
LIB_OBJS += unix-socket.o
+ LIB_OBJS += unix-stream-server.o
+ LIB_OBJS += compat/simple-ipc/ipc-shared.o
+ LIB_OBJS += compat/simple-ipc/ipc-unix-socket.o
+endif
+
+ifdef USE_WIN32_IPC
+ LIB_OBJS += compat/simple-ipc/ipc-shared.o
+ LIB_OBJS += compat/simple-ipc/ipc-win32.o
endif
ifdef NO_ICONV
@@ -2690,12 +2700,14 @@ FIND_SOURCE_FILES = ( \
)
$(ETAGS_TARGET): FORCE
- $(RM) $(ETAGS_TARGET)
- $(FIND_SOURCE_FILES) | xargs etags -a -o $(ETAGS_TARGET)
+ $(QUIET_GEN)$(RM) "$(ETAGS_TARGET)+" && \
+ $(FIND_SOURCE_FILES) | xargs etags -a -o "$(ETAGS_TARGET)+" && \
+ mv "$(ETAGS_TARGET)+" "$(ETAGS_TARGET)"
tags: FORCE
- $(RM) tags
- $(FIND_SOURCE_FILES) | xargs ctags -a
+ $(QUIET_GEN)$(RM) tags+ && \
+ $(FIND_SOURCE_FILES) | xargs ctags -a -o tags+ && \
+ mv tags+ tags
cscope:
$(RM) cscope*
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000000..c720c2ae7f
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,51 @@
+# Security Policy
+
+## Reporting a vulnerability
+
+Please send a detailed mail to git-security@googlegroups.com to
+report vulnerabilities in Git.
+
+Even when unsure whether the bug in question is an exploitable
+vulnerability, it is recommended to send the report to
+git-security@googlegroups.com (and obviously not to discuss the
+issue anywhere else).
+
+Vulnerabilities are expected to be discussed _only_ on that
+list, and not in public, until the official announcement on the
+Git mailing list on the release date.
+
+Examples of details to include:
+
+- Ideally a short description (or a script) to demonstrate an
+ exploit.
+- The affected platforms and scenarios (the vulnerability might
+ only affect setups with case-sensitive file systems, for
+ example).
+- The name and affiliation of the security researchers who are
+ involved in the discovery, if any.
+- Whether the vulnerability has already been disclosed.
+- How long an embargo would be required to be safe.
+
+## Supported Versions
+
+There are no official "Long Term Support" versions in Git.
+Instead, the maintenance track (i.e. the versions based on the
+most recently published feature release, also known as ".0"
+version) sees occasional updates with bug fixes.
+
+Fixes to vulnerabilities are made for the maintenance track for
+the latest feature release and merged up to the in-development
+branches. The Git project makes no formal guarantee for any
+older maintenance tracks to receive updates. In practice,
+though, critical vulnerability fixes are applied not only to the
+most recent track, but to at least a couple more maintenance
+tracks.
+
+This is typically done by making the fix on the oldest and still
+relevant maintenance track, and merging it upwards to newer and
+newer maintenance tracks.
+
+For example, v2.24.1 was released to address a couple of
+[CVEs](https://cve.mitre.org/), and at the same time v2.14.6,
+v2.15.4, v2.16.6, v2.17.3, v2.18.2, v2.19.3, v2.20.2, v2.21.1,
+v2.22.2 and v2.23.1 were released.
diff --git a/apply.c b/apply.c
index 6695a931e9..8c5b29809b 100644
--- a/apply.c
+++ b/apply.c
@@ -21,6 +21,7 @@
#include "quote.h"
#include "rerere.h"
#include "apply.h"
+#include "entry.h"
struct gitdiff_data {
struct strbuf *root;
@@ -133,8 +134,6 @@ int check_apply_state(struct apply_state *state, int force_apply)
if (state->apply_with_reject && state->threeway)
return error(_("--reject and --3way cannot be used together."));
- if (state->cached && state->threeway)
- return error(_("--cached and --3way cannot be used together."));
if (state->threeway) {
if (is_not_gitdir)
return error(_("--3way outside a repository"));
@@ -3569,10 +3568,10 @@ static int try_threeway(struct apply_state *state,
write_object_file("", 0, blob_type, &pre_oid);
else if (get_oid(patch->old_oid_prefix, &pre_oid) ||
read_blob_object(&buf, &pre_oid, patch->old_mode))
- return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
+ return error(_("repository lacks the necessary blob to perform 3-way merge."));
if (state->apply_verbosity > verbosity_silent)
- fprintf(stderr, _("Falling back to three-way merge...\n"));
+ fprintf(stderr, _("Performing three-way merge...\n"));
img = strbuf_detach(&buf, &len);
prepare_image(&tmp_image, img, len, 1);
@@ -3604,7 +3603,7 @@ static int try_threeway(struct apply_state *state,
if (status < 0) {
if (state->apply_verbosity > verbosity_silent)
fprintf(stderr,
- _("Failed to fall back on three-way merge...\n"));
+ _("Failed to perform three-way merge...\n"));
return status;
}
@@ -3637,10 +3636,9 @@ static int apply_data(struct apply_state *state, struct patch *patch,
if (load_preimage(state, &image, patch, st, ce) < 0)
return -1;
- if (patch->direct_to_threeway ||
- apply_fragments(state, &image, patch) < 0) {
+ if (!state->threeway || try_threeway(state, &image, patch, st, ce) < 0) {
/* Note: with --reject, apply_fragments() returns 0 */
- if (!state->threeway || try_threeway(state, &image, patch, st, ce) < 0)
+ if (patch->direct_to_threeway || apply_fragments(state, &image, patch) < 0)
return -1;
}
patch->result = image.buf;
@@ -4646,7 +4644,12 @@ static int write_out_results(struct apply_state *state, struct patch *list)
}
string_list_clear(&cpath, 0);
- repo_rerere(state->repo, 0);
+ /*
+ * rerere relies on the partially merged result being in the working
+ * tree with conflict markers, but that isn't written with --cached.
+ */
+ if (!state->cached)
+ repo_rerere(state->repo, 0);
}
return errs;
@@ -5017,7 +5020,7 @@ int apply_parse_options(int argc, const char **argv,
OPT_BOOL(0, "apply", force_apply,
N_("also apply the patch (use with --stat/--summary/--check)")),
OPT_BOOL('3', "3way", &state->threeway,
- N_( "attempt three-way merge if a patch does not apply")),
+ N_( "attempt three-way merge, fall back on normal patch if that fails")),
OPT_FILENAME(0, "build-fake-ancestor", &state->fake_ancestor,
N_("build a temporary index based on embedded index information")),
/* Think twice before adding "--nul" synonym to this */
diff --git a/archive.c b/archive.c
index 2dd2236ae0..295615580d 100644
--- a/archive.c
+++ b/archive.c
@@ -104,7 +104,6 @@ struct directory {
struct object_id oid;
int baselen, len;
unsigned mode;
- int stage;
char path[FLEX_ARRAY];
};
@@ -135,7 +134,7 @@ static int check_attr_export_subst(const struct attr_check *check)
}
static int write_archive_entry(const struct object_id *oid, const char *base,
- int baselen, const char *filename, unsigned mode, int stage,
+ int baselen, const char *filename, unsigned mode,
void *context)
{
static struct strbuf path = STRBUF_INIT;
@@ -194,7 +193,7 @@ static int write_archive_entry(const struct object_id *oid, const char *base,
static void queue_directory(const unsigned char *sha1,
struct strbuf *base, const char *filename,
- unsigned mode, int stage, struct archiver_context *c)
+ unsigned mode, struct archiver_context *c)
{
struct directory *d;
size_t len = st_add4(base->len, 1, strlen(filename), 1);
@@ -202,7 +201,6 @@ static void queue_directory(const unsigned char *sha1,
d->up = c->bottom;
d->baselen = base->len;
d->mode = mode;
- d->stage = stage;
c->bottom = d;
d->len = xsnprintf(d->path, len, "%.*s%s/", (int)base->len, base->buf, filename);
hashcpy(d->oid.hash, sha1);
@@ -221,14 +219,14 @@ static int write_directory(struct archiver_context *c)
write_directory(c) ||
write_archive_entry(&d->oid, d->path, d->baselen,
d->path + d->baselen, d->mode,
- d->stage, c) != READ_TREE_RECURSIVE;
+ c) != READ_TREE_RECURSIVE;
free(d);
return ret ? -1 : 0;
}
static int queue_or_write_archive_entry(const struct object_id *oid,
struct strbuf *base, const char *filename,
- unsigned mode, int stage, void *context)
+ unsigned mode, void *context)
{
struct archiver_context *c = context;
@@ -253,14 +251,14 @@ static int queue_or_write_archive_entry(const struct object_id *oid,
if (check_attr_export_ignore(check))
return 0;
queue_directory(oid->hash, base, filename,
- mode, stage, c);
+ mode, c);
return READ_TREE_RECURSIVE;
}
if (write_directory(c))
return -1;
return write_archive_entry(oid, base->buf, base->len, filename, mode,
- stage, context);
+ context);
}
struct extra_file_info {
@@ -313,10 +311,10 @@ int write_archive_entries(struct archiver_args *args,
git_attr_set_direction(GIT_ATTR_INDEX);
}
- err = read_tree_recursive(args->repo, args->tree, "",
- 0, 0, &args->pathspec,
- queue_or_write_archive_entry,
- &context);
+ err = read_tree(args->repo, args->tree,
+ &args->pathspec,
+ queue_or_write_archive_entry,
+ &context);
if (err == READ_TREE_RECURSIVE)
err = 0;
while (context.bottom) {
@@ -375,7 +373,7 @@ struct path_exists_context {
static int reject_entry(const struct object_id *oid, struct strbuf *base,
const char *filename, unsigned mode,
- int stage, void *context)
+ void *context)
{
int ret = -1;
struct path_exists_context *ctx = context;
@@ -402,9 +400,9 @@ static int path_exists(struct archiver_args *args, const char *path)
ctx.args = args;
parse_pathspec(&ctx.pathspec, 0, 0, "", paths);
ctx.pathspec.recursive = 1;
- ret = read_tree_recursive(args->repo, args->tree, "",
- 0, 0, &ctx.pathspec,
- reject_entry, &ctx);
+ ret = read_tree(args->repo, args->tree,
+ &ctx.pathspec,
+ reject_entry, &ctx);
clear_pathspec(&ctx.pathspec);
return ret != 0;
}
diff --git a/branch.c b/branch.c
index 9c9dae1eae..b71a2de29d 100644
--- a/branch.c
+++ b/branch.c
@@ -344,6 +344,7 @@ void remove_merge_branch_state(struct repository *r)
unlink(git_path_merge_rr(r));
unlink(git_path_merge_msg(r));
unlink(git_path_merge_mode(r));
+ unlink(git_path_auto_merge(r));
save_autostash(git_path_merge_autostash(r));
}
diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c
index 023e49e271..c0bf4ac1b2 100644
--- a/builtin/checkout-index.c
+++ b/builtin/checkout-index.c
@@ -11,6 +11,7 @@
#include "quote.h"
#include "cache-tree.h"
#include "parse-options.h"
+#include "entry.h"
#define CHECKOUT_ALL 4
static int nul_term_line;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 2d6550bc3c..4c696ef480 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -26,6 +26,7 @@
#include "unpack-trees.h"
#include "wt-status.h"
#include "xdiff-interface.h"
+#include "entry.h"
static const char * const checkout_usage[] = {
N_("git checkout [<options>] <branch>"),
@@ -114,7 +115,7 @@ static int post_checkout_hook(struct commit *old_commit, struct commit *new_comm
}
static int update_some(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, int stage, void *context)
+ const char *pathname, unsigned mode, void *context)
{
int len;
struct cache_entry *ce;
@@ -155,8 +156,8 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
static int read_tree_some(struct tree *tree, const struct pathspec *pathspec)
{
- read_tree_recursive(the_repository, tree, "", 0, 0,
- pathspec, update_some, NULL);
+ read_tree(the_repository, tree,
+ pathspec, update_some, NULL);
/* update the index with the given tree's info
* for all args, expanding wildcards, and exit
@@ -322,7 +323,7 @@ static void mark_ce_for_checkout_overlay(struct cache_entry *ce,
* If it comes from the tree-ish, we already know it
* matches the pathspec and could just stamp
* CE_MATCHED to it from update_some(). But we still
- * need ps_matched and read_tree_recursive (and
+ * need ps_matched and read_tree (and
* eventually tree_entry_interesting) cannot fill
* ps_matched yet. Once it can, we can avoid calling
* match_pathspec() for _all_ entries when
diff --git a/builtin/clone.c b/builtin/clone.c
index 51e844a2de..f6b0c48bed 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -50,6 +50,8 @@ static int option_no_checkout, option_bare, option_mirror, option_single_branch
static int option_local = -1, option_no_hardlinks, option_shared;
static int option_no_tags;
static int option_shallow_submodules;
+static int option_reject_shallow = -1; /* unspecified */
+static int config_reject_shallow = -1; /* unspecified */
static int deepen;
static char *option_template, *option_depth, *option_since;
static char *option_origin = NULL;
@@ -90,6 +92,8 @@ static struct option builtin_clone_options[] = {
OPT__VERBOSITY(&option_verbosity),
OPT_BOOL(0, "progress", &option_progress,
N_("force progress reporting")),
+ OPT_BOOL(0, "reject-shallow", &option_reject_shallow,
+ N_("don't clone shallow repository")),
OPT_BOOL('n', "no-checkout", &option_no_checkout,
N_("don't create a checkout")),
OPT_BOOL(0, "bare", &option_bare, N_("create a bare repository")),
@@ -858,6 +862,9 @@ static int git_clone_config(const char *k, const char *v, void *cb)
free(remote_name);
remote_name = xstrdup(v);
}
+ if (!strcmp(k, "clone.rejectshallow"))
+ config_reject_shallow = git_config_bool(k, v);
+
return git_default_config(k, v, cb);
}
@@ -963,11 +970,12 @@ static int path_exists(const char *path)
int cmd_clone(int argc, const char **argv, const char *prefix)
{
int is_bundle = 0, is_local;
+ int reject_shallow = 0;
const char *repo_name, *repo, *work_tree, *git_dir;
- char *path, *dir, *display_repo = NULL;
+ char *path = NULL, *dir, *display_repo = NULL;
int dest_exists, real_dest_exists = 0;
const struct ref *refs, *remote_head;
- const struct ref *remote_head_points_at;
+ struct ref *remote_head_points_at = NULL;
const struct ref *our_head_points_at;
struct ref *mapped_refs;
const struct ref *ref;
@@ -1017,9 +1025,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
repo_name = argv[0];
path = get_repo_path(repo_name, &is_bundle);
- if (path)
+ if (path) {
+ FREE_AND_NULL(path);
repo = absolute_pathdup(repo_name);
- else if (strchr(repo_name, ':')) {
+ } else if (strchr(repo_name, ':')) {
repo = repo_name;
display_repo = transport_anonymize_url(repo);
} else
@@ -1157,6 +1166,15 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
git_config(git_clone_config, NULL);
/*
+ * If option_reject_shallow is specified from CLI option,
+ * ignore config_reject_shallow from git_clone_config.
+ */
+ if (config_reject_shallow != -1)
+ reject_shallow = config_reject_shallow;
+ if (option_reject_shallow != -1)
+ reject_shallow = option_reject_shallow;
+
+ /*
* apply the remote name provided by --origin only after this second
* call to git_config, to ensure it overrides all config-based values.
*/
@@ -1216,6 +1234,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (filter_options.choice)
warning(_("--filter is ignored in local clones; use file:// instead."));
if (!access(mkpath("%s/shallow", path), F_OK)) {
+ if (reject_shallow)
+ die(_("source repository is shallow, reject to clone."));
if (option_local > 0)
warning(_("source repository is shallow, ignoring --local"));
is_local = 0;
@@ -1227,6 +1247,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
transport_set_option(transport, TRANS_OPT_KEEP, "yes");
+ if (reject_shallow)
+ transport_set_option(transport, TRANS_OPT_REJECT_SHALLOW, "1");
if (option_depth)
transport_set_option(transport, TRANS_OPT_DEPTH,
option_depth);
@@ -1393,6 +1415,11 @@ cleanup:
strbuf_release(&reflog_msg);
strbuf_release(&branch_top);
strbuf_release(&key);
+ free_refs(mapped_refs);
+ free_refs(remote_head_points_at);
+ free(dir);
+ free(path);
+ UNLEAK(repo);
junk_mode = JUNK_LEAVE_ALL;
strvec_clear(&transport_ls_refs_options.ref_prefixes);
diff --git a/builtin/column.c b/builtin/column.c
index e815e148aa..40d4b3bee2 100644
--- a/builtin/column.c
+++ b/builtin/column.c
@@ -27,10 +27,10 @@ int cmd_column(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "command", &real_command, N_("name"), N_("lookup config vars")),
OPT_COLUMN(0, "mode", &colopts, N_("layout to use")),
OPT_INTEGER(0, "raw-mode", &colopts, N_("layout to use")),
- OPT_INTEGER(0, "width", &copts.width, N_("Maximum width")),
- OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("Padding space on left border")),
- OPT_INTEGER(0, "nl", &copts.nl, N_("Padding space on right border")),
- OPT_INTEGER(0, "padding", &copts.padding, N_("Padding space between columns")),
+ OPT_INTEGER(0, "width", &copts.width, N_("maximum width")),
+ OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("padding space on left border")),
+ OPT_INTEGER(0, "nl", &copts.nl, N_("padding space on right border")),
+ OPT_INTEGER(0, "padding", &copts.padding, N_("padding space between columns")),
OPT_END()
};
diff --git a/builtin/commit.c b/builtin/commit.c
index d513858218..55d50a8891 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -114,6 +114,7 @@ static int config_commit_verbose = -1; /* unspecified */
static int no_post_rewrite, allow_empty_message, pathspec_file_nul;
static char *untracked_files_arg, *force_date, *ignore_submodule_arg, *ignored_arg;
static char *sign_commit, *pathspec_from_file;
+static struct strvec trailer_args = STRVEC_INIT;
/*
* The default commit message cleanup mode will remove the lines
@@ -132,6 +133,14 @@ static struct strbuf message = STRBUF_INIT;
static enum wt_status_format status_format = STATUS_FORMAT_UNSPECIFIED;
+static int opt_pass_trailer(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ strvec_pushl(&trailer_args, "--trailer", arg, NULL);
+ return 0;
+}
+
static int opt_parse_porcelain(const struct option *opt, const char *arg, int unset)
{
enum wt_status_format *value = (enum wt_status_format *)opt->value;
@@ -994,6 +1003,18 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
fclose(s->fp);
+ if (trailer_args.nr) {
+ struct child_process run_trailer = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&run_trailer.args, "interpret-trailers",
+ "--in-place", git_path_commit_editmsg(), NULL);
+ strvec_pushv(&run_trailer.args, trailer_args.v);
+ run_trailer.git_cmd = 1;
+ if (run_command(&run_trailer))
+ die(_("unable to pass trailers to --trailers"));
+ strvec_clear(&trailer_args);
+ }
+
/*
* Reject an attempt to record a non-merge empty commit without
* explicit --allow-empty. In the cherry-pick case, it may be
@@ -1596,6 +1617,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "fixup", &fixup_message, N_("[(amend|reword):]commit"), N_("use autosquash formatted message to fixup or amend/reword specified commit")),
OPT_STRING(0, "squash", &squash_message, N_("commit"), N_("use autosquash formatted message to squash specified commit")),
OPT_BOOL(0, "reset-author", &renew_authorship, N_("the commit is authored by me now (used with -C/-c/--amend)")),
+ OPT_CALLBACK_F(0, "trailer", NULL, N_("trailer"), N_("add custom trailer(s)"), PARSE_OPT_NONEG, opt_pass_trailer),
OPT_BOOL('s', "signoff", &signoff, N_("add a Signed-off-by trailer")),
OPT_FILENAME('t', "template", &template_file, N_("use specified template file")),
OPT_BOOL('e', "edit", &edit_flag, N_("force edit of commit")),
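Each `--trailer` given on the command line is recorded by opt_pass_trailer() and later spliced into a `git interpret-trailers --in-place` invocation on the commit message file. A small hedged sketch of how the argument vector accumulates, using only the strvec calls already visible above (the trailer values are made up):

	static struct strvec example_trailer_args = STRVEC_INIT;

	static void collect_trailer(const char *arg)
	{
		/* mirrors opt_pass_trailer(): each value becomes "--trailer <arg>" */
		strvec_pushl(&example_trailer_args, "--trailer", arg, NULL);
	}

	static void collect_two_trailers(void)
	{
		collect_trailer("Reviewed-by: A U Thor <author@example.com>");
		collect_trailer("Helped-by: C O Mitter <committer@example.com>");
		/*
		 * example_trailer_args.v now holds
		 *   "--trailer", "Reviewed-by: ...", "--trailer", "Helped-by: ...", NULL
		 * and prepare_to_commit() appends that after
		 *   "interpret-trailers --in-place <COMMIT_EDITMSG>".
		 */
	}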
diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c
index c61f123a3b..4c6c89ab0d 100644
--- a/builtin/credential-cache--daemon.c
+++ b/builtin/credential-cache--daemon.c
@@ -203,9 +203,10 @@ static int serve_cache_loop(int fd)
static void serve_cache(const char *socket_path, int debug)
{
+ struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;
int fd;
- fd = unix_stream_listen(socket_path);
+ fd = unix_stream_listen(socket_path, &opts);
if (fd < 0)
die_errno("unable to bind to '%s'", socket_path);
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
index 9b3f709905..76a6ba3722 100644
--- a/builtin/credential-cache.c
+++ b/builtin/credential-cache.c
@@ -14,7 +14,7 @@
static int send_request(const char *socket, const struct strbuf *out)
{
int got_data = 0;
- int fd = unix_stream_connect(socket);
+ int fd = unix_stream_connect(socket, 0);
if (fd < 0)
return -1;
diff --git a/builtin/difftool.c b/builtin/difftool.c
index 6e18e623fd..ef25729d49 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -23,6 +23,7 @@
#include "lockfile.h"
#include "object-store.h"
#include "dir.h"
+#include "entry.h"
static int trust_exit_code;
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 821e7798c7..70ff95837a 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -71,11 +71,6 @@ static const char *printable_type(const struct object_id *oid,
return ret;
}
-static int fsck_config(const char *var, const char *value, void *cb)
-{
- return fsck_config_internal(var, value, cb, &fsck_obj_options);
-}
-
static int objerror(struct object *obj, const char *err)
{
errors_found |= ERROR_OBJECT;
@@ -89,7 +84,9 @@ static int objerror(struct object *obj, const char *err)
static int fsck_error_func(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
switch (msg_type) {
case FSCK_WARN:
@@ -197,7 +194,8 @@ static int traverse_reachable(void)
return !!result;
}
-static int mark_used(struct object *obj, int type, void *data, struct fsck_options *options)
+static int mark_used(struct object *obj, enum object_type object_type,
+ void *data, struct fsck_options *options)
{
if (!obj)
return 1;
@@ -803,7 +801,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
if (name_objects)
fsck_enable_object_names(&fsck_walk_options);
- git_config(fsck_config, NULL);
+ git_config(git_fsck_config, &fsck_obj_options);
if (connectivity_only) {
for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 21899687e2..15507b5cff 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -120,7 +120,7 @@ static int nr_threads;
static int from_stdin;
static int strict;
static int do_fsck_object;
-static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
+static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static int verbose;
static int show_resolving_progress;
static int show_stat;
@@ -212,7 +212,8 @@ static void cleanup_thread(void)
free(thread_data);
}
-static int mark_link(struct object *obj, int type, void *data, struct fsck_options *options)
+static int mark_link(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
{
if (!obj)
return -1;
@@ -1712,22 +1713,6 @@ static void show_pack_info(int stat_only)
}
}
-static int print_dangling_gitmodules(struct fsck_options *o,
- const struct object_id *oid,
- enum object_type object_type,
- int msg_type, const char *message)
-{
- /*
- * NEEDSWORK: Plumb the MSG_ID (from fsck.c) here and use it
- * instead of relying on this string check.
- */
- if (starts_with(message, "gitmodulesMissing")) {
- printf("%s\n", oid_to_hex(oid));
- return 0;
- }
- return fsck_error_function(o, oid, object_type, msg_type, message);
-}
-
int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
int i, fix_thin_pack = 0, verify = 0, stat_only = 0, rev_index;
@@ -1948,13 +1933,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
else
close(input_fd);
- if (do_fsck_object) {
- struct fsck_options fo = fsck_options;
-
- fo.error_func = print_dangling_gitmodules;
- if (fsck_finish(&fo))
- die(_("fsck error in pack objects"));
- }
+ if (do_fsck_object && fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
free(objects);
strbuf_release(&index_name_buf);
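The deleted print_dangling_gitmodules() had to recognize the problem by string-matching the rendered message; with the widened error_func signature the message id is passed as an enum, which is what FSCK_OPTIONS_MISSING_GITMODULES can rely on instead. A hedged sketch of what such an id-based callback can look like, assuming the enum constant is named FSCK_MSG_GITMODULES_MISSING (inferred from the "gitmodulesMissing" string above) and that fsck_error_function() was updated to the same six-argument shape as the callbacks in this patch:

	static int report_missing_gitmodules(struct fsck_options *o,
					     const struct object_id *oid,
					     enum object_type object_type,
					     enum fsck_msg_type msg_type,
					     enum fsck_msg_id msg_id,
					     const char *message)
	{
		/* branch on the id instead of parsing the message text */
		if (msg_id == FSCK_MSG_GITMODULES_MISSING) {
			printf("%s\n", oid_to_hex(oid));
			return 0;
		}
		return fsck_error_function(o, oid, object_type,
					   msg_type, msg_id, message);
	}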
diff --git a/builtin/init-db.c b/builtin/init-db.c
index f82efe4aff..c19b35f1e6 100644
--- a/builtin/init-db.c
+++ b/builtin/init-db.c
@@ -25,7 +25,6 @@
static int init_is_bare_repository = 0;
static int init_shared_repository = -1;
-static const char *init_db_template_dir;
static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
DIR *dir)
@@ -94,7 +93,7 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
}
}
-static void copy_templates(const char *template_dir)
+static void copy_templates(const char *template_dir, const char *init_template_dir)
{
struct strbuf path = STRBUF_INIT;
struct strbuf template_path = STRBUF_INIT;
@@ -107,7 +106,7 @@ static void copy_templates(const char *template_dir)
if (!template_dir)
template_dir = getenv(TEMPLATE_DIR_ENVIRONMENT);
if (!template_dir)
- template_dir = init_db_template_dir;
+ template_dir = init_template_dir;
if (!template_dir)
template_dir = to_free = system_path(DEFAULT_GIT_TEMPLATE_DIR);
if (!template_dir[0]) {
@@ -154,17 +153,6 @@ free_return:
clear_repository_format(&template_format);
}
-static int git_init_db_config(const char *k, const char *v, void *cb)
-{
- if (!strcmp(k, "init.templatedir"))
- return git_config_pathname(&init_db_template_dir, k, v);
-
- if (starts_with(k, "core."))
- return platform_core_config(k, v, cb);
-
- return 0;
-}
-
/*
* If the git_dir is not directly inside the working tree, then git will not
* find it by default, and we need to set the worktree explicitly.
@@ -212,12 +200,9 @@ static int create_default_files(const char *template_path,
int reinit;
int filemode;
struct strbuf err = STRBUF_INIT;
+ const char *init_template_dir = NULL;
const char *work_tree = get_git_work_tree();
- /* Just look for `init.templatedir` */
- init_db_template_dir = NULL; /* re-set in case it was set before */
- git_config(git_init_db_config, NULL);
-
/*
* First copy the templates -- we might have the default
* config file there, in which case we would want to read
@@ -227,7 +212,8 @@ static int create_default_files(const char *template_path,
* values (since we've just potentially changed what's available on
* disk).
*/
- copy_templates(template_path);
+ git_config_get_value("init.templatedir", &init_template_dir);
+ copy_templates(template_path, init_template_dir);
git_config_clear();
reset_shared_repository();
git_config(git_default_config, NULL);
@@ -422,8 +408,8 @@ int init_db(const char *git_dir, const char *real_git_dir,
}
startup_info->have_repository = 1;
- /* Just look for `core.hidedotfiles` */
- git_config(git_init_db_config, NULL);
+ /* Ensure `core.hidedotfiles` is processed */
+ git_config(platform_core_config, NULL);
safe_create_dir(git_dir, 0);
@@ -575,8 +561,10 @@ int cmd_init_db(int argc, const char **argv, const char *prefix)
if (real_git_dir && !is_absolute_path(real_git_dir))
real_git_dir = real_pathdup(real_git_dir, 1);
- if (template_dir && *template_dir && !is_absolute_path(template_dir))
+ if (template_dir && *template_dir && !is_absolute_path(template_dir)) {
template_dir = absolute_pathdup(template_dir);
+ UNLEAK(template_dir);
+ }
if (argc == 1) {
int mkdir_tried = 0;
diff --git a/builtin/log.c b/builtin/log.c
index f67b67d80e..8acd285daf 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -599,7 +599,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
static int show_tree_object(const struct object_id *oid,
struct strbuf *base,
- const char *pathname, unsigned mode, int stage, void *context)
+ const char *pathname, unsigned mode, void *context)
{
FILE *file = context;
fprintf(file, "%s%s\n", pathname, S_ISDIR(mode) ? "/" : "");
@@ -681,9 +681,9 @@ int cmd_show(int argc, const char **argv, const char *prefix)
diff_get_color_opt(&rev.diffopt, DIFF_COMMIT),
name,
diff_get_color_opt(&rev.diffopt, DIFF_RESET));
- read_tree_recursive(the_repository, (struct tree *)o, "",
- 0, 0, &match_all, show_tree_object,
- rev.diffopt.file);
+ read_tree(the_repository, (struct tree *)o,
+ &match_all, show_tree_object,
+ rev.diffopt.file);
rev.shown_one = 1;
break;
case OBJ_COMMIT:
@@ -1662,13 +1662,19 @@ static void print_bases(struct base_tree_info *bases, FILE *file)
oidclr(&bases->base_commit);
}
-static const char *diff_title(struct strbuf *sb, int reroll_count,
- const char *generic, const char *rerolled)
+static const char *diff_title(struct strbuf *sb,
+ const char *reroll_count,
+ const char *generic,
+ const char *rerolled)
{
- if (reroll_count <= 0)
+ int v;
+
+ /* RFC may be v0, so allow -v1 to diff against v0 */
+ if (reroll_count && !strtol_i(reroll_count, 10, &v) &&
+ v >= 1)
+ strbuf_addf(sb, rerolled, v - 1);
+ else
strbuf_addstr(sb, generic);
- else /* RFC may be v0, so allow -v1 to diff against v0 */
- strbuf_addf(sb, rerolled, reroll_count - 1);
return sb->buf;
}
@@ -1717,7 +1723,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
struct strbuf buf = STRBUF_INIT;
int use_patch_format = 0;
int quiet = 0;
- int reroll_count = -1;
+ const char *reroll_count = NULL;
char *cover_from_description_arg = NULL;
char *branch_name = NULL;
char *base_commit = NULL;
@@ -1751,7 +1757,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
N_("use <sfx> instead of '.patch'")),
OPT_INTEGER(0, "start-number", &start_number,
N_("start numbering patches at <n> instead of 1")),
- OPT_INTEGER('v', "reroll-count", &reroll_count,
+ OPT_STRING('v', "reroll-count", &reroll_count, N_("reroll-count"),
N_("mark the series as Nth re-roll")),
OPT_INTEGER(0, "filename-max-length", &fmt_patch_name_max,
N_("max length of output filename")),
@@ -1862,9 +1868,10 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (cover_from_description_arg)
cover_from_description_mode = parse_cover_from_description(cover_from_description_arg);
- if (0 < reroll_count) {
+ if (reroll_count) {
struct strbuf sprefix = STRBUF_INIT;
- strbuf_addf(&sprefix, "%s v%d",
+
+ strbuf_addf(&sprefix, "%s v%s",
rev.subject_prefix, reroll_count);
rev.reroll_count = reroll_count;
rev.subject_prefix = strbuf_detach(&sprefix, NULL);
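Because --reroll-count is now kept as a string, only values that parse as a whole number of at least one get the "diff against v<n-1>" title; anything else (say "-v2.1") falls back to the generic wording while still being used verbatim in the subject prefix. A small illustration of the rule used in diff_title() above (strtol_i() is git's strict string-to-int helper and returns 0 on success):

	/* returns the version to diff against, or -1 if there is none to name */
	static int previous_version(const char *reroll_count)
	{
		int v;

		if (reroll_count && !strtol_i(reroll_count, 10, &v) && v >= 1)
			return v - 1;	/* "-v3" is compared against v2 */
		return -1;		/* "2.1", "wip", ... use the generic title */
	}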
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index f6f9e483b2..60a2913a01 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -12,6 +12,7 @@
#include "dir.h"
#include "builtin.h"
#include "tree.h"
+#include "cache-tree.h"
#include "parse-options.h"
#include "resolve-undo.h"
#include "string-list.h"
@@ -420,6 +421,53 @@ static int get_common_prefix_len(const char *common_prefix)
return common_prefix_len;
}
+static int read_one_entry_opt(struct index_state *istate,
+ const struct object_id *oid,
+ struct strbuf *base,
+ const char *pathname,
+ unsigned mode, int opt)
+{
+ int len;
+ struct cache_entry *ce;
+
+ if (S_ISDIR(mode))
+ return READ_TREE_RECURSIVE;
+
+ len = strlen(pathname);
+ ce = make_empty_cache_entry(istate, base->len + len);
+
+ ce->ce_mode = create_ce_mode(mode);
+ ce->ce_flags = create_ce_flags(1);
+ ce->ce_namelen = base->len + len;
+ memcpy(ce->name, base->buf, base->len);
+ memcpy(ce->name + base->len, pathname, len+1);
+ oidcpy(&ce->oid, oid);
+ return add_index_entry(istate, ce, opt);
+}
+
+static int read_one_entry(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ struct index_state *istate = context;
+ return read_one_entry_opt(istate, oid, base, pathname,
+ mode,
+ ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+}
+
+/*
+ * This is used when the caller knows there are no existing entries at
+ * the stage that will conflict with the entry being added.
+ */
+static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ struct index_state *istate = context;
+ return read_one_entry_opt(istate, oid, base, pathname,
+ mode, ADD_CACHE_JUST_APPEND);
+}
+
/*
* Read the tree specified with --with-tree option
* (typically, HEAD) into stage #1 and then
@@ -436,6 +484,8 @@ void overlay_tree_on_index(struct index_state *istate,
struct pathspec pathspec;
struct cache_entry *last_stage0 = NULL;
int i;
+ read_tree_fn_t fn = NULL;
+ int err;
if (get_oid(tree_name, &oid))
die("tree-ish %s not found.", tree_name);
@@ -458,9 +508,32 @@ void overlay_tree_on_index(struct index_state *istate,
PATHSPEC_PREFER_CWD, prefix, matchbuf);
} else
memset(&pathspec, 0, sizeof(pathspec));
- if (read_tree(the_repository, tree, 1, &pathspec, istate))
+
+ /*
+ * See if we have a cache entry at the stage. If so,
+ * do it the original slow way; otherwise, append and then
+ * sort at the end.
+ */
+ for (i = 0; !fn && i < istate->cache_nr; i++) {
+ const struct cache_entry *ce = istate->cache[i];
+ if (ce_stage(ce) == 1)
+ fn = read_one_entry;
+ }
+
+ if (!fn)
+ fn = read_one_entry_quick;
+ err = read_tree(the_repository, tree, &pathspec, fn, istate);
+ if (err)
die("unable to read tree entries %s", tree_name);
+ /*
+ * Sort the cache entries -- we need to nuke the cache tree, though.
+ */
+ if (fn == read_one_entry_quick) {
+ cache_tree_free(&istate->cache_tree);
+ QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
+ }
+
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
switch (ce_stage(ce)) {
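Both read_one_entry() and read_one_entry_quick() follow the read_tree_fn_t contract the new read_tree() expects: return READ_TREE_RECURSIVE for a tree entry to descend into it, otherwise consume the blob and return zero (or a negative value to abort). A minimal hedged sketch of a callback that just counts blobs, to show the shape of the interface:

	/* counts blob entries; descends into every subdirectory */
	static int count_blobs(const struct object_id *oid, struct strbuf *base,
			       const char *pathname, unsigned mode, void *context)
	{
		int *nr = context;

		if (S_ISDIR(mode))
			return READ_TREE_RECURSIVE;
		(*nr)++;
		return 0;
	}

	/* usage: int nr = 0; read_tree(the_repository, tree, &pathspec, count_blobs, &nr); */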
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index abfa984737..1794548c71 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -124,8 +124,6 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
repo_set_hash_algo(the_repository, hash_algo);
}
- if (transport_disconnect(transport))
- return 1;
if (!dest && !quiet)
fprintf(stderr, "From %s\n", *remote->url);
@@ -151,5 +149,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
}
ref_array_clear(&ref_array);
+ if (transport_disconnect(transport))
+ return 1;
return status;
}
diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c
index 7cad3f24eb..3a442631c7 100644
--- a/builtin/ls-tree.c
+++ b/builtin/ls-tree.c
@@ -62,7 +62,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
}
static int show_tree(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, int stage, void *context)
+ const char *pathname, unsigned mode, void *context)
{
int retval = 0;
int baselen;
@@ -185,6 +185,6 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
tree = parse_tree_indirect(&oid);
if (!tree)
die("not a tree object");
- return !!read_tree_recursive(the_repository, tree, "", 0, 0,
- &pathspec, show_tree, NULL);
+ return !!read_tree(the_repository, tree,
+ &pathspec, show_tree, NULL);
}
diff --git a/builtin/mktag.c b/builtin/mktag.c
index 41a399a69e..dddcccdd36 100644
--- a/builtin/mktag.c
+++ b/builtin/mktag.c
@@ -14,15 +14,12 @@ static int option_strict = 1;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
-static int mktag_config(const char *var, const char *value, void *cb)
-{
- return fsck_config_internal(var, value, cb, &fsck_options);
-}
-
static int mktag_fsck_error_func(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
switch (msg_type) {
case FSCK_WARN:
@@ -91,9 +88,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix)
die_errno(_("could not read from stdin"));
fsck_options.error_func = mktag_fsck_error_func;
- fsck_set_msg_type(&fsck_options, "extraheaderentry", "warn");
+ fsck_set_msg_type_from_ids(&fsck_options, FSCK_MSG_EXTRA_HEADER_ENTRY,
+ FSCK_WARN);
/* config might set fsck.extraHeaderEntry=* again */
- git_config(mktag_config, NULL);
+ git_config(git_fsck_config, &fsck_options);
if (fsck_tag_standalone(NULL, buf.buf, buf.len, &fsck_options,
&tagged_oid, &tagged_type))
die(_("tag on stdin did not pass our strict fsck check"));
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 5bf88cd2a8..5d3ea445fd 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -4,67 +4,181 @@
#include "parse-options.h"
#include "midx.h"
#include "trace2.h"
+#include "object-store.h"
+#define BUILTIN_MIDX_WRITE_USAGE \
+ N_("git multi-pack-index [<options>] write [--preferred-pack=<pack>]")
+
+#define BUILTIN_MIDX_VERIFY_USAGE \
+ N_("git multi-pack-index [<options>] verify")
+
+#define BUILTIN_MIDX_EXPIRE_USAGE \
+ N_("git multi-pack-index [<options>] expire")
+
+#define BUILTIN_MIDX_REPACK_USAGE \
+ N_("git multi-pack-index [<options>] repack [--batch-size=<size>]")
+
+static char const * const builtin_multi_pack_index_write_usage[] = {
+ BUILTIN_MIDX_WRITE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_verify_usage[] = {
+ BUILTIN_MIDX_VERIFY_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_expire_usage[] = {
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_repack_usage[] = {
+ BUILTIN_MIDX_REPACK_USAGE,
+ NULL
+};
static char const * const builtin_multi_pack_index_usage[] = {
- N_("git multi-pack-index [<options>] (write|verify|expire|repack --batch-size=<size>)"),
+ BUILTIN_MIDX_WRITE_USAGE,
+ BUILTIN_MIDX_VERIFY_USAGE,
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ BUILTIN_MIDX_REPACK_USAGE,
NULL
};
static struct opts_multi_pack_index {
const char *object_dir;
+ const char *preferred_pack;
unsigned long batch_size;
- int progress;
+ unsigned flags;
} opts;
-int cmd_multi_pack_index(int argc, const char **argv,
- const char *prefix)
+static struct option common_opts[] = {
+ OPT_FILENAME(0, "object-dir", &opts.object_dir,
+ N_("object directory containing set of packfile and pack-index pairs")),
+ OPT_BIT(0, "progress", &opts.flags, N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+};
+
+static struct option *add_common_options(struct option *prev)
{
- unsigned flags = 0;
+ return parse_options_concat(common_opts, prev);
+}
+
+static int cmd_multi_pack_index_write(int argc, const char **argv)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_write_options[] = {
+ OPT_STRING(0, "preferred-pack", &opts.preferred_pack,
+ N_("preferred-pack"),
+ N_("pack for reuse when computing a multi-pack bitmap")),
+ OPT_END(),
+ };
+
+ options = add_common_options(builtin_multi_pack_index_write_options);
+
+ trace2_cmd_mode(argv[0]);
- static struct option builtin_multi_pack_index_options[] = {
- OPT_FILENAME(0, "object-dir", &opts.object_dir,
- N_("object directory containing set of packfile and pack-index pairs")),
- OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")),
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_write_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_write_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return write_midx_file(opts.object_dir, opts.preferred_pack,
+ opts.flags);
+}
+
+static int cmd_multi_pack_index_verify(int argc, const char **argv)
+{
+ struct option *options = common_opts;
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_verify_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_verify_usage,
+ options);
+
+ return verify_midx_file(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_expire(int argc, const char **argv)
+{
+ struct option *options = common_opts;
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_expire_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_expire_usage,
+ options);
+
+ return expire_midx_packs(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_repack(int argc, const char **argv)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_repack_options[] = {
OPT_MAGNITUDE(0, "batch-size", &opts.batch_size,
N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")),
OPT_END(),
};
+ options = add_common_options(builtin_multi_pack_index_repack_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options,
+ builtin_multi_pack_index_repack_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_repack_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return midx_repack(the_repository, opts.object_dir,
+ (size_t)opts.batch_size, opts.flags);
+}
+
+int cmd_multi_pack_index(int argc, const char **argv,
+ const char *prefix)
+{
+ struct option *builtin_multi_pack_index_options = common_opts;
+
git_config(git_default_config, NULL);
- opts.progress = isatty(2);
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, prefix,
builtin_multi_pack_index_options,
- builtin_multi_pack_index_usage, 0);
+ builtin_multi_pack_index_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
if (!opts.object_dir)
opts.object_dir = get_object_directory();
- if (opts.progress)
- flags |= MIDX_PROGRESS;
if (argc == 0)
+ goto usage;
+
+ if (!strcmp(argv[0], "repack"))
+ return cmd_multi_pack_index_repack(argc, argv);
+ else if (!strcmp(argv[0], "write"))
+ return cmd_multi_pack_index_write(argc, argv);
+ else if (!strcmp(argv[0], "verify"))
+ return cmd_multi_pack_index_verify(argc, argv);
+ else if (!strcmp(argv[0], "expire"))
+ return cmd_multi_pack_index_expire(argc, argv);
+ else {
+usage:
+ error(_("unrecognized subcommand: %s"), argv[0]);
usage_with_options(builtin_multi_pack_index_usage,
builtin_multi_pack_index_options);
-
- if (argc > 1) {
- die(_("too many arguments"));
- return 1;
}
-
- trace2_cmd_mode(argv[0]);
-
- if (!strcmp(argv[0], "repack"))
- return midx_repack(the_repository, opts.object_dir,
- (size_t)opts.batch_size, flags);
- if (opts.batch_size)
- die(_("--batch-size option is only for 'repack' subcommand"));
-
- if (!strcmp(argv[0], "write"))
- return write_midx_file(opts.object_dir, flags);
- if (!strcmp(argv[0], "verify"))
- return verify_midx_file(the_repository, opts.object_dir, flags);
- if (!strcmp(argv[0], "expire"))
- return expire_midx_packs(the_repository, opts.object_dir, flags);
-
- die(_("unrecognized subcommand: %s"), argv[0]);
}
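After this rewrite, cmd_multi_pack_index() is only a dispatcher: it parses the shared options with PARSE_OPT_STOP_AT_NON_OPTION, then hands the remaining argv to a per-subcommand function that concatenates its own options onto common_opts and re-parses. A reduced sketch of that shape (the subcommand and its option array below are placeholders, not part of the patch):

	static int cmd_multi_pack_index_example(int argc, const char **argv)
	{
		static struct option example_options[] = {
			OPT_END(),
		};
		struct option *options = add_common_options(example_options);

		trace2_cmd_mode(argv[0]);

		argc = parse_options(argc, argv, NULL, options,
				     builtin_multi_pack_index_usage,
				     PARSE_OPT_KEEP_UNKNOWN);
		if (argc)
			usage_with_options(builtin_multi_pack_index_usage, options);

		FREE_AND_NULL(options);
		return 0;
	}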
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 8319831514..a1e33d7507 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -815,8 +815,8 @@ static struct reused_chunk {
/* The offset of the first object of this chunk in the original
* packfile. */
off_t original;
- /* The offset of the first object of this chunk in the generated
- * packfile minus "original". */
+ /* The difference of "original" minus the offset of the first object of
+ * this chunk in the generated packfile. */
off_t difference;
} *reused_chunks;
static int reused_chunks_nr;
@@ -3547,6 +3547,37 @@ static void record_recent_commit(struct commit *commit, void *data)
oid_array_append(&recent_objects, &commit->object.oid);
}
+static int mark_bitmap_preferred_tip(const char *refname,
+ const struct object_id *oid, int flags,
+ void *_data)
+{
+ struct object_id peeled;
+ struct object *object;
+
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+
+ object = parse_object_or_die(oid, refname);
+ if (object->type == OBJ_COMMIT)
+ object->flags |= NEEDS_BITMAP;
+
+ return 0;
+}
+
+static void mark_bitmap_preferred_tips(void)
+{
+ struct string_list_item *item;
+ const struct string_list *preferred_tips;
+
+ preferred_tips = bitmap_preferred_tips(the_repository);
+ if (!preferred_tips)
+ return;
+
+ for_each_string_list_item(item, preferred_tips) {
+ for_each_ref_in(item->string, mark_bitmap_preferred_tip, NULL);
+ }
+}
+
static void get_object_list(int ac, const char **av)
{
struct rev_info revs;
@@ -3601,6 +3632,9 @@ static void get_object_list(int ac, const char **av)
if (use_delta_islands)
load_delta_islands(the_repository, progress);
+ if (write_bitmap_index)
+ mark_bitmap_preferred_tips();
+
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge, sparse);
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
index 78bc9fa770..50318849d6 100644
--- a/builtin/range-diff.c
+++ b/builtin/range-diff.c
@@ -25,7 +25,7 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
struct option range_diff_options[] = {
OPT_INTEGER(0, "creation-factor",
&range_diff_opts.creation_factor,
- N_("Percentage by which creation is weighted")),
+ N_("percentage by which creation is weighted")),
OPT_BOOL(0, "no-dual-color", &simple_color,
N_("use simple diff colors")),
OPT_PASSTHRU_ARGV(0, "notes", &other_arg,
diff --git a/builtin/rebase.c b/builtin/rebase.c
index de400f9a19..ed1da1760e 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -100,7 +100,6 @@ struct rebase_options {
char *strategy, *strategy_opts;
struct strbuf git_format_patch_opt;
int reschedule_failed_exec;
- int use_legacy_rebase;
int reapply_cherry_picks;
int fork_point;
};
@@ -739,6 +738,7 @@ static int finish_rebase(struct rebase_options *opts)
int ret = 0;
delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
+ unlink(git_path_auto_merge(the_repository));
apply_autostash(state_dir_path("autostash", opts));
close_object_store(the_repository->objects);
/*
@@ -1102,11 +1102,6 @@ static int rebase_config(const char *var, const char *value, void *data)
return 0;
}
- if (!strcmp(var, "rebase.usebuiltin")) {
- opts->use_legacy_rebase = !git_config_bool(var, value);
- return 0;
- }
-
if (!strcmp(var, "rebase.backend")) {
return git_config_string(&opts->default_backend, var, value);
}
@@ -1441,11 +1436,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
gpg_sign = options.gpg_sign_opt ? "" : NULL;
FREE_AND_NULL(options.gpg_sign_opt);
- if (options.use_legacy_rebase ||
- !git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1))
- warning(_("the rebase.useBuiltin support has been removed!\n"
- "See its entry in 'git help config' for details."));
-
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/applying", apply_dir());
if(file_exists(buf.buf))
diff --git a/builtin/remote.c b/builtin/remote.c
index 717b662d45..7f88e6ce9d 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -938,9 +938,6 @@ static int get_remote_ref_states(const char *name,
struct ref_states *states,
int query)
{
- struct transport *transport;
- const struct ref *remote_refs;
-
states->remote = remote_get(name);
if (!states->remote)
return error(_("No such remote: '%s'"), name);
@@ -948,10 +945,12 @@ static int get_remote_ref_states(const char *name,
read_branches();
if (query) {
+ struct transport *transport;
+ const struct ref *remote_refs;
+
transport = transport_get(states->remote, states->remote->url_nr > 0 ?
states->remote->url[0] : NULL);
remote_refs = transport_get_remote_refs(transport, NULL);
- transport_disconnect(transport);
states->queried = 1;
if (query & GET_REF_STATES)
@@ -960,6 +959,7 @@ static int get_remote_ref_states(const char *name,
get_head_names(remote_refs, states);
if (query & GET_PUSH_REF_STATES)
get_push_ref_states(remote_refs, states);
+ transport_disconnect(transport);
} else {
for_each_ref(append_ref_to_tracked_list, states);
string_list_sort(&states->tracked);
diff --git a/builtin/repack.c b/builtin/repack.c
index 6ce2556c9e..2847fdfbab 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -721,7 +721,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
remove_temporary_files();
if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0))
- write_midx_file(get_object_directory(), 0);
+ write_midx_file(get_object_directory(), NULL, 0);
string_list_clear(&names, 0);
string_list_clear(&rollback, 0);
diff --git a/builtin/reset.c b/builtin/reset.c
index c635b062c3..43e855cb88 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -425,7 +425,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
dwim_ref(rev, strlen(rev), &dummy, &ref, 0);
if (ref && !starts_with(ref, "refs/"))
- ref = NULL;
+ FREE_AND_NULL(ref);
err = reset_index(ref, &oid, reset_type, quiet);
if (reset_type == KEEP && !err)
diff --git a/builtin/revert.c b/builtin/revert.c
index 314a86c562..237f2f18d4 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -182,7 +182,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
"--signoff", opts->signoff,
"--no-commit", opts->no_commit,
"-x", opts->record_origin,
- "--edit", opts->edit,
+ "--edit", opts->edit > 0,
NULL);
if (cmd) {
@@ -230,8 +230,6 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
struct replay_opts opts = REPLAY_OPTS_INIT;
int res;
- if (isatty(0))
- opts.edit = 1;
opts.action = REPLAY_REVERT;
sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
diff --git a/builtin/stash.c b/builtin/stash.c
index 3477e940e3..c56fed3354 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -10,11 +10,13 @@
#include "strvec.h"
#include "run-command.h"
#include "dir.h"
+#include "entry.h"
#include "rerere.h"
#include "revision.h"
#include "log-tree.h"
#include "diffcore.h"
#include "exec-cmd.h"
+#include "entry.h"
#define INCLUDE_ALL_FILES 2
diff --git a/builtin/symbolic-ref.c b/builtin/symbolic-ref.c
index 80237f0df1..e547a08d6c 100644
--- a/builtin/symbolic-ref.c
+++ b/builtin/symbolic-ref.c
@@ -24,9 +24,11 @@ static int check_symref(const char *HEAD, int quiet, int shorten, int print)
return 1;
}
if (print) {
+ char *to_free = NULL;
if (shorten)
- refname = shorten_unambiguous_ref(refname, 0);
+ refname = to_free = shorten_unambiguous_ref(refname, 0);
puts(refname);
+ free(to_free);
}
return 0;
}
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index a4ba2ebac6..4a70b17f8f 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -187,7 +187,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
* that have reachability requirements and calls this function.
* Verify its reachability and validity recursively and write it out.
*/
-static int check_object(struct object *obj, int type, void *data, struct fsck_options *options)
+static int check_object(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
{
struct obj_buffer *obj_buf;
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 1cd5c2016e..8771453493 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -446,16 +446,18 @@ static void print_preparing_worktree_line(int detach,
static const char *dwim_branch(const char *path, const char **new_branch)
{
int n;
+ int branch_exists;
const char *s = worktree_basename(path, &n);
const char *branchname = xstrndup(s, n);
struct strbuf ref = STRBUF_INIT;
UNLEAK(branchname);
- if (!strbuf_check_branch_ref(&ref, branchname) &&
- ref_exists(ref.buf)) {
- strbuf_release(&ref);
+
+ branch_exists = !strbuf_check_branch_ref(&ref, branchname) &&
+ ref_exists(ref.buf);
+ strbuf_release(&ref);
+ if (branch_exists)
return branchname;
- }
*new_branch = branchname;
if (guess_remote) {
diff --git a/cache.h b/cache.h
index 6fda8091f1..148d9ab5f1 100644
--- a/cache.h
+++ b/cache.h
@@ -803,7 +803,7 @@ static inline int index_pos_to_insert_pos(uintmax_t pos)
#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
#define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
-#define ADD_CACHE_JUST_APPEND 8 /* Append only; tree.c::read_tree() */
+#define ADD_CACHE_JUST_APPEND 8 /* Append only */
#define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */
#define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */
#define ADD_CACHE_RENORMALIZE 64 /* Pass along HASH_RENORMALIZE */
@@ -1621,30 +1621,6 @@ const char *show_ident_date(const struct ident_split *id,
*/
int ident_cmp(const struct ident_split *, const struct ident_split *);
-struct checkout {
- struct index_state *istate;
- const char *base_dir;
- int base_dir_len;
- struct delayed_checkout *delayed_checkout;
- struct checkout_metadata meta;
- unsigned force:1,
- quiet:1,
- not_new:1,
- clone:1,
- refresh_cache:1;
-};
-#define CHECKOUT_INIT { NULL, "" }
-
-#define TEMPORARY_FILENAME_LENGTH 25
-int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts);
-void enable_delayed_checkout(struct checkout *state);
-int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
-/*
- * Unlink the last component and schedule the leading directories for
- * removal, such that empty directories get removed.
- */
-void unlink_entry(const struct cache_entry *ce);
-
struct cache_def {
struct strbuf path;
int flags;
@@ -1659,7 +1635,7 @@ static inline void cache_def_clear(struct cache_def *cache)
int has_symlink_leading_path(const char *name, int len);
int threaded_has_symlink_leading_path(struct cache_def *, const char *, int);
-int check_leading_path(const char *name, int len);
+int check_leading_path(const char *name, int len, int warn_on_lstat_err);
int has_dirs_only_path(const char *name, int len, int prefix_len);
void invalidate_lstat_cache(void);
void schedule_dir_for_removal(const char *name, int len);
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index a66b5e8c75..d19be40544 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -16,6 +16,7 @@ linux-gcc)
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
make test
export GIT_TEST_SPLIT_INDEX=yes
+ export GIT_TEST_MERGE_ALGORITHM=recursive
export GIT_TEST_FULL_IN_PACK_ARRAY=true
export GIT_TEST_OE_SIZE=10
export GIT_TEST_OE_DELTA_SIZE=5
diff --git a/compat/precompose_utf8.c b/compat/precompose_utf8.c
index ec560565a8..cce1d57a46 100644
--- a/compat/precompose_utf8.c
+++ b/compat/precompose_utf8.c
@@ -60,10 +60,12 @@ void probe_utf8_pathname_composition(void)
strbuf_release(&path);
}
-static inline const char *precompose_string_if_needed(const char *in)
+const char *precompose_string_if_needed(const char *in)
{
size_t inlen;
size_t outlen;
+ if (!in)
+ return NULL;
if (has_non_ascii(in, (size_t)-1, &inlen)) {
iconv_t ic_prec;
char *out;
@@ -96,10 +98,7 @@ const char *precompose_argv_prefix(int argc, const char **argv, const char *pref
argv[i] = precompose_string_if_needed(argv[i]);
i++;
}
- if (prefix) {
- prefix = precompose_string_if_needed(prefix);
- }
- return prefix;
+ return precompose_string_if_needed(prefix);
}
diff --git a/compat/precompose_utf8.h b/compat/precompose_utf8.h
index d70b84665c..fea06cf28a 100644
--- a/compat/precompose_utf8.h
+++ b/compat/precompose_utf8.h
@@ -29,6 +29,7 @@ typedef struct {
} PREC_DIR;
const char *precompose_argv_prefix(int argc, const char **argv, const char *prefix);
+const char *precompose_string_if_needed(const char *in);
void probe_utf8_pathname_composition(void);
PREC_DIR *precompose_utf8_opendir(const char *dirname);
diff --git a/compat/simple-ipc/ipc-shared.c b/compat/simple-ipc/ipc-shared.c
new file mode 100644
index 0000000000..1edec81595
--- /dev/null
+++ b/compat/simple-ipc/ipc-shared.c
@@ -0,0 +1,28 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+
+#ifdef SUPPORTS_SIMPLE_IPC
+
+int ipc_server_run(const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct ipc_server_data *server_data = NULL;
+ int ret;
+
+ ret = ipc_server_run_async(&server_data, path, opts,
+ application_cb, application_data);
+ if (ret)
+ return ret;
+
+ ret = ipc_server_await(server_data);
+
+ ipc_server_free(server_data);
+
+ return ret;
+}
+
+#endif /* SUPPORTS_SIMPLE_IPC */
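ipc_server_run() is the synchronous wrapper around the async API: it spins up the thread pool, blocks in ipc_server_await(), and then frees the server state. A hedged sketch of a trivial application callback and how it could be plugged in, assuming the ipc_server_application_cb and ipc_server_reply_cb typedefs in simple-ipc.h match the call sites in ipc-unix-socket.c below (the socket path and thread count are made up):

	/* echoes every request back to the client; "quit" shuts the server down */
	static int echo_application_cb(void *application_data,
				       const char *request,
				       ipc_server_reply_cb *reply_cb,
				       struct ipc_server_reply_data *reply_data)
	{
		if (!strcmp(request, "quit"))
			return SIMPLE_IPC_QUIT;	/* worker queues an async shutdown */

		reply_cb(reply_data, request, strlen(request));
		return 0;
	}

	/*
	 * Possible call site:
	 *
	 *	struct ipc_server_opts opts = { .nr_threads = 4 };
	 *
	 *	if (ipc_server_run("/tmp/example-ipc", &opts,
	 *			   echo_application_cb, NULL))
	 *		die(_("IPC server failed"));
	 */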
diff --git a/compat/simple-ipc/ipc-unix-socket.c b/compat/simple-ipc/ipc-unix-socket.c
new file mode 100644
index 0000000000..38689b278d
--- /dev/null
+++ b/compat/simple-ipc/ipc-unix-socket.c
@@ -0,0 +1,999 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+#include "unix-socket.h"
+#include "unix-stream-server.h"
+
+#ifdef NO_UNIX_SOCKETS
+#error compat/simple-ipc/ipc-unix-socket.c requires Unix sockets
+#endif
+
+enum ipc_active_state ipc_get_active_state(const char *path)
+{
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+ struct stat st;
+ struct ipc_client_connection *connection_test = NULL;
+
+ options.wait_if_busy = 0;
+ options.wait_if_not_found = 0;
+
+ if (lstat(path, &st) == -1) {
+ switch (errno) {
+ case ENOENT:
+ case ENOTDIR:
+ return IPC_STATE__NOT_LISTENING;
+ default:
+ return IPC_STATE__INVALID_PATH;
+ }
+ }
+
+ /* also complain if a plain file is in the way */
+ if ((st.st_mode & S_IFMT) != S_IFSOCK)
+ return IPC_STATE__INVALID_PATH;
+
+ /*
+ * Just because the filesystem has an S_IFSOCK type inode
+ * at `path` doesn't mean that there is a server listening.
+ * Ping it to be sure.
+ */
+ state = ipc_client_try_connect(path, &options, &connection_test);
+ ipc_client_close_connection(connection_test);
+
+ return state;
+}
+
+/*
+ * Retry frequency when trying to connect to a server.
+ *
+ * This value should be short enough that we don't seriously delay our
+ * caller, but not so short that our spinning puts pressure on the
+ * system.
+ */
+#define WAIT_STEP_MS (50)
+
+/*
+ * Try to connect to the server. If the server is just starting up or
+ * is very busy, we may not get a connection the first time.
+ */
+static enum ipc_active_state connect_to_server(
+ const char *path,
+ int timeout_ms,
+ const struct ipc_client_connect_options *options,
+ int *pfd)
+{
+ int k;
+
+ *pfd = -1;
+
+ for (k = 0; k < timeout_ms; k += WAIT_STEP_MS) {
+ int fd = unix_stream_connect(path, options->uds_disallow_chdir);
+
+ if (fd != -1) {
+ *pfd = fd;
+ return IPC_STATE__LISTENING;
+ }
+
+ if (errno == ENOENT) {
+ if (!options->wait_if_not_found)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ goto sleep_and_try_again;
+ }
+
+ if (errno == ETIMEDOUT) {
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+
+ goto sleep_and_try_again;
+ }
+
+ if (errno == ECONNREFUSED) {
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+
+ goto sleep_and_try_again;
+ }
+
+ return IPC_STATE__OTHER_ERROR;
+
+ sleep_and_try_again:
+ sleep_millisec(WAIT_STEP_MS);
+ }
+
+ return IPC_STATE__NOT_LISTENING;
+}
+
+/*
+ * The total amount of time that we are willing to wait when trying to
+ * connect to a server.
+ *
+ * When the server is first started, it might take a little while for
+ * it to become ready to service requests. Likewise, the server may
+ * be very (temporarily) busy and not respond to our connections.
+ *
+ * We should gracefully and silently handle those conditions and try
+ * again for a reasonable time period.
+ *
+ * The value chosen here should be long enough for the server
+ * to reliably heal from the above conditions.
+ */
+#define MY_CONNECTION_TIMEOUT_MS (1000)
+
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection)
+{
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ int fd = -1;
+
+ *p_connection = NULL;
+
+ trace2_region_enter("ipc-client", "try-connect", NULL);
+ trace2_data_string("ipc-client", NULL, "try-connect/path", path);
+
+ state = connect_to_server(path, MY_CONNECTION_TIMEOUT_MS,
+ options, &fd);
+
+ trace2_data_intmax("ipc-client", NULL, "try-connect/state",
+ (intmax_t)state);
+ trace2_region_leave("ipc-client", "try-connect", NULL);
+
+ if (state == IPC_STATE__LISTENING) {
+ (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
+ (*p_connection)->fd = fd;
+ }
+
+ return state;
+}
+
+void ipc_client_close_connection(struct ipc_client_connection *connection)
+{
+ if (!connection)
+ return;
+
+ if (connection->fd != -1)
+ close(connection->fd);
+
+ free(connection);
+}
+
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer)
+{
+ int ret = 0;
+
+ strbuf_setlen(answer, 0);
+
+ trace2_region_enter("ipc-client", "send-command", NULL);
+
+ if (write_packetized_from_buf_no_flush(message, strlen(message),
+ connection->fd) < 0 ||
+ packet_flush_gently(connection->fd) < 0) {
+ ret = error(_("could not send IPC command"));
+ goto done;
+ }
+
+ if (read_packetized_to_strbuf(
+ connection->fd, answer,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
+ ret = error(_("could not read IPC response"));
+ goto done;
+ }
+
+done:
+ trace2_region_leave("ipc-client", "send-command", NULL);
+ return ret;
+}
+
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *answer)
+{
+ int ret = -1;
+ enum ipc_active_state state;
+ struct ipc_client_connection *connection = NULL;
+
+ state = ipc_client_try_connect(path, options, &connection);
+
+ if (state != IPC_STATE__LISTENING)
+ return ret;
+
+ ret = ipc_client_send_command_to_connection(connection, message, answer);
+
+ ipc_client_close_connection(connection);
+
+ return ret;
+}
+
+static int set_socket_blocking_flag(int fd, int make_nonblocking)
+{
+ int flags;
+
+ flags = fcntl(fd, F_GETFL, NULL);
+
+ if (flags < 0)
+ return -1;
+
+ if (make_nonblocking)
+ flags |= O_NONBLOCK;
+ else
+ flags &= ~O_NONBLOCK;
+
+ return fcntl(fd, F_SETFL, flags);
+}
+
+/*
+ * Magic numbers used to annotate callback instance data.
+ * These are used to help guard against accidentally passing the
+ * wrong instance data across multiple levels of callbacks (which
+ * is easy to do if there are `void*` arguments).
+ */
+enum magic {
+ MAGIC_SERVER_REPLY_DATA,
+ MAGIC_WORKER_THREAD_DATA,
+ MAGIC_ACCEPT_THREAD_DATA,
+ MAGIC_SERVER_DATA,
+};
+
+struct ipc_server_reply_data {
+ enum magic magic;
+ int fd;
+ struct ipc_worker_thread_data *worker_thread_data;
+};
+
+struct ipc_worker_thread_data {
+ enum magic magic;
+ struct ipc_worker_thread_data *next_thread;
+ struct ipc_server_data *server_data;
+ pthread_t pthread_id;
+};
+
+struct ipc_accept_thread_data {
+ enum magic magic;
+ struct ipc_server_data *server_data;
+
+ struct unix_ss_socket *server_socket;
+
+ int fd_send_shutdown;
+ int fd_wait_shutdown;
+ pthread_t pthread_id;
+};
+
+/*
+ * With unix-sockets, the conceptual "ipc-server" is implemented as a single
+ * controller "accept-thread" thread and a pool of "worker-thread" threads.
+ * The former does the usual `accept()` loop and dispatches connections
+ * to an idle worker thread. The worker threads wait in an idle loop for
+ * a new connection, communicate with the client and relay data to/from
+ * the `application_cb` and then wait for another connection from the
+ * server thread. This avoids the overhead of constantly creating and
+ * destroying threads.
+ */
+struct ipc_server_data {
+ enum magic magic;
+ ipc_server_application_cb *application_cb;
+ void *application_data;
+ struct strbuf buf_path;
+
+ struct ipc_accept_thread_data *accept_thread;
+ struct ipc_worker_thread_data *worker_thread_list;
+
+ pthread_mutex_t work_available_mutex;
+ pthread_cond_t work_available_cond;
+
+ /*
+ * Accepted but not yet processed client connections are kept
+ * in a circular buffer FIFO. The queue is empty when the
+ * positions are equal.
+ */
+ int *fifo_fds;
+ int queue_size;
+ int back_pos;
+ int front_pos;
+
+ int shutdown_requested;
+ int is_stopped;
+};
+
+/*
+ * Remove and return the oldest queued connection.
+ *
+ * Returns -1 if empty.
+ */
+static int fifo_dequeue(struct ipc_server_data *server_data)
+{
+ /* ASSERT holding mutex */
+
+ int fd;
+
+ if (server_data->back_pos == server_data->front_pos)
+ return -1;
+
+ fd = server_data->fifo_fds[server_data->front_pos];
+ server_data->fifo_fds[server_data->front_pos] = -1;
+
+ server_data->front_pos++;
+ if (server_data->front_pos == server_data->queue_size)
+ server_data->front_pos = 0;
+
+ return fd;
+}
+
+/*
+ * Push a new fd onto the back of the queue.
+ *
+ * Drop it and return -1 if queue is already full.
+ */
+static int fifo_enqueue(struct ipc_server_data *server_data, int fd)
+{
+ /* ASSERT holding mutex */
+
+ int next_back_pos;
+
+ next_back_pos = server_data->back_pos + 1;
+ if (next_back_pos == server_data->queue_size)
+ next_back_pos = 0;
+
+ if (next_back_pos == server_data->front_pos) {
+ /* Queue is full. Just drop it. */
+ close(fd);
+ return -1;
+ }
+
+ server_data->fifo_fds[server_data->back_pos] = fd;
+ server_data->back_pos = next_back_pos;
+
+ return fd;
+}
+
+/*
+ * Wait for a connection to be queued to the FIFO and return it.
+ *
+ * Returns -1 if someone has already requested a shutdown.
+ */
+static int worker_thread__wait_for_connection(
+ struct ipc_worker_thread_data *worker_thread_data)
+{
+ /* ASSERT NOT holding mutex */
+
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ int fd = -1;
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ for (;;) {
+ if (server_data->shutdown_requested)
+ break;
+
+ fd = fifo_dequeue(server_data);
+ if (fd >= 0)
+ break;
+
+ pthread_cond_wait(&server_data->work_available_cond,
+ &server_data->work_available_mutex);
+ }
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ return fd;
+}
+
+/*
+ * Forward declare our reply callback function so that any compiler
+ * errors are reported when we actually define the function (in addition
+ * to any errors reported when we try to pass this callback function as
+ * a parameter in a function call). The former are easier to understand.
+ */
+static ipc_server_reply_cb do_io_reply_callback;
+
+/*
+ * Relay application's response message to the client process.
+ * (We do not flush at this point because we allow the caller
+ * to chunk data to the client thru us.)
+ */
+static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
+ const char *response, size_t response_len)
+{
+ if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
+ BUG("reply_cb called with wrong instance data");
+
+ return write_packetized_from_buf_no_flush(response, response_len,
+ reply_data->fd);
+}
+
+/* A randomly chosen value. */
+#define MY_WAIT_POLL_TIMEOUT_MS (10)
+
+/*
+ * If the client hangs up without sending any data on the wire, just
+ * quietly close the socket and ignore this client.
+ *
+ * This worker thread is committed to reading the IPC request data
+ * from the client at the other end of this fd. Wait here for the
+ * client to actually put something on the wire -- because if the
+ * client just does a ping (connect and hangup without sending any
+ * data), our use of the pkt-line read routines will spew an error
+ * message.
+ *
+ * Return -1 if the client hung up.
+ * Return 0 if data (possibly incomplete) is ready.
+ */
+static int worker_thread__wait_for_io_start(
+ struct ipc_worker_thread_data *worker_thread_data,
+ int fd)
+{
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ struct pollfd pollfd[1];
+ int result;
+
+ for (;;) {
+ pollfd[0].fd = fd;
+ pollfd[0].events = POLLIN;
+
+ result = poll(pollfd, 1, MY_WAIT_POLL_TIMEOUT_MS);
+ if (result < 0) {
+ if (errno == EINTR)
+ continue;
+ goto cleanup;
+ }
+
+ if (result == 0) {
+ /* a timeout */
+
+ int in_shutdown;
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ in_shutdown = server_data->shutdown_requested;
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ /*
+ * If a shutdown is already in progress and this
+ * client has not started talking yet, just drop it.
+ */
+ if (in_shutdown)
+ goto cleanup;
+ continue;
+ }
+
+ if (pollfd[0].revents & POLLHUP)
+ goto cleanup;
+
+ if (pollfd[0].revents & POLLIN)
+ return 0;
+
+ goto cleanup;
+ }
+
+cleanup:
+ close(fd);
+ return -1;
+}
+
+/*
+ * Receive the request/command from the client and pass it to the
+ * registered request-callback. The request-callback will compose
+ * a response and call our reply-callback to send it to the client.
+ */
+static int worker_thread__do_io(
+ struct ipc_worker_thread_data *worker_thread_data,
+ int fd)
+{
+ /* ASSERT NOT holding lock */
+
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_server_reply_data reply_data;
+ int ret = 0;
+
+ reply_data.magic = MAGIC_SERVER_REPLY_DATA;
+ reply_data.worker_thread_data = worker_thread_data;
+
+ reply_data.fd = fd;
+
+ ret = read_packetized_to_strbuf(
+ reply_data.fd, &buf,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
+ if (ret >= 0) {
+ ret = worker_thread_data->server_data->application_cb(
+ worker_thread_data->server_data->application_data,
+ buf.buf, do_io_reply_callback, &reply_data);
+
+ packet_flush_gently(reply_data.fd);
+ }
+ else {
+ /*
+ * The client probably disconnected/shutdown before it
+ * could send a well-formed message. Ignore it.
+ */
+ }
+
+ strbuf_release(&buf);
+ close(reply_data.fd);
+
+ return ret;
+}
+
+/*
+ * Block SIGPIPE on the current thread (so that we get EPIPE from
+ * write() rather than an actual signal).
+ *
+ * Note that using sigchain_push() and _pop() to control SIGPIPE
+ * around our IO calls is not thread safe:
+ * [] It uses a global stack of handler frames.
+ * [] It uses ALLOC_GROW() to resize it.
+ * [] Finally, according to the `signal(2)` man-page:
+ * "The effects of `signal()` in a multithreaded process are unspecified."
+ */
+static void thread_block_sigpipe(sigset_t *old_set)
+{
+ sigset_t new_set;
+
+ sigemptyset(&new_set);
+ sigaddset(&new_set, SIGPIPE);
+
+ sigemptyset(old_set);
+ pthread_sigmask(SIG_BLOCK, &new_set, old_set);
+}
+
+/*
+ * Thread proc for an IPC worker thread. It handles a series of
+ * connections from clients. It pulls the next fd from the queue
+ * processes it, and then waits for the next client.
+ *
+ * Block SIGPIPE in this worker thread for the life of the thread.
+ * This avoids stray (and sometimes delayed) SIGPIPE signals caused
+ * by client errors and/or when we are under extremely heavy IO load.
+ *
+ * This means that the application callback will have SIGPIPE blocked.
+ * The callback should not change it.
+ */
+static void *worker_thread_proc(void *_worker_thread_data)
+{
+ struct ipc_worker_thread_data *worker_thread_data = _worker_thread_data;
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ sigset_t old_set;
+ int fd, io;
+ int ret;
+
+ trace2_thread_start("ipc-worker");
+
+ thread_block_sigpipe(&old_set);
+
+ for (;;) {
+ fd = worker_thread__wait_for_connection(worker_thread_data);
+ if (fd == -1)
+ break; /* in shutdown */
+
+ io = worker_thread__wait_for_io_start(worker_thread_data, fd);
+ if (io == -1)
+ continue; /* client hung up without sending anything */
+
+ ret = worker_thread__do_io(worker_thread_data, fd);
+
+ if (ret == SIMPLE_IPC_QUIT) {
+ trace2_data_string("ipc-worker", NULL, "queue_stop_async",
+ "application_quit");
+ /*
+ * The application layer is telling the ipc-server
+ * layer to shutdown.
+ *
+ * We DO NOT have a response to send to the client.
+ *
+ * Queue an async stop (to stop the other threads) and
+ * allow this worker thread to exit now (no sense waiting
+ * for the thread-pool shutdown signal).
+ *
+ * Other non-idle worker threads are allowed to finish
+ * responding to their current clients.
+ */
+ ipc_server_stop_async(server_data);
+ break;
+ }
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/* A randomly chosen value. */
+#define MY_ACCEPT_POLL_TIMEOUT_MS (60 * 1000)
+
+/*
+ * Accept a new client connection on our socket. This uses non-blocking
+ * IO so that we can also wait for shutdown requests on our socket-pair
+ * without actually spinning on a fast timeout.
+ */
+static int accept_thread__wait_for_connection(
+ struct ipc_accept_thread_data *accept_thread_data)
+{
+ struct pollfd pollfd[2];
+ int result;
+
+ for (;;) {
+ pollfd[0].fd = accept_thread_data->fd_wait_shutdown;
+ pollfd[0].events = POLLIN;
+
+ pollfd[1].fd = accept_thread_data->server_socket->fd_socket;
+ pollfd[1].events = POLLIN;
+
+ result = poll(pollfd, 2, MY_ACCEPT_POLL_TIMEOUT_MS);
+ if (result < 0) {
+ if (errno == EINTR)
+ continue;
+ return result;
+ }
+
+ if (result == 0) {
+ /* a timeout */
+
+ /*
+ * If someone deletes or force-creates a new unix
+ * domain socket at our path, all future clients
+ * will be routed elsewhere and we silently starve.
+ * If that happens, just queue a shutdown.
+ */
+ if (unix_ss_was_stolen(
+ accept_thread_data->server_socket)) {
+ trace2_data_string("ipc-accept", NULL,
+ "queue_stop_async",
+ "socket_stolen");
+ ipc_server_stop_async(
+ accept_thread_data->server_data);
+ }
+ continue;
+ }
+
+ if (pollfd[0].revents & POLLIN) {
+ /* shutdown message queued to socketpair */
+ return -1;
+ }
+
+ if (pollfd[1].revents & POLLIN) {
+ /* a connection is available on server_socket */
+
+ int client_fd =
+ accept(accept_thread_data->server_socket->fd_socket,
+ NULL, NULL);
+ if (client_fd >= 0)
+ return client_fd;
+
+ /*
+ * An error here is unlikely -- it probably
+ * indicates that the connecting process has
+ * already dropped the connection.
+ */
+ continue;
+ }
+
+ BUG("unandled poll result errno=%d r[0]=%d r[1]=%d",
+ errno, pollfd[0].revents, pollfd[1].revents);
+ }
+}
+
+/*
+ * Thread proc for the IPC server "accept thread". This waits for
+ * an incoming socket connection, appends it to the queue of available
+ * connections, and notifies a worker thread to process it.
+ *
+ * Block SIGPIPE in this thread for the life of the thread. This
+ * avoids any stray SIGPIPE signals when closing pipe fds under
+ * extremely heavy loads (such as when the fifo queue is full and we
+ * drop incoming connections).
+ */
+static void *accept_thread_proc(void *_accept_thread_data)
+{
+ struct ipc_accept_thread_data *accept_thread_data = _accept_thread_data;
+ struct ipc_server_data *server_data = accept_thread_data->server_data;
+ sigset_t old_set;
+
+ trace2_thread_start("ipc-accept");
+
+ thread_block_sigpipe(&old_set);
+
+ for (;;) {
+ int client_fd = accept_thread__wait_for_connection(
+ accept_thread_data);
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ if (server_data->shutdown_requested) {
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+ if (client_fd >= 0)
+ close(client_fd);
+ break;
+ }
+
+ if (client_fd < 0) {
+ /* ignore transient accept() errors */
+ }
+ else {
+ fifo_enqueue(server_data, client_fd);
+ pthread_cond_broadcast(&server_data->work_available_cond);
+ }
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/*
+ * We can't predict the connection arrival rate relative to the worker
+ * processing rate, so we allow the "accept-thread" to queue up a
+ * generous number of connections, since we'd rather not have the
+ * client time out unnecessarily if we can avoid it. (The assumption
+ * is that this will be used for FSMonitor and that a few-second wait
+ * on a connection is better than having the client time out and do
+ * the full computation itself.)
+ *
+ * The FIFO queue size is set to a multiple of the worker pool size.
+ * This value was chosen at random.
+ */
+#define FIFO_SCALE (100)
+
+/*
+ * The backlog value for `listen(2)`. This doesn't need to be huge,
+ * just large enough for our "accept-thread" to wake up and queue
+ * incoming connections onto the FIFO without the kernel dropping
+ * any.
+ *
+ * This value was chosen at random.
+ */
+#define LISTEN_BACKLOG (50)
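+
+/*
+ * A worked example of these two knobs (the thread count here is
+ * hypothetical): with a pool of 8 worker threads, the in-process FIFO
+ * can hold up to 8 * FIFO_SCALE = 800 accepted connections, while the
+ * kernel queues at most LISTEN_BACKLOG = 50 not-yet-accepted
+ * connections on the listening socket.
+ */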
+
+static int create_listener_socket(
+ const char *path,
+ const struct ipc_server_opts *ipc_opts,
+ struct unix_ss_socket **new_server_socket)
+{
+ struct unix_ss_socket *server_socket = NULL;
+ struct unix_stream_listen_opts uslg_opts = UNIX_STREAM_LISTEN_OPTS_INIT;
+ int ret;
+
+ uslg_opts.listen_backlog_size = LISTEN_BACKLOG;
+ uslg_opts.disallow_chdir = ipc_opts->uds_disallow_chdir;
+
+ ret = unix_ss_create(path, &uslg_opts, -1, &server_socket);
+ if (ret)
+ return ret;
+
+ if (set_socket_blocking_flag(server_socket->fd_socket, 1)) {
+ int saved_errno = errno;
+ unix_ss_free(server_socket);
+ errno = saved_errno;
+ return -1;
+ }
+
+ *new_server_socket = server_socket;
+
+ trace2_data_string("ipc-server", NULL, "listen-with-lock", path);
+ return 0;
+}
+
+static int setup_listener_socket(
+ const char *path,
+ const struct ipc_server_opts *ipc_opts,
+ struct unix_ss_socket **new_server_socket)
+{
+ int ret, saved_errno;
+
+ trace2_region_enter("ipc-server", "create-listener_socket", NULL);
+
+ ret = create_listener_socket(path, ipc_opts, new_server_socket);
+
+ saved_errno = errno;
+ trace2_region_leave("ipc-server", "create-listener_socket", NULL);
+ errno = saved_errno;
+
+ return ret;
+}
+
+/*
+ * Start IPC server in a pool of background threads.
+ */
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct unix_ss_socket *server_socket = NULL;
+ struct ipc_server_data *server_data;
+ int sv[2];
+ int k;
+ int ret;
+ int nr_threads = opts->nr_threads;
+
+ *returned_server_data = NULL;
+
+ /*
+ * Create a socketpair and set sv[1] to non-blocking. This
+ * will be used to send a shutdown message to the accept-thread
+ * and allows the accept-thread to wait on EITHER a client
+ * connection or a shutdown request without spinning.
+ */
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
+ return -1;
+
+ if (set_socket_blocking_flag(sv[1], 1)) {
+ int saved_errno = errno;
+ close(sv[0]);
+ close(sv[1]);
+ errno = saved_errno;
+ return -1;
+ }
+
+ ret = setup_listener_socket(path, opts, &server_socket);
+ if (ret) {
+ int saved_errno = errno;
+ close(sv[0]);
+ close(sv[1]);
+ errno = saved_errno;
+ return ret;
+ }
+
+ server_data = xcalloc(1, sizeof(*server_data));
+ server_data->magic = MAGIC_SERVER_DATA;
+ server_data->application_cb = application_cb;
+ server_data->application_data = application_data;
+ strbuf_init(&server_data->buf_path, 0);
+ strbuf_addstr(&server_data->buf_path, path);
+
+ if (nr_threads < 1)
+ nr_threads = 1;
+
+ pthread_mutex_init(&server_data->work_available_mutex, NULL);
+ pthread_cond_init(&server_data->work_available_cond, NULL);
+
+ server_data->queue_size = nr_threads * FIFO_SCALE;
+ CALLOC_ARRAY(server_data->fifo_fds, server_data->queue_size);
+
+ server_data->accept_thread =
+ xcalloc(1, sizeof(*server_data->accept_thread));
+ server_data->accept_thread->magic = MAGIC_ACCEPT_THREAD_DATA;
+ server_data->accept_thread->server_data = server_data;
+ server_data->accept_thread->server_socket = server_socket;
+ server_data->accept_thread->fd_send_shutdown = sv[0];
+ server_data->accept_thread->fd_wait_shutdown = sv[1];
+
+ if (pthread_create(&server_data->accept_thread->pthread_id, NULL,
+ accept_thread_proc, server_data->accept_thread))
+ die_errno(_("could not start accept_thread '%s'"), path);
+
+ for (k = 0; k < nr_threads; k++) {
+ struct ipc_worker_thread_data *wtd;
+
+ wtd = xcalloc(1, sizeof(*wtd));
+ wtd->magic = MAGIC_WORKER_THREAD_DATA;
+ wtd->server_data = server_data;
+
+ if (pthread_create(&wtd->pthread_id, NULL, worker_thread_proc,
+ wtd)) {
+ if (k == 0)
+ die(_("could not start worker[0] for '%s'"),
+ path);
+ /*
+ * Limp along with the thread pool that we have.
+ */
+ break;
+ }
+
+ wtd->next_thread = server_data->worker_thread_list;
+ server_data->worker_thread_list = wtd;
+ }
+
+ *returned_server_data = server_data;
+ return 0;
+}
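+
+/*
+ * A minimal usage sketch of this API (illustrative only; names other
+ * than the ipc_* and SIMPLE_IPC_* identifiers are hypothetical, and
+ * the callback signature is inferred from this file). Returning
+ * SIMPLE_IPC_QUIT from the callback asks the server to shut down;
+ * ipc_server_await() returns once that shutdown has been processed.
+ *
+ *     static int my_app_cb(void *data, const char *command,
+ *                          ipc_server_reply_cb *reply_cb,
+ *                          struct ipc_server_reply_data *reply_data)
+ *     {
+ *         if (!strcmp(command, "quit"))
+ *             return SIMPLE_IPC_QUIT;
+ *         return reply_cb(reply_data, "pong", 4);
+ *     }
+ *
+ *     struct ipc_server_opts opts = { .nr_threads = 4 };
+ *     struct ipc_server_data *server = NULL;
+ *
+ *     if (!ipc_server_run_async(&server, path, &opts, my_app_cb, NULL))
+ *         ipc_server_await(server);
+ *     ipc_server_free(server);
+ */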
+
+/*
+ * Gently tell the IPC server threads to shut down.
+ * Can be run on any thread.
+ */
+int ipc_server_stop_async(struct ipc_server_data *server_data)
+{
+ /* ASSERT NOT holding mutex */
+
+ int fd;
+
+ if (!server_data)
+ return 0;
+
+ trace2_region_enter("ipc-server", "server-stop-async", NULL);
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+
+ server_data->shutdown_requested = 1;
+
+ /*
+ * Write a byte to the shutdown socket pair to wake up the
+ * accept-thread.
+ */
+ if (write(server_data->accept_thread->fd_send_shutdown, "Q", 1) < 0)
+ error_errno("could not write to fd_send_shutdown");
+
+ /*
+ * Drain the queue of existing connections.
+ */
+ while ((fd = fifo_dequeue(server_data)) != -1)
+ close(fd);
+
+ /*
+ * Gently tell worker threads to stop processing new connections
+ * and exit. (This does not abort in-progress conversations.)
+ */
+ pthread_cond_broadcast(&server_data->work_available_cond);
+
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ trace2_region_leave("ipc-server", "server-stop-async", NULL);
+
+ return 0;
+}
+
+/*
+ * Wait for all IPC server threads to stop.
+ */
+int ipc_server_await(struct ipc_server_data *server_data)
+{
+ pthread_join(server_data->accept_thread->pthread_id, NULL);
+
+ if (!server_data->shutdown_requested)
+ BUG("ipc-server: accept-thread stopped for '%s'",
+ server_data->buf_path.buf);
+
+ while (server_data->worker_thread_list) {
+ struct ipc_worker_thread_data *wtd =
+ server_data->worker_thread_list;
+
+ pthread_join(wtd->pthread_id, NULL);
+
+ server_data->worker_thread_list = wtd->next_thread;
+ free(wtd);
+ }
+
+ server_data->is_stopped = 1;
+
+ return 0;
+}
+
+void ipc_server_free(struct ipc_server_data *server_data)
+{
+ struct ipc_accept_thread_data *accept_thread_data;
+
+ if (!server_data)
+ return;
+
+ if (!server_data->is_stopped)
+ BUG("cannot free ipc-server while running for '%s'",
+ server_data->buf_path.buf);
+
+ accept_thread_data = server_data->accept_thread;
+ if (accept_thread_data) {
+ unix_ss_free(accept_thread_data->server_socket);
+
+ if (accept_thread_data->fd_send_shutdown != -1)
+ close(accept_thread_data->fd_send_shutdown);
+ if (accept_thread_data->fd_wait_shutdown != -1)
+ close(accept_thread_data->fd_wait_shutdown);
+
+ free(server_data->accept_thread);
+ }
+
+ while (server_data->worker_thread_list) {
+ struct ipc_worker_thread_data *wtd =
+ server_data->worker_thread_list;
+
+ server_data->worker_thread_list = wtd->next_thread;
+ free(wtd);
+ }
+
+ pthread_cond_destroy(&server_data->work_available_cond);
+ pthread_mutex_destroy(&server_data->work_available_mutex);
+
+ strbuf_release(&server_data->buf_path);
+
+ free(server_data->fifo_fds);
+ free(server_data);
+}
diff --git a/compat/simple-ipc/ipc-win32.c b/compat/simple-ipc/ipc-win32.c
new file mode 100644
index 0000000000..8f89c02037
--- /dev/null
+++ b/compat/simple-ipc/ipc-win32.c
@@ -0,0 +1,751 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+
+#ifndef GIT_WINDOWS_NATIVE
+#error This file can only be compiled on Windows
+#endif
+
+static int initialize_pipe_name(const char *path, wchar_t *wpath, size_t alloc)
+{
+ int off = 0;
+ struct strbuf realpath = STRBUF_INIT;
+
+ if (!strbuf_realpath(&realpath, path, 0))
+ return -1;
+
+ off = swprintf(wpath, alloc, L"\\\\.\\pipe\\");
+ if (xutftowcs(wpath + off, realpath.buf, alloc - off) < 0)
+ return -1;
+
+ /* Handle drive prefix */
+ if (wpath[off] && wpath[off + 1] == L':') {
+ wpath[off + 1] = L'_';
+ off += 2;
+ }
+
+ for (; wpath[off]; off++)
+ if (wpath[off] == L'/')
+ wpath[off] = L'\\';
+
+ strbuf_release(&realpath);
+ return 0;
+}
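+
+/*
+ * For example (the path is hypothetical, shown only to illustrate the
+ * transformation above): "C:/Users/test/repo/.git/my-ipc" becomes the
+ * pipe name "\\.\pipe\C_\Users\test\repo\.git\my-ipc".
+ */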
+
+static enum ipc_active_state get_active_state(wchar_t *pipe_path)
+{
+ if (WaitNamedPipeW(pipe_path, NMPWAIT_USE_DEFAULT_WAIT))
+ return IPC_STATE__LISTENING;
+
+ if (GetLastError() == ERROR_SEM_TIMEOUT)
+ return IPC_STATE__NOT_LISTENING;
+
+ if (GetLastError() == ERROR_FILE_NOT_FOUND)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ return IPC_STATE__OTHER_ERROR;
+}
+
+enum ipc_active_state ipc_get_active_state(const char *path)
+{
+ wchar_t pipe_path[MAX_PATH];
+
+ if (initialize_pipe_name(path, pipe_path, ARRAY_SIZE(pipe_path)) < 0)
+ return IPC_STATE__INVALID_PATH;
+
+ return get_active_state(pipe_path);
+}
+
+#define WAIT_STEP_MS (50)
+
+static enum ipc_active_state connect_to_server(
+ const wchar_t *wpath,
+ DWORD timeout_ms,
+ const struct ipc_client_connect_options *options,
+ int *pfd)
+{
+ DWORD t_start_ms, t_waited_ms;
+ DWORD step_ms;
+ HANDLE hPipe = INVALID_HANDLE_VALUE;
+ DWORD mode = PIPE_READMODE_BYTE;
+ DWORD gle;
+
+ *pfd = -1;
+
+ for (;;) {
+ hPipe = CreateFileW(wpath, GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, OPEN_EXISTING, 0, NULL);
+ if (hPipe != INVALID_HANDLE_VALUE)
+ break;
+
+ gle = GetLastError();
+
+ switch (gle) {
+ case ERROR_FILE_NOT_FOUND:
+ if (!options->wait_if_not_found)
+ return IPC_STATE__PATH_NOT_FOUND;
+ if (!timeout_ms)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ step_ms = (timeout_ms < WAIT_STEP_MS) ?
+ timeout_ms : WAIT_STEP_MS;
+ sleep_millisec(step_ms);
+
+ timeout_ms -= step_ms;
+ break; /* try again */
+
+ case ERROR_PIPE_BUSY:
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+ if (!timeout_ms)
+ return IPC_STATE__NOT_LISTENING;
+
+ t_start_ms = (DWORD)(getnanotime() / 1000000);
+
+ if (!WaitNamedPipeW(wpath, timeout_ms)) {
+ if (GetLastError() == ERROR_SEM_TIMEOUT)
+ return IPC_STATE__NOT_LISTENING;
+
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ /*
+ * A pipe server instance became available.
+ * Race other client processes to connect to
+ * it.
+ *
+ * But first decrement our overall timeout so
+ * that we don't starve if we keep losing the
+ * race, and also guard against the special
+ * NMPWAIT_ values (0 and -1).
+ */
+ t_waited_ms = (DWORD)(getnanotime() / 1000000) - t_start_ms;
+ if (t_waited_ms < timeout_ms)
+ timeout_ms -= t_waited_ms;
+ else
+ timeout_ms = 1;
+ break; /* try again */
+
+ default:
+ return IPC_STATE__OTHER_ERROR;
+ }
+ }
+
+ if (!SetNamedPipeHandleState(hPipe, &mode, NULL, NULL)) {
+ CloseHandle(hPipe);
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ *pfd = _open_osfhandle((intptr_t)hPipe, O_RDWR|O_BINARY);
+ if (*pfd < 0) {
+ CloseHandle(hPipe);
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ /* fd now owns hPipe */
+
+ return IPC_STATE__LISTENING;
+}
+
+/*
+ * The default connection timeout for Windows clients.
+ *
+ * This is not currently part of the ipc_ API (nor the config settings)
+ * because of differences between Windows and other platforms.
+ *
+ * This value was chosen at random.
+ */
+#define WINDOWS_CONNECTION_TIMEOUT_MS (30000)
+
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection)
+{
+ wchar_t wpath[MAX_PATH];
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ int fd = -1;
+
+ *p_connection = NULL;
+
+ trace2_region_enter("ipc-client", "try-connect", NULL);
+ trace2_data_string("ipc-client", NULL, "try-connect/path", path);
+
+ if (initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath)) < 0)
+ state = IPC_STATE__INVALID_PATH;
+ else
+ state = connect_to_server(wpath, WINDOWS_CONNECTION_TIMEOUT_MS,
+ options, &fd);
+
+ trace2_data_intmax("ipc-client", NULL, "try-connect/state",
+ (intmax_t)state);
+ trace2_region_leave("ipc-client", "try-connect", NULL);
+
+ if (state == IPC_STATE__LISTENING) {
+ (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
+ (*p_connection)->fd = fd;
+ }
+
+ return state;
+}
+
+void ipc_client_close_connection(struct ipc_client_connection *connection)
+{
+ if (!connection)
+ return;
+
+ if (connection->fd != -1)
+ close(connection->fd);
+
+ free(connection);
+}
+
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer)
+{
+ int ret = 0;
+
+ strbuf_setlen(answer, 0);
+
+ trace2_region_enter("ipc-client", "send-command", NULL);
+
+ if (write_packetized_from_buf_no_flush(message, strlen(message),
+ connection->fd) < 0 ||
+ packet_flush_gently(connection->fd) < 0) {
+ ret = error(_("could not send IPC command"));
+ goto done;
+ }
+
+ FlushFileBuffers((HANDLE)_get_osfhandle(connection->fd));
+
+ if (read_packetized_to_strbuf(
+ connection->fd, answer,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
+ ret = error(_("could not read IPC response"));
+ goto done;
+ }
+
+done:
+ trace2_region_leave("ipc-client", "send-command", NULL);
+ return ret;
+}
+
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *response)
+{
+ int ret = -1;
+ enum ipc_active_state state;
+ struct ipc_client_connection *connection = NULL;
+
+ state = ipc_client_try_connect(path, options, &connection);
+
+ if (state != IPC_STATE__LISTENING)
+ return ret;
+
+ ret = ipc_client_send_command_to_connection(connection, message, response);
+
+ ipc_client_close_connection(connection);
+
+ return ret;
+}
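+
+/*
+ * A minimal client-side sketch (illustrative only; everything except
+ * the ipc_* API and strbuf helpers is hypothetical, and the options
+ * struct is assumed to be safely zero-initializable):
+ *
+ *     struct ipc_client_connect_options options = { 0 };
+ *     struct strbuf answer = STRBUF_INIT;
+ *
+ *     options.wait_if_busy = 1;
+ *     options.wait_if_not_found = 0;
+ *
+ *     if (!ipc_client_send_command(path, &options, "ping", &answer))
+ *         printf("daemon said: %s\n", answer.buf);
+ *     strbuf_release(&answer);
+ */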
+
+/*
+ * Duplicate the given pipe handle and wrap it in a file descriptor so
+ * that we can use pkt-line on it.
+ */
+static int dup_fd_from_pipe(const HANDLE pipe)
+{
+ HANDLE process = GetCurrentProcess();
+ HANDLE handle;
+ int fd;
+
+ if (!DuplicateHandle(process, pipe, process, &handle, 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+ }
+
+ fd = _open_osfhandle((intptr_t)handle, O_RDWR|O_BINARY);
+ if (fd < 0) {
+ errno = err_win_to_posix(GetLastError());
+ CloseHandle(handle);
+ return -1;
+ }
+
+ /*
+ * `handle` is now owned by `fd` and will be automatically closed
+ * when the descriptor is closed.
+ */
+
+ return fd;
+}
+
+/*
+ * Magic numbers used to annotate callback instance data.
+ * These are used to help guard against accidentally passing the
+ * wrong instance data across multiple levels of callbacks (which
+ * is easy to do if there are `void*` arguments).
+ */
+enum magic {
+ MAGIC_SERVER_REPLY_DATA,
+ MAGIC_SERVER_THREAD_DATA,
+ MAGIC_SERVER_DATA,
+};
+
+struct ipc_server_reply_data {
+ enum magic magic;
+ int fd;
+ struct ipc_server_thread_data *server_thread_data;
+};
+
+struct ipc_server_thread_data {
+ enum magic magic;
+ struct ipc_server_thread_data *next_thread;
+ struct ipc_server_data *server_data;
+ pthread_t pthread_id;
+ HANDLE hPipe;
+};
+
+/*
+ * On Windows, the conceptual "ipc-server" is implemented as a pool of
+ * n identical/peer "server-thread" threads. That is, there is no
+ * hierarchy of threads, and therefore no controller thread managing
+ * the pool. Each thread has an independent handle to the named pipe,
+ * receives incoming connections, processes the client, and re-uses
+ * the pipe for the next client connection.
+ *
+ * Therefore, the "ipc-server" only needs to maintain a list of the
+ * spawned threads for eventual "join" purposes.
+ *
+ * A single "stop-event" is visible to all of the server threads to
+ * tell them to shut down (when idle).
+ */
+struct ipc_server_data {
+ enum magic magic;
+ ipc_server_application_cb *application_cb;
+ void *application_data;
+ struct strbuf buf_path;
+ wchar_t wpath[MAX_PATH];
+
+ HANDLE hEventStopRequested;
+ struct ipc_server_thread_data *thread_list;
+ int is_stopped;
+};
+
+enum connect_result {
+ CR_CONNECTED = 0,
+ CR_CONNECT_PENDING,
+ CR_CONNECT_ERROR,
+ CR_WAIT_ERROR,
+ CR_SHUTDOWN,
+};
+
+static enum connect_result queue_overlapped_connect(
+ struct ipc_server_thread_data *server_thread_data,
+ OVERLAPPED *lpo)
+{
+ if (ConnectNamedPipe(server_thread_data->hPipe, lpo))
+ goto failed;
+
+ switch (GetLastError()) {
+ case ERROR_IO_PENDING:
+ return CR_CONNECT_PENDING;
+
+ case ERROR_PIPE_CONNECTED:
+ SetEvent(lpo->hEvent);
+ return CR_CONNECTED;
+
+ default:
+ break;
+ }
+
+failed:
+ error(_("ConnectNamedPipe failed for '%s' (%lu)"),
+ server_thread_data->server_data->buf_path.buf,
+ GetLastError());
+ return CR_CONNECT_ERROR;
+}
+
+/*
+ * Use Windows Overlapped IO to wait for a connection or for our event
+ * to be signalled.
+ */
+static enum connect_result wait_for_connection(
+ struct ipc_server_thread_data *server_thread_data,
+ OVERLAPPED *lpo)
+{
+ enum connect_result r;
+ HANDLE waitHandles[2];
+ DWORD dwWaitResult;
+
+ r = queue_overlapped_connect(server_thread_data, lpo);
+ if (r != CR_CONNECT_PENDING)
+ return r;
+
+ waitHandles[0] = server_thread_data->server_data->hEventStopRequested;
+ waitHandles[1] = lpo->hEvent;
+
+ dwWaitResult = WaitForMultipleObjects(2, waitHandles, FALSE, INFINITE);
+ switch (dwWaitResult) {
+ case WAIT_OBJECT_0 + 0:
+ return CR_SHUTDOWN;
+
+ case WAIT_OBJECT_0 + 1:
+ ResetEvent(lpo->hEvent);
+ return CR_CONNECTED;
+
+ default:
+ return CR_WAIT_ERROR;
+ }
+}
+
+/*
+ * Forward declare our reply callback function so that any compiler
+ * errors are reported when we actually define the function (in addition
+ * to any errors reported when we try to pass this callback function as
+ * a parameter in a function call). The former are easier to understand.
+ */
+static ipc_server_reply_cb do_io_reply_callback;
+
+/*
+ * Relay the application's response message to the client process.
+ * (We do not flush at this point because we allow the caller
+ * to chunk data to the client through us.)
+ */
+static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
+ const char *response, size_t response_len)
+{
+ if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
+ BUG("reply_cb called with wrong instance data");
+
+ return write_packetized_from_buf_no_flush(response, response_len,
+ reply_data->fd);
+}
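+
+/*
+ * Note that the application callback may call reply_cb() several
+ * times to stream a large response in chunks; the terminating packet
+ * flush is sent only once, by do_io() below, after the callback
+ * returns.
+ */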
+
+/*
+ * Receive the request/command from the client and pass it to the
+ * registered request-callback. The request-callback will compose
+ * a response and call our reply-callback to send it to the client.
+ *
+ * Simple-IPC only contains one round trip, so we flush and close
+ * here after the response.
+ */
+static int do_io(struct ipc_server_thread_data *server_thread_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_server_reply_data reply_data;
+ int ret = 0;
+
+ reply_data.magic = MAGIC_SERVER_REPLY_DATA;
+ reply_data.server_thread_data = server_thread_data;
+
+ reply_data.fd = dup_fd_from_pipe(server_thread_data->hPipe);
+ if (reply_data.fd < 0)
+ return error(_("could not create fd from pipe for '%s'"),
+ server_thread_data->server_data->buf_path.buf);
+
+ ret = read_packetized_to_strbuf(
+ reply_data.fd, &buf,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
+ if (ret >= 0) {
+ ret = server_thread_data->server_data->application_cb(
+ server_thread_data->server_data->application_data,
+ buf.buf, do_io_reply_callback, &reply_data);
+
+ packet_flush_gently(reply_data.fd);
+
+ FlushFileBuffers((HANDLE)_get_osfhandle((reply_data.fd)));
+ }
+ else {
+ /*
+ * The client probably disconnected/shutdown before it
+ * could send a well-formed message. Ignore it.
+ */
+ }
+
+ strbuf_release(&buf);
+ close(reply_data.fd);
+
+ return ret;
+}
+
+/*
+ * Handle the IPC request and response with this connected client, then
+ * reset the pipe to prepare for the next client.
+ */
+static int use_connection(struct ipc_server_thread_data *server_thread_data)
+{
+ int ret;
+
+ ret = do_io(server_thread_data);
+
+ FlushFileBuffers(server_thread_data->hPipe);
+ DisconnectNamedPipe(server_thread_data->hPipe);
+
+ return ret;
+}
+
+/*
+ * Thread proc for an IPC server worker thread. It handles a series of
+ * connections from clients. It resets and reuses the hPipe between
+ * clients.
+ */
+static void *server_thread_proc(void *_server_thread_data)
+{
+ struct ipc_server_thread_data *server_thread_data = _server_thread_data;
+ HANDLE hEventConnected = INVALID_HANDLE_VALUE;
+ OVERLAPPED oConnect;
+ enum connect_result cr;
+ int ret;
+
+ assert(server_thread_data->hPipe != INVALID_HANDLE_VALUE);
+
+ trace2_thread_start("ipc-server");
+ trace2_data_string("ipc-server", NULL, "pipe",
+ server_thread_data->server_data->buf_path.buf);
+
+ hEventConnected = CreateEventW(NULL, TRUE, FALSE, NULL);
+
+ memset(&oConnect, 0, sizeof(oConnect));
+ oConnect.hEvent = hEventConnected;
+
+ for (;;) {
+ cr = wait_for_connection(server_thread_data, &oConnect);
+
+ switch (cr) {
+ case CR_SHUTDOWN:
+ goto finished;
+
+ case CR_CONNECTED:
+ ret = use_connection(server_thread_data);
+ if (ret == SIMPLE_IPC_QUIT) {
+ ipc_server_stop_async(
+ server_thread_data->server_data);
+ goto finished;
+ }
+ if (ret > 0) {
+ /*
+ * Ignore (transient) IO errors with this
+ * client and reset for the next client.
+ */
+ }
+ break;
+
+ case CR_CONNECT_PENDING:
+ /* By construction, this should not happen. */
+ BUG("ipc-server[%s]: unexpeced CR_CONNECT_PENDING",
+ server_thread_data->server_data->buf_path.buf);
+
+ case CR_CONNECT_ERROR:
+ case CR_WAIT_ERROR:
+ /*
+ * Ignore these theoretical errors.
+ */
+ DisconnectNamedPipe(server_thread_data->hPipe);
+ break;
+
+ default:
+ BUG("unandled case after wait_for_connection");
+ }
+ }
+
+finished:
+ CloseHandle(server_thread_data->hPipe);
+ CloseHandle(hEventConnected);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static HANDLE create_new_pipe(wchar_t *wpath, int is_first)
+{
+ HANDLE hPipe;
+ DWORD dwOpenMode, dwPipeMode;
+ LPSECURITY_ATTRIBUTES lpsa = NULL;
+
+ dwOpenMode = PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND |
+ FILE_FLAG_OVERLAPPED;
+
+ dwPipeMode = PIPE_TYPE_MESSAGE | PIPE_READMODE_BYTE | PIPE_WAIT |
+ PIPE_REJECT_REMOTE_CLIENTS;
+
+ if (is_first) {
+ dwOpenMode |= FILE_FLAG_FIRST_PIPE_INSTANCE;
+
+ /*
+ * On Windows, the first server pipe instance gets to
+ * set the ACL / Security Attributes on the named
+ * pipe; subsequent instances inherit and cannot
+ * change them.
+ *
+ * TODO Should we allow the application layer to
+ * specify security attributes, such as `LocalService`
+ * or `LocalSystem`, when we create the named pipe?
+ * This question is probably not important when the
+ * daemon is started by a foreground user process and
+ * only needs to talk to the current user, but may matter
+ * if the daemon is run via the Control Panel as a
+ * System Service.
+ */
+ }
+
+ hPipe = CreateNamedPipeW(wpath, dwOpenMode, dwPipeMode,
+ PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0, lpsa);
+
+ return hPipe;
+}
+
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct ipc_server_data *server_data;
+ wchar_t wpath[MAX_PATH];
+ HANDLE hPipeFirst = INVALID_HANDLE_VALUE;
+ int k;
+ int ret = 0;
+ int nr_threads = opts->nr_threads;
+
+ *returned_server_data = NULL;
+
+ ret = initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath));
+ if (ret < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ hPipeFirst = create_new_pipe(wpath, 1);
+ if (hPipeFirst == INVALID_HANDLE_VALUE) {
+ errno = EADDRINUSE;
+ return -2;
+ }
+
+ server_data = xcalloc(1, sizeof(*server_data));
+ server_data->magic = MAGIC_SERVER_DATA;
+ server_data->application_cb = application_cb;
+ server_data->application_data = application_data;
+ server_data->hEventStopRequested = CreateEvent(NULL, TRUE, FALSE, NULL);
+ strbuf_init(&server_data->buf_path, 0);
+ strbuf_addstr(&server_data->buf_path, path);
+ wcscpy(server_data->wpath, wpath);
+
+ if (nr_threads < 1)
+ nr_threads = 1;
+
+ for (k = 0; k < nr_threads; k++) {
+ struct ipc_server_thread_data *std;
+
+ std = xcalloc(1, sizeof(*std));
+ std->magic = MAGIC_SERVER_THREAD_DATA;
+ std->server_data = server_data;
+ std->hPipe = INVALID_HANDLE_VALUE;
+
+ std->hPipe = (k == 0)
+ ? hPipeFirst
+ : create_new_pipe(server_data->wpath, 0);
+
+ if (std->hPipe == INVALID_HANDLE_VALUE) {
+ /*
+ * If we've reached a pipe instance limit for
+ * this path, just use fewer threads.
+ */
+ free(std);
+ break;
+ }
+
+ if (pthread_create(&std->pthread_id, NULL,
+ server_thread_proc, std)) {
+ /*
+ * Likewise, if we're out of threads, just use
+ * fewer threads than requested.
+ *
+ * However, we just give up if we can't even get
+ * one thread. This should not happen.
+ */
+ if (k == 0)
+ die(_("could not start thread[0] for '%s'"),
+ path);
+
+ CloseHandle(std->hPipe);
+ free(std);
+ break;
+ }
+
+ std->next_thread = server_data->thread_list;
+ server_data->thread_list = std;
+ }
+
+ *returned_server_data = server_data;
+ return 0;
+}
+
+int ipc_server_stop_async(struct ipc_server_data *server_data)
+{
+ if (!server_data)
+ return 0;
+
+ /*
+ * Gently tell all of the ipc_server threads to shut down.
+ * This will be seen the next time they are idle (and waiting
+ * for a connection).
+ *
+ * We DO NOT attempt to force them to drop an active connection.
+ */
+ SetEvent(server_data->hEventStopRequested);
+ return 0;
+}
+
+int ipc_server_await(struct ipc_server_data *server_data)
+{
+ DWORD dwWaitResult;
+
+ if (!server_data)
+ return 0;
+
+ dwWaitResult = WaitForSingleObject(server_data->hEventStopRequested, INFINITE);
+ if (dwWaitResult != WAIT_OBJECT_0)
+ return error(_("wait for hEvent failed for '%s'"),
+ server_data->buf_path.buf);
+
+ while (server_data->thread_list) {
+ struct ipc_server_thread_data *std = server_data->thread_list;
+
+ pthread_join(std->pthread_id, NULL);
+
+ server_data->thread_list = std->next_thread;
+ free(std);
+ }
+
+ server_data->is_stopped = 1;
+
+ return 0;
+}
+
+void ipc_server_free(struct ipc_server_data *server_data)
+{
+ if (!server_data)
+ return;
+
+ if (!server_data->is_stopped)
+ BUG("cannot free ipc-server while running for '%s'",
+ server_data->buf_path.buf);
+
+ strbuf_release(&server_data->buf_path);
+
+ if (server_data->hEventStopRequested != INVALID_HANDLE_VALUE)
+ CloseHandle(server_data->hEventStopRequested);
+
+ while (server_data->thread_list) {
+ struct ipc_server_thread_data *std = server_data->thread_list;
+
+ server_data->thread_list = std->next_thread;
+ free(std);
+ }
+
+ free(server_data);
+}
diff --git a/config.c b/config.c
index 6428393a41..870d9534de 100644
--- a/config.c
+++ b/config.c
@@ -1180,20 +1180,6 @@ static void die_bad_number(const char *name, const char *value)
}
}
-NORETURN
-static void die_bad_bool(const char *name, const char *value)
-{
- if (!strcmp(name, "GIT_TEST_GETTEXT_POISON"))
- /*
- * We explicitly *don't* use _() here since it would
- * cause an infinite loop with _() needing to call
- * use_gettext_poison().
- */
- die("bad boolean config value '%s' for '%s'", value, name);
- else
- die(_("bad boolean config value '%s' for '%s'"), value, name);
-}
-
int git_config_int(const char *name, const char *value)
{
int ret;
@@ -1268,7 +1254,7 @@ int git_config_bool(const char *name, const char *value)
{
int v = git_parse_maybe_bool(value);
if (v < 0)
- die_bad_bool(name, value);
+ die(_("bad boolean config value '%s' for '%s'"), value, name);
return v;
}
diff --git a/config.mak.uname b/config.mak.uname
index d204c20a64..cb443b4e02 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -424,6 +424,7 @@ ifeq ($(uname_S),Windows)
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
+ USE_WIN32_IPC = YesPlease
USE_WIN32_MMAP = YesPlease
MMAP_PREVENTS_DELETE = UnfortunatelyYes
# USE_NED_ALLOCATOR = YesPlease
@@ -600,6 +601,7 @@ ifneq (,$(findstring MINGW,$(uname_S)))
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
+ USE_WIN32_IPC = YesPlease
USE_WIN32_MMAP = YesPlease
MMAP_PREVENTS_DELETE = UnfortunatelyYes
USE_NED_ALLOCATOR = YesPlease
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index ac3dbc079a..75ed198a6a 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -58,6 +58,10 @@ if(WIN32)
# In the vcpkg edition, we need this to be able to link to libcurl
set(CURL_NO_CURL_CMAKE ON)
+
+ # Copy the necessary vcpkg DLLs (like iconv) to the install dir
+ set(X_VCPKG_APPLOCAL_DEPS_INSTALL ON)
+ set(CMAKE_TOOLCHAIN_FILE ${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake CACHE STRING "Vcpkg toolchain file")
endif()
find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin")
@@ -243,7 +247,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
add_compile_definitions(PROCFS_EXECUTABLE_PATH="/proc/self/exe" HAVE_DEV_TTY )
- list(APPEND compat_SOURCES unix-socket.c)
+ list(APPEND compat_SOURCES unix-socket.c unix-stream-server.c)
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
+ list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-win32.c)
+else()
+ list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-unix-socket.c)
endif()
set(EXE_EXTENSION ${CMAKE_EXECUTABLE_SUFFIX})
@@ -685,13 +695,17 @@ endif()
parse_makefile_for_executables(git_builtin_extra "BUILT_INS")
+option(SKIP_DASHED_BUILT_INS "Skip hardlinking the dashed versions of the built-ins")
+
#Creating hardlinks
+if(NOT SKIP_DASHED_BUILT_INS)
foreach(s ${git_SOURCES} ${git_builtin_extra})
string(REPLACE "${CMAKE_SOURCE_DIR}/builtin/" "" s ${s})
string(REPLACE ".c" "" s ${s})
file(APPEND ${CMAKE_BINARY_DIR}/CreateLinks.cmake "file(CREATE_LINK git${EXE_EXTENSION} git-${s}${EXE_EXTENSION})\n")
list(APPEND git_links ${CMAKE_BINARY_DIR}/git-${s}${EXE_EXTENSION})
endforeach()
+endif()
if(CURL_FOUND)
set(remote_exes
@@ -807,15 +821,19 @@ list(TRANSFORM git_shell_scripts PREPEND "${CMAKE_BINARY_DIR}/")
list(TRANSFORM git_perl_scripts PREPEND "${CMAKE_BINARY_DIR}/")
#install
-install(TARGETS git git-shell
+foreach(program ${PROGRAMS_BUILT})
+if(program STREQUAL "git" OR program STREQUAL "git-shell")
+install(TARGETS ${program}
RUNTIME DESTINATION bin)
+else()
+install(TARGETS ${program}
+ RUNTIME DESTINATION libexec/git-core)
+endif()
+endforeach()
+
install(PROGRAMS ${CMAKE_BINARY_DIR}/git-cvsserver
DESTINATION bin)
-list(REMOVE_ITEM PROGRAMS_BUILT git git-shell)
-install(TARGETS ${PROGRAMS_BUILT}
- RUNTIME DESTINATION libexec/git-core)
-
set(bin_links
git-receive-pack git-upload-archive git-upload-pack)
@@ -828,12 +846,12 @@ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git-shell${EXE_EXTENS
foreach(b ${git_links})
string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b})
- install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})")
+ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})")
endforeach()
foreach(b ${git_http_links})
string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b})
- install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})")
+ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})")
endforeach()
install(PROGRAMS ${git_shell_scripts} ${git_perl_scripts} ${CMAKE_BINARY_DIR}/git-p4
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index e1a66954fe..dfa735ea62 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -77,7 +77,7 @@ __git_find_repo_path ()
test -d "$__git_dir" &&
__git_repo_path="$__git_dir"
elif [ -n "${GIT_DIR-}" ]; then
- test -d "${GIT_DIR-}" &&
+ test -d "$GIT_DIR" &&
__git_repo_path="$GIT_DIR"
elif [ -d .git ]; then
__git_repo_path=.git
@@ -427,7 +427,7 @@ __gitcomp_builtin ()
if [ -z "$options" ]; then
local completion_helper
- if [ "$GIT_COMPLETION_SHOW_ALL" = "1" ]; then
+ if [ "${GIT_COMPLETION_SHOW_ALL-}" = "1" ]; then
completion_helper="--git-completion-helper-all"
else
completion_helper="--git-completion-helper"
@@ -744,7 +744,7 @@ __git_refs ()
track=""
;;
*)
- for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD; do
+ for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD CHERRY_PICK_HEAD; do
case "$i" in
$match*)
if [ -e "$dir/$i" ]; then
@@ -1910,7 +1910,7 @@ _git_help ()
return
;;
esac
- if test -n "$GIT_TESTING_ALL_COMMAND_LIST"
+ if test -n "${GIT_TESTING_ALL_COMMAND_LIST-}"
then
__gitcomp "$GIT_TESTING_ALL_COMMAND_LIST $(__git --list-cmds=alias,list-guide) gitk"
else
diff --git a/convert.c b/convert.c
index 2d3a5a713c..45ac75f80c 100644
--- a/convert.c
+++ b/convert.c
@@ -24,17 +24,6 @@
#define CONVERT_STAT_BITS_TXT_CRLF 0x2
#define CONVERT_STAT_BITS_BIN 0x4
-enum crlf_action {
- CRLF_UNDEFINED,
- CRLF_BINARY,
- CRLF_TEXT,
- CRLF_TEXT_INPUT,
- CRLF_TEXT_CRLF,
- CRLF_AUTO,
- CRLF_AUTO_INPUT,
- CRLF_AUTO_CRLF
-};
-
struct text_stat {
/* NUL, CR, LF and CRLF counts */
unsigned nul, lonecr, lonelf, crlf;
@@ -172,7 +161,7 @@ static int text_eol_is_crlf(void)
return 0;
}
-static enum eol output_eol(enum crlf_action crlf_action)
+static enum eol output_eol(enum convert_crlf_action crlf_action)
{
switch (crlf_action) {
case CRLF_BINARY:
@@ -246,7 +235,7 @@ static int has_crlf_in_index(const struct index_state *istate, const char *path)
}
static int will_convert_lf_to_crlf(struct text_stat *stats,
- enum crlf_action crlf_action)
+ enum convert_crlf_action crlf_action)
{
if (output_eol(crlf_action) != EOL_CRLF)
return 0;
@@ -499,7 +488,7 @@ static int encode_to_worktree(const char *path, const char *src, size_t src_len,
static int crlf_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *buf,
- enum crlf_action crlf_action, int conv_flags)
+ enum convert_crlf_action crlf_action, int conv_flags)
{
struct text_stat stats;
char *dst;
@@ -585,8 +574,8 @@ static int crlf_to_git(const struct index_state *istate,
return 1;
}
-static int crlf_to_worktree(const char *src, size_t len,
- struct strbuf *buf, enum crlf_action crlf_action)
+static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf,
+ enum convert_crlf_action crlf_action)
{
char *to_free = NULL;
struct text_stat stats;
@@ -884,9 +873,13 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
goto done;
if (fd >= 0)
- err = write_packetized_from_fd(fd, process->in);
+ err = write_packetized_from_fd_no_flush(fd, process->in);
else
- err = write_packetized_from_buf(src, len, process->in);
+ err = write_packetized_from_buf_no_flush(src, len, process->in);
+ if (err)
+ goto done;
+
+ err = packet_flush_gently(process->in);
if (err)
goto done;
@@ -903,7 +896,8 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
if (err)
goto done;
- err = read_packetized_to_strbuf(process->out, &nbuf) < 0;
+ err = read_packetized_to_strbuf(process->out, &nbuf,
+ PACKET_READ_GENTLE_ON_EOF) < 0;
if (err)
goto done;
@@ -1247,7 +1241,7 @@ static const char *git_path_check_encoding(struct attr_check_item *check)
return value;
}
-static enum crlf_action git_path_check_crlf(struct attr_check_item *check)
+static enum convert_crlf_action git_path_check_crlf(struct attr_check_item *check)
{
const char *value = check->value;
@@ -1297,18 +1291,10 @@ static int git_path_check_ident(struct attr_check_item *check)
return !!ATTR_TRUE(value);
}
-struct conv_attrs {
- struct convert_driver *drv;
- enum crlf_action attr_action; /* What attr says */
- enum crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
- int ident;
- const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
-};
-
static struct attr_check *check;
-static void convert_attrs(const struct index_state *istate,
- struct conv_attrs *ca, const char *path)
+void convert_attrs(const struct index_state *istate,
+ struct conv_attrs *ca, const char *path)
{
struct attr_check_item *ccheck = NULL;
@@ -1465,19 +1451,16 @@ void convert_to_git_filter_fd(const struct index_state *istate,
ident_to_git(dst->buf, dst->len, dst, ca.ident);
}
-static int convert_to_working_tree_internal(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- int normalizing,
- const struct checkout_metadata *meta,
- struct delayed_checkout *dco)
+static int convert_to_working_tree_ca_internal(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ int normalizing,
+ const struct checkout_metadata *meta,
+ struct delayed_checkout *dco)
{
int ret = 0, ret_filter = 0;
- struct conv_attrs ca;
- convert_attrs(istate, &ca, path);
-
- ret |= ident_to_worktree(src, len, dst, ca.ident);
+ ret |= ident_to_worktree(src, len, dst, ca->ident);
if (ret) {
src = dst->buf;
len = dst->len;
@@ -1487,49 +1470,56 @@ static int convert_to_working_tree_internal(const struct index_state *istate,
* is a smudge or process filter (even if the process filter doesn't
* support smudge). The filters might expect CRLFs.
*/
- if ((ca.drv && (ca.drv->smudge || ca.drv->process)) || !normalizing) {
- ret |= crlf_to_worktree(src, len, dst, ca.crlf_action);
+ if ((ca->drv && (ca->drv->smudge || ca->drv->process)) || !normalizing) {
+ ret |= crlf_to_worktree(src, len, dst, ca->crlf_action);
if (ret) {
src = dst->buf;
len = dst->len;
}
}
- ret |= encode_to_worktree(path, src, len, dst, ca.working_tree_encoding);
+ ret |= encode_to_worktree(path, src, len, dst, ca->working_tree_encoding);
if (ret) {
src = dst->buf;
len = dst->len;
}
ret_filter = apply_filter(
- path, src, len, -1, dst, ca.drv, CAP_SMUDGE, meta, dco);
- if (!ret_filter && ca.drv && ca.drv->required)
- die(_("%s: smudge filter %s failed"), path, ca.drv->name);
+ path, src, len, -1, dst, ca->drv, CAP_SMUDGE, meta, dco);
+ if (!ret_filter && ca->drv && ca->drv->required)
+ die(_("%s: smudge filter %s failed"), path, ca->drv->name);
return ret | ret_filter;
}
-int async_convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta,
- void *dco)
+int async_convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco)
{
- return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, dco);
+ return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0,
+ meta, dco);
}
-int convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta)
+int convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta)
{
- return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, NULL);
+ return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0,
+ meta, NULL);
}
int renormalize_buffer(const struct index_state *istate, const char *path,
const char *src, size_t len, struct strbuf *dst)
{
- int ret = convert_to_working_tree_internal(istate, path, src, len, dst, 1, NULL, NULL);
+ struct conv_attrs ca;
+ int ret;
+
+ convert_attrs(istate, &ca, path);
+ ret = convert_to_working_tree_ca_internal(&ca, path, src, len, dst, 1,
+ NULL, NULL);
if (ret) {
src = dst->buf;
len = dst->len;
@@ -1956,34 +1946,25 @@ static struct stream_filter *ident_filter(const struct object_id *oid)
}
/*
- * Return an appropriately constructed filter for the path, or NULL if
+ * Return an appropriately constructed filter for the given ca, or NULL if
* the contents cannot be filtered without reading the whole thing
* in-core.
*
* Note that you would be crazy to set CRLF, smudge/clean or ident to a
* large binary blob you would want us not to slurp into the memory!
*/
-struct stream_filter *get_stream_filter(const struct index_state *istate,
- const char *path,
- const struct object_id *oid)
+struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca,
+ const struct object_id *oid)
{
- struct conv_attrs ca;
struct stream_filter *filter = NULL;
- convert_attrs(istate, &ca, path);
- if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
+ if (classify_conv_attrs(ca) != CA_CLASS_STREAMABLE)
return NULL;
- if (ca.working_tree_encoding)
- return NULL;
-
- if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF)
- return NULL;
-
- if (ca.ident)
+ if (ca->ident)
filter = ident_filter(oid);
- if (output_eol(ca.crlf_action) == EOL_CRLF)
+ if (output_eol(ca->crlf_action) == EOL_CRLF)
filter = cascade_filter(filter, lf_to_crlf_filter());
else
filter = cascade_filter(filter, &null_filter_singleton);
@@ -1991,6 +1972,15 @@ struct stream_filter *get_stream_filter(const struct index_state *istate,
return filter;
}
+struct stream_filter *get_stream_filter(const struct index_state *istate,
+ const char *path,
+ const struct object_id *oid)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return get_stream_filter_ca(&ca, oid);
+}
+
void free_stream_filter(struct stream_filter *filter)
{
filter->vtbl->free(filter);
@@ -2024,3 +2014,21 @@ void clone_checkout_metadata(struct checkout_metadata *dst,
if (blob)
oidcpy(&dst->blob, blob);
}
+
+enum conv_attrs_classification classify_conv_attrs(const struct conv_attrs *ca)
+{
+ if (ca->drv) {
+ if (ca->drv->process)
+ return CA_CLASS_INCORE_PROCESS;
+ if (ca->drv->smudge || ca->drv->clean)
+ return CA_CLASS_INCORE_FILTER;
+ }
+
+ if (ca->working_tree_encoding)
+ return CA_CLASS_INCORE;
+
+ if (ca->crlf_action == CRLF_AUTO || ca->crlf_action == CRLF_AUTO_CRLF)
+ return CA_CLASS_INCORE;
+
+ return CA_CLASS_STREAMABLE;
+}
diff --git a/convert.h b/convert.h
index e29d1026a6..43e567a59b 100644
--- a/convert.h
+++ b/convert.h
@@ -63,6 +63,30 @@ struct checkout_metadata {
struct object_id blob;
};
+enum convert_crlf_action {
+ CRLF_UNDEFINED,
+ CRLF_BINARY,
+ CRLF_TEXT,
+ CRLF_TEXT_INPUT,
+ CRLF_TEXT_CRLF,
+ CRLF_AUTO,
+ CRLF_AUTO_INPUT,
+ CRLF_AUTO_CRLF
+};
+
+struct convert_driver;
+
+struct conv_attrs {
+ struct convert_driver *drv;
+ enum convert_crlf_action attr_action; /* What attr says */
+ enum convert_crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
+ int ident;
+ const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
+};
+
+void convert_attrs(const struct index_state *istate,
+ struct conv_attrs *ca, const char *path);
+
extern enum eol core_eol;
extern char *check_roundtrip_encoding;
const char *get_cached_convert_stats_ascii(const struct index_state *istate,
@@ -75,15 +99,34 @@ const char *get_convert_attr_ascii(const struct index_state *istate,
int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *dst, int conv_flags);
-int convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta);
-int async_convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta,
- void *dco);
+int convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta);
+int async_convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco);
+static inline int convert_to_working_tree(const struct index_state *istate,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return convert_to_working_tree_ca(&ca, path, src, len, dst, meta);
+}
+static inline int async_convert_to_working_tree(const struct index_state *istate,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return async_convert_to_working_tree_ca(&ca, path, src, len, dst, meta, dco);
+}
int async_query_available_blobs(const char *cmd,
struct string_list *available_paths);
int renormalize_buffer(const struct index_state *istate,
@@ -136,6 +179,8 @@ struct stream_filter; /* opaque */
struct stream_filter *get_stream_filter(const struct index_state *istate,
const char *path,
const struct object_id *);
+struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca,
+ const struct object_id *oid);
void free_stream_filter(struct stream_filter *);
int is_null_stream_filter(struct stream_filter *);
@@ -155,4 +200,37 @@ int stream_filter(struct stream_filter *,
const char *input, size_t *isize_p,
char *output, size_t *osize_p);
+enum conv_attrs_classification {
+ /*
+ * The blob must be loaded into a buffer before it can be
+ * smudged. All smudging is done in-proc.
+ */
+ CA_CLASS_INCORE,
+
+ /*
+ * The blob must be loaded into a buffer, but uses a
+ * single-file driver filter, such as rot13.
+ */
+ CA_CLASS_INCORE_FILTER,
+
+ /*
+ * The blob must be loaded into a buffer, but uses a
+ * long-running driver process, such as LFS. This might or
+ * might not use delayed operations. (The important thing is
+ * that there is a single subordinate long-running process
+ * handling all associated blobs and in case of delayed
+ * operations, may hold per-blob state.)
+ */
+ CA_CLASS_INCORE_PROCESS,
+
+ /*
+ * The blob can be streamed and smudged without needing to
+ * completely read it into a buffer.
+ */
+ CA_CLASS_STREAMABLE,
+};
+
+enum conv_attrs_classification classify_conv_attrs(
+ const struct conv_attrs *ca);
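+
+/*
+ * An illustrative use by a hypothetical caller that only wants a
+ * filter for streamable blobs (get_stream_filter_ca() performs the
+ * same classification internally):
+ *
+ *     struct conv_attrs ca;
+ *     convert_attrs(istate, &ca, path);
+ *     if (classify_conv_attrs(&ca) == CA_CLASS_STREAMABLE)
+ *         filter = get_stream_filter_ca(&ca, oid);
+ */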
+
#endif /* CONVERT_H */
diff --git a/csum-file.c b/csum-file.c
index 0f35fa5ee4..7510950fa3 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -89,32 +89,35 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int fl
void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
while (count) {
- unsigned offset = f->offset;
- unsigned left = sizeof(f->buffer) - offset;
+ unsigned left = sizeof(f->buffer) - f->offset;
unsigned nr = count > left ? left : count;
- const void *data;
if (f->do_crc)
f->crc32 = crc32(f->crc32, buf, nr);
if (nr == sizeof(f->buffer)) {
- /* process full buffer directly without copy */
- data = buf;
+ /*
+ * Flush a full batch worth of data directly
+ * from the input, skipping the memcpy() to
+ * the hashfile's buffer. In this block,
+ * f->offset is necessarily zero.
+ */
+ the_hash_algo->update_fn(&f->ctx, buf, nr);
+ flush(f, buf, nr);
} else {
- memcpy(f->buffer + offset, buf, nr);
- data = f->buffer;
+ /*
+ * Copy to the hashfile's buffer, flushing only
+ * if it became full.
+ */
+ memcpy(f->buffer + f->offset, buf, nr);
+ f->offset += nr;
+ left -= nr;
+ if (!left)
+ hashflush(f);
}
count -= nr;
- offset += nr;
buf = (char *) buf + nr;
- left -= nr;
- if (!left) {
- the_hash_algo->update_fn(&f->ctx, data, offset);
- flush(f, data, offset);
- offset = 0;
- }
- f->offset = offset;
}
}
diff --git a/daemon.c b/daemon.c
index 343531980d..5c4cbad62d 100644
--- a/daemon.c
+++ b/daemon.c
@@ -566,14 +566,14 @@ static void parse_host_and_port(char *hostport, char **host,
/*
* Sanitize a string from the client so that it's OK to be inserted into a
- * filesystem path. Specifically, we disallow slashes, runs of "..", and
- * trailing and leading dots, which means that the client cannot escape
- * our base path via ".." traversal.
+ * filesystem path. Specifically, we disallow directory separators, runs
+ * of "..", and trailing and leading dots, which means that the client
+ * cannot escape our base path via ".." traversal.
*/
static void sanitize_client(struct strbuf *out, const char *in)
{
for (; *in; in++) {
- if (*in == '/')
+ if (is_dir_sep(*in))
continue;
if (*in == '.' && (!out->len || out->buf[out->len - 1] == '.'))
continue;
diff --git a/diffcore-rename.c b/diffcore-rename.c
index e2ed648176..963ca58221 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -371,7 +371,7 @@ struct dir_rename_info {
struct strintmap idx_map;
struct strmap dir_rename_guess;
struct strmap *dir_rename_count;
- struct strset *relevant_source_dirs;
+ struct strintmap *relevant_source_dirs;
unsigned setup;
};
@@ -407,6 +407,28 @@ static const char *get_highest_rename_path(struct strintmap *counts)
return highest_destination_dir;
}
+static char *UNKNOWN_DIR = "/"; /* placeholder -- short, illegal directory */
+
+static int dir_rename_already_determinable(struct strintmap *counts)
+{
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+ int first = 0, second = 0, unknown = 0;
+ strintmap_for_each_entry(counts, &iter, entry) {
+ const char *destination_dir = entry->key;
+ intptr_t count = (intptr_t)entry->value;
+ if (!strcmp(destination_dir, UNKNOWN_DIR)) {
+ unknown = count;
+ } else if (count >= first) {
+ second = first;
+ first = count;
+ } else if (count >= second) {
+ second = count;
+ }
+ }
+ return first > second + unknown;
+}
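+
+/*
+ * Example with hypothetical counts: if the renamed files seen so far
+ * under some directory map to "x/" 7 times and to "y/" 2 times, with
+ * 3 potential sources still unknown, then 7 > 2 + 3 and "x/" keeps
+ * the majority no matter how the unknown sources resolve, so the
+ * directory rename is already determinable.
+ */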
+
static void increment_count(struct dir_rename_info *info,
char *old_dir,
char *new_dir)
@@ -429,7 +451,7 @@ static void increment_count(struct dir_rename_info *info,
}
static void update_dir_rename_counts(struct dir_rename_info *info,
- struct strset *dirs_removed,
+ struct strintmap *dirs_removed,
const char *oldname,
const char *newname)
{
@@ -461,10 +483,12 @@ static void update_dir_rename_counts(struct dir_rename_info *info,
return;
while (1) {
+ int drd_flag = NOT_RELEVANT;
+
/* Get old_dir, skip if its directory isn't relevant. */
dirname_munge(old_dir);
if (info->relevant_source_dirs &&
- !strset_contains(info->relevant_source_dirs, old_dir))
+ !strintmap_contains(info->relevant_source_dirs, old_dir))
break;
/* Get new_dir */
@@ -509,16 +533,31 @@ static void update_dir_rename_counts(struct dir_rename_info *info,
}
}
- if (strset_contains(dirs_removed, old_dir))
+ /*
+ * Above we suggested that we'd keep recording renames for
+ * all ancestor directories where the trailing directories
+ * matched, i.e. for
+ * "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+ * we'd increment rename counts for each of
+ * a/b/c/d/e/ => a/b/some/thing/else/e/
+ * a/b/c/d/ => a/b/some/thing/else/
+ * However, we only need the rename counts for directories
+ * in dirs_removed whose value is RELEVANT_FOR_SELF.
+ * That said, we add one special case of also recording it for
+ * first_time_in_loop because find_basename_matches() can
+ * use that as a hint to find a good pairing.
+ */
+ if (dirs_removed)
+ drd_flag = strintmap_get(dirs_removed, old_dir);
+ if (drd_flag == RELEVANT_FOR_SELF || first_time_in_loop)
increment_count(info, old_dir, new_dir);
- else
- break;
+ first_time_in_loop = 0;
+ if (drd_flag == NOT_RELEVANT)
+ break;
/* If we hit toplevel directory ("") for old or new dir, quit */
if (!*old_dir || !*new_dir)
break;
-
- first_time_in_loop = 0;
}
/* Free resources we don't need anymore */
@@ -527,14 +566,15 @@ static void update_dir_rename_counts(struct dir_rename_info *info,
}
static void initialize_dir_rename_info(struct dir_rename_info *info,
- struct strset *dirs_removed,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed,
struct strmap *dir_rename_count)
{
struct hashmap_iter iter;
struct strmap_entry *entry;
int i;
- if (!dirs_removed) {
+ if (!dirs_removed && !relevant_sources) {
info->setup = 0;
return;
}
@@ -549,7 +589,21 @@ static void initialize_dir_rename_info(struct dir_rename_info *info,
strmap_init_with_options(&info->dir_rename_guess, NULL, 0);
/* Setup info->relevant_source_dirs */
- info->relevant_source_dirs = dirs_removed;
+ info->relevant_source_dirs = NULL;
+ if (dirs_removed || !relevant_sources) {
+ info->relevant_source_dirs = dirs_removed; /* might be NULL */
+ } else {
+ info->relevant_source_dirs = xmalloc(sizeof(struct strintmap));
+ strintmap_init(info->relevant_source_dirs, 0 /* unused */);
+ strintmap_for_each_entry(relevant_sources, &iter, entry) {
+ char *dirname = get_dirname(entry->key);
+ if (!dirs_removed ||
+ strintmap_contains(dirs_removed, dirname))
+ strintmap_set(info->relevant_source_dirs,
+ dirname, 0 /* value irrelevant */);
+ free(dirname);
+ }
+ }
/*
* Loop setting up both info->idx_map, and doing setup of
@@ -610,7 +664,7 @@ void partial_clear_dir_rename_count(struct strmap *dir_rename_count)
}
static void cleanup_dir_rename_info(struct dir_rename_info *info,
- struct strset *dirs_removed,
+ struct strintmap *dirs_removed,
int keep_dir_rename_count)
{
struct hashmap_iter iter;
@@ -627,6 +681,13 @@ static void cleanup_dir_rename_info(struct dir_rename_info *info,
/* dir_rename_guess */
strmap_clear(&info->dir_rename_guess, 1);
+ /* relevant_source_dirs */
+ if (info->relevant_source_dirs &&
+ info->relevant_source_dirs != dirs_removed) {
+ strintmap_clear(info->relevant_source_dirs);
+ FREE_AND_NULL(info->relevant_source_dirs);
+ }
+
/* dir_rename_count */
if (!keep_dir_rename_count) {
partial_clear_dir_rename_count(info->dir_rename_count);
@@ -638,18 +699,22 @@ static void cleanup_dir_rename_info(struct dir_rename_info *info,
/*
* Although dir_rename_count was passed in
* diffcore_rename_extended() and we want to keep it around and
- * return it to that caller, we first want to remove any data
+ * return it to that caller, we first want to remove any counts in
+ * the maps associated with UNKNOWN_DIR entries and any data
* associated with directories that weren't renamed.
*/
strmap_for_each_entry(info->dir_rename_count, &iter, entry) {
const char *source_dir = entry->key;
struct strintmap *counts = entry->value;
- if (!strset_contains(dirs_removed, source_dir)) {
+ if (!strintmap_get(dirs_removed, source_dir)) {
string_list_append(&to_remove, source_dir);
strintmap_clear(counts);
continue;
}
+
+ if (strintmap_contains(counts, UNKNOWN_DIR))
+ strintmap_remove(counts, UNKNOWN_DIR);
}
for (i = 0; i < to_remove.nr; ++i)
strmap_remove(info->dir_rename_count,
@@ -749,7 +814,8 @@ static int idx_possible_rename(char *filename, struct dir_rename_info *info)
static int find_basename_matches(struct diff_options *options,
int minimum_score,
struct dir_rename_info *info,
- struct strset *dirs_removed)
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed)
{
/*
* When I checked in early 2020, over 76% of file renames in linux
@@ -839,6 +905,11 @@ static int find_basename_matches(struct diff_options *options,
intptr_t src_index;
intptr_t dst_index;
+ /* Skip irrelevant sources */
+ if (relevant_sources &&
+ !strintmap_contains(relevant_sources, filename))
+ continue;
+
/*
* If the basename is unique among remaining sources, then
* src_index will equal 'i' and we can attempt to match it
@@ -967,7 +1038,7 @@ static int find_renames(struct diff_score *mx,
int minimum_score,
int copies,
struct dir_rename_info *info,
- struct strset *dirs_removed)
+ struct strintmap *dirs_removed)
{
int count = 0, i;
@@ -991,11 +1062,12 @@ static int find_renames(struct diff_score *mx,
return count;
}
-static void remove_unneeded_paths_from_src(int detecting_copies)
+static void remove_unneeded_paths_from_src(int detecting_copies,
+ struct strintmap *interesting)
{
int i, new_num_src;
- if (detecting_copies)
+ if (detecting_copies && !interesting)
return; /* nothing to remove */
if (break_idx)
return; /* culling incompatible with break detection */
@@ -1022,12 +1094,18 @@ static void remove_unneeded_paths_from_src(int detecting_copies)
* from rename_src here.
*/
for (i = 0, new_num_src = 0; i < rename_src_nr; i++) {
+ struct diff_filespec *one = rename_src[i].p->one;
+
/*
* renames are stored in rename_dst, so if a rename has
* already been detected using this source, we can just
* remove the source knowing rename_dst has its info.
*/
- if (rename_src[i].p->one->rename_used)
+ if (!detecting_copies && one->rename_used)
+ continue;
+
+ /* If we don't care about the source path, skip it */
+ if (interesting && !strintmap_contains(interesting, one->path))
continue;
if (new_num_src < i)
@@ -1039,8 +1117,136 @@ static void remove_unneeded_paths_from_src(int detecting_copies)
rename_src_nr = new_num_src;
}
+static void handle_early_known_dir_renames(struct dir_rename_info *info,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed)
+{
+ /*
+ * Directory renames are determined via an aggregate of all renames
+ * under them and using a "majority wins" rule. The fact that
+ * "majority wins", though, means we don't need all the renames
+ * under the given directory, we only need enough to ensure we have
+ * a majority.
+ */
+
+ int i, new_num_src;
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+
+ if (!dirs_removed || !relevant_sources)
+ return; /* nothing to cull */
+ if (break_idx)
+	return; /* culling incompatible with break detection */
+
+ /*
+ * Supplement dir_rename_count with number of potential renames,
+ * marking all potential rename sources as mapping to UNKNOWN_DIR.
+ */
+ for (i = 0; i < rename_src_nr; i++) {
+ char *old_dir;
+ struct diff_filespec *one = rename_src[i].p->one;
+
+ /*
+ * sources that are part of a rename will have already been
+ * removed by a prior call to remove_unneeded_paths_from_src()
+ */
+ assert(!one->rename_used);
+
+ old_dir = get_dirname(one->path);
+ while (*old_dir != '\0' &&
+ NOT_RELEVANT != strintmap_get(dirs_removed, old_dir)) {
+ char *freeme = old_dir;
+
+ increment_count(info, old_dir, UNKNOWN_DIR);
+ old_dir = get_dirname(old_dir);
+
+ /* Free resources we don't need anymore */
+ free(freeme);
+ }
+ /*
+		 * get_dirname() returns a freshly allocated string each time;
+		 * the intermediate values were freed inside the loop above,
+		 * but the final old_dir (or the initial one, if the loop ran
+		 * 0 times) still needs to be freed here.
+ */
+ free(old_dir);
+ }
+
+ /*
+ * For any directory which we need a potential rename detected for
+ * (i.e. those marked as RELEVANT_FOR_SELF in dirs_removed), check
+ * whether we have enough renames to satisfy the "majority rules"
+ * requirement such that detecting any more renames of files under
+ * it won't change the result. For any such directory, mark that
+ * we no longer need to detect a rename for it. However, since we
+ * might need to still detect renames for an ancestor of that
+ * directory, use RELEVANT_FOR_ANCESTOR.
+ */
+ strmap_for_each_entry(info->dir_rename_count, &iter, entry) {
+ /* entry->key is source_dir */
+ struct strintmap *counts = entry->value;
+
+ if (strintmap_get(dirs_removed, entry->key) ==
+ RELEVANT_FOR_SELF &&
+ dir_rename_already_determinable(counts)) {
+ strintmap_set(dirs_removed, entry->key,
+ RELEVANT_FOR_ANCESTOR);
+ }
+ }
+
+ for (i = 0, new_num_src = 0; i < rename_src_nr; i++) {
+ struct diff_filespec *one = rename_src[i].p->one;
+ int val;
+
+ val = strintmap_get(relevant_sources, one->path);
+
+ /*
+ * sources that were not found in relevant_sources should
+ * have already been removed by a prior call to
+ * remove_unneeded_paths_from_src()
+ */
+ assert(val != -1);
+
+ if (val == RELEVANT_LOCATION) {
+ int removable = 1;
+ char *dir = get_dirname(one->path);
+ while (1) {
+ char *freeme = dir;
+ int res = strintmap_get(dirs_removed, dir);
+
+ /* Quit if not found or irrelevant */
+ if (res == NOT_RELEVANT)
+ break;
+ /* If RELEVANT_FOR_SELF, can't remove */
+ if (res == RELEVANT_FOR_SELF) {
+ removable = 0;
+ break;
+ }
+ /* Else continue searching upwards */
+ assert(res == RELEVANT_FOR_ANCESTOR);
+ dir = get_dirname(dir);
+ free(freeme);
+ }
+ free(dir);
+ if (removable) {
+ strintmap_set(relevant_sources, one->path,
+ RELEVANT_NO_MORE);
+ continue;
+ }
+ }
+
+ if (new_num_src < i)
+ memcpy(&rename_src[new_num_src], &rename_src[i],
+ sizeof(struct diff_rename_src));
+ new_num_src++;
+ }
+
+ rename_src_nr = new_num_src;
+}
+
void diffcore_rename_extended(struct diff_options *options,
- struct strset *dirs_removed,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed,
struct strmap *dir_rename_count)
{
int detect_rename = options->detect_rename;
@@ -1060,6 +1266,8 @@ void diffcore_rename_extended(struct diff_options *options,
want_copies = (detect_rename == DIFF_DETECT_COPY);
if (dirs_removed && (break_idx || want_copies))
BUG("dirs_removed incompatible with break/copy detection");
+ if (break_idx && relevant_sources)
+ BUG("break detection incompatible with source specification");
if (!minimum_score)
minimum_score = DEFAULT_RENAME_SCORE;
@@ -1127,9 +1335,10 @@ void diffcore_rename_extended(struct diff_options *options,
/*
* Cull sources:
* - remove ones corresponding to exact renames
+ * - remove ones not found in relevant_sources
*/
trace2_region_enter("diff", "cull after exact", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, relevant_sources);
trace2_region_leave("diff", "cull after exact", options->repo);
} else {
/* Determine minimum score to match basenames */
@@ -1148,12 +1357,12 @@ void diffcore_rename_extended(struct diff_options *options,
* - remove ones involved in renames (found via exact match)
*/
trace2_region_enter("diff", "cull after exact", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, NULL);
trace2_region_leave("diff", "cull after exact", options->repo);
/* Preparation for basename-driven matching. */
trace2_region_enter("diff", "dir rename setup", options->repo);
- initialize_dir_rename_info(&info,
+ initialize_dir_rename_info(&info, relevant_sources,
dirs_removed, dir_rename_count);
trace2_region_leave("diff", "dir rename setup", options->repo);
@@ -1161,15 +1370,25 @@ void diffcore_rename_extended(struct diff_options *options,
trace2_region_enter("diff", "basename matches", options->repo);
rename_count += find_basename_matches(options,
min_basename_score,
- &info, dirs_removed);
+ &info,
+ relevant_sources,
+ dirs_removed);
trace2_region_leave("diff", "basename matches", options->repo);
/*
* Cull sources, again:
* - remove ones involved in renames (found via basenames)
+ * - remove ones not found in relevant_sources
+ * and
+ * - remove ones in relevant_sources which are needed only
+	 *     for directory renames IF no ancestor directory
+	 *     actually needs to know any more individual path
+	 *     renames under it
*/
trace2_region_enter("diff", "cull basename", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, relevant_sources);
+ handle_early_known_dir_renames(&info, relevant_sources,
+ dirs_removed);
trace2_region_leave("diff", "cull basename", options->repo);
}
@@ -1341,5 +1560,5 @@ void diffcore_rename_extended(struct diff_options *options,
void diffcore_rename(struct diff_options *options)
{
- diffcore_rename_extended(options, NULL, NULL);
+ diffcore_rename_extended(options, NULL, NULL, NULL);
}
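
The culling above relies on dir_rename_already_determinable(), whose body is outside this hunk, to decide when the "majority wins" outcome for a removed directory can no longer change. The following standalone sketch (plain C, hypothetical names, not part of the patch) illustrates the early-exit idea: once the leading destination cannot be overtaken even if every still-unexamined source favored the runner-up, further rename detection for that directory is pointless.

/*
 * Standalone illustration (hypothetical names, not Git code) of the
 * "majority wins" early exit hinted at above.
 */
#include <stdio.h>

static int majority_already_determined(const int counts[], int n, int unknown)
{
	int best = 0, second = 0;

	for (int i = 0; i < n; i++) {
		if (counts[i] >= best) {
			second = best;
			best = counts[i];
		} else if (counts[i] > second) {
			second = counts[i];
		}
	}
	return best > second + unknown;
}

int main(void)
{
	int renames_into[] = { 7, 2, 1 }; /* tallies per candidate destination */
	int not_yet_examined = 3;         /* sources whose destination is unknown */

	printf("determined: %d\n",
	       majority_already_determined(renames_into, 3, not_yet_examined));
	return 0;
}
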
diff --git a/diffcore.h b/diffcore.h
index b9a230ab7f..f5c6de4841 100644
--- a/diffcore.h
+++ b/diffcore.h
@@ -8,8 +8,8 @@
struct diff_options;
struct repository;
+struct strintmap;
struct strmap;
-struct strset;
struct userdiff_driver;
/* This header file is internal between diff.c and its diff transformers
@@ -161,12 +161,26 @@ struct diff_filepair *diff_queue(struct diff_queue_struct *,
struct diff_filespec *);
void diff_q(struct diff_queue_struct *, struct diff_filepair *);
+/* dir_rename_relevance: the reason we want rename information for a dir */
+enum dir_rename_relevance {
+ NOT_RELEVANT = 0,
+ RELEVANT_FOR_ANCESTOR = 1,
+ RELEVANT_FOR_SELF = 2
+};
+/* file_rename_relevance: the reason(s) we want rename information for a file */
+enum file_rename_relevance {
+ RELEVANT_NO_MORE = 0, /* i.e. NOT relevant */
+ RELEVANT_CONTENT = 1,
+ RELEVANT_LOCATION = 2
+};
+
void partial_clear_dir_rename_count(struct strmap *dir_rename_count);
void diffcore_break(struct repository *, int);
void diffcore_rename(struct diff_options *);
void diffcore_rename_extended(struct diff_options *options,
- struct strset *dirs_removed,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed,
struct strmap *dir_rename_count);
void diffcore_merge_broken(void);
void diffcore_pickaxe(struct diff_options *);
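
As a rough illustration of how the two enums above are meant to be combined when culling rename sources (mirroring handle_early_known_dir_renames() in diffcore-rename.c), here is a hedged, standalone sketch; can_stop_detecting() is a hypothetical helper, and dir_reason stands in for the result of walking the deleted path's leading directories through dirs_removed.

#include <stdio.h>

/* Values copied from the diffcore.h hunk above. */
enum dir_rename_relevance {
	NOT_RELEVANT = 0,
	RELEVANT_FOR_ANCESTOR = 1,
	RELEVANT_FOR_SELF = 2
};
enum file_rename_relevance {
	RELEVANT_NO_MORE = 0,
	RELEVANT_CONTENT = 1,
	RELEVANT_LOCATION = 2
};

/*
 * Hypothetical helper: dir_reason stands for the strongest relevance
 * found while walking the deleted path's leading directories upward.
 */
static int can_stop_detecting(enum file_rename_relevance file_reason,
			      enum dir_rename_relevance dir_reason)
{
	if (file_reason == RELEVANT_CONTENT)
		return 0; /* the other side touched it; the rename result matters */
	if (file_reason == RELEVANT_NO_MORE)
		return 1; /* nothing needs this source anymore */
	/* RELEVANT_LOCATION: needed only while some dir needs it for itself */
	return dir_reason != RELEVANT_FOR_SELF;
}

int main(void)
{
	printf("%d\n", can_stop_detecting(RELEVANT_LOCATION,
					  RELEVANT_FOR_ANCESTOR)); /* prints 1 */
	return 0;
}
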
diff --git a/entry.c b/entry.c
index 7b9f43716f..2dc94ba5cc 100644
--- a/entry.c
+++ b/entry.c
@@ -6,6 +6,7 @@
#include "submodule.h"
#include "progress.h"
#include "fsmonitor.h"
+#include "entry.h"
static void create_directories(const char *path, int path_len,
const struct checkout *state)
@@ -83,7 +84,7 @@ static int create_file(const char *path, unsigned int mode)
return open(path, O_WRONLY | O_CREAT | O_EXCL, mode);
}
-static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
+void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
{
enum object_type type;
void *blob_data = read_object_file(&ce->oid, &type, size);
@@ -108,7 +109,7 @@ static int open_output_fd(char *path, const struct cache_entry *ce, int to_tempf
}
}
-static int fstat_output(int fd, const struct checkout *state, struct stat *st)
+int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st)
{
/* use fstat() only when path == ce->name */
if (fstat_is_reliable() &&
@@ -131,7 +132,7 @@ static int streaming_write_entry(const struct cache_entry *ce, char *path,
return -1;
result |= stream_blob_to_fd(fd, &ce->oid, filter, 1);
- *fstat_done = fstat_output(fd, state, statbuf);
+ *fstat_done = fstat_checkout_output(fd, state, statbuf);
result |= close(fd);
if (result)
@@ -250,8 +251,21 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts)
return errs;
}
-static int write_entry(struct cache_entry *ce,
- char *path, const struct checkout *state, int to_tempfile)
+void update_ce_after_write(const struct checkout *state, struct cache_entry *ce,
+ struct stat *st)
+{
+ if (state->refresh_cache) {
+ assert(state->istate);
+ fill_stat_cache_info(state->istate, ce, st);
+ ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(state->istate, ce);
+ state->istate->cache_changed |= CE_ENTRY_CHANGED;
+ }
+}
+
+/* Note: ca is used (and required) iff the entry refers to a regular file. */
+static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca,
+ const struct checkout *state, int to_tempfile)
{
unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT;
struct delayed_checkout *dco = state->delayed_checkout;
@@ -268,8 +282,7 @@ static int write_entry(struct cache_entry *ce,
clone_checkout_metadata(&meta, &state->meta, &ce->oid);
if (ce_mode_s_ifmt == S_IFREG) {
- struct stream_filter *filter = get_stream_filter(state->istate, ce->name,
- &ce->oid);
+ struct stream_filter *filter = get_stream_filter_ca(ca, &ce->oid);
if (filter &&
!streaming_write_entry(ce, path, filter,
state, to_tempfile,
@@ -316,14 +329,17 @@ static int write_entry(struct cache_entry *ce,
* Convert from git internal format to working tree format
*/
if (dco && dco->state != CE_NO_DELAY) {
- ret = async_convert_to_working_tree(state->istate, ce->name, new_blob,
- size, &buf, &meta, dco);
+ ret = async_convert_to_working_tree_ca(ca, ce->name,
+ new_blob, size,
+ &buf, &meta, dco);
if (ret && string_list_has_string(&dco->paths, ce->name)) {
free(new_blob);
goto delayed;
}
- } else
- ret = convert_to_working_tree(state->istate, ce->name, new_blob, size, &buf, &meta);
+ } else {
+ ret = convert_to_working_tree_ca(ca, ce->name, new_blob,
+ size, &buf, &meta);
+ }
if (ret) {
free(new_blob);
@@ -345,7 +361,7 @@ static int write_entry(struct cache_entry *ce,
wrote = write_in_full(fd, new_blob, size);
if (!to_tempfile)
- fstat_done = fstat_output(fd, state, &st);
+ fstat_done = fstat_checkout_output(fd, state, &st);
close(fd);
free(new_blob);
if (wrote < 0)
@@ -370,15 +386,10 @@ static int write_entry(struct cache_entry *ce,
finish:
if (state->refresh_cache) {
- assert(state->istate);
- if (!fstat_done)
- if (lstat(ce->name, &st) < 0)
- return error_errno("unable to stat just-written file %s",
- ce->name);
- fill_stat_cache_info(state->istate, ce, &st);
- ce->ce_flags |= CE_UPDATE_IN_BASE;
- mark_fsmonitor_invalid(state->istate, ce);
- state->istate->cache_changed |= CE_ENTRY_CHANGED;
+ if (!fstat_done && lstat(ce->name, &st) < 0)
+ return error_errno("unable to stat just-written file %s",
+ ce->name);
+	update_ce_after_write(state, ce, &st);
}
delayed:
return 0;
@@ -429,19 +440,13 @@ static void mark_colliding_entries(const struct checkout *state,
}
}
-/*
- * Write the contents from ce out to the working tree.
- *
- * When topath[] is not NULL, instead of writing to the working tree
- * file named by ce, a temporary file is created by this function and
- * its name is returned in topath[], which must be able to hold at
- * least TEMPORARY_FILENAME_LENGTH bytes long.
- */
-int checkout_entry(struct cache_entry *ce, const struct checkout *state,
- char *topath, int *nr_checkouts)
+int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts)
{
static struct strbuf path = STRBUF_INIT;
struct stat st;
+ struct conv_attrs ca_buf;
if (ce->ce_flags & CE_WT_REMOVE) {
if (topath)
@@ -454,8 +459,13 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state,
return 0;
}
- if (topath)
- return write_entry(ce, topath, state, 1);
+ if (topath) {
+ if (S_ISREG(ce->ce_mode) && !ca) {
+ convert_attrs(state->istate, &ca_buf, ce->name);
+ ca = &ca_buf;
+ }
+ return write_entry(ce, topath, ca, state, 1);
+ }
strbuf_reset(&path);
strbuf_add(&path, state->base_dir, state->base_dir_len);
@@ -517,9 +527,16 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state,
return 0;
create_directories(path.buf, path.len, state);
+
if (nr_checkouts)
(*nr_checkouts)++;
- return write_entry(ce, path.buf, state, 0);
+
+ if (S_ISREG(ce->ce_mode) && !ca) {
+ convert_attrs(state->istate, &ca_buf, ce->name);
+ ca = &ca_buf;
+ }
+
+ return write_entry(ce, path.buf, ca, state, 0);
}
void unlink_entry(const struct cache_entry *ce)
@@ -530,7 +547,7 @@ void unlink_entry(const struct cache_entry *ce)
submodule_move_head(ce->name, "HEAD", NULL,
SUBMODULE_MOVE_HEAD_FORCE);
}
- if (!check_leading_path(ce->name, ce_namelen(ce)))
+ if (check_leading_path(ce->name, ce_namelen(ce), 1) >= 0)
return;
if (remove_or_warn(ce->ce_mode, ce->name))
return;
diff --git a/entry.h b/entry.h
new file mode 100644
index 0000000000..b8c0e170dc
--- /dev/null
+++ b/entry.h
@@ -0,0 +1,59 @@
+#ifndef ENTRY_H
+#define ENTRY_H
+
+#include "cache.h"
+#include "convert.h"
+
+struct checkout {
+ struct index_state *istate;
+ const char *base_dir;
+ int base_dir_len;
+ struct delayed_checkout *delayed_checkout;
+ struct checkout_metadata meta;
+ unsigned force:1,
+ quiet:1,
+ not_new:1,
+ clone:1,
+ refresh_cache:1;
+};
+#define CHECKOUT_INIT { NULL, "" }
+
+#define TEMPORARY_FILENAME_LENGTH 25
+/*
+ * Write the contents from ce out to the working tree.
+ *
+ * When topath[] is not NULL, instead of writing to the working tree
+ * file named by ce, a temporary file is created by this function and
+ * its name is returned in topath[], which must be able to hold at
+ * least TEMPORARY_FILENAME_LENGTH bytes.
+ *
+ * With checkout_entry_ca(), callers can optionally pass a preloaded
+ * conv_attrs struct (to avoid reloading it), when ce refers to a
+ * regular file. If ca is NULL, the attributes will be loaded
+ * internally when (and if) needed.
+ */
+int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts);
+static inline int checkout_entry(struct cache_entry *ce,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts)
+{
+ return checkout_entry_ca(ce, NULL, state, topath, nr_checkouts);
+}
+
+void enable_delayed_checkout(struct checkout *state);
+int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
+
+/*
+ * Unlink the last component and schedule the leading directories for
+ * removal, such that empty directories get removed.
+ */
+void unlink_entry(const struct cache_entry *ce);
+
+void *read_blob_entry(const struct cache_entry *ce, unsigned long *size);
+int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st);
+void update_ce_after_write(const struct checkout *state, struct cache_entry *ce,
+ struct stat *st);
+
+#endif /* ENTRY_H */
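
A minimal usage sketch of the new *_ca() entry point (not taken from this patch): a caller that has already looked up the conversion attributes for a regular file can hand them to checkout_entry_ca() so write_entry() does not have to query the attribute stack again. checkout_one() is hypothetical; it assumes an initialized index_state and checkout, and elides error handling.

#include "cache.h"
#include "convert.h"
#include "entry.h"

/* Hypothetical caller; error handling elided. */
static int checkout_one(struct index_state *istate,
			const struct checkout *state,
			struct cache_entry *ce)
{
	struct conv_attrs ca;

	if (!S_ISREG(ce->ce_mode))
		return checkout_entry(ce, state, NULL, NULL);

	convert_attrs(istate, &ca, ce->name); /* look the attributes up once */
	return checkout_entry_ca(ce, &ca, state, NULL, NULL);
}
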
diff --git a/fetch-pack.c b/fetch-pack.c
index fb04a76ca2..6e68276aa3 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -38,6 +38,7 @@ static int server_supports_filtering;
static int advertise_sid;
static struct shallow_lock shallow_lock;
static const char *alternate_shallow_file;
+static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static struct strbuf fsck_msg_types = STRBUF_INIT;
static struct string_list uri_protocols = STRING_LIST_INIT_DUP;
@@ -987,22 +988,6 @@ static int cmp_ref_by_name(const void *a_, const void *b_)
return strcmp(a->name, b->name);
}
-static void fsck_gitmodules_oids(struct oidset *gitmodules_oids)
-{
- struct oidset_iter iter;
- const struct object_id *oid;
- struct fsck_options fo = FSCK_OPTIONS_STRICT;
-
- if (!oidset_size(gitmodules_oids))
- return;
-
- oidset_iter_init(gitmodules_oids, &iter);
- while ((oid = oidset_iter_next(&iter)))
- register_found_gitmodules(oid);
- if (fsck_finish(&fo))
- die("fsck failed");
-}
-
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
int fd[2],
const struct ref *orig_ref,
@@ -1017,7 +1002,6 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
int agent_len;
struct fetch_negotiator negotiator_alloc;
struct fetch_negotiator *negotiator;
- struct oidset gitmodules_oids = OIDSET_INIT;
negotiator = &negotiator_alloc;
fetch_negotiator_init(r, negotiator);
@@ -1129,14 +1113,17 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
if (args->deepen)
setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
NULL);
- else if (si->nr_ours || si->nr_theirs)
+ else if (si->nr_ours || si->nr_theirs) {
+ if (args->reject_shallow_remote)
+ die(_("source repository is shallow, reject to clone."));
alternate_shallow_file = setup_temporary_shallow(si->shallow);
- else
+ } else
alternate_shallow_file = NULL;
if (get_pack(args, fd, pack_lockfiles, NULL, sought, nr_sought,
- &gitmodules_oids))
+ &fsck_options.gitmodules_found))
die(_("git fetch-pack: fetch failed."));
- fsck_gitmodules_oids(&gitmodules_oids);
+ if (fsck_finish(&fsck_options))
+ die("fsck failed");
all_done:
if (negotiator)
@@ -1498,10 +1485,12 @@ static void receive_shallow_info(struct fetch_pack_args *args,
* rejected (unless --update-shallow is set); do the same.
*/
prepare_shallow_info(si, shallows);
- if (si->nr_ours || si->nr_theirs)
+ if (si->nr_ours || si->nr_theirs) {
+ if (args->reject_shallow_remote)
+ die(_("source repository is shallow, reject to clone."));
alternate_shallow_file =
setup_temporary_shallow(si->shallow);
- else
+ } else
alternate_shallow_file = NULL;
} else {
alternate_shallow_file = NULL;
@@ -1587,7 +1576,6 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct string_list packfile_uris = STRING_LIST_INIT_DUP;
int i;
struct strvec index_pack_args = STRVEC_INIT;
- struct oidset gitmodules_oids = OIDSET_INIT;
negotiator = &negotiator_alloc;
fetch_negotiator_init(r, negotiator);
@@ -1678,7 +1666,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
process_section_header(&reader, "packfile", 0);
if (get_pack(args, fd, pack_lockfiles,
packfile_uris.nr ? &index_pack_args : NULL,
- sought, nr_sought, &gitmodules_oids))
+ sought, nr_sought, &fsck_options.gitmodules_found))
die(_("git fetch-pack: fetch failed."));
do_check_stateless_delimiter(args, &reader);
@@ -1721,7 +1709,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
packname[the_hash_algo->hexsz] = '\0';
- parse_gitmodules_oids(cmd.out, &gitmodules_oids);
+ parse_gitmodules_oids(cmd.out, &fsck_options.gitmodules_found);
close(cmd.out);
@@ -1742,7 +1730,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
string_list_clear(&packfile_uris, 0);
strvec_clear(&index_pack_args);
- fsck_gitmodules_oids(&gitmodules_oids);
+ if (fsck_finish(&fsck_options))
+ die("fsck failed");
if (negotiator)
negotiator->release(negotiator);
diff --git a/fetch-pack.h b/fetch-pack.h
index 736a3dae46..f114d72775 100644
--- a/fetch-pack.h
+++ b/fetch-pack.h
@@ -39,6 +39,7 @@ struct fetch_pack_args {
unsigned self_contained_and_connected:1;
unsigned cloning:1;
unsigned update_shallow:1;
+ unsigned reject_shallow_remote:1;
unsigned deepen:1;
/*
diff --git a/fsck.c b/fsck.c
index e3030f3b35..f5ed6a2635 100644
--- a/fsck.c
+++ b/fsck.c
@@ -19,90 +19,19 @@
#include "credential.h"
#include "help.h"
-static struct oidset gitmodules_found = OIDSET_INIT;
-static struct oidset gitmodules_done = OIDSET_INIT;
-
-#define FSCK_FATAL -1
-#define FSCK_INFO -2
-
-#define FOREACH_MSG_ID(FUNC) \
- /* fatal errors */ \
- FUNC(NUL_IN_HEADER, FATAL) \
- FUNC(UNTERMINATED_HEADER, FATAL) \
- /* errors */ \
- FUNC(BAD_DATE, ERROR) \
- FUNC(BAD_DATE_OVERFLOW, ERROR) \
- FUNC(BAD_EMAIL, ERROR) \
- FUNC(BAD_NAME, ERROR) \
- FUNC(BAD_OBJECT_SHA1, ERROR) \
- FUNC(BAD_PARENT_SHA1, ERROR) \
- FUNC(BAD_TAG_OBJECT, ERROR) \
- FUNC(BAD_TIMEZONE, ERROR) \
- FUNC(BAD_TREE, ERROR) \
- FUNC(BAD_TREE_SHA1, ERROR) \
- FUNC(BAD_TYPE, ERROR) \
- FUNC(DUPLICATE_ENTRIES, ERROR) \
- FUNC(MISSING_AUTHOR, ERROR) \
- FUNC(MISSING_COMMITTER, ERROR) \
- FUNC(MISSING_EMAIL, ERROR) \
- FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
- FUNC(MISSING_OBJECT, ERROR) \
- FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
- FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
- FUNC(MISSING_TAG, ERROR) \
- FUNC(MISSING_TAG_ENTRY, ERROR) \
- FUNC(MISSING_TREE, ERROR) \
- FUNC(MISSING_TREE_OBJECT, ERROR) \
- FUNC(MISSING_TYPE, ERROR) \
- FUNC(MISSING_TYPE_ENTRY, ERROR) \
- FUNC(MULTIPLE_AUTHORS, ERROR) \
- FUNC(TREE_NOT_SORTED, ERROR) \
- FUNC(UNKNOWN_TYPE, ERROR) \
- FUNC(ZERO_PADDED_DATE, ERROR) \
- FUNC(GITMODULES_MISSING, ERROR) \
- FUNC(GITMODULES_BLOB, ERROR) \
- FUNC(GITMODULES_LARGE, ERROR) \
- FUNC(GITMODULES_NAME, ERROR) \
- FUNC(GITMODULES_SYMLINK, ERROR) \
- FUNC(GITMODULES_URL, ERROR) \
- FUNC(GITMODULES_PATH, ERROR) \
- FUNC(GITMODULES_UPDATE, ERROR) \
- /* warnings */ \
- FUNC(BAD_FILEMODE, WARN) \
- FUNC(EMPTY_NAME, WARN) \
- FUNC(FULL_PATHNAME, WARN) \
- FUNC(HAS_DOT, WARN) \
- FUNC(HAS_DOTDOT, WARN) \
- FUNC(HAS_DOTGIT, WARN) \
- FUNC(NULL_SHA1, WARN) \
- FUNC(ZERO_PADDED_FILEMODE, WARN) \
- FUNC(NUL_IN_COMMIT, WARN) \
- /* infos (reported as warnings, but ignored by default) */ \
- FUNC(GITMODULES_PARSE, INFO) \
- FUNC(BAD_TAG_NAME, INFO) \
- FUNC(MISSING_TAGGER_ENTRY, INFO) \
- /* ignored (elevated when requested) */ \
- FUNC(EXTRA_HEADER_ENTRY, IGNORE)
-
-#define MSG_ID(id, msg_type) FSCK_MSG_##id,
-enum fsck_msg_id {
- FOREACH_MSG_ID(MSG_ID)
- FSCK_MSG_MAX
-};
-#undef MSG_ID
-
#define STR(x) #x
#define MSG_ID(id, msg_type) { STR(id), NULL, NULL, FSCK_##msg_type },
static struct {
const char *id_string;
const char *downcased;
const char *camelcased;
- int msg_type;
+ enum fsck_msg_type msg_type;
} msg_id_info[FSCK_MSG_MAX + 1] = {
- FOREACH_MSG_ID(MSG_ID)
+ FOREACH_FSCK_MSG_ID(MSG_ID)
{ NULL, NULL, NULL, -1 }
};
#undef MSG_ID
+#undef STR
static void prepare_msg_ids(void)
{
@@ -164,25 +93,23 @@ void list_config_fsck_msg_ids(struct string_list *list, const char *prefix)
list_config_item(list, prefix, msg_id_info[i].camelcased);
}
-static int fsck_msg_type(enum fsck_msg_id msg_id,
+static enum fsck_msg_type fsck_msg_type(enum fsck_msg_id msg_id,
struct fsck_options *options)
{
- int msg_type;
-
assert(msg_id >= 0 && msg_id < FSCK_MSG_MAX);
- if (options->msg_type)
- msg_type = options->msg_type[msg_id];
- else {
- msg_type = msg_id_info[msg_id].msg_type;
+ if (!options->msg_type) {
+ enum fsck_msg_type msg_type = msg_id_info[msg_id].msg_type;
+
if (options->strict && msg_type == FSCK_WARN)
msg_type = FSCK_ERROR;
+ return msg_type;
}
- return msg_type;
+ return options->msg_type[msg_id];
}
-static int parse_msg_type(const char *str)
+static enum fsck_msg_type parse_msg_type(const char *str)
{
if (!strcmp(str, "error"))
return FSCK_ERROR;
@@ -202,28 +129,35 @@ int is_valid_msg_type(const char *msg_id, const char *msg_type)
return 1;
}
-void fsck_set_msg_type(struct fsck_options *options,
- const char *msg_id, const char *msg_type)
+void fsck_set_msg_type_from_ids(struct fsck_options *options,
+ enum fsck_msg_id msg_id,
+ enum fsck_msg_type msg_type)
{
- int id = parse_msg_id(msg_id), type;
-
- if (id < 0)
- die("Unhandled message id: %s", msg_id);
- type = parse_msg_type(msg_type);
-
- if (type != FSCK_ERROR && msg_id_info[id].msg_type == FSCK_FATAL)
- die("Cannot demote %s to %s", msg_id, msg_type);
-
if (!options->msg_type) {
int i;
- int *msg_type;
- ALLOC_ARRAY(msg_type, FSCK_MSG_MAX);
+ enum fsck_msg_type *severity;
+ ALLOC_ARRAY(severity, FSCK_MSG_MAX);
for (i = 0; i < FSCK_MSG_MAX; i++)
- msg_type[i] = fsck_msg_type(i, options);
- options->msg_type = msg_type;
+ severity[i] = fsck_msg_type(i, options);
+ options->msg_type = severity;
}
- options->msg_type[id] = type;
+ options->msg_type[msg_id] = msg_type;
+}
+
+void fsck_set_msg_type(struct fsck_options *options,
+ const char *msg_id_str, const char *msg_type_str)
+{
+ int msg_id = parse_msg_id(msg_id_str);
+ enum fsck_msg_type msg_type = parse_msg_type(msg_type_str);
+
+ if (msg_id < 0)
+ die("Unhandled message id: %s", msg_id_str);
+
+ if (msg_type != FSCK_ERROR && msg_id_info[msg_id].msg_type == FSCK_FATAL)
+ die("Cannot demote %s to %s", msg_id_str, msg_type_str);
+
+ fsck_set_msg_type_from_ids(options, msg_id, msg_type);
}
void fsck_set_msg_types(struct fsck_options *options, const char *values)
@@ -264,24 +198,6 @@ void fsck_set_msg_types(struct fsck_options *options, const char *values)
free(to_free);
}
-static void append_msg_id(struct strbuf *sb, const char *msg_id)
-{
- for (;;) {
- char c = *(msg_id)++;
-
- if (!c)
- break;
- if (c != '_')
- strbuf_addch(sb, tolower(c));
- else {
- assert(*msg_id);
- strbuf_addch(sb, *(msg_id)++);
- }
- }
-
- strbuf_addstr(sb, ": ");
-}
-
static int object_on_skiplist(struct fsck_options *opts,
const struct object_id *oid)
{
@@ -291,11 +207,12 @@ static int object_on_skiplist(struct fsck_options *opts,
__attribute__((format (printf, 5, 6)))
static int report(struct fsck_options *options,
const struct object_id *oid, enum object_type object_type,
- enum fsck_msg_id id, const char *fmt, ...)
+ enum fsck_msg_id msg_id, const char *fmt, ...)
{
va_list ap;
struct strbuf sb = STRBUF_INIT;
- int msg_type = fsck_msg_type(id, options), result;
+ enum fsck_msg_type msg_type = fsck_msg_type(msg_id, options);
+ int result;
if (msg_type == FSCK_IGNORE)
return 0;
@@ -308,12 +225,13 @@ static int report(struct fsck_options *options,
else if (msg_type == FSCK_INFO)
msg_type = FSCK_WARN;
- append_msg_id(&sb, msg_id_info[id].id_string);
+ prepare_msg_ids();
+ strbuf_addf(&sb, "%s: ", msg_id_info[msg_id].camelcased);
va_start(ap, fmt);
strbuf_vaddf(&sb, fmt, ap);
result = options->error_func(options, oid, object_type,
- msg_type, sb.buf);
+ msg_type, msg_id, sb.buf);
strbuf_release(&sb);
va_end(ap);
@@ -685,7 +603,7 @@ static int fsck_tree(const struct object_id *oid,
if (is_hfs_dotgitmodules(name) || is_ntfs_dotgitmodules(name)) {
if (!S_ISLNK(mode))
- oidset_insert(&gitmodules_found, oid);
+ oidset_insert(&options->gitmodules_found, oid);
else
retval += report(options,
oid, OBJ_TREE,
@@ -699,7 +617,7 @@ static int fsck_tree(const struct object_id *oid,
has_dotgit |= is_ntfs_dotgit(backslash);
if (is_ntfs_dotgitmodules(backslash)) {
if (!S_ISLNK(mode))
- oidset_insert(&gitmodules_found, oid);
+ oidset_insert(&options->gitmodules_found, oid);
else
retval += report(options, oid, OBJ_TREE,
FSCK_MSG_GITMODULES_SYMLINK,
@@ -1211,9 +1129,9 @@ static int fsck_blob(const struct object_id *oid, const char *buf,
struct fsck_gitmodules_data data;
struct config_options config_opts = { 0 };
- if (!oidset_contains(&gitmodules_found, oid))
+ if (!oidset_contains(&options->gitmodules_found, oid))
return 0;
- oidset_insert(&gitmodules_done, oid);
+ oidset_insert(&options->gitmodules_done, oid);
if (object_on_skiplist(options, oid))
return 0;
@@ -1266,7 +1184,9 @@ int fsck_object(struct object *obj, void *data, unsigned long size,
int fsck_error_function(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
if (msg_type == FSCK_WARN) {
warning("object %s: %s", fsck_describe_object(o, oid), message);
@@ -1276,24 +1196,19 @@ int fsck_error_function(struct fsck_options *o,
return 1;
}
-void register_found_gitmodules(const struct object_id *oid)
-{
- oidset_insert(&gitmodules_found, oid);
-}
-
int fsck_finish(struct fsck_options *options)
{
int ret = 0;
struct oidset_iter iter;
const struct object_id *oid;
- oidset_iter_init(&gitmodules_found, &iter);
+ oidset_iter_init(&options->gitmodules_found, &iter);
while ((oid = oidset_iter_next(&iter))) {
enum object_type type;
unsigned long size;
char *buf;
- if (oidset_contains(&gitmodules_done, oid))
+ if (oidset_contains(&options->gitmodules_done, oid))
continue;
buf = read_object_file(oid, &type, &size);
@@ -1318,14 +1233,14 @@ int fsck_finish(struct fsck_options *options)
}
- oidset_clear(&gitmodules_found);
- oidset_clear(&gitmodules_done);
+ oidset_clear(&options->gitmodules_found);
+ oidset_clear(&options->gitmodules_done);
return ret;
}
-int fsck_config_internal(const char *var, const char *value, void *cb,
- struct fsck_options *options)
+int git_fsck_config(const char *var, const char *value, void *cb)
{
+ struct fsck_options *options = cb;
if (strcmp(var, "fsck.skiplist") == 0) {
const char *path;
struct strbuf sb = STRBUF_INIT;
@@ -1346,3 +1261,21 @@ int fsck_config_internal(const char *var, const char *value, void *cb,
return git_default_config(var, value, cb);
}
+
+/*
+ * Custom error callbacks that are used in more than one place.
+ */
+
+int fsck_error_cb_print_missing_gitmodules(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
+{
+ if (msg_id == FSCK_MSG_GITMODULES_MISSING) {
+ puts(oid_to_hex(oid));
+ return 0;
+ }
+ return fsck_error_function(o, oid, object_type, msg_type, msg_id, message);
+}
diff --git a/fsck.h b/fsck.h
index 733378f126..7202c3c87e 100644
--- a/fsck.h
+++ b/fsck.h
@@ -3,15 +3,90 @@
#include "oidset.h"
-#define FSCK_ERROR 1
-#define FSCK_WARN 2
-#define FSCK_IGNORE 3
+enum fsck_msg_type {
+ /* for internal use only */
+ FSCK_IGNORE,
+ FSCK_INFO,
+ FSCK_FATAL,
+ /* "public", fed to e.g. error_func callbacks */
+ FSCK_ERROR,
+ FSCK_WARN,
+};
+
+#define FOREACH_FSCK_MSG_ID(FUNC) \
+ /* fatal errors */ \
+ FUNC(NUL_IN_HEADER, FATAL) \
+ FUNC(UNTERMINATED_HEADER, FATAL) \
+ /* errors */ \
+ FUNC(BAD_DATE, ERROR) \
+ FUNC(BAD_DATE_OVERFLOW, ERROR) \
+ FUNC(BAD_EMAIL, ERROR) \
+ FUNC(BAD_NAME, ERROR) \
+ FUNC(BAD_OBJECT_SHA1, ERROR) \
+ FUNC(BAD_PARENT_SHA1, ERROR) \
+ FUNC(BAD_TAG_OBJECT, ERROR) \
+ FUNC(BAD_TIMEZONE, ERROR) \
+ FUNC(BAD_TREE, ERROR) \
+ FUNC(BAD_TREE_SHA1, ERROR) \
+ FUNC(BAD_TYPE, ERROR) \
+ FUNC(DUPLICATE_ENTRIES, ERROR) \
+ FUNC(MISSING_AUTHOR, ERROR) \
+ FUNC(MISSING_COMMITTER, ERROR) \
+ FUNC(MISSING_EMAIL, ERROR) \
+ FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
+ FUNC(MISSING_OBJECT, ERROR) \
+ FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
+ FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
+ FUNC(MISSING_TAG, ERROR) \
+ FUNC(MISSING_TAG_ENTRY, ERROR) \
+ FUNC(MISSING_TREE, ERROR) \
+ FUNC(MISSING_TREE_OBJECT, ERROR) \
+ FUNC(MISSING_TYPE, ERROR) \
+ FUNC(MISSING_TYPE_ENTRY, ERROR) \
+ FUNC(MULTIPLE_AUTHORS, ERROR) \
+ FUNC(TREE_NOT_SORTED, ERROR) \
+ FUNC(UNKNOWN_TYPE, ERROR) \
+ FUNC(ZERO_PADDED_DATE, ERROR) \
+ FUNC(GITMODULES_MISSING, ERROR) \
+ FUNC(GITMODULES_BLOB, ERROR) \
+ FUNC(GITMODULES_LARGE, ERROR) \
+ FUNC(GITMODULES_NAME, ERROR) \
+ FUNC(GITMODULES_SYMLINK, ERROR) \
+ FUNC(GITMODULES_URL, ERROR) \
+ FUNC(GITMODULES_PATH, ERROR) \
+ FUNC(GITMODULES_UPDATE, ERROR) \
+ /* warnings */ \
+ FUNC(BAD_FILEMODE, WARN) \
+ FUNC(EMPTY_NAME, WARN) \
+ FUNC(FULL_PATHNAME, WARN) \
+ FUNC(HAS_DOT, WARN) \
+ FUNC(HAS_DOTDOT, WARN) \
+ FUNC(HAS_DOTGIT, WARN) \
+ FUNC(NULL_SHA1, WARN) \
+ FUNC(ZERO_PADDED_FILEMODE, WARN) \
+ FUNC(NUL_IN_COMMIT, WARN) \
+ /* infos (reported as warnings, but ignored by default) */ \
+ FUNC(GITMODULES_PARSE, INFO) \
+ FUNC(BAD_TAG_NAME, INFO) \
+ FUNC(MISSING_TAGGER_ENTRY, INFO) \
+ /* ignored (elevated when requested) */ \
+ FUNC(EXTRA_HEADER_ENTRY, IGNORE)
+
+#define MSG_ID(id, msg_type) FSCK_MSG_##id,
+enum fsck_msg_id {
+ FOREACH_FSCK_MSG_ID(MSG_ID)
+ FSCK_MSG_MAX
+};
+#undef MSG_ID
struct fsck_options;
struct object;
+void fsck_set_msg_type_from_ids(struct fsck_options *options,
+ enum fsck_msg_id msg_id,
+ enum fsck_msg_type msg_type);
void fsck_set_msg_type(struct fsck_options *options,
- const char *msg_id, const char *msg_type);
+ const char *msg_id, const char *msg_type);
void fsck_set_msg_types(struct fsck_options *options, const char *values);
int is_valid_msg_type(const char *msg_id, const char *msg_type);
@@ -23,28 +98,55 @@ int is_valid_msg_type(const char *msg_id, const char *msg_type);
* <0 error signaled and abort
* >0 error signaled and do not abort
*/
-typedef int (*fsck_walk_func)(struct object *obj, int type, void *data, struct fsck_options *options);
+typedef int (*fsck_walk_func)(struct object *obj, enum object_type object_type,
+ void *data, struct fsck_options *options);
/* callback for fsck_object, type is FSCK_ERROR or FSCK_WARN */
typedef int (*fsck_error)(struct fsck_options *o,
const struct object_id *oid, enum object_type object_type,
- int msg_type, const char *message);
+ enum fsck_msg_type msg_type, enum fsck_msg_id msg_id,
+ const char *message);
int fsck_error_function(struct fsck_options *o,
const struct object_id *oid, enum object_type object_type,
- int msg_type, const char *message);
+ enum fsck_msg_type msg_type, enum fsck_msg_id msg_id,
+ const char *message);
+int fsck_error_cb_print_missing_gitmodules(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message);
struct fsck_options {
fsck_walk_func walk;
fsck_error error_func;
unsigned strict:1;
- int *msg_type;
+ enum fsck_msg_type *msg_type;
struct oidset skiplist;
+ struct oidset gitmodules_found;
+ struct oidset gitmodules_done;
kh_oid_map_t *object_names;
};
-#define FSCK_OPTIONS_DEFAULT { NULL, fsck_error_function, 0, NULL, OIDSET_INIT }
-#define FSCK_OPTIONS_STRICT { NULL, fsck_error_function, 1, NULL, OIDSET_INIT }
+#define FSCK_OPTIONS_DEFAULT { \
+ .skiplist = OIDSET_INIT, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_function \
+}
+#define FSCK_OPTIONS_STRICT { \
+ .strict = 1, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_function, \
+}
+#define FSCK_OPTIONS_MISSING_GITMODULES { \
+ .strict = 1, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_cb_print_missing_gitmodules, \
+}
/* descend in all linked child objects
* the return value is:
@@ -62,8 +164,6 @@ int fsck_walk(struct object *obj, void *data, struct fsck_options *options);
int fsck_object(struct object *obj, void *data, unsigned long size,
struct fsck_options *options);
-void register_found_gitmodules(const struct object_id *oid);
-
/*
* fsck a tag, and pass info about it back to the caller. This is
* exposed fsck_object() internals for git-mktag(1).
@@ -109,7 +209,6 @@ const char *fsck_describe_object(struct fsck_options *options,
* git_config() callback for use by fsck-y tools that want to support
* fsck.<msg> fsck.skipList etc.
*/
-int fsck_config_internal(const char *var, const char *value, void *cb,
- struct fsck_options *options);
+int git_fsck_config(const char *var, const char *value, void *cb);
#endif
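
A short usage sketch (not from this patch) tying the pieces together the way the fetch-pack.c hunks do: the .gitmodules blob IDs seen during a fetch are collected into the options struct itself rather than file-scope statics, and the FSCK_OPTIONS_MISSING_GITMODULES initializer wires in the callback above, which prints the object ID of a missing .gitmodules blob rather than erroring out immediately. check_received_gitmodules() and its argument list are hypothetical.

#include "cache.h"
#include "fsck.h"

/*
 * Hypothetical helper; assumes a caller-supplied array of .gitmodules
 * blob IDs gathered while receiving a pack. Error handling elided.
 */
static void check_received_gitmodules(const struct object_id *oids, size_t nr)
{
	struct fsck_options opts = FSCK_OPTIONS_MISSING_GITMODULES;
	size_t i;

	/* record the blobs to check, as get_pack()/parse_gitmodules_oids() do */
	for (i = 0; i < nr; i++)
		oidset_insert(&opts.gitmodules_found, &oids[i]);

	if (fsck_finish(&opts)) /* also clears gitmodules_found/_done */
		die("fsck failed");
}
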
diff --git a/git-compat-util.h b/git-compat-util.h
index 9ddf9d7044..a508dbe5a3 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -256,6 +256,11 @@ static inline const char *precompose_argv_prefix(int argc, const char **argv, co
{
return prefix;
}
+static inline const char *precompose_string_if_needed(const char *in)
+{
+ return in;
+}
+
#define probe_utf8_pathname_composition()
#endif
diff --git a/git-send-email.perl b/git-send-email.perl
index 1f425c0809..175da07d94 100755
--- a/git-send-email.perl
+++ b/git-send-email.perl
@@ -212,22 +212,31 @@ my $dump_aliases = 0;
my $multiedit;
my $editor;
+sub system_or_msg {
+ my ($args, $msg) = @_;
+ system(@$args);
+ my $signalled = $? & 127;
+ my $exit_code = $? >> 8;
+ return unless $signalled or $exit_code;
+
+ return sprintf(__("fatal: command '%s' died with exit code %d"),
+ $args->[0], $exit_code);
+}
+
+sub system_or_die {
+ my $msg = system_or_msg(@_);
+ die $msg if $msg;
+}
+
sub do_edit {
if (!defined($editor)) {
$editor = Git::command_oneline('var', 'GIT_EDITOR');
}
+ my $die_msg = __("the editor exited uncleanly, aborting everything");
if (defined($multiedit) && !$multiedit) {
- map {
- system('sh', '-c', $editor.' "$@"', $editor, $_);
- if (($? & 127) || ($? >> 8)) {
- die(__("the editor exited uncleanly, aborting everything"));
- }
- } @_;
+ system_or_die(['sh', '-c', $editor.' "$@"', $editor, $_], $die_msg) for @_;
} else {
- system('sh', '-c', $editor.' "$@"', $editor, @_);
- if (($? & 127) || ($? >> 8)) {
- die(__("the editor exited uncleanly, aborting everything"));
- }
+ system_or_die(['sh', '-c', $editor.' "$@"', $editor, @_], $die_msg);
}
}
@@ -698,9 +707,7 @@ if (@rev_list_opts) {
if ($validate) {
foreach my $f (@files) {
unless (-p $f) {
- my $error = validate_patch($f, $target_xfer_encoding);
- $error and die sprintf(__("fatal: %s: %s\nwarning: no patches were sent\n"),
- $f, $error);
+ validate_patch($f, $target_xfer_encoding);
}
}
}
@@ -1942,7 +1949,7 @@ sub validate_patch {
my ($fn, $xfer_encoding) = @_;
if ($repo) {
- my $validate_hook = catfile(catdir($repo->repo_path(), 'hooks'),
+ my $validate_hook = catfile($repo->hooks_path(),
'sendemail-validate');
my $hook_error;
if (-x $validate_hook) {
@@ -1952,11 +1959,14 @@ sub validate_patch {
chdir($repo->wc_path() or $repo->repo_path())
or die("chdir: $!");
local $ENV{"GIT_DIR"} = $repo->repo_path();
- $hook_error = "rejected by sendemail-validate hook"
- if system($validate_hook, $target);
+ $hook_error = system_or_msg([$validate_hook, $target]);
chdir($cwd_save) or die("chdir: $!");
}
- return $hook_error if $hook_error;
+ if ($hook_error) {
+ die sprintf(__("fatal: %s: rejected by sendemail-validate hook\n" .
+ "%s\n" .
+ "warning: no patches were sent\n"), $fn, $hook_error);
+ }
}
# Any long lines will be automatically fixed if we use a suitable transfer
@@ -1966,7 +1976,8 @@ sub validate_patch {
or die sprintf(__("unable to open %s: %s\n"), $fn, $!);
while (my $line = <$fh>) {
if (length($line) > 998) {
- return sprintf(__("%s: patch contains a line longer than 998 characters"), $.);
+ die sprintf(__("fatal: %s:%d is longer than 998 characters\n" .
+ "warning: no patches were sent\n"), $fn, $.);
}
}
}
diff --git a/git.c b/git.c
index 9bc077a025..b53e665671 100644
--- a/git.c
+++ b/git.c
@@ -423,7 +423,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
int nongit_ok;
prefix = setup_git_directory_gently(&nongit_ok);
}
- prefix = precompose_argv_prefix(argc, argv, prefix);
+ precompose_argv_prefix(argc, argv, NULL);
if (use_pager == -1 && p->option & (RUN_SETUP | RUN_SETUP_GENTLY) &&
!(p->option & DELAY_PAGER_CONFIG))
use_pager = check_pager_config(p->cmd);
diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl
index 0959a782ec..e09e024a09 100755
--- a/gitweb/gitweb.perl
+++ b/gitweb/gitweb.perl
@@ -569,6 +569,15 @@ our %feature = (
'sub' => \&feature_extra_branch_refs,
'override' => 0,
'default' => []},
+
+ # Redact e-mail addresses.
+
+ # To enable system wide have in $GITWEB_CONFIG
+ # $feature{'email-privacy'}{'default'} = [1];
+ 'email-privacy' => {
+ 'sub' => sub { feature_bool('email-privacy', @_) },
+ 'override' => 1,
+ 'default' => [0]},
);
sub gitweb_get_feature {
@@ -3449,6 +3458,13 @@ sub parse_date {
return %date;
}
+sub hide_mailaddrs_if_private {
+ my $line = shift;
+ return $line unless gitweb_check_feature('email-privacy');
+ $line =~ s/<[^@>]+@[^>]+>/<redacted>/g;
+ return $line;
+}
+
sub parse_tag {
my $tag_id = shift;
my %tag;
@@ -3465,7 +3481,7 @@ sub parse_tag {
} elsif ($line =~ m/^tag (.+)$/) {
$tag{'name'} = $1;
} elsif ($line =~ m/^tagger (.*) ([0-9]+) (.*)$/) {
- $tag{'author'} = $1;
+ $tag{'author'} = hide_mailaddrs_if_private($1);
$tag{'author_epoch'} = $2;
$tag{'author_tz'} = $3;
if ($tag{'author'} =~ m/^([^<]+) <([^>]*)>/) {
@@ -3513,7 +3529,7 @@ sub parse_commit_text {
} elsif ((!defined $withparents) && ($line =~ m/^parent ($oid_regex)$/)) {
push @parents, $1;
} elsif ($line =~ m/^author (.*) ([0-9]+) (.*)$/) {
- $co{'author'} = to_utf8($1);
+ $co{'author'} = hide_mailaddrs_if_private(to_utf8($1));
$co{'author_epoch'} = $2;
$co{'author_tz'} = $3;
if ($co{'author'} =~ m/^([^<]+) <([^>]*)>/) {
@@ -3523,7 +3539,7 @@ sub parse_commit_text {
$co{'author_name'} = $co{'author'};
}
} elsif ($line =~ m/^committer (.*) ([0-9]+) (.*)$/) {
- $co{'committer'} = to_utf8($1);
+ $co{'committer'} = hide_mailaddrs_if_private(to_utf8($1));
$co{'committer_epoch'} = $2;
$co{'committer_tz'} = $3;
if ($co{'committer'} =~ m/^([^<]+) <([^>]*)>/) {
@@ -3568,9 +3584,10 @@ sub parse_commit_text {
if (! defined $co{'title'} || $co{'title'} eq "") {
$co{'title'} = $co{'title_short'} = '(no commit message)';
}
- # remove added spaces
+ # remove added spaces, redact e-mail addresses if applicable.
foreach my $line (@commit_lines) {
$line =~ s/^ //;
+ $line = hide_mailaddrs_if_private($line);
}
$co{'comment'} = \@commit_lines;
@@ -7489,7 +7506,8 @@ sub git_log_generic {
-accesskey => "n", -title => "Alt-n"}, "next");
}
my $patch_max = gitweb_get_feature('patches');
- if ($patch_max && !defined $file_name) {
+ if ($patch_max && !defined $file_name &&
+ !gitweb_check_feature('email-privacy')) {
if ($patch_max < 0 || @commitlist <= $patch_max) {
$paging_nav .= " &sdot; " .
$cgi->a({-href => href(action=>"patches", -replay=>1)},
@@ -7550,7 +7568,8 @@ sub git_commit {
} @$parents ) .
')';
}
- if (gitweb_check_feature('patches') && @$parents <= 1) {
+ if (gitweb_check_feature('patches') && @$parents <= 1 &&
+ !gitweb_check_feature('email-privacy')) {
$formats_nav .= " | " .
$cgi->a({-href => href(action=>"patch", -replay=>1)},
"patch");
@@ -7863,7 +7882,8 @@ sub git_commitdiff {
$formats_nav =
$cgi->a({-href => href(action=>"commitdiff_plain", -replay=>1)},
"raw");
- if ($patch_max && @{$co{'parents'}} <= 1) {
+ if ($patch_max && @{$co{'parents'}} <= 1 &&
+ !gitweb_check_feature('email-privacy')) {
$formats_nav .= " | " .
$cgi->a({-href => href(action=>"patch", -replay=>1)},
"patch");
diff --git a/http.c b/http.c
index 70b0f15aef..406410f884 100644
--- a/http.c
+++ b/http.c
@@ -1650,17 +1650,18 @@ static int handle_curl_result(struct slot_results *results)
} else if (missing_target(results))
return HTTP_MISSING_TARGET;
else if (results->http_code == 401) {
+#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY
+ http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE;
+ if (results->auth_avail) {
+ http_auth_methods &= results->auth_avail;
+ http_auth_methods_restricted = 1;
+ return HTTP_REAUTH;
+ }
+#endif
if (http_auth.username && http_auth.password) {
credential_reject(&http_auth);
return HTTP_NOAUTH;
} else {
-#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY
- http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE;
- if (results->auth_avail) {
- http_auth_methods &= results->auth_avail;
- http_auth_methods_restricted = 1;
- }
-#endif
return HTTP_REAUTH;
}
} else {
diff --git a/log-tree.c b/log-tree.c
index 4531cebfab..f3178a66a9 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -369,8 +369,14 @@ void fmt_output_subject(struct strbuf *filename,
int start_len = filename->len;
int max_len = start_len + info->patch_name_max - (strlen(suffix) + 1);
- if (0 < info->reroll_count)
- strbuf_addf(filename, "v%d-", info->reroll_count);
+ if (info->reroll_count) {
+ struct strbuf temp = STRBUF_INIT;
+
+ strbuf_addf(&temp, "v%s", info->reroll_count);
+ format_sanitized_subject(filename, temp.buf, temp.len);
+ strbuf_addstr(filename, "-");
+ strbuf_release(&temp);
+ }
strbuf_addf(filename, "%04d-%s", nr, subject);
if (max_len < filename->len)
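
The upshot of the change above is that the reroll count is now an arbitrary string and is sanitized before being embedded in the patch filename, so values like --reroll-count=2.5 work. Below is a standalone sketch of that flow; sanitize() is a hypothetical stand-in for Git's format_sanitized_subject() and may differ from it in exactly which characters it preserves.

#include <ctype.h>
#include <stdio.h>

/* Hypothetical stand-in for format_sanitized_subject(). */
static void sanitize(char *out, size_t outsz, const char *in)
{
	size_t j = 0;

	for (size_t i = 0; in[i] && j + 1 < outsz; i++) {
		unsigned char c = in[i];
		out[j++] = (isalnum(c) || c == '.' || c == '_') ? c : '-';
	}
	out[j] = '\0';
}

int main(void)
{
	char v[32], name[128];

	sanitize(v, sizeof(v), "2.5");	/* e.g. --reroll-count=2.5 */
	snprintf(name, sizeof(name), "v%s-%04d-%s.patch", v, 1, "my-subject");
	puts(name);			/* v2.5-0001-my-subject.patch */
	return 0;
}
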
diff --git a/merge-ort.c b/merge-ort.c
index ba35600d4e..b1795d838e 100644
--- a/merge-ort.c
+++ b/merge-ort.c
@@ -18,6 +18,7 @@
#include "merge-ort.h"
#include "alloc.h"
+#include "attr.h"
#include "blob.h"
#include "cache-tree.h"
#include "commit.h"
@@ -25,6 +26,7 @@
#include "diff.h"
#include "diffcore.h"
#include "dir.h"
+#include "entry.h"
#include "ll-merge.h"
#include "object-store.h"
#include "revision.h"
@@ -51,6 +53,12 @@ enum merge_side {
MERGE_SIDE2 = 2
};
+struct traversal_callback_data {
+ unsigned long mask;
+ unsigned long dirmask;
+ struct name_entry names[3];
+};
+
struct rename_info {
/*
* All variables that are arrays of size 3 correspond to data tracked
@@ -67,8 +75,12 @@ struct rename_info {
/*
* dirs_removed: directories removed on a given side of history.
+ *
+ * The keys of dirs_removed[side] are the directories that were removed
+ * on the given side of history. The value of the strintmap for each
+ * directory is a value from enum dir_rename_relevance.
*/
- struct strset dirs_removed[3];
+ struct strintmap dirs_removed[3];
/*
* dir_rename_count: tracking where parts of a directory were renamed to
@@ -89,6 +101,46 @@ struct rename_info {
struct strmap dir_renames[3];
/*
+ * relevant_sources: deleted paths wanted in rename detection, and why
+ *
+ * relevant_sources is a set of deleted paths on each side of
+ * history for which we need rename detection. If a path is deleted
+ * on one side of history, we need to detect if it is part of a
+ * rename if either
+ * * the file is modified/deleted on the other side of history
+ * * we need to detect renames for an ancestor directory
+ * If neither of those are true, we can skip rename detection for
+ * that path. The reason is stored as a value from enum
+ * file_rename_relevance, as the reason can inform the algorithm in
+ * diffcore_rename_extended().
+ */
+ struct strintmap relevant_sources[3];
+
+ /*
+ * dir_rename_mask:
+ * 0: optimization removing unmodified potential rename source okay
+ * 2 or 4: optimization okay, but must check for files added to dir
+ * 7: optimization forbidden; need rename source in case of dir rename
+ */
+ unsigned dir_rename_mask:3;
+
+ /*
+ * callback_data_*: supporting data structures for alternate traversal
+ *
+ * We sometimes need to be able to traverse through all the files
+ * in a given tree before all immediate subdirectories within that
+ * tree. Since traverse_trees() doesn't do that naturally, we have
+ * a traverse_trees_wrapper() that stores any immediate
+ * subdirectories while traversing files, then traverses the
+ * immediate subdirectories later. These callback_data* variables
+	 * store the information for the subdirectories so that we can
+	 * traverse them in that order.
+ */
+ struct traversal_callback_data *callback_data;
+ int callback_data_nr, callback_data_alloc;
+ char *callback_data_traverse_path;
+
+ /*
* needed_limit: value needed for inexact rename detection to run
*
* If the current rename limit wasn't high enough for inexact
@@ -171,6 +223,16 @@ struct merge_options_internal {
struct rename_info renames;
/*
+ * attr_index: hacky minimal index used for renormalization
+ *
+ * renormalization code _requires_ an index, though it only needs to
+ * find a .gitattributes file within the index. So, when
+ * renormalization is important, we create a special index with just
+ * that one file.
+ */
+ struct index_state attr_index;
+
+ /*
* current_dir_name, toplevel_dir: temporary vars
*
* These are used in collect_merge_info_callback(), and will set the
@@ -318,8 +380,8 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
int i;
void (*strmap_func)(struct strmap *, int) =
reinitialize ? strmap_partial_clear : strmap_clear;
- void (*strset_func)(struct strset *) =
- reinitialize ? strset_partial_clear : strset_clear;
+ void (*strintmap_func)(struct strintmap *) =
+ reinitialize ? strintmap_partial_clear : strintmap_clear;
/*
* We marked opti->paths with strdup_strings = 0, so that we
@@ -349,15 +411,20 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
string_list_clear(&opti->paths_to_free, 0);
opti->paths_to_free.strdup_strings = 0;
+ if (opti->attr_index.cache_nr) /* true iff opt->renormalize */
+ discard_index(&opti->attr_index);
+
/* Free memory used by various renames maps */
for (i = MERGE_SIDE1; i <= MERGE_SIDE2; ++i) {
- strset_func(&renames->dirs_removed[i]);
+ strintmap_func(&renames->dirs_removed[i]);
partial_clear_dir_rename_count(&renames->dir_rename_count[i]);
if (!reinitialize)
strmap_clear(&renames->dir_rename_count[i], 1);
strmap_func(&renames->dir_renames[i], 0);
+
+ strintmap_func(&renames->relevant_sources[i]);
}
if (!reinitialize) {
@@ -380,6 +447,12 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
}
strmap_clear(&opti->output, 0);
}
+
+ renames->dir_rename_mask = 0;
+
+ /* Clean out callback_data as well. */
+ FREE_AND_NULL(renames->callback_data);
+ renames->callback_data_nr = renames->callback_data_alloc = 0;
}
static int err(struct merge_options *opt, const char *err, ...)
@@ -470,6 +543,82 @@ static char *unique_path(struct strmap *existing_paths,
/*** Function Grouping: functions related to collect_merge_info() ***/
+static int traverse_trees_wrapper_callback(int n,
+ unsigned long mask,
+ unsigned long dirmask,
+ struct name_entry *names,
+ struct traverse_info *info)
+{
+ struct merge_options *opt = info->data;
+ struct rename_info *renames = &opt->priv->renames;
+ unsigned filemask = mask & ~dirmask;
+
+ assert(n==3);
+
+ if (!renames->callback_data_traverse_path)
+ renames->callback_data_traverse_path = xstrdup(info->traverse_path);
+
+ if (filemask && filemask == renames->dir_rename_mask)
+ renames->dir_rename_mask = 0x07;
+
+ ALLOC_GROW(renames->callback_data, renames->callback_data_nr + 1,
+ renames->callback_data_alloc);
+ renames->callback_data[renames->callback_data_nr].mask = mask;
+ renames->callback_data[renames->callback_data_nr].dirmask = dirmask;
+ COPY_ARRAY(renames->callback_data[renames->callback_data_nr].names,
+ names, 3);
+ renames->callback_data_nr++;
+
+ return mask;
+}
+
+/*
+ * Much like traverse_trees(), BUT:
+ * - read all the tree entries FIRST, saving them
+ * - note that the above step provides an opportunity to compute necessary
+ * additional details before the "real" traversal
+ * - loop through the saved entries and call the original callback on them
+ */
+static int traverse_trees_wrapper(struct index_state *istate,
+ int n,
+ struct tree_desc *t,
+ struct traverse_info *info)
+{
+ int ret, i, old_offset;
+ traverse_callback_t old_fn;
+ char *old_callback_data_traverse_path;
+ struct merge_options *opt = info->data;
+ struct rename_info *renames = &opt->priv->renames;
+
+ assert(renames->dir_rename_mask == 2 || renames->dir_rename_mask == 4);
+
+ old_callback_data_traverse_path = renames->callback_data_traverse_path;
+ old_fn = info->fn;
+ old_offset = renames->callback_data_nr;
+
+ renames->callback_data_traverse_path = NULL;
+ info->fn = traverse_trees_wrapper_callback;
+ ret = traverse_trees(istate, n, t, info);
+ if (ret < 0)
+ return ret;
+
+ info->traverse_path = renames->callback_data_traverse_path;
+ info->fn = old_fn;
+ for (i = old_offset; i < renames->callback_data_nr; ++i) {
+ info->fn(n,
+ renames->callback_data[i].mask,
+ renames->callback_data[i].dirmask,
+ renames->callback_data[i].names,
+ info);
+ }
+
+ renames->callback_data_nr = old_offset;
+ free(renames->callback_data_traverse_path);
+ renames->callback_data_traverse_path = old_callback_data_traverse_path;
+ info->traverse_path = NULL;
+ return 0;
+}
+
static void setup_path_info(struct merge_options *opt,
struct string_list_item *result,
const char *current_dir_name,
@@ -533,12 +682,25 @@ static void add_pair(struct merge_options *opt,
struct name_entry *names,
const char *pathname,
unsigned side,
- unsigned is_add /* if false, is_delete */)
+ unsigned is_add /* if false, is_delete */,
+ unsigned match_mask,
+ unsigned dir_rename_mask)
{
struct diff_filespec *one, *two;
struct rename_info *renames = &opt->priv->renames;
int names_idx = is_add ? side : 0;
+ if (!is_add) {
+ unsigned content_relevant = (match_mask == 0);
+ unsigned location_relevant = (dir_rename_mask == 0x07);
+
+ if (content_relevant || location_relevant) {
+ /* content_relevant trumps location_relevant */
+ strintmap_set(&renames->relevant_sources[side], pathname,
+ content_relevant ? RELEVANT_CONTENT : RELEVANT_LOCATION);
+ }
+ }
+
one = alloc_filespec(pathname);
two = alloc_filespec(pathname);
fill_filespec(is_add ? two : one,
@@ -557,14 +719,75 @@ static void collect_rename_info(struct merge_options *opt,
struct rename_info *renames = &opt->priv->renames;
unsigned side;
+ /*
+ * Update dir_rename_mask (determines ignore-rename-source validity)
+ *
+ * dir_rename_mask helps us keep track of when directory rename
+	 * detection may be relevant.  Basically, whenever a directory is
+ * removed on one side of history, and a file is added to that
+ * directory on the other side of history, directory rename
+ * detection is relevant (meaning we have to detect renames for all
+ * files within that directory to deduce where the directory
+ * moved). Also, whenever a directory needs directory rename
+ * detection, due to the "majority rules" choice for where to move
+ * it (see t6423 testcase 1f), we also need to detect renames for
+ * all files within subdirectories of that directory as well.
+ *
+ * Here we haven't looked at files within the directory yet, we are
+ * just looking at the directory itself. So, if we aren't yet in
+ * a case where a parent directory needed directory rename detection
+ * (i.e. dir_rename_mask != 0x07), and if the directory was removed
+ * on one side of history, record the mask of the other side of
+ * history in dir_rename_mask.
+ */
+ if (renames->dir_rename_mask != 0x07 &&
+ (dirmask == 3 || dirmask == 5)) {
+ /* simple sanity check */
+ assert(renames->dir_rename_mask == 0 ||
+ renames->dir_rename_mask == (dirmask & ~1));
+ /* update dir_rename_mask; have it record mask of new side */
+ renames->dir_rename_mask = (dirmask & ~1);
+ }
+
/* Update dirs_removed, as needed */
if (dirmask == 1 || dirmask == 3 || dirmask == 5) {
/* absent_mask = 0x07 - dirmask; sides = absent_mask/2 */
unsigned sides = (0x07 - dirmask)/2;
+ unsigned relevance = (renames->dir_rename_mask == 0x07) ?
+ RELEVANT_FOR_ANCESTOR : NOT_RELEVANT;
+ /*
+ * Record relevance of this directory. However, note that
+ * when collect_merge_info_callback() recurses into this
+ * directory and calls collect_rename_info() on paths
+ * within that directory, if we find a path that was added
+ * to this directory on the other side of history, we will
+ * upgrade this value to RELEVANT_FOR_SELF; see below.
+ */
if (sides & 1)
- strset_add(&renames->dirs_removed[1], fullname);
+ strintmap_set(&renames->dirs_removed[1], fullname,
+ relevance);
if (sides & 2)
- strset_add(&renames->dirs_removed[2], fullname);
+ strintmap_set(&renames->dirs_removed[2], fullname,
+ relevance);
+ }
+
+ /*
+	 * Here's the block that potentially upgrades to RELEVANT_FOR_SELF:
+	 * when we run across a file added to a directory, we find the
+	 * directory of the file and upgrade its relevance.
+ */
+ if (renames->dir_rename_mask == 0x07 &&
+ (filemask == 2 || filemask == 4)) {
+ /*
+ * Need directory rename for parent directory on other side
+ * of history from added file. Thus
+ * side = (~filemask & 0x06) >> 1
+ * or
+ * side = 3 - (filemask/2).
+ */
+ unsigned side = 3 - (filemask >> 1);
+ strintmap_set(&renames->dirs_removed[side], dirname,
+ RELEVANT_FOR_SELF);
}
if (filemask == 0 || filemask == 7)
@@ -575,11 +798,15 @@ static void collect_rename_info(struct merge_options *opt,
/* Check for deletion on side */
if ((filemask & 1) && !(filemask & side_mask))
- add_pair(opt, names, fullname, side, 0 /* delete */);
+ add_pair(opt, names, fullname, side, 0 /* delete */,
+ match_mask & filemask,
+ renames->dir_rename_mask);
/* Check for addition on side */
if (!(filemask & 1) && (filemask & side_mask))
- add_pair(opt, names, fullname, side, 1 /* add */);
+ add_pair(opt, names, fullname, side, 1 /* add */,
+ match_mask & filemask,
+ renames->dir_rename_mask);
}
}
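
/*
 * Editor's sketch (not part of the patch): the mask arithmetic used in
 * collect_rename_info() above assumes a 3-bit mask with bit 0 = merge
 * base, bit 1 = side 1, bit 2 = side 2.  A minimal standalone check of
 * the two formulas, under that assumption:
 */
#include <assert.h>

int main(void)
{
	/* Directory present in the base and side 1 only => dirmask == 3. */
	unsigned dirmask = 3;
	/* File newly added on side 1 only => filemask == 2. */
	unsigned filemask = 2;
	unsigned sides, side;

	/* absent_mask = 0x07 - dirmask = 4: the directory is gone on side 2. */
	sides = (0x07 - dirmask) / 2;
	assert(sides == 2);

	/* The *other* side of history needs the directory rename. */
	side = 3 - (filemask >> 1);
	assert(side == 2);
	return 0;
}
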
@@ -597,12 +824,14 @@ static int collect_merge_info_callback(int n,
*/
struct merge_options *opt = info->data;
struct merge_options_internal *opti = opt->priv;
+ struct rename_info *renames = &opt->priv->renames;
struct string_list_item pi; /* Path Info */
struct conflict_info *ci; /* typed alias to pi.util (which is void*) */
struct name_entry *p;
size_t len;
char *fullpath;
const char *dirname = opti->current_dir_name;
+ unsigned prev_dir_rename_mask = renames->dir_rename_mask;
unsigned filemask = mask & ~dirmask;
unsigned match_mask = 0; /* will be updated below */
unsigned mbase_null = !(mask & 1);
@@ -743,8 +972,13 @@ static int collect_merge_info_callback(int n,
original_dir_name = opti->current_dir_name;
opti->current_dir_name = pi.string;
- ret = traverse_trees(NULL, 3, t, &newinfo);
+ if (renames->dir_rename_mask == 0 ||
+ renames->dir_rename_mask == 0x07)
+ ret = traverse_trees(NULL, 3, t, &newinfo);
+ else
+ ret = traverse_trees_wrapper(NULL, 3, t, &newinfo);
opti->current_dir_name = original_dir_name;
+ renames->dir_rename_mask = prev_dir_rename_mask;
for (i = MERGE_BASE; i <= MERGE_SIDE2; i++)
free(buf[i]);
@@ -968,6 +1202,63 @@ static int merge_submodule(struct merge_options *opt,
return 0;
}
+static void initialize_attr_index(struct merge_options *opt)
+{
+ /*
+ * The renormalize_buffer() functions require attributes, and
+ * annoyingly those can only be read from the working tree or from
+ * an index_state. merge-ort doesn't have an index_state, so we
+ * generate a fake one containing only attribute information.
+ */
+ struct merged_info *mi;
+ struct index_state *attr_index = &opt->priv->attr_index;
+ struct cache_entry *ce;
+
+ attr_index->initialized = 1;
+
+ if (!opt->renormalize)
+ return;
+
+ mi = strmap_get(&opt->priv->paths, GITATTRIBUTES_FILE);
+ if (!mi)
+ return;
+
+ if (mi->clean) {
+ int len = strlen(GITATTRIBUTES_FILE);
+ ce = make_empty_cache_entry(attr_index, len);
+ ce->ce_mode = create_ce_mode(mi->result.mode);
+ ce->ce_flags = create_ce_flags(0);
+ ce->ce_namelen = len;
+ oidcpy(&ce->oid, &mi->result.oid);
+ memcpy(ce->name, GITATTRIBUTES_FILE, len);
+ add_index_entry(attr_index, ce,
+ ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
+ get_stream_filter(attr_index, GITATTRIBUTES_FILE, &ce->oid);
+ } else {
+ int stage, len;
+ struct conflict_info *ci;
+
+ ASSIGN_AND_VERIFY_CI(ci, mi);
+ for (stage = 0; stage < 3; stage++) {
+ unsigned stage_mask = (1 << stage);
+
+ if (!(ci->filemask & stage_mask))
+ continue;
+ len = strlen(GITATTRIBUTES_FILE);
+ ce = make_empty_cache_entry(attr_index, len);
+ ce->ce_mode = create_ce_mode(ci->stages[stage].mode);
+ ce->ce_flags = create_ce_flags(stage);
+ ce->ce_namelen = len;
+ oidcpy(&ce->oid, &ci->stages[stage].oid);
+ memcpy(ce->name, GITATTRIBUTES_FILE, len);
+ add_index_entry(attr_index, ce,
+ ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
+ get_stream_filter(attr_index, GITATTRIBUTES_FILE,
+ &ce->oid);
+ }
+ }
+}
+
static int merge_3way(struct merge_options *opt,
const char *path,
const struct object_id *o,
@@ -982,6 +1273,9 @@ static int merge_3way(struct merge_options *opt,
char *base, *name1, *name2;
int merge_status;
+ if (!opt->priv->attr_index.initialized)
+ initialize_attr_index(opt);
+
ll_opts.renormalize = opt->renormalize;
ll_opts.extra_marker_size = extra_marker_size;
ll_opts.xdl_opts = opt->xdl_opts;
@@ -1020,7 +1314,7 @@ static int merge_3way(struct merge_options *opt,
merge_status = ll_merge(result_buf, path, &orig, base,
&src1, name1, &src2, name2,
- opt->repo->index, &ll_opts);
+ &opt->priv->attr_index, &ll_opts);
free(base);
free(name1);
@@ -1332,6 +1626,9 @@ static void get_provisional_directory_renames(struct merge_options *opt,
}
}
+ if (max == 0)
+ continue;
+
if (bad_max == max) {
path_msg(opt, source_dir, 0,
_("CONFLICT (directory rename split): "
@@ -1340,18 +1637,7 @@ static void get_provisional_directory_renames(struct merge_options *opt,
"no destination getting a majority of the "
"files."),
source_dir);
- /*
- * We should mark this as unclean IF something attempts
- * to use this rename. We do not yet have the logic
- * in place to detect if this directory rename is being
- * used, and optimizations that reduce the number of
- * renames cause this to falsely trigger. For now,
- * just disable it, causing t6423 testcase 2a to break.
- * We'll later fix the detection, and when we do we
- * will re-enable setting *clean to 0 (and thereby fix
- * t6423 testcase 2a).
- */
- /* *clean = 0; */
+ *clean = 0;
} else {
strmap_put(&renames->dir_renames[side],
source_dir, (void*)best);
@@ -1977,6 +2263,19 @@ static int process_renames(struct merge_options *opt,
return clean_merge;
}
+static inline int possible_side_renames(struct rename_info *renames,
+ unsigned side_index)
+{
+ return renames->pairs[side_index].nr > 0 &&
+ !strintmap_empty(&renames->relevant_sources[side_index]);
+}
+
+static inline int possible_renames(struct rename_info *renames)
+{
+ return possible_side_renames(renames, 1) ||
+ possible_side_renames(renames, 2);
+}
+
static void resolve_diffpair_statuses(struct diff_queue_struct *q)
{
/*
@@ -2013,6 +2312,16 @@ static void detect_regular_renames(struct merge_options *opt,
struct diff_options diff_opts;
struct rename_info *renames = &opt->priv->renames;
+ if (!possible_side_renames(renames, side_index)) {
+ /*
+ * No rename detection needed for this side, but we still need
+ * to make sure 'adds' are marked correctly in case the other
+ * side had directory renames.
+ */
+ resolve_diffpair_statuses(&renames->pairs[side_index]);
+ return;
+ }
+
repo_diff_setup(opt->repo, &diff_opts);
diff_opts.flags.recursive = 1;
diff_opts.flags.rename_empty = 0;
@@ -2028,6 +2337,7 @@ static void detect_regular_renames(struct merge_options *opt,
diff_queued_diff = renames->pairs[side_index];
trace2_region_enter("diff", "diffcore_rename", opt->repo);
diffcore_rename_extended(&diff_opts,
+ &renames->relevant_sources[side_index],
&renames->dirs_removed[side_index],
&renames->dir_rename_count[side_index]);
trace2_region_leave("diff", "diffcore_rename", opt->repo);
@@ -2129,6 +2439,8 @@ static int detect_and_process_renames(struct merge_options *opt,
int need_dir_renames, s, clean = 1;
memset(&combined, 0, sizeof(combined));
+ if (!possible_renames(renames))
+ goto cleanup;
trace2_region_enter("merge", "regular renames", opt->repo);
detect_regular_renames(opt, MERGE_SIDE1);
@@ -2156,13 +2468,32 @@ static int detect_and_process_renames(struct merge_options *opt,
clean &= collect_renames(opt, &combined, MERGE_SIDE2,
&renames->dir_renames[1],
&renames->dir_renames[2]);
- QSORT(combined.queue, combined.nr, compare_pairs);
+ STABLE_QSORT(combined.queue, combined.nr, compare_pairs);
trace2_region_leave("merge", "directory renames", opt->repo);
trace2_region_enter("merge", "process renames", opt->repo);
clean &= process_renames(opt, &combined);
trace2_region_leave("merge", "process renames", opt->repo);
+ goto simple_cleanup; /* collect_renames() handles some of the cleanup */
+
+cleanup:
+ /*
+ * Free now unneeded filepairs, which would have been handled
+ * in collect_renames() normally but we skipped that code.
+ */
+ for (s = MERGE_SIDE1; s <= MERGE_SIDE2; s++) {
+ struct diff_queue_struct *side_pairs;
+ int i;
+
+ side_pairs = &renames->pairs[s];
+ for (i = 0; i < side_pairs->nr; ++i) {
+ struct diff_filepair *p = side_pairs->queue[i];
+ diff_free_filepair(p);
+ }
+ }
+
+simple_cleanup:
/* Free memory for renames->pairs[] and combined */
for (s = MERGE_SIDE1; s <= MERGE_SIDE2; s++) {
free(renames->pairs[s].queue);
@@ -2207,6 +2538,61 @@ static int string_list_df_name_compare(const char *one, const char *two)
return onelen - twolen;
}
+static int read_oid_strbuf(struct merge_options *opt,
+ const struct object_id *oid,
+ struct strbuf *dst)
+{
+ void *buf;
+ enum object_type type;
+ unsigned long size;
+ buf = read_object_file(oid, &type, &size);
+ if (!buf)
+ return err(opt, _("cannot read object %s"), oid_to_hex(oid));
+ if (type != OBJ_BLOB) {
+ free(buf);
+ return err(opt, _("object %s is not a blob"), oid_to_hex(oid));
+ }
+ strbuf_attach(dst, buf, size, size + 1);
+ return 0;
+}
+
+static int blob_unchanged(struct merge_options *opt,
+ const struct version_info *base,
+ const struct version_info *side,
+ const char *path)
+{
+ struct strbuf basebuf = STRBUF_INIT;
+ struct strbuf sidebuf = STRBUF_INIT;
+ int ret = 0; /* assume changed for safety */
+ const struct index_state *idx = &opt->priv->attr_index;
+
+ if (!idx->initialized)
+ initialize_attr_index(opt);
+
+ if (base->mode != side->mode)
+ return 0;
+ if (oideq(&base->oid, &side->oid))
+ return 1;
+
+ if (read_oid_strbuf(opt, &base->oid, &basebuf) ||
+ read_oid_strbuf(opt, &side->oid, &sidebuf))
+ goto error_return;
+ /*
+ * Note: binary | is used so that both renormalizations are
+ * performed. Comparison can be skipped if both files are
+ * unchanged since their sha1s have already been compared.
+ */
+ if (renormalize_buffer(idx, path, basebuf.buf, basebuf.len, &basebuf) |
+ renormalize_buffer(idx, path, sidebuf.buf, sidebuf.len, &sidebuf))
+ ret = (basebuf.len == sidebuf.len &&
+ !memcmp(basebuf.buf, sidebuf.buf, basebuf.len));
+
+error_return:
+ strbuf_release(&basebuf);
+ strbuf_release(&sidebuf);
+ return ret;
+}
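
/*
 * Editor's sketch (not part of the patch): why blob_unchanged() above
 * uses the non-short-circuiting '|'.  With '||' the second call would
 * be skipped whenever the first buffer was renormalized, and the final
 * memcmp() would compare a renormalized buffer against a raw one.
 */
#include <stdio.h>

static int fake_renormalize(const char *label)
{
	printf("renormalizing %s\n", label);
	return 1;	/* pretend the buffer changed */
}

int main(void)
{
	if (fake_renormalize("base") | fake_renormalize("side"))
		puts("both buffers renormalized; comparison is fair");
	if (fake_renormalize("base") || fake_renormalize("side"))
		puts("short-circuit: \"side\" was never renormalized");
	return 0;
}
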
+
struct directory_versions {
/*
* versions: list of (basename -> version_info)
@@ -2267,22 +2653,15 @@ static void write_tree(struct object_id *result_oid,
size_t hash_size)
{
size_t maxlen = 0, extra;
- unsigned int nr = versions->nr - offset;
+ unsigned int nr;
struct strbuf buf = STRBUF_INIT;
- struct string_list relevant_entries = STRING_LIST_INIT_NODUP;
int i;
- /*
- * We want to sort the last (versions->nr-offset) entries in versions.
- * Do so by abusing the string_list API a bit: make another string_list
- * that contains just those entries and then sort them.
- *
- * We won't use relevant_entries again and will let it just pop off the
- * stack, so there won't be allocation worries or anything.
- */
- relevant_entries.items = versions->items + offset;
- relevant_entries.nr = versions->nr - offset;
- QSORT(relevant_entries.items, relevant_entries.nr, tree_entry_order);
+ assert(offset <= versions->nr);
+ nr = versions->nr - offset;
+ if (versions->nr)
+ /* No need for STABLE_QSORT -- filenames must be unique */
+ QSORT(versions->items + offset, nr, tree_entry_order);
/* Pre-allocate some space in buf */
extra = hash_size + 8; /* 8: 6 for mode, 1 for space, 1 for NUL char */
@@ -2793,8 +3172,13 @@ static void process_entry(struct merge_options *opt,
modify_branch = (side == 1) ? opt->branch1 : opt->branch2;
delete_branch = (side == 1) ? opt->branch2 : opt->branch1;
- if (ci->path_conflict &&
- oideq(&ci->stages[0].oid, &ci->stages[side].oid)) {
+ if (opt->renormalize &&
+ blob_unchanged(opt, &ci->stages[0], &ci->stages[side],
+ path)) {
+ ci->merged.is_null = 1;
+ ci->merged.clean = 1;
+ } else if (ci->path_conflict &&
+ oideq(&ci->stages[0].oid, &ci->stages[side].oid)) {
/*
* This came from a rename/delete; no action to take,
* but avoid printing "modify/delete" conflict notice
@@ -2966,23 +3350,27 @@ static int checkout(struct merge_options *opt,
return ret;
}
-static int record_conflicted_index_entries(struct merge_options *opt,
- struct index_state *index,
- struct strmap *paths,
- struct strmap *conflicted)
+static int record_conflicted_index_entries(struct merge_options *opt)
{
struct hashmap_iter iter;
struct strmap_entry *e;
+ struct index_state *index = opt->repo->index;
+ struct checkout state = CHECKOUT_INIT;
int errs = 0;
int original_cache_nr;
- if (strmap_empty(conflicted))
+ if (strmap_empty(&opt->priv->conflicted))
return 0;
+ /* If any entries have skip_worktree set, we'll have to check 'em out */
+ state.force = 1;
+ state.quiet = 1;
+ state.refresh_cache = 1;
+ state.istate = index;
original_cache_nr = index->cache_nr;
/* Put every entry from paths into plist, then sort */
- strmap_for_each_entry(conflicted, &iter, e) {
+ strmap_for_each_entry(&opt->priv->conflicted, &iter, e) {
const char *path = e->key;
struct conflict_info *ci = e->value;
int pos;
@@ -3023,9 +3411,23 @@ static int record_conflicted_index_entries(struct merge_options *opt,
* the higher order stages. Thus, we need to override
* the CE_SKIP_WORKTREE bit and manually write those
* files to the working disk here.
- *
- * TODO: Implement this CE_SKIP_WORKTREE fixup.
*/
+ if (ce_skip_worktree(ce)) {
+ struct stat st;
+
+ if (!lstat(path, &st)) {
+ char *new_name = unique_path(&opt->priv->paths,
+ path,
+ "cruft");
+
+ path_msg(opt, path, 1,
+ _("Note: %s not up to date and in the way of checking out conflicted version; old copy renamed to %s"),
+ path, new_name);
+ errs |= rename(path, new_name);
+ free(new_name);
+ }
+ errs |= checkout_entry(ce, &state, NULL, NULL);
+ }
/*
* Mark this cache entry for removal and instead add
@@ -3057,6 +3459,11 @@ static int record_conflicted_index_entries(struct merge_options *opt,
* entries we added to the end into their right locations.
*/
remove_marked_cache_entries(index, 1);
+ /*
+ * No need for STABLE_QSORT -- cmp_cache_name_compare sorts primarily
+ * on filename and secondarily on stage, and (name, stage #) are a
+ * unique tuple.
+ */
QSORT(index->cache, index->cache_nr, cmp_cache_name_compare);
return errs;
@@ -3070,7 +3477,8 @@ void merge_switch_to_result(struct merge_options *opt,
{
assert(opt->priv == NULL);
if (result->clean >= 0 && update_worktree_and_index) {
- struct merge_options_internal *opti = result->priv;
+ const char *filename;
+ FILE *fp;
trace2_region_enter("merge", "checkout", opt->repo);
if (checkout(opt, head, result->tree)) {
@@ -3081,14 +3489,22 @@ void merge_switch_to_result(struct merge_options *opt,
trace2_region_leave("merge", "checkout", opt->repo);
trace2_region_enter("merge", "record_conflicted", opt->repo);
- if (record_conflicted_index_entries(opt, opt->repo->index,
- &opti->paths,
- &opti->conflicted)) {
+ opt->priv = result->priv;
+ if (record_conflicted_index_entries(opt)) {
/* failure to function */
+ opt->priv = NULL;
result->clean = -1;
return;
}
+ opt->priv = NULL;
trace2_region_leave("merge", "record_conflicted", opt->repo);
+
+ trace2_region_enter("merge", "write_auto_merge", opt->repo);
+ filename = git_path_auto_merge(opt->repo);
+ fp = xfopen(filename, "w");
+ fprintf(fp, "%s\n", oid_to_hex(&result->tree->object.oid));
+ fclose(fp);
+ trace2_region_leave("merge", "write_auto_merge", opt->repo);
}
if (display_update_msgs) {
@@ -3133,6 +3549,8 @@ void merge_finalize(struct merge_options *opt,
{
struct merge_options_internal *opti = result->priv;
+ if (opt->renormalize)
+ git_attr_set_direction(GIT_ATTR_CHECKIN);
assert(opt->priv == NULL);
clear_or_reinit_internal_opts(opti, 0);
@@ -3141,6 +3559,23 @@ void merge_finalize(struct merge_options *opt,
/*** Function Grouping: helper functions for merge_incore_*() ***/
+static struct tree *shift_tree_object(struct repository *repo,
+ struct tree *one, struct tree *two,
+ const char *subtree_shift)
+{
+ struct object_id shifted;
+
+ if (!*subtree_shift) {
+ shift_tree(repo, &one->object.oid, &two->object.oid, &shifted, 0);
+ } else {
+ shift_tree_by(repo, &one->object.oid, &two->object.oid, &shifted,
+ subtree_shift);
+ }
+ if (oideq(&two->object.oid, &shifted))
+ return two;
+ return lookup_tree(repo, &shifted);
+}
+
static inline void set_commit_tree(struct commit *c, struct tree *t)
{
c->maybe_tree = t;
@@ -3208,6 +3643,10 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
/* Default to histogram diff. Actually, just hardcode it...for now. */
opt->xdl_opts = DIFF_WITH_ALG(opt, HISTOGRAM_DIFF);
+ /* Handle attr direction stuff for renormalization */
+ if (opt->renormalize)
+ git_attr_set_direction(GIT_ATTR_CHECKOUT);
+
/* Initialization of opt->priv, our internal merge data */
trace2_region_enter("merge", "allocate/init", opt->repo);
if (opt->priv) {
@@ -3220,12 +3659,14 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
/* Initialization of various renames fields */
renames = &opt->priv->renames;
for (i = MERGE_SIDE1; i <= MERGE_SIDE2; i++) {
- strset_init_with_options(&renames->dirs_removed[i],
- NULL, 0);
+ strintmap_init_with_options(&renames->dirs_removed[i],
+ NOT_RELEVANT, NULL, 0);
strmap_init_with_options(&renames->dir_rename_count[i],
NULL, 1);
strmap_init_with_options(&renames->dir_renames[i],
NULL, 0);
+ strintmap_init_with_options(&renames->relevant_sources[i],
+ 0, NULL, 0);
}
/*
@@ -3264,6 +3705,13 @@ static void merge_ort_nonrecursive_internal(struct merge_options *opt,
{
struct object_id working_tree_oid;
+ if (opt->subtree_shift) {
+ side2 = shift_tree_object(opt->repo, side1, side2,
+ opt->subtree_shift);
+ merge_base = shift_tree_object(opt->repo, side1, merge_base,
+ opt->subtree_shift);
+ }
+
trace2_region_enter("merge", "collect_merge_info", opt->repo);
if (collect_merge_info(opt, merge_base, side1, side2) != 0) {
/*
diff --git a/merge-recursive.c b/merge-recursive.c
index b69e694d98..7618303f7b 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -453,7 +453,7 @@ static void unpack_trees_finish(struct merge_options *opt)
static int save_files_dirs(const struct object_id *oid,
struct strbuf *base, const char *path,
- unsigned int mode, int stage, void *context)
+ unsigned int mode, void *context)
{
struct path_hashmap_entry *entry;
int baselen = base->len;
@@ -473,8 +473,8 @@ static void get_files_dirs(struct merge_options *opt, struct tree *tree)
{
struct pathspec match_all;
memset(&match_all, 0, sizeof(match_all));
- read_tree_recursive(opt->repo, tree, "", 0, 0,
- &match_all, save_files_dirs, opt);
+ read_tree(opt->repo, tree,
+ &match_all, save_files_dirs, opt);
}
static int get_tree_entry_if_blob(struct repository *r,
@@ -1075,6 +1075,11 @@ static int merge_3way(struct merge_options *opt,
read_mmblob(&src1, &a->oid);
read_mmblob(&src2, &b->oid);
+ /*
+ * FIXME: Using a->path for normalization rules in ll_merge could be
+ * wrong if we renamed from a->path to b->path. We should use the
+ * target path for where the file will be written.
+ */
merge_status = ll_merge(result_buf, a->path, &orig, base,
&src1, name1, &src2, name2,
opt->repo->index, &ll_opts);
@@ -1154,6 +1159,8 @@ static void print_commit(struct commit *commit)
struct strbuf sb = STRBUF_INIT;
struct pretty_print_context ctx = {0};
ctx.date_mode.type = DATE_NORMAL;
+ /* FIXME: Merge this with output_commit_title() */
+ assert(!merge_remote_util(commit));
format_commit_message(commit, " %h: %m %s", &sb, &ctx);
fprintf(stderr, "%s\n", sb.buf);
strbuf_release(&sb);
@@ -1177,6 +1184,11 @@ static int merge_submodule(struct merge_options *opt,
int search = !opt->priv->call_depth;
/* store a in result in case we fail */
+ /* FIXME: This is the WRONG resolution for the recursive case, where
+ * we need to be careful to avoid accidentally matching either side.
+ * Should probably use o instead there, much like we do for merging
+ * binaries.
+ */
oidcpy(result, a);
/* we can not handle deletion conflicts */
@@ -1301,6 +1313,13 @@ static int merge_mode_and_contents(struct merge_options *opt,
if ((S_IFMT & a->mode) != (S_IFMT & b->mode)) {
result->clean = 0;
+ /*
+ * FIXME: This is a bad resolution for recursive case; for
+ * the recursive case we want something that is unlikely to
+ * accidentally match either side. Also, while it makes
+ * sense to prefer regular files over symlinks, it doesn't
+ * make sense to prefer regular files over submodules.
+ */
if (S_ISREG(a->mode)) {
result->blob.mode = a->mode;
oidcpy(&result->blob.oid, &a->oid);
@@ -1349,6 +1368,7 @@ static int merge_mode_and_contents(struct merge_options *opt,
free(result_buf.ptr);
if (ret)
return ret;
+ /* FIXME: bug, what if modes didn't match? */
result->clean = (merge_status == 0);
} else if (S_ISGITLINK(a->mode)) {
result->clean = merge_submodule(opt, &result->blob.oid,
@@ -2663,6 +2683,14 @@ static int process_renames(struct merge_options *opt,
struct string_list b_by_dst = STRING_LIST_INIT_NODUP;
const struct rename *sre;
+ /*
+ * FIXME: As string-list.h notes, it's O(n^2) to build a sorted
+ * string_list one-by-one, but O(n log n) to build it unsorted and
+ * then sort it. Note that as we build the list, we do not need to
+ * check if the existing destination path is already in the list,
+ * because the structure of diffcore_rename guarantees we won't
+ * have duplicates.
+ */
for (i = 0; i < a_renames->nr; i++) {
sre = a_renames->items[i].util;
string_list_insert(&a_by_dst, sre->pair->two->path)->util
@@ -3601,6 +3629,15 @@ static int merge_recursive_internal(struct merge_options *opt,
return err(opt, _("merge returned no commit"));
}
+ /*
+ * FIXME: Since merge_recursive_internal() is only ever called by
+ * places that ensure the index is loaded first
+ * (e.g. builtin/merge.c, rebase/sequencer, etc.), in the common
+ * case where the merge base was unique that means when we get here
+ * we immediately discard the index and re-read it, which is a
+ * complete waste of time. We should only be discarding and
+ * re-reading if we were forced to recurse.
+ */
discard_index(opt->repo->index);
if (!opt->priv->call_depth)
repo_read_index(opt->repo);
diff --git a/midx.c b/midx.c
index becfafe65e..9e86583172 100644
--- a/midx.c
+++ b/midx.c
@@ -12,6 +12,7 @@
#include "run-command.h"
#include "repository.h"
#include "chunk-format.h"
+#include "pack.h"
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1
@@ -47,11 +48,22 @@ static uint8_t oid_version(void)
}
}
+static const unsigned char *get_midx_checksum(struct multi_pack_index *m)
+{
+ return m->data + m->data_len - the_hash_algo->rawsz;
+}
+
static char *get_midx_filename(const char *object_dir)
{
return xstrfmt("%s/pack/multi-pack-index", object_dir);
}
+char *get_midx_rev_filename(struct multi_pack_index *m)
+{
+ return xstrfmt("%s/pack/multi-pack-index-%s.rev",
+ m->object_dir, hash_to_hex(get_midx_checksum(m)));
+}
+
static int midx_read_oid_fanout(const unsigned char *chunk_start,
size_t chunk_size, void *data)
{
@@ -239,7 +251,7 @@ struct object_id *nth_midxed_object_oid(struct object_id *oid,
return oid;
}
-static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
+off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
{
const unsigned char *offset_data;
uint32_t offset32;
@@ -258,7 +270,7 @@ static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
return offset32;
}
-static uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
+uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
{
return get_be32(m->chunk_object_offsets +
(off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
@@ -431,6 +443,14 @@ static int pack_info_compare(const void *_a, const void *_b)
return strcmp(a->pack_name, b->pack_name);
}
+static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
+{
+ const char *pack_name = _va;
+ const struct pack_info *compar = _vb;
+
+ return cmp_idx_or_pack_name(pack_name, compar->pack_name);
+}
+
struct write_midx_context {
struct pack_info *info;
uint32_t nr;
@@ -443,8 +463,11 @@ struct write_midx_context {
uint32_t entries_nr;
uint32_t *pack_perm;
+ uint32_t *pack_order;
unsigned large_offsets_needed:1;
uint32_t num_large_offsets;
+
+ int preferred_pack_idx;
};
static void add_pack_to_midx(const char *full_path, size_t full_path_len,
@@ -489,6 +512,7 @@ struct pack_midx_entry {
uint32_t pack_int_id;
time_t pack_mtime;
uint64_t offset;
+ unsigned preferred : 1;
};
static int midx_oid_compare(const void *_a, const void *_b)
@@ -500,6 +524,12 @@ static int midx_oid_compare(const void *_a, const void *_b)
if (cmp)
return cmp;
+ /* Sort objects in a preferred pack first when multiple copies exist. */
+ if (a->preferred > b->preferred)
+ return -1;
+ if (a->preferred < b->preferred)
+ return 1;
+
if (a->pack_mtime > b->pack_mtime)
return -1;
else if (a->pack_mtime < b->pack_mtime)
@@ -527,7 +557,8 @@ static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
static void fill_pack_entry(uint32_t pack_int_id,
struct packed_git *p,
uint32_t cur_object,
- struct pack_midx_entry *entry)
+ struct pack_midx_entry *entry,
+ int preferred)
{
if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
die(_("failed to locate object %d in packfile"), cur_object);
@@ -536,6 +567,7 @@ static void fill_pack_entry(uint32_t pack_int_id,
entry->pack_mtime = p->mtime;
entry->offset = nth_packed_object_offset(p, cur_object);
+ entry->preferred = !!preferred;
}
/*
@@ -552,7 +584,8 @@ static void fill_pack_entry(uint32_t pack_int_id,
static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
struct pack_info *info,
uint32_t nr_packs,
- uint32_t *nr_objects)
+ uint32_t *nr_objects,
+ int preferred_pack)
{
uint32_t cur_fanout, cur_pack, cur_object;
uint32_t alloc_fanout, alloc_objects, total_objects = 0;
@@ -589,12 +622,17 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
nth_midxed_pack_midx_entry(m,
&entries_by_fanout[nr_fanout],
cur_object);
+ if (nth_midxed_pack_int_id(m, cur_object) == preferred_pack)
+ entries_by_fanout[nr_fanout].preferred = 1;
+ else
+ entries_by_fanout[nr_fanout].preferred = 0;
nr_fanout++;
}
}
for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
uint32_t start = 0, end;
+ int preferred = cur_pack == preferred_pack;
if (cur_fanout)
start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1);
@@ -602,7 +640,11 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
for (cur_object = start; cur_object < end; cur_object++) {
ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
- fill_pack_entry(cur_pack, info[cur_pack].p, cur_object, &entries_by_fanout[nr_fanout]);
+ fill_pack_entry(cur_pack,
+ info[cur_pack].p,
+ cur_object,
+ &entries_by_fanout[nr_fanout],
+ preferred);
nr_fanout++;
}
}
@@ -776,10 +818,80 @@ static int write_midx_large_offsets(struct hashfile *f,
return 0;
}
+struct midx_pack_order_data {
+ uint32_t nr;
+ uint32_t pack;
+ off_t offset;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+ const struct midx_pack_order_data *a = va, *b = vb;
+ if (a->pack < b->pack)
+ return -1;
+ else if (a->pack > b->pack)
+ return 1;
+ else if (a->offset < b->offset)
+ return -1;
+ else if (a->offset > b->offset)
+ return 1;
+ else
+ return 0;
+}
+
+static uint32_t *midx_pack_order(struct write_midx_context *ctx)
+{
+ struct midx_pack_order_data *data;
+ uint32_t *pack_order;
+ uint32_t i;
+
+ ALLOC_ARRAY(data, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *e = &ctx->entries[i];
+ data[i].nr = i;
+ data[i].pack = ctx->pack_perm[e->pack_int_id];
+ if (!e->preferred)
+ data[i].pack |= (1U << 31);
+ data[i].offset = e->offset;
+ }
+
+ QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
+
+ ALLOC_ARRAY(pack_order, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++)
+ pack_order[i] = data[i].nr;
+ free(data);
+
+ return pack_order;
+}
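
/*
 * Editor's sketch (not part of the patch): the (1U << 31) tag in
 * midx_pack_order() above relies on permuted pack ids being far smaller
 * than 2^31, so every non-preferred entry compares greater than every
 * preferred one while per-pack offset ordering is preserved.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t order_key(uint32_t pack_perm_id, int preferred)
{
	uint32_t key = pack_perm_id;

	if (!preferred)
		key |= (1U << 31);	/* push non-preferred packs last */
	return key;
}

int main(void)
{
	/* A preferred copy in pack 5 sorts before a non-preferred copy in pack 0. */
	assert(order_key(5, 1) < order_key(0, 0));
	return 0;
}
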
+
+static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
+ struct write_midx_context *ctx)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *tmp_file;
+
+ strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
+
+ tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
+ midx_hash, WRITE_REV);
+
+ if (finalize_object_file(tmp_file, buf.buf))
+ die(_("cannot store reverse index file"));
+
+ strbuf_release(&buf);
+}
+
+static void clear_midx_files_ext(struct repository *r, const char *ext,
+ unsigned char *keep_hash);
+
static int write_midx_internal(const char *object_dir, struct multi_pack_index *m,
- struct string_list *packs_to_drop, unsigned flags)
+ struct string_list *packs_to_drop,
+ const char *preferred_pack_name,
+ unsigned flags)
{
char *midx_name;
+ unsigned char midx_hash[GIT_MAX_RAWSZ];
uint32_t i;
struct hashfile *f = NULL;
struct lock_file lk;
@@ -828,7 +940,19 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop)
goto cleanup;
- ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr);
+ ctx.preferred_pack_idx = -1;
+ if (preferred_pack_name) {
+ for (i = 0; i < ctx.nr; i++) {
+ if (!cmp_idx_or_pack_name(preferred_pack_name,
+ ctx.info[i].pack_name)) {
+ ctx.preferred_pack_idx = i;
+ break;
+ }
+ }
+ }
+
+ ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
+ ctx.preferred_pack_idx);
ctx.large_offsets_needed = 0;
for (i = 0; i < ctx.entries_nr; i++) {
@@ -889,13 +1013,30 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
}
+ /* Check that the preferred pack wasn't expired (if given). */
+ if (preferred_pack_name) {
+ struct pack_info *preferred = bsearch(preferred_pack_name,
+ ctx.info, ctx.nr,
+ sizeof(*ctx.info),
+ idx_or_pack_name_cmp);
+
+ if (!preferred)
+ warning(_("unknown preferred pack: '%s'"),
+ preferred_pack_name);
+ else {
+ uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
+ if (perm == PACK_EXPIRED)
+ warning(_("preferred pack '%s' is expired"),
+ preferred_pack_name);
+ }
+ }
+
if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
(pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
- FREE_AND_NULL(midx_name);
if (ctx.m)
close_midx(ctx.m);
@@ -927,8 +1068,16 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
write_chunkfile(cf, &ctx);
- finalize_hashfile(f, NULL, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+ finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
free_chunkfile(cf);
+
+ if (flags & MIDX_WRITE_REV_INDEX)
+ ctx.pack_order = midx_pack_order(&ctx);
+
+ if (flags & MIDX_WRITE_REV_INDEX)
+ write_midx_reverse_index(midx_name, midx_hash, &ctx);
+ clear_midx_files_ext(the_repository, ".rev", midx_hash);
+
commit_lock_file(&lk);
cleanup:
@@ -943,13 +1092,55 @@ cleanup:
free(ctx.info);
free(ctx.entries);
free(ctx.pack_perm);
+ free(ctx.pack_order);
free(midx_name);
return result;
}
-int write_midx_file(const char *object_dir, unsigned flags)
+int write_midx_file(const char *object_dir,
+ const char *preferred_pack_name,
+ unsigned flags)
{
- return write_midx_internal(object_dir, NULL, NULL, flags);
+ return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
+ flags);
+}
+
+struct clear_midx_data {
+ char *keep;
+ const char *ext;
+};
+
+static void clear_midx_file_ext(const char *full_path, size_t full_path_len,
+ const char *file_name, void *_data)
+{
+ struct clear_midx_data *data = _data;
+
+ if (!(starts_with(file_name, "multi-pack-index-") &&
+ ends_with(file_name, data->ext)))
+ return;
+ if (data->keep && !strcmp(data->keep, file_name))
+ return;
+
+ if (unlink(full_path))
+ die_errno(_("failed to remove %s"), full_path);
+}
+
+static void clear_midx_files_ext(struct repository *r, const char *ext,
+ unsigned char *keep_hash)
+{
+ struct clear_midx_data data;
+ memset(&data, 0, sizeof(struct clear_midx_data));
+
+ if (keep_hash)
+ data.keep = xstrfmt("multi-pack-index-%s%s",
+ hash_to_hex(keep_hash), ext);
+ data.ext = ext;
+
+ for_each_file_in_pack_dir(r->objects->odb->path,
+ clear_midx_file_ext,
+ &data);
+
+ free(data.keep);
}
void clear_midx_file(struct repository *r)
@@ -964,6 +1155,8 @@ void clear_midx_file(struct repository *r)
if (remove_path(midx))
die(_("failed to clear multi-pack-index at %s"), midx);
+ clear_midx_files_ext(r, ".rev", NULL);
+
free(midx);
}
@@ -1184,7 +1377,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
free(count);
if (packs_to_drop.nr)
- result = write_midx_internal(object_dir, m, &packs_to_drop, flags);
+ result = write_midx_internal(object_dir, m, &packs_to_drop, NULL, flags);
string_list_clear(&packs_to_drop, 0);
return result;
@@ -1373,7 +1566,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size,
goto cleanup;
}
- result = write_midx_internal(object_dir, m, NULL, flags);
+ result = write_midx_internal(object_dir, m, NULL, NULL, flags);
m = NULL;
cleanup:
diff --git a/midx.h b/midx.h
index b18cf53bc4..8684cf0fef 100644
--- a/midx.h
+++ b/midx.h
@@ -15,6 +15,10 @@ struct multi_pack_index {
const unsigned char *data;
size_t data_len;
+ const uint32_t *revindex_data;
+ const uint32_t *revindex_map;
+ size_t revindex_len;
+
uint32_t signature;
unsigned char version;
unsigned char hash_len;
@@ -36,10 +40,15 @@ struct multi_pack_index {
};
#define MIDX_PROGRESS (1 << 0)
+#define MIDX_WRITE_REV_INDEX (1 << 1)
+
+char *get_midx_rev_filename(struct multi_pack_index *m);
struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local);
int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id);
int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result);
+off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos);
+uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos);
struct object_id *nth_midxed_object_oid(struct object_id *oid,
struct multi_pack_index *m,
uint32_t n);
@@ -47,7 +56,7 @@ int fill_midx_entry(struct repository *r, const struct object_id *oid, struct pa
int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name);
int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local);
-int write_midx_file(const char *object_dir, unsigned flags);
+int write_midx_file(const char *object_dir, const char *preferred_pack_name, unsigned flags);
void clear_midx_file(struct repository *r);
int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags);
int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags);
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 1ebe0c8162..3ed15431cd 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -13,6 +13,7 @@
#include "repository.h"
#include "object-store.h"
#include "list-objects-filter-options.h"
+#include "config.h"
/*
* An entry on the bitmap index, representing the bitmap for a given
@@ -997,6 +998,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
object_list_insert(object, &wants);
object = parse_object_or_die(get_tagged_oid(tag), NULL);
+ object->flags |= (tag->object.flags & UNINTERESTING);
}
if (object->flags & UNINTERESTING)
@@ -1350,6 +1352,24 @@ void test_bitmap_walk(struct rev_info *revs)
free_bitmap_index(bitmap_git);
}
+int test_bitmap_commits(struct repository *r)
+{
+ struct bitmap_index *bitmap_git = prepare_bitmap_git(r);
+ struct object_id oid;
+ MAYBE_UNUSED void *value;
+
+ if (!bitmap_git)
+ die("failed to load bitmap indexes");
+
+ kh_foreach(bitmap_git->bitmaps, oid, value, {
+ printf("%s\n", oid_to_hex(&oid));
+ });
+
+ free_bitmap_index(bitmap_git);
+
+ return 0;
+}
+
int rebuild_bitmap(const uint32_t *reposition,
struct ewah_bitmap *source,
struct bitmap *dest)
@@ -1511,3 +1531,8 @@ off_t get_disk_usage_from_bitmap(struct bitmap_index *bitmap_git,
return total;
}
+
+const struct string_list *bitmap_preferred_tips(struct repository *r)
+{
+ return repo_config_get_value_multi(r, "pack.preferbitmaptips");
+}
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 36d99930d8..78f2b3ff79 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -5,6 +5,7 @@
#include "khash.h"
#include "pack.h"
#include "pack-objects.h"
+#include "string-list.h"
struct commit;
struct repository;
@@ -49,6 +50,7 @@ void traverse_bitmap_commit_list(struct bitmap_index *,
struct rev_info *revs,
show_reachable_fn show_reachable);
void test_bitmap_walk(struct rev_info *revs);
+int test_bitmap_commits(struct repository *r);
struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
struct list_objects_filter_options *filter);
int reuse_partial_packfile_from_bitmap(struct bitmap_index *,
@@ -90,4 +92,6 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
const char *filename,
uint16_t options);
+const struct string_list *bitmap_preferred_tips(struct repository *r);
+
#endif
diff --git a/pack-revindex.c b/pack-revindex.c
index 4262530449..0e4a31d9db 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -3,6 +3,7 @@
#include "object-store.h"
#include "packfile.h"
#include "config.h"
+#include "midx.h"
struct revindex_entry {
off_t offset;
@@ -293,6 +294,43 @@ int load_pack_revindex(struct packed_git *p)
return -1;
}
+int load_midx_revindex(struct multi_pack_index *m)
+{
+ char *revindex_name;
+ int ret;
+ if (m->revindex_data)
+ return 0;
+
+ revindex_name = get_midx_rev_filename(m);
+
+ ret = load_revindex_from_disk(revindex_name,
+ m->num_objects,
+ &m->revindex_map,
+ &m->revindex_len);
+ if (ret)
+ goto cleanup;
+
+ m->revindex_data = (const uint32_t *)((const char *)m->revindex_map + RIDX_HEADER_SIZE);
+
+cleanup:
+ free(revindex_name);
+ return ret;
+}
+
+int close_midx_revindex(struct multi_pack_index *m)
+{
+ if (!m || !m->revindex_map)
+ return 0;
+
+ munmap((void*)m->revindex_map, m->revindex_len);
+
+ m->revindex_map = NULL;
+ m->revindex_data = NULL;
+ m->revindex_len = 0;
+
+ return 0;
+}
+
int offset_to_pack_pos(struct packed_git *p, off_t ofs, uint32_t *pos)
{
unsigned lo, hi;
@@ -347,3 +385,91 @@ off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos)
else
return nth_packed_object_offset(p, pack_pos_to_index(p, pos));
}
+
+uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos)
+{
+ if (!m->revindex_data)
+ BUG("pack_pos_to_midx: reverse index not yet loaded");
+ if (m->num_objects <= pos)
+ BUG("pack_pos_to_midx: out-of-bounds object at %"PRIu32, pos);
+ return get_be32(m->revindex_data + pos);
+}
+
+struct midx_pack_key {
+ uint32_t pack;
+ off_t offset;
+
+ uint32_t preferred_pack;
+ struct multi_pack_index *midx;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+ const struct midx_pack_key *key = va;
+ struct multi_pack_index *midx = key->midx;
+
+ uint32_t versus = pack_pos_to_midx(midx,
+ (const uint32_t *)vb - midx->revindex_data);
+ uint32_t versus_pack = nth_midxed_pack_int_id(midx, versus);
+ off_t versus_offset;
+
+ uint32_t key_preferred = key->pack == key->preferred_pack;
+ uint32_t versus_preferred = versus_pack == key->preferred_pack;
+
+ /*
+ * First, compare the preferred-ness, noting that the preferred pack
+ * comes first.
+ */
+ if (key_preferred && !versus_preferred)
+ return -1;
+ else if (!key_preferred && versus_preferred)
+ return 1;
+
+ /* Then, break ties first by comparing the pack IDs. */
+ if (key->pack < versus_pack)
+ return -1;
+ else if (key->pack > versus_pack)
+ return 1;
+
+ /* Finally, break ties by comparing offsets within a pack. */
+ versus_offset = nth_midxed_offset(midx, versus);
+ if (key->offset < versus_offset)
+ return -1;
+ else if (key->offset > versus_offset)
+ return 1;
+
+ return 0;
+}
+
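
/*
 * Editor's sketch (not part of the patch): bsearch() takes no context
 * argument, so midx_pack_order_cmp() above smuggles the midx through
 * the key and recovers the probed element's position with pointer
 * arithmetic.  The same pattern in a standalone toy:
 */
#include <assert.h>
#include <stdlib.h>

struct toy_key {
	int wanted;
	const int *base;	/* context carried inside the key */
};

static int toy_cmp(const void *vkey, const void *velem)
{
	const struct toy_key *key = vkey;
	size_t pos = (const int *)velem - key->base;	/* element's index */

	(void)pos;	/* a real comparator would use it for a lookup */
	return key->wanted - *(const int *)velem;
}

int main(void)
{
	int sorted[] = { 2, 4, 6, 8 };
	struct toy_key key = { 6, sorted };
	int *hit = bsearch(&key, sorted, 4, sizeof(*sorted), toy_cmp);

	assert(hit && hit - sorted == 2);
	return 0;
}
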
+int midx_to_pack_pos(struct multi_pack_index *m, uint32_t at, uint32_t *pos)
+{
+ struct midx_pack_key key;
+ uint32_t *found;
+
+ if (!m->revindex_data)
+ BUG("midx_to_pack_pos: reverse index not yet loaded");
+ if (m->num_objects <= at)
+ BUG("midx_to_pack_pos: out-of-bounds object at %"PRIu32, at);
+
+ key.pack = nth_midxed_pack_int_id(m, at);
+ key.offset = nth_midxed_offset(m, at);
+ key.midx = m;
+ /*
+ * The preferred pack sorts first, so determine its identifier by
+ * looking at the first object in pseudo-pack order.
+ *
+ * Note that if no --preferred-pack is explicitly given when writing a
+ * multi-pack index, then whichever pack has the lowest identifier
+ * is implicitly preferred (and includes all its objects, since ties are
+ * broken first by pack identifier).
+ */
+ key.preferred_pack = nth_midxed_pack_int_id(m, pack_pos_to_midx(m, 0));
+
+ found = bsearch(&key, m->revindex_data, m->num_objects,
+ sizeof(*m->revindex_data), midx_pack_order_cmp);
+
+ if (!found)
+ return error("bad offset for revindex");
+
+ *pos = found - m->revindex_data;
+ return 0;
+}
diff --git a/pack-revindex.h b/pack-revindex.h
index ba7c82c125..479b8f2f9c 100644
--- a/pack-revindex.h
+++ b/pack-revindex.h
@@ -14,6 +14,20 @@
*
* - offset: the byte offset within the .pack file at which the object contents
* can be found
+ *
+ * The revindex can also be used with a multi-pack index (MIDX). In this
+ * setting:
+ *
+ * - index position refers to an object's numeric position within the MIDX
+ *
+ * - pack position refers to an object's position within a non-existent pack
+ * described by the MIDX. The pack structure is described in
+ * Documentation/technical/pack-format.txt.
+ *
+ * It is effectively a concatenation of all packs in the MIDX (ordered by
+ * their numeric ID within the MIDX, keeping each pack's objects in their
+ * original order), removing duplicates, and placing the preferred pack
+ * (if any) first. For example, with packs p0 and p1 where p1 is
+ * preferred, the pseudo-pack lists p1's objects in pack order followed
+ * by those objects of p0 that do not also appear in p1.
*/
@@ -24,6 +38,7 @@
#define GIT_TEST_REV_INDEX_DIE_IN_MEMORY "GIT_TEST_REV_INDEX_DIE_IN_MEMORY"
struct packed_git;
+struct multi_pack_index;
/*
* load_pack_revindex populates the revindex's internal data-structures for the
@@ -35,6 +50,22 @@ struct packed_git;
int load_pack_revindex(struct packed_git *p);
/*
+ * load_midx_revindex loads the '.rev' file corresponding to the given
+ * multi-pack index by mmap-ing it and assigning pointers in the
+ * multi_pack_index to point at it.
+ *
+ * A negative number is returned on error.
+ */
+int load_midx_revindex(struct multi_pack_index *m);
+
+/*
+ * Frees resources associated with a multi-pack reverse index.
+ *
+ * A negative number is returned on error.
+ */
+int close_midx_revindex(struct multi_pack_index *m);
+
+/*
* offset_to_pack_pos converts an object offset to a pack position. This
* function returns zero on success, and a negative number otherwise. The
* parameter 'pos' is usable only on success.
@@ -71,4 +102,26 @@ uint32_t pack_pos_to_index(struct packed_git *p, uint32_t pos);
*/
off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos);
+/*
+ * pack_pos_to_midx converts the object at position "pos" within the MIDX
+ * pseudo-pack into a MIDX position.
+ *
+ * If the reverse index has not yet been loaded, or the position is out of
+ * bounds, this function aborts.
+ *
+ * This function runs in constant time.
+ */
+uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos);
+
+/*
+ * midx_to_pack_pos converts from the MIDX-relative position at "at" to the
+ * corresponding pack position.
+ *
+ * If the reverse index has not yet been loaded, or the position is out of
+ * bounds, this function aborts.
+ *
+ * This function runs in time O(log N) with the number of objects in the MIDX.
+ */
+int midx_to_pack_pos(struct multi_pack_index *midx, uint32_t at, uint32_t *pos);
+
#endif
diff --git a/pack-write.c b/pack-write.c
index 2ca85a9d16..f1fc3ecafa 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -201,21 +201,12 @@ static void write_rev_header(struct hashfile *f)
}
static void write_rev_index_positions(struct hashfile *f,
- struct pack_idx_entry **objects,
+ uint32_t *pack_order,
uint32_t nr_objects)
{
- uint32_t *pack_order;
uint32_t i;
-
- ALLOC_ARRAY(pack_order, nr_objects);
- for (i = 0; i < nr_objects; i++)
- pack_order[i] = i;
- QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
-
for (i = 0; i < nr_objects; i++)
hashwrite_be32(f, pack_order[i]);
-
- free(pack_order);
}
static void write_rev_trailer(struct hashfile *f, const unsigned char *hash)
@@ -229,6 +220,29 @@ const char *write_rev_file(const char *rev_name,
const unsigned char *hash,
unsigned flags)
{
+ uint32_t *pack_order;
+ uint32_t i;
+ const char *ret;
+
+ ALLOC_ARRAY(pack_order, nr_objects);
+ for (i = 0; i < nr_objects; i++)
+ pack_order[i] = i;
+ QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
+
+ ret = write_rev_file_order(rev_name, pack_order, nr_objects, hash,
+ flags);
+
+ free(pack_order);
+
+ return ret;
+}
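
/*
 * Editor's sketch (not part of the patch): write_rev_file() above uses
 * the "argsort" idiom -- sort an identity permutation by keys held in
 * another array, leaving that array untouched.  A toy version using
 * plain qsort() with a file-scope key array instead of QSORT_S with a
 * context pointer:
 */
#include <stdio.h>
#include <stdlib.h>

static const long offsets[] = { 300, 100, 200 };	/* key array */

static int by_offset(const void *va, const void *vb)
{
	long a = offsets[*(const unsigned *)va];
	long b = offsets[*(const unsigned *)vb];

	return (a > b) - (a < b);
}

int main(void)
{
	unsigned order[] = { 0, 1, 2 };	/* identity permutation */
	size_t i;

	qsort(order, 3, sizeof(*order), by_offset);
	for (i = 0; i < 3; i++)
		printf("%u ", order[i]);	/* prints: 1 2 0 */
	printf("\n");
	return 0;
}
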
+
+const char *write_rev_file_order(const char *rev_name,
+ uint32_t *pack_order,
+ uint32_t nr_objects,
+ const unsigned char *hash,
+ unsigned flags)
+{
struct hashfile *f;
int fd;
@@ -262,7 +276,7 @@ const char *write_rev_file(const char *rev_name,
write_rev_header(f);
- write_rev_index_positions(f, objects, nr_objects);
+ write_rev_index_positions(f, pack_order, nr_objects);
write_rev_trailer(f, hash);
if (rev_name && adjust_shared_perm(rev_name) < 0)
diff --git a/pack.h b/pack.h
index 857cbd5bd4..fa13954526 100644
--- a/pack.h
+++ b/pack.h
@@ -94,6 +94,7 @@ struct ref;
void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought);
const char *write_rev_file(const char *rev_name, struct pack_idx_entry **objects, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
+const char *write_rev_file_order(const char *rev_name, uint32_t *pack_order, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
/*
* The "hdr" output buffer should be at least this big, which will handle sizes
diff --git a/packfile.c b/packfile.c
index 6661f3325a..8668345d93 100644
--- a/packfile.c
+++ b/packfile.c
@@ -862,6 +862,9 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
if (!strcmp(file_name, "multi-pack-index"))
return;
+ if (starts_with(file_name, "multi-pack-index") &&
+ ends_with(file_name, ".rev"))
+ return;
if (ends_with(file_name, ".idx") ||
ends_with(file_name, ".rev") ||
ends_with(file_name, ".pack") ||
diff --git a/parse-options.c b/parse-options.c
index fbea16eaf5..e6f56768ca 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -625,6 +625,8 @@ static int show_gitcomp(const struct option *opts, int show_all)
*
* Right now this is only used to preprocess and substitute
* OPTION_ALIAS.
+ *
+ * The returned options should be freed using free_preprocessed_options.
*/
static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
const struct option *options)
@@ -678,6 +680,7 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
newopt[i].short_name = short_name;
newopt[i].long_name = long_name;
newopt[i].help = strbuf_detach(&help, NULL);
+ newopt[i].flags |= PARSE_OPT_FROM_ALIAS;
break;
}
@@ -693,6 +696,20 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
return newopt;
}
+static void free_preprocessed_options(struct option *options)
+{
+ int i;
+
+ if (!options)
+ return;
+
+ for (i = 0; options[i].type != OPTION_END; i++) {
+ if (options[i].flags & PARSE_OPT_FROM_ALIAS)
+ free((void *)options[i].help);
+ }
+ free(options);
+}
+
static int usage_with_options_internal(struct parse_opt_ctx_t *,
const char * const *,
const struct option *, int, int);
@@ -870,7 +887,7 @@ int parse_options(int argc, const char **argv, const char *prefix,
}
precompose_argv_prefix(argc, argv, NULL);
- free(real_options);
+ free_preprocessed_options(real_options);
free(ctx.alias_groups);
return parse_options_end(&ctx);
}
diff --git a/parse-options.h b/parse-options.h
index ff6506a504..a845a9d952 100644
--- a/parse-options.h
+++ b/parse-options.h
@@ -28,26 +28,27 @@ enum parse_opt_type {
};
enum parse_opt_flags {
- PARSE_OPT_KEEP_DASHDASH = 1,
- PARSE_OPT_STOP_AT_NON_OPTION = 2,
- PARSE_OPT_KEEP_ARGV0 = 4,
- PARSE_OPT_KEEP_UNKNOWN = 8,
- PARSE_OPT_NO_INTERNAL_HELP = 16,
- PARSE_OPT_ONE_SHOT = 32
+ PARSE_OPT_KEEP_DASHDASH = 1 << 0,
+ PARSE_OPT_STOP_AT_NON_OPTION = 1 << 1,
+ PARSE_OPT_KEEP_ARGV0 = 1 << 2,
+ PARSE_OPT_KEEP_UNKNOWN = 1 << 3,
+ PARSE_OPT_NO_INTERNAL_HELP = 1 << 4,
+ PARSE_OPT_ONE_SHOT = 1 << 5,
};
enum parse_opt_option_flags {
- PARSE_OPT_OPTARG = 1,
- PARSE_OPT_NOARG = 2,
- PARSE_OPT_NONEG = 4,
- PARSE_OPT_HIDDEN = 8,
- PARSE_OPT_LASTARG_DEFAULT = 16,
- PARSE_OPT_NODASH = 32,
- PARSE_OPT_LITERAL_ARGHELP = 64,
- PARSE_OPT_SHELL_EVAL = 256,
- PARSE_OPT_NOCOMPLETE = 512,
- PARSE_OPT_COMP_ARG = 1024,
- PARSE_OPT_CMDMODE = 2048
+ PARSE_OPT_OPTARG = 1 << 0,
+ PARSE_OPT_NOARG = 1 << 1,
+ PARSE_OPT_NONEG = 1 << 2,
+ PARSE_OPT_HIDDEN = 1 << 3,
+ PARSE_OPT_LASTARG_DEFAULT = 1 << 4,
+ PARSE_OPT_NODASH = 1 << 5,
+ PARSE_OPT_LITERAL_ARGHELP = 1 << 6,
+ PARSE_OPT_FROM_ALIAS = 1 << 7,
+ PARSE_OPT_SHELL_EVAL = 1 << 8,
+ PARSE_OPT_NOCOMPLETE = 1 << 9,
+ PARSE_OPT_COMP_ARG = 1 << 10,
+ PARSE_OPT_CMDMODE = 1 << 11,
};
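
/*
 * Editor's sketch (not part of the patch): the shift rewrite above is
 * value-preserving; the old enum skipped 128, which is exactly the gap
 * the new PARSE_OPT_FROM_ALIAS (1 << 7) fills.
 */
#include <assert.h>

int main(void)
{
	assert((1 << 4) == 16 && (1 << 6) == 64);
	assert((1 << 7) == 128);	/* previously unused, now FROM_ALIAS */
	assert((1 << 8) == 256 && (1 << 11) == 2048);
	return 0;
}
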
enum parse_opt_result {
diff --git a/path.c b/path.c
index 7b385e5eb2..9e883eb524 100644
--- a/path.c
+++ b/path.c
@@ -1534,5 +1534,6 @@ REPO_GIT_PATH_FUNC(merge_rr, "MERGE_RR")
REPO_GIT_PATH_FUNC(merge_mode, "MERGE_MODE")
REPO_GIT_PATH_FUNC(merge_head, "MERGE_HEAD")
REPO_GIT_PATH_FUNC(merge_autostash, "MERGE_AUTOSTASH")
+REPO_GIT_PATH_FUNC(auto_merge, "AUTO_MERGE")
REPO_GIT_PATH_FUNC(fetch_head, "FETCH_HEAD")
REPO_GIT_PATH_FUNC(shallow, "shallow")
diff --git a/path.h b/path.h
index e7e77da6aa..251c78d980 100644
--- a/path.h
+++ b/path.h
@@ -176,6 +176,7 @@ struct path_cache {
const char *merge_mode;
const char *merge_head;
const char *merge_autostash;
+ const char *auto_merge;
const char *fetch_head;
const char *shallow;
};
@@ -191,6 +192,7 @@ const char *git_path_merge_rr(struct repository *r);
const char *git_path_merge_mode(struct repository *r);
const char *git_path_merge_head(struct repository *r);
const char *git_path_merge_autostash(struct repository *r);
+const char *git_path_auto_merge(struct repository *r);
const char *git_path_fetch_head(struct repository *r);
const char *git_path_shallow(struct repository *r);
diff --git a/perl/Git.pm b/perl/Git.pm
index 02eacef0c2..73ebbf80cc 100644
--- a/perl/Git.pm
+++ b/perl/Git.pm
@@ -619,6 +619,19 @@ Return path to the git repository. Must be called on a repository instance.
sub repo_path { $_[0]->{opts}->{Repository} }
+=item hooks_path ()
+
+Return path to the hooks directory. Must be called on a repository instance.
+
+=cut
+
+sub hooks_path {
+ my ($self) = @_;
+
+ my $dir = $self->command_oneline('rev-parse', '--git-path', 'hooks');
+ my $abs = abs_path($dir);
+ return $abs;
+}
=item wc_path ()
diff --git a/pkt-line.c b/pkt-line.c
index d633005ef7..0194137528 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -196,17 +196,26 @@ int packet_write_fmt_gently(int fd, const char *fmt, ...)
static int packet_write_gently(const int fd_out, const char *buf, size_t size)
{
- static char packet_write_buffer[LARGE_PACKET_MAX];
+ char header[4];
size_t packet_size;
- if (size > sizeof(packet_write_buffer) - 4)
+ if (size > LARGE_PACKET_DATA_MAX)
return error(_("packet write failed - data exceeds max packet size"));
packet_trace(buf, size, 1);
packet_size = size + 4;
- set_packet_header(packet_write_buffer, packet_size);
- memcpy(packet_write_buffer + 4, buf, size);
- if (write_in_full(fd_out, packet_write_buffer, packet_size) < 0)
+
+ set_packet_header(header, packet_size);
+
+ /*
+ * Write the header and the buffer in 2 parts so that we do
+ * not need to allocate a buffer or rely on a static buffer.
+ * This also avoids putting a large buffer on the stack which
+ * might have multi-threading issues.
+ */
+
+ if (write_in_full(fd_out, header, 4) < 0 ||
+ write_in_full(fd_out, buf, size) < 0)
return error(_("packet write failed"));
return 0;
}
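
/*
 * Editor's sketch (not part of the patch): the 4-byte header written by
 * packet_write_gently() is just the total packet length (payload + 4
 * header bytes) rendered as four lower-case hex digits, so a 5-byte
 * payload such as "hello" gets the header "0009".  A toy version of
 * that encoding (toy_packet_header() is illustrative, not a Git API):
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void toy_packet_header(char *out, size_t payload_len)
{
	/* out must have room for 4 hex digits plus a NUL terminator. */
	snprintf(out, 5, "%04x", (unsigned)(payload_len + 4));
}

int main(void)
{
	char header[5];

	toy_packet_header(header, strlen("hello"));
	assert(!strcmp(header, "0009"));
	return 0;
}
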
@@ -242,26 +251,27 @@ void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
packet_trace(data, len, 1);
}
-int write_packetized_from_fd(int fd_in, int fd_out)
+int write_packetized_from_fd_no_flush(int fd_in, int fd_out)
{
- static char buf[LARGE_PACKET_DATA_MAX];
+ char *buf = xmalloc(LARGE_PACKET_DATA_MAX);
int err = 0;
ssize_t bytes_to_write;
while (!err) {
- bytes_to_write = xread(fd_in, buf, sizeof(buf));
- if (bytes_to_write < 0)
+ bytes_to_write = xread(fd_in, buf, LARGE_PACKET_DATA_MAX);
+ if (bytes_to_write < 0) {
+ free(buf);
return COPY_READ_ERROR;
+ }
if (bytes_to_write == 0)
break;
err = packet_write_gently(fd_out, buf, bytes_to_write);
}
- if (!err)
- err = packet_flush_gently(fd_out);
+ free(buf);
return err;
}
-int write_packetized_from_buf(const char *src_in, size_t len, int fd_out)
+int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out)
{
int err = 0;
size_t bytes_written = 0;
@@ -277,8 +287,6 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out)
err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write);
bytes_written += bytes_to_write;
}
- if (!err)
- err = packet_flush_gently(fd_out);
return err;
}
@@ -298,8 +306,11 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
*src_size -= ret;
} else {
ret = read_in_full(fd, dst, size);
- if (ret < 0)
+ if (ret < 0) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error_errno(_("read error"));
die_errno(_("read error"));
+ }
}
/* And complain if we didn't get enough bytes to satisfy the read. */
@@ -307,6 +318,8 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
if (options & PACKET_READ_GENTLE_ON_EOF)
return -1;
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("the remote end hung up unexpectedly"));
die(_("the remote end hung up unexpectedly"));
}
@@ -335,6 +348,9 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
len = packet_length(linelen);
if (len < 0) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length "
+ "character: %.4s"), linelen);
die(_("protocol error: bad line length character: %.4s"), linelen);
} else if (!len) {
packet_trace("0000", 4, 0);
@@ -349,12 +365,19 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
*pktlen = 0;
return PACKET_READ_RESPONSE_END;
} else if (len < 4) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length %d"),
+ len);
die(_("protocol error: bad line length %d"), len);
}
len -= 4;
- if ((unsigned)len >= size)
+ if ((unsigned)len >= size) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length %d"),
+ len);
die(_("protocol error: bad line length %d"), len);
+ }
if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
*pktlen = -1;
@@ -421,7 +444,7 @@ char *packet_read_line_buf(char **src, size_t *src_len, int *dst_len)
return packet_read_line_generic(-1, src, src_len, dst_len);
}
-ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options)
{
int packet_len;
@@ -437,7 +460,7 @@ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
* that there is already room for the extra byte.
*/
sb_out->buf + sb_out->len, LARGE_PACKET_DATA_MAX+1,
- PACKET_READ_GENTLE_ON_EOF);
+ options);
if (packet_len <= 0)
break;
sb_out->len += packet_len;
diff --git a/pkt-line.h b/pkt-line.h
index 8c90daa59e..5af5f45687 100644
--- a/pkt-line.h
+++ b/pkt-line.h
@@ -32,8 +32,8 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f
void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
int packet_flush_gently(int fd);
int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
-int write_packetized_from_fd(int fd_in, int fd_out);
-int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
+int write_packetized_from_fd_no_flush(int fd_in, int fd_out);
+int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out);
/*
* Read a packetized line into the buffer, which must be at least size bytes
@@ -68,10 +68,15 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
*
* If options contains PACKET_READ_DIE_ON_ERR_PACKET, it dies when it sees an
* ERR packet.
+ *
+ * If options contains PACKET_READ_GENTLE_ON_READ_ERROR, we will not die
+ * on read errors, but instead return -1. However, we may still die on an
+ * ERR packet (if requested).
*/
-#define PACKET_READ_GENTLE_ON_EOF (1u<<0)
-#define PACKET_READ_CHOMP_NEWLINE (1u<<1)
-#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2)
+#define PACKET_READ_GENTLE_ON_EOF (1u<<0)
+#define PACKET_READ_CHOMP_NEWLINE (1u<<1)
+#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2)
+#define PACKET_READ_GENTLE_ON_READ_ERROR (1u<<3)
int packet_read(int fd, char **src_buffer, size_t *src_len, char
*buffer, unsigned size, int options);
@@ -131,7 +136,7 @@ char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size);
/*
* Reads a stream of variable sized packets until a flush packet is detected.
*/
-ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out);
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options);
/*
* Receive multiplexed output stream over git native protocol.
diff --git a/ref-filter.c b/ref-filter.c
index f0bd32f714..a0adb4551d 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -1608,7 +1608,7 @@ static int get_object(struct ref_array_item *ref, int deref, struct object **obj
if (oi->info.contentp) {
*obj = parse_object_buffer(the_repository, &oi->oid, oi->type, oi->size, oi->content, &eaten);
- if (!obj) {
+ if (!*obj) {
if (!eaten)
free(oi->content);
return strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
diff --git a/revision.h b/revision.h
index a20a530d52..a24f72dcd1 100644
--- a/revision.h
+++ b/revision.h
@@ -236,7 +236,7 @@ struct rev_info {
const char *mime_boundary;
const char *patch_suffix;
int numbered_files;
- int reroll_count;
+ const char *reroll_count;
char *message_id;
struct ident_split from_ident;
struct string_list *ref_message_ids;
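Because reroll_count is now a string rather than an int, callers can carry values such as "2.9" or "4rev2" straight into the subject prefix. A hedged sketch of such a caller (the helper name is invented; only the struct field above comes from the patch):

#include "cache.h"
#include "revision.h"

/* Hypothetical helper: append the reroll suffix to a subject prefix. */
static void append_reroll_suffix(struct strbuf *sb, const struct rev_info *revs)
{
	if (revs->reroll_count)
		strbuf_addf(sb, " v%s", revs->reroll_count);
}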
diff --git a/sequencer.c b/sequencer.c
index b4c63ae207..c29a36824c 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -2032,13 +2032,24 @@ static void record_in_rewritten(struct object_id *oid,
flush_rewritten_pending();
}
+static int should_edit(struct replay_opts *opts) {
+ if (opts->edit < 0)
+ /*
+ * Note that we only handle the case of non-conflicted
+ * commits; continue_single_pick() handles the conflicted
+ * commits itself instead of calling this function.
+ */
+ return (opts->action == REPLAY_REVERT && isatty(0)) ? 1 : 0;
+ return opts->edit;
+}
+
static int do_pick_commit(struct repository *r,
struct todo_item *item,
struct replay_opts *opts,
int final_fixup, int *check_todo)
{
- unsigned int flags = opts->edit ? EDIT_MSG : 0;
- const char *msg_file = opts->edit ? NULL : git_path_merge_msg(r);
+ unsigned int flags = should_edit(opts) ? EDIT_MSG : 0;
+ const char *msg_file = should_edit(opts) ? NULL : git_path_merge_msg(r);
struct object_id head;
struct commit *base, *next, *parent;
const char *base_label, *next_label;
@@ -2270,6 +2281,7 @@ static int do_pick_commit(struct repository *r,
refs_delete_ref(get_main_ref_store(r), "", "CHERRY_PICK_HEAD",
NULL, 0);
unlink(git_path_merge_msg(r));
+ unlink(git_path_auto_merge(r));
fprintf(stderr,
_("dropping %s %s -- patch contents already upstream\n"),
oid_to_hex(&commit->object.oid), msg.subject);
@@ -2633,6 +2645,8 @@ void sequencer_post_commit_cleanup(struct repository *r, int verbose)
need_cleanup = 1;
}
+ unlink(git_path_auto_merge(r));
+
if (!need_cleanup)
return;
@@ -3283,9 +3297,9 @@ static int save_opts(struct replay_opts *opts)
if (opts->no_commit)
res |= git_config_set_in_file_gently(opts_file,
"options.no-commit", "true");
- if (opts->edit)
- res |= git_config_set_in_file_gently(opts_file,
- "options.edit", "true");
+ if (opts->edit >= 0)
+ res |= git_config_set_in_file_gently(opts_file, "options.edit",
+ opts->edit ? "true" : "false");
if (opts->allow_empty)
res |= git_config_set_in_file_gently(opts_file,
"options.allow-empty", "true");
@@ -4259,7 +4273,7 @@ static int pick_commits(struct repository *r,
prev_reflog_action = xstrdup(getenv(GIT_REFLOG_ACTION));
if (opts->allow_ff)
assert(!(opts->signoff || opts->no_commit ||
- opts->record_origin || opts->edit ||
+ opts->record_origin || should_edit(opts) ||
opts->committer_date_is_author_date ||
opts->ignore_date));
if (read_and_refresh_cache(r, opts))
@@ -4293,6 +4307,7 @@ static int pick_commits(struct repository *r,
unlink(rebase_path_stopped_sha());
unlink(rebase_path_amend());
unlink(git_path_merge_head(r));
+ unlink(git_path_auto_merge(r));
delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
if (item->command == TODO_BREAK) {
@@ -4552,14 +4567,33 @@ cleanup_head_ref:
return sequencer_remove_state(opts);
}
-static int continue_single_pick(struct repository *r)
+static int continue_single_pick(struct repository *r, struct replay_opts *opts)
{
- const char *argv[] = { "commit", NULL };
+ struct strvec argv = STRVEC_INIT;
+ int ret;
if (!refs_ref_exists(get_main_ref_store(r), "CHERRY_PICK_HEAD") &&
!refs_ref_exists(get_main_ref_store(r), "REVERT_HEAD"))
return error(_("no cherry-pick or revert in progress"));
- return run_command_v_opt(argv, RUN_GIT_CMD);
+
+ strvec_push(&argv, "commit");
+
+ /*
+ * continue_single_pick() handles the case of recovering from a
+ * conflict. should_edit() doesn't handle that case; for a conflict,
+ * we want to edit if the user asked for it, or if they didn't specify
+ * and stdin is a tty.
+ */
+ if (!opts->edit || (opts->edit < 0 && !isatty(0)))
+ /*
+ * Include --cleanup=strip as well because we don't want the
+ * "# Conflicts:" messages.
+ */
+ strvec_pushl(&argv, "--no-edit", "--cleanup=strip", NULL);
+
+ ret = run_command_v_opt(argv.v, RUN_GIT_CMD);
+ strvec_clear(&argv);
+ return ret;
}
static int commit_staged_changes(struct repository *r,
@@ -4687,6 +4721,7 @@ static int commit_staged_changes(struct repository *r,
return error(_("could not commit staged changes."));
unlink(rebase_path_amend());
unlink(git_path_merge_head(r));
+ unlink(git_path_auto_merge(r));
if (final_fixup) {
unlink(rebase_path_fixup_msg());
unlink(rebase_path_squash_msg());
@@ -4729,7 +4764,7 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
goto release_todo_list;
}
} else if (!file_exists(get_todo_path(opts)))
- return continue_single_pick(r);
+ return continue_single_pick(r, opts);
else if ((res = read_populate_todo(r, &todo_list, opts)))
goto release_todo_list;
@@ -4738,7 +4773,7 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
if (refs_ref_exists(get_main_ref_store(r),
"CHERRY_PICK_HEAD") ||
refs_ref_exists(get_main_ref_store(r), "REVERT_HEAD")) {
- res = continue_single_pick(r);
+ res = continue_single_pick(r, opts);
if (res)
goto release_todo_list;
}
diff --git a/sequencer.h b/sequencer.h
index f8b2e4ab85..d57d8ea23d 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -31,8 +31,10 @@ enum commit_msg_cleanup_mode {
struct replay_opts {
enum replay_action action;
- /* Boolean options */
+ /* Tri-state options: unspecified, false, or true */
int edit;
+
+ /* Boolean options */
int record_origin;
int no_commit;
int signoff;
@@ -71,7 +73,7 @@ struct replay_opts {
/* Only used by REPLAY_NONE */
struct rev_info *revs;
};
-#define REPLAY_OPTS_INIT { .action = -1, .current_fixups = STRBUF_INIT }
+#define REPLAY_OPTS_INIT { .edit = -1, .action = -1, .current_fixups = STRBUF_INIT }
/*
* Note that ordering matters in this enum. Not only must it match the mapping
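As a sketch of how the tri-state edit field could round-trip through a saved opts file, consider the following (the helper is hypothetical and not taken from this patch; only REPLAY_OPTS_INIT and the -1 "unspecified" convention above are real):

#include "cache.h"
#include "config.h"
#include "sequencer.h"

static void load_edit_option(struct config_set *cs, struct replay_opts *opts)
{
	int edit;

	/* only an explicit "options.edit" entry overrides the -1 default */
	if (!git_configset_get_bool(cs, "options.edit", &edit))
		opts->edit = edit;
	/* otherwise opts->edit stays -1 and should_edit() picks the default */
}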
diff --git a/setup.c b/setup.c
index c04cd25a30..59e2facd9d 100644
--- a/setup.c
+++ b/setup.c
@@ -1274,18 +1274,10 @@ const char *setup_git_directory_gently(int *nongit_ok)
* the GIT_PREFIX environment variable must always match. For details
* see Documentation/config/alias.txt.
*/
- if (nongit_ok && *nongit_ok) {
+ if (nongit_ok && *nongit_ok)
startup_info->have_repository = 0;
- startup_info->prefix = NULL;
- setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
- } else {
+ else
startup_info->have_repository = 1;
- startup_info->prefix = prefix;
- if (prefix)
- setenv(GIT_PREFIX_ENVIRONMENT, prefix, 1);
- else
- setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
- }
/*
* Not all paths through the setup code will call 'set_git_dir()' (which
@@ -1311,6 +1303,22 @@ const char *setup_git_directory_gently(int *nongit_ok)
if (startup_info->have_repository)
repo_set_hash_algo(the_repository, repo_fmt.hash_algo);
}
+ /*
+ * Since precompose_string_if_needed() needs to look at
+ * the core.precomposeunicode configuration, this
+ * has to happen after the above block that finds
+ * out where the repository is, i.e. a preparation
+ * for calling git_config_get_bool().
+ */
+ if (prefix) {
+ prefix = precompose_string_if_needed(prefix);
+ startup_info->prefix = prefix;
+ setenv(GIT_PREFIX_ENVIRONMENT, prefix, 1);
+ } else {
+ startup_info->prefix = NULL;
+ setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
+ }
+
strbuf_release(&dir);
strbuf_release(&gitdir);
diff --git a/simple-ipc.h b/simple-ipc.h
new file mode 100644
index 0000000000..dc3606e30b
--- /dev/null
+++ b/simple-ipc.h
@@ -0,0 +1,239 @@
+#ifndef GIT_SIMPLE_IPC_H
+#define GIT_SIMPLE_IPC_H
+
+/*
+ * See Documentation/technical/api-simple-ipc.txt
+ */
+
+#if defined(GIT_WINDOWS_NATIVE) || !defined(NO_UNIX_SOCKETS)
+#define SUPPORTS_SIMPLE_IPC
+#endif
+
+#ifdef SUPPORTS_SIMPLE_IPC
+#include "pkt-line.h"
+
+/*
+ * Simple IPC Client Side API.
+ */
+
+enum ipc_active_state {
+ /*
+ * The pipe/socket exists and the daemon is waiting for connections.
+ */
+ IPC_STATE__LISTENING = 0,
+
+ /*
+ * The pipe/socket exists, but the daemon is not listening.
+ * Perhaps it is very busy.
+ * Perhaps the daemon died without deleting the path.
+ * Perhaps it is shutting down and draining existing clients.
+ * Perhaps it is dead, but other clients are lingering and
+ * still holding a reference to the pathname.
+ */
+ IPC_STATE__NOT_LISTENING,
+
+ /*
+ * The requested pathname is bogus and no amount of retries
+ * will fix that.
+ */
+ IPC_STATE__INVALID_PATH,
+
+ /*
+ * The requested pathname is not found. This usually means
+ * that there is no daemon present.
+ */
+ IPC_STATE__PATH_NOT_FOUND,
+
+ IPC_STATE__OTHER_ERROR,
+};
+
+struct ipc_client_connect_options {
+ /*
+ * Spin under timeout if the server is running but can't
+ * accept our connection yet. This should always be set
+ * unless you just want to poke the server and see if it
+ * is alive.
+ */
+ unsigned int wait_if_busy:1;
+
+ /*
+ * Spin under timeout if the pipe/socket is not yet present
+ * on the file system. This is useful if we just started
+ * the service and need to wait for it to become ready.
+ */
+ unsigned int wait_if_not_found:1;
+
+ /*
+ * Disallow chdir() when creating a Unix domain socket.
+ */
+ unsigned int uds_disallow_chdir:1;
+};
+
+#define IPC_CLIENT_CONNECT_OPTIONS_INIT { \
+ .wait_if_busy = 0, \
+ .wait_if_not_found = 0, \
+ .uds_disallow_chdir = 0, \
+}
+
+/*
+ * Determine if a server is listening on this named pipe or socket using
+ * platform-specific logic. This might just probe the filesystem or it
+ * might make a trivial connection to the server using this pathname.
+ */
+enum ipc_active_state ipc_get_active_state(const char *path);
+
+struct ipc_client_connection {
+ int fd;
+};
+
+/*
+ * Try to connect to the daemon on the named pipe or socket.
+ *
+ * Returns IPC_STATE__LISTENING and a connection handle.
+ *
+ * Otherwise, returns info to help decide whether to retry or to
+ * spawn/respawn the server.
+ */
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection);
+
+void ipc_client_close_connection(struct ipc_client_connection *connection);
+
+/*
+ * Used by the client to synchronously send and receive a message with
+ * the server on the provided client connection.
+ *
+ * Returns 0 when successful.
+ *
+ * Calls error() and returns non-zero otherwise.
+ */
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer);
+
+/*
+ * Used by the client to synchronously connect and send and receive a
+ * message to the server listening at the given path.
+ *
+ * Returns 0 when successful.
+ *
+ * Calls error() and returns non-zero otherwise.
+ */
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *answer);
+
+/*
+ * Simple IPC Server Side API.
+ */
+
+struct ipc_server_reply_data;
+
+typedef int (ipc_server_reply_cb)(struct ipc_server_reply_data *,
+ const char *response,
+ size_t response_len);
+
+/*
+ * Prototype for an application-supplied callback to process incoming
+ * client IPC messages and compose a reply. The `application_cb` should
+ * use the provided `reply_cb` and `reply_data` to send an IPC response
+ * back to the client. The `reply_cb` callback can be called multiple
+ * times for chunking purposes. A reply message is optional and may be
+ * omitted if not necessary for the application.
+ *
+ * The return value from the application callback is otherwise ignored;
+ * the value `SIMPLE_IPC_QUIT` can be returned to shut down the server.
+ */
+typedef int (ipc_server_application_cb)(void *application_data,
+ const char *request,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data);
+
+#define SIMPLE_IPC_QUIT -2
+
+/*
+ * Opaque instance data to represent an IPC server instance.
+ */
+struct ipc_server_data;
+
+/*
+ * Control parameters for the IPC server instance.
+ * Use this to hide platform-specific settings.
+ */
+struct ipc_server_opts
+{
+ int nr_threads;
+
+ /*
+ * Disallow chdir() when creating a Unix domain socket.
+ */
+ unsigned int uds_disallow_chdir:1;
+};
+
+/*
+ * Start an IPC server instance in one or more background threads
+ * and return a handle to the pool.
+ *
+ * Returns 0 if the asynchronous server pool was started successfully.
+ * Returns -1 if not.
+ * Returns -2 if we could not start up because another server is using
+ * the socket or named pipe.
+ *
+ * When a client IPC message is received, the `application_cb` will be
+ * called (possibly on a random thread) to handle the message and
+ * optionally compose a reply message.
+ */
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data);
+
+/*
+ * Gently signal the IPC server pool to shut down. No new client
+ * connections will be accepted, but existing connections will be
+ * allowed to complete.
+ */
+int ipc_server_stop_async(struct ipc_server_data *server_data);
+
+/*
+ * Block the calling thread until all threads in the IPC server pool
+ * have completed and been joined.
+ */
+int ipc_server_await(struct ipc_server_data *server_data);
+
+/*
+ * Close and free all resource handles associated with the IPC server
+ * pool.
+ */
+void ipc_server_free(struct ipc_server_data *server_data);
+
+/*
+ * Run an IPC server instance and block the calling thread of the
+ * current process. It does not return until the IPC server has
+ * either shut down or had an unrecoverable error.
+ *
+ * The IPC server handles incoming IPC messages from client processes
+ * and may use one or more background threads as necessary.
+ *
+ * Returns 0 after the server has completed successfully.
+ * Returns -1 if the server cannot be started.
+ * Returns -2 if we could not start up because another server is using
+ * the socket or named pipe.
+ *
+ * When a client IPC message is received, the `application_cb` will be
+ * called (possibly on a random thread) to handle the message and
+ * optionally compose a reply message.
+ *
+ * Note that `ipc_server_run()` is a synchronous wrapper around the
+ * above asynchronous routines. It effectively hides all of the
+ * server state and thread details from the caller and presents a
+ * simple synchronous interface.
+ */
+int ipc_server_run(const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data);
+
+#endif /* SUPPORTS_SIMPLE_IPC */
+#endif /* GIT_SIMPLE_IPC_H */
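For readers of the new header, a compact client-side usage sketch follows (illustrative only and assuming SUPPORTS_SIMPLE_IPC; the function name and the "my-ipc" path are invented, while the simple-ipc calls are the ones declared above):

#include "cache.h"
#include "simple-ipc.h"
#include "strbuf.h"

static int send_ping(void)
{
	struct ipc_client_connect_options options =
		IPC_CLIENT_CONNECT_OPTIONS_INIT;
	struct strbuf answer = STRBUF_INIT;
	int ret;

	options.wait_if_busy = 1;

	ret = ipc_client_send_command("my-ipc", &options, "ping", &answer);
	if (!ret)
		printf("reply: %s\n", answer.buf);
	strbuf_release(&answer);
	return ret;
}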
diff --git a/symlinks.c b/symlinks.c
index 7dbb6b23d9..5232d02020 100644
--- a/symlinks.c
+++ b/symlinks.c
@@ -1,6 +1,7 @@
#include "cache.h"
-static int threaded_check_leading_path(struct cache_def *cache, const char *name, int len);
+static int threaded_check_leading_path(struct cache_def *cache, const char *name,
+ int len, int warn_on_lstat_err);
static int threaded_has_dirs_only_path(struct cache_def *cache, const char *name, int len, int prefix_len);
/*
@@ -72,7 +73,7 @@ static int lstat_cache_matchlen(struct cache_def *cache,
int prefix_len_stat_func)
{
int match_len, last_slash, last_slash_dir, previous_slash;
- int save_flags, ret;
+ int save_flags, ret, saved_errno = 0;
struct stat st;
if (cache->track_flags != track_flags ||
@@ -139,6 +140,7 @@ static int lstat_cache_matchlen(struct cache_def *cache,
if (ret) {
*ret_flags = FL_LSTATERR;
+ saved_errno = errno;
if (errno == ENOENT)
*ret_flags |= FL_NOENT;
} else if (S_ISDIR(st.st_mode)) {
@@ -180,6 +182,8 @@ static int lstat_cache_matchlen(struct cache_def *cache,
} else {
reset_lstat_cache(cache);
}
+ if (saved_errno)
+ errno = saved_errno;
return match_len;
}
@@ -202,57 +206,47 @@ int threaded_has_symlink_leading_path(struct cache_def *cache, const char *name,
return lstat_cache(cache, name, len, FL_SYMLINK|FL_DIR, USE_ONLY_LSTAT) & FL_SYMLINK;
}
-/*
- * Return non-zero if path 'name' has a leading symlink component
- */
int has_symlink_leading_path(const char *name, int len)
{
return threaded_has_symlink_leading_path(&default_cache, name, len);
}
-/*
- * Return zero if path 'name' has a leading symlink component or
- * if some leading path component does not exists.
- *
- * Return -1 if leading path exists and is a directory.
- *
- * Return path length if leading path exists and is neither a
- * directory nor a symlink.
- */
-int check_leading_path(const char *name, int len)
+int check_leading_path(const char *name, int len, int warn_on_lstat_err)
{
- return threaded_check_leading_path(&default_cache, name, len);
+ return threaded_check_leading_path(&default_cache, name, len,
+ warn_on_lstat_err);
}
/*
- * Return zero if path 'name' has a leading symlink component or
- * if some leading path component does not exists.
+ * Return zero if some leading path component of 'name' does not exist.
*
* Return -1 if leading path exists and is a directory.
*
- * Return path length if leading path exists and is neither a
- * directory nor a symlink.
+ * Return the length of a leading component if it either exists but it's not a
+ * directory, or if we were unable to lstat() it. If warn_on_lstat_err is true,
+ * also emit a warning for this error.
*/
-static int threaded_check_leading_path(struct cache_def *cache, const char *name, int len)
+static int threaded_check_leading_path(struct cache_def *cache, const char *name,
+ int len, int warn_on_lstat_err)
{
int flags;
int match_len = lstat_cache_matchlen(cache, name, len, &flags,
FL_SYMLINK|FL_NOENT|FL_DIR, USE_ONLY_LSTAT);
+ int saved_errno = errno;
+
if (flags & FL_NOENT)
return 0;
else if (flags & FL_DIR)
return -1;
- else
- return match_len;
+ if (warn_on_lstat_err && (flags & FL_LSTATERR)) {
+ char *path = xmemdupz(name, match_len);
+ errno = saved_errno;
+ warning_errno(_("failed to lstat '%s'"), path);
+ free(path);
+ }
+ return match_len;
}
-/*
- * Return non-zero if all path components of 'name' exists as a
- * directory. If prefix_len > 0, we will test with the stat()
- * function instead of the lstat() function for a prefix length of
- * 'prefix_len', thus we then allow for symlinks in the prefix part as
- * long as those points to real existing directories.
- */
int has_dirs_only_path(const char *name, int len, int prefix_len)
{
return threaded_has_dirs_only_path(&default_cache, name, len, prefix_len);
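A short, hypothetical caller of the updated check_leading_path() (the wrapper is invented; the return-value semantics are the ones documented in the comment above):

#include "cache.h"

static int leading_path_is_directory(const char *name)
{
	/*
	 * check_leading_path() returns -1 only when the leading path exists
	 * and is a directory; 0 means a component is missing, and a positive
	 * length means a component exists but is not a directory or could
	 * not be lstat()ed.  Passing 1 as the last argument also emits a
	 * warning for that lstat() failure.
	 */
	return check_leading_path(name, strlen(name), 1) == -1;
}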
diff --git a/t/helper/test-bitmap.c b/t/helper/test-bitmap.c
new file mode 100644
index 0000000000..134a1e9d76
--- /dev/null
+++ b/t/helper/test-bitmap.c
@@ -0,0 +1,24 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "pack-bitmap.h"
+
+static int bitmap_list_commits(void)
+{
+ return test_bitmap_commits(the_repository);
+}
+
+int cmd__bitmap(int argc, const char **argv)
+{
+ setup_git_directory();
+
+ if (argc != 2)
+ goto usage;
+
+ if (!strcmp(argv[1], "list-commits"))
+ return bitmap_list_commits();
+
+usage:
+ usage("\ttest-tool bitmap list-commits");
+
+ return -1;
+}
diff --git a/t/helper/test-bloom.c b/t/helper/test-bloom.c
index 2a1ae3dae6..ad3ef1cd77 100644
--- a/t/helper/test-bloom.c
+++ b/t/helper/test-bloom.c
@@ -48,7 +48,7 @@ static void get_bloom_filter_for_commit(const struct object_id *commit_oid)
static const char *bloom_usage = "\n"
" test-tool bloom get_murmur3 <string>\n"
" test-tool bloom generate_filter <string> [<string>...]\n"
-" test-tool get_filter_for_commit <commit-hex>\n";
+" test-tool bloom get_filter_for_commit <commit-hex>\n";
int cmd__bloom(int argc, const char **argv)
{
diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c
index 2430880f78..7c2eb11a8e 100644
--- a/t/helper/test-read-midx.c
+++ b/t/helper/test-read-midx.c
@@ -4,7 +4,7 @@
#include "repository.h"
#include "object-store.h"
-static int read_midx_file(const char *object_dir)
+static int read_midx_file(const char *object_dir, int show_objects)
{
uint32_t i;
struct multi_pack_index *m;
@@ -43,13 +43,29 @@ static int read_midx_file(const char *object_dir)
printf("object-dir: %s\n", m->object_dir);
+ if (show_objects) {
+ struct object_id oid;
+ struct pack_entry e;
+
+ for (i = 0; i < m->num_objects; i++) {
+ nth_midxed_object_oid(&oid, m, i);
+ fill_midx_entry(the_repository, &oid, &e, m);
+
+ printf("%s %"PRIu64"\t%s\n",
+ oid_to_hex(&oid), e.offset, e.p->pack_name);
+ }
+ return 0;
+ }
+
return 0;
}
int cmd__read_midx(int argc, const char **argv)
{
- if (argc != 2)
- usage("read-midx <object-dir>");
+ if (!(argc == 2 || argc == 3))
+ usage("read-midx [--show-objects] <object-dir>");
- return read_midx_file(argv[1]);
+ if (!strcmp(argv[1], "--show-objects"))
+ return read_midx_file(argv[2], 1);
+ return read_midx_file(argv[1], 0);
}
diff --git a/t/helper/test-simple-ipc.c b/t/helper/test-simple-ipc.c
new file mode 100644
index 0000000000..42040ef81b
--- /dev/null
+++ b/t/helper/test-simple-ipc.c
@@ -0,0 +1,787 @@
+/*
+ * test-simple-ipc.c: verify that the Inter-Process Communication works.
+ */
+
+#include "test-tool.h"
+#include "cache.h"
+#include "strbuf.h"
+#include "simple-ipc.h"
+#include "parse-options.h"
+#include "thread-utils.h"
+#include "strvec.h"
+
+#ifndef SUPPORTS_SIMPLE_IPC
+int cmd__simple_ipc(int argc, const char **argv)
+{
+ die("simple IPC not available on this platform");
+}
+#else
+
+/*
+ * The test daemon defines an "application callback" that supports a
+ * series of commands (see `test_app_cb()`).
+ *
+ * Unknown commands are caught here and we send an error message back
+ * to the client process.
+ */
+static int app__unhandled_command(const char *command,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int ret;
+
+ strbuf_addf(&buf, "unhandled command: %s", command);
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * Reply with a single very large buffer. This is to ensure that
+ * long responses are properly handled -- whether the chunking occurs
+ * in the kernel or in the (probably pkt-line) layer.
+ */
+#define BIG_ROWS (10000)
+static int app__big_command(ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int row;
+ int ret;
+
+ for (row = 0; row < BIG_ROWS; row++)
+ strbuf_addf(&buf, "big: %.75d\n", row);
+
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * Reply with a series of lines. This is to ensure that we can incrementally
+ * compute the response and chunk it to the client.
+ */
+#define CHUNK_ROWS (10000)
+static int app__chunk_command(ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int row;
+ int ret;
+
+ for (row = 0; row < CHUNK_ROWS; row++) {
+ strbuf_setlen(&buf, 0);
+ strbuf_addf(&buf, "big: %.75d\n", row);
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ }
+
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * Slowly reply with a series of lines. This is to model an expensive to
+ * compute chunked response (which might happen if this callback is running
+ * in a thread and is fighting for a lock with other threads).
+ */
+#define SLOW_ROWS (1000)
+#define SLOW_DELAY_MS (10)
+static int app__slow_command(ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int row;
+ int ret;
+
+ for (row = 0; row < SLOW_ROWS; row++) {
+ strbuf_setlen(&buf, 0);
+ strbuf_addf(&buf, "big: %.75d\n", row);
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ sleep_millisec(SLOW_DELAY_MS);
+ }
+
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * The client sent a command followed by a (possibly very) large buffer.
+ */
+static int app__sendbytes_command(const char *received,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf_resp = STRBUF_INIT;
+ const char *p = "?";
+ int len_ballast = 0;
+ int k;
+ int errs = 0;
+ int ret;
+
+ if (skip_prefix(received, "sendbytes ", &p))
+ len_ballast = strlen(p);
+
+ /*
+ * Verify that the ballast is n copies of a single letter.
+ * And that the multi-threaded IO layer didn't cross the streams.
+ */
+ for (k = 1; k < len_ballast; k++)
+ if (p[k] != p[0])
+ errs++;
+
+ if (errs)
+ strbuf_addf(&buf_resp, "errs:%d\n", errs);
+ else
+ strbuf_addf(&buf_resp, "rcvd:%c%08d\n", p[0], len_ballast);
+
+ ret = reply_cb(reply_data, buf_resp.buf, buf_resp.len);
+
+ strbuf_release(&buf_resp);
+
+ return ret;
+}
+
+/*
+ * An arbitrary fixed address to verify that the application instance
+ * data is handled properly.
+ */
+static int my_app_data = 42;
+
+static ipc_server_application_cb test_app_cb;
+
+/*
+ * This is the "application callback" that sits on top of the
+ * "ipc-server". It completely defines the set of commands supported
+ * by this application.
+ */
+static int test_app_cb(void *application_data,
+ const char *command,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ /*
+ * Verify that we received the application-data that we passed
+ * when we started the ipc-server. (We have several layers of
+ * callbacks calling callbacks and it's easy to get things mixed
+ * up (especially when some are "void*").)
+ */
+ if (application_data != (void*)&my_app_data)
+ BUG("application_cb: application_data pointer wrong");
+
+ if (!strcmp(command, "quit")) {
+ /*
+ * The client sent a "quit" command. This is an async
+ * request for the server to shut down.
+ *
+ * We DO NOT send the client a response message
+ * (because we have nothing to say and the other
+ * server threads have not yet stopped).
+ *
+ * Tell the ipc-server layer to start shutting down.
+ * This includes: stop listening for new connections
+ * on the socket/pipe and telling all worker threads
+ * to finish/drain their outgoing responses to other
+ * clients.
+ *
+ * This DOES NOT force an immediate sync shutdown.
+ */
+ return SIMPLE_IPC_QUIT;
+ }
+
+ if (!strcmp(command, "ping")) {
+ const char *answer = "pong";
+ return reply_cb(reply_data, answer, strlen(answer));
+ }
+
+ if (!strcmp(command, "big"))
+ return app__big_command(reply_cb, reply_data);
+
+ if (!strcmp(command, "chunk"))
+ return app__chunk_command(reply_cb, reply_data);
+
+ if (!strcmp(command, "slow"))
+ return app__slow_command(reply_cb, reply_data);
+
+ if (starts_with(command, "sendbytes "))
+ return app__sendbytes_command(command, reply_cb, reply_data);
+
+ return app__unhandled_command(command, reply_cb, reply_data);
+}
+
+struct cl_args
+{
+ const char *subcommand;
+ const char *path;
+ const char *token;
+
+ int nr_threads;
+ int max_wait_sec;
+ int bytecount;
+ int batchsize;
+
+ char bytevalue;
+};
+
+static struct cl_args cl_args = {
+ .subcommand = NULL,
+ .path = "ipc-test",
+ .token = NULL,
+
+ .nr_threads = 5,
+ .max_wait_sec = 60,
+ .bytecount = 1024,
+ .batchsize = 10,
+
+ .bytevalue = 'x',
+};
+
+/*
+ * This process will run as a simple-ipc server and listen for IPC commands
+ * from client processes.
+ */
+static int daemon__run_server(void)
+{
+ int ret;
+
+ struct ipc_server_opts opts = {
+ .nr_threads = cl_args.nr_threads,
+ };
+
+ /*
+ * Synchronously run the ipc-server. We don't need any application
+ * instance data, so pass an arbitrary pointer (that we'll later
+ * verify made the round trip).
+ */
+ ret = ipc_server_run(cl_args.path, &opts, test_app_cb, (void*)&my_app_data);
+ if (ret == -2)
+ error(_("socket/pipe already in use: '%s'"), cl_args.path);
+ else if (ret == -1)
+ error_errno(_("could not start server on: '%s'"), cl_args.path);
+
+ return ret;
+}
+
+#ifndef GIT_WINDOWS_NATIVE
+/*
+ * This is adapted from `daemonize()`. Use `fork()` to directly create and
+ * run the daemon in a child process.
+ */
+static int spawn_server(pid_t *pid)
+{
+ struct ipc_server_opts opts = {
+ .nr_threads = cl_args.nr_threads,
+ };
+
+ *pid = fork();
+
+ switch (*pid) {
+ case 0:
+ if (setsid() == -1)
+ error_errno(_("setsid failed"));
+ close(0);
+ close(1);
+ close(2);
+ sanitize_stdfds();
+
+ return ipc_server_run(cl_args.path, &opts, test_app_cb,
+ (void*)&my_app_data);
+
+ case -1:
+ return error_errno(_("could not spawn daemon in the background"));
+
+ default:
+ return 0;
+ }
+}
+#else
+/*
+ * Conceptually like `daemonize()` but different because Windows does not
+ * have `fork(2)`. Spawn a normal Windows child process but without the
+ * limitations of `start_command()` and `finish_command()`.
+ */
+static int spawn_server(pid_t *pid)
+{
+ char test_tool_exe[MAX_PATH];
+ struct strvec args = STRVEC_INIT;
+ int in, out;
+
+ GetModuleFileNameA(NULL, test_tool_exe, MAX_PATH);
+
+ in = open("/dev/null", O_RDONLY);
+ out = open("/dev/null", O_WRONLY);
+
+ strvec_push(&args, test_tool_exe);
+ strvec_push(&args, "simple-ipc");
+ strvec_push(&args, "run-daemon");
+ strvec_pushf(&args, "--name=%s", cl_args.path);
+ strvec_pushf(&args, "--threads=%d", cl_args.nr_threads);
+
+ *pid = mingw_spawnvpe(args.v[0], args.v, NULL, NULL, in, out, out);
+ close(in);
+ close(out);
+
+ strvec_clear(&args);
+
+ if (*pid < 0)
+ return error(_("could not spawn daemon in the background"));
+
+ return 0;
+}
+#endif
+
+/*
+ * This is adapted from `wait_or_whine()`. Watch the child process and
+ * let it get started and begin listening for requests on the socket
+ * before reporting our success.
+ */
+static int wait_for_server_startup(pid_t pid_child)
+{
+ int status;
+ pid_t pid_seen;
+ enum ipc_active_state s;
+ time_t time_limit, now;
+
+ time(&time_limit);
+ time_limit += cl_args.max_wait_sec;
+
+ for (;;) {
+ pid_seen = waitpid(pid_child, &status, WNOHANG);
+
+ if (pid_seen == -1)
+ return error_errno(_("waitpid failed"));
+
+ else if (pid_seen == 0) {
+ /*
+ * The child is still running (this should be
+ * the normal case). Try to connect to it on
+ * the socket and see if it is ready for
+ * business.
+ *
+ * If there is another daemon already running,
+ * our child will fail to start (possibly
+ * after a timeout on the lock), but we don't
+ * care (who responds) if the socket is live.
+ */
+ s = ipc_get_active_state(cl_args.path);
+ if (s == IPC_STATE__LISTENING)
+ return 0;
+
+ time(&now);
+ if (now > time_limit)
+ return error(_("daemon not online yet"));
+
+ continue;
+ }
+
+ else if (pid_seen == pid_child) {
+ /*
+ * The new child daemon process shut down while
+ * it was starting up, so it is not listening
+ * on the socket.
+ *
+ * Try to ping the socket in the odd chance
+ * that another daemon started (or was already
+ * running) while our child was starting.
+ *
+ * Again, we don't care who services the socket.
+ */
+ s = ipc_get_active_state(cl_args.path);
+ if (s == IPC_STATE__LISTENING)
+ return 0;
+
+ /*
+ * We don't care about the WEXITSTATUS() nor
+ * any of the WIF*(status) values because
+ * `cmd__simple_ipc()` does the `!!result`
+ * trick on all function return values.
+ *
+ * So it is sufficient to just report the
+ * early shutdown as an error.
+ */
+ return error(_("daemon failed to start"));
+ }
+
+ else
+ return error(_("waitpid is confused"));
+ }
+}
+
+/*
+ * This process will start a simple-ipc server in a background process and
+ * wait for it to become ready. This is like `daemonize()` but gives us
+ * more control and better error reporting (and makes it easier to write
+ * unit tests).
+ */
+static int daemon__start_server(void)
+{
+ pid_t pid_child;
+ int ret;
+
+ /*
+ * Run the actual daemon in a background process.
+ */
+ ret = spawn_server(&pid_child);
+ if (pid_child <= 0)
+ return ret;
+
+ /*
+ * Let the parent wait for the child process to get started
+ * and begin listening for requests on the socket.
+ */
+ ret = wait_for_server_startup(pid_child);
+
+ return ret;
+}
+
+/*
+ * This process will run a quick probe to see if a simple-ipc server
+ * is active on this path.
+ *
+ * Returns 0 if the server is alive.
+ */
+static int client__probe_server(void)
+{
+ enum ipc_active_state s;
+
+ s = ipc_get_active_state(cl_args.path);
+ switch (s) {
+ case IPC_STATE__LISTENING:
+ return 0;
+
+ case IPC_STATE__NOT_LISTENING:
+ return error("no server listening at '%s'", cl_args.path);
+
+ case IPC_STATE__PATH_NOT_FOUND:
+ return error("path not found '%s'", cl_args.path);
+
+ case IPC_STATE__INVALID_PATH:
+ return error("invalid pipe/socket name '%s'", cl_args.path);
+
+ case IPC_STATE__OTHER_ERROR:
+ default:
+ return error("other error for '%s'", cl_args.path);
+ }
+}
+
+/*
+ * Send an IPC command token to an already-running server daemon and
+ * print the response.
+ *
+ * This is a simple 1 word command/token that `test_app_cb()` (in the
+ * daemon process) will understand.
+ */
+static int client__send_ipc(void)
+{
+ const char *command = "(no-command)";
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+
+ if (cl_args.token && *cl_args.token)
+ command = cl_args.token;
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+
+ if (!ipc_client_send_command(cl_args.path, &options, command, &buf)) {
+ if (buf.len) {
+ printf("%s\n", buf.buf);
+ fflush(stdout);
+ }
+ strbuf_release(&buf);
+
+ return 0;
+ }
+
+ return error("failed to send '%s' to '%s'", command, cl_args.path);
+}
+
+/*
+ * Send an IPC command to an already-running server and ask it to
+ * shut down. "send quit" is an async request and queues a shutdown
+ * event in the server, so we spin and wait here for it to actually
+ * shut down, to make the unit tests a little easier to write.
+ */
+static int client__stop_server(void)
+{
+ int ret;
+ time_t time_limit, now;
+ enum ipc_active_state s;
+
+ time(&time_limit);
+ time_limit += cl_args.max_wait_sec;
+
+ cl_args.token = "quit";
+
+ ret = client__send_ipc();
+ if (ret)
+ return ret;
+
+ for (;;) {
+ sleep_millisec(100);
+
+ s = ipc_get_active_state(cl_args.path);
+
+ if (s != IPC_STATE__LISTENING) {
+ /*
+ * The socket/pipe is gone and/or has stopped
+ * responding. Let's assume that the daemon
+ * process has exited too.
+ */
+ return 0;
+ }
+
+ time(&now);
+ if (now > time_limit)
+ return error(_("daemon has not shutdown yet"));
+ }
+}
+
+/*
+ * Send an IPC command followed by ballast to confirm that a large
+ * message can be sent and that the kernel or pkt-line layers will
+ * properly chunk it and that the daemon receives the entire message.
+ */
+static int do_sendbytes(int bytecount, char byte, const char *path,
+ const struct ipc_client_connect_options *options)
+{
+ struct strbuf buf_send = STRBUF_INIT;
+ struct strbuf buf_resp = STRBUF_INIT;
+
+ strbuf_addstr(&buf_send, "sendbytes ");
+ strbuf_addchars(&buf_send, byte, bytecount);
+
+ if (!ipc_client_send_command(path, options, buf_send.buf, &buf_resp)) {
+ strbuf_rtrim(&buf_resp);
+ printf("sent:%c%08d %s\n", byte, bytecount, buf_resp.buf);
+ fflush(stdout);
+ strbuf_release(&buf_send);
+ strbuf_release(&buf_resp);
+
+ return 0;
+ }
+
+ return error("client failed to sendbytes(%d, '%c') to '%s'",
+ bytecount, byte, path);
+}
+
+/*
+ * Send an IPC command with ballast to an already-running server daemon.
+ */
+static int client__sendbytes(void)
+{
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+ options.uds_disallow_chdir = 0;
+
+ return do_sendbytes(cl_args.bytecount, cl_args.bytevalue, cl_args.path,
+ &options);
+}
+
+struct multiple_thread_data {
+ pthread_t pthread_id;
+ struct multiple_thread_data *next;
+ const char *path;
+ int bytecount;
+ int batchsize;
+ int sum_errors;
+ int sum_good;
+ char letter;
+};
+
+static void *multiple_thread_proc(void *_multiple_thread_data)
+{
+ struct multiple_thread_data *d = _multiple_thread_data;
+ int k;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+ /*
+ * A multi-threaded client should not be randomly calling chdir().
+ * The test will pass without this restriction because the test is
+ * not otherwise accessing the filesystem, but it makes us honest.
+ */
+ options.uds_disallow_chdir = 1;
+
+ trace2_thread_start("multiple");
+
+ for (k = 0; k < d->batchsize; k++) {
+ if (do_sendbytes(d->bytecount + k, d->letter, d->path, &options))
+ d->sum_errors++;
+ else
+ d->sum_good++;
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/*
+ * Start a client-side thread pool. Each thread sends a series of
+ * IPC requests. Each request is on a new connection to the server.
+ */
+static int client__multiple(void)
+{
+ struct multiple_thread_data *list = NULL;
+ int k;
+ int sum_join_errors = 0;
+ int sum_thread_errors = 0;
+ int sum_good = 0;
+
+ for (k = 0; k < cl_args.nr_threads; k++) {
+ struct multiple_thread_data *d = xcalloc(1, sizeof(*d));
+ d->next = list;
+ d->path = cl_args.path;
+ d->bytecount = cl_args.bytecount + cl_args.batchsize*(k/26);
+ d->batchsize = cl_args.batchsize;
+ d->sum_errors = 0;
+ d->sum_good = 0;
+ d->letter = 'A' + (k % 26);
+
+ if (pthread_create(&d->pthread_id, NULL, multiple_thread_proc, d)) {
+ warning("failed to create thread[%d] skipping remainder", k);
+ free(d);
+ break;
+ }
+
+ list = d;
+ }
+
+ while (list) {
+ struct multiple_thread_data *d = list;
+
+ if (pthread_join(d->pthread_id, NULL))
+ sum_join_errors++;
+
+ sum_thread_errors += d->sum_errors;
+ sum_good += d->sum_good;
+
+ list = d->next;
+ free(d);
+ }
+
+ printf("client (good %d) (join %d), (errors %d)\n",
+ sum_good, sum_join_errors, sum_thread_errors);
+
+ return (sum_join_errors + sum_thread_errors) ? 1 : 0;
+}
+
+int cmd__simple_ipc(int argc, const char **argv)
+{
+ const char * const simple_ipc_usage[] = {
+ N_("test-helper simple-ipc is-active [<name>] [<options>]"),
+ N_("test-helper simple-ipc run-daemon [<name>] [<threads>]"),
+ N_("test-helper simple-ipc start-daemon [<name>] [<threads>] [<max-wait>]"),
+ N_("test-helper simple-ipc stop-daemon [<name>] [<max-wait>]"),
+ N_("test-helper simple-ipc send [<name>] [<token>]"),
+ N_("test-helper simple-ipc sendbytes [<name>] [<bytecount>] [<byte>]"),
+ N_("test-helper simple-ipc multiple [<name>] [<threads>] [<bytecount>] [<batchsize>]"),
+ NULL
+ };
+
+ const char *bytevalue = NULL;
+
+ struct option options[] = {
+#ifndef GIT_WINDOWS_NATIVE
+ OPT_STRING(0, "name", &cl_args.path, N_("name"), N_("name or pathname of unix domain socket")),
+#else
+ OPT_STRING(0, "name", &cl_args.path, N_("name"), N_("named-pipe name")),
+#endif
+ OPT_INTEGER(0, "threads", &cl_args.nr_threads, N_("number of threads in server thread pool")),
+ OPT_INTEGER(0, "max-wait", &cl_args.max_wait_sec, N_("seconds to wait for daemon to start or stop")),
+
+ OPT_INTEGER(0, "bytecount", &cl_args.bytecount, N_("number of bytes")),
+ OPT_INTEGER(0, "batchsize", &cl_args.batchsize, N_("number of requests per thread")),
+
+ OPT_STRING(0, "byte", &bytevalue, N_("byte"), N_("ballast character")),
+ OPT_STRING(0, "token", &cl_args.token, N_("token"), N_("command token to send to the server")),
+
+ OPT_END()
+ };
+
+ if (argc < 2)
+ usage_with_options(simple_ipc_usage, options);
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(simple_ipc_usage, options);
+
+ if (argc == 2 && !strcmp(argv[1], "SUPPORTS_SIMPLE_IPC"))
+ return 0;
+
+ cl_args.subcommand = argv[1];
+
+ argc--;
+ argv++;
+
+ argc = parse_options(argc, argv, NULL, options, simple_ipc_usage, 0);
+
+ if (cl_args.nr_threads < 1)
+ cl_args.nr_threads = 1;
+ if (cl_args.max_wait_sec < 0)
+ cl_args.max_wait_sec = 0;
+ if (cl_args.bytecount < 1)
+ cl_args.bytecount = 1;
+ if (cl_args.batchsize < 1)
+ cl_args.batchsize = 1;
+
+ if (bytevalue && *bytevalue)
+ cl_args.bytevalue = bytevalue[0];
+
+ /*
+ * Use '!!' on all dispatch functions to map from `error()` style
+ * (returns -1) to `test_must_fail` style (expects 1). This
+ * makes shell error messages less confusing.
+ */
+
+ if (!strcmp(cl_args.subcommand, "is-active"))
+ return !!client__probe_server();
+
+ if (!strcmp(cl_args.subcommand, "run-daemon"))
+ return !!daemon__run_server();
+
+ if (!strcmp(cl_args.subcommand, "start-daemon"))
+ return !!daemon__start_server();
+
+ /*
+ * Client commands follow. Ensure a server is running before
+ * sending any data. This might be overkill, but then again
+ * this is a test harness.
+ */
+
+ if (!strcmp(cl_args.subcommand, "stop-daemon")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__stop_server();
+ }
+
+ if (!strcmp(cl_args.subcommand, "send")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__send_ipc();
+ }
+
+ if (!strcmp(cl_args.subcommand, "sendbytes")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__sendbytes();
+ }
+
+ if (!strcmp(cl_args.subcommand, "multiple")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__multiple();
+ }
+
+ die("Unhandled subcommand: '%s'", cl_args.subcommand);
+}
+#endif
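The test helper above exercises the full API; condensed to its essentials, the server-side pattern looks roughly like this (a hypothetical sketch assuming SUPPORTS_SIMPLE_IPC; the callback name and the "pong-ipc" path are invented):

#include "cache.h"
#include "simple-ipc.h"

static int pong_cb(void *data, const char *command,
		   ipc_server_reply_cb *reply_cb,
		   struct ipc_server_reply_data *reply_data)
{
	if (!strcmp(command, "quit"))
		return SIMPLE_IPC_QUIT;	/* ask the server pool to shut down */
	return reply_cb(reply_data, "pong", 4);
}

static int run_pong_server(void)
{
	struct ipc_server_opts opts = { .nr_threads = 2 };

	/* blocks until the server shuts down or hits an error */
	return ipc_server_run("pong-ipc", &opts, pong_cb, NULL);
}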
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index f97cd9f48a..25c6a37e93 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -15,6 +15,7 @@ struct test_cmd {
static struct test_cmd cmds[] = {
{ "advise", cmd__advise_if_enabled },
+ { "bitmap", cmd__bitmap },
{ "bloom", cmd__bloom },
{ "chmtime", cmd__chmtime },
{ "config", cmd__config },
@@ -65,6 +66,7 @@ static struct test_cmd cmds[] = {
{ "sha1", cmd__sha1 },
{ "sha256", cmd__sha256 },
{ "sigchain", cmd__sigchain },
+ { "simple-ipc", cmd__simple_ipc },
{ "strcmp-offset", cmd__strcmp_offset },
{ "string-list", cmd__string_list },
{ "submodule-config", cmd__submodule_config },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index 28072c0ad5..f03c5988b2 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -5,6 +5,7 @@
#include "git-compat-util.h"
int cmd__advise_if_enabled(int argc, const char **argv);
+int cmd__bitmap(int argc, const char **argv);
int cmd__bloom(int argc, const char **argv);
int cmd__chmtime(int argc, const char **argv);
int cmd__config(int argc, const char **argv);
@@ -55,6 +56,7 @@ int cmd__sha1(int argc, const char **argv);
int cmd__oid_array(int argc, const char **argv);
int cmd__sha256(int argc, const char **argv);
int cmd__sigchain(int argc, const char **argv);
+int cmd__simple_ipc(int argc, const char **argv);
int cmd__strcmp_offset(int argc, const char **argv);
int cmd__string_list(int argc, const char **argv);
int cmd__submodule_config(int argc, const char **argv);
diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh
index b3e725f031..452be01056 100755
--- a/t/perf/p5310-pack-bitmaps.sh
+++ b/t/perf/p5310-pack-bitmaps.sh
@@ -15,6 +15,12 @@ test_expect_success 'setup bitmap config' '
git config pack.writebitmaps true
'
+# we need to create the tag up front such that it is covered by the repack and
+# thus by generated bitmaps.
+test_expect_success 'create tags' '
+ git tag --message="tag pointing to HEAD" perf-tag HEAD
+'
+
test_perf 'repack to disk' '
git repack -ad
'
@@ -43,6 +49,14 @@ test_perf 'rev-list (objects)' '
git rev-list --all --use-bitmap-index --objects >/dev/null
'
+test_perf 'rev-list with tag negated via --not --all (objects)' '
+ git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null
+'
+
+test_perf 'rev-list with negative tag (objects)' '
+ git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null
+'
+
test_perf 'rev-list count with blob:none' '
git rev-list --use-bitmap-index --count --objects --all \
--filter=blob:none >/dev/null
diff --git a/t/t0052-simple-ipc.sh b/t/t0052-simple-ipc.sh
new file mode 100755
index 0000000000..ff98be31a5
--- /dev/null
+++ b/t/t0052-simple-ipc.sh
@@ -0,0 +1,122 @@
+#!/bin/sh
+
+test_description='simple command server'
+
+. ./test-lib.sh
+
+test-tool simple-ipc SUPPORTS_SIMPLE_IPC || {
+ skip_all='simple IPC not supported on this platform'
+ test_done
+}
+
+stop_simple_IPC_server () {
+ test-tool simple-ipc stop-daemon
+}
+
+test_expect_success 'start simple command server' '
+ test_atexit stop_simple_IPC_server &&
+ test-tool simple-ipc start-daemon --threads=8 &&
+ test-tool simple-ipc is-active
+'
+
+test_expect_success 'simple command server' '
+ test-tool simple-ipc send --token=ping >actual &&
+ echo pong >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'servers cannot share the same path' '
+ test_must_fail test-tool simple-ipc run-daemon &&
+ test-tool simple-ipc is-active
+'
+
+test_expect_success 'big response' '
+ test-tool simple-ipc send --token=big >actual &&
+ test_line_count -ge 10000 actual &&
+ grep -q "big: [0]*9999\$" actual
+'
+
+test_expect_success 'chunk response' '
+ test-tool simple-ipc send --token=chunk >actual &&
+ test_line_count -ge 10000 actual &&
+ grep -q "big: [0]*9999\$" actual
+'
+
+test_expect_success 'slow response' '
+ test-tool simple-ipc send --token=slow >actual &&
+ test_line_count -ge 100 actual &&
+ grep -q "big: [0]*99\$" actual
+'
+
+# Send an IPC with n=100,000 bytes of ballast. This should be large enough
+# to force both the kernel and the pkt-line layer to chunk the message to the
+# daemon and for the daemon to receive it in chunks.
+#
+test_expect_success 'sendbytes' '
+ test-tool simple-ipc sendbytes --bytecount=100000 --byte=A >actual &&
+ grep "sent:A00100000 rcvd:A00100000" actual
+'
+
+# Start a series of <threads> client threads that each make <batchsize>
+# IPC requests to the server. Each of the (<threads> * <batchsize>)
+# requests will open a new connection to the server and randomly bind to
+# a server thread. Each client thread exits after completing its batch,
+# so the number of concurrently live client threads will be smaller than
+# the total number of requests.
+# Each request will send a message containing at least <bytecount> bytes
+# of ballast. (Responses are small.)
+#
+# The purpose here is to test threading in the server and responding to
+# many concurrent client requests (regardless of whether they come from
+# 1 client process or many). And to test that the server side of the
+# named pipe/socket is stable. (On Windows this means that the server
+# pipe is properly recycled.)
+#
+# On Windows it also lets us adjust the connection timeout in the
+# `ipc_client_send_command()`.
+#
+# Note it is easy to drive the system into failure by requesting an
+# insane number of threads on client or server and/or increasing the
+# per-thread batchsize or the per-request bytecount (ballast).
+# On Windows these failures look like "pipe is busy" errors.
+# So I've chosen fairly conservative values for now.
+#
+# We expect output of the form "sent:<letter><length> ..."
+# With terms (7, 19, 13) we expect:
+# <letter> in [A-G]
+# <length> in [19+0 .. 19+(13-1)]
+# and (7 * 13) successful responses.
+#
+test_expect_success 'stress test threads' '
+ test-tool simple-ipc multiple \
+ --threads=7 \
+ --bytecount=19 \
+ --batchsize=13 \
+ >actual &&
+ test_line_count = 92 actual &&
+ grep "good 91" actual &&
+ grep "sent:A" <actual >actual_a &&
+ cat >expect_a <<-EOF &&
+ sent:A00000019 rcvd:A00000019
+ sent:A00000020 rcvd:A00000020
+ sent:A00000021 rcvd:A00000021
+ sent:A00000022 rcvd:A00000022
+ sent:A00000023 rcvd:A00000023
+ sent:A00000024 rcvd:A00000024
+ sent:A00000025 rcvd:A00000025
+ sent:A00000026 rcvd:A00000026
+ sent:A00000027 rcvd:A00000027
+ sent:A00000028 rcvd:A00000028
+ sent:A00000029 rcvd:A00000029
+ sent:A00000030 rcvd:A00000030
+ sent:A00000031 rcvd:A00000031
+ EOF
+ test_cmp expect_a actual_a
+'
+
+test_expect_success 'stop-daemon works' '
+ test-tool simple-ipc stop-daemon &&
+ test_must_fail test-tool simple-ipc is-active &&
+ test_must_fail test-tool simple-ipc send --token=ping
+'
+
+test_done
diff --git a/t/t2021-checkout-overwrite.sh b/t/t2021-checkout-overwrite.sh
index c2ada7de37..70d69263e6 100755
--- a/t/t2021-checkout-overwrite.sh
+++ b/t/t2021-checkout-overwrite.sh
@@ -51,4 +51,16 @@ test_expect_success SYMLINKS 'the symlink remained' '
test -h a/b
'
+test_expect_success SYMLINKS 'checkout -f must not follow symlinks when removing entries' '
+ git checkout -f start &&
+ mkdir dir &&
+ >dir/f &&
+ git add dir/f &&
+ git commit -m "add dir/f" &&
+ mv dir untracked &&
+ ln -s untracked dir &&
+ git checkout -f HEAD~ &&
+ test_path_is_file untracked/f
+'
+
test_done
diff --git a/t/t3060-ls-files-with-tree.sh b/t/t3060-ls-files-with-tree.sh
index 52ed665fcd..b257c792a4 100755
--- a/t/t3060-ls-files-with-tree.sh
+++ b/t/t3060-ls-files-with-tree.sh
@@ -47,6 +47,12 @@ test_expect_success setup '
git add .
'
+test_expect_success 'usage' '
+ test_expect_code 128 git ls-files --with-tree=HEAD -u &&
+ test_expect_code 128 git ls-files --with-tree=HEAD -s &&
+ test_expect_code 128 git ls-files --recurse-submodules --with-tree=HEAD
+'
+
test_expect_success 'git ls-files --with-tree should succeed from subdir' '
# We have to run from a sub-directory to trigger prune_path
# Then we finally get to run our --with-tree test
@@ -60,4 +66,39 @@ test_expect_success \
'git ls-files --with-tree should add entries from named tree.' \
'test_cmp expected output'
+test_expect_success 'no duplicates in --with-tree output' '
+ git ls-files --with-tree=HEAD >actual &&
+ sort -u actual >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'setup: output in a conflict' '
+ test_create_repo conflict &&
+ test_commit -C conflict BASE file &&
+ test_commit -C conflict A file foo &&
+ git -C conflict reset --hard BASE &&
+ test_commit -C conflict B file bar
+'
+
+test_expect_success 'output in a conflict' '
+ test_must_fail git -C conflict merge A B &&
+ cat >expected <<-\EOF &&
+ file
+ file
+ file
+ file
+ EOF
+ git -C conflict ls-files --with-tree=HEAD >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'output with removed .git/index' '
+ cat >expected <<-\EOF &&
+ file
+ EOF
+ rm conflict/.git/index &&
+ git -C conflict ls-files --with-tree=HEAD >actual &&
+ test_cmp expected actual
+'
+
test_done
diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh
index 1b26c4c2ef..e30bc48a29 100755
--- a/t/t3206-range-diff.sh
+++ b/t/t3206-range-diff.sh
@@ -521,6 +521,30 @@ test_expect_success 'format-patch --range-diff as commentary' '
grep "> 1: .* new message" 0001-*
'
+test_expect_success 'format-patch --range-diff reroll-count with a non-integer' '
+ git format-patch --range-diff=HEAD~1 -v2.9 HEAD~1 >actual &&
+ test_when_finished "rm v2.9-0001-*" &&
+ test_line_count = 1 actual &&
+ test_i18ngrep "^Range-diff:$" v2.9-0001-* &&
+ grep "> 1: .* new message" v2.9-0001-*
+'
+
+test_expect_success 'format-patch --range-diff reroll-count with an integer' '
+ git format-patch --range-diff=HEAD~1 -v2 HEAD~1 >actual &&
+ test_when_finished "rm v2-0001-*" &&
+ test_line_count = 1 actual &&
+ test_i18ngrep "^Range-diff ..* v1:$" v2-0001-* &&
+ grep "> 1: .* new message" v2-0001-*
+'
+
+test_expect_success 'format-patch --range-diff with v0' '
+ git format-patch --range-diff=HEAD~1 -v0 HEAD~1 >actual &&
+ test_when_finished "rm v0-0001-*" &&
+ test_line_count = 1 actual &&
+ test_i18ngrep "^Range-diff:$" v0-0001-* &&
+ grep "> 1: .* new message" v0-0001-*
+'
+
test_expect_success 'range-diff overrides diff.noprefix internally' '
git -c diff.noprefix=true range-diff HEAD^...
'
diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh
index 587b408063..0bb88aa982 100755
--- a/t/t3400-rebase.sh
+++ b/t/t3400-rebase.sh
@@ -388,22 +388,6 @@ test_expect_success 'rebase--merge.sh and --show-current-patch' '
)
'
-test_expect_success 'rebase -c rebase.useBuiltin=false warning' '
- expected="rebase.useBuiltin support has been removed" &&
-
- # Only warn when the legacy rebase is requested...
- test_must_fail git -c rebase.useBuiltin=false rebase 2>err &&
- test_i18ngrep "$expected" err &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=false git rebase 2>err &&
- test_i18ngrep "$expected" err &&
-
- # ...not when we would have used the built-in anyway
- test_must_fail git -c rebase.useBuiltin=true rebase 2>err &&
- test_must_be_empty err &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true git rebase 2>err &&
- test_must_be_empty err
-'
-
test_expect_success 'switch to branch checked out here' '
git checkout main &&
git rebase main main
diff --git a/t/t3510-cherry-pick-sequence.sh b/t/t3510-cherry-pick-sequence.sh
index b76cb6de91..49010aa946 100755
--- a/t/t3510-cherry-pick-sequence.sh
+++ b/t/t3510-cherry-pick-sequence.sh
@@ -65,7 +65,7 @@ test_expect_success 'cherry-pick persists opts correctly' '
# gets interrupted, use a high-enough number that is larger
# than the number of parents of any commit we have created
mainline=4 &&
- test_expect_code 128 git cherry-pick -s -m $mainline --strategy=recursive -X patience -X ours initial..anotherpick &&
+ test_expect_code 128 git cherry-pick -s -m $mainline --strategy=recursive -X patience -X ours --edit initial..anotherpick &&
test_path_is_dir .git/sequencer &&
test_path_is_file .git/sequencer/head &&
test_path_is_file .git/sequencer/todo &&
@@ -84,6 +84,36 @@ test_expect_success 'cherry-pick persists opts correctly' '
ours
EOF
git config --file=.git/sequencer/opts --get-all options.strategy-option >actual &&
+ test_cmp expect actual &&
+ echo "true" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.edit >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'revert persists opts correctly' '
+ pristine_detach initial &&
+ # to make sure that the session to revert a sequence
+ # gets interrupted, revert commits that are not in the history
+ # of HEAD.
+ test_expect_code 1 git revert -s --strategy=recursive -X patience -X ours --no-edit picked yetanotherpick &&
+ test_path_is_dir .git/sequencer &&
+ test_path_is_file .git/sequencer/head &&
+ test_path_is_file .git/sequencer/todo &&
+ test_path_is_file .git/sequencer/opts &&
+ echo "true" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.signoff >actual &&
+ test_cmp expect actual &&
+ echo "recursive" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.strategy >actual &&
+ test_cmp expect actual &&
+ cat >expect <<-\EOF &&
+ patience
+ ours
+ EOF
+ git config --file=.git/sequencer/opts --get-all options.strategy-option >actual &&
+ test_cmp expect actual &&
+ echo "false" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.edit >actual &&
test_cmp expect actual
'
diff --git a/t/t3512-cherry-pick-submodule.sh b/t/t3512-cherry-pick-submodule.sh
index 822f2d4bfb..c657840db3 100755
--- a/t/t3512-cherry-pick-submodule.sh
+++ b/t/t3512-cherry-pick-submodule.sh
@@ -8,8 +8,11 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-submodule-update.sh
-KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
-KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
+if test "$GIT_TEST_MERGE_ALGORITHM" != ort
+then
+ KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
+ KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
+fi
test_submodule_switch "cherry-pick"
test_expect_success 'unrelated submodule/file conflict is ignored' '
diff --git a/t/t3513-revert-submodule.sh b/t/t3513-revert-submodule.sh
index a759f12cbb..74cd96e582 100755
--- a/t/t3513-revert-submodule.sh
+++ b/t/t3513-revert-submodule.sh
@@ -30,7 +30,10 @@ git_revert () {
git revert HEAD
}
-KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
+if test "$GIT_TEST_MERGE_ALGORITHM" != ort
+then
+ KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
+fi
test_submodule_switch_func "git_revert"
test_done
diff --git a/t/t3800-mktag.sh b/t/t3800-mktag.sh
index 60a666da59..6275c98523 100755
--- a/t/t3800-mktag.sh
+++ b/t/t3800-mktag.sh
@@ -17,7 +17,7 @@ check_verify_failure () {
grep '$2' message &&
if test '$3' != '--no-strict'
then
- test_must_fail git mktag --no-strict <tag.sig 2>message.no-strict &&xb
+ test_must_fail git mktag --no-strict <tag.sig 2>message.no-strict &&
grep '$2' message.no-strict
fi
"
diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh
index cdd3154e70..712d4b5ddf 100755
--- a/t/t4014-format-patch.sh
+++ b/t/t4014-format-patch.sh
@@ -386,6 +386,30 @@ test_expect_success 'reroll count (-v)' '
! grep -v "^Subject: \[PATCH v4 [0-3]/3\] " subjects
'
+test_expect_success 'reroll count (-v) with a fractional number' '
+ rm -fr patches &&
+ git format-patch -o patches --cover-letter -v4.4 main..side >list &&
+ ! grep -v "^patches/v4.4-000[0-3]-" list &&
+ sed -n -e "/^Subject: /p" $(cat list) >subjects &&
+ ! grep -v "^Subject: \[PATCH v4.4 [0-3]/3\] " subjects
+'
+
+test_expect_success 'reroll (-v) count with a non-number' '
+ rm -fr patches &&
+ git format-patch -o patches --cover-letter -v4rev2 main..side >list &&
+ ! grep -v "^patches/v4rev2-000[0-3]-" list &&
+ sed -n -e "/^Subject: /p" $(cat list) >subjects &&
+ ! grep -v "^Subject: \[PATCH v4rev2 [0-3]/3\] " subjects
+'
+
+test_expect_success 'reroll (-v) count with a non-pathname character' '
+ rm -fr patches &&
+ git format-patch -o patches --cover-letter -v4---..././../--1/.2// main..side >list &&
+ ! grep -v "patches/v4-\.-\.-\.-1-\.2-000[0-3]-" list &&
+ sed -n -e "/^Subject: /p" $(cat list) >subjects &&
+ ! grep -v "^Subject: \[PATCH v4---\.\.\./\./\.\./--1/\.2// [0-3]/3\] " subjects
+'
+
check_threading () {
expect="$1" &&
shift &&
@@ -2255,6 +2279,16 @@ test_expect_success 'interdiff: reroll-count' '
test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch
'
+test_expect_success 'interdiff: reroll-count with a non-integer' '
+ git format-patch --cover-letter --interdiff=boop~2 -v2.2 -1 boop &&
+ test_i18ngrep "^Interdiff:$" v2.2-0000-cover-letter.patch
+'
+
+test_expect_success 'interdiff: reroll-count with an integer' '
+ git format-patch --cover-letter --interdiff=boop~2 -v2 -1 boop &&
+ test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch
+'
+
test_expect_success 'interdiff: solo-patch' '
cat >expect <<-\EOF &&
+fleep
diff --git a/t/t4053-diff-no-index.sh b/t/t4053-diff-no-index.sh
index 0168946b63..3feadf0e35 100755
--- a/t/t4053-diff-no-index.sh
+++ b/t/t4053-diff-no-index.sh
@@ -16,6 +16,11 @@ test_expect_success 'setup' '
echo 1 >non/git/b
'
+test_expect_success 'git diff --no-index --exit-code' '
+ git diff --no-index --exit-code a/1 non/git/a &&
+ test_expect_code 1 git diff --no-index --exit-code a/1 a/2
+'
+
test_expect_success 'git diff --no-index directories' '
test_expect_code 1 git diff --no-index a b >cnt &&
test_line_count = 14 cnt
@@ -144,4 +149,59 @@ test_expect_success 'diff --no-index allows external diff' '
test_cmp expect actual
'
+test_expect_success 'diff --no-index normalizes mode: no changes' '
+ echo foo >x &&
+ cp x y &&
+ git diff --no-index x y >out &&
+ test_must_be_empty out
+'
+
+test_expect_success POSIXPERM 'diff --no-index normalizes mode: chmod +x' '
+ chmod +x y &&
+ cat >expected <<-\EOF &&
+ diff --git a/x b/y
+ old mode 100644
+ new mode 100755
+ EOF
+ test_expect_code 1 git diff --no-index x y >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success POSIXPERM 'diff --no-index normalizes: mode not like git mode' '
+ chmod 666 x &&
+ chmod 777 y &&
+ cat >expected <<-\EOF &&
+ diff --git a/x b/y
+ old mode 100644
+ new mode 100755
+ EOF
+ test_expect_code 1 git diff --no-index x y >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success POSIXPERM,SYMLINKS 'diff --no-index normalizes: mode not like git mode (symlink)' '
+ ln -s y z &&
+ X_OID=$(git hash-object --stdin <x) &&
+ Z_OID=$(printf y | git hash-object --stdin) &&
+ cat >expected <<-EOF &&
+ diff --git a/x b/x
+ deleted file mode 100644
+ index $X_OID..$ZERO_OID
+ --- a/x
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo
+ diff --git a/z b/z
+ new file mode 120000
+ index $ZERO_OID..$Z_OID
+ --- /dev/null
+ +++ b/z
+ @@ -0,0 +1 @@
+ +y
+ \ No newline at end of file
+ EOF
+ test_expect_code 1 git -c core.abbrev=no diff --no-index x z >actual &&
+ test_cmp expected actual
+'
+
test_done
diff --git a/t/t4108-apply-threeway.sh b/t/t4108-apply-threeway.sh
index d62db3fbe1..65147efdea 100755
--- a/t/t4108-apply-threeway.sh
+++ b/t/t4108-apply-threeway.sh
@@ -160,4 +160,74 @@ test_expect_success 'apply -3 with add/add conflict (dirty working tree)' '
test_cmp three.save three
'
+test_expect_success 'apply -3 with ambiguous repeating file' '
+ git reset --hard &&
+ test_write_lines 1 2 1 2 1 2 1 2 1 2 1 >one_two_repeat &&
+ git add one_two_repeat &&
+ git commit -m "init one" &&
+ test_write_lines 1 2 1 2 1 2 1 2 one 2 1 >one_two_repeat &&
+ git commit -a -m "change one" &&
+
+ git diff HEAD~ >Repeat.diff &&
+ git reset --hard HEAD~ &&
+
+ test_write_lines 1 2 1 2 1 2 one 2 1 2 one >one_two_repeat &&
+ git commit -a -m "change surrounding one" &&
+
+ git apply --index --3way Repeat.diff &&
+ test_write_lines 1 2 1 2 1 2 one 2 one 2 one >expect &&
+
+ test_cmp expect one_two_repeat
+'
+
+test_expect_success 'apply with --3way --cached clean apply' '
+ # Merging side should be similar to applying this patch
+ git diff ...side >P.diff &&
+
+ # The corresponding cleanly applied merge
+ git reset --hard &&
+ git checkout main~ &&
+ git merge --no-commit side &&
+ git ls-files -s >expect.ls &&
+
+ # should succeed
+ git reset --hard &&
+ git checkout main~ &&
+ git apply --cached --3way P.diff &&
+ git ls-files -s >actual.ls &&
+ print_sanitized_conflicted_diff >actual.diff &&
+
+ # The cache should resemble the corresponding merge
+ # (both files at stage #0)
+ test_cmp expect.ls actual.ls &&
+ # However the working directory should not change
+ >expect.diff &&
+ test_cmp expect.diff actual.diff
+'
+
+test_expect_success 'apply with --3way --cached and conflicts' '
+ # Merging side should be similar to applying this patch
+ git diff ...side >P.diff &&
+
+ # The corresponding conflicted merge
+ git reset --hard &&
+ git checkout main^0 &&
+ test_must_fail git merge --no-commit side &&
+ git ls-files -s >expect.ls &&
+
+ # should fail to apply
+ git reset --hard &&
+ git checkout main^0 &&
+ test_must_fail git apply --cached --3way P.diff &&
+ git ls-files -s >actual.ls &&
+ print_sanitized_conflicted_diff >actual.diff &&
+
+ # The cache should resemble the corresponding merge
+ # (one file at stage #0, one file at stages #1 #2 #3)
+ test_cmp expect.ls actual.ls &&
+ # However the working directory should not change
+ >expect.diff &&
+ test_cmp expect.diff actual.diff
+'
+
test_done
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index 40b9f63244..f53efc8229 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -554,4 +554,42 @@ test_expect_success 'fetch with bitmaps can reuse old base' '
)
'
+test_expect_success 'pack.preferBitmapTips' '
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ # create enough commits that not all of them receive bitmap
+ # coverage even if they are all at the tip of some reference.
+ test_commit_bulk --message="%s" 103 &&
+
+ git rev-list HEAD >commits.raw &&
+ sort <commits.raw >commits &&
+
+ git log --format="create refs/tags/%s %H" HEAD >refs &&
+ git update-ref --stdin <refs &&
+
+ git repack -adb &&
+ test-tool bitmap list-commits | sort >bitmaps &&
+
+ # remember which commits did not receive bitmaps
+ comm -13 bitmaps commits >before &&
+ test_file_not_empty before &&
+
+ # mark the commits which did not receive bitmaps as preferred,
+ # and generate the bitmap again
+ perl -pe "s{^}{create refs/tags/include/$. }" <before |
+ git update-ref --stdin &&
+ git -c pack.preferBitmapTips=refs/tags/include repack -adb &&
+
+ # finally, check that the commit(s) without bitmap coverage
+ # are not the same ones as before
+ test-tool bitmap list-commits | sort >bitmaps &&
+ comm -13 bitmaps commits >after &&
+
+ ! test_cmp before after
+ )
+'
+
test_done
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
index b4afab1dfc..5641d158df 100755
--- a/t/t5319-multi-pack-index.sh
+++ b/t/t5319-multi-pack-index.sh
@@ -234,6 +234,48 @@ test_expect_success 'warn on improper hash version' '
)
'
+test_expect_success 'midx picks objects from preferred pack' '
+ test_when_finished rm -rf preferred.git &&
+ git init --bare preferred.git &&
+ (
+ cd preferred.git &&
+
+ a=$(echo "a" | git hash-object -w --stdin) &&
+ b=$(echo "b" | git hash-object -w --stdin) &&
+ c=$(echo "c" | git hash-object -w --stdin) &&
+
+ # Set up two packs, duplicating the object "B" at different
+ # offsets.
+ #
+ # Note that the "BC" pack (the one we choose as preferred) sorts
+ # lexically after the "AB" pack, meaning that omitting the
+ # --preferred-pack argument would cause this test to fail (since
+ # the MIDX code would select the copy of "b" in the "AB" pack).
+ git pack-objects objects/pack/test-AB <<-EOF &&
+ $a
+ $b
+ EOF
+ bc=$(git pack-objects objects/pack/test-BC <<-EOF
+ $b
+ $c
+ EOF
+ ) &&
+
+ git multi-pack-index --object-dir=objects \
+ write --preferred-pack=test-BC-$bc.idx 2>err &&
+ test_must_be_empty err &&
+
+ test-tool read-midx --show-objects objects >out &&
+
+ ofs=$(git show-index <objects/pack/test-BC-$bc.idx | grep $b |
+ cut -d" " -f1) &&
+ printf "%s %s\tobjects/pack/test-BC-%s.pack\n" \
+ "$b" "$ofs" "$bc" >expect &&
+ grep ^$b out >actual &&
+
+ test_cmp expect actual
+ )
+'
test_expect_success 'verify multi-pack-index success' '
git multi-pack-index verify --object-dir=$objdir
diff --git a/t/t5572-pull-submodule.sh b/t/t5572-pull-submodule.sh
index 29537f4798..4f92a116e1 100755
--- a/t/t5572-pull-submodule.sh
+++ b/t/t5572-pull-submodule.sh
@@ -42,8 +42,11 @@ git_pull_noff () {
$2 git pull --no-ff
}
-KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
-KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
+if test "$GIT_TEST_MERGE_ALGORITHM" != ort
+then
+ KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
+ KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
+fi
test_submodule_switch_func "git_pull_noff"
test_expect_success 'pull --recurse-submodule setup' '
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index e7e6c08955..329ae599fd 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -759,6 +759,15 @@ test_expect_success 'partial clone using HTTP' '
partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
'
+test_expect_success 'reject cloning shallow repository using HTTP' '
+ test_when_finished "rm -rf repo" &&
+ git clone --bare --no-local --depth=1 src "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
+ test_must_fail git clone --reject-shallow $HTTPD_URL/smart/repo.git repo 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git clone --no-reject-shallow $HTTPD_URL/smart/repo.git repo
+'
+
# DO NOT add non-httpd-specific tests here, because the last part of this
# test script is only executed when httpd is available and enabled.
diff --git a/t/t5606-clone-options.sh b/t/t5606-clone-options.sh
index 428b0aac93..3a595c0f82 100755
--- a/t/t5606-clone-options.sh
+++ b/t/t5606-clone-options.sh
@@ -11,7 +11,8 @@ test_expect_success 'setup' '
mkdir parent &&
(cd parent && git init &&
echo one >file && git add file &&
- git commit -m one)
+ git commit -m one) &&
+ git clone --depth=1 --no-local parent shallow-repo
'
@@ -45,6 +46,30 @@ test_expect_success 'disallows --bare with --separate-git-dir' '
'
+test_expect_success 'reject cloning shallow repository' '
+ test_when_finished "rm -rf repo" &&
+ test_must_fail git clone --reject-shallow shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git clone --no-reject-shallow shallow-repo repo
+'
+
+test_expect_success 'reject cloning non-local shallow repository' '
+ test_when_finished "rm -rf repo" &&
+ test_must_fail git clone --reject-shallow --no-local shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git clone --no-reject-shallow --no-local shallow-repo repo
+'
+
+test_expect_success 'succeed cloning normal repository' '
+ test_when_finished "rm -rf chilad1 child2 child3 child4 " &&
+ git clone --reject-shallow parent child1 &&
+ git clone --reject-shallow --no-local parent child2 &&
+ git clone --no-reject-shallow parent child3 &&
+ git clone --no-reject-shallow --no-local parent child4
+'
+
test_expect_success 'uses "origin" for default remote name' '
git clone parent clone-default-origin &&
diff --git a/t/t5611-clone-config.sh b/t/t5611-clone-config.sh
index 9f555b87ec..f8625f9158 100755
--- a/t/t5611-clone-config.sh
+++ b/t/t5611-clone-config.sh
@@ -95,6 +95,31 @@ test_expect_success 'clone -c remote.<remote>.fetch=<refspec> --origin=<name>' '
test_cmp expect actual
'
+test_expect_success 'set up shallow repository' '
+ git clone --depth=1 --no-local . shallow-repo
+'
+
+test_expect_success 'clone.rejectshallow=true should reject cloning shallow repo' '
+ test_when_finished "rm -rf out" &&
+ test_must_fail git -c clone.rejectshallow=true clone --no-local shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git -c clone.rejectshallow=false clone --no-local shallow-repo out
+'
+
+test_expect_success 'option --[no-]reject-shallow overrides clone.rejectshallow config' '
+ test_when_finished "rm -rf out" &&
+ test_must_fail git -c clone.rejectshallow=false clone --reject-shallow --no-local shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git -c clone.rejectshallow=true clone --no-reject-shallow --no-local shallow-repo out
+'
+
+test_expect_success 'clone.rejectshallow=true should succeed cloning normal repo' '
+ test_when_finished "rm -rf out" &&
+ git -c clone.rejectshallow=true clone --no-local . out
+'
+
test_expect_success MINGW 'clone -c core.hideDotFiles' '
test_commit attributes .gitattributes "" &&
rm -rf child &&
diff --git a/t/t6300-for-each-ref.sh b/t/t6300-for-each-ref.sh
index cac7f443d0..2e7c32d50c 100755
--- a/t/t6300-for-each-ref.sh
+++ b/t/t6300-for-each-ref.sh
@@ -1134,4 +1134,14 @@ test_expect_success 'for-each-ref --ignore-case works on multiple sort keys' '
test_cmp expect actual
'
+test_expect_success 'for-each-ref reports broken tags' '
+ git tag -m "good tag" broken-tag-good HEAD &&
+ git cat-file tag broken-tag-good >good &&
+ sed s/commit/blob/ <good >bad &&
+ bad=$(git hash-object -w -t tag bad) &&
+ git update-ref refs/tags/broken-tag-bad $bad &&
+ test_must_fail git for-each-ref --format="%(*objectname)" \
+ refs/tags/broken-tag-*
+'
+
test_done
diff --git a/t/t6423-merge-rename-directories.sh b/t/t6423-merge-rename-directories.sh
index 5d3b711fe6..7134769149 100755
--- a/t/t6423-merge-rename-directories.sh
+++ b/t/t6423-merge-rename-directories.sh
@@ -4797,7 +4797,7 @@ test_setup_12f () {
)
}
-test_expect_merge_algorithm failure success '12f: Trivial directory resolve, caching, all kinds of fun' '
+test_expect_merge_algorithm failure failure '12f: Trivial directory resolve, caching, all kinds of fun' '
test_setup_12f &&
(
cd 12f &&
@@ -4895,6 +4895,77 @@ test_expect_merge_algorithm failure success '12f: Trivial directory resolve, cac
)
'
+# Testcase 12g, Testcase with two kinds of "relevant" renames
+# Commit O: somefile_O, subdir/{a_O,b_O}
+# Commit A: somefile_A, subdir/{a_O,b_O,c_A}
+# Commit B: newfile_B, newdir/{a_B,b_B}
+# Expected: newfile_{merged}, newdir/{a_B,b_B,c_A}
+
+test_setup_12g () {
+ test_create_repo 12g &&
+ (
+ cd 12g &&
+
+ mkdir -p subdir &&
+ test_write_lines upon a time there was a >somefile &&
+ test_write_lines 1 2 3 4 5 6 7 8 9 10 >subdir/a &&
+ test_write_lines one two three four five six >subdir/b &&
+ git add . &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git switch A &&
+ test_write_lines once upon a time there was a >somefile &&
+ > subdir/c &&
+ git add somefile subdir/c &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ git mv somefile newfile &&
+ git mv subdir newdir &&
+ echo repo >>newfile &&
+ test_write_lines 1 2 3 4 5 6 7 8 9 10 11 >newdir/a &&
+ test_write_lines one two three four five six seven >newdir/b &&
+ git add newfile newdir &&
+ test_tick &&
+ git commit -m "B"
+ )
+}
+
+test_expect_success '12g: Testcase with two kinds of "relevant" renames' '
+ test_setup_12g &&
+ (
+ cd 12g &&
+
+ git checkout A^0 &&
+
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
+
+ test_write_lines once upon a time there was a repo >expect &&
+ test_cmp expect newfile &&
+
+ git ls-files -s >out &&
+ test_line_count = 4 out &&
+
+ git rev-parse >actual \
+ HEAD:newdir/a HEAD:newdir/b HEAD:newdir/c &&
+ git rev-parse >expect \
+ B:newdir/a B:newdir/b A:subdir/c &&
+ test_cmp expect actual &&
+
+ test_must_fail git rev-parse HEAD:subdir/a &&
+ test_must_fail git rev-parse HEAD:subdir/b &&
+ test_must_fail git rev-parse HEAD:subdir/c &&
+ test_path_is_missing subdir/ &&
+ test_path_is_file newdir/c
+ )
+'
+
###########################################################################
# SECTION 13: Checking informational and conflict messages
#
diff --git a/t/t6428-merge-conflicts-sparse.sh b/t/t6428-merge-conflicts-sparse.sh
new file mode 100755
index 0000000000..7e8bf497f8
--- /dev/null
+++ b/t/t6428-merge-conflicts-sparse.sh
@@ -0,0 +1,158 @@
+#!/bin/sh
+
+test_description="merge cases"
+
+# The setup for all of them, pictorially, is:
+#
+# A
+# o
+# / \
+# O o ?
+# \ /
+# o
+# B
+#
+# To help make it easier to follow the flow of tests, they have been
+# divided into sections and each test will start with a quick explanation
+# of what commits O, A, and B contain.
+#
+# Notation:
+# z/{b,c} means files z/b and z/c both exist
+# x/d_1 means file x/d exists with content d1. (Purpose of the
+# underscore notation is to differentiate different
+# files that might be renamed into each other's paths.)
+
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-merge.sh
+
+
+# Testcase basic, conflicting changes in 'numerals'
+
+test_setup_numerals () {
+ test_create_repo numerals_$1 &&
+ (
+ cd numerals_$1 &&
+
+ >README &&
+ test_write_lines I II III >numerals &&
+ git add README numerals &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ test_write_lines I II III IIII >numerals &&
+ git add numerals &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ test_write_lines I II III IV >numerals &&
+ git add numerals &&
+ test_tick &&
+ git commit -m "B" &&
+
+ cat <<-EOF >expected-index &&
+ H README
+ M numerals
+ M numerals
+ M numerals
+ EOF
+
+ cat <<-EOF >expected-merge
+ I
+ II
+ III
+ <<<<<<< HEAD
+ IIII
+ =======
+ IV
+ >>>>>>> B^0
+ EOF
+
+ )
+}
+
+test_expect_success 'conflicting entries written to worktree even if sparse' '
+ test_setup_numerals plain &&
+ (
+ cd numerals_plain &&
+
+ git checkout A^0 &&
+
+ test_path_is_file README &&
+ test_path_is_file numerals &&
+
+ git sparse-checkout init &&
+ git sparse-checkout set README &&
+
+ test_path_is_file README &&
+ test_path_is_missing numerals &&
+
+ test_must_fail git merge -s recursive B^0 &&
+
+ git ls-files -t >index_files &&
+ test_cmp expected-index index_files &&
+
+ test_path_is_file README &&
+ test_path_is_file numerals &&
+
+ test_cmp expected-merge numerals &&
+
+ # 4 other files:
+ # * expected-merge
+ # * expected-index
+ # * index_files
+ # * others
+ git ls-files -o >others &&
+ test_line_count = 4 others
+ )
+'
+
+test_expect_merge_algorithm failure success 'present-despite-SKIP_WORKTREE handled reasonably' '
+ test_setup_numerals in_the_way &&
+ (
+ cd numerals_in_the_way &&
+
+ git checkout A^0 &&
+
+ test_path_is_file README &&
+ test_path_is_file numerals &&
+
+ git sparse-checkout init &&
+ git sparse-checkout set README &&
+
+ test_path_is_file README &&
+ test_path_is_missing numerals &&
+
+ echo foobar >numerals &&
+
+ test_must_fail git merge -s recursive B^0 &&
+
+ git ls-files -t >index_files &&
+ test_cmp expected-index index_files &&
+
+ test_path_is_file README &&
+ test_path_is_file numerals &&
+
+ test_cmp expected-merge numerals &&
+
+ # There should still be a file with "foobar" in it
+ grep foobar * &&
+
+ # 5 other files:
+ # * expected-merge
+ # * expected-index
+ # * index_files
+ # * others
+ # * whatever name was given to the numerals file that had
+ # "foobar" in it
+ git ls-files -o >others &&
+ test_line_count = 5 others
+ )
+'
+
+test_done
diff --git a/t/t6437-submodule-merge.sh b/t/t6437-submodule-merge.sh
index 0f92bcf326..e5e89c2045 100755
--- a/t/t6437-submodule-merge.sh
+++ b/t/t6437-submodule-merge.sh
@@ -6,6 +6,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-merge.sh
#
# history
@@ -328,7 +329,7 @@ test_expect_success 'setup file/submodule conflict' '
)
'
-test_expect_failure 'file/submodule conflict' '
+test_expect_merge_algorithm failure success 'file/submodule conflict' '
test_when_finished "git -C file-submodule reset --hard" &&
(
cd file-submodule &&
@@ -437,7 +438,7 @@ test_expect_failure 'directory/submodule conflict; keep submodule clean' '
)
'
-test_expect_failure !FAIL_PREREQS 'directory/submodule conflict; should not treat submodule files as untracked or in the way' '
+test_expect_merge_algorithm failure success !FAIL_PREREQS 'directory/submodule conflict; should not treat submodule files as untracked or in the way' '
test_when_finished "git -C directory-submodule/path reset --hard" &&
test_when_finished "git -C directory-submodule reset --hard" &&
(
diff --git a/t/t6438-submodule-directory-file-conflicts.sh b/t/t6438-submodule-directory-file-conflicts.sh
index 04bf4be7d7..8df67a0ef9 100755
--- a/t/t6438-submodule-directory-file-conflicts.sh
+++ b/t/t6438-submodule-directory-file-conflicts.sh
@@ -12,8 +12,11 @@ test_submodule_switch "merge --ff"
test_submodule_switch "merge --ff-only"
-KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
-KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
+if test "$GIT_TEST_MERGE_ALGORITHM" != ort
+then
+ KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
+ KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
+fi
test_submodule_switch "merge --no-ff"
test_done
diff --git a/t/t7007-show.sh b/t/t7007-show.sh
index 42d3db6246..d6cc69e0f2 100755
--- a/t/t7007-show.sh
+++ b/t/t7007-show.sh
@@ -38,6 +38,45 @@ test_expect_success 'showing two commits' '
test_cmp expect actual.filtered
'
+test_expect_success 'showing a tree' '
+ cat >expected <<-EOF &&
+ tree main1:
+
+ main1.t
+ EOF
+ git show main1: >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'showing two trees' '
+ cat >expected <<-EOF &&
+ tree main1^{tree}
+
+ main1.t
+
+ tree main2^{tree}
+
+ main1.t
+ main2.t
+ EOF
+ git show main1^{tree} main2^{tree} >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'showing a tree is not recursive' '
+ git worktree add not-recursive main1 &&
+ mkdir not-recursive/a &&
+ test_commit -C not-recursive a/file &&
+ cat >expected <<-EOF &&
+ tree HEAD^{tree}
+
+ a/
+ main1.t
+ EOF
+ git -C not-recursive show HEAD^{tree} >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'showing a range walks (linear)' '
cat >expect <<-EOF &&
commit $(git rev-parse main3)
diff --git a/t/t7502-commit-porcelain.sh b/t/t7502-commit-porcelain.sh
index 6396897cc8..38a532d81c 100755
--- a/t/t7502-commit-porcelain.sh
+++ b/t/t7502-commit-porcelain.sh
@@ -38,6 +38,16 @@ check_summary_oneline() {
test_cmp exp act
}
+trailer_commit_base () {
+ echo "fun" >>file &&
+ git add file &&
+ git commit -s --trailer "Signed-off-by=C1 E1 " \
+ --trailer "Helped-by:C2 E2 " \
+ --trailer "Reported-by=C3 E3" \
+ --trailer "Mentored-by:C4 E4" \
+ -m "hello"
+}
+
test_expect_success 'output summary format' '
echo new >file1 &&
@@ -154,6 +164,308 @@ test_expect_success 'sign off' '
'
+test_expect_success 'commit --trailer with "="' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "replace" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Helped-by: C3 E3
+ EOF
+ git -c trailer.ifexists="replace" \
+ commit --trailer "Mentored-by: C4 E4" \
+ --trailer "Helped-by: C3 E3" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "add" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.ifexists="add" \
+ commit --trailer "Reported-by: C3 E3" \
+ --trailer "Mentored-by: C4 E4" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "donothing" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reviewed-by: C6 E6
+ EOF
+ git -c trailer.ifexists="donothing" \
+ commit --trailer "Mentored-by: C5 E5" \
+ --trailer "Reviewed-by: C6 E6" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "addIfDifferent" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Mentored-by: C5 E5
+ EOF
+ git -c trailer.ifexists="addIfDifferent" \
+ commit --trailer "Reported-by: C3 E3" \
+ --trailer "Mentored-by: C5 E5" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "addIfDifferentNeighbor" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reported-by: C3 E3
+ EOF
+ git -c trailer.ifexists="addIfDifferentNeighbor" \
+ commit --trailer "Mentored-by: C4 E4" \
+ --trailer "Reported-by: C3 E3" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "end" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.where="end" \
+ commit --trailer "Reported-by: C3 E3" \
+ --trailer "Mentored-by: C4 E4" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "start" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C1 E1
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.where="start" \
+ commit --trailer "Signed-off-by: C O Mitter <committer@example.com>" \
+ --trailer "Signed-off-by: C1 E1" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "after" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Mentored-by: C5 E5
+ EOF
+ git -c trailer.where="after" \
+ commit --trailer "Mentored-by: C4 E4" \
+ --trailer "Mentored-by: C5 E5" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "before" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C2 E2
+ Mentored-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.where="before" \
+ commit --trailer "Mentored-by: C3 E3" \
+ --trailer "Mentored-by: C2 E2" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "donothing" as ifmissing' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Helped-by: C5 E5
+ EOF
+ git -c trailer.ifmissing="donothing" \
+ commit --trailer "Helped-by: C5 E5" \
+ --trailer "Based-by: C6 E6" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "add" as ifmissing' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Helped-by: C5 E5
+ Based-by: C6 E6
+ EOF
+ git -c trailer.ifmissing="add" \
+ commit --trailer "Helped-by: C5 E5" \
+ --trailer "Based-by: C6 E6" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c ack.key' '
+ echo "fun" >>file1 &&
+ git add file1 &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Acked-by: Peff
+ EOF
+ git -c trailer.ack.key="Acked-by" \
+ commit --trailer "ack = Peff" -m "hello" &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and ":=#" as separators' '
+ echo "fun" >>file1 &&
+ git add file1 &&
+ cat >expected <<-\EOF &&
+ I hate bug
+
+ Bug #42
+ EOF
+ git -c trailer.separators=":=#" \
+ -c trailer.bug.key="Bug #" \
+ commit --trailer "bug = 42" -m "I hate bug" &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and command' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Mentored-by: C4 E4
+ Reported-by: A U Thor <author@example.com>
+ EOF
+ git -c trailer.report.key="Reported-by: " \
+ -c trailer.report.ifexists="replace" \
+ -c trailer.report.command="NAME=\"\$ARG\"; test -n \"\$NAME\" && \
+ git log --author=\"\$NAME\" -1 --format=\"format:%aN <%aE>\" || true" \
+ commit --trailer "report = author" --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'multiple -m' '
>negative &&
diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh
index 4eee9c3dcb..65b3035371 100755
--- a/t/t9001-send-email.sh
+++ b/t/t9001-send-email.sh
@@ -415,15 +415,23 @@ test_expect_success $PREREQ 'reject long lines' '
z512=$z64$z64$z64$z64$z64$z64$z64$z64 &&
clean_fake_sendmail &&
cp $patches longline.patch &&
- echo $z512$z512 >>longline.patch &&
+ cat >>longline.patch <<-EOF &&
+ $z512$z512
+ not a long line
+ $z512$z512
+ EOF
test_must_fail git send-email \
--from="Example <nobody@example.com>" \
--to=nobody@example.com \
--smtp-server="$(pwd)/fake.sendmail" \
--transfer-encoding=8bit \
$patches longline.patch \
- 2>errors &&
- grep longline.patch errors
+ 2>actual &&
+ cat >expect <<-\EOF &&
+ fatal: longline.patch:35 is longer than 998 characters
+ warning: no patches were sent
+ EOF
+ test_cmp expect actual
'
test_expect_success $PREREQ 'no patch was sent' '
@@ -513,6 +521,49 @@ do
done
+test_expect_success $PREREQ "--validate respects relative core.hooksPath path" '
+ clean_fake_sendmail &&
+ mkdir my-hooks &&
+ test_when_finished "rm my-hooks.ran" &&
+ write_script my-hooks/sendemail-validate <<-\EOF &&
+ >my-hooks.ran
+ exit 1
+ EOF
+ test_config core.hooksPath "my-hooks" &&
+ test_must_fail git send-email \
+ --from="Example <nobody@example.com>" \
+ --to=nobody@example.com \
+ --smtp-server="$(pwd)/fake.sendmail" \
+ --validate \
+ longline.patch 2>actual &&
+ test_path_is_file my-hooks.ran &&
+ cat >expect <<-EOF &&
+ fatal: longline.patch: rejected by sendemail-validate hook
+ fatal: command '"'"'$(pwd)/my-hooks/sendemail-validate'"'"' died with exit code 1
+ warning: no patches were sent
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success $PREREQ "--validate respects absolute core.hooksPath path" '
+ hooks_path="$(pwd)/my-hooks" &&
+ test_config core.hooksPath "$hooks_path" &&
+ test_when_finished "rm my-hooks.ran" &&
+ test_must_fail git send-email \
+ --from="Example <nobody@example.com>" \
+ --to=nobody@example.com \
+ --smtp-server="$(pwd)/fake.sendmail" \
+ --validate \
+ longline.patch 2>actual &&
+ test_path_is_file my-hooks.ran &&
+ cat >expect <<-EOF &&
+ fatal: longline.patch: rejected by sendemail-validate hook
+ fatal: command '"'"'$hooks_path/sendemail-validate'"'"' died with exit code 1
+ warning: no patches were sent
+ EOF
+ test_cmp expect actual
+'
+
for enc in 7bit 8bit quoted-printable base64
do
test_expect_success $PREREQ "--transfer-encoding=$enc produces correct header" '
diff --git a/t/test-lib.sh b/t/test-lib.sh
index d3f6af6a65..3dec266221 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -448,6 +448,8 @@ export EDITOR
GIT_DEFAULT_HASH="${GIT_TEST_DEFAULT_HASH:-sha1}"
export GIT_DEFAULT_HASH
+GIT_TEST_MERGE_ALGORITHM="${GIT_TEST_MERGE_ALGORITHM:-ort}"
+export GIT_TEST_MERGE_ALGORITHM
# Tests using GIT_TRACE typically don't want <timestamp> <file>:<line> output
GIT_TRACE_BARE=1
diff --git a/transport.c b/transport.c
index 1c4ab676d1..ef66e73090 100644
--- a/transport.c
+++ b/transport.c
@@ -236,6 +236,9 @@ static int set_git_option(struct git_transport_options *opts,
list_objects_filter_die_if_populated(&opts->filter_options);
parse_list_objects_filter(&opts->filter_options, value);
return 0;
+ } else if (!strcmp(name, TRANS_OPT_REJECT_SHALLOW)) {
+ opts->reject_shallow = !!value;
+ return 0;
}
return 1;
}
@@ -370,6 +373,7 @@ static int fetch_refs_via_pack(struct transport *transport,
args.stateless_rpc = transport->stateless_rpc;
args.server_options = transport->server_options;
args.negotiation_tips = data->options.negotiation_tips;
+ args.reject_shallow_remote = transport->smart_options->reject_shallow;
if (!data->got_remote_heads) {
int i;
@@ -1452,6 +1456,8 @@ int transport_disconnect(struct transport *transport)
int ret = 0;
if (transport->vtable->disconnect)
ret = transport->vtable->disconnect(transport);
+ if (transport->got_remote_refs)
+ free_refs((void *)transport->remote_refs);
free(transport);
return ret;
}
diff --git a/transport.h b/transport.h
index 24e15799e7..4d5db0a7f2 100644
--- a/transport.h
+++ b/transport.h
@@ -14,6 +14,7 @@ struct git_transport_options {
unsigned check_self_contained_and_connected : 1;
unsigned self_contained_and_connected : 1;
unsigned update_shallow : 1;
+ unsigned reject_shallow : 1;
unsigned deepen_relative : 1;
/* see documentation of corresponding flag in fetch-pack.h */
@@ -194,6 +195,9 @@ void transport_check_allowed(const char *type);
/* Aggressively fetch annotated tags if possible */
#define TRANS_OPT_FOLLOWTAGS "followtags"
+/* Reject shallow repo transport */
+#define TRANS_OPT_REJECT_SHALLOW "rejectshallow"
+
/* Accept refs that may update .git/shallow without --depth */
#define TRANS_OPT_UPDATE_SHALLOW "updateshallow"
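
Editor's note: the transport.h hunk above adds the TRANS_OPT_REJECT_SHALLOW option name next to the new reject_shallow bit, and the transport.c hunk maps it onto opts->reject_shallow in set_git_option(). Below is a minimal sketch, not part of the patch, of how a caller could forward a --reject-shallow flag through the existing transport_set_option() helper; the wrapper function and its name are illustrative assumptions, not the actual call site in builtin/clone.c.

/*
 * Illustrative sketch only (not part of the patch): forward a
 * --reject-shallow request to the transport layer via the new
 * "rejectshallow" option.  The helper name is an assumption.
 */
#include "transport.h"

static void maybe_reject_shallow(struct transport *transport,
				 int reject_shallow)
{
	if (reject_shallow)
		transport_set_option(transport, TRANS_OPT_REJECT_SHALLOW, "1");
}
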
diff --git a/tree.c b/tree.c
index a52479812c..410e3b477e 100644
--- a/tree.c
+++ b/tree.c
@@ -11,58 +11,10 @@
const char *tree_type = "tree";
-static int read_one_entry_opt(struct index_state *istate,
- const struct object_id *oid,
- const char *base, int baselen,
- const char *pathname,
- unsigned mode, int stage, int opt)
-{
- int len;
- struct cache_entry *ce;
-
- if (S_ISDIR(mode))
- return READ_TREE_RECURSIVE;
-
- len = strlen(pathname);
- ce = make_empty_cache_entry(istate, baselen + len);
-
- ce->ce_mode = create_ce_mode(mode);
- ce->ce_flags = create_ce_flags(stage);
- ce->ce_namelen = baselen + len;
- memcpy(ce->name, base, baselen);
- memcpy(ce->name + baselen, pathname, len+1);
- oidcpy(&ce->oid, oid);
- return add_index_entry(istate, ce, opt);
-}
-
-static int read_one_entry(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, int stage,
- void *context)
-{
- struct index_state *istate = context;
- return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
- mode, stage,
- ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
-}
-
-/*
- * This is used when the caller knows there is no existing entries at
- * the stage that will conflict with the entry being added.
- */
-static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, int stage,
- void *context)
-{
- struct index_state *istate = context;
- return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
- mode, stage,
- ADD_CACHE_JUST_APPEND);
-}
-
-static int read_tree_1(struct repository *r,
- struct tree *tree, struct strbuf *base,
- int stage, const struct pathspec *pathspec,
- read_tree_fn_t fn, void *context)
+int read_tree_at(struct repository *r,
+ struct tree *tree, struct strbuf *base,
+ const struct pathspec *pathspec,
+ read_tree_fn_t fn, void *context)
{
struct tree_desc desc;
struct name_entry entry;
@@ -86,7 +38,7 @@ static int read_tree_1(struct repository *r,
}
switch (fn(&entry.oid, base,
- entry.path, entry.mode, stage, context)) {
+ entry.path, entry.mode, context)) {
case 0:
continue;
case READ_TREE_RECURSIVE:
@@ -119,9 +71,9 @@ static int read_tree_1(struct repository *r,
len = tree_entry_len(&entry);
strbuf_add(base, entry.path, len);
strbuf_addch(base, '/');
- retval = read_tree_1(r, lookup_tree(r, &oid),
- base, stage, pathspec,
- fn, context);
+ retval = read_tree_at(r, lookup_tree(r, &oid),
+ base, pathspec,
+ fn, context);
strbuf_setlen(base, oldlen);
if (retval)
return -1;
@@ -129,17 +81,13 @@ static int read_tree_1(struct repository *r,
return 0;
}
-int read_tree_recursive(struct repository *r,
- struct tree *tree,
- const char *base, int baselen,
- int stage, const struct pathspec *pathspec,
- read_tree_fn_t fn, void *context)
+int read_tree(struct repository *r,
+ struct tree *tree,
+ const struct pathspec *pathspec,
+ read_tree_fn_t fn, void *context)
{
struct strbuf sb = STRBUF_INIT;
- int ret;
-
- strbuf_add(&sb, base, baselen);
- ret = read_tree_1(r, tree, &sb, stage, pathspec, fn, context);
+ int ret = read_tree_at(r, tree, &sb, pathspec, fn, context);
strbuf_release(&sb);
return ret;
}
@@ -154,47 +102,6 @@ int cmp_cache_name_compare(const void *a_, const void *b_)
ce2->name, ce2->ce_namelen, ce_stage(ce2));
}
-int read_tree(struct repository *r, struct tree *tree, int stage,
- struct pathspec *match, struct index_state *istate)
-{
- read_tree_fn_t fn = NULL;
- int i, err;
-
- /*
- * Currently the only existing callers of this function all
- * call it with stage=1 and after making sure there is nothing
- * at that stage; we could always use read_one_entry_quick().
- *
- * But when we decide to straighten out git-read-tree not to
- * use unpack_trees() in some cases, this will probably start
- * to matter.
- */
-
- /*
- * See if we have cache entry at the stage. If so,
- * do it the original slow way, otherwise, append and then
- * sort at the end.
- */
- for (i = 0; !fn && i < istate->cache_nr; i++) {
- const struct cache_entry *ce = istate->cache[i];
- if (ce_stage(ce) == stage)
- fn = read_one_entry;
- }
-
- if (!fn)
- fn = read_one_entry_quick;
- err = read_tree_recursive(r, tree, "", 0, stage, match, fn, istate);
- if (fn == read_one_entry || err)
- return err;
-
- /*
- * Sort the cache entry -- we need to nuke the cache tree, though.
- */
- cache_tree_free(&istate->cache_tree);
- QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
- return 0;
-}
-
struct tree *lookup_tree(struct repository *r, const struct object_id *oid)
{
struct object *obj = lookup_object(r, oid);
diff --git a/tree.h b/tree.h
index 3eb0484cbf..6efff003e2 100644
--- a/tree.h
+++ b/tree.h
@@ -31,16 +31,16 @@ struct tree *parse_tree_indirect(const struct object_id *oid);
int cmp_cache_name_compare(const void *a_, const void *b_);
#define READ_TREE_RECURSIVE 1
-typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, int, void *);
+typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, void *);
-int read_tree_recursive(struct repository *r,
- struct tree *tree,
- const char *base, int baselen,
- int stage, const struct pathspec *pathspec,
- read_tree_fn_t fn, void *context);
+int read_tree_at(struct repository *r,
+ struct tree *tree, struct strbuf *base,
+ const struct pathspec *pathspec,
+ read_tree_fn_t fn, void *context);
-int read_tree(struct repository *r, struct tree *tree,
- int stage, struct pathspec *pathspec,
- struct index_state *istate);
+int read_tree(struct repository *r,
+ struct tree *tree,
+ const struct pathspec *pathspec,
+ read_tree_fn_t fn, void *context);
#endif /* TREE_H */
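
Editor's note: the tree.h hunk above drops the "stage" parameter from read_tree_fn_t and replaces read_tree_recursive() and the old index-filling read_tree() with read_tree_at() plus a callback-based read_tree(). The sketch below shows a caller written against the new signatures; the callback, the empty pathspec, and the listing helper are illustrative assumptions, not code from this series.

/*
 * Illustrative sketch only: a read_tree_fn_t callback using the new
 * signature (no "stage" parameter), printing every path and recursing
 * into subtrees.
 */
#include "cache.h"
#include "pathspec.h"
#include "tree.h"

static int show_entry(const struct object_id *oid, struct strbuf *base,
		      const char *pathname, unsigned mode, void *context)
{
	printf("%s%s\n", base->buf, pathname);
	return S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0;
}

static int list_tree(struct repository *r, struct tree *tree)
{
	struct pathspec pathspec = { 0 };	/* empty: match everything */

	return read_tree(r, tree, &pathspec, show_entry, NULL);
}
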
diff --git a/unix-socket.c b/unix-socket.c
index 19ed48be99..e0be1badb5 100644
--- a/unix-socket.c
+++ b/unix-socket.c
@@ -1,13 +1,7 @@
#include "cache.h"
#include "unix-socket.h"
-static int unix_stream_socket(void)
-{
- int fd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (fd < 0)
- die_errno("unable to create socket");
- return fd;
-}
+#define DEFAULT_UNIX_STREAM_LISTEN_BACKLOG (5)
static int chdir_len(const char *orig, int len)
{
@@ -36,16 +30,23 @@ static void unix_sockaddr_cleanup(struct unix_sockaddr_context *ctx)
}
static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path,
- struct unix_sockaddr_context *ctx)
+ struct unix_sockaddr_context *ctx,
+ int disallow_chdir)
{
int size = strlen(path) + 1;
ctx->orig_dir = NULL;
if (size > sizeof(sa->sun_path)) {
- const char *slash = find_last_dir_sep(path);
+ const char *slash;
const char *dir;
struct strbuf cwd = STRBUF_INIT;
+ if (disallow_chdir) {
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+
+ slash = find_last_dir_sep(path);
if (!slash) {
errno = ENAMETOOLONG;
return -1;
@@ -71,15 +72,18 @@ static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path,
return 0;
}
-int unix_stream_connect(const char *path)
+int unix_stream_connect(const char *path, int disallow_chdir)
{
- int fd, saved_errno;
+ int fd = -1, saved_errno;
struct sockaddr_un sa;
struct unix_sockaddr_context ctx;
- if (unix_sockaddr_init(&sa, path, &ctx) < 0)
+ if (unix_sockaddr_init(&sa, path, &ctx, disallow_chdir) < 0)
return -1;
- fd = unix_stream_socket();
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0)
+ goto fail;
+
if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
goto fail;
unix_sockaddr_cleanup(&ctx);
@@ -87,28 +91,36 @@ int unix_stream_connect(const char *path)
fail:
saved_errno = errno;
+ if (fd != -1)
+ close(fd);
unix_sockaddr_cleanup(&ctx);
- close(fd);
errno = saved_errno;
return -1;
}
-int unix_stream_listen(const char *path)
+int unix_stream_listen(const char *path,
+ const struct unix_stream_listen_opts *opts)
{
- int fd, saved_errno;
+ int fd = -1, saved_errno;
+ int backlog;
struct sockaddr_un sa;
struct unix_sockaddr_context ctx;
unlink(path);
- if (unix_sockaddr_init(&sa, path, &ctx) < 0)
+ if (unix_sockaddr_init(&sa, path, &ctx, opts->disallow_chdir) < 0)
return -1;
- fd = unix_stream_socket();
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0)
+ goto fail;
if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
goto fail;
- if (listen(fd, 5) < 0)
+ backlog = opts->listen_backlog_size;
+ if (backlog <= 0)
+ backlog = DEFAULT_UNIX_STREAM_LISTEN_BACKLOG;
+ if (listen(fd, backlog) < 0)
goto fail;
unix_sockaddr_cleanup(&ctx);
@@ -116,8 +128,9 @@ int unix_stream_listen(const char *path)
fail:
saved_errno = errno;
+ if (fd != -1)
+ close(fd);
unix_sockaddr_cleanup(&ctx);
- close(fd);
errno = saved_errno;
return -1;
}
diff --git a/unix-socket.h b/unix-socket.h
index e271aeec5a..8542cdd799 100644
--- a/unix-socket.h
+++ b/unix-socket.h
@@ -1,7 +1,15 @@
#ifndef UNIX_SOCKET_H
#define UNIX_SOCKET_H
-int unix_stream_connect(const char *path);
-int unix_stream_listen(const char *path);
+struct unix_stream_listen_opts {
+ int listen_backlog_size;
+ unsigned int disallow_chdir:1;
+};
+
+#define UNIX_STREAM_LISTEN_OPTS_INIT { 0 }
+
+int unix_stream_connect(const char *path, int disallow_chdir);
+int unix_stream_listen(const char *path,
+ const struct unix_stream_listen_opts *opts);
#endif /* UNIX_SOCKET_H */
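
Editor's note: with the hunks above, unix_stream_listen() takes a unix_stream_listen_opts struct (backlog size, optional refusal to chdir around long socket paths) and unix_stream_connect() gains a disallow_chdir flag. The following sketch exercises the new signatures with example values; the helper function and the backlog of 16 are illustrative assumptions.

/*
 * Illustrative sketch only: create a listening Unix-domain socket with
 * the new options struct, then connect to it as a client.  Error
 * handling is abbreviated.
 */
#include "cache.h"
#include "unix-socket.h"

static int demo_unix_socket(const char *path)
{
	struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;
	int fd_listen, fd_client;

	opts.listen_backlog_size = 16;	/* <= 0 falls back to the default of 5 */
	opts.disallow_chdir = 1;	/* fail with ENAMETOOLONG rather than chdir */

	fd_listen = unix_stream_listen(path, &opts);
	if (fd_listen < 0)
		return -1;

	fd_client = unix_stream_connect(path, 1 /* disallow_chdir */);
	if (fd_client < 0) {
		close(fd_listen);
		return -1;
	}

	close(fd_client);
	close(fd_listen);
	return 0;
}
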
diff --git a/unix-stream-server.c b/unix-stream-server.c
new file mode 100644
index 0000000000..efa2a207ab
--- /dev/null
+++ b/unix-stream-server.c
@@ -0,0 +1,125 @@
+#include "cache.h"
+#include "lockfile.h"
+#include "unix-socket.h"
+#include "unix-stream-server.h"
+
+#define DEFAULT_LOCK_TIMEOUT (100)
+
+/*
+ * Try to connect to a unix domain socket at `path` (if it exists) and
+ * see if there is a server listening.
+ *
+ * We don't know if the socket exists, whether a server died and
+ * failed to cleanup, or whether we have a live server listening, so
+ * we "poke" it.
+ *
+ * We immediately hangup without sending/receiving any data because we
+ * don't know anything about the protocol spoken and don't want to
+ * block while writing/reading data. It is sufficient to just know
+ * that someone is listening.
+ */
+static int is_another_server_alive(const char *path,
+ const struct unix_stream_listen_opts *opts)
+{
+ int fd = unix_stream_connect(path, opts->disallow_chdir);
+ if (fd >= 0) {
+ close(fd);
+ return 1;
+ }
+
+ return 0;
+}
+
+int unix_ss_create(const char *path,
+ const struct unix_stream_listen_opts *opts,
+ long timeout_ms,
+ struct unix_ss_socket **new_server_socket)
+{
+ struct lock_file lock = LOCK_INIT;
+ int fd_socket;
+ struct unix_ss_socket *server_socket;
+
+ *new_server_socket = NULL;
+
+ if (timeout_ms < 0)
+ timeout_ms = DEFAULT_LOCK_TIMEOUT;
+
+ /*
+ * Create a lock at "<path>.lock" if we can.
+ */
+ if (hold_lock_file_for_update_timeout(&lock, path, 0, timeout_ms) < 0)
+ return -1;
+
+ /*
+ * If another server is listening on "<path>" give up. We do not
+ * want to create a socket and steal future connections from them.
+ */
+ if (is_another_server_alive(path, opts)) {
+ rollback_lock_file(&lock);
+ errno = EADDRINUSE;
+ return -2;
+ }
+
+ /*
+ * Create and bind to a Unix domain socket at "<path>".
+ */
+ fd_socket = unix_stream_listen(path, opts);
+ if (fd_socket < 0) {
+ int saved_errno = errno;
+ rollback_lock_file(&lock);
+ errno = saved_errno;
+ return -1;
+ }
+
+ server_socket = xcalloc(1, sizeof(*server_socket));
+ server_socket->path_socket = strdup(path);
+ server_socket->fd_socket = fd_socket;
+ lstat(path, &server_socket->st_socket);
+
+ *new_server_socket = server_socket;
+
+ /*
+ * Always rollback (just delete) "<path>.lock" because we already created
+ * "<path>" as a socket and do not want to commit_lock to do the atomic
+ * rename trick.
+ */
+ rollback_lock_file(&lock);
+
+ return 0;
+}
+
+void unix_ss_free(struct unix_ss_socket *server_socket)
+{
+ if (!server_socket)
+ return;
+
+ if (server_socket->fd_socket >= 0) {
+ if (!unix_ss_was_stolen(server_socket))
+ unlink(server_socket->path_socket);
+ close(server_socket->fd_socket);
+ }
+
+ free(server_socket->path_socket);
+ free(server_socket);
+}
+
+int unix_ss_was_stolen(struct unix_ss_socket *server_socket)
+{
+ struct stat st_now;
+
+ if (!server_socket)
+ return 0;
+
+ if (lstat(server_socket->path_socket, &st_now) == -1)
+ return 1;
+
+ if (st_now.st_ino != server_socket->st_socket.st_ino)
+ return 1;
+ if (st_now.st_dev != server_socket->st_socket.st_dev)
+ return 1;
+
+ if (!S_ISSOCK(st_now.st_mode))
+ return 1;
+
+ return 0;
+}
diff --git a/unix-stream-server.h b/unix-stream-server.h
new file mode 100644
index 0000000000..ae2712ba39
--- /dev/null
+++ b/unix-stream-server.h
@@ -0,0 +1,33 @@
+#ifndef UNIX_STREAM_SERVER_H
+#define UNIX_STREAM_SERVER_H
+
+#include "unix-socket.h"
+
+struct unix_ss_socket {
+ char *path_socket;
+ struct stat st_socket;
+ int fd_socket;
+};
+
+/*
+ * Create a Unix Domain Socket at the given path under the protection
+ * of a '.lock' lockfile.
+ *
+ * Returns 0 on success, -1 on error, -2 if socket is in use.
+ */
+int unix_ss_create(const char *path,
+ const struct unix_stream_listen_opts *opts,
+ long timeout_ms,
+ struct unix_ss_socket **server_socket);
+
+/*
+ * Close and delete the socket.
+ */
+void unix_ss_free(struct unix_ss_socket *server_socket);
+
+/*
+ * Return 1 if the inode of the pathname to our socket changes.
+ */
+int unix_ss_was_stolen(struct unix_ss_socket *server_socket);
+
+#endif /* UNIX_STREAM_SERVER_H */
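
Editor's note: unix-stream-server.h declares the lock-protected socket lifecycle layered on unix-socket.h: unix_ss_create() takes the "<path>.lock" lockfile, pokes any existing server, binds the socket, and records its inode so unix_ss_was_stolen() can later detect that another process replaced the pathname. The sketch below walks that lifecycle under illustrative assumptions (the accept/serve loop is elided; the intended caller in this series is presumably the simple-ipc backend).

/*
 * Illustrative sketch only: typical lifecycle of the new
 * unix-stream-server API.  Messages and the elided serving loop are
 * examples, not code from this patch.
 */
#include "cache.h"
#include "unix-socket.h"
#include "unix-stream-server.h"

static int demo_server(const char *path)
{
	struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;
	struct unix_ss_socket *server_socket = NULL;
	int ret;

	ret = unix_ss_create(path, &opts, -1 /* default lock timeout */,
			     &server_socket);
	if (ret == -2)
		return error(_("socket '%s' is already in use"), path);
	if (ret < 0)
		return error_errno(_("could not create socket '%s'"), path);

	/*
	 * ... accept(2) and serve connections on server_socket->fd_socket,
	 * periodically checking whether another process took over the
	 * pathname ...
	 */
	if (unix_ss_was_stolen(server_socket))
		warning(_("socket '%s' was stolen by another process"), path);

	unix_ss_free(server_socket);
	return 0;
}
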
diff --git a/unpack-trees.c b/unpack-trees.c
index 9af8e796b3..8a1afbc1e4 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -16,6 +16,7 @@
#include "fsmonitor.h"
#include "object-store.h"
#include "promisor-remote.h"
+#include "entry.h"
/*
* Error messages expected by scripts out of plumbing commands such as
@@ -2097,7 +2098,7 @@ static int verify_absent_1(const struct cache_entry *ce,
if (o->index_only || o->reset || !o->update)
return 0;
- len = check_leading_path(ce->name, ce_namelen(ce));
+ len = check_leading_path(ce->name, ce_namelen(ce), 0);
if (!len)
return 0;
else if (len > 0) {