-rw-r--r--  .github/workflows/main.yml | 230
-rw-r--r--  .travis.yml | 12
-rw-r--r--  Documentation/Makefile | 23
-rw-r--r--  Documentation/RelNotes/2.27.0.txt | 114
-rw-r--r--  Documentation/asciidoc.conf | 19
-rw-r--r--  Documentation/config.txt | 11
-rw-r--r--  Documentation/config/fetch.txt | 13
-rw-r--r--  Documentation/config/log.txt | 6
-rw-r--r--  Documentation/config/merge.txt | 10
-rw-r--r--  Documentation/config/protocol.txt | 2
-rw-r--r--  Documentation/config/push.txt | 2
-rw-r--r--  Documentation/config/submodule.txt | 12
-rw-r--r--  Documentation/fetch-options.txt | 3
-rw-r--r--  Documentation/git-checkout.txt | 4
-rw-r--r--  Documentation/git-commit-graph.txt | 22
-rw-r--r--  Documentation/git-grep.txt | 8
-rw-r--r--  Documentation/git-log.txt | 6
-rw-r--r--  Documentation/git-ls-files.txt | 2
-rw-r--r--  Documentation/git-merge.txt | 11
-rw-r--r--  Documentation/git-pull.txt | 14
-rw-r--r--  Documentation/git-read-tree.txt | 4
-rw-r--r--  Documentation/git-rebase.txt | 17
-rw-r--r--  Documentation/git-reset.txt | 6
-rw-r--r--  Documentation/git-restore.txt | 11
-rw-r--r--  Documentation/git-sparse-checkout.txt | 10
-rw-r--r--  Documentation/git-switch.txt | 4
-rw-r--r--  Documentation/git-update-ref.txt | 28
-rw-r--r--  Documentation/gitsubmodules.txt | 3
-rw-r--r--  Documentation/manpage-1.72.xsl | 14
-rw-r--r--  Documentation/manpage-base.xsl | 35
-rw-r--r--  Documentation/manpage-bold-literal.xsl | 6
-rw-r--r--  Documentation/manpage-normal.xsl | 25
-rw-r--r--  Documentation/manpage-suppress-sp.xsl | 21
-rw-r--r--  Documentation/merge-options.txt | 8
-rw-r--r--  Documentation/pretty-formats.txt | 6
-rw-r--r--  Documentation/revisions.txt | 2
-rw-r--r--  Documentation/technical/commit-graph-format.txt | 30
-rw-r--r--  Documentation/user-manual.conf | 10
-rw-r--r--  INSTALL | 4
-rw-r--r--  Makefile | 103
-rw-r--r--  README.md | 2
-rw-r--r--  archive-tar.c | 4
-rw-r--r--  azure-pipelines.yml | 558
-rw-r--r--  blame.c | 137
-rw-r--r--  blame.h | 6
-rw-r--r--  bloom.c | 276
-rw-r--r--  bloom.h | 90
-rw-r--r--  branch.c | 1
-rw-r--r--  builtin.h | 16
-rw-r--r--  builtin/blame.c | 10
-rw-r--r--  builtin/clean.c | 6
-rw-r--r--  builtin/clone.c | 2
-rw-r--r--  builtin/commit-graph.c | 60
-rw-r--r--  builtin/commit.c | 6
-rw-r--r--  builtin/diff-tree.c | 9
-rw-r--r--  builtin/fmt-merge-msg.c | 658
-rw-r--r--  builtin/grep.c | 48
-rw-r--r--  builtin/help.c | 2
-rw-r--r--  builtin/index-pack.c | 5
-rw-r--r--  builtin/log.c | 17
-rw-r--r--  builtin/ls-files.c | 5
-rw-r--r--  builtin/merge.c | 32
-rw-r--r--  builtin/prune-packed.c | 44
-rw-r--r--  builtin/prune.c | 1
-rw-r--r--  builtin/pull.c | 9
-rw-r--r--  builtin/rebase.c | 309
-rw-r--r--  builtin/receive-pack.c | 27
-rw-r--r--  builtin/reflog.c | 2
-rw-r--r--  builtin/repack.c | 1
-rw-r--r--  builtin/sparse-checkout.c | 55
-rw-r--r--  builtin/stash.c | 19
-rw-r--r--  builtin/update-ref.c | 245
-rw-r--r--  ci/git-problem-matcher.json | 16
-rwxr-xr-x  ci/install-dependencies.sh | 16
-rwxr-xr-x  ci/install-docker-dependencies.sh | 18
-rwxr-xr-x  ci/lib.sh | 39
-rwxr-xr-x  ci/print-test-failures.sh | 7
-rwxr-xr-x  ci/run-build-and-tests.sh | 1
-rwxr-xr-x  ci/run-docker-build.sh (renamed from ci/run-linux32-build.sh) | 39
-rwxr-xr-x  ci/run-docker.sh | 47
-rwxr-xr-x  ci/run-linux32-docker.sh | 31
-rw-r--r--  commit-graph.c | 379
-rw-r--r--  commit-graph.h | 33
-rw-r--r--  commit.h | 2
-rw-r--r--  config.c | 42
-rw-r--r--  config.h | 4
-rw-r--r--  config.mak.uname | 1
-rw-r--r--  contrib/completion/git-completion.bash | 2
-rw-r--r--  contrib/completion/git-completion.zsh | 2
-rw-r--r--  contrib/subtree/Makefile | 6
-rw-r--r--  convert.c | 2
-rw-r--r--  diff.c | 157
-rw-r--r--  diff.h | 5
-rw-r--r--  diffcore-break.c | 12
-rw-r--r--  diffcore-rename.c | 64
-rw-r--r--  diffcore.h | 30
-rw-r--r--  dir.c | 422
-rw-r--r--  fast-import.c | 61
-rw-r--r--  fetch-pack.c | 10
-rw-r--r--  fmt-merge-msg.c | 656
-rw-r--r--  fmt-merge-msg.h | 13
-rw-r--r--  fsck.c | 2
-rw-r--r--  fuzz-commit-graph.c | 5
-rwxr-xr-x  git-submodule.sh | 2
-rwxr-xr-x  gitweb/gitweb.perl | 2
-rw-r--r--  line-log.c | 6
-rw-r--r--  ll-merge.c | 2
-rw-r--r--  log-tree.c | 57
-rw-r--r--  log-tree.h | 4
-rw-r--r--  mailinfo.c | 11
-rw-r--r--  midx.c | 4
-rw-r--r--  midx.h | 2
-rw-r--r--  oidset.c | 5
-rw-r--r--  oidset.h | 5
-rw-r--r--  parse-options.c | 9
-rw-r--r--  parse-options.h | 1
-rw-r--r--  path.c | 1
-rw-r--r--  path.h | 4
-rw-r--r--  promisor-remote.c | 5
-rw-r--r--  promisor-remote.h | 8
-rw-r--r--  protocol.c | 2
-rw-r--r--  prune-packed.c | 43
-rw-r--r--  prune-packed.h | 9
-rw-r--r--  range-diff.c | 18
-rw-r--r--  refs.c | 44
-rw-r--r--  refs.h | 12
-rw-r--r--  refs/files-backend.c | 18
-rw-r--r--  remote.c | 37
-rw-r--r--  reset.c | 141
-rw-r--r--  reset.h | 20
-rw-r--r--  revision.c | 143
-rw-r--r--  revision.h | 11
-rw-r--r--  send-pack.c | 32
-rw-r--r--  sequencer.c | 176
-rw-r--r--  sequencer.h | 20
-rw-r--r--  sha1-file.c | 4
-rw-r--r--  shallow.c | 30
-rw-r--r--  strbuf.c | 10
-rw-r--r--  strbuf.h | 6
-rw-r--r--  submodule-config.c | 3
-rw-r--r--  t/README | 40
-rw-r--r--  t/helper/test-bloom.c | 93
-rw-r--r--  t/helper/test-pkt-line.c | 2
-rw-r--r--  t/helper/test-read-graph.c | 17
-rw-r--r--  t/helper/test-tool.c | 1
-rw-r--r--  t/helper/test-tool.h | 1
-rwxr-xr-x  t/perf/p9300-fast-import-export.sh | 23
-rwxr-xr-x  t/t0040-parse-options.sh | 2
-rwxr-xr-x  t/t0095-bloom.sh | 117
-rwxr-xr-x  t/t1011-read-tree-sparse-checkout.sh | 11
-rwxr-xr-x  t/t1091-sparse-checkout-builtin.sh | 105
-rwxr-xr-x  t/t1400-update-ref.sh | 140
-rwxr-xr-x  t/t2018-checkout-branch.sh | 22
-rwxr-xr-x  t/t3000-ls-files-others.sh | 121
-rwxr-xr-x  t/t3033-merge-toplevel.sh | 22
-rwxr-xr-x  t/t3206-range-diff.sh | 10
-rwxr-xr-x  t/t3420-rebase-autostash.sh | 20
-rwxr-xr-x  t/t3904-stash-patch.sh | 6
-rwxr-xr-x  t/t4013-diff-various.sh | 12
-rw-r--r--  t/t4013/diff.diff-tree_--format=%N_note | 6
-rw-r--r--  t/t4013/diff.diff-tree_--pretty_--notes_note | 12
-rw-r--r--  t/t4013/diff.diff-tree_--pretty_note | 9
-rw-r--r--  t/t4013/diff.log_--decorate=full_--all | 15
-rw-r--r--  t/t4013/diff.log_--decorate_--all | 15
-rwxr-xr-x  t/t4061-diff-indent.sh | 2
-rwxr-xr-x  t/t4067-diff-partial-clone.sh | 48
-rwxr-xr-x  t/t4124-apply-ws-rule.sh | 7
-rwxr-xr-x  t/t4202-log.sh | 51
-rwxr-xr-x  t/t4216-log-bloom.sh | 155
-rwxr-xr-x  t/t4254-am-corrupt.sh | 53
-rwxr-xr-x  t/t5003-archive-zip.sh | 24
-rwxr-xr-x  t/t5318-commit-graph.sh | 4
-rwxr-xr-x  t/t5319-multi-pack-index.sh | 8
-rwxr-xr-x  t/t5324-split-commit-graph.sh | 52
-rwxr-xr-x  t/t5504-fetch-receive-strict.sh | 1
-rwxr-xr-x  t/t5516-fetch-push.sh | 1
-rwxr-xr-x  t/t5520-pull.sh | 57
-rwxr-xr-x  t/t5537-fetch-shallow.sh | 99
-rwxr-xr-x  t/t5541-http-push-smart.sh | 12
-rwxr-xr-x  t/t5543-atomic-push.sh | 89
-rwxr-xr-x  t/t5548-push-porcelain.sh | 279
-rwxr-xr-x  t/t5616-partial-clone.sh | 2
-rwxr-xr-x  t/t5703-upload-pack-ref-in-want.sh | 5
-rwxr-xr-x  t/t6030-bisect-porcelain.sh | 8
-rwxr-xr-x  t/t7063-status-untracked-cache.sh | 54
-rwxr-xr-x  t/t7408-submodule-reference.sh | 8
-rwxr-xr-x  t/t7508-status.sh | 2
-rwxr-xr-x  t/t7600-merge.sh | 154
-rwxr-xr-x  t/t7810-grep.sh | 47
-rwxr-xr-x  t/t9141-git-svn-multiple-branches.sh | 8
-rwxr-xr-x  t/t9160-git-svn-preserve-empty-dirs.sh | 4
-rwxr-xr-x  t/t9164-git-svn-dcommit-concurrent.sh | 4
-rwxr-xr-x  t/t9819-git-p4-case-folding.sh | 2
-rwxr-xr-x  t/t9902-completion.sh | 5
-rw-r--r--  t/test-lib-functions.sh | 2
-rw-r--r--  t/test-lib.sh | 23
-rw-r--r--  transport-helper.c | 23
-rw-r--r--  transport.c | 24
-rw-r--r--  transport.h | 3
-rw-r--r--  tree-diff.c | 6
-rw-r--r--  unpack-trees.c | 260
-rw-r--r--  unpack-trees.h | 28
-rw-r--r--  userdiff.c | 4
-rw-r--r--  wt-status.c | 6
204 files changed, 6191 insertions, 2819 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000000..fd4df939b5
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,230 @@
+name: CI/PR
+
+on: [push, pull_request]
+
+env:
+ DEVELOPER: 1
+
+jobs:
+ windows-build:
+ runs-on: windows-latest
+ steps:
+ - uses: actions/checkout@v1
+ - name: download git-sdk-64-minimal
+ shell: bash
+ run: a=git-sdk-64-minimal && mkdir -p $a && curl -# https://wingit.blob.core.windows.net/ci-artifacts/$a.tar.xz | tar -C $a -xJf -
+ - name: build
+ shell: powershell
+ env:
+ HOME: ${{runner.workspace}}
+ MSYSTEM: MINGW64
+ NO_PERL: 1
+ run: |
+ & .\git-sdk-64-minimal\usr\bin\bash.exe -lc @"
+ printf '%s\n' /git-sdk-64-minimal/ >>.git/info/exclude
+
+ ci/make-test-artifacts.sh artifacts
+ "@
+ - name: upload build artifacts
+ uses: actions/upload-artifact@v1
+ with:
+ name: windows-artifacts
+ path: artifacts
+ windows-test:
+ runs-on: windows-latest
+ needs: [windows-build]
+ strategy:
+ matrix:
+ nr: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ steps:
+ - uses: actions/checkout@v1
+ - name: download git-sdk-64-minimal
+ shell: bash
+ run: a=git-sdk-64-minimal && mkdir -p $a && curl -# https://wingit.blob.core.windows.net/ci-artifacts/$a.tar.xz | tar -C $a -xJf -
+ - name: download build artifacts
+ uses: actions/download-artifact@v1
+ with:
+ name: windows-artifacts
+ path: ${{github.workspace}}
+ - name: extract build artifacts
+ shell: bash
+ run: tar xf artifacts.tar.gz
+ - name: test
+ shell: powershell
+ run: |
+ & .\git-sdk-64-minimal\usr\bin\bash.exe -lc @"
+ # Let Git ignore the SDK
+ printf '%s\n' /git-sdk-64-minimal/ >>.git/info/exclude
+
+ ci/run-test-slice.sh ${{matrix.nr}} 10
+ "@
+ - name: ci/print-test-failures.sh
+ if: failure()
+ shell: powershell
+ run: |
+ & .\git-sdk-64-minimal\usr\bin\bash.exe -lc ci/print-test-failures.sh
+ - name: Upload failed tests' directories
+ if: failure() && env.FAILED_TEST_ARTIFACTS != ''
+ uses: actions/upload-artifact@v1
+ with:
+ name: failed-tests-windows
+ path: ${{env.FAILED_TEST_ARTIFACTS}}
+ vs-build:
+ env:
+ MSYSTEM: MINGW64
+ NO_PERL: 1
+ GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"
+ runs-on: windows-latest
+ steps:
+ - uses: actions/checkout@v1
+ - name: download git-sdk-64-minimal
+ shell: bash
+ run: a=git-sdk-64-minimal && mkdir -p $a && curl -# https://wingit.blob.core.windows.net/ci-artifacts/$a.tar.xz | tar -C $a -xJf -
+ - name: generate Visual Studio solution
+ shell: powershell
+ run: |
+ & .\git-sdk-64-minimal\usr\bin\bash.exe -lc @"
+ make NDEBUG=1 DEVELOPER=1 vcxproj
+ "@
+ if (!$?) { exit(1) }
+ - name: download vcpkg artifacts
+ shell: powershell
+ run: |
+ $urlbase = "https://dev.azure.com/git/git/_apis/build/builds"
+ $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=9&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
+ $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[0].resource.downloadUrl
+ (New-Object Net.WebClient).DownloadFile($downloadUrl, "compat.zip")
+ Expand-Archive compat.zip -DestinationPath . -Force
+ Remove-Item compat.zip
+ - name: add msbuild to PATH
+ uses: microsoft/setup-msbuild@v1.0.0
+ - name: MSBuild
+ run: msbuild git.sln -property:Configuration=Release -property:Platform=x64 -maxCpuCount:4 -property:PlatformToolset=v142
+ - name: bundle artifact tar
+ shell: powershell
+ env:
+ MSVC: 1
+ VCPKG_ROOT: ${{github.workspace}}\compat\vcbuild\vcpkg
+ run: |
+ & compat\vcbuild\vcpkg_copy_dlls.bat release
+ if (!$?) { exit(1) }
+ & git-sdk-64-minimal\usr\bin\bash.exe -lc @"
+ mkdir -p artifacts &&
+ eval \"`$(make -n artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts 2>&1 | grep ^tar)\"
+ "@
+ - name: upload build artifacts
+ uses: actions/upload-artifact@v1
+ with:
+ name: vs-artifacts
+ path: artifacts
+ vs-test:
+ runs-on: windows-latest
+ needs: [vs-build]
+ strategy:
+ matrix:
+ nr: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ steps:
+ - uses: actions/checkout@v1
+ - name: download git-64-portable
+ shell: bash
+ run: a=git-64-portable && mkdir -p $a && curl -# https://wingit.blob.core.windows.net/ci-artifacts/$a.tar.xz | tar -C $a -xJf -
+ - name: download build artifacts
+ uses: actions/download-artifact@v1
+ with:
+ name: vs-artifacts
+ path: ${{github.workspace}}
+ - name: extract build artifacts
+ shell: bash
+ run: tar xf artifacts.tar.gz
+ - name: test (parallel)
+ shell: powershell
+ env:
+ MSYSTEM: MINGW64
+ NO_SVN_TESTS: 1
+ GIT_TEST_SKIP_REBASE_P: 1
+ run: |
+ & git-64-portable\git-cmd.exe --command=usr\bin\bash.exe -lc @"
+ # Let Git ignore the SDK and the test-cache
+ printf '%s\n' /git-64-portable/ /test-cache/ >>.git/info/exclude
+
+ cd t &&
+ PATH=\"`$PWD/helper:`$PATH\" &&
+ test-tool.exe run-command testsuite --jobs=10 -V -x --write-junit-xml \
+ `$(test-tool.exe path-utils slice-tests \
+ ${{matrix.nr}} 10 t[0-9]*.sh)
+ "@
+ regular:
+ strategy:
+ matrix:
+ vector:
+ - jobname: linux-clang
+ cc: clang
+ pool: ubuntu-latest
+ - jobname: linux-gcc
+ cc: gcc
+ pool: ubuntu-latest
+ - jobname: osx-clang
+ cc: clang
+ pool: macos-latest
+ - jobname: osx-gcc
+ cc: gcc
+ pool: macos-latest
+ - jobname: GETTEXT_POISON
+ cc: gcc
+ pool: ubuntu-latest
+ env:
+ CC: ${{matrix.vector.cc}}
+ jobname: ${{matrix.vector.jobname}}
+ runs-on: ${{matrix.vector.pool}}
+ steps:
+ - uses: actions/checkout@v1
+ - run: ci/install-dependencies.sh
+ - run: ci/run-build-and-tests.sh
+ - run: ci/print-test-failures.sh
+ if: failure()
+ - name: Upload failed tests' directories
+ if: failure() && env.FAILED_TEST_ARTIFACTS != ''
+ uses: actions/upload-artifact@v1
+ with:
+ name: failed-tests-${{matrix.vector.jobname}}
+ path: ${{env.FAILED_TEST_ARTIFACTS}}
+ dockerized:
+ strategy:
+ matrix:
+ vector:
+ - jobname: linux-musl
+ image: alpine
+ - jobname: Linux32
+ image: daald/ubuntu32:xenial
+ env:
+ jobname: ${{matrix.vector.jobname}}
+ runs-on: ubuntu-latest
+ container: ${{matrix.vector.image}}
+ steps:
+ - uses: actions/checkout@v1
+ - run: ci/install-docker-dependencies.sh
+ - run: ci/run-build-and-tests.sh
+ - run: ci/print-test-failures.sh
+ if: failure()
+ - name: Upload failed tests' directories
+ if: failure() && env.FAILED_TEST_ARTIFACTS != ''
+ uses: actions/upload-artifact@v1
+ with:
+ name: failed-tests-${{matrix.vector.jobname}}
+ path: ${{env.FAILED_TEST_ARTIFACTS}}
+ static-analysis:
+ env:
+ jobname: StaticAnalysis
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v1
+ - run: ci/install-dependencies.sh
+ - run: ci/run-static-analysis.sh
+ documentation:
+ env:
+ jobname: Documentation
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v1
+ - run: ci/install-dependencies.sh
+ - run: ci/test-documentation.sh
diff --git a/.travis.yml b/.travis.yml
index fc5730b085..05f3e3f8d7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ compiler:
matrix:
include:
- - env: jobname=GIT_TEST_GETTEXT_POISON
+ - env: jobname=GETTEXT_POISON
os: linux
compiler:
addons:
@@ -32,7 +32,15 @@ matrix:
services:
- docker
before_install:
- script: ci/run-linux32-docker.sh
+ script: ci/run-docker.sh
+ - env: jobname=linux-musl
+ os: linux
+ compiler:
+ addons:
+ services:
+ - docker
+ before_install:
+ script: ci/run-docker.sh
- env: jobname=StaticAnalysis
os: linux
compiler:
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 59e6ce3a2a..15d9d04f31 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -150,32 +150,9 @@ endif
-include ../config.mak.autogen
-include ../config.mak
-#
-# For docbook-xsl ...
-# -1.68.1, no extra settings are needed?
-# 1.69.0, set ASCIIDOC_ROFF?
-# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP?
-# 1.71.1, set ASCIIDOC_ROFF?
-# 1.72.0, set DOCBOOK_XSL_172.
-# 1.73.0-, no extra settings are needed
-#
-
-ifdef DOCBOOK_XSL_172
-ASCIIDOC_EXTRA += -a git-asciidoc-no-roff
-MANPAGE_XSL = manpage-1.72.xsl
-else
- ifndef ASCIIDOC_ROFF
- # docbook-xsl after 1.72 needs the regular XSL, but will not
- # pass-thru raw roff codes from asciidoc.conf, so turn them off.
- ASCIIDOC_EXTRA += -a git-asciidoc-no-roff
- endif
-endif
ifndef NO_MAN_BOLD_LITERAL
XMLTO_EXTRA += -m manpage-bold-literal.xsl
endif
-ifdef DOCBOOK_SUPPRESS_SP
-XMLTO_EXTRA += -m manpage-suppress-sp.xsl
-endif
# Newer DocBook stylesheet emits warning cruft in the output when
# this is not set, and if set it shows an absolute link. Older
diff --git a/Documentation/RelNotes/2.27.0.txt b/Documentation/RelNotes/2.27.0.txt
index 0bd2dc713d..680dfe661a 100644
--- a/Documentation/RelNotes/2.27.0.txt
+++ b/Documentation/RelNotes/2.27.0.txt
@@ -76,6 +76,19 @@ UI, Workflows & Features
* The 'pack.useSparse' configuration variable now defaults to 'true',
enabling an optimization that has been experimental since Git 2.21.
+ * "git rebase" happens to call some hooks meant for "checkout" and
+ "commit" by this was not a designed behaviour than historical
+ accident. This has been documented.
+
+ * "git merge" learns the "--autostash" option.
+
+ * "sparse-checkout" UI improvements.
+
+ * "git update-ref --stdin" learned a handful of new verbs to let the
+ user control ref update transactions more explicitly, which helps
+ as an ingredient to implement two-phase commit-style atomic
+ ref-updates across multiple repositories.
+
Performance, Internal Implementation, Development Support etc.
@@ -102,6 +115,28 @@ Performance, Internal Implementation, Development Support etc.
* A Windows-specific test element has been made more robust against
misuse from both user's environment and programmer's errors.
+ * Various tests have been updated to work around issues found with
+ shell utilities that come with busybox etc.
+
+ * The config API made mixed uses of int and size_t types to represent
+ length of various pieces of text it parsed, which has been updated
+ to use the correct type (i.e. size_t) throughout.
+
+ * The "--decorate-refs" and "--decorate-refs-exclude" options "git
+ log" takes have learned a companion configuration variable
+ log.excludeDecoration that sits at the lowest priority in the
+ family.
+
+ * A new CI job to build and run the test suite on Linux with musl libc
+ has been added.
+
+ * Update the CI configuration to use GitHub Actions, retiring the one
+ based on Azure Pipelines.
+
+ * The directory traversal code had redundant recursive calls which
+ made its performance characteristics exponential with respect to
+ the depth of the tree, which was corrected.
+
Fixes since v2.26
-----------------
@@ -195,6 +230,81 @@ Fixes since v2.26
* Parsing the host part out of URL for the credential helper has been corrected.
(merge 4c5971e18a jk/credential-parsing-end-of-host-in-URL later to maint).
+ * Document the recommended way to abort a failing test early (e.g. by
+ exiting a loop), which is to say "return 1".
+ (merge 7cc112dc95 jc/doc-test-leaving-early later to maint).
+
+ * The code that refreshes the last access and modified time of
+ on-disk packfiles and loose object files has been updated.
+ (merge 312cd76130 lr/freshen-file-fix later to maint).
+
+ * Validation of push certificate has been made more robust against
+ timing attacks.
+ (merge 719483e547 bc/constant-memequal later to maint).
+
+ * The custom hash function used by "git fast-import" has been
+ replaced with the one from hashmap.c, which gave us a nice
+ performance boost.
+ (merge d8410a816b jk/fast-import-use-hashmap later to maint).
+
+ * The "git submodule" command did not initialize a few variables it
+ internally uses and was affected by variable settings leaked from
+ the environment.
+ (merge 65d100c4dd lx/submodule-clear-variables later to maint).
+
+ * Raise the minimum required version of docbook-xsl package to 1.74,
+ as 1.74.0 was from late 2008, which is more than 10 years old, and
+ drop compatibility cruft from our documentation suite.
+ (merge 3c255ad660 ma/doc-discard-docbook-xsl-1.73 later to maint).
+
+ * "git log" learns "--[no-]mailmap" as a synonym to "--[no-]use-mailmap"
+ (merge 88acccda38 jc/log-no-mailmap later to maint).
+
+ * "git commit-graph write --expire-time=<timestamp>" did not use the
+ given timestamp correctly, which has been corrected.
+ (merge b09b785c78 ds/commit-graph-expiry-fix later to maint).
+
+ * Tests updated to use "test-chmtime" instead of "touch -t".
+ (merge e892a56845 ds/t5319-touch-fix later to maint).
+
+ * "git diff" in a partial clone learned to avoid lazy loading blob
+ objects in more cases when they are not needed.
+ (merge 95acf11a3d jt/avoid-prefetch-when-able-in-diff later to maint).
+
+ * "git push --atomic" used to show failures for refs that weren't
+ even pushed, which has been corrected.
+ (merge dfe1b7f19c jx/atomic-push later to maint).
+
+ * Code in builtin/*, i.e. code that can only be called from within
+ built-in subcommands, that implements the bulk of a couple of
+ subcommands has been moved to libgit.a so that it could be used
+ by others.
+ (merge 9460fd48b5 dl/libify-a-few later to maint).
+
+ * Allowing the user to split a patch hunk during "git stash -p" does
+ not work well; a band-aid has been added to make this (partially)
+ work better.
+
+ * "git diff-tree --pretty --notes" used to hit an assertion failure,
+ as it forgot to initialize the notes subsystem.
+ (merge 5778b22b3d tb/diff-tree-with-notes later to maint).
+
+ * "git range-diff" fixes.
+ (merge 8d1675eb7f vd/range-diff-with-custom-pretty-format-fix later to maint).
+
+ * "git grep" did not quote a path with unusual character like other
+ commands (like "git diff", "git status") do, but did quote when run
+ from a subdirectory, both of which has been corrected.
+ (merge 45115d8490 mt/grep-cquote-path later to maint).
+
+ * GNU/Hurd is also among the ones that need the fopen() wrapper.
+ (merge 274a1328fb jc/gnu-hurd-lets-fread-read-dirs later to maint).
+
+ * Those fetching over protocol v2 from linux-next and other kernel
+ repositories are reporting that v2 often fetches far more than
+ needed.
+ (merge 11c7f2a30b jn/demote-proto2-from-default later to maint).
+
* Other code cleanup, docfix, build fix, etc.
(merge 564956f358 jc/maintain-doc later to maint).
(merge 7422b2a0a1 sg/commit-slab-clarify-peek later to maint).
@@ -215,3 +325,7 @@ Fixes since v2.26
(merge 0740d0a5d3 jk/oid-array-cleanups later to maint).
(merge a1aba0c95c js/t0007-typofix later to maint).
(merge 76ba7fa225 ma/config-doc-fix later to maint).
+ (merge 826f0c0df2 js/subtree-doc-update-to-asciidoctor-2 later to maint).
+ (merge 88eaf361e0 eb/mboxrd-doc later to maint).
+ (merge 051cc54941 tm/zsh-complete-switch-restore later to maint).
+ (merge 39102cf4fe ms/doc-revision-illustration-fix later to maint).
diff --git a/Documentation/asciidoc.conf b/Documentation/asciidoc.conf
index 8fc4b67081..3e4c13971b 100644
--- a/Documentation/asciidoc.conf
+++ b/Documentation/asciidoc.conf
@@ -31,24 +31,6 @@ ifdef::backend-docbook[]
endif::backend-docbook[]
ifdef::backend-docbook[]
-ifndef::git-asciidoc-no-roff[]
-# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
-# v1.72 breaks with this because it replaces dots not in roff requests.
-[listingblock]
-<example><title>{title}</title>
-<literallayout class="monospaced">
-ifdef::doctype-manpage[]
-&#10;.ft C&#10;
-endif::doctype-manpage[]
-|
-ifdef::doctype-manpage[]
-&#10;.ft&#10;
-endif::doctype-manpage[]
-</literallayout>
-{title#}</example>
-endif::git-asciidoc-no-roff[]
-
-ifdef::git-asciidoc-no-roff[]
ifdef::doctype-manpage[]
# The following two small workarounds insert a simple paragraph after screen
[listingblock]
@@ -67,7 +49,6 @@ ifdef::doctype-manpage[]
{title#}</para></formalpara>
{title%}<simpara></simpara>
endif::doctype-manpage[]
-endif::git-asciidoc-no-roff[]
endif::backend-docbook[]
ifdef::doctype-manpage[]
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 74009d5402..ef0768b91a 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -3,11 +3,12 @@ CONFIGURATION FILE
The Git configuration file contains a number of variables that affect
the Git commands' behavior. The files `.git/config` and optionally
-`config.worktree` (see `extensions.worktreeConfig` below) in each
-repository are used to store the configuration for that repository, and
-`$HOME/.gitconfig` is used to store a per-user configuration as
-fallback values for the `.git/config` file. The file `/etc/gitconfig`
-can be used to store a system-wide default configuration.
+`config.worktree` (see the "CONFIGURATION FILE" section of
+linkgit:git-worktree[1]) in each repository are used to store the
+configuration for that repository, and `$HOME/.gitconfig` is used to
+store a per-user configuration as fallback values for the `.git/config`
+file. The file `/etc/gitconfig` can be used to store a system-wide
+default configuration.
The configuration variables are used by both the Git plumbing
and the porcelains. The variables are divided into sections, wherein
diff --git a/Documentation/config/fetch.txt b/Documentation/config/fetch.txt
index f11940280f..b1a9b1461d 100644
--- a/Documentation/config/fetch.txt
+++ b/Documentation/config/fetch.txt
@@ -1,11 +1,14 @@
fetch.recurseSubmodules::
- This option can be either set to a boolean value or to 'on-demand'.
+ This option controls whether `git fetch` (and the underlying fetch
+ in `git pull`) will recursively fetch into populated submodules.
+ This option can be set either to a boolean value or to 'on-demand'.
Setting it to a boolean changes the behavior of fetch and pull to
- unconditionally recurse into submodules when set to true or to not
- recurse at all when set to false. When set to 'on-demand' (the default
- value), fetch and pull will only recurse into a populated submodule
- when its superproject retrieves a commit that updates the submodule's
+ recurse unconditionally into submodules when set to true or to not
+ recurse at all when set to false. When set to 'on-demand', fetch and
+ pull will only recurse into a populated submodule when its
+ superproject retrieves a commit that updates the submodule's
reference.
+ Defaults to 'on-demand', or to the value of 'submodule.recurse' if set.
fetch.fsckObjects::
If it is set to true, git-fetch-pack will check all fetched
diff --git a/Documentation/config/log.txt b/Documentation/config/log.txt
index e9e1e397f3..208d5fdcaa 100644
--- a/Documentation/config/log.txt
+++ b/Documentation/config/log.txt
@@ -18,6 +18,12 @@ log.decorate::
names are shown. This is the same as the `--decorate` option
of the `git log`.
+log.excludeDecoration::
+ Exclude the specified patterns from the log decorations. This is
+ similar to the `--decorate-refs-exclude` command-line option, but
+ the config option can be overridden by the `--decorate-refs`
+ option.
+
log.follow::
If `true`, `git log` will act as if the `--follow` option was used when
a single <path> is given. This has the same limitations as `--follow`,
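For illustration (not part of the patch; the ref patterns are hypothetical), the new option might be used like this:

    # hide a family of refs from decorations by default
    git config log.excludeDecoration 'refs/prefetch/*'
    # an explicit --decorate-refs pattern still overrides the exclusion
    git log -1 --decorate --decorate-refs='refs/heads/*'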
diff --git a/Documentation/config/merge.txt b/Documentation/config/merge.txt
index 6a313937f8..cb2ed58907 100644
--- a/Documentation/config/merge.txt
+++ b/Documentation/config/merge.txt
@@ -70,6 +70,16 @@ merge.stat::
Whether to print the diffstat between ORIG_HEAD and the merge result
at the end of the merge. True by default.
+merge.autoStash::
+ When set to true, automatically create a temporary stash entry
+ before the operation begins, and apply it after the operation
+ ends. This means that you can run merge on a dirty worktree.
+ However, use with care: the final stash application after a
+ successful merge might result in non-trivial conflicts.
+ This option can be overridden by the `--no-autostash` and
+ `--autostash` options of linkgit:git-merge[1].
+ Defaults to false.
+
merge.tool::
Controls which merge tool is used by linkgit:git-mergetool[1].
The list below shows the valid built-in values.
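A rough usage sketch of the new setting and the matching command-line option (the branch name is illustrative):

    # stash local changes around the merge automatically
    git config merge.autoStash true
    git merge topic
    # one-off equivalent without touching the configuration
    git merge --autostash topic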
diff --git a/Documentation/config/protocol.txt b/Documentation/config/protocol.txt
index 756591d77b..0b40141613 100644
--- a/Documentation/config/protocol.txt
+++ b/Documentation/config/protocol.txt
@@ -48,7 +48,7 @@ protocol.version::
If set, clients will attempt to communicate with a server
using the specified protocol version. If the server does
not support it, communication falls back to version 0.
- If unset, the default is `2`.
+ If unset, the default is `0`.
Supported versions:
+
--
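Users who want the newer wire protocol back can opt in explicitly; a minimal sketch:

    git config --global protocol.version 2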
diff --git a/Documentation/config/push.txt b/Documentation/config/push.txt
index 0a7aa322a9..f5e5b38c68 100644
--- a/Documentation/config/push.txt
+++ b/Documentation/config/push.txt
@@ -112,3 +112,5 @@ push.recurseSubmodules::
is 'no' then default behavior of ignoring submodules when pushing
is retained. You may override this configuration at time of push by
specifying '--recurse-submodules=check|on-demand|no'.
+ If not set, 'no' is used by default, unless 'submodule.recurse' is
+ set (in which case a 'true' value means 'on-demand').
diff --git a/Documentation/config/submodule.txt b/Documentation/config/submodule.txt
index b33177151c..d7a63c8c12 100644
--- a/Documentation/config/submodule.txt
+++ b/Documentation/config/submodule.txt
@@ -59,9 +59,17 @@ submodule.active::
submodule.recurse::
Specifies if commands recurse into submodules by default. This
- applies to all commands that have a `--recurse-submodules` option,
- except `clone`.
+ applies to all commands that have a `--recurse-submodules` option
+ (`checkout`, `fetch`, `grep`, `pull`, `push`, `read-tree`, `reset`,
+ `restore` and `switch`) except `clone` and `ls-files`.
Defaults to false.
+ When set to true, it can be deactivated via the
+ `--no-recurse-submodules` option. Note that some Git commands
+ lacking this option may call some of the above commands affected by
+ `submodule.recurse`; for instance `git remote update` will call
+ `git fetch` but does not have a `--no-recurse-submodules` option.
+ For these commands a workaround is to temporarily change the
+ configuration value by using `git -c submodule.recurse=0`.
submodule.fetchJobs::
Specifies how many submodules are fetched/cloned at the same time.
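A short sketch of the workaround described above (the chosen command is just an example):

    git config submodule.recurse true
    # one-off override for a command that has no --no-recurse-submodules
    git -c submodule.recurse=0 remote update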
diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt
index 05709f67a1..6e2a160a47 100644
--- a/Documentation/fetch-options.txt
+++ b/Documentation/fetch-options.txt
@@ -163,7 +163,8 @@ ifndef::git-pull[]
value. Use 'on-demand' to only recurse into a populated submodule
when the superproject retrieves a commit that updates the submodule's
reference to a commit that isn't already in the local submodule
- clone.
+ clone. By default, 'on-demand' is used, unless
+ `fetch.recurseSubmodules` is set (see linkgit:git-config[1]).
endif::git-pull[]
-j::
diff --git a/Documentation/git-checkout.txt b/Documentation/git-checkout.txt
index c8fb995fa7..5b697eee1b 100644
--- a/Documentation/git-checkout.txt
+++ b/Documentation/git-checkout.txt
@@ -292,11 +292,11 @@ Note that this option uses the no overlay mode by default (see also
--recurse-submodules::
--no-recurse-submodules::
- Using `--recurse-submodules` will update the content of all initialized
+ Using `--recurse-submodules` will update the content of all active
submodules according to the commit recorded in the superproject. If
local modifications in a submodule would be overwritten the checkout
will fail unless `-f` is used. If nothing (or `--no-recurse-submodules`)
- is used, the work trees of submodules will not be updated.
+ is used, the submodules' working trees will not be updated.
Just like linkgit:git-submodule[1], this will detach `HEAD` of the
submodule.
diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt
index 28d1fee505..53a650225a 100644
--- a/Documentation/git-commit-graph.txt
+++ b/Documentation/git-commit-graph.txt
@@ -57,11 +57,23 @@ or `--stdin-packs`.)
With the `--append` option, include all commits that are present in the
existing commit-graph file.
+
-With the `--split` option, write the commit-graph as a chain of multiple
-commit-graph files stored in `<dir>/info/commit-graphs`. The new commits
-not already in the commit-graph are added in a new "tip" file. This file
-is merged with the existing file if the following merge conditions are
-met:
+With the `--changed-paths` option, compute and write information about the
+paths changed between a commit and its first parent. This operation can
+take a while on large repositories. It provides significant performance gains
+for getting the history of a directory or a file with `git log -- <path>`.
++
+With the `--split[=<strategy>]` option, write the commit-graph as a
+chain of multiple commit-graph files stored in
+`<dir>/info/commit-graphs`. Commit-graph layers are merged based on the
+strategy and other splitting options. The new commits not already in the
+commit-graph are added in a new "tip" file. This file is merged with the
+existing file if the following merge conditions are met:
+* If `--split=no-merge` is specified, a merge is never performed, and
+the remaining options are ignored. `--split=replace` overwrites the
+existing chain with a new one. A bare `--split` defers to the remaining
+options. (Note that merging a chain of commit graphs replaces the
+existing chain with a length-1 chain where the first and only
+incremental holds the entire graph).
+
* If `--size-multiple=<X>` is not specified, let `X` equal 2. If the new
tip file would have `N` commits and the previous tip has `M` commits and
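A hedged example of how the new options combine (the path is illustrative; `--reachable` is an existing write option):

    # write a split commit-graph with changed-path Bloom filters
    git commit-graph write --reachable --changed-paths --split
    # path-limited history walks can then consult the filters
    git log -- Documentation/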
diff --git a/Documentation/git-grep.txt b/Documentation/git-grep.txt
index ddb6acc025..a7f9bc99ea 100644
--- a/Documentation/git-grep.txt
+++ b/Documentation/git-grep.txt
@@ -93,7 +93,7 @@ OPTIONS
with `--no-index`.
--recurse-submodules::
- Recursively search in each submodule that has been initialized and
+ Recursively search in each submodule that is active and
checked out in the repository. When used in combination with the
<tree> option the prefix of all submodule output will be the name of
the parent project's <tree> object. This option has no effect
@@ -206,8 +206,10 @@ providing this option will cause it to die.
-z::
--null::
- Output \0 instead of the character that normally follows a
- file name.
+ Use \0 as the delimiter for pathnames in the output, and print
+ them verbatim. Without this option, pathnames with "unusual"
+ characters are quoted as explained for the configuration
+ variable core.quotePath (see git-config(1)).
-o::
--only-matching::
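A small illustration of the clarified `-z` behaviour (pattern and pathspec are arbitrary):

    # NUL-delimited, unquoted paths pair safely with xargs -0
    git grep -z -l 'strbuf' -- '*.c' | xargs -0 wc -l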
diff --git a/Documentation/git-log.txt b/Documentation/git-log.txt
index bed09bb09e..20e6d21a74 100644
--- a/Documentation/git-log.txt
+++ b/Documentation/git-log.txt
@@ -43,12 +43,16 @@ OPTIONS
If no `--decorate-refs` is given, pretend as if all refs were
included. For each candidate, do not use it for decoration if it
matches any patterns given to `--decorate-refs-exclude` or if it
- doesn't match any of the patterns given to `--decorate-refs`.
+ doesn't match any of the patterns given to `--decorate-refs`. The
+ `log.excludeDecoration` config option allows excluding refs from
+ the decorations, but an explicit `--decorate-refs` pattern will
+ override a match in `log.excludeDecoration`.
--source::
Print out the ref name given on the command line by which each
commit was reached.
+--[no-]mailmap::
--[no-]use-mailmap::
Use mailmap file to map author and committer names and email
addresses to canonical real names and email addresses. See
diff --git a/Documentation/git-ls-files.txt b/Documentation/git-ls-files.txt
index 8461c0e83e..3cb2ebb438 100644
--- a/Documentation/git-ls-files.txt
+++ b/Documentation/git-ls-files.txt
@@ -148,7 +148,7 @@ a space) at the start of each line:
top directory.
--recurse-submodules::
- Recursively calls ls-files on each submodule in the repository.
+ Recursively calls ls-files on each active submodule in the repository.
Currently there is only support for the --cached mode.
--abbrev[=<n>]::
diff --git a/Documentation/git-merge.txt b/Documentation/git-merge.txt
index 092529c619..ec06b2f8c2 100644
--- a/Documentation/git-merge.txt
+++ b/Documentation/git-merge.txt
@@ -94,7 +94,8 @@ will be appended to the specified message.
--abort::
Abort the current conflict resolution process, and
- try to reconstruct the pre-merge state.
+ try to reconstruct the pre-merge state. If an autostash entry is
+ present, apply it to the worktree.
+
If there were uncommitted worktree changes present when the merge
started, 'git merge --abort' will in some cases be unable to
@@ -102,11 +103,15 @@ reconstruct these changes. It is therefore recommended to always
commit or stash your changes before running 'git merge'.
+
'git merge --abort' is equivalent to 'git reset --merge' when
-`MERGE_HEAD` is present.
+`MERGE_HEAD` is present unless `MERGE_AUTOSTASH` is also present in
+which case 'git merge --abort' applies the stash entry to the worktree
+whereas 'git reset --merge' will save the stashed changes in the stash
+reflog.
--quit::
Forget about the current merge in progress. Leave the index
- and the working tree as-is.
+ and the working tree as-is. If `MERGE_AUTOSTASH` is present, the
+ stash entry will be saved to the stash reflog.
--continue::
After a 'git merge' stops due to conflicts you can conclude the
diff --git a/Documentation/git-pull.txt b/Documentation/git-pull.txt
index 21e10905fa..5c3fb67c01 100644
--- a/Documentation/git-pull.txt
+++ b/Documentation/git-pull.txt
@@ -85,8 +85,9 @@ OPTIONS
Pass --verbose to git-fetch and git-merge.
--[no-]recurse-submodules[=yes|on-demand|no]::
- This option controls if new commits of all populated submodules should
- be fetched and updated, too (see linkgit:git-config[1] and
+ This option controls if new commits of populated submodules should
+ be fetched, and if the working trees of active submodules should be
+ updated, too (see linkgit:git-fetch[1], linkgit:git-config[1] and
linkgit:gitmodules[5]).
+
If the checkout is done via rebase, local submodule commits are rebased as well.
@@ -133,15 +134,6 @@ unless you have read linkgit:git-rebase[1] carefully.
--no-rebase::
Override earlier --rebase.
---autostash::
---no-autostash::
- Before starting rebase, stash local modifications away (see
- linkgit:git-stash[1]) if needed, and apply the stash entry when
- done. `--no-autostash` is useful to override the `rebase.autoStash`
- configuration variable (see linkgit:git-config[1]).
-+
-This option is only valid when "--rebase" is used.
-
Options related to fetching
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/git-read-tree.txt b/Documentation/git-read-tree.txt
index da33f84f33..5fa8bab64c 100644
--- a/Documentation/git-read-tree.txt
+++ b/Documentation/git-read-tree.txt
@@ -116,9 +116,9 @@ OPTIONS
located in.
--[no-]recurse-submodules::
- Using --recurse-submodules will update the content of all initialized
+ Using --recurse-submodules will update the content of all active
submodules according to the commit recorded in the superproject by
- calling read-tree recursively, also setting the submodules HEAD to be
+ calling read-tree recursively, also setting the submodules' HEAD to be
detached at that commit.
--no-sparse-checkout::
diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt
index bed500f151..c70c1ec5e0 100644
--- a/Documentation/git-rebase.txt
+++ b/Documentation/git-rebase.txt
@@ -256,7 +256,8 @@ See also INCOMPATIBLE OPTIONS below.
--quit::
Abort the rebase operation but HEAD is not reset back to the
original branch. The index and working tree are also left
- unchanged as a result.
+ unchanged as a result. If a temporary stash entry was created
+ using --autostash, it will be saved to the stash reflog.
--apply:
Use applying strategies to rebase (calling `git-am`
@@ -722,9 +723,17 @@ Hooks
~~~~~
The apply backend has not traditionally called the post-commit hook,
-while the merge backend has. However, this was by accident of
-implementation rather than by design. Both backends should have the
-same behavior, though it is not clear which one is correct.
+while the merge backend has. Both have called the post-checkout hook,
+though the merge backend has squelched its output. Further, both
+backends only call the post-checkout hook with the starting point
+commit of the rebase, not the intermediate commits nor the final
+commit. In each case, the calling of these hooks was by accident of
+implementation rather than by design (both backends were originally
+implemented as shell scripts and happened to invoke other commands
+like 'git checkout' or 'git commit' that would call the hooks). Both
+backends should have the same behavior, though it is not entirely
+clear which, if any, is correct. We will likely make rebase stop
+calling either of these hooks in the future.
Interruptability
~~~~~~~~~~~~~~~~
diff --git a/Documentation/git-reset.txt b/Documentation/git-reset.txt
index 932080c55d..252e2d4e47 100644
--- a/Documentation/git-reset.txt
+++ b/Documentation/git-reset.txt
@@ -87,6 +87,12 @@ but carries forward unmerged index entries.
different between `<commit>` and `HEAD`.
If a file that is different between `<commit>` and `HEAD` has local
changes, reset is aborted.
+
+--[no-]recurse-submodules::
+ When the working tree is updated, using --recurse-submodules will
+ also recursively reset the working tree of all active submodules
+ according to the commit recorded in the superproject, also setting
+ the submodules' HEAD to be detached at that commit.
--
See "Reset, restore and revert" in linkgit:git[1] for the differences
diff --git a/Documentation/git-restore.txt b/Documentation/git-restore.txt
index 5bf60d4943..8e3b339802 100644
--- a/Documentation/git-restore.txt
+++ b/Documentation/git-restore.txt
@@ -107,6 +107,17 @@ in linkgit:git-checkout[1] for details.
patterns and unconditionally restores any files in
`<pathspec>`.
+--recurse-submodules::
+--no-recurse-submodules::
+ If `<pathspec>` names an active submodule and the restore location
+ includes the working tree, the submodule will only be updated if
+ this option is given, in which case its working tree will be
+ restored to the commit recorded in the superproject, and any local
+ modifications overwritten. If nothing (or
+ `--no-recurse-submodules`) is used, the submodules' working trees will
+ not be updated. Just like linkgit:git-checkout[1], this will detach
+ `HEAD` of the submodule.
+
--overlay::
--no-overlay::
In overlay mode, the command never removes files when
diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt
index c0342e5393..1a3ace6082 100644
--- a/Documentation/git-sparse-checkout.txt
+++ b/Documentation/git-sparse-checkout.txt
@@ -70,6 +70,16 @@ C-style quoted strings.
`core.sparseCheckoutCone` is enabled, the given patterns are interpreted
as directory names as in the 'set' subcommand.
+'reapply'::
+ Reapply the sparsity pattern rules to paths in the working tree.
+ Commands like merge or rebase can materialize paths to do their
+ work (e.g. in order to show you a conflict), and other
+ sparse-checkout commands might fail to sparsify an individual file
+ (e.g. because it has unstaged changes or conflicts). In such
+ cases, it can make sense to run `git sparse-checkout reapply` later
+ after cleaning up affected paths (e.g. resolving conflicts, undoing
+ or committing changes, etc.).
+
'disable'::
Disable the `core.sparseCheckout` config setting, and restore the
working directory to include all files. Leaves the sparse-checkout
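A minimal sketch of the workflow the new subcommand targets:

    # once conflicted paths are resolved or committed, re-sparsify them
    git sparse-checkout reapply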
diff --git a/Documentation/git-switch.txt b/Documentation/git-switch.txt
index 197900363b..3759c3a265 100644
--- a/Documentation/git-switch.txt
+++ b/Documentation/git-switch.txt
@@ -181,9 +181,9 @@ name, the guessing is aborted. You can explicitly give a name with
--recurse-submodules::
--no-recurse-submodules::
Using `--recurse-submodules` will update the content of all
- initialized submodules according to the commit recorded in the
+ active submodules according to the commit recorded in the
superproject. If nothing (or `--no-recurse-submodules`) is
- used, the work trees of submodules will not be updated. Just
+ used, the submodules' working trees will not be updated. Just
like linkgit:git-submodule[1], this will detach `HEAD` of the
submodules.
diff --git a/Documentation/git-update-ref.txt b/Documentation/git-update-ref.txt
index 9671423117..3e737c2360 100644
--- a/Documentation/git-update-ref.txt
+++ b/Documentation/git-update-ref.txt
@@ -66,6 +66,10 @@ performs all modifications together. Specify commands of the form:
delete SP <ref> [SP <oldvalue>] LF
verify SP <ref> [SP <oldvalue>] LF
option SP <opt> LF
+ start LF
+ prepare LF
+ commit LF
+ abort LF
With `--create-reflog`, update-ref will create a reflog for each ref
even if one would not ordinarily be created.
@@ -83,6 +87,10 @@ quoting:
delete SP <ref> NUL [<oldvalue>] NUL
verify SP <ref> NUL [<oldvalue>] NUL
option SP <opt> NUL
+ start NUL
+ prepare NUL
+ commit NUL
+ abort NUL
In this format, use 40 "0" to specify a zero value, and use the empty
string to specify a missing value.
@@ -107,13 +115,31 @@ delete::
verify::
Verify <ref> against <oldvalue> but do not change it. If
- <oldvalue> zero or missing, the ref must not exist.
+ <oldvalue> is zero or missing, the ref must not exist.
option::
Modify behavior of the next command naming a <ref>.
The only valid option is `no-deref` to avoid dereferencing
a symbolic ref.
+start::
+ Start a transaction. In contrast to a non-transactional session, a
+ transaction will automatically abort if the session ends without an
+ explicit commit.
+
+prepare::
+ Prepare to commit the transaction. This will create lock files for all
+ queued reference updates. If one reference could not be locked, the
+ transaction will be aborted.
+
+commit::
+ Commit all reference updates queued for the transaction, ending the
+ transaction.
+
+abort::
+ Abort the transaction, releasing all locks if the transaction is in
+ prepared state.
+
If all <ref>s can be locked with matching <oldvalue>s
simultaneously, all modifications are performed. Otherwise, no
modifications are performed. Note that while each individual
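A sketch of a single-ref transaction using the new verbs ($new and $old are assumed to hold full object names; the ref name is illustrative):

    printf '%s\n' 'start' "update refs/heads/topic $new $old" 'prepare' 'commit' |
    git update-ref --stdin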
diff --git a/Documentation/gitsubmodules.txt b/Documentation/gitsubmodules.txt
index c476f891b5..f9f4e65c9e 100644
--- a/Documentation/gitsubmodules.txt
+++ b/Documentation/gitsubmodules.txt
@@ -271,7 +271,8 @@ will not be checked out by default; You can instruct 'clone' to recurse
into submodules. The 'init' and 'update' subcommands of 'git submodule'
will maintain submodules checked out and at an appropriate revision in
your working tree. Alternatively you can set 'submodule.recurse' to have
-'checkout' recursing into submodules.
+'checkout' recursing into submodules (note that 'submodule.recurse' also
+affects other git commands, see linkgit:git-config[1] for a complete list).
SEE ALSO
diff --git a/Documentation/manpage-1.72.xsl b/Documentation/manpage-1.72.xsl
deleted file mode 100644
index b4d315cb8c..0000000000
--- a/Documentation/manpage-1.72.xsl
+++ /dev/null
@@ -1,14 +0,0 @@
-<!-- manpage-1.72.xsl:
- special settings for manpages rendered from asciidoc+docbook
- handles peculiarities in docbook-xsl 1.72.0 -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<xsl:import href="manpage-base.xsl"/>
-
-<!-- these are the special values for the roff control characters
- needed for docbook-xsl 1.72.0 -->
-<xsl:param name="git.docbook.backslash">&#x2593;</xsl:param>
-<xsl:param name="git.docbook.dot" >&#x2302;</xsl:param>
-
-</xsl:stylesheet>
diff --git a/Documentation/manpage-base.xsl b/Documentation/manpage-base.xsl
deleted file mode 100644
index a264fa6160..0000000000
--- a/Documentation/manpage-base.xsl
+++ /dev/null
@@ -1,35 +0,0 @@
-<!-- manpage-base.xsl:
- special formatting for manpages rendered from asciidoc+docbook -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<!-- these params silence some output from xmlto -->
-<xsl:param name="man.output.quietly" select="1"/>
-<xsl:param name="refentry.meta.get.quietly" select="1"/>
-
-<!-- convert asciidoc callouts to man page format;
- git.docbook.backslash and git.docbook.dot params
- must be supplied by another XSL file or other means -->
-<xsl:template match="co">
- <xsl:value-of select="concat(
- $git.docbook.backslash,'fB(',
- substring-after(@id,'-'),')',
- $git.docbook.backslash,'fR')"/>
-</xsl:template>
-<xsl:template match="calloutlist">
- <xsl:value-of select="$git.docbook.dot"/>
- <xsl:text>sp&#10;</xsl:text>
- <xsl:apply-templates/>
- <xsl:text>&#10;</xsl:text>
-</xsl:template>
-<xsl:template match="callout">
- <xsl:value-of select="concat(
- $git.docbook.backslash,'fB',
- substring-after(@arearefs,'-'),
- '. ',$git.docbook.backslash,'fR')"/>
- <xsl:apply-templates/>
- <xsl:value-of select="$git.docbook.dot"/>
- <xsl:text>br&#10;</xsl:text>
-</xsl:template>
-
-</xsl:stylesheet>
diff --git a/Documentation/manpage-bold-literal.xsl b/Documentation/manpage-bold-literal.xsl
index 94d6c1b545..e13db85693 100644
--- a/Documentation/manpage-bold-literal.xsl
+++ b/Documentation/manpage-bold-literal.xsl
@@ -8,11 +8,9 @@
this makes literal text easier to distinguish in manpages
viewed on a tty -->
<xsl:template match="literal|d:literal">
- <xsl:value-of select="$git.docbook.backslash"/>
- <xsl:text>fB</xsl:text>
+ <xsl:text>\fB</xsl:text>
<xsl:apply-templates/>
- <xsl:value-of select="$git.docbook.backslash"/>
- <xsl:text>fR</xsl:text>
+ <xsl:text>\fR</xsl:text>
</xsl:template>
</xsl:stylesheet>
diff --git a/Documentation/manpage-normal.xsl b/Documentation/manpage-normal.xsl
index a48f5b11f3..a9c7ec69f4 100644
--- a/Documentation/manpage-normal.xsl
+++ b/Documentation/manpage-normal.xsl
@@ -1,13 +1,26 @@
<!-- manpage-normal.xsl:
- special settings for manpages rendered from asciidoc+docbook
- handles anything we want to keep away from docbook-xsl 1.72.0 -->
+ special settings for manpages rendered from asciidoc+docbook -->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
-<xsl:import href="manpage-base.xsl"/>
-<!-- these are the normal values for the roff control characters -->
-<xsl:param name="git.docbook.backslash">\</xsl:param>
-<xsl:param name="git.docbook.dot" >.</xsl:param>
+<!-- these params silence some output from xmlto -->
+<xsl:param name="man.output.quietly" select="1"/>
+<xsl:param name="refentry.meta.get.quietly" select="1"/>
+
+<!-- convert asciidoc callouts to man page format -->
+<xsl:template match="co">
+ <xsl:value-of select="concat('\fB(',substring-after(@id,'-'),')\fR')"/>
+</xsl:template>
+<xsl:template match="calloutlist">
+ <xsl:text>.sp&#10;</xsl:text>
+ <xsl:apply-templates/>
+ <xsl:text>&#10;</xsl:text>
+</xsl:template>
+<xsl:template match="callout">
+ <xsl:value-of select="concat('\fB',substring-after(@arearefs,'-'),'. \fR')"/>
+ <xsl:apply-templates/>
+ <xsl:text>.br&#10;</xsl:text>
+</xsl:template>
</xsl:stylesheet>
diff --git a/Documentation/manpage-suppress-sp.xsl b/Documentation/manpage-suppress-sp.xsl
deleted file mode 100644
index a63c7632a8..0000000000
--- a/Documentation/manpage-suppress-sp.xsl
+++ /dev/null
@@ -1,21 +0,0 @@
-<!-- manpage-suppress-sp.xsl:
- special settings for manpages rendered from asciidoc+docbook
- handles erroneous, inline .sp in manpage output of some
- versions of docbook-xsl -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<!-- attempt to work around spurious .sp at the tail of the line
- that some versions of docbook stylesheets seem to add -->
-<xsl:template match="simpara">
- <xsl:variable name="content">
- <xsl:apply-templates/>
- </xsl:variable>
- <xsl:value-of select="normalize-space($content)"/>
- <xsl:if test="not(ancestor::authorblurb) and
- not(ancestor::personblurb)">
- <xsl:text>&#10;&#10;</xsl:text>
- </xsl:if>
-</xsl:template>
-
-</xsl:stylesheet>
diff --git a/Documentation/merge-options.txt b/Documentation/merge-options.txt
index fb3a6e8d42..80d4831662 100644
--- a/Documentation/merge-options.txt
+++ b/Documentation/merge-options.txt
@@ -160,6 +160,14 @@ ifndef::git-pull[]
endif::git-pull[]
+--autostash::
+--no-autostash::
+ Automatically create a temporary stash entry before the operation
+ begins, and apply it after the operation ends. This means
+ that you can run the operation on a dirty worktree. However, use
+ with care: the final stash application after a successful
+ merge might result in non-trivial conflicts.
+
--allow-unrelated-histories::
By default, `git merge` command refuses to merge histories
that do not share a common ancestor. This option can be
diff --git a/Documentation/pretty-formats.txt b/Documentation/pretty-formats.txt
index a4b6f49186..547a552463 100644
--- a/Documentation/pretty-formats.txt
+++ b/Documentation/pretty-formats.txt
@@ -83,6 +83,12 @@ placeholders, its output is not affected by other options like
<full commit message>
+* 'mboxrd'
++
+Like 'email', but lines in the commit message starting with "From "
+(preceded by zero or more ">") are quoted with ">" so they aren't
+confused as starting a new commit.
+
* 'raw'
+
The 'raw' format shows the entire commit exactly as
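For example (any commit whose body contains a line starting with "From " shows the quoting):

    # body lines beginning with "From " come out as ">From "
    git log -1 --format=mboxrd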
diff --git a/Documentation/revisions.txt b/Documentation/revisions.txt
index 97f995e5a9..1ad95065c1 100644
--- a/Documentation/revisions.txt
+++ b/Documentation/revisions.txt
@@ -233,7 +233,7 @@ G H I J
A = = A^0
B = A^ = A^1 = A~1
- C = A^2 = A^2
+ C = = A^2
D = A^^ = A^1^1 = A~2
E = B^2 = A^^2
F = B^3 = A^^3
diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/technical/commit-graph-format.txt
index a4f17441ae..de56f9f1ef 100644
--- a/Documentation/technical/commit-graph-format.txt
+++ b/Documentation/technical/commit-graph-format.txt
@@ -17,6 +17,9 @@ metadata, including:
- The parents of the commit, stored using positional references within
the graph file.
+- The Bloom filter of the commit carrying the paths that were changed between
+ the commit and its first parent, if requested.
+
These positional references are stored as unsigned 32-bit integers
corresponding to the array position within the list of commit OIDs. Due
to some special constants we use to track parents, we can store at most
@@ -93,6 +96,33 @@ CHUNK DATA:
positions for the parents until reaching a value with the most-significant
bit on. The other bits correspond to the position of the last parent.
+ Bloom Filter Index (ID: {'B', 'I', 'D', 'X'}) (N * 4 bytes) [Optional]
+ * The ith entry, BIDX[i], stores the number of 8-byte word blocks in all
+ Bloom filters from commit 0 to commit i (inclusive) in lexicographic
+ order. The Bloom filter for the i-th commit spans from BIDX[i-1] to
+ BIDX[i] (plus header length), where BIDX[-1] is 0.
+ * The BIDX chunk is ignored if the BDAT chunk is not present.
+
+ Bloom Filter Data (ID: {'B', 'D', 'A', 'T'}) [Optional]
+ * It starts with a header consisting of three unsigned 32-bit integers:
+ - Version of the hash algorithm being used. We currently only support
+ value 1 which corresponds to the 32-bit version of the murmur3 hash
+ implemented exactly as described in
+ https://en.wikipedia.org/wiki/MurmurHash#Algorithm and the double
+ hashing technique using seed values 0x293ae76f and 0x7e646e2 as
+ described in https://doi.org/10.1007/978-3-540-30494-4_26 "Bloom Filters
+ in Probabilistic Verification"
+ - The number of times a path is hashed and hence the number of bit positions
+ that cumulatively determine whether a file is present in the commit.
+ - The minimum number of bits 'b' per entry in the Bloom filter. If the filter
+ contains 'n' entries, then the filter size is the minimum number of 64-bit
+ words that contain n*b bits.
+ * The rest of the chunk is the concatenation of all the computed Bloom
+ filters for the commits in lexicographic order.
+ * Note: Commits with no changes or more than 512 changes have Bloom filters
+ of length zero.
+ * The BDAT chunk is present if and only if BIDX is present.
+
Base Graphs List (ID: {'B', 'A', 'S', 'E'}) [Optional]
This list of H-byte hashes describe a set of B commit-graph files that
form a commit-graph chain. The graph position for the ith commit in this
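A worked sizing example for the BDAT chunk (numbers are illustrative, assuming b = 10 bits per entry):

    # n = 25 changed paths, b = 10 bits/entry -> 250 bits -> 64-bit words
    echo $(( (25 * 10 + 63) / 64 ))    # prints 4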
diff --git a/Documentation/user-manual.conf b/Documentation/user-manual.conf
index d87294de2f..0148f126dc 100644
--- a/Documentation/user-manual.conf
+++ b/Documentation/user-manual.conf
@@ -9,13 +9,3 @@ tilde=&#126;
[linkgit-inlinemacro]
<ulink url="{target}.html">{target}{0?({0})}</ulink>
-
-ifdef::backend-docbook[]
-# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
-[listingblock]
-<example><title>{title}</title>
-<literallayout class="monospaced">
-|
-</literallayout>
-{title#}</example>
-endif::backend-docbook[]
diff --git a/INSTALL b/INSTALL
index 22c364f34f..9ba33e6a14 100644
--- a/INSTALL
+++ b/INSTALL
@@ -206,9 +206,7 @@ Issues of note:
clone two separate git-htmldocs and git-manpages repositories next
to the clone of git itself.
- It has been reported that docbook-xsl version 1.72 and 1.73 are
- buggy; 1.72 misformats manual pages for callouts, and 1.73 needs
- the patch in contrib/patches/docbook-xsl-manpages-charmap.patch
+ The minimum supported version of docbook-xsl is 1.74.
Users attempting to build the documentation on Cygwin may need to ensure
that the /etc/xml/catalog file looks something like this:
diff --git a/Makefile b/Makefile
index dc356ce4dd..7abdb1d352 100644
--- a/Makefile
+++ b/Makefile
@@ -282,12 +282,6 @@ all::
# Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks
# field that counts the on-disk footprint in 512-byte blocks.
#
-# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72
-# (not v1.73 or v1.71).
-#
-# Define ASCIIDOC_ROFF if your DocBook XSL does not escape raw roff directives
-# (versions 1.68.1 through v1.72).
-#
# Define GNU_ROFF if your target system uses GNU groff. This forces
# apostrophes to be ASCII so that cut&pasting examples to the shell
# will work.
@@ -616,8 +610,8 @@ SCRIPT_SH += git-web--browse.sh
SCRIPT_LIB += git-mergetool--lib
SCRIPT_LIB += git-parse-remote
SCRIPT_LIB += git-rebase--preserve-merges
-SCRIPT_LIB += git-sh-setup
SCRIPT_LIB += git-sh-i18n
+SCRIPT_LIB += git-sh-setup
SCRIPT_PERL += git-add--interactive.perl
SCRIPT_PERL += git-archimport.perl
@@ -685,9 +679,9 @@ PROGRAM_OBJS += daemon.o
PROGRAM_OBJS += fast-import.o
PROGRAM_OBJS += http-backend.o
PROGRAM_OBJS += imap-send.o
+PROGRAM_OBJS += remote-testsvn.o
PROGRAM_OBJS += sh-i18n--envsubst.o
PROGRAM_OBJS += shell.o
-PROGRAM_OBJS += remote-testsvn.o
# Binary suffix, set to .exe for Windows builds
X =
@@ -695,6 +689,7 @@ X =
PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
TEST_BUILTINS_OBJS += test-advise.o
+TEST_BUILTINS_OBJS += test-bloom.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
TEST_BUILTINS_OBJS += test-ctype.o
@@ -709,15 +704,16 @@ TEST_BUILTINS_OBJS += test-dump-untracked-cache.o
TEST_BUILTINS_OBJS += test-example-decorate.o
TEST_BUILTINS_OBJS += test-genrandom.o
TEST_BUILTINS_OBJS += test-genzeros.o
+TEST_BUILTINS_OBJS += test-hash-speed.o
TEST_BUILTINS_OBJS += test-hash.o
TEST_BUILTINS_OBJS += test-hashmap.o
-TEST_BUILTINS_OBJS += test-hash-speed.o
TEST_BUILTINS_OBJS += test-index-version.o
TEST_BUILTINS_OBJS += test-json-writer.o
TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
TEST_BUILTINS_OBJS += test-match-trees.o
TEST_BUILTINS_OBJS += test-mergesort.o
TEST_BUILTINS_OBJS += test-mktemp.o
+TEST_BUILTINS_OBJS += test-oid-array.o
TEST_BUILTINS_OBJS += test-oidmap.o
TEST_BUILTINS_OBJS += test-online-cpus.o
TEST_BUILTINS_OBJS += test-parse-options.o
@@ -738,7 +734,6 @@ TEST_BUILTINS_OBJS += test-run-command.o
TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
TEST_BUILTINS_OBJS += test-serve-v2.o
TEST_BUILTINS_OBJS += test-sha1.o
-TEST_BUILTINS_OBJS += test-oid-array.o
TEST_BUILTINS_OBJS += test-sha256.o
TEST_BUILTINS_OBJS += test-sigchain.o
TEST_BUILTINS_OBJS += test-strcmp-offset.o
@@ -748,10 +743,10 @@ TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o
TEST_BUILTINS_OBJS += test-subprocess.o
TEST_BUILTINS_OBJS += test-trace2.o
TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
-TEST_BUILTINS_OBJS += test-xml-encode.o
TEST_BUILTINS_OBJS += test-wildmatch.o
TEST_BUILTINS_OBJS += test-windows-named-pipe.o
TEST_BUILTINS_OBJS += test-write-cache.o
+TEST_BUILTINS_OBJS += test-xml-encode.o
# Do not add more tests here unless they have extra dependencies. Add
# them in TEST_BUILTINS_OBJS above.
@@ -788,10 +783,10 @@ OTHER_PROGRAMS = git$X
# what test wrappers are needed and 'install' will install, in bindir
BINDIR_PROGRAMS_NEED_X += git
-BINDIR_PROGRAMS_NEED_X += git-upload-pack
BINDIR_PROGRAMS_NEED_X += git-receive-pack
-BINDIR_PROGRAMS_NEED_X += git-upload-archive
BINDIR_PROGRAMS_NEED_X += git-shell
+BINDIR_PROGRAMS_NEED_X += git-upload-archive
+BINDIR_PROGRAMS_NEED_X += git-upload-pack
BINDIR_PROGRAMS_NO_X += git-cvsserver
@@ -831,15 +826,16 @@ LIB_OBJS += advice.o
LIB_OBJS += alias.o
LIB_OBJS += alloc.o
LIB_OBJS += apply.o
-LIB_OBJS += archive.o
LIB_OBJS += archive-tar.o
LIB_OBJS += archive-zip.o
+LIB_OBJS += archive.o
LIB_OBJS += argv-array.o
LIB_OBJS += attr.o
LIB_OBJS += base85.o
LIB_OBJS += bisect.o
LIB_OBJS += blame.o
LIB_OBJS += blob.o
+LIB_OBJS += bloom.o
LIB_OBJS += branch.o
LIB_OBJS += bulk-checkin.o
LIB_OBJS += bundle.o
@@ -849,9 +845,9 @@ LIB_OBJS += checkout.o
LIB_OBJS += color.o
LIB_OBJS += column.o
LIB_OBJS += combine-diff.o
-LIB_OBJS += commit.o
LIB_OBJS += commit-graph.o
LIB_OBJS += commit-reach.o
+LIB_OBJS += commit.o
LIB_OBJS += compat/obstack.o
LIB_OBJS += compat/terminal.o
LIB_OBJS += config.o
@@ -865,17 +861,17 @@ LIB_OBJS += ctype.o
LIB_OBJS += date.o
LIB_OBJS += decorate.o
LIB_OBJS += delta-islands.o
+LIB_OBJS += diff-delta.o
+LIB_OBJS += diff-lib.o
+LIB_OBJS += diff-no-index.o
+LIB_OBJS += diff.o
LIB_OBJS += diffcore-break.o
LIB_OBJS += diffcore-delta.o
LIB_OBJS += diffcore-order.o
LIB_OBJS += diffcore-pickaxe.o
LIB_OBJS += diffcore-rename.o
-LIB_OBJS += diff-delta.o
-LIB_OBJS += diff-lib.o
-LIB_OBJS += diff-no-index.o
-LIB_OBJS += diff.o
-LIB_OBJS += dir.o
LIB_OBJS += dir-iterator.o
+LIB_OBJS += dir.o
LIB_OBJS += editor.o
LIB_OBJS += entry.o
LIB_OBJS += environment.o
@@ -886,6 +882,7 @@ LIB_OBJS += ewah/ewah_rlw.o
LIB_OBJS += exec-cmd.o
LIB_OBJS += fetch-negotiator.o
LIB_OBJS += fetch-pack.o
+LIB_OBJS += fmt-merge-msg.o
LIB_OBJS += fsck.o
LIB_OBJS += fsmonitor.o
LIB_OBJS += gettext.o
@@ -893,7 +890,6 @@ LIB_OBJS += gpg-interface.o
LIB_OBJS += graph.o
LIB_OBJS += grep.o
LIB_OBJS += hashmap.o
-LIB_OBJS += linear-assignment.o
LIB_OBJS += help.o
LIB_OBJS += hex.o
LIB_OBJS += ident.o
@@ -903,9 +899,10 @@ LIB_OBJS += kwset.o
LIB_OBJS += levenshtein.o
LIB_OBJS += line-log.o
LIB_OBJS += line-range.o
-LIB_OBJS += list-objects.o
-LIB_OBJS += list-objects-filter.o
+LIB_OBJS += linear-assignment.o
LIB_OBJS += list-objects-filter-options.o
+LIB_OBJS += list-objects-filter.o
+LIB_OBJS += list-objects.o
LIB_OBJS += ll-merge.o
LIB_OBJS += lockfile.o
LIB_OBJS += log-tree.o
@@ -914,32 +911,32 @@ LIB_OBJS += mailinfo.o
LIB_OBJS += mailmap.o
LIB_OBJS += match-trees.o
LIB_OBJS += mem-pool.o
-LIB_OBJS += merge.o
LIB_OBJS += merge-blobs.o
LIB_OBJS += merge-recursive.o
+LIB_OBJS += merge.o
LIB_OBJS += mergesort.o
LIB_OBJS += midx.o
LIB_OBJS += name-hash.o
LIB_OBJS += negotiator/default.o
LIB_OBJS += negotiator/skipping.o
-LIB_OBJS += notes.o
LIB_OBJS += notes-cache.o
LIB_OBJS += notes-merge.o
LIB_OBJS += notes-utils.o
+LIB_OBJS += notes.o
LIB_OBJS += object.o
+LIB_OBJS += oid-array.o
LIB_OBJS += oidmap.o
LIB_OBJS += oidset.o
-LIB_OBJS += oid-array.o
-LIB_OBJS += packfile.o
-LIB_OBJS += pack-bitmap.o
LIB_OBJS += pack-bitmap-write.o
+LIB_OBJS += pack-bitmap.o
LIB_OBJS += pack-check.o
LIB_OBJS += pack-objects.o
LIB_OBJS += pack-revindex.o
LIB_OBJS += pack-write.o
+LIB_OBJS += packfile.o
LIB_OBJS += pager.o
-LIB_OBJS += parse-options.o
LIB_OBJS += parse-options-cb.o
+LIB_OBJS += parse-options.o
LIB_OBJS += patch-delta.o
LIB_OBJS += patch-ids.o
LIB_OBJS += path.o
@@ -952,12 +949,14 @@ LIB_OBJS += progress.o
LIB_OBJS += promisor-remote.o
LIB_OBJS += prompt.o
LIB_OBJS += protocol.o
+LIB_OBJS += prune-packed.o
LIB_OBJS += quote.o
LIB_OBJS += range-diff.o
LIB_OBJS += reachable.o
LIB_OBJS += read-cache.o
-LIB_OBJS += rebase.o
LIB_OBJS += rebase-interactive.o
+LIB_OBJS += rebase.o
+LIB_OBJS += ref-filter.o
LIB_OBJS += reflog-walk.o
LIB_OBJS += refs.o
LIB_OBJS += refs/files-backend.o
@@ -965,12 +964,12 @@ LIB_OBJS += refs/iterator.o
LIB_OBJS += refs/packed-backend.o
LIB_OBJS += refs/ref-cache.o
LIB_OBJS += refspec.o
-LIB_OBJS += ref-filter.o
LIB_OBJS += remote.o
LIB_OBJS += replace-object.o
LIB_OBJS += repo-settings.o
LIB_OBJS += repository.o
LIB_OBJS += rerere.o
+LIB_OBJS += reset.o
LIB_OBJS += resolve-undo.o
LIB_OBJS += revision.o
LIB_OBJS += run-command.o
@@ -979,8 +978,8 @@ LIB_OBJS += sequencer.o
LIB_OBJS += serve.o
LIB_OBJS += server-info.o
LIB_OBJS += setup.o
-LIB_OBJS += sha1-lookup.o
LIB_OBJS += sha1-file.o
+LIB_OBJS += sha1-lookup.o
LIB_OBJS += sha1-name.o
LIB_OBJS += shallow.o
LIB_OBJS += sideband.o
@@ -990,9 +989,9 @@ LIB_OBJS += stable-qsort.o
LIB_OBJS += strbuf.o
LIB_OBJS += streaming.o
LIB_OBJS += string-list.o
-LIB_OBJS += submodule.o
-LIB_OBJS += submodule-config.o
LIB_OBJS += sub-process.o
+LIB_OBJS += submodule-config.o
+LIB_OBJS += submodule.o
LIB_OBJS += symlinks.o
LIB_OBJS += tag.o
LIB_OBJS += tempfile.o
@@ -1011,11 +1010,11 @@ LIB_OBJS += trace2/tr2_tgt_normal.o
LIB_OBJS += trace2/tr2_tgt_perf.o
LIB_OBJS += trace2/tr2_tls.o
LIB_OBJS += trailer.o
-LIB_OBJS += transport.o
LIB_OBJS += transport-helper.o
+LIB_OBJS += transport.o
LIB_OBJS += tree-diff.o
-LIB_OBJS += tree.o
LIB_OBJS += tree-walk.o
+LIB_OBJS += tree.o
LIB_OBJS += unpack-trees.o
LIB_OBJS += upload-pack.o
LIB_OBJS += url.o
@@ -1055,9 +1054,9 @@ BUILTIN_OBJS += builtin/checkout.o
BUILTIN_OBJS += builtin/clean.o
BUILTIN_OBJS += builtin/clone.o
BUILTIN_OBJS += builtin/column.o
+BUILTIN_OBJS += builtin/commit-graph.o
BUILTIN_OBJS += builtin/commit-tree.o
BUILTIN_OBJS += builtin/commit.o
-BUILTIN_OBJS += builtin/commit-graph.o
BUILTIN_OBJS += builtin/config.o
BUILTIN_OBJS += builtin/count-objects.o
BUILTIN_OBJS += builtin/credential.o
@@ -1088,13 +1087,13 @@ BUILTIN_OBJS += builtin/ls-remote.o
BUILTIN_OBJS += builtin/ls-tree.o
BUILTIN_OBJS += builtin/mailinfo.o
BUILTIN_OBJS += builtin/mailsplit.o
-BUILTIN_OBJS += builtin/merge.o
BUILTIN_OBJS += builtin/merge-base.o
BUILTIN_OBJS += builtin/merge-file.o
BUILTIN_OBJS += builtin/merge-index.o
BUILTIN_OBJS += builtin/merge-ours.o
BUILTIN_OBJS += builtin/merge-recursive.o
BUILTIN_OBJS += builtin/merge-tree.o
+BUILTIN_OBJS += builtin/merge.o
BUILTIN_OBJS += builtin/mktag.o
BUILTIN_OBJS += builtin/mktree.o
BUILTIN_OBJS += builtin/multi-pack-index.o
@@ -1114,9 +1113,9 @@ BUILTIN_OBJS += builtin/read-tree.o
BUILTIN_OBJS += builtin/rebase.o
BUILTIN_OBJS += builtin/receive-pack.o
BUILTIN_OBJS += builtin/reflog.o
-BUILTIN_OBJS += builtin/remote.o
BUILTIN_OBJS += builtin/remote-ext.o
BUILTIN_OBJS += builtin/remote-fd.o
+BUILTIN_OBJS += builtin/remote.o
BUILTIN_OBJS += builtin/repack.o
BUILTIN_OBJS += builtin/replace.o
BUILTIN_OBJS += builtin/rerere.o
@@ -1358,17 +1357,22 @@ ifdef NO_CURL
else
ifdef CURLDIR
# Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case.
- BASIC_CFLAGS += -I$(CURLDIR)/include
+ CURL_CFLAGS = -I$(CURLDIR)/include
CURL_LIBCURL = -L$(CURLDIR)/$(lib) $(CC_LD_DYNPATH)$(CURLDIR)/$(lib)
else
+ CURL_CFLAGS =
CURL_LIBCURL =
endif
-ifdef CURL_LDFLAGS
+ ifndef CURL_LDFLAGS
+ CURL_LDFLAGS = $(eval CURL_LDFLAGS := $$(shell $$(CURL_CONFIG) --libs))$(CURL_LDFLAGS)
+ endif
CURL_LIBCURL += $(CURL_LDFLAGS)
-else
- CURL_LIBCURL += $(shell $(CURL_CONFIG) --libs)
-endif
+
+ ifndef CURL_CFLAGS
+ CURL_CFLAGS = $(eval CURL_CFLAGS := $$(shell $$(CURL_CONFIG) --cflags))$(CURL_CFLAGS)
+ endif
+ BASIC_CFLAGS += $(CURL_CFLAGS)
REMOTE_CURL_PRIMARY = git-remote-http$X
REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
@@ -2335,16 +2339,16 @@ reconfigure config.mak.autogen: config.status
endif
XDIFF_OBJS += xdiff/xdiffi.o
-XDIFF_OBJS += xdiff/xprepare.o
-XDIFF_OBJS += xdiff/xutils.o
XDIFF_OBJS += xdiff/xemit.o
+XDIFF_OBJS += xdiff/xhistogram.o
XDIFF_OBJS += xdiff/xmerge.o
XDIFF_OBJS += xdiff/xpatience.o
-XDIFF_OBJS += xdiff/xhistogram.o
+XDIFF_OBJS += xdiff/xprepare.o
+XDIFF_OBJS += xdiff/xutils.o
+VCSSVN_OBJS += vcs-svn/fast_export.o
VCSSVN_OBJS += vcs-svn/line_buffer.o
VCSSVN_OBJS += vcs-svn/sliding_window.o
-VCSSVN_OBJS += vcs-svn/fast_export.o
VCSSVN_OBJS += vcs-svn/svndiff.o
VCSSVN_OBJS += vcs-svn/svndump.o
@@ -3152,9 +3156,10 @@ endif
#
ALL_COMMANDS = $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS)
ALL_COMMANDS += git
+ALL_COMMANDS += git-citool
+ALL_COMMANDS += git-gui
ALL_COMMANDS += gitk
ALL_COMMANDS += gitweb
-ALL_COMMANDS += git-gui git-citool
.PHONY: check-docs
check-docs::
diff --git a/README.md b/README.md
index 9d4564c8aa..eb8115e6b0 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![Build Status](https://dev.azure.com/git/git/_apis/build/status/git.git)](https://dev.azure.com/git/git/_build/latest?definitionId=11)
+[![Build status](https://github.com/git/git/workflows/CI/PR/badge.svg)](https://github.com/git/git/actions?query=branch%3Amaster+event%3Apush)
Git - fast, scalable, distributed revision control system
=========================================================
diff --git a/archive-tar.c b/archive-tar.c
index 5a77701a15..5ceec3684b 100644
--- a/archive-tar.c
+++ b/archive-tar.c
@@ -364,7 +364,7 @@ static struct archiver **tar_filters;
static int nr_tar_filters;
static int alloc_tar_filters;
-static struct archiver *find_tar_filter(const char *name, int len)
+static struct archiver *find_tar_filter(const char *name, size_t len)
{
int i;
for (i = 0; i < nr_tar_filters; i++) {
@@ -380,7 +380,7 @@ static int tar_filter_config(const char *var, const char *value, void *data)
struct archiver *ar;
const char *name;
const char *type;
- int namelen;
+ size_t namelen;
if (parse_config_key(var, "tar", &name, &namelen, &type) < 0 || !name)
return 0;
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
deleted file mode 100644
index 675c3a43c9..0000000000
--- a/azure-pipelines.yml
+++ /dev/null
@@ -1,558 +0,0 @@
-variables:
- Agent.Source.Git.ShallowFetchDepth: 1
-
-jobs:
-- job: windows_build
- displayName: Windows Build
- condition: succeeded()
- pool:
- vmImage: windows-latest
- timeoutInMinutes: 240
- steps:
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no
- cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\
- }
- displayName: 'Mount test-cache'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - powershell: |
- $urlbase = "https://dev.azure.com/git-for-windows/git/_apis/build/builds"
- $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=22&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
- $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[1].resource.downloadUrl
- (New-Object Net.WebClient).DownloadFile($downloadUrl,"git-sdk-64-minimal.zip")
- Expand-Archive git-sdk-64-minimal.zip -DestinationPath . -Force
- Remove-Item git-sdk-64-minimal.zip
-
- # Let Git ignore the SDK and the test-cache
- "/git-sdk-64-minimal/`n/test-cache/`n" | Out-File -NoNewLine -Encoding ascii -Append "$(Build.SourcesDirectory)\.git\info\exclude"
- displayName: 'Download git-sdk-64-minimal'
- - powershell: |
- & git-sdk-64-minimal\usr\bin\bash.exe -lc @"
- ci/make-test-artifacts.sh artifacts
- "@
- if (!$?) { exit(1) }
- displayName: Build
- env:
- HOME: $(Build.SourcesDirectory)
- MSYSTEM: MINGW64
- DEVELOPER: 1
- NO_PERL: 1
- - task: PublishPipelineArtifact@0
- displayName: 'Publish Pipeline Artifact: test artifacts'
- inputs:
- artifactName: 'windows-artifacts'
- targetPath: '$(Build.SourcesDirectory)\artifacts'
- - task: PublishPipelineArtifact@0
- displayName: 'Publish Pipeline Artifact: git-sdk-64-minimal'
- inputs:
- artifactName: 'git-sdk-64-minimal'
- targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal'
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- cmd /c rmdir "$(Build.SourcesDirectory)\test-cache"
- }
- displayName: 'Unmount test-cache'
- condition: true
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
-
-- job: windows_test
- displayName: Windows Test
- dependsOn: windows_build
- condition: succeeded()
- pool:
- vmImage: windows-latest
- timeoutInMinutes: 240
- strategy:
- parallel: 10
- steps:
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no
- cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\
- }
- displayName: 'Mount test-cache'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: DownloadPipelineArtifact@0
- displayName: 'Download Pipeline Artifact: test artifacts'
- inputs:
- artifactName: 'windows-artifacts'
- targetPath: '$(Build.SourcesDirectory)'
- - task: DownloadPipelineArtifact@0
- displayName: 'Download Pipeline Artifact: git-sdk-64-minimal'
- inputs:
- artifactName: 'git-sdk-64-minimal'
- targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal'
- - powershell: |
- & git-sdk-64-minimal\usr\bin\bash.exe -lc @"
- test -f artifacts.tar.gz || {
- echo No test artifacts found\; skipping >&2
- exit 0
- }
- tar xf artifacts.tar.gz || exit 1
-
- # Let Git ignore the SDK and the test-cache
- printf '%s\n' /git-sdk-64-minimal/ /test-cache/ >>.git/info/exclude
-
- ci/run-test-slice.sh `$SYSTEM_JOBPOSITIONINPHASE `$SYSTEM_TOTALJOBSINPHASE || {
- ci/print-test-failures.sh
- exit 1
- }
- "@
- if (!$?) { exit(1) }
- displayName: 'Test (parallel)'
- env:
- HOME: $(Build.SourcesDirectory)
- MSYSTEM: MINGW64
- NO_SVN_TESTS: 1
- GIT_TEST_SKIP_REBASE_P: 1
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- cmd /c rmdir "$(Build.SourcesDirectory)\test-cache"
- }
- displayName: 'Unmount test-cache'
- condition: true
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'windows'
- platform: Windows
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-test-artifacts
-
-- job: vs_build
- displayName: Visual Studio Build
- condition: succeeded()
- pool:
- vmImage: windows-latest
- timeoutInMinutes: 240
- steps:
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no
- cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\
- }
- displayName: 'Mount test-cache'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - powershell: |
- $urlbase = "https://dev.azure.com/git-for-windows/git/_apis/build/builds"
- $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=22&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
- $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[1].resource.downloadUrl
- (New-Object Net.WebClient).DownloadFile($downloadUrl,"git-sdk-64-minimal.zip")
- Expand-Archive git-sdk-64-minimal.zip -DestinationPath . -Force
- Remove-Item git-sdk-64-minimal.zip
-
- # Let Git ignore the SDK and the test-cache
- "/git-sdk-64-minimal/`n/test-cache/`n" | Out-File -NoNewLine -Encoding ascii -Append "$(Build.SourcesDirectory)\.git\info\exclude"
- displayName: 'Download git-sdk-64-minimal'
- - powershell: |
- & git-sdk-64-minimal\usr\bin\bash.exe -lc @"
- make NDEBUG=1 DEVELOPER=1 vcxproj
- "@
- if (!$?) { exit(1) }
- displayName: Generate Visual Studio Solution
- env:
- HOME: $(Build.SourcesDirectory)
- MSYSTEM: MINGW64
- DEVELOPER: 1
- NO_PERL: 1
- GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"
- - powershell: |
- $urlbase = "https://dev.azure.com/git/git/_apis/build/builds"
- $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=9&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
- $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[0].resource.downloadUrl
- (New-Object Net.WebClient).DownloadFile($downloadUrl, "compat.zip")
- Expand-Archive compat.zip -DestinationPath . -Force
- Remove-Item compat.zip
- displayName: 'Download vcpkg artifacts'
- - task: MSBuild@1
- inputs:
- solution: git.sln
- platform: x64
- configuration: Release
- maximumCpuCount: 4
- msbuildArguments: /p:PlatformToolset=v142
- - powershell: |
- & compat\vcbuild\vcpkg_copy_dlls.bat release
- if (!$?) { exit(1) }
- & git-sdk-64-minimal\usr\bin\bash.exe -lc @"
- mkdir -p artifacts &&
- eval \"`$(make -n artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts | grep ^tar)\"
- "@
- if (!$?) { exit(1) }
- displayName: Bundle artifact tar
- env:
- HOME: $(Build.SourcesDirectory)
- MSYSTEM: MINGW64
- DEVELOPER: 1
- NO_PERL: 1
- MSVC: 1
- VCPKG_ROOT: $(Build.SourcesDirectory)\compat\vcbuild\vcpkg
- - powershell: |
- $tag = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-tag.txt").content
- $version = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-version.txt").content
- $url = "https://github.com/git-for-windows/git/releases/download/${tag}/PortableGit-${version}-64-bit.7z.exe"
- (New-Object Net.WebClient).DownloadFile($url,"PortableGit.exe")
- & .\PortableGit.exe -y -oartifacts\PortableGit
- # Wait until it is unpacked
- while (-not @(Remove-Item -ErrorAction SilentlyContinue PortableGit.exe; $?)) { sleep 1 }
- displayName: Download & extract portable Git
- - task: PublishPipelineArtifact@0
- displayName: 'Publish Pipeline Artifact: MSVC test artifacts'
- inputs:
- artifactName: 'vs-artifacts'
- targetPath: '$(Build.SourcesDirectory)\artifacts'
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- cmd /c rmdir "$(Build.SourcesDirectory)\test-cache"
- }
- displayName: 'Unmount test-cache'
- condition: true
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
-
-- job: vs_test
- displayName: Visual Studio Test
- dependsOn: vs_build
- condition: succeeded()
- pool:
- vmImage: windows-latest
- timeoutInMinutes: 240
- strategy:
- parallel: 10
- steps:
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no
- cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\
- }
- displayName: 'Mount test-cache'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: DownloadPipelineArtifact@0
- displayName: 'Download Pipeline Artifact: VS test artifacts'
- inputs:
- artifactName: 'vs-artifacts'
- targetPath: '$(Build.SourcesDirectory)'
- - powershell: |
- & PortableGit\git-cmd.exe --command=usr\bin\bash.exe -lc @"
- test -f artifacts.tar.gz || {
- echo No test artifacts found\; skipping >&2
- exit 0
- }
- tar xf artifacts.tar.gz || exit 1
-
- # Let Git ignore the SDK and the test-cache
- printf '%s\n' /PortableGit/ /test-cache/ >>.git/info/exclude
-
- cd t &&
- PATH=\"`$PWD/helper:`$PATH\" &&
- test-tool.exe run-command testsuite --jobs=10 -V -x --write-junit-xml \
- `$(test-tool.exe path-utils slice-tests \
- `$SYSTEM_JOBPOSITIONINPHASE `$SYSTEM_TOTALJOBSINPHASE t[0-9]*.sh)
- "@
- if (!$?) { exit(1) }
- displayName: 'Test (parallel)'
- env:
- HOME: $(Build.SourcesDirectory)
- MSYSTEM: MINGW64
- NO_SVN_TESTS: 1
- GIT_TEST_SKIP_REBASE_P: 1
- - powershell: |
- if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
- cmd /c rmdir "$(Build.SourcesDirectory)\test-cache"
- }
- displayName: 'Unmount test-cache'
- condition: true
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'vs'
- platform: Windows
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-vs-test-artifacts
-
-- job: linux_clang
- displayName: linux-clang
- condition: succeeded()
- pool:
- vmImage: ubuntu-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- sudo apt-get update &&
- sudo apt-get -y install git gcc make libssl-dev libcurl4-openssl-dev libexpat-dev tcl tk gettext git-email zlib1g-dev apache2-bin &&
-
- export CC=clang || exit 1
-
- ci/install-dependencies.sh || exit 1
- ci/run-build-and-tests.sh || {
- ci/print-test-failures.sh
- exit 1
- }
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1
- displayName: 'ci/run-build-and-tests.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'linux-clang'
- platform: Linux
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-test-artifacts
-
-- job: linux_gcc
- displayName: linux-gcc
- condition: succeeded()
- pool:
- vmImage: ubuntu-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- sudo add-apt-repository ppa:ubuntu-toolchain-r/test &&
- sudo apt-get update &&
- sudo apt-get -y install git gcc make libssl-dev libcurl4-openssl-dev libexpat-dev tcl tk gettext git-email zlib1g-dev apache2 language-pack-is git-svn gcc-8 || exit 1
-
- ci/install-dependencies.sh || exit 1
- ci/run-build-and-tests.sh || {
- ci/print-test-failures.sh
- exit 1
- }
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1
- displayName: 'ci/run-build-and-tests.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'linux-gcc'
- platform: Linux
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-test-artifacts
-
-- job: osx_clang
- displayName: osx-clang
- condition: succeeded()
- pool:
- vmImage: macOS-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- export CC=clang
-
- ci/install-dependencies.sh || exit 1
- ci/run-build-and-tests.sh || {
- ci/print-test-failures.sh
- exit 1
- }
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || umount "$HOME/test-cache" || exit 1
- displayName: 'ci/run-build-and-tests.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'osx-clang'
- platform: macOS
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-test-artifacts
-
-- job: osx_gcc
- displayName: osx-gcc
- condition: succeeded()
- pool:
- vmImage: macOS-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- ci/install-dependencies.sh || exit 1
- ci/run-build-and-tests.sh || {
- ci/print-test-failures.sh
- exit 1
- }
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || umount "$HOME/test-cache" || exit 1
- displayName: 'ci/run-build-and-tests.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'osx-gcc'
- platform: macOS
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-test-artifacts
-
-- job: gettext_poison
- displayName: GETTEXT_POISON
- condition: succeeded()
- pool:
- vmImage: ubuntu-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- sudo apt-get update &&
- sudo apt-get -y install git gcc make libssl-dev libcurl4-openssl-dev libexpat-dev tcl tk gettext git-email zlib1g-dev &&
-
- export jobname=GETTEXT_POISON || exit 1
-
- ci/run-build-and-tests.sh || {
- ci/print-test-failures.sh
- exit 1
- }
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1
- displayName: 'ci/run-build-and-tests.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'gettext-poison'
- platform: Linux
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-test-artifacts
-
-- job: linux32
- displayName: Linux32
- condition: succeeded()
- pool:
- vmImage: ubuntu-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- res=0
- sudo AGENT_OS="$AGENT_OS" BUILD_BUILDNUMBER="$BUILD_BUILDNUMBER" BUILD_REPOSITORY_URI="$BUILD_REPOSITORY_URI" BUILD_SOURCEBRANCH="$BUILD_SOURCEBRANCH" BUILD_SOURCEVERSION="$BUILD_SOURCEVERSION" SYSTEM_PHASENAME="$SYSTEM_PHASENAME" SYSTEM_TASKDEFINITIONSURI="$SYSTEM_TASKDEFINITIONSURI" SYSTEM_TEAMPROJECT="$SYSTEM_TEAMPROJECT" CC=$CC MAKEFLAGS="$MAKEFLAGS" bash -lxc ci/run-linux32-docker.sh || res=1
-
- sudo chmod a+r t/out/TEST-*.xml
- test ! -d t/failed-test-artifacts || sudo chmod a+r t/failed-test-artifacts
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || res=1
- exit $res
- displayName: 'ci/run-linux32-docker.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
- - task: PublishTestResults@2
- displayName: 'Publish Test Results **/TEST-*.xml'
- inputs:
- mergeTestResults: true
- testRunTitle: 'linux32'
- platform: Linux
- publishRunAttachments: false
- condition: succeededOrFailed()
- - task: PublishBuildArtifacts@1
- displayName: 'Publish trash directories of failed tests'
- condition: failed()
- inputs:
- PathtoPublish: t/failed-test-artifacts
- ArtifactName: failed-test-artifacts
-
-- job: static_analysis
- displayName: StaticAnalysis
- condition: succeeded()
- pool:
- vmImage: ubuntu-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- sudo apt-get update &&
- sudo apt-get install -y coccinelle libcurl4-openssl-dev libssl-dev libexpat-dev gettext &&
-
- export jobname=StaticAnalysis &&
-
- ci/run-static-analysis.sh || exit 1
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1
- displayName: 'ci/run-static-analysis.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
-
-- job: documentation
- displayName: Documentation
- condition: succeeded()
- pool:
- vmImage: ubuntu-latest
- steps:
- - bash: |
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
-
- sudo apt-get update &&
- sudo apt-get install -y asciidoc xmlto asciidoctor docbook-xsl-ns &&
-
- export ALREADY_HAVE_ASCIIDOCTOR=yes. &&
- export jobname=Documentation &&
-
- ci/test-documentation.sh || exit 1
-
- test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || sudo umount "$HOME/test-cache" || exit 1
- displayName: 'ci/test-documentation.sh'
- env:
- GITFILESHAREPWD: $(gitfileshare.pwd)
diff --git a/blame.c b/blame.c
index 29770e5c81..da7e28800e 100644
--- a/blame.c
+++ b/blame.c
@@ -9,6 +9,8 @@
#include "blame.h"
#include "alloc.h"
#include "commit-slab.h"
+#include "bloom.h"
+#include "commit-graph.h"
define_commit_slab(blame_suspects, struct blame_origin *);
static struct blame_suspects blame_suspects;
@@ -1246,13 +1248,74 @@ static int fill_blob_sha1_and_mode(struct repository *r,
return -1;
}
+struct blame_bloom_data {
+ /*
+ * Changed-path Bloom filter keys. These can help prevent
+ * computing diffs against first parents, but we need to
+ * expand the list as code is moved or files are renamed.
+ */
+ struct bloom_filter_settings *settings;
+ struct bloom_key **keys;
+ int nr;
+ int alloc;
+};
+
+static int bloom_count_queries = 0;
+static int bloom_count_no = 0;
+static int maybe_changed_path(struct repository *r,
+ struct blame_origin *origin,
+ struct blame_bloom_data *bd)
+{
+ int i;
+ struct bloom_filter *filter;
+
+ if (!bd)
+ return 1;
+
+ if (origin->commit->generation == GENERATION_NUMBER_INFINITY)
+ return 1;
+
+ filter = get_bloom_filter(r, origin->commit, 0);
+
+ if (!filter)
+ return 1;
+
+ bloom_count_queries++;
+ for (i = 0; i < bd->nr; i++) {
+ if (bloom_filter_contains(filter,
+ bd->keys[i],
+ bd->settings))
+ return 1;
+ }
+
+ bloom_count_no++;
+ return 0;
+}
+
+static void add_bloom_key(struct blame_bloom_data *bd,
+ const char *path)
+{
+ if (!bd)
+ return;
+
+ if (bd->nr >= bd->alloc) {
+ bd->alloc *= 2;
+ REALLOC_ARRAY(bd->keys, bd->alloc);
+ }
+
+ bd->keys[bd->nr] = xmalloc(sizeof(struct bloom_key));
+ fill_bloom_key(path, strlen(path), bd->keys[bd->nr], bd->settings);
+ bd->nr++;
+}
+
/*
* We have an origin -- check if the same path exists in the
* parent and return an origin structure to represent it.
*/
static struct blame_origin *find_origin(struct repository *r,
struct commit *parent,
- struct blame_origin *origin)
+ struct blame_origin *origin,
+ struct blame_bloom_data *bd)
{
struct blame_origin *porigin;
struct diff_options diff_opts;
@@ -1286,10 +1349,18 @@ static struct blame_origin *find_origin(struct repository *r,
if (is_null_oid(&origin->commit->object.oid))
do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
- else
- diff_tree_oid(get_commit_tree_oid(parent),
- get_commit_tree_oid(origin->commit),
- "", &diff_opts);
+ else {
+ int compute_diff = 1;
+ if (origin->commit->parents &&
+ !oidcmp(&parent->object.oid,
+ &origin->commit->parents->item->object.oid))
+ compute_diff = maybe_changed_path(r, origin, bd);
+
+ if (compute_diff)
+ diff_tree_oid(get_commit_tree_oid(parent),
+ get_commit_tree_oid(origin->commit),
+ "", &diff_opts);
+ }
diffcore_std(&diff_opts);
if (!diff_queued_diff.nr) {
@@ -1341,7 +1412,8 @@ static struct blame_origin *find_origin(struct repository *r,
*/
static struct blame_origin *find_rename(struct repository *r,
struct commit *parent,
- struct blame_origin *origin)
+ struct blame_origin *origin,
+ struct blame_bloom_data *bd)
{
struct blame_origin *porigin = NULL;
struct diff_options diff_opts;
@@ -1366,6 +1438,7 @@ static struct blame_origin *find_rename(struct repository *r,
struct diff_filepair *p = diff_queued_diff.queue[i];
if ((p->status == 'R' || p->status == 'C') &&
!strcmp(p->two->path, origin->path)) {
+ add_bloom_key(bd, p->one->path);
porigin = get_origin(parent, p->one->path);
oidcpy(&porigin->blob_oid, &p->one->oid);
porigin->mode = p->one->mode;
@@ -2332,6 +2405,11 @@ static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *bl
#define MAXSG 16
+typedef struct blame_origin *(*blame_find_alg)(struct repository *,
+ struct commit *,
+ struct blame_origin *,
+ struct blame_bloom_data *);
+
static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt)
{
struct rev_info *revs = sb->revs;
@@ -2356,8 +2434,7 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin,
* common cases, then we look for renames in the second pass.
*/
for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) {
- struct blame_origin *(*find)(struct repository *, struct commit *, struct blame_origin *);
- find = pass ? find_rename : find_origin;
+ blame_find_alg find = pass ? find_rename : find_origin;
for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
i < num_sg && sg;
@@ -2369,7 +2446,7 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin,
continue;
if (parse_commit(p))
continue;
- porigin = find(sb->repo, p, origin);
+ porigin = find(sb->repo, p, origin, sb->bloom_data);
if (!porigin)
continue;
if (oideq(&porigin->blob_oid, &origin->blob_oid)) {
@@ -2809,3 +2886,45 @@ struct blame_entry *blame_entry_prepend(struct blame_entry *head,
blame_origin_incref(o);
return new_head;
}
+
+void setup_blame_bloom_data(struct blame_scoreboard *sb,
+ const char *path)
+{
+ struct blame_bloom_data *bd;
+
+ if (!sb->repo->objects->commit_graph)
+ return;
+
+ if (!sb->repo->objects->commit_graph->bloom_filter_settings)
+ return;
+
+ bd = xmalloc(sizeof(struct blame_bloom_data));
+
+ bd->settings = sb->repo->objects->commit_graph->bloom_filter_settings;
+
+ bd->alloc = 4;
+ bd->nr = 0;
+ ALLOC_ARRAY(bd->keys, bd->alloc);
+
+ add_bloom_key(bd, path);
+
+ sb->bloom_data = bd;
+}
+
+void cleanup_scoreboard(struct blame_scoreboard *sb)
+{
+ if (sb->bloom_data) {
+ int i;
+ for (i = 0; i < sb->bloom_data->nr; i++) {
+ free(sb->bloom_data->keys[i]->hashes);
+ free(sb->bloom_data->keys[i]);
+ }
+ free(sb->bloom_data->keys);
+ FREE_AND_NULL(sb->bloom_data);
+
+ trace2_data_intmax("blame", sb->repo,
+ "bloom/queries", bloom_count_queries);
+ trace2_data_intmax("blame", sb->repo,
+ "bloom/response-no", bloom_count_no);
+ }
+}
diff --git a/blame.h b/blame.h
index 089b181ff2..b6bbee4147 100644
--- a/blame.h
+++ b/blame.h
@@ -100,6 +100,8 @@ struct blame_entry {
int unblamable;
};
+struct blame_bloom_data;
+
/*
* The current state of the blame assignment.
*/
@@ -156,6 +158,7 @@ struct blame_scoreboard {
void(*found_guilty_entry)(struct blame_entry *, void *);
void *found_guilty_entry_data;
+ struct blame_bloom_data *bloom_data;
};
/*
@@ -180,6 +183,9 @@ void init_scoreboard(struct blame_scoreboard *sb);
void setup_scoreboard(struct blame_scoreboard *sb,
const char *path,
struct blame_origin **orig);
+void setup_blame_bloom_data(struct blame_scoreboard *sb,
+ const char *path);
+void cleanup_scoreboard(struct blame_scoreboard *sb);
struct blame_entry *blame_entry_prepend(struct blame_entry *head,
long start, long end,
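
A condensed sketch of how a caller is expected to drive the hooks declared above
(builtin/blame.c further down follows this shape; the surrounding locals are assumed
from the usual blame driver). Copy detection skips the Bloom data on purpose, since
copied code can come from paths the filter never recorded:

	/* sb, o, path and opt are the usual cmd_blame() locals */
	setup_scoreboard(&sb, path, &o);
	if (!(opt & PICKAXE_BLAME_COPY))
		setup_blame_bloom_data(&sb, path); /* no-op without commit-graph Bloom chunks */

	/* ... blame assignment runs unchanged ... */

	cleanup_scoreboard(&sb); /* frees the keys, emits trace2 bloom/query counters */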
diff --git a/bloom.c b/bloom.c
new file mode 100644
index 0000000000..dd9bab9bbd
--- /dev/null
+++ b/bloom.c
@@ -0,0 +1,276 @@
+#include "git-compat-util.h"
+#include "bloom.h"
+#include "diff.h"
+#include "diffcore.h"
+#include "revision.h"
+#include "hashmap.h"
+#include "commit-graph.h"
+#include "commit.h"
+
+define_commit_slab(bloom_filter_slab, struct bloom_filter);
+
+struct bloom_filter_slab bloom_filters;
+
+struct pathmap_hash_entry {
+ struct hashmap_entry entry;
+ const char path[FLEX_ARRAY];
+};
+
+static uint32_t rotate_left(uint32_t value, int32_t count)
+{
+ uint32_t mask = 8 * sizeof(uint32_t) - 1;
+ count &= mask;
+ return ((value << count) | (value >> ((-count) & mask)));
+}
+
+static inline unsigned char get_bitmask(uint32_t pos)
+{
+ return ((unsigned char)1) << (pos & (BITS_PER_WORD - 1));
+}
+
+static int load_bloom_filter_from_graph(struct commit_graph *g,
+ struct bloom_filter *filter,
+ struct commit *c)
+{
+ uint32_t lex_pos, start_index, end_index;
+
+ while (c->graph_pos < g->num_commits_in_base)
+ g = g->base_graph;
+
+	/* The commit graph that commit 'c' lives in doesn't carry Bloom filters. */
+ if (!g->chunk_bloom_indexes)
+ return 0;
+
+ lex_pos = c->graph_pos - g->num_commits_in_base;
+
+ end_index = get_be32(g->chunk_bloom_indexes + 4 * lex_pos);
+
+ if (lex_pos > 0)
+ start_index = get_be32(g->chunk_bloom_indexes + 4 * (lex_pos - 1));
+ else
+ start_index = 0;
+
+ filter->len = end_index - start_index;
+ filter->data = (unsigned char *)(g->chunk_bloom_data +
+ sizeof(unsigned char) * start_index +
+ BLOOMDATA_CHUNK_HEADER_SIZE);
+
+ return 1;
+}
+
+/*
+ * Calculate the murmur3 32-bit hash value for the given data
+ * using the given seed.
+ * Produces a uniformly distributed hash value.
+ * Not considered to be cryptographically secure.
+ * Implemented as described in https://en.wikipedia.org/wiki/MurmurHash#Algorithm
+ */
+uint32_t murmur3_seeded(uint32_t seed, const char *data, size_t len)
+{
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
+ const uint32_t r1 = 15;
+ const uint32_t r2 = 13;
+ const uint32_t m = 5;
+ const uint32_t n = 0xe6546b64;
+ int i;
+ uint32_t k1 = 0;
+ const char *tail;
+
+ int len4 = len / sizeof(uint32_t);
+
+ uint32_t k;
+ for (i = 0; i < len4; i++) {
+ uint32_t byte1 = (uint32_t)data[4*i];
+ uint32_t byte2 = ((uint32_t)data[4*i + 1]) << 8;
+ uint32_t byte3 = ((uint32_t)data[4*i + 2]) << 16;
+ uint32_t byte4 = ((uint32_t)data[4*i + 3]) << 24;
+ k = byte1 | byte2 | byte3 | byte4;
+ k *= c1;
+ k = rotate_left(k, r1);
+ k *= c2;
+
+ seed ^= k;
+ seed = rotate_left(seed, r2) * m + n;
+ }
+
+ tail = (data + len4 * sizeof(uint32_t));
+
+ switch (len & (sizeof(uint32_t) - 1)) {
+ case 3:
+ k1 ^= ((uint32_t)tail[2]) << 16;
+ /*-fallthrough*/
+ case 2:
+ k1 ^= ((uint32_t)tail[1]) << 8;
+ /*-fallthrough*/
+ case 1:
+ k1 ^= ((uint32_t)tail[0]) << 0;
+ k1 *= c1;
+ k1 = rotate_left(k1, r1);
+ k1 *= c2;
+ seed ^= k1;
+ break;
+ }
+
+ seed ^= (uint32_t)len;
+ seed ^= (seed >> 16);
+ seed *= 0x85ebca6b;
+ seed ^= (seed >> 13);
+ seed *= 0xc2b2ae35;
+ seed ^= (seed >> 16);
+
+ return seed;
+}
+
+void fill_bloom_key(const char *data,
+ size_t len,
+ struct bloom_key *key,
+ const struct bloom_filter_settings *settings)
+{
+ int i;
+ const uint32_t seed0 = 0x293ae76f;
+ const uint32_t seed1 = 0x7e646e2c;
+ const uint32_t hash0 = murmur3_seeded(seed0, data, len);
+ const uint32_t hash1 = murmur3_seeded(seed1, data, len);
+
+ key->hashes = (uint32_t *)xcalloc(settings->num_hashes, sizeof(uint32_t));
+ for (i = 0; i < settings->num_hashes; i++)
+ key->hashes[i] = hash0 + i * hash1;
+}
+
+void add_key_to_filter(const struct bloom_key *key,
+ struct bloom_filter *filter,
+ const struct bloom_filter_settings *settings)
+{
+ int i;
+ uint64_t mod = filter->len * BITS_PER_WORD;
+
+ for (i = 0; i < settings->num_hashes; i++) {
+ uint64_t hash_mod = key->hashes[i] % mod;
+ uint64_t block_pos = hash_mod / BITS_PER_WORD;
+
+ filter->data[block_pos] |= get_bitmask(hash_mod);
+ }
+}
+
+void init_bloom_filters(void)
+{
+ init_bloom_filter_slab(&bloom_filters);
+}
+
+struct bloom_filter *get_bloom_filter(struct repository *r,
+ struct commit *c,
+ int compute_if_not_present)
+{
+ struct bloom_filter *filter;
+ struct bloom_filter_settings settings = DEFAULT_BLOOM_FILTER_SETTINGS;
+ int i;
+ struct diff_options diffopt;
+ int max_changes = 512;
+
+ if (bloom_filters.slab_size == 0)
+ return NULL;
+
+ filter = bloom_filter_slab_at(&bloom_filters, c);
+
+ if (!filter->data) {
+ load_commit_graph_info(r, c);
+ if (c->graph_pos != COMMIT_NOT_FROM_GRAPH &&
+ r->objects->commit_graph->chunk_bloom_indexes) {
+ if (load_bloom_filter_from_graph(r->objects->commit_graph, filter, c))
+ return filter;
+ else
+ return NULL;
+ }
+ }
+
+ if (filter->data || !compute_if_not_present)
+ return filter;
+
+ repo_diff_setup(r, &diffopt);
+ diffopt.flags.recursive = 1;
+ diffopt.detect_rename = 0;
+ diffopt.max_changes = max_changes;
+ diff_setup_done(&diffopt);
+
+ if (c->parents)
+ diff_tree_oid(&c->parents->item->object.oid, &c->object.oid, "", &diffopt);
+ else
+ diff_tree_oid(NULL, &c->object.oid, "", &diffopt);
+ diffcore_std(&diffopt);
+
+ if (diff_queued_diff.nr <= max_changes) {
+ struct hashmap pathmap;
+ struct pathmap_hash_entry *e;
+ struct hashmap_iter iter;
+ hashmap_init(&pathmap, NULL, NULL, 0);
+
+ for (i = 0; i < diff_queued_diff.nr; i++) {
+ const char *path = diff_queued_diff.queue[i]->two->path;
+
+ /*
+ * Add each leading directory of the changed file, i.e. for
+ * 'dir/subdir/file' add 'dir' and 'dir/subdir' as well, so
+ * the Bloom filter could be used to speed up commands like
+ * 'git log dir/subdir', too.
+ *
+ * Note that directories are added without the trailing '/'.
+ */
+ do {
+ char *last_slash = strrchr(path, '/');
+
+ FLEX_ALLOC_STR(e, path, path);
+ hashmap_entry_init(&e->entry, strhash(path));
+ hashmap_add(&pathmap, &e->entry);
+
+ if (!last_slash)
+ last_slash = (char*)path;
+ *last_slash = '\0';
+
+ } while (*path);
+
+ diff_free_filepair(diff_queued_diff.queue[i]);
+ }
+
+ filter->len = (hashmap_get_size(&pathmap) * settings.bits_per_entry + BITS_PER_WORD - 1) / BITS_PER_WORD;
+ filter->data = xcalloc(filter->len, sizeof(unsigned char));
+
+ hashmap_for_each_entry(&pathmap, &iter, e, entry) {
+ struct bloom_key key;
+ fill_bloom_key(e->path, strlen(e->path), &key, &settings);
+ add_key_to_filter(&key, filter, &settings);
+ }
+
+ hashmap_free_entries(&pathmap, struct pathmap_hash_entry, entry);
+ } else {
+ for (i = 0; i < diff_queued_diff.nr; i++)
+ diff_free_filepair(diff_queued_diff.queue[i]);
+ filter->data = NULL;
+ filter->len = 0;
+ }
+
+ free(diff_queued_diff.queue);
+ DIFF_QUEUE_CLEAR(&diff_queued_diff);
+
+ return filter;
+}
+
+int bloom_filter_contains(const struct bloom_filter *filter,
+ const struct bloom_key *key,
+ const struct bloom_filter_settings *settings)
+{
+ int i;
+ uint64_t mod = filter->len * BITS_PER_WORD;
+
+ if (!mod)
+ return -1;
+
+ for (i = 0; i < settings->num_hashes; i++) {
+ uint64_t hash_mod = key->hashes[i] % mod;
+ uint64_t block_pos = hash_mod / BITS_PER_WORD;
+ if (!(filter->data[block_pos] & get_bitmask(hash_mod)))
+ return 0;
+ }
+
+ return 1;
+} \ No newline at end of file
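
A minimal usage sketch of the API implemented above (declarations follow in bloom.h);
the sizing mirrors the formula get_bloom_filter() uses, and the paths are just
placeholders:

/* assumes the usual "git-compat-util.h" and "bloom.h" includes */
static void bloom_demo(void)
{
	struct bloom_filter_settings settings = DEFAULT_BLOOM_FILTER_SETTINGS;
	struct bloom_filter filter = { NULL, 0 };
	struct bloom_key key;
	const char *paths[] = { "Makefile", "Documentation/git-log.txt" };
	size_t i, nr = ARRAY_SIZE(paths);

	/* one byte-addressed bit array sized for 'nr' entries */
	filter.len = (nr * settings.bits_per_entry + BITS_PER_WORD - 1) / BITS_PER_WORD;
	filter.data = xcalloc(filter.len, sizeof(unsigned char));

	for (i = 0; i < nr; i++) {
		fill_bloom_key(paths[i], strlen(paths[i]), &key, &settings);
		add_key_to_filter(&key, &filter, &settings);
		free(key.hashes);
	}

	fill_bloom_key("Makefile", strlen("Makefile"), &key, &settings);
	if (bloom_filter_contains(&filter, &key, &settings) > 0)
		; /* "maybe changed": no false negatives, occasional false positives */
	free(key.hashes);
	free(filter.data);
}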
diff --git a/bloom.h b/bloom.h
new file mode 100644
index 0000000000..b935186425
--- /dev/null
+++ b/bloom.h
@@ -0,0 +1,90 @@
+#ifndef BLOOM_H
+#define BLOOM_H
+
+struct commit;
+struct repository;
+
+struct bloom_filter_settings {
+ /*
+ * The version of the hashing technique being used.
+ * We currently only support version = 1 which is
+ * the seeded murmur3 hashing technique implemented
+ * in bloom.c.
+ */
+ uint32_t hash_version;
+
+ /*
+ * The number of times a path is hashed, i.e. the
+ * number of bit positions that cumulatively
+ * determine whether a path is present in the
+ * Bloom filter.
+ */
+ uint32_t num_hashes;
+
+ /*
+ * The minimum number of bits per entry in the Bloom
+ * filter. If the filter contains 'n' entries, then
+ * filter size is the minimum number of 8-bit words
+ * that contain n*b bits.
+ */
+ uint32_t bits_per_entry;
+};
+
+#define DEFAULT_BLOOM_FILTER_SETTINGS { 1, 7, 10 }
+#define BITS_PER_WORD 8
+#define BLOOMDATA_CHUNK_HEADER_SIZE 3 * sizeof(uint32_t)
+
+/*
+ * A bloom_filter struct represents a data segment to
+ * use when testing hash values. The 'len' member
+ * dictates how many entries are stored in
+ * 'data'.
+ */
+struct bloom_filter {
+ unsigned char *data;
+ size_t len;
+};
+
+/*
+ * A bloom_key represents the k hash values for a
+ * given string. These can be precomputed and
+ * stored in a bloom_key for re-use when testing
+ * against a bloom_filter. The number of hashes is
+ * given by the Bloom filter settings and is the same
+ * for all Bloom filters and keys interacting with
+ * the loaded version of the commit graph file and
+ * the Bloom data chunks.
+ */
+struct bloom_key {
+ uint32_t *hashes;
+};
+
+/*
+ * Calculate the murmur3 32-bit hash value for the given data
+ * using the given seed.
+ * Produces a uniformly distributed hash value.
+ * Not considered to be cryptographically secure.
+ * Implemented as described in https://en.wikipedia.org/wiki/MurmurHash#Algorithm
+ */
+uint32_t murmur3_seeded(uint32_t seed, const char *data, size_t len);
+
+void fill_bloom_key(const char *data,
+ size_t len,
+ struct bloom_key *key,
+ const struct bloom_filter_settings *settings);
+
+void add_key_to_filter(const struct bloom_key *key,
+ struct bloom_filter *filter,
+ const struct bloom_filter_settings *settings);
+
+void init_bloom_filters(void);
+
+struct bloom_filter *get_bloom_filter(struct repository *r,
+ struct commit *c,
+ int compute_if_not_present);
+
+int bloom_filter_contains(const struct bloom_filter *filter,
+ const struct bloom_key *key,
+ const struct bloom_filter_settings *settings);
+
+#endif \ No newline at end of file
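
With the default settings above ({1, 7, 10}), the sizes work out as follows: a commit
whose filter covers n = 10 distinct paths (changed files plus each of their leading
directories, counted once) occupies (10 * 10 + 7) / 8 = 13 bytes, i.e. 104 bits, of
which each path sets at most 7; commits with more than 512 changed files store a
zero-length filter instead, matching the note in the format document earlier in this
patch.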
diff --git a/branch.c b/branch.c
index 579494738a..2d9e7675a6 100644
--- a/branch.c
+++ b/branch.c
@@ -344,6 +344,7 @@ void remove_merge_branch_state(struct repository *r)
unlink(git_path_merge_rr(r));
unlink(git_path_merge_msg(r));
unlink(git_path_merge_mode(r));
+ save_autostash(git_path_merge_autostash(r));
}
void remove_branch_state(struct repository *r, int verbose)
diff --git a/builtin.h b/builtin.h
index 2b25a80cde..a5ae15bfe5 100644
--- a/builtin.h
+++ b/builtin.h
@@ -94,25 +94,9 @@
* command.
*/
-#define DEFAULT_MERGE_LOG_LEN 20
-
extern const char git_usage_string[];
extern const char git_more_info_string[];
-#define PRUNE_PACKED_DRY_RUN 01
-#define PRUNE_PACKED_VERBOSE 02
-
-void prune_packed_objects(int);
-
-struct fmt_merge_msg_opts {
- unsigned add_title:1,
- credit_people:1;
- int shortlog_len;
-};
-
-int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
- struct fmt_merge_msg_opts *);
-
/**
* If a built-in has DELAY_PAGER_CONFIG set, the built-in should call this early
* when it wishes to respect the `pager.foo`-config. The `cmd` is the name of
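
These declarations do not go away; they move into the new fmt-merge-msg.h added by this
patch so that callers outside builtin/ can reach them. A hypothetical caller, sketched
under the assumption that the header carries the same struct, prototype and
DEFAULT_MERGE_LOG_LEN it replaces here, would look like:

#include "git-compat-util.h"
#include "strbuf.h"
#include "fmt-merge-msg.h"

static void example_format(struct strbuf *in, struct strbuf *out)
{
	struct fmt_merge_msg_opts opts;

	memset(&opts, 0, sizeof(opts));
	opts.add_title = 1;
	opts.credit_people = 1;
	opts.shortlog_len = DEFAULT_MERGE_LOG_LEN; /* 20, assumed relocated with the struct */

	if (fmt_merge_msg(in, out, &opts))
		die("could not format merge message");
}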
diff --git a/builtin/blame.c b/builtin/blame.c
index bf1cecdf3f..3c13634f27 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -1061,6 +1061,14 @@ parse_done:
string_list_clear(&ignore_revs_file_list, 0);
string_list_clear(&ignore_rev_list, 0);
setup_scoreboard(&sb, path, &o);
+
+ /*
+ * Changed-path Bloom filters are disabled when looking
+ * for copies.
+ */
+ if (!(opt & PICKAXE_BLAME_COPY))
+ setup_blame_bloom_data(&sb, path);
+
lno = sb.num_lines;
if (lno && !range_list.nr)
@@ -1164,5 +1172,7 @@ parse_done:
printf("num get patch: %d\n", sb.num_get_patch);
printf("num commits: %d\n", sb.num_commits);
}
+
+ cleanup_scoreboard(&sb);
return 0;
}
diff --git a/builtin/clean.c b/builtin/clean.c
index c8c011d2dd..f14c21b863 100644
--- a/builtin/clean.c
+++ b/builtin/clean.c
@@ -983,12 +983,6 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
if (!cache_name_is_other(ent->name, ent->len))
continue;
- if (pathspec.nr)
- matches = dir_path_match(&the_index, ent, &pathspec, 0, NULL);
-
- if (pathspec.nr && !matches)
- continue;
-
if (lstat(ent->name, &st))
die_errno("Cannot lstat '%s'", ent->name);
diff --git a/builtin/clone.c b/builtin/clone.c
index a4f836d1ba..cb48a291ca 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -102,10 +102,10 @@ static struct option builtin_clone_options[] = {
N_("don't use local hardlinks, always copy")),
OPT_BOOL('s', "shared", &option_shared,
N_("setup as shared repository")),
- OPT_ALIAS(0, "recursive", "recurse-submodules"),
{ OPTION_CALLBACK, 0, "recurse-submodules", &option_recurse_submodules,
N_("pathspec"), N_("initialize submodules in the clone"),
PARSE_OPT_OPTARG, recurse_submodules_cb, (intptr_t)"." },
+ OPT_ALIAS(0, "recursive", "recurse-submodules"),
OPT_INTEGER('j', "jobs", &max_jobs,
N_("number of submodules cloned in parallel")),
OPT_STRING(0, "template", &option_template, N_("template-directory"),
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index d1ab6625f6..15fe60317c 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -9,7 +9,9 @@
static char const * const builtin_commit_graph_usage[] = {
N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]"),
- N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] [--[no-]progress] <split options>"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append] "
+ "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] "
+ "[--changed-paths] [--[no-]progress] <split options>"),
NULL
};
@@ -19,7 +21,9 @@ static const char * const builtin_commit_graph_verify_usage[] = {
};
static const char * const builtin_commit_graph_write_usage[] = {
- N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] [--[no-]progress] <split options>"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append] "
+ "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] "
+ "[--changed-paths] [--[no-]progress] <split options>"),
NULL
};
@@ -32,6 +36,7 @@ static struct opts_commit_graph {
int split;
int shallow;
int progress;
+ int enable_changed_paths;
} opts;
static struct object_directory *find_odb(struct repository *r,
@@ -114,10 +119,29 @@ static int graph_verify(int argc, const char **argv)
extern int read_replace_refs;
static struct split_commit_graph_opts split_opts;
+static int write_option_parse_split(const struct option *opt, const char *arg,
+ int unset)
+{
+ enum commit_graph_split_flags *flags = opt->value;
+
+ opts.split = 1;
+ if (!arg)
+ return 0;
+
+ if (!strcmp(arg, "no-merge"))
+ *flags = COMMIT_GRAPH_SPLIT_MERGE_PROHIBITED;
+ else if (!strcmp(arg, "replace"))
+ *flags = COMMIT_GRAPH_SPLIT_REPLACE;
+ else
+ die(_("unrecognized --split argument, %s"), arg);
+
+ return 0;
+}
+
static int graph_write(int argc, const char **argv)
{
struct string_list *pack_indexes = NULL;
- struct string_list *commit_hex = NULL;
+ struct oidset commits = OIDSET_INIT;
struct object_directory *odb = NULL;
struct string_list lines;
int result = 0;
@@ -135,15 +159,19 @@ static int graph_write(int argc, const char **argv)
N_("start walk at commits listed by stdin")),
OPT_BOOL(0, "append", &opts.append,
N_("include all commits already in the commit-graph file")),
+ OPT_BOOL(0, "changed-paths", &opts.enable_changed_paths,
+ N_("enable computation for changed paths")),
OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")),
- OPT_BOOL(0, "split", &opts.split,
- N_("allow writing an incremental commit-graph file")),
+ OPT_CALLBACK_F(0, "split", &split_opts.flags, NULL,
+ N_("allow writing an incremental commit-graph file"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ write_option_parse_split),
OPT_INTEGER(0, "max-commits", &split_opts.max_commits,
N_("maximum number of commits in a non-base split commit-graph")),
OPT_INTEGER(0, "size-multiple", &split_opts.size_multiple,
N_("maximum ratio between two levels of a split commit-graph")),
OPT_EXPIRY_DATE(0, "expire-time", &split_opts.expire_time,
- N_("maximum number of commits in a non-base split commit-graph")),
+ N_("only expire files older than a given date-time")),
OPT_END(),
};
@@ -168,6 +196,9 @@ static int graph_write(int argc, const char **argv)
flags |= COMMIT_GRAPH_WRITE_SPLIT;
if (opts.progress)
flags |= COMMIT_GRAPH_WRITE_PROGRESS;
+ if (opts.enable_changed_paths ||
+ git_env_bool(GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS, 0))
+ flags |= COMMIT_GRAPH_WRITE_BLOOM_FILTERS;
read_replace_refs = 0;
odb = find_odb(the_repository, opts.obj_dir);
@@ -188,7 +219,20 @@ static int graph_write(int argc, const char **argv)
if (opts.stdin_packs)
pack_indexes = &lines;
if (opts.stdin_commits) {
- commit_hex = &lines;
+ struct string_list_item *item;
+ oidset_init(&commits, lines.nr);
+ for_each_string_list_item(item, &lines) {
+ struct object_id oid;
+ const char *end;
+
+ if (parse_oid_hex(item->string, &oid, &end)) {
+ error(_("unexpected non-hex object ID: "
+ "%s"), item->string);
+ return 1;
+ }
+
+ oidset_insert(&commits, &oid);
+ }
flags |= COMMIT_GRAPH_WRITE_CHECK_OIDS;
}
@@ -197,7 +241,7 @@ static int graph_write(int argc, const char **argv)
if (write_commit_graph(odb,
pack_indexes,
- commit_hex,
+ opts.stdin_commits ? &commits : NULL,
flags,
&split_opts))
result = 1;
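
For in-process callers the new behaviour hangs off the write flags rather than the
option parser; a hedged sketch (flag names as used in graph_write() above, the flags
type and error handling assumed) of what corresponds to
"git commit-graph write --reachable --changed-paths":

	unsigned int flags = COMMIT_GRAPH_WRITE_PROGRESS |
			     COMMIT_GRAPH_WRITE_BLOOM_FILTERS;

	if (write_commit_graph_reachable(the_repository->objects->odb,
					 flags, NULL))
		die("failed to write commit-graph");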
diff --git a/builtin/commit.c b/builtin/commit.c
index d3e7781e65..4743ea5a4c 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -1700,9 +1700,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
"new_index file. Check that disk is not full and quota is\n"
"not exceeded, and then \"git restore --staged :/\" to recover."));
- if (git_env_bool(GIT_TEST_COMMIT_GRAPH, 0) &&
- write_commit_graph_reachable(the_repository->objects->odb, 0, NULL))
- return 1;
+ git_test_write_commit_graph_or_die();
repo_rerere(the_repository, 0);
run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
@@ -1721,6 +1719,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
&oid, flags);
}
+ apply_autostash(git_path_merge_autostash(the_repository));
+
UNLEAK(err);
UNLEAK(sb);
return 0;
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
index cb9ea79367..802363d0a2 100644
--- a/builtin/diff-tree.c
+++ b/builtin/diff-tree.c
@@ -109,6 +109,7 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
struct object *tree1, *tree2;
static struct rev_info *opt = &log_tree_opt;
struct setup_revision_opt s_r_opt;
+ struct userformat_want w;
int read_stdin = 0;
if (argc == 2 && !strcmp(argv[1], "-h"))
@@ -127,6 +128,14 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
precompose_argv(argc, argv);
argc = setup_revisions(argc, argv, opt, &s_r_opt);
+ memset(&w, 0, sizeof(w));
+ userformat_find_requirements(NULL, &w);
+
+ if (!opt->show_notes_given && w.notes)
+ opt->show_notes = 1;
+ if (opt->show_notes)
+ load_display_notes(&opt->notes_opt);
+
while (--argc > 0) {
const char *arg = *++argv;
diff --git a/builtin/fmt-merge-msg.c b/builtin/fmt-merge-msg.c
index 172dfbd852..48a8699de7 100644
--- a/builtin/fmt-merge-msg.c
+++ b/builtin/fmt-merge-msg.c
@@ -1,669 +1,13 @@
#include "builtin.h"
-#include "cache.h"
#include "config.h"
-#include "refs.h"
-#include "object-store.h"
-#include "commit.h"
-#include "diff.h"
-#include "revision.h"
-#include "tag.h"
-#include "string-list.h"
-#include "branch.h"
#include "fmt-merge-msg.h"
-#include "gpg-interface.h"
-#include "repository.h"
-#include "commit-reach.h"
+#include "parse-options.h"
static const char * const fmt_merge_msg_usage[] = {
N_("git fmt-merge-msg [-m <message>] [--log[=<n>] | --no-log] [--file <file>]"),
NULL
};
-static int use_branch_desc;
-
-int fmt_merge_msg_config(const char *key, const char *value, void *cb)
-{
- if (!strcmp(key, "merge.log") || !strcmp(key, "merge.summary")) {
- int is_bool;
- merge_log_config = git_config_bool_or_int(key, value, &is_bool);
- if (!is_bool && merge_log_config < 0)
- return error("%s: negative length %s", key, value);
- if (is_bool && merge_log_config)
- merge_log_config = DEFAULT_MERGE_LOG_LEN;
- } else if (!strcmp(key, "merge.branchdesc")) {
- use_branch_desc = git_config_bool(key, value);
- } else {
- return git_default_config(key, value, cb);
- }
- return 0;
-}
-
-/* merge data per repository where the merged tips came from */
-struct src_data {
- struct string_list branch, tag, r_branch, generic;
- int head_status;
-};
-
-struct origin_data {
- struct object_id oid;
- unsigned is_local_branch:1;
-};
-
-static void init_src_data(struct src_data *data)
-{
- data->branch.strdup_strings = 1;
- data->tag.strdup_strings = 1;
- data->r_branch.strdup_strings = 1;
- data->generic.strdup_strings = 1;
-}
-
-static struct string_list srcs = STRING_LIST_INIT_DUP;
-static struct string_list origins = STRING_LIST_INIT_DUP;
-
-struct merge_parents {
- int alloc, nr;
- struct merge_parent {
- struct object_id given;
- struct object_id commit;
- unsigned char used;
- } *item;
-};
-
-/*
- * I know, I know, this is inefficient, but you won't be pulling and merging
- * hundreds of heads at a time anyway.
- */
-static struct merge_parent *find_merge_parent(struct merge_parents *table,
- struct object_id *given,
- struct object_id *commit)
-{
- int i;
- for (i = 0; i < table->nr; i++) {
- if (given && !oideq(&table->item[i].given, given))
- continue;
- if (commit && !oideq(&table->item[i].commit, commit))
- continue;
- return &table->item[i];
- }
- return NULL;
-}
-
-static void add_merge_parent(struct merge_parents *table,
- struct object_id *given,
- struct object_id *commit)
-{
- if (table->nr && find_merge_parent(table, given, commit))
- return;
- ALLOC_GROW(table->item, table->nr + 1, table->alloc);
- oidcpy(&table->item[table->nr].given, given);
- oidcpy(&table->item[table->nr].commit, commit);
- table->item[table->nr].used = 0;
- table->nr++;
-}
-
-static int handle_line(char *line, struct merge_parents *merge_parents)
-{
- int i, len = strlen(line);
- struct origin_data *origin_data;
- char *src;
- const char *origin, *tag_name;
- struct src_data *src_data;
- struct string_list_item *item;
- int pulling_head = 0;
- struct object_id oid;
- const unsigned hexsz = the_hash_algo->hexsz;
-
- if (len < hexsz + 3 || line[hexsz] != '\t')
- return 1;
-
- if (starts_with(line + hexsz + 1, "not-for-merge"))
- return 0;
-
- if (line[hexsz + 1] != '\t')
- return 2;
-
- i = get_oid_hex(line, &oid);
- if (i)
- return 3;
-
- if (!find_merge_parent(merge_parents, &oid, NULL))
- return 0; /* subsumed by other parents */
-
- origin_data = xcalloc(1, sizeof(struct origin_data));
- oidcpy(&origin_data->oid, &oid);
-
- if (line[len - 1] == '\n')
- line[len - 1] = 0;
- line += hexsz + 2;
-
- /*
- * At this point, line points at the beginning of comment e.g.
- * "branch 'frotz' of git://that/repository.git".
- * Find the repository name and point it with src.
- */
- src = strstr(line, " of ");
- if (src) {
- *src = 0;
- src += 4;
- pulling_head = 0;
- } else {
- src = line;
- pulling_head = 1;
- }
-
- item = unsorted_string_list_lookup(&srcs, src);
- if (!item) {
- item = string_list_append(&srcs, src);
- item->util = xcalloc(1, sizeof(struct src_data));
- init_src_data(item->util);
- }
- src_data = item->util;
-
- if (pulling_head) {
- origin = src;
- src_data->head_status |= 1;
- } else if (skip_prefix(line, "branch ", &origin)) {
- origin_data->is_local_branch = 1;
- string_list_append(&src_data->branch, origin);
- src_data->head_status |= 2;
- } else if (skip_prefix(line, "tag ", &tag_name)) {
- origin = line;
- string_list_append(&src_data->tag, tag_name);
- src_data->head_status |= 2;
- } else if (skip_prefix(line, "remote-tracking branch ", &origin)) {
- string_list_append(&src_data->r_branch, origin);
- src_data->head_status |= 2;
- } else {
- origin = src;
- string_list_append(&src_data->generic, line);
- src_data->head_status |= 2;
- }
-
- if (!strcmp(".", src) || !strcmp(src, origin)) {
- int len = strlen(origin);
- if (origin[0] == '\'' && origin[len - 1] == '\'')
- origin = xmemdupz(origin + 1, len - 2);
- } else
- origin = xstrfmt("%s of %s", origin, src);
- if (strcmp(".", src))
- origin_data->is_local_branch = 0;
- string_list_append(&origins, origin)->util = origin_data;
- return 0;
-}
-
-static void print_joined(const char *singular, const char *plural,
- struct string_list *list, struct strbuf *out)
-{
- if (list->nr == 0)
- return;
- if (list->nr == 1) {
- strbuf_addf(out, "%s%s", singular, list->items[0].string);
- } else {
- int i;
- strbuf_addstr(out, plural);
- for (i = 0; i < list->nr - 1; i++)
- strbuf_addf(out, "%s%s", i > 0 ? ", " : "",
- list->items[i].string);
- strbuf_addf(out, " and %s", list->items[list->nr - 1].string);
- }
-}
-
-static void add_branch_desc(struct strbuf *out, const char *name)
-{
- struct strbuf desc = STRBUF_INIT;
-
- if (!read_branch_desc(&desc, name)) {
- const char *bp = desc.buf;
- while (*bp) {
- const char *ep = strchrnul(bp, '\n');
- if (*ep)
- ep++;
- strbuf_addf(out, " : %.*s", (int)(ep - bp), bp);
- bp = ep;
- }
- strbuf_complete_line(out);
- }
- strbuf_release(&desc);
-}
-
-#define util_as_integral(elem) ((intptr_t)((elem)->util))
-
-static void record_person_from_buf(int which, struct string_list *people,
- const char *buffer)
-{
- char *name_buf, *name, *name_end;
- struct string_list_item *elem;
- const char *field;
-
- field = (which == 'a') ? "\nauthor " : "\ncommitter ";
- name = strstr(buffer, field);
- if (!name)
- return;
- name += strlen(field);
- name_end = strchrnul(name, '<');
- if (*name_end)
- name_end--;
- while (isspace(*name_end) && name <= name_end)
- name_end--;
- if (name_end < name)
- return;
- name_buf = xmemdupz(name, name_end - name + 1);
-
- elem = string_list_lookup(people, name_buf);
- if (!elem) {
- elem = string_list_insert(people, name_buf);
- elem->util = (void *)0;
- }
- elem->util = (void*)(util_as_integral(elem) + 1);
- free(name_buf);
-}
-
-
-static void record_person(int which, struct string_list *people,
- struct commit *commit)
-{
- const char *buffer = get_commit_buffer(commit, NULL);
- record_person_from_buf(which, people, buffer);
- unuse_commit_buffer(commit, buffer);
-}
-
-static int cmp_string_list_util_as_integral(const void *a_, const void *b_)
-{
- const struct string_list_item *a = a_, *b = b_;
- return util_as_integral(b) - util_as_integral(a);
-}
-
-static void add_people_count(struct strbuf *out, struct string_list *people)
-{
- if (people->nr == 1)
- strbuf_addstr(out, people->items[0].string);
- else if (people->nr == 2)
- strbuf_addf(out, "%s (%d) and %s (%d)",
- people->items[0].string,
- (int)util_as_integral(&people->items[0]),
- people->items[1].string,
- (int)util_as_integral(&people->items[1]));
- else if (people->nr)
- strbuf_addf(out, "%s (%d) and others",
- people->items[0].string,
- (int)util_as_integral(&people->items[0]));
-}
-
-static void credit_people(struct strbuf *out,
- struct string_list *them,
- int kind)
-{
- const char *label;
- const char *me;
-
- if (kind == 'a') {
- label = "By";
- me = git_author_info(IDENT_NO_DATE);
- } else {
- label = "Via";
- me = git_committer_info(IDENT_NO_DATE);
- }
-
- if (!them->nr ||
- (them->nr == 1 &&
- me &&
- skip_prefix(me, them->items->string, &me) &&
- starts_with(me, " <")))
- return;
- strbuf_addf(out, "\n%c %s ", comment_line_char, label);
- add_people_count(out, them);
-}
-
-static void add_people_info(struct strbuf *out,
- struct string_list *authors,
- struct string_list *committers)
-{
- QSORT(authors->items, authors->nr,
- cmp_string_list_util_as_integral);
- QSORT(committers->items, committers->nr,
- cmp_string_list_util_as_integral);
-
- credit_people(out, authors, 'a');
- credit_people(out, committers, 'c');
-}
-
-static void shortlog(const char *name,
- struct origin_data *origin_data,
- struct commit *head,
- struct rev_info *rev,
- struct fmt_merge_msg_opts *opts,
- struct strbuf *out)
-{
- int i, count = 0;
- struct commit *commit;
- struct object *branch;
- struct string_list subjects = STRING_LIST_INIT_DUP;
- struct string_list authors = STRING_LIST_INIT_DUP;
- struct string_list committers = STRING_LIST_INIT_DUP;
- int flags = UNINTERESTING | TREESAME | SEEN | SHOWN | ADDED;
- struct strbuf sb = STRBUF_INIT;
- const struct object_id *oid = &origin_data->oid;
- int limit = opts->shortlog_len;
-
- branch = deref_tag(the_repository, parse_object(the_repository, oid),
- oid_to_hex(oid),
- the_hash_algo->hexsz);
- if (!branch || branch->type != OBJ_COMMIT)
- return;
-
- setup_revisions(0, NULL, rev, NULL);
- add_pending_object(rev, branch, name);
- add_pending_object(rev, &head->object, "^HEAD");
- head->object.flags |= UNINTERESTING;
- if (prepare_revision_walk(rev))
- die("revision walk setup failed");
- while ((commit = get_revision(rev)) != NULL) {
- struct pretty_print_context ctx = {0};
-
- if (commit->parents && commit->parents->next) {
- /* do not list a merge but count committer */
- if (opts->credit_people)
- record_person('c', &committers, commit);
- continue;
- }
- if (!count && opts->credit_people)
- /* the 'tip' committer */
- record_person('c', &committers, commit);
- if (opts->credit_people)
- record_person('a', &authors, commit);
- count++;
- if (subjects.nr > limit)
- continue;
-
- format_commit_message(commit, "%s", &sb, &ctx);
- strbuf_ltrim(&sb);
-
- if (!sb.len)
- string_list_append(&subjects,
- oid_to_hex(&commit->object.oid));
- else
- string_list_append_nodup(&subjects,
- strbuf_detach(&sb, NULL));
- }
-
- if (opts->credit_people)
- add_people_info(out, &authors, &committers);
- if (count > limit)
- strbuf_addf(out, "\n* %s: (%d commits)\n", name, count);
- else
- strbuf_addf(out, "\n* %s:\n", name);
-
- if (origin_data->is_local_branch && use_branch_desc)
- add_branch_desc(out, name);
-
- for (i = 0; i < subjects.nr; i++)
- if (i >= limit)
- strbuf_addstr(out, " ...\n");
- else
- strbuf_addf(out, " %s\n", subjects.items[i].string);
-
- clear_commit_marks((struct commit *)branch, flags);
- clear_commit_marks(head, flags);
- free_commit_list(rev->commits);
- rev->commits = NULL;
- rev->pending.nr = 0;
-
- string_list_clear(&authors, 0);
- string_list_clear(&committers, 0);
- string_list_clear(&subjects, 0);
-}
-
-static void fmt_merge_msg_title(struct strbuf *out,
- const char *current_branch)
-{
- int i = 0;
- char *sep = "";
-
- strbuf_addstr(out, "Merge ");
- for (i = 0; i < srcs.nr; i++) {
- struct src_data *src_data = srcs.items[i].util;
- const char *subsep = "";
-
- strbuf_addstr(out, sep);
- sep = "; ";
-
- if (src_data->head_status == 1) {
- strbuf_addstr(out, srcs.items[i].string);
- continue;
- }
- if (src_data->head_status == 3) {
- subsep = ", ";
- strbuf_addstr(out, "HEAD");
- }
- if (src_data->branch.nr) {
- strbuf_addstr(out, subsep);
- subsep = ", ";
- print_joined("branch ", "branches ", &src_data->branch,
- out);
- }
- if (src_data->r_branch.nr) {
- strbuf_addstr(out, subsep);
- subsep = ", ";
- print_joined("remote-tracking branch ", "remote-tracking branches ",
- &src_data->r_branch, out);
- }
- if (src_data->tag.nr) {
- strbuf_addstr(out, subsep);
- subsep = ", ";
- print_joined("tag ", "tags ", &src_data->tag, out);
- }
- if (src_data->generic.nr) {
- strbuf_addstr(out, subsep);
- print_joined("commit ", "commits ", &src_data->generic,
- out);
- }
- if (strcmp(".", srcs.items[i].string))
- strbuf_addf(out, " of %s", srcs.items[i].string);
- }
-
- if (!strcmp("master", current_branch))
- strbuf_addch(out, '\n');
- else
- strbuf_addf(out, " into %s\n", current_branch);
-}
-
-static void fmt_tag_signature(struct strbuf *tagbuf,
- struct strbuf *sig,
- const char *buf,
- unsigned long len)
-{
- const char *tag_body = strstr(buf, "\n\n");
- if (tag_body) {
- tag_body += 2;
- strbuf_add(tagbuf, tag_body, buf + len - tag_body);
- }
- strbuf_complete_line(tagbuf);
- if (sig->len) {
- strbuf_addch(tagbuf, '\n');
- strbuf_add_commented_lines(tagbuf, sig->buf, sig->len);
- }
-}
-
-static void fmt_merge_msg_sigs(struct strbuf *out)
-{
- int i, tag_number = 0, first_tag = 0;
- struct strbuf tagbuf = STRBUF_INIT;
-
- for (i = 0; i < origins.nr; i++) {
- struct object_id *oid = origins.items[i].util;
- enum object_type type;
- unsigned long size, len;
- char *buf = read_object_file(oid, &type, &size);
- struct signature_check sigc = { 0 };
- struct strbuf sig = STRBUF_INIT;
-
- if (!buf || type != OBJ_TAG)
- goto next;
- len = parse_signature(buf, size);
-
- if (size == len)
- ; /* merely annotated */
- else if (check_signature(buf, len, buf + len, size - len, &sigc) &&
- !sigc.gpg_output)
- strbuf_addstr(&sig, "gpg verification failed.\n");
- else
- strbuf_addstr(&sig, sigc.gpg_output);
- signature_check_clear(&sigc);
-
- if (!tag_number++) {
- fmt_tag_signature(&tagbuf, &sig, buf, len);
- first_tag = i;
- } else {
- if (tag_number == 2) {
- struct strbuf tagline = STRBUF_INIT;
- strbuf_addch(&tagline, '\n');
- strbuf_add_commented_lines(&tagline,
- origins.items[first_tag].string,
- strlen(origins.items[first_tag].string));
- strbuf_insert(&tagbuf, 0, tagline.buf,
- tagline.len);
- strbuf_release(&tagline);
- }
- strbuf_addch(&tagbuf, '\n');
- strbuf_add_commented_lines(&tagbuf,
- origins.items[i].string,
- strlen(origins.items[i].string));
- fmt_tag_signature(&tagbuf, &sig, buf, len);
- }
- strbuf_release(&sig);
- next:
- free(buf);
- }
- if (tagbuf.len) {
- strbuf_addch(out, '\n');
- strbuf_addbuf(out, &tagbuf);
- }
- strbuf_release(&tagbuf);
-}
-
-static void find_merge_parents(struct merge_parents *result,
- struct strbuf *in, struct object_id *head)
-{
- struct commit_list *parents;
- struct commit *head_commit;
- int pos = 0, i, j;
-
- parents = NULL;
- while (pos < in->len) {
- int len;
- char *p = in->buf + pos;
- char *newline = strchr(p, '\n');
- const char *q;
- struct object_id oid;
- struct commit *parent;
- struct object *obj;
-
- len = newline ? newline - p : strlen(p);
- pos += len + !!newline;
-
- if (parse_oid_hex(p, &oid, &q) ||
- q[0] != '\t' ||
- q[1] != '\t')
- continue; /* skip not-for-merge */
- /*
- * Do not use get_merge_parent() here; we do not have
- * "name" here and we do not want to contaminate its
- * util field yet.
- */
- obj = parse_object(the_repository, &oid);
- parent = (struct commit *)peel_to_type(NULL, 0, obj, OBJ_COMMIT);
- if (!parent)
- continue;
- commit_list_insert(parent, &parents);
- add_merge_parent(result, &obj->oid, &parent->object.oid);
- }
- head_commit = lookup_commit(the_repository, head);
- if (head_commit)
- commit_list_insert(head_commit, &parents);
- reduce_heads_replace(&parents);
-
- while (parents) {
- struct commit *cmit = pop_commit(&parents);
- for (i = 0; i < result->nr; i++)
- if (oideq(&result->item[i].commit, &cmit->object.oid))
- result->item[i].used = 1;
- }
-
- for (i = j = 0; i < result->nr; i++) {
- if (result->item[i].used) {
- if (i != j)
- result->item[j] = result->item[i];
- j++;
- }
- }
- result->nr = j;
-}
-
-int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
- struct fmt_merge_msg_opts *opts)
-{
- int i = 0, pos = 0;
- struct object_id head_oid;
- const char *current_branch;
- void *current_branch_to_free;
- struct merge_parents merge_parents;
-
- memset(&merge_parents, 0, sizeof(merge_parents));
-
- /* get current branch */
- current_branch = current_branch_to_free =
- resolve_refdup("HEAD", RESOLVE_REF_READING, &head_oid, NULL);
- if (!current_branch)
- die("No current branch");
- if (starts_with(current_branch, "refs/heads/"))
- current_branch += 11;
-
- find_merge_parents(&merge_parents, in, &head_oid);
-
- /* get a line */
- while (pos < in->len) {
- int len;
- char *newline, *p = in->buf + pos;
-
- newline = strchr(p, '\n');
- len = newline ? newline - p : strlen(p);
- pos += len + !!newline;
- i++;
- p[len] = 0;
- if (handle_line(p, &merge_parents))
- die("error in line %d: %.*s", i, len, p);
- }
-
- if (opts->add_title && srcs.nr)
- fmt_merge_msg_title(out, current_branch);
-
- if (origins.nr)
- fmt_merge_msg_sigs(out);
-
- if (opts->shortlog_len) {
- struct commit *head;
- struct rev_info rev;
-
- head = lookup_commit_or_die(&head_oid, "HEAD");
- repo_init_revisions(the_repository, &rev, NULL);
- rev.commit_format = CMIT_FMT_ONELINE;
- rev.ignore_merges = 1;
- rev.limited = 1;
-
- strbuf_complete_line(out);
-
- for (i = 0; i < origins.nr; i++)
- shortlog(origins.items[i].string,
- origins.items[i].util,
- head, &rev, opts, out);
- }
-
- strbuf_complete_line(out);
- free(current_branch_to_free);
- free(merge_parents.item);
- return 0;
-}
-
int cmd_fmt_merge_msg(int argc, const char **argv, const char *prefix)
{
const char *inpath = NULL;
diff --git a/builtin/grep.c b/builtin/grep.c
index 99e2685090..5e150f5825 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -295,6 +295,38 @@ static int grep_cmd_config(const char *var, const char *value, void *cb)
return st;
}
+static void grep_source_name(struct grep_opt *opt, const char *filename,
+ int tree_name_len, struct strbuf *out)
+{
+ strbuf_reset(out);
+
+ if (opt->null_following_name) {
+ if (opt->relative && opt->prefix_length) {
+ struct strbuf rel_buf = STRBUF_INIT;
+ const char *rel_name =
+ relative_path(filename + tree_name_len,
+ opt->prefix, &rel_buf);
+
+ if (tree_name_len)
+ strbuf_add(out, filename, tree_name_len);
+
+ strbuf_addstr(out, rel_name);
+ strbuf_release(&rel_buf);
+ } else {
+ strbuf_addstr(out, filename);
+ }
+ return;
+ }
+
+ if (opt->relative && opt->prefix_length)
+ quote_path_relative(filename + tree_name_len, opt->prefix, out);
+ else
+ quote_c_style(filename + tree_name_len, out, NULL, 0);
+
+ if (tree_name_len)
+ strbuf_insert(out, 0, filename, tree_name_len);
+}
+
static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
const char *filename, int tree_name_len,
const char *path)
@@ -302,13 +334,7 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
struct strbuf pathbuf = STRBUF_INIT;
struct grep_source gs;
- if (opt->relative && opt->prefix_length) {
- quote_path_relative(filename + tree_name_len, opt->prefix, &pathbuf);
- strbuf_insert(&pathbuf, 0, filename, tree_name_len);
- } else {
- strbuf_addstr(&pathbuf, filename);
- }
-
+ grep_source_name(opt, filename, tree_name_len, &pathbuf);
grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
strbuf_release(&pathbuf);
@@ -334,11 +360,7 @@ static int grep_file(struct grep_opt *opt, const char *filename)
struct strbuf buf = STRBUF_INIT;
struct grep_source gs;
- if (opt->relative && opt->prefix_length)
- quote_path_relative(filename, opt->prefix, &buf);
- else
- strbuf_addstr(&buf, filename);
-
+ grep_source_name(opt, filename, 0, &buf);
grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
strbuf_release(&buf);
@@ -679,8 +701,6 @@ static int grep_directory(struct grep_opt *opt, const struct pathspec *pathspec,
fill_directory(&dir, opt->repo->index, pathspec);
for (i = 0; i < dir.nr; i++) {
- if (!dir_path_match(opt->repo->index, dir.entries[i], pathspec, 0, NULL))
- continue;
hit |= grep_file(opt, dir.entries[i]->name);
if (hit && opt->status_only)
break;
diff --git a/builtin/help.c b/builtin/help.c
index e5590d7787..c024110531 100644
--- a/builtin/help.c
+++ b/builtin/help.c
@@ -242,7 +242,7 @@ static int add_man_viewer_cmd(const char *name,
static int add_man_viewer_info(const char *var, const char *value)
{
const char *name, *subkey;
- int namelen;
+ size_t namelen;
if (parse_config_key(var, "man", &name, &namelen, &subkey) < 0 || !name)
return 0;
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index d967d188a3..f176dd28c8 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1368,9 +1368,8 @@ static void fix_unresolved_deltas(struct hashfile *f)
continue;
oid_array_append(&to_fetch, &d->oid);
}
- if (to_fetch.nr)
- promisor_remote_get_direct(the_repository,
- to_fetch.oid, to_fetch.nr);
+ promisor_remote_get_direct(the_repository,
+ to_fetch.oid, to_fetch.nr);
oid_array_clear(&to_fetch);
}
diff --git a/builtin/log.c b/builtin/log.c
index a5c3ace9a0..bef7403d5e 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -166,15 +166,18 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
int quiet = 0, source = 0, mailmap;
static struct line_opt_callback_data line_cb = {NULL, NULL, STRING_LIST_INIT_DUP};
static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP;
+ static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP;
static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
struct decoration_filter decoration_filter = {&decorate_refs_include,
- &decorate_refs_exclude};
+ &decorate_refs_exclude,
+ &decorate_refs_exclude_config};
static struct revision_sources revision_sources;
const struct option builtin_log_options[] = {
OPT__QUIET(&quiet, N_("suppress diff output")),
OPT_BOOL(0, "source", &source, N_("show source")),
OPT_BOOL(0, "use-mailmap", &mailmap, N_("Use mail map file")),
+ OPT_ALIAS(0, "mailmap", "use-mailmap"),
OPT_STRING_LIST(0, "decorate-refs", &decorate_refs_include,
N_("pattern"), N_("only decorate refs that match <pattern>")),
OPT_STRING_LIST(0, "decorate-refs-exclude", &decorate_refs_exclude,
@@ -238,7 +241,19 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
}
if (decoration_style) {
+ const struct string_list *config_exclude =
+ repo_config_get_value_multi(the_repository,
+ "log.excludeDecoration");
+
+ if (config_exclude) {
+ struct string_list_item *item;
+ for_each_string_list_item(item, config_exclude)
+ string_list_append(&decorate_refs_exclude_config,
+ item->string);
+ }
+
rev->show_decorations = 1;
+
load_ref_decorations(&decoration_filter, decoration_style);
}
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index f069a028ce..b87c22ac24 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -128,8 +128,9 @@ static void show_dir_entry(const struct index_state *istate,
if (len > ent->len)
die("git ls-files: internal error - directory entry not superset of prefix");
- if (!dir_path_match(istate, ent, &pathspec, len, ps_matched))
- return;
+ /* If ps_matches is non-NULL, figure out which pathspec(s) match. */
+ if (ps_matched)
+ dir_path_match(istate, ent, &pathspec, len, ps_matched);
fputs(tag, stdout);
write_eolinfo(istate, NULL, ent->name);
diff --git a/builtin/merge.c b/builtin/merge.c
index df83ba2a80..97066a5632 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -40,6 +40,7 @@
#include "branch.h"
#include "commit-reach.h"
#include "wt-status.h"
+#include "commit-graph.h"
#define DEFAULT_TWOHEAD (1<<0)
#define DEFAULT_OCTOPUS (1<<1)
@@ -82,6 +83,7 @@ static int show_progress = -1;
static int default_to_upstream = 1;
static int signoff;
static const char *sign_commit;
+static int autostash;
static int no_verify;
static struct strategy all_strategy[] = {
@@ -286,6 +288,7 @@ static struct option builtin_merge_options[] = {
OPT_SET_INT(0, "progress", &show_progress, N_("force progress reporting"), 1),
{ OPTION_STRING, 'S', "gpg-sign", &sign_commit, N_("key-id"),
N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_AUTOSTASH(&autostash),
OPT_BOOL(0, "overwrite-ignore", &overwrite_ignore, N_("update ignored files (default)")),
OPT_BOOL(0, "signoff", &signoff, N_("add Signed-off-by:")),
OPT_BOOL(0, "no-verify", &no_verify, N_("bypass pre-merge-commit and commit-msg hooks")),
@@ -475,6 +478,7 @@ static void finish(struct commit *head_commit,
/* Run a post-merge hook */
run_hook_le(NULL, "post-merge", squash ? "1" : "0", NULL);
+ apply_autostash(git_path_merge_autostash(the_repository));
strbuf_release(&reflog_message);
}
@@ -636,6 +640,9 @@ static int git_merge_config(const char *k, const char *v, void *cb)
return 0;
} else if (!strcmp(k, "gpg.mintrustlevel")) {
check_trust_level = 0;
+ } else if (!strcmp(k, "merge.autostash")) {
+ autostash = git_config_bool(k, v);
+ return 0;
}
status = fmt_merge_msg_config(k, v, cb);
@@ -1283,6 +1290,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
if (abort_current_merge) {
int nargc = 2;
const char *nargv[] = {"reset", "--merge", NULL};
+ struct strbuf stash_oid = STRBUF_INIT;
if (orig_argc != 2)
usage_msg_opt(_("--abort expects no arguments"),
@@ -1291,8 +1299,17 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
if (!file_exists(git_path_merge_head(the_repository)))
die(_("There is no merge to abort (MERGE_HEAD missing)."));
+ if (read_oneliner(&stash_oid, git_path_merge_autostash(the_repository),
+ READ_ONELINER_SKIP_IF_EMPTY))
+ unlink(git_path_merge_autostash(the_repository));
+
/* Invoke 'git reset --merge' */
ret = cmd_reset(nargc, nargv, prefix);
+
+ if (stash_oid.len)
+ apply_autostash_oid(stash_oid.buf);
+
+ strbuf_release(&stash_oid);
goto done;
}
@@ -1515,6 +1532,10 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
goto done;
}
+ if (autostash)
+ create_autostash(the_repository,
+ git_path_merge_autostash(the_repository),
+ "merge");
if (checkout_fast_forward(the_repository,
&head_commit->object.oid,
&commit->object.oid,
@@ -1581,6 +1602,11 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
if (fast_forward == FF_ONLY)
die(_("Not possible to fast-forward, aborting."));
+ if (autostash)
+ create_autostash(the_repository,
+ git_path_merge_autostash(the_repository),
+ "merge");
+
/* We are going to make a new commit. */
git_committer_info(IDENT_STRICT);
@@ -1675,9 +1701,11 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
head_commit);
}
- if (squash)
+ if (squash) {
finish(head_commit, remoteheads, NULL, NULL);
- else
+
+ git_test_write_commit_graph_or_die();
+ } else
write_merge_state(remoteheads);
if (merge_was_ok)
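Around the --abort handling in the merge.c hunks above, merge now remembers any autostash before invoking the reset and reapplies it afterwards. The sketch below covers only the "read a single-line stash ID from a state file, treat a missing or empty file as absent" step, with an illustrative read_stash_oid() helper standing in for git's read_oneliner():

#include <stdio.h>
#include <string.h>

/* Illustrative helper: returns 1 and fills buf when path holds a
 * non-empty first line, 0 otherwise (missing or empty file). */
static int read_stash_oid(const char *path, char *buf, size_t len)
{
	FILE *fp = fopen(path, "r");
	int ok = 0;

	if (!fp)
		return 0;
	if (fgets(buf, (int)len, fp)) {
		buf[strcspn(buf, "\n")] = '\0';
		ok = buf[0] != '\0';
	}
	fclose(fp);
	return ok;
}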
diff --git a/builtin/prune-packed.c b/builtin/prune-packed.c
index 48c5e78e33..b7b9281a8c 100644
--- a/builtin/prune-packed.c
+++ b/builtin/prune-packed.c
@@ -1,54 +1,12 @@
#include "builtin.h"
-#include "cache.h"
-#include "progress.h"
#include "parse-options.h"
-#include "packfile.h"
-#include "object-store.h"
+#include "prune-packed.h"
static const char * const prune_packed_usage[] = {
N_("git prune-packed [-n | --dry-run] [-q | --quiet]"),
NULL
};
-static struct progress *progress;
-
-static int prune_subdir(unsigned int nr, const char *path, void *data)
-{
- int *opts = data;
- display_progress(progress, nr + 1);
- if (!(*opts & PRUNE_PACKED_DRY_RUN))
- rmdir(path);
- return 0;
-}
-
-static int prune_object(const struct object_id *oid, const char *path,
- void *data)
-{
- int *opts = data;
-
- if (!has_object_pack(oid))
- return 0;
-
- if (*opts & PRUNE_PACKED_DRY_RUN)
- printf("rm -f %s\n", path);
- else
- unlink_or_warn(path);
- return 0;
-}
-
-void prune_packed_objects(int opts)
-{
- if (opts & PRUNE_PACKED_VERBOSE)
- progress = start_delayed_progress(_("Removing duplicate objects"), 256);
-
- for_each_loose_file_in_objdir(get_object_directory(),
- prune_object, NULL, prune_subdir, &opts);
-
- /* Ensure we show 100% before finishing progress */
- display_progress(progress, 256);
- stop_progress(&progress);
-}
-
int cmd_prune_packed(int argc, const char **argv, const char *prefix)
{
int opts = isatty(2) ? PRUNE_PACKED_VERBOSE : 0;
diff --git a/builtin/prune.c b/builtin/prune.c
index 2b76872ad2..fd9acc7222 100644
--- a/builtin/prune.c
+++ b/builtin/prune.c
@@ -6,6 +6,7 @@
#include "reachable.h"
#include "parse-options.h"
#include "progress.h"
+#include "prune-packed.h"
#include "object-store.h"
static const char * const prune_usage[] = {
diff --git a/builtin/pull.c b/builtin/pull.c
index b5d51ea74f..f1fa6db74e 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -164,7 +164,7 @@ static struct option pull_options[] = {
N_("verify that the named commit has a valid GPG signature"),
PARSE_OPT_NOARG),
OPT_BOOL(0, "autostash", &opt_autostash,
- N_("automatically stash/stash pop before and after rebase")),
+ N_("automatically stash/stash pop before and after")),
OPT_PASSTHRU_ARGV('s', "strategy", &opt_strategies, N_("strategy"),
N_("merge strategy to use"),
0),
@@ -695,6 +695,10 @@ static int run_merge(void)
argv_array_pushv(&args, opt_strategy_opts.argv);
if (opt_gpg_sign)
argv_array_push(&args, opt_gpg_sign);
+ if (opt_autostash == 0)
+ argv_array_push(&args, "--no-autostash");
+ else if (opt_autostash == 1)
+ argv_array_push(&args, "--autostash");
if (opt_allow_unrelated_histories > 0)
argv_array_push(&args, "--allow-unrelated-histories");
@@ -942,9 +946,6 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
if (get_oid("HEAD", &orig_head))
oidclr(&orig_head);
- if (!opt_rebase && opt_autostash != -1)
- die(_("--[no-]autostash option is only valid with --rebase."));
-
autostash = config_autostash;
if (opt_rebase) {
if (opt_autostash != -1)
diff --git a/builtin/rebase.c b/builtin/rebase.c
index c466923869..1c0a49c642 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -27,6 +27,9 @@
#include "branch.h"
#include "sequencer.h"
#include "rebase-interactive.h"
+#include "reset.h"
+
+#define DEFAULT_REFLOG_ACTION "rebase"
static char const * const builtin_rebase_usage[] = {
N_("git rebase [-i] [options] [--exec <cmd>] "
@@ -590,15 +593,6 @@ static const char *state_dir_path(const char *filename, struct rebase_options *o
return path.buf;
}
-/* Read one file, then strip line endings */
-static int read_one(const char *path, struct strbuf *buf)
-{
- if (strbuf_read_file(buf, path, 0) < 0)
- return error_errno(_("could not read '%s'"), path);
- strbuf_trim_trailing_newline(buf);
- return 0;
-}
-
/* Initialize the rebase options from the state directory. */
static int read_basic_state(struct rebase_options *opts)
{
@@ -606,8 +600,10 @@ static int read_basic_state(struct rebase_options *opts)
struct strbuf buf = STRBUF_INIT;
struct object_id oid;
- if (read_one(state_dir_path("head-name", opts), &head_name) ||
- read_one(state_dir_path("onto", opts), &buf))
+ if (!read_oneliner(&head_name, state_dir_path("head-name", opts),
+ READ_ONELINER_WARN_MISSING) ||
+ !read_oneliner(&buf, state_dir_path("onto", opts),
+ READ_ONELINER_WARN_MISSING))
return -1;
opts->head_name = starts_with(head_name.buf, "refs/") ?
xstrdup(head_name.buf) : NULL;
@@ -623,9 +619,11 @@ static int read_basic_state(struct rebase_options *opts)
*/
strbuf_reset(&buf);
if (file_exists(state_dir_path("orig-head", opts))) {
- if (read_one(state_dir_path("orig-head", opts), &buf))
+ if (!read_oneliner(&buf, state_dir_path("orig-head", opts),
+ READ_ONELINER_WARN_MISSING))
return -1;
- } else if (read_one(state_dir_path("head", opts), &buf))
+ } else if (!read_oneliner(&buf, state_dir_path("head", opts),
+ READ_ONELINER_WARN_MISSING))
return -1;
if (get_oid(buf.buf, &opts->orig_head))
return error(_("invalid orig-head: '%s'"), buf.buf);
@@ -645,8 +643,8 @@ static int read_basic_state(struct rebase_options *opts)
if (file_exists(state_dir_path("allow_rerere_autoupdate", opts))) {
strbuf_reset(&buf);
- if (read_one(state_dir_path("allow_rerere_autoupdate", opts),
- &buf))
+ if (!read_oneliner(&buf, state_dir_path("allow_rerere_autoupdate", opts),
+ READ_ONELINER_WARN_MISSING))
return -1;
if (!strcmp(buf.buf, "--rerere-autoupdate"))
opts->allow_rerere_autoupdate = RERERE_AUTOUPDATE;
@@ -659,8 +657,8 @@ static int read_basic_state(struct rebase_options *opts)
if (file_exists(state_dir_path("gpg_sign_opt", opts))) {
strbuf_reset(&buf);
- if (read_one(state_dir_path("gpg_sign_opt", opts),
- &buf))
+ if (!read_oneliner(&buf, state_dir_path("gpg_sign_opt", opts),
+ READ_ONELINER_WARN_MISSING))
return -1;
free(opts->gpg_sign_opt);
opts->gpg_sign_opt = xstrdup(buf.buf);
@@ -668,7 +666,8 @@ static int read_basic_state(struct rebase_options *opts)
if (file_exists(state_dir_path("strategy", opts))) {
strbuf_reset(&buf);
- if (read_one(state_dir_path("strategy", opts), &buf))
+ if (!read_oneliner(&buf, state_dir_path("strategy", opts),
+ READ_ONELINER_WARN_MISSING))
return -1;
free(opts->strategy);
opts->strategy = xstrdup(buf.buf);
@@ -676,7 +675,8 @@ static int read_basic_state(struct rebase_options *opts)
if (file_exists(state_dir_path("strategy_opts", opts))) {
strbuf_reset(&buf);
- if (read_one(state_dir_path("strategy_opts", opts), &buf))
+ if (!read_oneliner(&buf, state_dir_path("strategy_opts", opts),
+ READ_ONELINER_WARN_MISSING))
return -1;
free(opts->strategy_opts);
opts->strategy_opts = xstrdup(buf.buf);
@@ -719,51 +719,6 @@ static int rebase_write_basic_state(struct rebase_options *opts)
return 0;
}
-static int apply_autostash(struct rebase_options *opts)
-{
- const char *path = state_dir_path("autostash", opts);
- struct strbuf autostash = STRBUF_INIT;
- struct child_process stash_apply = CHILD_PROCESS_INIT;
-
- if (!file_exists(path))
- return 0;
-
- if (read_one(path, &autostash))
- return error(_("Could not read '%s'"), path);
- /* Ensure that the hash is not mistaken for a number */
- strbuf_addstr(&autostash, "^0");
- argv_array_pushl(&stash_apply.args,
- "stash", "apply", autostash.buf, NULL);
- stash_apply.git_cmd = 1;
- stash_apply.no_stderr = stash_apply.no_stdout =
- stash_apply.no_stdin = 1;
- if (!run_command(&stash_apply))
- printf(_("Applied autostash.\n"));
- else {
- struct argv_array args = ARGV_ARRAY_INIT;
- int res = 0;
-
- argv_array_pushl(&args,
- "stash", "store", "-m", "autostash", "-q",
- autostash.buf, NULL);
- if (run_command_v_opt(args.argv, RUN_GIT_CMD))
- res = error(_("Cannot store %s"), autostash.buf);
- argv_array_clear(&args);
- strbuf_release(&autostash);
- if (res)
- return res;
-
- fprintf(stderr,
- _("Applying autostash resulted in conflicts.\n"
- "Your changes are safe in the stash.\n"
- "You can run \"git stash pop\" or \"git stash drop\" "
- "at any time.\n"));
- }
-
- strbuf_release(&autostash);
- return 0;
-}
-
static int finish_rebase(struct rebase_options *opts)
{
struct strbuf dir = STRBUF_INIT;
@@ -771,7 +726,7 @@ static int finish_rebase(struct rebase_options *opts)
int ret = 0;
delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
- apply_autostash(opts);
+ apply_autostash(state_dir_path("autostash", opts));
close_object_store(the_repository->objects);
/*
* We ignore errors in 'gc --auto', since the
@@ -816,144 +771,6 @@ static void add_var(struct strbuf *buf, const char *name, const char *value)
}
}
-#define GIT_REFLOG_ACTION_ENVIRONMENT "GIT_REFLOG_ACTION"
-
-#define RESET_HEAD_DETACH (1<<0)
-#define RESET_HEAD_HARD (1<<1)
-#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2)
-#define RESET_HEAD_REFS_ONLY (1<<3)
-#define RESET_ORIG_HEAD (1<<4)
-
-static int reset_head(struct object_id *oid, const char *action,
- const char *switch_to_branch, unsigned flags,
- const char *reflog_orig_head, const char *reflog_head)
-{
- unsigned detach_head = flags & RESET_HEAD_DETACH;
- unsigned reset_hard = flags & RESET_HEAD_HARD;
- unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
- unsigned refs_only = flags & RESET_HEAD_REFS_ONLY;
- unsigned update_orig_head = flags & RESET_ORIG_HEAD;
- struct object_id head_oid;
- struct tree_desc desc[2] = { { NULL }, { NULL } };
- struct lock_file lock = LOCK_INIT;
- struct unpack_trees_options unpack_tree_opts;
- struct tree *tree;
- const char *reflog_action;
- struct strbuf msg = STRBUF_INIT;
- size_t prefix_len;
- struct object_id *orig = NULL, oid_orig,
- *old_orig = NULL, oid_old_orig;
- int ret = 0, nr = 0;
-
- if (switch_to_branch && !starts_with(switch_to_branch, "refs/"))
- BUG("Not a fully qualified branch: '%s'", switch_to_branch);
-
- if (!refs_only && hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) {
- ret = -1;
- goto leave_reset_head;
- }
-
- if ((!oid || !reset_hard) && get_oid("HEAD", &head_oid)) {
- ret = error(_("could not determine HEAD revision"));
- goto leave_reset_head;
- }
-
- if (!oid)
- oid = &head_oid;
-
- if (refs_only)
- goto reset_head_refs;
-
- memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts));
- setup_unpack_trees_porcelain(&unpack_tree_opts, action);
- unpack_tree_opts.head_idx = 1;
- unpack_tree_opts.src_index = the_repository->index;
- unpack_tree_opts.dst_index = the_repository->index;
- unpack_tree_opts.fn = reset_hard ? oneway_merge : twoway_merge;
- unpack_tree_opts.update = 1;
- unpack_tree_opts.merge = 1;
- init_checkout_metadata(&unpack_tree_opts.meta, switch_to_branch, oid, NULL);
- if (!detach_head)
- unpack_tree_opts.reset = 1;
-
- if (repo_read_index_unmerged(the_repository) < 0) {
- ret = error(_("could not read index"));
- goto leave_reset_head;
- }
-
- if (!reset_hard && !fill_tree_descriptor(the_repository, &desc[nr++], &head_oid)) {
- ret = error(_("failed to find tree of %s"),
- oid_to_hex(&head_oid));
- goto leave_reset_head;
- }
-
- if (!fill_tree_descriptor(the_repository, &desc[nr++], oid)) {
- ret = error(_("failed to find tree of %s"), oid_to_hex(oid));
- goto leave_reset_head;
- }
-
- if (unpack_trees(nr, desc, &unpack_tree_opts)) {
- ret = -1;
- goto leave_reset_head;
- }
-
- tree = parse_tree_indirect(oid);
- prime_cache_tree(the_repository, the_repository->index, tree);
-
- if (write_locked_index(the_repository->index, &lock, COMMIT_LOCK) < 0) {
- ret = error(_("could not write index"));
- goto leave_reset_head;
- }
-
-reset_head_refs:
- reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
- strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : "rebase");
- prefix_len = msg.len;
-
- if (update_orig_head) {
- if (!get_oid("ORIG_HEAD", &oid_old_orig))
- old_orig = &oid_old_orig;
- if (!get_oid("HEAD", &oid_orig)) {
- orig = &oid_orig;
- if (!reflog_orig_head) {
- strbuf_addstr(&msg, "updating ORIG_HEAD");
- reflog_orig_head = msg.buf;
- }
- update_ref(reflog_orig_head, "ORIG_HEAD", orig,
- old_orig, 0, UPDATE_REFS_MSG_ON_ERR);
- } else if (old_orig)
- delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
- }
-
- if (!reflog_head) {
- strbuf_setlen(&msg, prefix_len);
- strbuf_addstr(&msg, "updating HEAD");
- reflog_head = msg.buf;
- }
- if (!switch_to_branch)
- ret = update_ref(reflog_head, "HEAD", oid, orig,
- detach_head ? REF_NO_DEREF : 0,
- UPDATE_REFS_MSG_ON_ERR);
- else {
- ret = update_ref(reflog_head, switch_to_branch, oid,
- NULL, 0, UPDATE_REFS_MSG_ON_ERR);
- if (!ret)
- ret = create_symref("HEAD", switch_to_branch,
- reflog_head);
- }
- if (run_hook)
- run_hook_le(NULL, "post-checkout",
- oid_to_hex(orig ? orig : &null_oid),
- oid_to_hex(oid), "1", NULL);
-
-leave_reset_head:
- strbuf_release(&msg);
- rollback_lock_file(&lock);
- while (nr)
- free((void *)desc[--nr].buffer);
- return ret;
-}
-
static int move_to_original_branch(struct rebase_options *opts)
{
struct strbuf orig_head_reflog = STRBUF_INIT, head_reflog = STRBUF_INIT;
@@ -969,8 +786,10 @@ static int move_to_original_branch(struct rebase_options *opts)
opts->head_name, oid_to_hex(&opts->onto->object.oid));
strbuf_addf(&head_reflog, "rebase finished: returning to %s",
opts->head_name);
- ret = reset_head(NULL, "", opts->head_name, RESET_HEAD_REFS_ONLY,
- orig_head_reflog.buf, head_reflog.buf);
+ ret = reset_head(the_repository, NULL, "", opts->head_name,
+ RESET_HEAD_REFS_ONLY,
+ orig_head_reflog.buf, head_reflog.buf,
+ DEFAULT_REFLOG_ACTION);
strbuf_release(&orig_head_reflog);
strbuf_release(&head_reflog);
@@ -1058,8 +877,9 @@ static int run_am(struct rebase_options *opts)
free(rebased_patches);
argv_array_clear(&am.args);
- reset_head(&opts->orig_head, "checkout", opts->head_name, 0,
- "HEAD", NULL);
+ reset_head(the_repository, &opts->orig_head, "checkout",
+ opts->head_name, 0,
+ "HEAD", NULL, DEFAULT_REFLOG_ACTION);
error(_("\ngit encountered an error while preparing the "
"patches to replay\n"
"these revisions:\n"
@@ -1218,7 +1038,7 @@ finished_rebase:
} else if (status == 2) {
struct strbuf dir = STRBUF_INIT;
- apply_autostash(opts);
+ apply_autostash(state_dir_path("autostash", opts));
strbuf_addstr(&dir, opts->state_dir);
remove_dir_recursively(&dir, 0);
strbuf_release(&dir);
@@ -1459,7 +1279,6 @@ static int check_exec_cmd(const char *cmd)
return 0;
}
-
int cmd_rebase(int argc, const char **argv, const char *prefix)
{
struct rebase_options options = REBASE_OPTIONS_INIT;
@@ -1562,8 +1381,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
{ OPTION_STRING, 'S', "gpg-sign", &gpg_sign, N_("key-id"),
N_("GPG-sign commits"),
PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
- OPT_BOOL(0, "autostash", &options.autostash,
- N_("automatically stash/stash pop before and after")),
+ OPT_AUTOSTASH(&options.autostash),
OPT_STRING_LIST('x', "exec", &exec, N_("exec"),
N_("add exec lines after each commit of the "
"editable list")),
@@ -1720,8 +1538,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
rerere_clear(the_repository, &merge_rr);
string_list_clear(&merge_rr, 1);
- if (reset_head(NULL, "reset", NULL, RESET_HEAD_HARD,
- NULL, NULL) < 0)
+ if (reset_head(the_repository, NULL, "reset", NULL, RESET_HEAD_HARD,
+ NULL, NULL, DEFAULT_REFLOG_ACTION) < 0)
die(_("could not discard worktree changes"));
remove_branch_state(the_repository, 0);
if (read_basic_state(&options))
@@ -1738,9 +1556,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (read_basic_state(&options))
exit(1);
- if (reset_head(&options.orig_head, "reset",
+ if (reset_head(the_repository, &options.orig_head, "reset",
options.head_name, RESET_HEAD_HARD,
- NULL, NULL) < 0)
+ NULL, NULL, DEFAULT_REFLOG_ACTION) < 0)
die(_("could not move back to %s"),
oid_to_hex(&options.orig_head));
remove_branch_state(the_repository, 0);
@@ -1748,6 +1566,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
goto cleanup;
}
case ACTION_QUIT: {
+ save_autostash(state_dir_path("autostash", &options));
if (options.type == REBASE_MERGE) {
struct replay_opts replay = REPLAY_OPTS_INIT;
@@ -2098,49 +1917,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
die(_("could not read index"));
if (options.autostash) {
- struct lock_file lock_file = LOCK_INIT;
- int fd;
-
- fd = hold_locked_index(&lock_file, 0);
- refresh_cache(REFRESH_QUIET);
- if (0 <= fd)
- repo_update_index_if_able(the_repository, &lock_file);
- rollback_lock_file(&lock_file);
-
- if (has_unstaged_changes(the_repository, 1) ||
- has_uncommitted_changes(the_repository, 1)) {
- const char *autostash =
- state_dir_path("autostash", &options);
- struct child_process stash = CHILD_PROCESS_INIT;
- struct object_id oid;
-
- argv_array_pushl(&stash.args,
- "stash", "create", "autostash", NULL);
- stash.git_cmd = 1;
- stash.no_stdin = 1;
- strbuf_reset(&buf);
- if (capture_command(&stash, &buf, GIT_MAX_HEXSZ))
- die(_("Cannot autostash"));
- strbuf_trim_trailing_newline(&buf);
- if (get_oid(buf.buf, &oid))
- die(_("Unexpected stash response: '%s'"),
- buf.buf);
- strbuf_reset(&buf);
- strbuf_add_unique_abbrev(&buf, &oid, DEFAULT_ABBREV);
-
- if (safe_create_leading_directories_const(autostash))
- die(_("Could not create directory for '%s'"),
- options.state_dir);
- write_file(autostash, "%s", oid_to_hex(&oid));
- printf(_("Created autostash: %s\n"), buf.buf);
- if (reset_head(NULL, "reset --hard",
- NULL, RESET_HEAD_HARD, NULL, NULL) < 0)
- die(_("could not reset --hard"));
-
- if (discard_index(the_repository->index) < 0 ||
- repo_read_index(the_repository) < 0)
- die(_("could not read index"));
- }
+ create_autostash(the_repository, state_dir_path("autostash", &options),
+ DEFAULT_REFLOG_ACTION);
}
if (require_clean_work_tree(the_repository, "rebase",
@@ -2174,10 +1952,12 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
strbuf_addf(&buf, "%s: checkout %s",
getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
options.switch_to);
- if (reset_head(&options.orig_head, "checkout",
+ if (reset_head(the_repository,
+ &options.orig_head, "checkout",
options.head_name,
RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
- NULL, buf.buf) < 0) {
+ NULL, buf.buf,
+ DEFAULT_REFLOG_ACTION) < 0) {
ret = !!error(_("could not switch to "
"%s"),
options.switch_to);
@@ -2249,10 +2029,10 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
strbuf_addf(&msg, "%s: checkout %s",
getenv(GIT_REFLOG_ACTION_ENVIRONMENT), options.onto_name);
- if (reset_head(&options.onto->object.oid, "checkout", NULL,
+ if (reset_head(the_repository, &options.onto->object.oid, "checkout", NULL,
RESET_HEAD_DETACH | RESET_ORIG_HEAD |
RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
- NULL, msg.buf))
+ NULL, msg.buf, DEFAULT_REFLOG_ACTION))
die(_("Could not detach HEAD"));
strbuf_release(&msg);
@@ -2267,8 +2047,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
strbuf_addf(&msg, "rebase finished: %s onto %s",
options.head_name ? options.head_name : "detached HEAD",
oid_to_hex(&options.onto->object.oid));
- reset_head(NULL, "Fast-forwarded", options.head_name,
- RESET_HEAD_REFS_ONLY, "HEAD", msg.buf);
+ reset_head(the_repository, NULL, "Fast-forwarded", options.head_name,
+ RESET_HEAD_REFS_ONLY, "HEAD", msg.buf,
+ DEFAULT_REFLOG_ACTION);
strbuf_release(&msg);
ret = !!finish_rebase(&options);
goto cleanup;
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index d46147f709..a00f91c1a0 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -499,12 +499,27 @@ static char *find_header(const char *msg, size_t len, const char *key,
return NULL;
}
+/*
+ * Return zero if a and b are equal up to n bytes and nonzero if they are not.
+ * This operation is guaranteed to run in constant time to avoid leaking data.
+ */
+static int constant_memequal(const char *a, const char *b, size_t n)
+{
+ int res = 0;
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ res |= a[i] ^ b[i];
+ return res;
+}
+
static const char *check_nonce(const char *buf, size_t len)
{
char *nonce = find_header(buf, len, "nonce", NULL);
timestamp_t stamp, ostamp;
char *bohmac, *expect = NULL;
const char *retval = NONCE_BAD;
+ size_t noncelen;
if (!nonce) {
retval = NONCE_MISSING;
@@ -546,8 +561,14 @@ static const char *check_nonce(const char *buf, size_t len)
goto leave;
}
+ noncelen = strlen(nonce);
expect = prepare_push_cert_nonce(service_dir, stamp);
- if (strcmp(expect, nonce)) {
+ if (noncelen != strlen(expect)) {
+ /* This is not even the right size. */
+ retval = NONCE_BAD;
+ goto leave;
+ }
+ if (constant_memequal(expect, nonce, noncelen)) {
/* Not what we would have signed earlier */
retval = NONCE_BAD;
goto leave;
@@ -872,12 +893,12 @@ static int update_shallow_ref(struct command *cmd, struct shallow_info *si)
opt.env = tmp_objdir_env(tmp_objdir);
setup_alternate_shallow(&shallow_lock, &opt.shallow_file, &extra);
if (check_connected(command_singleton_iterator, cmd, &opt)) {
- rollback_lock_file(&shallow_lock);
+ rollback_shallow_file(the_repository, &shallow_lock);
oid_array_clear(&extra);
return -1;
}
- commit_lock_file(&shallow_lock);
+ commit_shallow_file(the_repository, &shallow_lock);
/*
* Make sure setup_alternate_shallow() for the next ref does
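The nonce check above first rejects a length mismatch and then compares the two strings with the new constant_memequal(), so the time taken does not depend on where the first differing byte sits. The same idea in a self-contained form, under an assumed ct_memdiff() name:

#include <stddef.h>

/*
 * Returns 0 when the two n-byte buffers are identical, nonzero
 * otherwise.  Every byte is visited regardless of where a mismatch
 * occurs, so the running time does not leak the mismatch position.
 */
static int ct_memdiff(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];
	return diff;
}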
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 81dfd563c0..52ecf6d43c 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -459,7 +459,7 @@ static struct reflog_expire_cfg *find_cfg_ent(const char *pattern, size_t len)
static int reflog_expire_config(const char *var, const char *value, void *cb)
{
const char *pattern, *key;
- int pattern_len;
+ size_t pattern_len;
timestamp_t expire;
int slot;
struct reflog_expire_cfg *ent;
diff --git a/builtin/repack.c b/builtin/repack.c
index 0781763b06..1b686ee9ce 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -10,6 +10,7 @@
#include "argv-array.h"
#include "midx.h"
#include "packfile.h"
+#include "prune-packed.h"
#include "object-store.h"
#include "promisor-remote.h"
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 740da4b6d5..95d0882417 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -18,7 +18,7 @@
static const char *empty_base = "";
static char const * const builtin_sparse_checkout_usage[] = {
- N_("git sparse-checkout (init|list|set|add|disable) <options>"),
+ N_("git sparse-checkout (init|list|set|add|reapply|disable) <options>"),
NULL
};
@@ -94,50 +94,37 @@ static int sparse_checkout_list(int argc, const char **argv)
static int update_working_directory(struct pattern_list *pl)
{
- int result = 0;
+ enum update_sparsity_result result;
struct unpack_trees_options o;
struct lock_file lock_file = LOCK_INIT;
- struct object_id oid;
- struct tree *tree;
- struct tree_desc t;
struct repository *r = the_repository;
- if (repo_read_index_unmerged(r))
- die(_("you need to resolve your current index first"));
-
- if (get_oid("HEAD", &oid))
- return 0;
-
- tree = parse_tree_indirect(&oid);
- parse_tree(tree);
- init_tree_desc(&t, tree->buffer, tree->size);
-
memset(&o, 0, sizeof(o));
o.verbose_update = isatty(2);
- o.merge = 1;
o.update = 1;
- o.fn = oneway_merge;
o.head_idx = -1;
o.src_index = r->index;
o.dst_index = r->index;
o.skip_sparse_checkout = 0;
o.pl = pl;
- o.keep_pattern_list = !!pl;
- resolve_undo_clear_index(r->index);
setup_work_tree();
- cache_tree_free(&r->index->cache_tree);
-
repo_hold_locked_index(r, &lock_file, LOCK_DIE_ON_ERROR);
- core_apply_sparse_checkout = 1;
- result = unpack_trees(1, &t, &o);
-
- if (!result) {
- prime_cache_tree(r, r->index, tree);
+ setup_unpack_trees_porcelain(&o, "sparse-checkout");
+ result = update_sparsity(&o);
+ clear_unpack_trees_porcelain(&o);
+
+ if (result == UPDATE_SPARSITY_WARNINGS)
+ /*
+ * We don't do any special handling of warnings from untracked
+ * files in the way or dirty entries that can't be removed.
+ */
+ result = UPDATE_SPARSITY_SUCCESS;
+ if (result == UPDATE_SPARSITY_SUCCESS)
write_locked_index(r->index, &lock_file, COMMIT_LOCK);
- } else
+ else
rollback_lock_file(&lock_file);
return result;
@@ -304,8 +291,6 @@ static int sparse_checkout_init(int argc, const char **argv)
};
repo_read_index(the_repository);
- require_clean_work_tree(the_repository,
- N_("initialize sparse-checkout"), NULL, 1, 0);
argc = parse_options(argc, argv, NULL,
builtin_sparse_checkout_init_options,
@@ -560,8 +545,6 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix,
};
repo_read_index(the_repository);
- require_clean_work_tree(the_repository,
- N_("set sparse-checkout patterns"), NULL, 1, 0);
argc = parse_options(argc, argv, prefix,
builtin_sparse_checkout_set_options,
@@ -571,14 +554,18 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix,
return modify_pattern_list(argc, argv, m);
}
+static int sparse_checkout_reapply(int argc, const char **argv)
+{
+ repo_read_index(the_repository);
+ return update_working_directory(NULL);
+}
+
static int sparse_checkout_disable(int argc, const char **argv)
{
struct pattern_list pl;
struct strbuf match_all = STRBUF_INIT;
repo_read_index(the_repository);
- require_clean_work_tree(the_repository,
- N_("disable sparse-checkout"), NULL, 1, 0);
memset(&pl, 0, sizeof(pl));
hashmap_init(&pl.recursive_hashmap, pl_hashmap_cmp, NULL, 0);
@@ -622,6 +609,8 @@ int cmd_sparse_checkout(int argc, const char **argv, const char *prefix)
return sparse_checkout_set(argc, argv, prefix, REPLACE);
if (!strcmp(argv[0], "add"))
return sparse_checkout_set(argc, argv, prefix, ADD);
+ if (!strcmp(argv[0], "reapply"))
+ return sparse_checkout_reapply(argc, argv);
if (!strcmp(argv[0], "disable"))
return sparse_checkout_disable(argc, argv);
}
diff --git a/builtin/stash.c b/builtin/stash.c
index 6d586ef06d..0c52a3b849 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -861,30 +861,23 @@ static int get_untracked_files(const struct pathspec *ps, int include_untracked,
struct strbuf *untracked_files)
{
int i;
- int max_len;
int found = 0;
- char *seen;
struct dir_struct dir;
memset(&dir, 0, sizeof(dir));
if (include_untracked != INCLUDE_ALL_FILES)
setup_standard_excludes(&dir);
- seen = xcalloc(ps->nr, 1);
-
- max_len = fill_directory(&dir, the_repository->index, ps);
+ fill_directory(&dir, the_repository->index, ps);
for (i = 0; i < dir.nr; i++) {
struct dir_entry *ent = dir.entries[i];
- if (dir_path_match(&the_index, ent, ps, max_len, seen)) {
- found++;
- strbuf_addstr(untracked_files, ent->name);
- /* NUL-terminate: will be fed to update-index -z */
- strbuf_addch(untracked_files, '\0');
- }
+ found++;
+ strbuf_addstr(untracked_files, ent->name);
+ /* NUL-terminate: will be fed to update-index -z */
+ strbuf_addch(untracked_files, '\0');
free(ent);
}
- free(seen);
free(dir.entries);
free(dir.ignored);
clear_directory(&dir);
@@ -1041,7 +1034,7 @@ static int stash_patch(struct stash_info *info, const struct pathspec *ps,
}
cp_diff_tree.git_cmd = 1;
- argv_array_pushl(&cp_diff_tree.args, "diff-tree", "-p", "HEAD",
+ argv_array_pushl(&cp_diff_tree.args, "diff-tree", "-p", "-U1", "HEAD",
oid_to_hex(&info->w_tree), "--", NULL);
if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
ret = -1;
diff --git a/builtin/update-ref.c b/builtin/update-ref.c
index 2d8f7f0578..b74dd9a69d 100644
--- a/builtin/update-ref.c
+++ b/builtin/update-ref.c
@@ -50,7 +50,7 @@ static const char *parse_arg(const char *next, struct strbuf *arg)
* the argument. Die if C-quoting is malformed or the reference name
* is invalid.
*/
-static char *parse_refname(struct strbuf *input, const char **next)
+static char *parse_refname(const char **next)
{
struct strbuf ref = STRBUF_INIT;
@@ -95,7 +95,7 @@ static char *parse_refname(struct strbuf *input, const char **next)
* provided but cannot be converted to a SHA-1, die. flags can
* include PARSE_SHA1_OLD and/or PARSE_SHA1_ALLOW_EMPTY.
*/
-static int parse_next_oid(struct strbuf *input, const char **next,
+static int parse_next_oid(const char **next, const char *end,
struct object_id *oid,
const char *command, const char *refname,
int flags)
@@ -103,7 +103,7 @@ static int parse_next_oid(struct strbuf *input, const char **next,
struct strbuf arg = STRBUF_INIT;
int ret = 0;
- if (*next == input->buf + input->len)
+ if (*next == end)
goto eof;
if (line_termination) {
@@ -128,7 +128,7 @@ static int parse_next_oid(struct strbuf *input, const char **next,
die("%s %s: expected NUL but got: %s",
command, refname, *next);
(*next)++;
- if (*next == input->buf + input->len)
+ if (*next == end)
goto eof;
strbuf_addstr(&arg, *next);
*next += arg.len;
@@ -178,23 +178,23 @@ static int parse_next_oid(struct strbuf *input, const char **next,
* depending on how line_termination is set.
*/
-static const char *parse_cmd_update(struct ref_transaction *transaction,
- struct strbuf *input, const char *next)
+static void parse_cmd_update(struct ref_transaction *transaction,
+ const char *next, const char *end)
{
struct strbuf err = STRBUF_INIT;
char *refname;
struct object_id new_oid, old_oid;
int have_old;
- refname = parse_refname(input, &next);
+ refname = parse_refname(&next);
if (!refname)
die("update: missing <ref>");
- if (parse_next_oid(input, &next, &new_oid, "update", refname,
+ if (parse_next_oid(&next, end, &new_oid, "update", refname,
PARSE_SHA1_ALLOW_EMPTY))
die("update %s: missing <newvalue>", refname);
- have_old = !parse_next_oid(input, &next, &old_oid, "update", refname,
+ have_old = !parse_next_oid(&next, end, &old_oid, "update", refname,
PARSE_SHA1_OLD);
if (*next != line_termination)
@@ -209,22 +209,20 @@ static const char *parse_cmd_update(struct ref_transaction *transaction,
update_flags = default_flags;
free(refname);
strbuf_release(&err);
-
- return next;
}
-static const char *parse_cmd_create(struct ref_transaction *transaction,
- struct strbuf *input, const char *next)
+static void parse_cmd_create(struct ref_transaction *transaction,
+ const char *next, const char *end)
{
struct strbuf err = STRBUF_INIT;
char *refname;
struct object_id new_oid;
- refname = parse_refname(input, &next);
+ refname = parse_refname(&next);
if (!refname)
die("create: missing <ref>");
- if (parse_next_oid(input, &next, &new_oid, "create", refname, 0))
+ if (parse_next_oid(&next, end, &new_oid, "create", refname, 0))
die("create %s: missing <newvalue>", refname);
if (is_null_oid(&new_oid))
@@ -241,23 +239,21 @@ static const char *parse_cmd_create(struct ref_transaction *transaction,
update_flags = default_flags;
free(refname);
strbuf_release(&err);
-
- return next;
}
-static const char *parse_cmd_delete(struct ref_transaction *transaction,
- struct strbuf *input, const char *next)
+static void parse_cmd_delete(struct ref_transaction *transaction,
+ const char *next, const char *end)
{
struct strbuf err = STRBUF_INIT;
char *refname;
struct object_id old_oid;
int have_old;
- refname = parse_refname(input, &next);
+ refname = parse_refname(&next);
if (!refname)
die("delete: missing <ref>");
- if (parse_next_oid(input, &next, &old_oid, "delete", refname,
+ if (parse_next_oid(&next, end, &old_oid, "delete", refname,
PARSE_SHA1_OLD)) {
have_old = 0;
} else {
@@ -277,22 +273,20 @@ static const char *parse_cmd_delete(struct ref_transaction *transaction,
update_flags = default_flags;
free(refname);
strbuf_release(&err);
-
- return next;
}
-static const char *parse_cmd_verify(struct ref_transaction *transaction,
- struct strbuf *input, const char *next)
+static void parse_cmd_verify(struct ref_transaction *transaction,
+ const char *next, const char *end)
{
struct strbuf err = STRBUF_INIT;
char *refname;
struct object_id old_oid;
- refname = parse_refname(input, &next);
+ refname = parse_refname(&next);
if (!refname)
die("verify: missing <ref>");
- if (parse_next_oid(input, &next, &old_oid, "verify", refname,
+ if (parse_next_oid(&next, end, &old_oid, "verify", refname,
PARSE_SHA1_OLD))
oidclr(&old_oid);
@@ -306,50 +300,179 @@ static const char *parse_cmd_verify(struct ref_transaction *transaction,
update_flags = default_flags;
free(refname);
strbuf_release(&err);
-
- return next;
}
-static const char *parse_cmd_option(struct strbuf *input, const char *next)
+static void parse_cmd_option(struct ref_transaction *transaction,
+ const char *next, const char *end)
{
const char *rest;
if (skip_prefix(next, "no-deref", &rest) && *rest == line_termination)
update_flags |= REF_NO_DEREF;
else
die("option unknown: %s", next);
- return rest;
}
-static void update_refs_stdin(struct ref_transaction *transaction)
+static void parse_cmd_start(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ if (*next != line_termination)
+ die("start: extra input: %s", next);
+ puts("start: ok");
+}
+
+static void parse_cmd_prepare(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf error = STRBUF_INIT;
+ if (*next != line_termination)
+ die("prepare: extra input: %s", next);
+ if (ref_transaction_prepare(transaction, &error))
+ die("prepare: %s", error.buf);
+ puts("prepare: ok");
+}
+
+static void parse_cmd_abort(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf error = STRBUF_INIT;
+ if (*next != line_termination)
+ die("abort: extra input: %s", next);
+ if (ref_transaction_abort(transaction, &error))
+ die("abort: %s", error.buf);
+ puts("abort: ok");
+}
+
+static void parse_cmd_commit(struct ref_transaction *transaction,
+ const char *next, const char *end)
+{
+ struct strbuf error = STRBUF_INIT;
+ if (*next != line_termination)
+ die("commit: extra input: %s", next);
+ if (ref_transaction_commit(transaction, &error))
+ die("commit: %s", error.buf);
+ puts("commit: ok");
+ ref_transaction_free(transaction);
+}
+
+enum update_refs_state {
+ /* Non-transactional state open for updates. */
+ UPDATE_REFS_OPEN,
+ /* A transaction has been started. */
+ UPDATE_REFS_STARTED,
+ /* References are locked and ready for commit */
+ UPDATE_REFS_PREPARED,
+ /* Transaction has been committed or closed. */
+ UPDATE_REFS_CLOSED,
+};
+
+static const struct parse_cmd {
+ const char *prefix;
+ void (*fn)(struct ref_transaction *, const char *, const char *);
+ unsigned args;
+ enum update_refs_state state;
+} command[] = {
+ { "update", parse_cmd_update, 3, UPDATE_REFS_OPEN },
+ { "create", parse_cmd_create, 2, UPDATE_REFS_OPEN },
+ { "delete", parse_cmd_delete, 2, UPDATE_REFS_OPEN },
+ { "verify", parse_cmd_verify, 2, UPDATE_REFS_OPEN },
+ { "option", parse_cmd_option, 1, UPDATE_REFS_OPEN },
+ { "start", parse_cmd_start, 0, UPDATE_REFS_STARTED },
+ { "prepare", parse_cmd_prepare, 0, UPDATE_REFS_PREPARED },
+ { "abort", parse_cmd_abort, 0, UPDATE_REFS_CLOSED },
+ { "commit", parse_cmd_commit, 0, UPDATE_REFS_CLOSED },
+};
+
+static void update_refs_stdin(void)
{
- struct strbuf input = STRBUF_INIT;
- const char *next;
+ struct strbuf input = STRBUF_INIT, err = STRBUF_INIT;
+ enum update_refs_state state = UPDATE_REFS_OPEN;
+ struct ref_transaction *transaction;
+ int i, j;
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction)
+ die("%s", err.buf);
- if (strbuf_read(&input, 0, 1000) < 0)
- die_errno("could not read from stdin");
- next = input.buf;
/* Read each line dispatch its command */
- while (next < input.buf + input.len) {
- if (*next == line_termination)
+ while (!strbuf_getwholeline(&input, stdin, line_termination)) {
+ const struct parse_cmd *cmd = NULL;
+
+ if (*input.buf == line_termination)
die("empty command in input");
- else if (isspace(*next))
- die("whitespace before command: %s", next);
- else if (skip_prefix(next, "update ", &next))
- next = parse_cmd_update(transaction, &input, next);
- else if (skip_prefix(next, "create ", &next))
- next = parse_cmd_create(transaction, &input, next);
- else if (skip_prefix(next, "delete ", &next))
- next = parse_cmd_delete(transaction, &input, next);
- else if (skip_prefix(next, "verify ", &next))
- next = parse_cmd_verify(transaction, &input, next);
- else if (skip_prefix(next, "option ", &next))
- next = parse_cmd_option(&input, next);
- else
- die("unknown command: %s", next);
-
- next++;
+ else if (isspace(*input.buf))
+ die("whitespace before command: %s", input.buf);
+
+ for (i = 0; i < ARRAY_SIZE(command); i++) {
+ const char *prefix = command[i].prefix;
+ char c;
+
+ if (!starts_with(input.buf, prefix))
+ continue;
+
+ /*
+ * If the command has arguments, verify that it's
+ * followed by a space. Otherwise, it shall be followed
+ * by a line terminator.
+ */
+ c = command[i].args ? ' ' : line_termination;
+ if (input.buf[strlen(prefix)] != c)
+ continue;
+
+ cmd = &command[i];
+ break;
+ }
+ if (!cmd)
+ die("unknown command: %s", input.buf);
+
+ /*
+ * Read additional arguments if NUL-terminated. Do not raise an
+ * error in case there is an early EOF to let the command
+ * handle missing arguments with a proper error message.
+ */
+ for (j = 1; line_termination == '\0' && j < cmd->args; j++)
+ if (strbuf_appendwholeline(&input, stdin, line_termination))
+ break;
+
+ switch (state) {
+ case UPDATE_REFS_OPEN:
+ case UPDATE_REFS_STARTED:
+ /* Do not downgrade a transaction to a non-transaction. */
+ if (cmd->state >= state)
+ state = cmd->state;
+ break;
+ case UPDATE_REFS_PREPARED:
+ if (cmd->state != UPDATE_REFS_CLOSED)
+ die("prepared transactions can only be closed");
+ state = cmd->state;
+ break;
+ case UPDATE_REFS_CLOSED:
+ die("transaction is closed");
+ break;
+ }
+
+ cmd->fn(transaction, input.buf + strlen(cmd->prefix) + !!cmd->args,
+ input.buf + input.len);
+ }
+
+ switch (state) {
+ case UPDATE_REFS_OPEN:
+ /* Commit by default if no transaction was requested. */
+ if (ref_transaction_commit(transaction, &err))
+ die("%s", err.buf);
+ ref_transaction_free(transaction);
+ break;
+ case UPDATE_REFS_STARTED:
+ case UPDATE_REFS_PREPARED:
+ /* If using a transaction, we want to abort it. */
+ if (ref_transaction_abort(transaction, &err))
+ die("%s", err.buf);
+ break;
+ case UPDATE_REFS_CLOSED:
+ /* Otherwise no need to do anything, the transaction was closed already. */
+ break;
}
+ strbuf_release(&err);
strbuf_release(&input);
}
@@ -384,21 +507,11 @@ int cmd_update_ref(int argc, const char **argv, const char *prefix)
}
if (read_stdin) {
- struct strbuf err = STRBUF_INIT;
- struct ref_transaction *transaction;
-
- transaction = ref_transaction_begin(&err);
- if (!transaction)
- die("%s", err.buf);
if (delete || argc > 0)
usage_with_options(git_update_ref_usage, options);
if (end_null)
line_termination = '\0';
- update_refs_stdin(transaction);
- if (ref_transaction_commit(transaction, &err))
- die("%s", err.buf);
- ref_transaction_free(transaction);
- strbuf_release(&err);
+ update_refs_stdin();
return 0;
}
diff --git a/ci/git-problem-matcher.json b/ci/git-problem-matcher.json
new file mode 100644
index 0000000000..506dfbd97f
--- /dev/null
+++ b/ci/git-problem-matcher.json
@@ -0,0 +1,16 @@
+{
+ "problemMatcher": [
+ {
+ "owner": "git-test-suite",
+ "pattern": [
+ {
+ "regexp": "^([^ :]+\\.sh):(\\d+): (error|warning|info):\\s+(.*)$",
+ "file": 1,
+ "line": 2,
+ "severity": 3,
+ "message": 4
+ }
+ ]
+ }
+ ]
+}
diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh
index 497fd32ca8..0229a77f7d 100755
--- a/ci/install-dependencies.sh
+++ b/ci/install-dependencies.sh
@@ -7,12 +7,16 @@
P4WHENCE=http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION
LFSWHENCE=https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VERSION
+UBUNTU_COMMON_PKGS="make libssl-dev libcurl4-openssl-dev libexpat-dev
+ tcl tk gettext zlib1g-dev perl-modules liberror-perl libauthen-sasl-perl
+ libemail-valid-perl libio-socket-ssl-perl libnet-smtp-ssl-perl"
case "$jobname" in
linux-clang|linux-gcc)
sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
sudo apt-get -q update
- sudo apt-get -q -y install language-pack-is libsvn-perl apache2
+ sudo apt-get -q -y install language-pack-is libsvn-perl apache2 \
+ $UBUNTU_COMMON_PKGS
case "$jobname" in
linux-gcc)
sudo apt-get -q -y install gcc-8
@@ -59,14 +63,18 @@ osx-clang|osx-gcc)
StaticAnalysis)
sudo apt-get -q update
sudo apt-get -q -y install coccinelle libcurl4-openssl-dev libssl-dev \
- libexpat-dev gettext
+ libexpat-dev gettext make
;;
Documentation)
sudo apt-get -q update
- sudo apt-get -q -y install asciidoc xmlto docbook-xsl-ns
+ sudo apt-get -q -y install asciidoc xmlto docbook-xsl-ns make
test -n "$ALREADY_HAVE_ASCIIDOCTOR" ||
- gem install --version 1.5.8 asciidoctor
+ sudo gem install --version 1.5.8 asciidoctor
+ ;;
+linux-gcc-4.8|GETTEXT_POISON)
+ sudo apt-get -q update
+ sudo apt-get -q -y install $UBUNTU_COMMON_PKGS
;;
esac
diff --git a/ci/install-docker-dependencies.sh b/ci/install-docker-dependencies.sh
new file mode 100755
index 0000000000..26a6689766
--- /dev/null
+++ b/ci/install-docker-dependencies.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+#
+# Install dependencies required to build and test Git inside container
+#
+
+case "$jobname" in
+Linux32)
+ linux32 --32bit i386 sh -c '
+ apt update >/dev/null &&
+ apt install -y build-essential libcurl4-openssl-dev \
+ libssl-dev libexpat-dev gettext python >/dev/null
+ '
+ ;;
+linux-musl)
+ apk add --update build-base curl-dev openssl-dev expat-dev gettext \
+ pcre2-dev python3 musl-libintl perl-utils ncurses >/dev/null
+ ;;
+esac
diff --git a/ci/lib.sh b/ci/lib.sh
index c3a8cd2104..dac36886e3 100755
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -34,7 +34,7 @@ save_good_tree () {
# successfully before (e.g. because the branch got rebased, changing only
# the commit messages).
skip_good_tree () {
- if test "$TRAVIS_DEBUG_MODE" = true
+ if test "$TRAVIS_DEBUG_MODE" = true || test true = "$GITHUB_ACTIONS"
then
return
fi
@@ -79,6 +79,9 @@ check_unignored_build_artifacts ()
}
}
+# GitHub Action doesn't set TERM, which is required by tput
+export TERM=${TERM:-dumb}
+
# Clear MAKEFLAGS that may come from the outside world.
export MAKEFLAGS=
@@ -136,8 +139,32 @@ then
MAKEFLAGS="$MAKEFLAGS --jobs=10"
test windows_nt != "$CI_OS_NAME" ||
GIT_TEST_OPTS="--no-chain-lint --no-bin-wrappers $GIT_TEST_OPTS"
+elif test true = "$GITHUB_ACTIONS"
+then
+ CI_TYPE=github-actions
+ CI_BRANCH="$GITHUB_REF"
+ CI_COMMIT="$GITHUB_SHA"
+ CI_OS_NAME="$(echo "$RUNNER_OS" | tr A-Z a-z)"
+ test macos != "$CI_OS_NAME" || CI_OS_NAME=osx
+ CI_REPO_SLUG="$GITHUB_REPOSITORY"
+ CI_JOB_ID="$GITHUB_RUN_ID"
+ CC="${CC:-gcc}"
+
+ cache_dir="$HOME/none"
+
+ export GIT_PROVE_OPTS="--timer --jobs 10"
+ export GIT_TEST_OPTS="--verbose-log -x"
+ MAKEFLAGS="$MAKEFLAGS --jobs=10"
+ test windows != "$CI_OS_NAME" ||
+ GIT_TEST_OPTS="--no-chain-lint --no-bin-wrappers $GIT_TEST_OPTS"
+
+ # https://github.com/actions/toolkit/blob/master/docs/commands.md#problem-matchers
+ echo "::add-matcher::ci/git-problem-matcher.json"
+ test linux-musl = "$jobname" ||
+ MAKEFLAGS="$MAKEFLAGS TEST_SHELL_PATH=/bin/sh"
else
echo "Could not identify CI type" >&2
+ env >&2
exit 1
fi
@@ -195,9 +222,17 @@ osx-clang|osx-gcc)
# Travis CI OS X
export GIT_SKIP_TESTS="t9810 t9816"
;;
-GIT_TEST_GETTEXT_POISON)
+GETTEXT_POISON)
export GIT_TEST_GETTEXT_POISON=true
;;
+Linux32)
+ CC=gcc
+ ;;
+linux-musl)
+ CC=gcc
+ MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=/usr/bin/python3 USE_LIBPCRE2=Yes"
+ MAKEFLAGS="$MAKEFLAGS NO_REGEX=Yes ICONV_OMITS_BOM=Yes"
+ ;;
esac
MAKEFLAGS="$MAKEFLAGS CC=${CC:-cc}"
diff --git a/ci/print-test-failures.sh b/ci/print-test-failures.sh
index e688a26f0d..92a983a265 100755
--- a/ci/print-test-failures.sh
+++ b/ci/print-test-failures.sh
@@ -46,6 +46,13 @@ do
mv "$trash_dir" failed-test-artifacts
continue
;;
+ github-actions)
+ mkdir -p failed-test-artifacts
+ echo "::set-env name=FAILED_TEST_ARTIFACTS::t/failed-test-artifacts"
+ cp "${TEST_EXIT%.exit}.out" failed-test-artifacts/
+ tar czf failed-test-artifacts/"$test_name".trash.tar.gz "$trash_dir"
+ continue
+ ;;
*)
echo "Unhandled CI type: $CI_TYPE" >&2
exit 1
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index 4df54c4efe..17e25aade9 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -19,6 +19,7 @@ linux-gcc)
export GIT_TEST_OE_SIZE=10
export GIT_TEST_OE_DELTA_SIZE=5
export GIT_TEST_COMMIT_GRAPH=1
+ export GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=1
export GIT_TEST_MULTI_PACK_INDEX=1
export GIT_TEST_ADD_I_USE_BUILTIN=1
make test
diff --git a/ci/run-linux32-build.sh b/ci/run-docker-build.sh
index e3a193adbc..8d47a5fda3 100755
--- a/ci/run-linux32-build.sh
+++ b/ci/run-docker-build.sh
@@ -1,25 +1,33 @@
#!/bin/sh
#
-# Build and test Git in a 32-bit environment
+# Build and test Git inside container
#
# Usage:
-# run-linux32-build.sh <host-user-id>
+# run-docker-build.sh <host-user-id>
#
set -ex
if test $# -ne 1 || test -z "$1"
then
- echo >&2 "usage: run-linux32-build.sh <host-user-id>"
+ echo >&2 "usage: run-docker-build.sh <host-user-id>"
exit 1
fi
-# Update packages to the latest available versions
-linux32 --32bit i386 sh -c '
- apt update >/dev/null &&
- apt install -y build-essential libcurl4-openssl-dev libssl-dev \
- libexpat-dev gettext python >/dev/null
-'
+case "$jobname" in
+Linux32)
+ switch_cmd="linux32 --32bit i386"
+ ;;
+linux-musl)
+ switch_cmd=
+ useradd () { adduser -D "$@"; }
+ ;;
+*)
+ exit 1
+ ;;
+esac
+
+"${0%/*}/install-docker-dependencies.sh"
# If this script runs inside a docker container, then all commands are
# usually executed as root. Consequently, the host user might not be
@@ -51,10 +59,17 @@ else
fi
# Build and test
-linux32 --32bit i386 su -m -l $CI_USER -c '
+command $switch_cmd su -m -l $CI_USER -c "
set -ex
+ export DEVELOPER='$DEVELOPER'
+ export DEFAULT_TEST_TARGET='$DEFAULT_TEST_TARGET'
+ export GIT_PROVE_OPTS='$GIT_PROVE_OPTS'
+ export GIT_TEST_OPTS='$GIT_TEST_OPTS'
+ export GIT_TEST_CLONE_2GB='$GIT_TEST_CLONE_2GB'
+ export MAKEFLAGS='$MAKEFLAGS'
+ export cache_dir='$cache_dir'
cd /usr/src/git
- test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove
+ test -n '$cache_dir' && ln -s '$cache_dir/.prove' t/.prove
make
make test
-'
+"
diff --git a/ci/run-docker.sh b/ci/run-docker.sh
new file mode 100755
index 0000000000..37fa372052
--- /dev/null
+++ b/ci/run-docker.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+#
+# Download and run Docker image to build and test Git
+#
+
+. ${0%/*}/lib.sh
+
+case "$jobname" in
+Linux32)
+ CI_CONTAINER="daald/ubuntu32:xenial"
+ ;;
+linux-musl)
+ CI_CONTAINER=alpine
+ ;;
+*)
+ exit 1
+ ;;
+esac
+
+docker pull "$CI_CONTAINER"
+
+# Use the following command to debug the docker build locally:
+# <host-user-id> must be 0 if podman is used as drop-in replacement for docker
+# $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/sh "$CI_CONTAINER"
+# root@container:/# export jobname=<jobname>
+# root@container:/# /usr/src/git/ci/run-docker-build.sh <host-user-id>
+
+container_cache_dir=/tmp/travis-cache
+
+docker run \
+ --interactive \
+ --env DEVELOPER \
+ --env DEFAULT_TEST_TARGET \
+ --env GIT_PROVE_OPTS \
+ --env GIT_TEST_OPTS \
+ --env GIT_TEST_CLONE_2GB \
+ --env MAKEFLAGS \
+ --env jobname \
+ --env cache_dir="$container_cache_dir" \
+ --volume "${PWD}:/usr/src/git" \
+ --volume "$cache_dir:$container_cache_dir" \
+ "$CI_CONTAINER" \
+ /usr/src/git/ci/run-docker-build.sh $(id -u $USER)
+
+check_unignored_build_artifacts
+
+save_good_tree
diff --git a/ci/run-linux32-docker.sh b/ci/run-linux32-docker.sh
deleted file mode 100755
index 751acfcf8a..0000000000
--- a/ci/run-linux32-docker.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-#
-# Download and run Docker image to build and test 32-bit Git
-#
-
-. ${0%/*}/lib.sh
-
-docker pull daald/ubuntu32:xenial
-
-# Use the following command to debug the docker build locally:
-# $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial
-# root@container:/# /usr/src/git/ci/run-linux32-build.sh <host-user-id>
-
-container_cache_dir=/tmp/travis-cache
-
-docker run \
- --interactive \
- --env DEVELOPER \
- --env DEFAULT_TEST_TARGET \
- --env GIT_PROVE_OPTS \
- --env GIT_TEST_OPTS \
- --env GIT_TEST_CLONE_2GB \
- --env cache_dir="$container_cache_dir" \
- --volume "${PWD}:/usr/src/git" \
- --volume "$cache_dir:$container_cache_dir" \
- daald/ubuntu32:xenial \
- /usr/src/git/ci/run-linux32-build.sh $(id -u $USER)
-
-check_unignored_build_artifacts
-
-save_good_tree
diff --git a/commit-graph.c b/commit-graph.c
index f013a84e29..6dc777e2f3 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -16,13 +16,32 @@
#include "hashmap.h"
#include "replace-object.h"
#include "progress.h"
+#include "bloom.h"
+#include "commit-slab.h"
+
+void git_test_write_commit_graph_or_die(void)
+{
+ int flags = 0;
+ if (!git_env_bool(GIT_TEST_COMMIT_GRAPH, 0))
+ return;
+
+ if (git_env_bool(GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS, 0))
+ flags = COMMIT_GRAPH_WRITE_BLOOM_FILTERS;
+
+ if (write_commit_graph_reachable(the_repository->objects->odb,
+ flags, NULL))
+ die("failed to write commit-graph under GIT_TEST_COMMIT_GRAPH");
+}
#define GRAPH_SIGNATURE 0x43475048 /* "CGPH" */
#define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
#define GRAPH_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
#define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */
#define GRAPH_CHUNKID_EXTRAEDGES 0x45444745 /* "EDGE" */
+#define GRAPH_CHUNKID_BLOOMINDEXES 0x42494458 /* "BIDX" */
+#define GRAPH_CHUNKID_BLOOMDATA 0x42444154 /* "BDAT" */
#define GRAPH_CHUNKID_BASE 0x42415345 /* "BASE" */
+#define MAX_NUM_CHUNKS 7
#define GRAPH_DATA_WIDTH (the_hash_algo->rawsz + 16)
@@ -44,9 +63,51 @@
/* Remember to update object flag allocation in object.h */
#define REACHABLE (1u<<15)
-char *get_commit_graph_filename(struct object_directory *odb)
+/* Keep track of the order in which commits are added to our list. */
+define_commit_slab(commit_pos, int);
+static struct commit_pos commit_pos = COMMIT_SLAB_INIT(1, commit_pos);
+
+static void set_commit_pos(struct repository *r, const struct object_id *oid)
+{
+ static int32_t max_pos;
+ struct commit *commit = lookup_commit(r, oid);
+
+ if (!commit)
+ return; /* should never happen, but be lenient */
+
+ *commit_pos_at(&commit_pos, commit) = max_pos++;
+}
+
+static int commit_pos_cmp(const void *va, const void *vb)
+{
+ const struct commit *a = *(const struct commit **)va;
+ const struct commit *b = *(const struct commit **)vb;
+ return commit_pos_at(&commit_pos, a) -
+ commit_pos_at(&commit_pos, b);
+}
+
+static int commit_gen_cmp(const void *va, const void *vb)
+{
+ const struct commit *a = *(const struct commit **)va;
+ const struct commit *b = *(const struct commit **)vb;
+
+ /* lower generation commits first */
+ if (a->generation < b->generation)
+ return -1;
+ else if (a->generation > b->generation)
+ return 1;
+
+ /* use date as a heuristic when generations are equal */
+ if (a->date < b->date)
+ return -1;
+ else if (a->date > b->date)
+ return 1;
+ return 0;
+}
+
+char *get_commit_graph_filename(struct object_directory *obj_dir)
{
- return xstrfmt("%s/info/commit-graph", odb->path);
+ return xstrfmt("%s/info/commit-graph", obj_dir->path);
}
static char *get_split_graph_filename(struct object_directory *odb,
@@ -69,7 +130,6 @@ static uint8_t oid_version(void)
static struct commit_graph *alloc_commit_graph(void)
{
struct commit_graph *g = xcalloc(1, sizeof(*g));
- g->graph_fd = -1;
return g;
}
@@ -123,14 +183,13 @@ struct commit_graph *load_commit_graph_one_fd_st(int fd, struct stat *st,
return NULL;
}
graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
- ret = parse_commit_graph(graph_map, fd, graph_size);
+ close(fd);
+ ret = parse_commit_graph(graph_map, graph_size);
if (ret)
ret->odb = odb;
- else {
+ else
munmap(graph_map, graph_size);
- close(fd);
- }
return ret;
}
@@ -165,8 +224,7 @@ static int verify_commit_graph_lite(struct commit_graph *g)
return 0;
}
-struct commit_graph *parse_commit_graph(void *graph_map, int fd,
- size_t graph_size)
+struct commit_graph *parse_commit_graph(void *graph_map, size_t graph_size)
{
const unsigned char *data, *chunk_lookup;
uint32_t i;
@@ -209,7 +267,6 @@ struct commit_graph *parse_commit_graph(void *graph_map, int fd,
graph->hash_len = the_hash_algo->rawsz;
graph->num_chunks = *(unsigned char*)(data + 6);
- graph->graph_fd = fd;
graph->data = graph_map;
graph->data_len = graph_size;
@@ -274,6 +331,32 @@ struct commit_graph *parse_commit_graph(void *graph_map, int fd,
chunk_repeated = 1;
else
graph->chunk_base_graphs = data + chunk_offset;
+ break;
+
+ case GRAPH_CHUNKID_BLOOMINDEXES:
+ if (graph->chunk_bloom_indexes)
+ chunk_repeated = 1;
+ else
+ graph->chunk_bloom_indexes = data + chunk_offset;
+ break;
+
+ case GRAPH_CHUNKID_BLOOMDATA:
+ if (graph->chunk_bloom_data)
+ chunk_repeated = 1;
+ else {
+ uint32_t hash_version;
+ graph->chunk_bloom_data = data + chunk_offset;
+ hash_version = get_be32(data + chunk_offset);
+
+ if (hash_version != 1)
+ break;
+
+ graph->bloom_filter_settings = xmalloc(sizeof(struct bloom_filter_settings));
+ graph->bloom_filter_settings->hash_version = hash_version;
+ graph->bloom_filter_settings->num_hashes = get_be32(data + chunk_offset + 4);
+ graph->bloom_filter_settings->bits_per_entry = get_be32(data + chunk_offset + 8);
+ }
+ break;
}
if (chunk_repeated) {
@@ -292,6 +375,15 @@ struct commit_graph *parse_commit_graph(void *graph_map, int fd,
last_chunk_offset = chunk_offset;
}
+ if (graph->chunk_bloom_indexes && graph->chunk_bloom_data) {
+ init_bloom_filters();
+ } else {
+ /* We need both the bloom chunks to exist together. Else ignore the data */
+ graph->chunk_bloom_indexes = NULL;
+ graph->chunk_bloom_data = NULL;
+ graph->bloom_filter_settings = NULL;
+ }
+
hashcpy(graph->oid.hash, graph->data + graph->data_len - graph->hash_len);
if (verify_commit_graph_lite(graph)) {
@@ -788,9 +880,12 @@ struct write_commit_graph_context {
unsigned append:1,
report_progress:1,
split:1,
- check_oids:1;
+ check_oids:1,
+ changed_paths:1,
+ order_by_pack:1;
const struct split_commit_graph_opts *split_opts;
+ size_t total_bloom_filter_data_size;
};
static void write_graph_chunk_fanout(struct hashfile *f,
@@ -866,7 +961,7 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len,
if (edge_value >= 0)
edge_value += ctx->new_num_commits_in_base;
- else {
+ else if (ctx->new_base_graph) {
uint32_t pos;
if (find_commit_in_graph(parent->item,
ctx->new_base_graph,
@@ -897,7 +992,7 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len,
if (edge_value >= 0)
edge_value += ctx->new_num_commits_in_base;
- else {
+ else if (ctx->new_base_graph) {
uint32_t pos;
if (find_commit_in_graph(parent->item,
ctx->new_base_graph,
@@ -964,7 +1059,7 @@ static void write_graph_chunk_extra_edges(struct hashfile *f,
if (edge_value >= 0)
edge_value += ctx->new_num_commits_in_base;
- else {
+ else if (ctx->new_base_graph) {
uint32_t pos;
if (find_commit_in_graph(parent->item,
ctx->new_base_graph,
@@ -986,6 +1081,59 @@ static void write_graph_chunk_extra_edges(struct hashfile *f,
}
}
+static void write_graph_chunk_bloom_indexes(struct hashfile *f,
+ struct write_commit_graph_context *ctx)
+{
+ struct commit **list = ctx->commits.list;
+ struct commit **last = ctx->commits.list + ctx->commits.nr;
+ uint32_t cur_pos = 0;
+ struct progress *progress = NULL;
+ int i = 0;
+
+ if (ctx->report_progress)
+ progress = start_delayed_progress(
+ _("Writing changed paths Bloom filters index"),
+ ctx->commits.nr);
+
+ while (list < last) {
+ struct bloom_filter *filter = get_bloom_filter(ctx->r, *list, 0);
+ cur_pos += filter->len;
+ display_progress(progress, ++i);
+ hashwrite_be32(f, cur_pos);
+ list++;
+ }
+
+ stop_progress(&progress);
+}
+
+static void write_graph_chunk_bloom_data(struct hashfile *f,
+ struct write_commit_graph_context *ctx,
+ const struct bloom_filter_settings *settings)
+{
+ struct commit **list = ctx->commits.list;
+ struct commit **last = ctx->commits.list + ctx->commits.nr;
+ struct progress *progress = NULL;
+ int i = 0;
+
+ if (ctx->report_progress)
+ progress = start_delayed_progress(
+ _("Writing changed paths Bloom filters data"),
+ ctx->commits.nr);
+
+ hashwrite_be32(f, settings->hash_version);
+ hashwrite_be32(f, settings->num_hashes);
+ hashwrite_be32(f, settings->bits_per_entry);
+
+ while (list < last) {
+ struct bloom_filter *filter = get_bloom_filter(ctx->r, *list, 0);
+ display_progress(progress, ++i);
+ hashwrite(f, filter->data, filter->len * sizeof(unsigned char));
+ list++;
+ }
+
+ stop_progress(&progress);
+}
+
static int oid_compare(const void *_a, const void *_b)
{
const struct object_id *a = (const struct object_id *)_a;
@@ -1017,6 +1165,8 @@ static int add_packed_commits(const struct object_id *oid,
oidcpy(&(ctx->oids.list[ctx->oids.nr]), oid);
ctx->oids.nr++;
+ set_commit_pos(ctx->r, oid);
+
return 0;
}
@@ -1037,6 +1187,8 @@ static void close_reachable(struct write_commit_graph_context *ctx)
{
int i;
struct commit *commit;
+ enum commit_graph_split_flags flags = ctx->split_opts ?
+ ctx->split_opts->flags : COMMIT_GRAPH_SPLIT_UNSPECIFIED;
if (ctx->report_progress)
ctx->progress = start_delayed_progress(
@@ -1066,8 +1218,9 @@ static void close_reachable(struct write_commit_graph_context *ctx)
if (!commit)
continue;
if (ctx->split) {
- if (!parse_commit(commit) &&
- commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
+ if ((!parse_commit(commit) &&
+ commit->graph_pos == COMMIT_NOT_FROM_GRAPH) ||
+ flags == COMMIT_GRAPH_SPLIT_REPLACE)
add_missing_parents(ctx, commit);
} else if (!parse_commit_no_graph(commit))
add_missing_parents(ctx, commit);
@@ -1133,13 +1286,45 @@ static void compute_generation_numbers(struct write_commit_graph_context *ctx)
stop_progress(&ctx->progress);
}
-static int add_ref_to_list(const char *refname,
- const struct object_id *oid,
- int flags, void *cb_data)
+static void compute_bloom_filters(struct write_commit_graph_context *ctx)
{
- struct string_list *list = (struct string_list *)cb_data;
+ int i;
+ struct progress *progress = NULL;
+ struct commit **sorted_commits;
+
+ init_bloom_filters();
- string_list_append(list, oid_to_hex(oid));
+ if (ctx->report_progress)
+ progress = start_delayed_progress(
+ _("Computing commit changed paths Bloom filters"),
+ ctx->commits.nr);
+
+ ALLOC_ARRAY(sorted_commits, ctx->commits.nr);
+ COPY_ARRAY(sorted_commits, ctx->commits.list, ctx->commits.nr);
+
+ if (ctx->order_by_pack)
+ QSORT(sorted_commits, ctx->commits.nr, commit_pos_cmp);
+ else
+ QSORT(sorted_commits, ctx->commits.nr, commit_gen_cmp);
+
+ for (i = 0; i < ctx->commits.nr; i++) {
+ struct commit *c = sorted_commits[i];
+ struct bloom_filter *filter = get_bloom_filter(ctx->r, c, 1);
+ ctx->total_bloom_filter_data_size += sizeof(unsigned char) * filter->len;
+ display_progress(progress, i + 1);
+ }
+
+ free(sorted_commits);
+ stop_progress(&progress);
+}
+
+static int add_ref_to_set(const char *refname,
+ const struct object_id *oid,
+ int flags, void *cb_data)
+{
+ struct oidset *commits = (struct oidset *)cb_data;
+
+ oidset_insert(commits, oid);
return 0;
}
@@ -1147,14 +1332,14 @@ int write_commit_graph_reachable(struct object_directory *odb,
enum commit_graph_write_flags flags,
const struct split_commit_graph_opts *split_opts)
{
- struct string_list list = STRING_LIST_INIT_DUP;
+ struct oidset commits = OIDSET_INIT;
int result;
- for_each_ref(add_ref_to_list, &list);
- result = write_commit_graph(odb, NULL, &list,
+ for_each_ref(add_ref_to_set, &commits);
+ result = write_commit_graph(odb, NULL, &commits,
flags, split_opts);
- string_list_clear(&list, 0);
+ oidset_clear(&commits);
return result;
}
@@ -1203,39 +1388,46 @@ static int fill_oids_from_packs(struct write_commit_graph_context *ctx,
return 0;
}
-static int fill_oids_from_commit_hex(struct write_commit_graph_context *ctx,
- struct string_list *commit_hex)
+static int fill_oids_from_commits(struct write_commit_graph_context *ctx,
+ struct oidset *commits)
{
- uint32_t i;
+ uint32_t i = 0;
struct strbuf progress_title = STRBUF_INIT;
+ struct oidset_iter iter;
+ struct object_id *oid;
+
+ if (!oidset_size(commits))
+ return 0;
if (ctx->report_progress) {
strbuf_addf(&progress_title,
Q_("Finding commits for commit graph from %d ref",
"Finding commits for commit graph from %d refs",
- commit_hex->nr),
- commit_hex->nr);
+ oidset_size(commits)),
+ oidset_size(commits));
ctx->progress = start_delayed_progress(
progress_title.buf,
- commit_hex->nr);
+ oidset_size(commits));
}
- for (i = 0; i < commit_hex->nr; i++) {
- const char *end;
- struct object_id oid;
+
+ oidset_iter_init(commits, &iter);
+ while ((oid = oidset_iter_next(&iter))) {
struct commit *result;
- display_progress(ctx->progress, i + 1);
- if (!parse_oid_hex(commit_hex->items[i].string, &oid, &end) &&
- (result = lookup_commit_reference_gently(ctx->r, &oid, 1))) {
+ display_progress(ctx->progress, ++i);
+
+ result = lookup_commit_reference_gently(ctx->r, oid, 1);
+ if (result) {
ALLOC_GROW(ctx->oids.list, ctx->oids.nr + 1, ctx->oids.alloc);
oidcpy(&ctx->oids.list[ctx->oids.nr], &(result->object.oid));
ctx->oids.nr++;
} else if (ctx->check_oids) {
error(_("invalid commit object id: %s"),
- commit_hex->items[i].string);
+ oid_to_hex(oid));
return -1;
}
}
+
stop_progress(&ctx->progress);
strbuf_release(&progress_title);
@@ -1287,6 +1479,8 @@ static uint32_t count_distinct_commits(struct write_commit_graph_context *ctx)
static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
{
uint32_t i;
+ enum commit_graph_split_flags flags = ctx->split_opts ?
+ ctx->split_opts->flags : COMMIT_GRAPH_SPLIT_UNSPECIFIED;
ctx->num_extra_edges = 0;
if (ctx->report_progress)
@@ -1303,11 +1497,14 @@ static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
ALLOC_GROW(ctx->commits.list, ctx->commits.nr + 1, ctx->commits.alloc);
ctx->commits.list[ctx->commits.nr] = lookup_commit(ctx->r, &ctx->oids.list[i]);
- if (ctx->split &&
+ if (ctx->split && flags != COMMIT_GRAPH_SPLIT_REPLACE &&
ctx->commits.list[ctx->commits.nr]->graph_pos != COMMIT_NOT_FROM_GRAPH)
continue;
- parse_commit_no_graph(ctx->commits.list[ctx->commits.nr]);
+ if (ctx->split && flags == COMMIT_GRAPH_SPLIT_REPLACE)
+ parse_commit(ctx->commits.list[ctx->commits.nr]);
+ else
+ parse_commit_no_graph(ctx->commits.list[ctx->commits.nr]);
num_parents = commit_list_count(ctx->commits.list[ctx->commits.nr]->parents);
if (num_parents > 2)
@@ -1350,12 +1547,13 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
int fd;
struct hashfile *f;
struct lock_file lk = LOCK_INIT;
- uint32_t chunk_ids[6];
- uint64_t chunk_offsets[6];
+ uint32_t chunk_ids[MAX_NUM_CHUNKS + 1];
+ uint64_t chunk_offsets[MAX_NUM_CHUNKS + 1];
const unsigned hashsz = the_hash_algo->rawsz;
struct strbuf progress_title = STRBUF_INIT;
int num_chunks = 3;
struct object_id file_hash;
+ const struct bloom_filter_settings bloom_settings = DEFAULT_BLOOM_FILTER_SETTINGS;
if (ctx->split) {
struct strbuf tmp_file = STRBUF_INIT;
@@ -1382,7 +1580,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
fd = git_mkstemp_mode(ctx->graph_name, 0444);
if (fd < 0) {
- error(_("unable to create '%s'"), ctx->graph_name);
+ error(_("unable to create temporary graph layer"));
return -1;
}
@@ -1400,6 +1598,12 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
chunk_ids[num_chunks] = GRAPH_CHUNKID_EXTRAEDGES;
num_chunks++;
}
+ if (ctx->changed_paths) {
+ chunk_ids[num_chunks] = GRAPH_CHUNKID_BLOOMINDEXES;
+ num_chunks++;
+ chunk_ids[num_chunks] = GRAPH_CHUNKID_BLOOMDATA;
+ num_chunks++;
+ }
if (ctx->num_commit_graphs_after > 1) {
chunk_ids[num_chunks] = GRAPH_CHUNKID_BASE;
num_chunks++;
@@ -1418,6 +1622,15 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
4 * ctx->num_extra_edges;
num_chunks++;
}
+ if (ctx->changed_paths) {
+ chunk_offsets[num_chunks + 1] = chunk_offsets[num_chunks] +
+ sizeof(uint32_t) * ctx->commits.nr;
+ num_chunks++;
+
+ chunk_offsets[num_chunks + 1] = chunk_offsets[num_chunks] +
+ sizeof(uint32_t) * 3 + ctx->total_bloom_filter_data_size;
+ num_chunks++;
+ }
if (ctx->num_commit_graphs_after > 1) {
chunk_offsets[num_chunks + 1] = chunk_offsets[num_chunks] +
hashsz * (ctx->num_commit_graphs_after - 1);
@@ -1455,6 +1668,10 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
write_graph_chunk_data(f, hashsz, ctx);
if (ctx->num_extra_edges)
write_graph_chunk_extra_edges(f, ctx);
+ if (ctx->changed_paths) {
+ write_graph_chunk_bloom_indexes(f, ctx);
+ write_graph_chunk_bloom_data(f, ctx, &bloom_settings);
+ }
if (ctx->num_commit_graphs_after > 1 &&
write_graph_chunk_base(f, ctx)) {
return -1;
@@ -1488,8 +1705,12 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
}
if (ctx->base_graph_name) {
- const char *dest = ctx->commit_graph_filenames_after[
- ctx->num_commit_graphs_after - 2];
+ const char *dest;
+ int idx = ctx->num_commit_graphs_after - 1;
+ if (ctx->num_commit_graphs_after > 1)
+ idx--;
+
+ dest = ctx->commit_graph_filenames_after[idx];
if (strcmp(ctx->base_graph_name, dest)) {
result = rename(ctx->base_graph_name, dest);
@@ -1529,6 +1750,7 @@ static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
{
struct commit_graph *g;
uint32_t num_commits;
+ enum commit_graph_split_flags flags = COMMIT_GRAPH_SPLIT_UNSPECIFIED;
uint32_t i;
int max_commits = 0;
@@ -1539,24 +1761,36 @@ static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
if (ctx->split_opts->size_multiple)
size_mult = ctx->split_opts->size_multiple;
+
+ flags = ctx->split_opts->flags;
}
g = ctx->r->objects->commit_graph;
num_commits = ctx->commits.nr;
- ctx->num_commit_graphs_after = ctx->num_commit_graphs_before + 1;
-
- while (g && (g->num_commits <= size_mult * num_commits ||
- (max_commits && num_commits > max_commits))) {
- if (g->odb != ctx->odb)
- break;
+ if (flags == COMMIT_GRAPH_SPLIT_REPLACE)
+ ctx->num_commit_graphs_after = 1;
+ else
+ ctx->num_commit_graphs_after = ctx->num_commit_graphs_before + 1;
+
+ if (flags != COMMIT_GRAPH_SPLIT_MERGE_PROHIBITED &&
+ flags != COMMIT_GRAPH_SPLIT_REPLACE) {
+ while (g && (g->num_commits <= size_mult * num_commits ||
+ (max_commits && num_commits > max_commits))) {
+ if (g->odb != ctx->odb)
+ break;
- num_commits += g->num_commits;
- g = g->base_graph;
+ num_commits += g->num_commits;
+ g = g->base_graph;
- ctx->num_commit_graphs_after--;
+ ctx->num_commit_graphs_after--;
+ }
}
- ctx->new_base_graph = g;
+ if (flags != COMMIT_GRAPH_SPLIT_REPLACE)
+ ctx->new_base_graph = g;
+ else if (ctx->num_commit_graphs_after != 1)
+ BUG("split_graph_merge_strategy: num_commit_graphs_after "
+ "should be 1 with --split=replace");
if (ctx->num_commit_graphs_after == 2) {
char *old_graph_name = get_commit_graph_filename(g->odb);
@@ -1570,8 +1804,8 @@ static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
free(old_graph_name);
}
- ALLOC_ARRAY(ctx->commit_graph_filenames_after, ctx->num_commit_graphs_after);
- ALLOC_ARRAY(ctx->commit_graph_hash_after, ctx->num_commit_graphs_after);
+ CALLOC_ARRAY(ctx->commit_graph_filenames_after, ctx->num_commit_graphs_after);
+ CALLOC_ARRAY(ctx->commit_graph_hash_after, ctx->num_commit_graphs_after);
for (i = 0; i < ctx->num_commit_graphs_after &&
i < ctx->num_commit_graphs_before; i++)
@@ -1707,7 +1941,7 @@ static void expire_commit_graphs(struct write_commit_graph_context *ctx)
timestamp_t expire_time = time(NULL);
if (ctx->split_opts && ctx->split_opts->expire_time)
- expire_time -= ctx->split_opts->expire_time;
+ expire_time = ctx->split_opts->expire_time;
if (!ctx->split) {
char *chain_file_name = get_chain_filename(ctx->odb);
unlink(chain_file_name);
@@ -1756,13 +1990,14 @@ out:
int write_commit_graph(struct object_directory *odb,
struct string_list *pack_indexes,
- struct string_list *commit_hex,
+ struct oidset *commits,
enum commit_graph_write_flags flags,
const struct split_commit_graph_opts *split_opts)
{
struct write_commit_graph_context *ctx;
uint32_t i, count_distinct = 0;
int res = 0;
+ int replace = 0;
if (!commit_graph_compatible(the_repository))
return 0;
@@ -1775,6 +2010,8 @@ int write_commit_graph(struct object_directory *odb,
ctx->split = flags & COMMIT_GRAPH_WRITE_SPLIT ? 1 : 0;
ctx->check_oids = flags & COMMIT_GRAPH_WRITE_CHECK_OIDS ? 1 : 0;
ctx->split_opts = split_opts;
+ ctx->changed_paths = flags & COMMIT_GRAPH_WRITE_BLOOM_FILTERS ? 1 : 0;
+ ctx->total_bloom_filter_data_size = 0;
if (ctx->split) {
struct commit_graph *g;
@@ -1797,6 +2034,9 @@ int write_commit_graph(struct object_directory *odb,
g = g->base_graph;
}
}
+
+ if (ctx->split_opts)
+ replace = ctx->split_opts->flags & COMMIT_GRAPH_SPLIT_REPLACE;
}
ctx->approx_nr_objects = approximate_object_count();
@@ -1824,17 +2064,20 @@ int write_commit_graph(struct object_directory *odb,
}
if (pack_indexes) {
+ ctx->order_by_pack = 1;
if ((res = fill_oids_from_packs(ctx, pack_indexes)))
goto cleanup;
}
- if (commit_hex) {
- if ((res = fill_oids_from_commit_hex(ctx, commit_hex)))
+ if (commits) {
+ if ((res = fill_oids_from_commits(ctx, commits)))
goto cleanup;
}
- if (!pack_indexes && !commit_hex)
+ if (!pack_indexes && !commits) {
+ ctx->order_by_pack = 1;
fill_oids_from_all_packs(ctx);
+ }
close_reachable(ctx);
@@ -1857,18 +2100,22 @@ int write_commit_graph(struct object_directory *odb,
goto cleanup;
}
- if (!ctx->commits.nr)
+ if (!ctx->commits.nr && !replace)
goto cleanup;
if (ctx->split) {
split_graph_merge_strategy(ctx);
- merge_commit_graphs(ctx);
+ if (!replace)
+ merge_commit_graphs(ctx);
} else
ctx->num_commit_graphs_after = 1;
compute_generation_numbers(ctx);
+ if (ctx->changed_paths)
+ compute_bloom_filters(ctx);
+
res = write_commit_graph_file(ctx);
if (ctx->split)
@@ -2088,12 +2335,12 @@ void free_commit_graph(struct commit_graph *g)
{
if (!g)
return;
- if (g->graph_fd >= 0) {
+ if (g->data) {
munmap((void *)g->data, g->data_len);
g->data = NULL;
- close(g->graph_fd);
}
free(g->filename);
+ free(g->bloom_filter_settings);
free(g);
}
diff --git a/commit-graph.h b/commit-graph.h
index e87a6f6360..4212766a4f 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -6,11 +6,23 @@
#include "string-list.h"
#include "cache.h"
#include "object-store.h"
+#include "oidset.h"
#define GIT_TEST_COMMIT_GRAPH "GIT_TEST_COMMIT_GRAPH"
#define GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD "GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD"
+#define GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS "GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS"
+
+/*
+ * This method is only used to enhance coverage of the commit-graph
+ * feature in the test suite with the GIT_TEST_COMMIT_GRAPH and
+ * GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS environment variables. Do not
+ * call this method oustide of a builtin, and only if you know what
+ * you are doing!
+ */
+void git_test_write_commit_graph_or_die(void);
struct commit;
+struct bloom_filter_settings;
char *get_commit_graph_filename(struct object_directory *odb);
int open_commit_graph(const char *graph_file, int *fd, struct stat *st);
@@ -39,8 +51,6 @@ struct tree *get_commit_tree_in_graph(struct repository *r,
const struct commit *c);
struct commit_graph {
- int graph_fd;
-
const unsigned char *data;
size_t data_len;
@@ -59,14 +69,17 @@ struct commit_graph {
const unsigned char *chunk_commit_data;
const unsigned char *chunk_extra_edges;
const unsigned char *chunk_base_graphs;
+ const unsigned char *chunk_bloom_indexes;
+ const unsigned char *chunk_bloom_data;
+
+ struct bloom_filter_settings *bloom_filter_settings;
};
struct commit_graph *load_commit_graph_one_fd_st(int fd, struct stat *st,
struct object_directory *odb);
struct commit_graph *read_commit_graph_one(struct repository *r,
struct object_directory *odb);
-struct commit_graph *parse_commit_graph(void *graph_map, int fd,
- size_t graph_size);
+struct commit_graph *parse_commit_graph(void *graph_map, size_t graph_size);
/*
* Return 1 if and only if the repository has a commit-graph
@@ -79,13 +92,21 @@ enum commit_graph_write_flags {
COMMIT_GRAPH_WRITE_PROGRESS = (1 << 1),
COMMIT_GRAPH_WRITE_SPLIT = (1 << 2),
/* Make sure that each OID in the input is a valid commit OID. */
- COMMIT_GRAPH_WRITE_CHECK_OIDS = (1 << 3)
+ COMMIT_GRAPH_WRITE_CHECK_OIDS = (1 << 3),
+ COMMIT_GRAPH_WRITE_BLOOM_FILTERS = (1 << 4),
+};
+
+enum commit_graph_split_flags {
+ COMMIT_GRAPH_SPLIT_UNSPECIFIED = 0,
+ COMMIT_GRAPH_SPLIT_MERGE_PROHIBITED = 1,
+ COMMIT_GRAPH_SPLIT_REPLACE = 2
};
struct split_commit_graph_opts {
int size_multiple;
int max_commits;
timestamp_t expire_time;
+ enum commit_graph_split_flags flags;
};
/*
@@ -99,7 +120,7 @@ int write_commit_graph_reachable(struct object_directory *odb,
const struct split_commit_graph_opts *split_opts);
int write_commit_graph(struct object_directory *odb,
struct string_list *pack_indexes,
- struct string_list *commit_hex,
+ struct oidset *commits,
enum commit_graph_write_flags flags,
const struct split_commit_graph_opts *split_opts);
diff --git a/commit.h b/commit.h
index 008a0fa4a0..ab91d21131 100644
--- a/commit.h
+++ b/commit.h
@@ -249,6 +249,8 @@ struct oid_array;
struct ref;
int register_shallow(struct repository *r, const struct object_id *oid);
int unregister_shallow(const struct object_id *oid);
+int commit_shallow_file(struct repository *r, struct lock_file *lk);
+void rollback_shallow_file(struct repository *r, struct lock_file *lk);
int for_each_commit_graft(each_commit_graft_fn, void *);
int is_repository_shallow(struct repository *r);
struct commit_list *get_shallow_commits(struct object_array *heads,
diff --git a/config.c b/config.c
index d17d2bd9dc..8db9c77098 100644
--- a/config.c
+++ b/config.c
@@ -37,6 +37,7 @@ struct config_source {
enum config_error_action default_error_action;
int linenr;
int eof;
+ size_t total_len;
struct strbuf value;
struct strbuf var;
unsigned subsection_case_sensitive : 1;
@@ -309,7 +310,7 @@ int git_config_include(const char *var, const char *value, void *data)
{
struct config_include_data *inc = data;
const char *cond, *key;
- int cond_len;
+ size_t cond_len;
int ret;
/*
@@ -358,12 +359,13 @@ static inline int iskeychar(int c)
*
* store_key - pointer to char* which will hold a copy of the key with
* lowercase section and variable name
- * baselen - pointer to int which will hold the length of the
+ * baselen - pointer to size_t which will hold the length of the
* section + subsection part, can be NULL
*/
-static int git_config_parse_key_1(const char *key, char **store_key, int *baselen_, int quiet)
+static int git_config_parse_key_1(const char *key, char **store_key, size_t *baselen_, int quiet)
{
- int i, dot, baselen;
+ size_t i, baselen;
+ int dot;
const char *last_dot = strrchr(key, '.');
/*
@@ -425,7 +427,7 @@ out_free_ret_1:
return -CONFIG_INVALID_KEY;
}
-int git_config_parse_key(const char *key, char **store_key, int *baselen)
+int git_config_parse_key(const char *key, char **store_key, size_t *baselen)
{
return git_config_parse_key_1(key, store_key, baselen, 0);
}
@@ -523,6 +525,19 @@ static int get_next_char(void)
c = '\r';
}
}
+
+ if (c != EOF && ++cf->total_len > INT_MAX) {
+ /*
+ * This is an absurdly long config file; refuse to parse
+ * further in order to protect downstream code from integer
+ * overflows. Note that we can't return an error specifically,
+ * but we can mark EOF and put trash in the return value,
+ * which will trigger a parse error.
+ */
+ cf->eof = 1;
+ return 0;
+ }
+
if (c == '\n')
cf->linenr++;
if (c == EOF) {
@@ -728,7 +743,7 @@ static int git_parse_source(config_fn_t fn, void *data,
const struct config_options *opts)
{
int comment = 0;
- int baselen = 0;
+ size_t baselen = 0;
struct strbuf *var = &cf->var;
int error_return = 0;
char *error_msg = NULL;
@@ -1539,6 +1554,7 @@ static int do_config_from(struct config_source *top, config_fn_t fn, void *data,
top->prev = cf;
top->linenr = 1;
top->eof = 0;
+ top->total_len = 0;
strbuf_init(&top->value, 1024);
strbuf_init(&top->var, 1024);
cf = top;
@@ -2383,7 +2399,7 @@ void git_die_config(const char *key, const char *err, ...)
*/
struct config_store_data {
- int baselen;
+ size_t baselen;
char *key;
int do_not_match;
regex_t *value_regex;
@@ -2509,7 +2525,7 @@ static struct strbuf store_create_section(const char *key,
const struct config_store_data *store)
{
const char *dot;
- int i;
+ size_t i;
struct strbuf sb = STRBUF_INIT;
dot = memchr(key, '.', store->baselen);
@@ -2522,7 +2538,9 @@ static struct strbuf store_create_section(const char *key,
}
strbuf_addstr(&sb, "\"]\n");
} else {
- strbuf_addf(&sb, "[%.*s]\n", store->baselen, key);
+ strbuf_addch(&sb, '[');
+ strbuf_add(&sb, key, store->baselen);
+ strbuf_addstr(&sb, "]\n");
}
return sb;
@@ -2545,7 +2563,6 @@ static ssize_t write_pair(int fd, const char *key, const char *value,
{
int i;
ssize_t ret;
- int length = strlen(key + store->baselen + 1);
const char *quote = "";
struct strbuf sb = STRBUF_INIT;
@@ -2564,8 +2581,7 @@ static ssize_t write_pair(int fd, const char *key, const char *value,
if (i && value[i - 1] == ' ')
quote = "\"";
- strbuf_addf(&sb, "\t%.*s = %s",
- length, key + store->baselen + 1, quote);
+ strbuf_addf(&sb, "\t%s = %s", key + store->baselen + 1, quote);
for (i = 0; value[i]; i++)
switch (value[i]) {
@@ -3238,7 +3254,7 @@ int config_error_nonbool(const char *var)
int parse_config_key(const char *var,
const char *section,
- const char **subsection, int *subsection_len,
+ const char **subsection, size_t *subsection_len,
const char **key)
{
const char *dot;
diff --git a/config.h b/config.h
index 9b3773f778..060874488f 100644
--- a/config.h
+++ b/config.h
@@ -254,7 +254,7 @@ int git_config_set_gently(const char *, const char *);
*/
void git_config_set(const char *, const char *);
-int git_config_parse_key(const char *, char **, int *);
+int git_config_parse_key(const char *, char **, size_t *);
int git_config_key_is_valid(const char *key);
int git_config_set_multivar_gently(const char *, const char *, const char *, int);
void git_config_set_multivar(const char *, const char *, const char *, int);
@@ -359,7 +359,7 @@ int git_config_include(const char *name, const char *value, void *data);
*/
int parse_config_key(const char *var,
const char *section,
- const char **subsection, int *subsection_len,
+ const char **subsection, size_t *subsection_len,
const char **key);
/**
diff --git a/config.mak.uname b/config.mak.uname
index 0ab8e00938..3e526f6b9f 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -308,6 +308,7 @@ ifeq ($(uname_S),GNU)
NO_STRLCPY = YesPlease
HAVE_PATHS_H = YesPlease
LIBC_CONTAINS_LIBINTL = YesPlease
+ FREAD_READS_DIRECTORIES = UnfortunatelyYes
endif
ifeq ($(uname_S),IRIX)
NO_SETENV = YesPlease
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index c21786f2fd..b1d6e5ebed 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -504,7 +504,7 @@ __git_index_files ()
{
local root="$2" match="$3"
- __git_ls_files_helper "$root" "$1" "$match" |
+ __git_ls_files_helper "$root" "$1" "${match:-?}" |
awk -F / -v pfx="${2//\\/\\\\}" '{
paths[$1] = 1
}
diff --git a/contrib/completion/git-completion.zsh b/contrib/completion/git-completion.zsh
index eef4eff53d..ce47e86b60 100644
--- a/contrib/completion/git-completion.zsh
+++ b/contrib/completion/git-completion.zsh
@@ -150,9 +150,11 @@ __git_zsh_cmd_common ()
push:'update remote refs along with associated objects'
rebase:'forward-port local commits to the updated upstream head'
reset:'reset current HEAD to the specified state'
+ restore:'restore working tree files'
rm:'remove files from the working tree and from the index'
show:'show various types of objects'
status:'show the working tree status'
+ switch:'switch branches'
tag:'create, list, delete or verify a tag object signed with GPG')
_describe -t common-commands 'common commands' list && _ret=0
}
diff --git a/contrib/subtree/Makefile b/contrib/subtree/Makefile
index 6906aae441..6fa7496bfd 100644
--- a/contrib/subtree/Makefile
+++ b/contrib/subtree/Makefile
@@ -25,14 +25,16 @@ ASCIIDOC_HTML = xhtml11
ASCIIDOC_DOCBOOK = docbook
ASCIIDOC_EXTRA =
XMLTO = xmlto
+XMLTO_EXTRA =
ifdef USE_ASCIIDOCTOR
ASCIIDOC = asciidoctor
ASCIIDOC_CONF =
ASCIIDOC_HTML = xhtml5
-ASCIIDOC_DOCBOOK = docbook45
+ASCIIDOC_DOCBOOK = docbook
ASCIIDOC_EXTRA += -I../../Documentation -rasciidoctor-extensions
ASCIIDOC_EXTRA += -alitdd='&\#x2d;&\#x2d;'
+XMLTO_EXTRA += --skip-validation
endif
ifndef SHELL_PATH
@@ -78,7 +80,7 @@ install-html: $(GIT_SUBTREE_HTML)
$(INSTALL) -m 644 $^ $(DESTDIR)$(htmldir)
$(GIT_SUBTREE_DOC): $(GIT_SUBTREE_XML)
- $(XMLTO) -m $(MANPAGE_XSL) man $^
+ $(XMLTO) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $^
$(GIT_SUBTREE_XML): $(GIT_SUBTREE_TXT)
$(ASCIIDOC) -b $(ASCIIDOC_DOCBOOK) -d manpage $(ASCIIDOC_CONF) \
diff --git a/convert.c b/convert.c
index 5aa87d45e3..572449825c 100644
--- a/convert.c
+++ b/convert.c
@@ -1018,7 +1018,7 @@ static int apply_filter(const char *path, const char *src, size_t len,
static int read_convert_config(const char *var, const char *value, void *cb)
{
const char *key, *name;
- int namelen;
+ size_t namelen;
struct convert_driver *drv;
/*
diff --git a/diff.c b/diff.c
index 1010d806f5..d1ad6a3c4a 100644
--- a/diff.c
+++ b/diff.c
@@ -573,7 +573,7 @@ static int fill_mmfile(struct repository *r, mmfile_t *mf,
mf->size = 0;
return 0;
}
- else if (diff_populate_filespec(r, one, 0))
+ else if (diff_populate_filespec(r, one, NULL))
return -1;
mf->ptr = one->data;
@@ -585,9 +585,13 @@ static int fill_mmfile(struct repository *r, mmfile_t *mf,
static unsigned long diff_filespec_size(struct repository *r,
struct diff_filespec *one)
{
+ struct diff_populate_filespec_options dpf_options = {
+ .check_size_only = 1,
+ };
+
if (!DIFF_FILE_VALID(one))
return 0;
- diff_populate_filespec(r, one, CHECK_SIZE_ONLY);
+ diff_populate_filespec(r, one, &dpf_options);
return one->size;
}
@@ -3020,6 +3024,9 @@ static void show_dirstat(struct diff_options *options)
struct diff_filepair *p = q->queue[i];
const char *name;
unsigned long copied, added, damage;
+ struct diff_populate_filespec_options dpf_options = {
+ .check_size_only = 1,
+ };
name = p->two->path ? p->two->path : p->one->path;
@@ -3047,19 +3054,19 @@ static void show_dirstat(struct diff_options *options)
}
if (DIFF_FILE_VALID(p->one) && DIFF_FILE_VALID(p->two)) {
- diff_populate_filespec(options->repo, p->one, 0);
- diff_populate_filespec(options->repo, p->two, 0);
+ diff_populate_filespec(options->repo, p->one, NULL);
+ diff_populate_filespec(options->repo, p->two, NULL);
diffcore_count_changes(options->repo,
p->one, p->two, NULL, NULL,
&copied, &added);
diff_free_filespec_data(p->one);
diff_free_filespec_data(p->two);
} else if (DIFF_FILE_VALID(p->one)) {
- diff_populate_filespec(options->repo, p->one, CHECK_SIZE_ONLY);
+ diff_populate_filespec(options->repo, p->one, &dpf_options);
copied = added = 0;
diff_free_filespec_data(p->one);
} else if (DIFF_FILE_VALID(p->two)) {
- diff_populate_filespec(options->repo, p->two, CHECK_SIZE_ONLY);
+ diff_populate_filespec(options->repo, p->two, &dpf_options);
copied = 0;
added = p->two->size;
diff_free_filespec_data(p->two);
@@ -3339,13 +3346,17 @@ static void emit_binary_diff(struct diff_options *o,
int diff_filespec_is_binary(struct repository *r,
struct diff_filespec *one)
{
+ struct diff_populate_filespec_options dpf_options = {
+ .check_binary = 1,
+ };
+
if (one->is_binary == -1) {
diff_filespec_load_driver(one, r->index);
if (one->driver->binary != -1)
one->is_binary = one->driver->binary;
else {
if (!one->data && DIFF_FILE_VALID(one))
- diff_populate_filespec(r, one, CHECK_BINARY);
+ diff_populate_filespec(r, one, &dpf_options);
if (one->is_binary == -1 && one->data)
one->is_binary = buffer_is_binary(one->data,
one->size);
@@ -3677,8 +3688,8 @@ static void builtin_diffstat(const char *name_a, const char *name_b,
}
else if (complete_rewrite) {
- diff_populate_filespec(o->repo, one, 0);
- diff_populate_filespec(o->repo, two, 0);
+ diff_populate_filespec(o->repo, one, NULL);
+ diff_populate_filespec(o->repo, two, NULL);
data->deleted = count_lines(one->data, one->size);
data->added = count_lines(two->data, two->size);
}
@@ -3914,9 +3925,10 @@ static int diff_populate_gitlink(struct diff_filespec *s, int size_only)
*/
int diff_populate_filespec(struct repository *r,
struct diff_filespec *s,
- unsigned int flags)
+ const struct diff_populate_filespec_options *options)
{
- int size_only = flags & CHECK_SIZE_ONLY;
+ int size_only = options ? options->check_size_only : 0;
+ int check_binary = options ? options->check_binary : 0;
int err = 0;
int conv_flags = global_conv_flags_eol;
/*
@@ -3986,7 +3998,7 @@ int diff_populate_filespec(struct repository *r,
* opening the file and inspecting the contents, this
* is probably fine.
*/
- if ((flags & CHECK_BINARY) &&
+ if (check_binary &&
s->size > big_file_threshold && s->is_binary == -1) {
s->is_binary = 1;
return 0;
@@ -4011,12 +4023,30 @@ int diff_populate_filespec(struct repository *r,
}
}
else {
- enum object_type type;
- if (size_only || (flags & CHECK_BINARY)) {
- type = oid_object_info(r, &s->oid, &s->size);
- if (type < 0)
- die("unable to read %s",
- oid_to_hex(&s->oid));
+ struct object_info info = {
+ .sizep = &s->size
+ };
+
+ if (!(size_only || check_binary))
+ /*
+ * Set contentp, since there is no chance that merely
+ * the size is sufficient.
+ */
+ info.contentp = &s->data;
+
+ if (options && options->missing_object_cb) {
+ if (!oid_object_info_extended(r, &s->oid, &info,
+ OBJECT_INFO_LOOKUP_REPLACE |
+ OBJECT_INFO_SKIP_FETCH_OBJECT))
+ goto object_read;
+ options->missing_object_cb(options->missing_object_data);
+ }
+ if (oid_object_info_extended(r, &s->oid, &info,
+ OBJECT_INFO_LOOKUP_REPLACE))
+ die("unable to read %s", oid_to_hex(&s->oid));
+
+object_read:
+ if (size_only || check_binary) {
if (size_only)
return 0;
if (s->size > big_file_threshold && s->is_binary == -1) {
@@ -4024,9 +4054,12 @@ int diff_populate_filespec(struct repository *r,
return 0;
}
}
- s->data = repo_read_object_file(r, &s->oid, &type, &s->size);
- if (!s->data)
- die("unable to read %s", oid_to_hex(&s->oid));
+ if (!info.contentp) {
+ info.contentp = &s->data;
+ if (oid_object_info_extended(r, &s->oid, &info,
+ OBJECT_INFO_LOOKUP_REPLACE))
+ die("unable to read %s", oid_to_hex(&s->oid));
+ }
s->should_free = 1;
}
return 0;
@@ -4144,7 +4177,7 @@ static struct diff_tempfile *prepare_temp_file(struct repository *r,
return temp;
}
else {
- if (diff_populate_filespec(r, one, 0))
+ if (diff_populate_filespec(r, one, NULL))
die("cannot read data blob for %s", one->path);
prep_temp_blob(r->index, name, temp,
one->data, one->size,
@@ -6410,9 +6443,9 @@ static int diff_filespec_is_identical(struct repository *r,
{
if (S_ISGITLINK(one->mode))
return 0;
- if (diff_populate_filespec(r, one, 0))
+ if (diff_populate_filespec(r, one, NULL))
return 0;
- if (diff_populate_filespec(r, two, 0))
+ if (diff_populate_filespec(r, two, NULL))
return 0;
return !memcmp(one->data, two->data, one->size);
}
@@ -6420,6 +6453,12 @@ static int diff_filespec_is_identical(struct repository *r,
static int diff_filespec_check_stat_unmatch(struct repository *r,
struct diff_filepair *p)
{
+ struct diff_populate_filespec_options dpf_options = {
+ .check_size_only = 1,
+ .missing_object_cb = diff_queued_diff_prefetch,
+ .missing_object_data = r,
+ };
+
if (p->done_skip_stat_unmatch)
return p->skip_stat_unmatch_result;
@@ -6442,8 +6481,8 @@ static int diff_filespec_check_stat_unmatch(struct repository *r,
!DIFF_FILE_VALID(p->two) ||
(p->one->oid_valid && p->two->oid_valid) ||
(p->one->mode != p->two->mode) ||
- diff_populate_filespec(r, p->one, CHECK_SIZE_ONLY) ||
- diff_populate_filespec(r, p->two, CHECK_SIZE_ONLY) ||
+ diff_populate_filespec(r, p->one, &dpf_options) ||
+ diff_populate_filespec(r, p->two, &dpf_options) ||
(p->one->size != p->two->size) ||
!diff_filespec_is_identical(r, p->one, p->two)) /* (2) */
p->skip_stat_unmatch_result = 1;
@@ -6494,9 +6533,9 @@ void diffcore_fix_diff_index(void)
QSORT(q->queue, q->nr, diffnamecmp);
}
-static void add_if_missing(struct repository *r,
- struct oid_array *to_fetch,
- const struct diff_filespec *filespec)
+void diff_add_if_missing(struct repository *r,
+ struct oid_array *to_fetch,
+ const struct diff_filespec *filespec)
{
if (filespec && filespec->oid_valid &&
!S_ISGITLINK(filespec->mode) &&
@@ -6505,30 +6544,48 @@ static void add_if_missing(struct repository *r,
oid_array_append(to_fetch, &filespec->oid);
}
-void diffcore_std(struct diff_options *options)
+void diff_queued_diff_prefetch(void *repository)
{
- if (options->repo == the_repository && has_promisor_remote()) {
- /*
- * Prefetch the diff pairs that are about to be flushed.
- */
- int i;
- struct diff_queue_struct *q = &diff_queued_diff;
- struct oid_array to_fetch = OID_ARRAY_INIT;
+ struct repository *repo = repository;
+ int i;
+ struct diff_queue_struct *q = &diff_queued_diff;
+ struct oid_array to_fetch = OID_ARRAY_INIT;
- for (i = 0; i < q->nr; i++) {
- struct diff_filepair *p = q->queue[i];
- add_if_missing(options->repo, &to_fetch, p->one);
- add_if_missing(options->repo, &to_fetch, p->two);
- }
- if (to_fetch.nr)
- /*
- * NEEDSWORK: Consider deduplicating the OIDs sent.
- */
- promisor_remote_get_direct(options->repo,
- to_fetch.oid, to_fetch.nr);
- oid_array_clear(&to_fetch);
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ diff_add_if_missing(repo, &to_fetch, p->one);
+ diff_add_if_missing(repo, &to_fetch, p->two);
}
+ /*
+ * NEEDSWORK: Consider deduplicating the OIDs sent.
+ */
+ promisor_remote_get_direct(repo, to_fetch.oid, to_fetch.nr);
+
+ oid_array_clear(&to_fetch);
+}
+
+void diffcore_std(struct diff_options *options)
+{
+ int output_formats_to_prefetch = DIFF_FORMAT_DIFFSTAT |
+ DIFF_FORMAT_NUMSTAT |
+ DIFF_FORMAT_PATCH |
+ DIFF_FORMAT_SHORTSTAT |
+ DIFF_FORMAT_DIRSTAT;
+
+ /*
+ * Check if the user requested a blob-data-requiring diff output and/or
+ * break-rewrite detection (which requires blob data). If yes, prefetch
+ * the diff pairs.
+ *
+ * If no prefetching occurs, diffcore_rename() will prefetch if it
+ * decides that it needs inexact rename detection.
+ */
+ if (options->repo == the_repository && has_promisor_remote() &&
+ (options->output_format & output_formats_to_prefetch ||
+ options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK))
+ diff_queued_diff_prefetch(options->repo);
+
/* NOTE please keep the following in sync with diff_tree_combined() */
if (options->skip_stat_unmatch)
diffcore_skip_stat_unmatch(options);
@@ -6774,7 +6831,7 @@ size_t fill_textconv(struct repository *r,
*outbuf = "";
return 0;
}
- if (diff_populate_filespec(r, df, 0))
+ if (diff_populate_filespec(r, df, NULL))
die("unable to read files to diff");
*outbuf = df->data;
return df->size;
diff --git a/diff.h b/diff.h
index 6febe7e365..9443dc1b00 100644
--- a/diff.h
+++ b/diff.h
@@ -285,6 +285,11 @@ struct diff_options {
/* Number of hexdigits to abbreviate raw format output to. */
int abbrev;
+ /* If non-zero, then stop computing after this many changes. */
+ int max_changes;
+ /* For internal use only. */
+ int num_changes;
+
int ita_invisible_in_index;
/* white-space error highlighting */
#define WSEH_NEW (1<<12)
diff --git a/diffcore-break.c b/diffcore-break.c
index 9d20a6a6fc..0d4a14964d 100644
--- a/diffcore-break.c
+++ b/diffcore-break.c
@@ -4,6 +4,7 @@
#include "cache.h"
#include "diff.h"
#include "diffcore.h"
+#include "promisor-remote.h"
static int should_break(struct repository *r,
struct diff_filespec *src,
@@ -49,6 +50,8 @@ static int should_break(struct repository *r,
unsigned long delta_size, max_size;
unsigned long src_copied, literal_added, src_removed;
+ struct diff_populate_filespec_options options = { 0 };
+
*merge_score_p = 0; /* assume no deletion --- "do not break"
* is the default.
*/
@@ -62,8 +65,13 @@ static int should_break(struct repository *r,
oideq(&src->oid, &dst->oid))
return 0; /* they are the same */
- if (diff_populate_filespec(r, src, 0) ||
- diff_populate_filespec(r, dst, 0))
+ if (r == the_repository && has_promisor_remote()) {
+ options.missing_object_cb = diff_queued_diff_prefetch;
+ options.missing_object_data = r;
+ }
+
+ if (diff_populate_filespec(r, src, &options) ||
+ diff_populate_filespec(r, dst, &options))
return 0; /* error but caught downstream */
max_size = ((src->size > dst->size) ? src->size : dst->size);
diff --git a/diffcore-rename.c b/diffcore-rename.c
index e189f407af..99e63e90f8 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -1,4 +1,5 @@
/*
+ *
* Copyright (C) 2005 Junio C Hamano
*/
#include "cache.h"
@@ -7,6 +8,7 @@
#include "object-store.h"
#include "hashmap.h"
#include "progress.h"
+#include "promisor-remote.h"
/* Table of rename/copy destinations */
@@ -128,10 +130,46 @@ struct diff_score {
short name_score;
};
+struct prefetch_options {
+ struct repository *repo;
+ int skip_unmodified;
+};
+static void prefetch(void *prefetch_options)
+{
+ struct prefetch_options *options = prefetch_options;
+ int i;
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+
+ for (i = 0; i < rename_dst_nr; i++) {
+ if (rename_dst[i].pair)
+ /*
+ * The loop in diffcore_rename() will not need these
+ * blobs, so skip prefetching.
+ */
+ continue; /* already found exact match */
+ diff_add_if_missing(options->repo, &to_fetch,
+ rename_dst[i].two);
+ }
+ for (i = 0; i < rename_src_nr; i++) {
+ if (options->skip_unmodified &&
+ diff_unmodified_pair(rename_src[i].p))
+ /*
+ * The loop in diffcore_rename() will not need these
+ * blobs, so skip prefetching.
+ */
+ continue;
+ diff_add_if_missing(options->repo, &to_fetch,
+ rename_src[i].p->one);
+ }
+ promisor_remote_get_direct(options->repo, to_fetch.oid, to_fetch.nr);
+ oid_array_clear(&to_fetch);
+}
+
static int estimate_similarity(struct repository *r,
struct diff_filespec *src,
struct diff_filespec *dst,
- int minimum_score)
+ int minimum_score,
+ int skip_unmodified)
{
/* src points at a file that existed in the original tree (or
* optionally a file in the destination tree) and dst points
@@ -148,6 +186,15 @@ static int estimate_similarity(struct repository *r,
*/
unsigned long max_size, delta_size, base_size, src_copied, literal_added;
int score;
+ struct diff_populate_filespec_options dpf_options = {
+ .check_size_only = 1
+ };
+ struct prefetch_options prefetch_options = {r, skip_unmodified};
+
+ if (r == the_repository && has_promisor_remote()) {
+ dpf_options.missing_object_cb = prefetch;
+ dpf_options.missing_object_data = &prefetch_options;
+ }
/* We deal only with regular files. Symlink renames are handled
* only when they are exact matches --- in other words, no edits
@@ -166,10 +213,10 @@ static int estimate_similarity(struct repository *r,
* say whether the size is valid or not!)
*/
if (!src->cnt_data &&
- diff_populate_filespec(r, src, CHECK_SIZE_ONLY))
+ diff_populate_filespec(r, src, &dpf_options))
return 0;
if (!dst->cnt_data &&
- diff_populate_filespec(r, dst, CHECK_SIZE_ONLY))
+ diff_populate_filespec(r, dst, &dpf_options))
return 0;
max_size = ((src->size > dst->size) ? src->size : dst->size);
@@ -187,9 +234,11 @@ static int estimate_similarity(struct repository *r,
if (max_size * (MAX_SCORE-minimum_score) < delta_size * MAX_SCORE)
return 0;
- if (!src->cnt_data && diff_populate_filespec(r, src, 0))
+ dpf_options.check_size_only = 0;
+
+ if (!src->cnt_data && diff_populate_filespec(r, src, &dpf_options))
return 0;
- if (!dst->cnt_data && diff_populate_filespec(r, dst, 0))
+ if (!dst->cnt_data && diff_populate_filespec(r, dst, &dpf_options))
return 0;
if (diffcore_count_changes(r, src, dst,
@@ -261,7 +310,7 @@ static unsigned int hash_filespec(struct repository *r,
struct diff_filespec *filespec)
{
if (!filespec->oid_valid) {
- if (diff_populate_filespec(r, filespec, 0))
+ if (diff_populate_filespec(r, filespec, NULL))
return 0;
hash_object_file(r->hash_algo, filespec->data, filespec->size,
"blob", &filespec->oid);
@@ -566,7 +615,8 @@ void diffcore_rename(struct diff_options *options)
this_src.score = estimate_similarity(options->repo,
one, two,
- minimum_score);
+ minimum_score,
+ skip_unmodified);
this_src.name_score = basename_same(one, two);
this_src.dst = i;
this_src.src = j;
diff --git a/diffcore.h b/diffcore.h
index 7c07347e42..d2a63c5c71 100644
--- a/diffcore.h
+++ b/diffcore.h
@@ -65,9 +65,25 @@ void free_filespec(struct diff_filespec *);
void fill_filespec(struct diff_filespec *, const struct object_id *,
int, unsigned short);
-#define CHECK_SIZE_ONLY 1
-#define CHECK_BINARY 2
-int diff_populate_filespec(struct repository *, struct diff_filespec *, unsigned int);
+/*
+ * Prefetch the entries in diff_queued_diff. The parameter is a pointer to a
+ * struct repository.
+ */
+void diff_queued_diff_prefetch(void *repository);
+
+struct diff_populate_filespec_options {
+ unsigned check_size_only : 1;
+ unsigned check_binary : 1;
+
+ /*
+ * If an object is missing, diff_populate_filespec() will invoke this
+ * callback before attempting to read that object again.
+ */
+ void (*missing_object_cb)(void *);
+ void *missing_object_data;
+};
+int diff_populate_filespec(struct repository *, struct diff_filespec *,
+ const struct diff_populate_filespec_options *);
void diff_free_filespec_data(struct diff_filespec *);
void diff_free_filespec_blob(struct diff_filespec *);
int diff_filespec_is_binary(struct repository *, struct diff_filespec *);
@@ -182,4 +198,12 @@ int diffcore_count_changes(struct repository *r,
unsigned long *src_copied,
unsigned long *literal_added);
+/*
+ * If filespec contains an OID and if that object is missing from the given
+ * repository, add that OID to to_fetch.
+ */
+void diff_add_if_missing(struct repository *r,
+ struct oid_array *to_fetch,
+ const struct diff_filespec *filespec);
+
#endif
diff --git a/dir.c b/dir.c
index 0ffb1b3302..d97e955848 100644
--- a/dir.c
+++ b/dir.c
@@ -1727,36 +1727,59 @@ static enum exist_status directory_exists_in_index(struct index_state *istate,
static enum path_treatment treat_directory(struct dir_struct *dir,
struct index_state *istate,
struct untracked_cache_dir *untracked,
- const char *dirname, int len, int baselen, int exclude,
+ const char *dirname, int len, int baselen, int excluded,
const struct pathspec *pathspec)
{
- int nested_repo = 0;
-
+ /*
+ * WARNING: From this function, you can return path_recurse or you
+ * can call read_directory_recursive() (or neither), but
+ * you CAN'T DO BOTH.
+ */
+ enum path_treatment state;
+ int matches_how = 0;
+ int nested_repo = 0, check_only, stop_early;
+ int old_ignored_nr, old_untracked_nr;
/* The "len-1" is to strip the final '/' */
- switch (directory_exists_in_index(istate, dirname, len-1)) {
- case index_directory:
- return path_recurse;
+ enum exist_status status = directory_exists_in_index(istate, dirname, len-1);
- case index_gitdir:
+ if (status == index_directory)
+ return path_recurse;
+ if (status == index_gitdir)
return path_none;
+ if (status != index_nonexistent)
+ BUG("Unhandled value for directory_exists_in_index: %d\n", status);
- case index_nonexistent:
- if ((dir->flags & DIR_SKIP_NESTED_GIT) ||
- !(dir->flags & DIR_NO_GITLINKS)) {
- struct strbuf sb = STRBUF_INIT;
- strbuf_addstr(&sb, dirname);
- nested_repo = is_nonbare_repository_dir(&sb);
- strbuf_release(&sb);
- }
- if (nested_repo)
- return ((dir->flags & DIR_SKIP_NESTED_GIT) ? path_none :
- (exclude ? path_excluded : path_untracked));
+ /*
+ * We don't want to descend into paths that don't match the necessary
+ * patterns. Clearly, if we don't have a pathspec, then we can't check
+ * for matching patterns. Also, if (excluded) then we know we matched
+ * the exclusion patterns so as an optimization we can skip checking
+ * for matching patterns.
+ */
+ if (pathspec && !excluded) {
+ matches_how = do_match_pathspec(istate, pathspec, dirname, len,
+ 0 /* prefix */, NULL /* seen */,
+ DO_MATCH_LEADING_PATHSPEC);
+ if (!matches_how)
+ return path_none;
+ }
- if (dir->flags & DIR_SHOW_OTHER_DIRECTORIES)
- break;
- if (exclude &&
- (dir->flags & DIR_SHOW_IGNORED_TOO) &&
- (dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING)) {
+
+ if ((dir->flags & DIR_SKIP_NESTED_GIT) ||
+ !(dir->flags & DIR_NO_GITLINKS)) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, dirname);
+ nested_repo = is_nonbare_repository_dir(&sb);
+ strbuf_release(&sb);
+ }
+ if (nested_repo)
+ return ((dir->flags & DIR_SKIP_NESTED_GIT) ? path_none :
+ (excluded ? path_excluded : path_untracked));
+
+ if (!(dir->flags & DIR_SHOW_OTHER_DIRECTORIES)) {
+ if (excluded &&
+ (dir->flags & DIR_SHOW_IGNORED_TOO) &&
+ (dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING)) {
/*
* This is an excluded directory and we are
@@ -1783,18 +1806,134 @@ static enum path_treatment treat_directory(struct dir_struct *dir,
/* This is the "show_other_directories" case */
- if (!(dir->flags & DIR_HIDE_EMPTY_DIRECTORIES))
- return exclude ? path_excluded : path_untracked;
+ /*
+ * If we have a pathspec which could match something _below_ this
+ * directory (e.g. when checking 'subdir/' having a pathspec like
+ * 'subdir/some/deep/path/file' or 'subdir/widget-*.c'), then we
+ * need to recurse.
+ */
+ if (matches_how == MATCHED_RECURSIVELY_LEADING_PATHSPEC)
+ return path_recurse;
+
+ /*
+ * Other than the path_recurse case immediately above, we only need
+ * to recurse into untracked/ignored directories if either of the
+ * following bits is set:
+ * - DIR_SHOW_IGNORED_TOO (because then we need to determine if
+ * there are ignored directories below)
+ * - DIR_HIDE_EMPTY_DIRECTORIES (because we have to determine if
+ * the directory is empty)
+ */
+ if (!(dir->flags & (DIR_SHOW_IGNORED_TOO | DIR_HIDE_EMPTY_DIRECTORIES)))
+ return excluded ? path_excluded : path_untracked;
+ /*
+ * ...and even if DIR_SHOW_IGNORED_TOO is set, we can still avoid
+ * recursing into ignored directories if the path is excluded and
+ * DIR_SHOW_IGNORED_TOO_MODE_MATCHING is also set.
+ */
+ if (excluded &&
+ (dir->flags & DIR_SHOW_IGNORED_TOO) &&
+ (dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING))
+ return path_excluded;
+
+ /*
+ * If we have we don't want to know the all the paths under an
+ * untracked or ignored directory, we still need to go into the
+ * directory to determine if it is empty (because an empty directory
+ * should be path_none instead of path_excluded or path_untracked).
+ */
+ check_only = ((dir->flags & DIR_HIDE_EMPTY_DIRECTORIES) &&
+ !(dir->flags & DIR_SHOW_IGNORED_TOO));
+
+ /*
+ * However, there's another optimization possible as a subset of
+ * check_only, based on the cases we have to consider:
+ * A) Directory matches no exclude patterns:
+ * * Directory is empty => path_none
+ * * Directory has an untracked file under it => path_untracked
+ * * Directory has only ignored files under it => path_excluded
+ * B) Directory matches an exclude pattern:
+ * * Directory is empty => path_none
+ * * Directory has an untracked file under it => path_excluded
+ * * Directory has only ignored files under it => path_excluded
+ * In case A, we can exit as soon as we've found an untracked
+ * file but otherwise have to walk all files. In case B, though,
+ * we can stop at the first file we find under the directory.
+ */
+ stop_early = check_only && excluded;
+
+ /*
+ * If /every/ file within an untracked directory is ignored, then
+ * we want to treat the directory as ignored (for e.g. status
+ * --porcelain), without listing the individual ignored files
+ * underneath. To do so, we'll save the current ignored_nr, and
+ * pop all the ones added after it if it turns out the entire
+ * directory is ignored. Also, when DIR_SHOW_IGNORED_TOO and
+ * !DIR_KEEP_UNTRACKED_CONTENTS then we don't want to show
+ * untracked paths so will need to pop all those off the last
+ * after we traverse.
+ */
+ old_ignored_nr = dir->ignored_nr;
+ old_untracked_nr = dir->nr;
+
+ /* Actually recurse into dirname now, we'll fixup the state later. */
untracked = lookup_untracked(dir->untracked, untracked,
dirname + baselen, len - baselen);
+ state = read_directory_recursive(dir, istate, dirname, len, untracked,
+ check_only, stop_early, pathspec);
+
+ /* There are a variety of reasons we may need to fixup the state... */
+ if (state == path_excluded) {
+ /* state == path_excluded implies all paths under
+ * dirname were ignored...
+ *
+ * if running e.g. `git status --porcelain --ignored=matching`,
+ * then we want to see the subpaths that are ignored.
+ *
+ * if running e.g. just `git status --porcelain`, then
+ * we just want the directory itself to be listed as ignored
+ * and not the individual paths underneath.
+ */
+ int want_ignored_subpaths =
+ ((dir->flags & DIR_SHOW_IGNORED_TOO) &&
+ (dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING));
+
+ if (want_ignored_subpaths) {
+ /*
+ * with --ignored=matching, we want the subpaths
+ * INSTEAD of the directory itself.
+ */
+ state = path_none;
+ } else {
+ int i;
+ for (i = old_ignored_nr + 1; i<dir->ignored_nr; ++i)
+ FREE_AND_NULL(dir->ignored[i]);
+ dir->ignored_nr = old_ignored_nr;
+ }
+ }
/*
- * If this is an excluded directory, then we only need to check if
- * the directory contains any files.
+ * We may need to ignore some of the untracked paths we found while
+ * traversing subdirectories.
*/
- return read_directory_recursive(dir, istate, dirname, len,
- untracked, 1, exclude, pathspec);
+ if ((dir->flags & DIR_SHOW_IGNORED_TOO) &&
+ !(dir->flags & DIR_KEEP_UNTRACKED_CONTENTS)) {
+ int i;
+ for (i = old_untracked_nr + 1; i<dir->nr; ++i)
+ FREE_AND_NULL(dir->entries[i]);
+ dir->nr = old_untracked_nr;
+ }
+
+ /*
+ * If there is nothing under the current directory and we are not
+ * hiding empty directories, then we need to report on the
+ * untracked or ignored status of the directory itself.
+ */
+ if (state == path_none && !(dir->flags & DIR_HIDE_EMPTY_DIRECTORIES))
+ state = excluded ? path_excluded : path_untracked;
+
+ return state;
}
/*
@@ -1934,85 +2073,6 @@ static int resolve_dtype(int dtype, struct index_state *istate,
return dtype;
}
-static enum path_treatment treat_one_path(struct dir_struct *dir,
- struct untracked_cache_dir *untracked,
- struct index_state *istate,
- struct strbuf *path,
- int baselen,
- const struct pathspec *pathspec,
- int dtype)
-{
- int exclude;
- int has_path_in_index = !!index_file_exists(istate, path->buf, path->len, ignore_case);
- enum path_treatment path_treatment;
-
- dtype = resolve_dtype(dtype, istate, path->buf, path->len);
-
- /* Always exclude indexed files */
- if (dtype != DT_DIR && has_path_in_index)
- return path_none;
-
- /*
- * When we are looking at a directory P in the working tree,
- * there are three cases:
- *
- * (1) P exists in the index. Everything inside the directory P in
- * the working tree needs to go when P is checked out from the
- * index.
- *
- * (2) P does not exist in the index, but there is P/Q in the index.
- * We know P will stay a directory when we check out the contents
- * of the index, but we do not know yet if there is a directory
- * P/Q in the working tree to be killed, so we need to recurse.
- *
- * (3) P does not exist in the index, and there is no P/Q in the index
- * to require P to be a directory, either. Only in this case, we
- * know that everything inside P will not be killed without
- * recursing.
- */
- if ((dir->flags & DIR_COLLECT_KILLED_ONLY) &&
- (dtype == DT_DIR) &&
- !has_path_in_index &&
- (directory_exists_in_index(istate, path->buf, path->len) == index_nonexistent))
- return path_none;
-
- exclude = is_excluded(dir, istate, path->buf, &dtype);
-
- /*
- * Excluded? If we don't explicitly want to show
- * ignored files, ignore it
- */
- if (exclude && !(dir->flags & (DIR_SHOW_IGNORED|DIR_SHOW_IGNORED_TOO)))
- return path_excluded;
-
- switch (dtype) {
- default:
- return path_none;
- case DT_DIR:
- strbuf_addch(path, '/');
- path_treatment = treat_directory(dir, istate, untracked,
- path->buf, path->len,
- baselen, exclude, pathspec);
- /*
- * If 1) we only want to return directories that
- * match an exclude pattern and 2) this directory does
- * not match an exclude pattern but all of its
- * contents are excluded, then indicate that we should
- * recurse into this directory (instead of marking the
- * directory itself as an ignored path).
- */
- if (!exclude &&
- path_treatment == path_excluded &&
- (dir->flags & DIR_SHOW_IGNORED_TOO) &&
- (dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING))
- return path_recurse;
- return path_treatment;
- case DT_REG:
- case DT_LNK:
- return exclude ? path_excluded : path_untracked;
- }
-}
-
static enum path_treatment treat_path_fast(struct dir_struct *dir,
struct untracked_cache_dir *untracked,
struct cached_dir *cdir,
@@ -2021,6 +2081,11 @@ static enum path_treatment treat_path_fast(struct dir_struct *dir,
int baselen,
const struct pathspec *pathspec)
{
+ /*
+ * WARNING: From this function, you can return path_recurse or you
+ * can call read_directory_recursive() (or neither), but
+ * you CAN'T DO BOTH.
+ */
strbuf_setlen(path, baselen);
if (!cdir->ucd) {
strbuf_addstr(path, cdir->file);
@@ -2054,6 +2119,8 @@ static enum path_treatment treat_path(struct dir_struct *dir,
int baselen,
const struct pathspec *pathspec)
{
+ int has_path_in_index, dtype, excluded;
+
if (!cdir->d_name)
return treat_path_fast(dir, untracked, cdir, istate, path,
baselen, pathspec);
@@ -2064,8 +2131,72 @@ static enum path_treatment treat_path(struct dir_struct *dir,
if (simplify_away(path->buf, path->len, pathspec))
return path_none;
- return treat_one_path(dir, untracked, istate, path, baselen, pathspec,
- cdir->d_type);
+ dtype = resolve_dtype(cdir->d_type, istate, path->buf, path->len);
+
+ /* Always exclude indexed files */
+ has_path_in_index = !!index_file_exists(istate, path->buf, path->len,
+ ignore_case);
+ if (dtype != DT_DIR && has_path_in_index)
+ return path_none;
+
+ /*
+ * When we are looking at a directory P in the working tree,
+ * there are three cases:
+ *
+ * (1) P exists in the index. Everything inside the directory P in
+ * the working tree needs to go when P is checked out from the
+ * index.
+ *
+ * (2) P does not exist in the index, but there is P/Q in the index.
+ * We know P will stay a directory when we check out the contents
+ * of the index, but we do not know yet if there is a directory
+ * P/Q in the working tree to be killed, so we need to recurse.
+ *
+ * (3) P does not exist in the index, and there is no P/Q in the index
+ * to require P to be a directory, either. Only in this case, we
+ * know that everything inside P will not be killed without
+ * recursing.
+ */
+ if ((dir->flags & DIR_COLLECT_KILLED_ONLY) &&
+ (dtype == DT_DIR) &&
+ !has_path_in_index &&
+ (directory_exists_in_index(istate, path->buf, path->len) == index_nonexistent))
+ return path_none;
+
+ excluded = is_excluded(dir, istate, path->buf, &dtype);
+
+ /*
+ * Excluded? If we don't explicitly want to show
+ * ignored files, ignore it
+ */
+ if (excluded && !(dir->flags & (DIR_SHOW_IGNORED|DIR_SHOW_IGNORED_TOO)))
+ return path_excluded;
+
+ switch (dtype) {
+ default:
+ return path_none;
+ case DT_DIR:
+ /*
+ * WARNING: Do not ignore/amend the return value from
+ * treat_directory(), and especially do not change it to return
+ * path_recurse as that can cause exponential slowdown.
+ * Instead, modify treat_directory() to return the right value.
+ */
+ strbuf_addch(path, '/');
+ return treat_directory(dir, istate, untracked,
+ path->buf, path->len,
+ baselen, excluded, pathspec);
+ case DT_REG:
+ case DT_LNK:
+ if (excluded)
+ return path_excluded;
+ if (pathspec &&
+ !do_match_pathspec(istate, pathspec, path->buf, path->len,
+ 0 /* prefix */, NULL /* seen */,
+ 0 /* flags */))
+ return path_none;
+ return path_untracked;
+ }
}
static void add_untracked(struct untracked_cache_dir *dir, const char *name)
@@ -2245,7 +2376,7 @@ static void add_path_to_appropriate_result_list(struct dir_struct *dir,
* If 'stop_at_first_file' is specified, 'path_excluded' is returned
* to signal that a file was found. This is the least significant value that
* indicates that a file was encountered that does not depend on the order of
- * whether an untracked or exluded path was encountered first.
+ * whether an untracked or excluded path was encountered first.
*
* Returns the most significant path_treatment value encountered in the scan.
* If 'stop_at_first_file' is specified, `path_excluded` is the most
@@ -2258,14 +2389,10 @@ static enum path_treatment read_directory_recursive(struct dir_struct *dir,
int stop_at_first_file, const struct pathspec *pathspec)
{
/*
- * WARNING WARNING WARNING:
- *
- * Any updates to the traversal logic here may need corresponding
- * updates in treat_leading_path(). See the commit message for the
- * commit adding this warning as well as the commit preceding it
- * for details.
+ * WARNING: Do NOT recurse unless path_recurse is returned from
+ * treat_path(). Recursing on any other return value
+ * can result in exponential slowdown.
*/
-
struct cached_dir cdir;
enum path_treatment state, subdir_state, dir_state = path_none;
struct strbuf path = STRBUF_INIT;
@@ -2287,13 +2414,7 @@ static enum path_treatment read_directory_recursive(struct dir_struct *dir,
dir_state = state;
/* recurse into subdir if instructed by treat_path */
- if ((state == path_recurse) ||
- ((state == path_untracked) &&
- (resolve_dtype(cdir.d_type, istate, path.buf, path.len) == DT_DIR) &&
- ((dir->flags & DIR_SHOW_IGNORED_TOO) ||
- (pathspec &&
- do_match_pathspec(istate, pathspec, path.buf, path.len,
- baselen, NULL, DO_MATCH_LEADING_PATHSPEC) == MATCHED_RECURSIVELY_LEADING_PATHSPEC)))) {
+ if (state == path_recurse) {
struct untracked_cache_dir *ud;
ud = lookup_untracked(dir->untracked, untracked,
path.buf + baselen,
@@ -2341,7 +2462,7 @@ static enum path_treatment read_directory_recursive(struct dir_struct *dir,
add_untracked(untracked, path.buf + baselen);
break;
}
- /* skip the dir_add_* part */
+ /* skip the add_path_to_appropriate_result_list() */
continue;
}
@@ -2377,15 +2498,6 @@ static int treat_leading_path(struct dir_struct *dir,
const char *path, int len,
const struct pathspec *pathspec)
{
- /*
- * WARNING WARNING WARNING:
- *
- * Any updates to the traversal logic here may need corresponding
- * updates in read_directory_recursive(). See 777b420347 (dir:
- * synchronize treat_leading_path() and read_directory_recursive(),
- * 2019-12-19) and its parent commit for details.
- */
-
struct strbuf sb = STRBUF_INIT;
struct strbuf subdir = STRBUF_INIT;
int prevlen, baselen;
@@ -2436,23 +2548,7 @@ static int treat_leading_path(struct dir_struct *dir,
strbuf_reset(&subdir);
strbuf_add(&subdir, path+prevlen, baselen-prevlen);
cdir.d_name = subdir.buf;
- state = treat_path(dir, NULL, &cdir, istate, &sb, prevlen,
- pathspec);
- if (state == path_untracked &&
- resolve_dtype(cdir.d_type, istate, sb.buf, sb.len) == DT_DIR &&
- (dir->flags & DIR_SHOW_IGNORED_TOO ||
- do_match_pathspec(istate, pathspec, sb.buf, sb.len,
- baselen, NULL, DO_MATCH_LEADING_PATHSPEC) == MATCHED_RECURSIVELY_LEADING_PATHSPEC)) {
- if (!match_pathspec(istate, pathspec, sb.buf, sb.len,
- 0 /* prefix */, NULL,
- 0 /* do NOT special case dirs */))
- state = path_none;
- add_path_to_appropriate_result_list(dir, NULL, &cdir,
- istate,
- &sb, baselen,
- pathspec, state);
- state = path_recurse;
- }
+ state = treat_path(dir, NULL, &cdir, istate, &sb, prevlen, pathspec);
if (state != path_recurse)
break; /* do not recurse into it */
@@ -2652,28 +2748,6 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
QSORT(dir->entries, dir->nr, cmp_dir_entry);
QSORT(dir->ignored, dir->ignored_nr, cmp_dir_entry);
- /*
- * If DIR_SHOW_IGNORED_TOO is set, read_directory_recursive() will
- * also pick up untracked contents of untracked dirs; by default
- * we discard these, but given DIR_KEEP_UNTRACKED_CONTENTS we do not.
- */
- if ((dir->flags & DIR_SHOW_IGNORED_TOO) &&
- !(dir->flags & DIR_KEEP_UNTRACKED_CONTENTS)) {
- int i, j;
-
- /* remove from dir->entries untracked contents of untracked dirs */
- for (i = j = 0; j < dir->nr; j++) {
- if (i &&
- check_dir_entry_contains(dir->entries[i - 1], dir->entries[j])) {
- FREE_AND_NULL(dir->entries[j]);
- } else {
- dir->entries[i++] = dir->entries[j];
- }
- }
-
- dir->nr = i;
- }
-
trace_performance_leave("read directory %.*s", len, path);
if (dir->untracked) {
static int force_untracked_cache = -1;
diff --git a/fast-import.c b/fast-import.c
index 202dda11a6..c98970274c 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -39,12 +39,28 @@
struct object_entry {
struct pack_idx_entry idx;
- struct object_entry *next;
+ struct hashmap_entry ent;
uint32_t type : TYPE_BITS,
pack_id : PACK_ID_BITS,
depth : DEPTH_BITS;
};
+static int object_entry_hashcmp(const void *map_data,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata)
+{
+ const struct object_id *oid = keydata;
+ const struct object_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct object_entry, ent);
+ if (oid)
+ return oidcmp(&e1->idx.oid, oid);
+
+ e2 = container_of(entry_or_key, const struct object_entry, ent);
+ return oidcmp(&e1->idx.oid, &e2->idx.oid);
+}
+
struct object_entry_pool {
struct object_entry_pool *next_pool;
struct object_entry *next_free;
@@ -178,7 +194,7 @@ static off_t pack_size;
/* Table of objects we've written. */
static unsigned int object_entry_alloc = 5000;
static struct object_entry_pool *blocks;
-static struct object_entry *object_table[1 << 16];
+static struct hashmap object_table;
static struct mark_set *marks;
static const char *export_marks_file;
static const char *import_marks_file;
@@ -455,44 +471,37 @@ static struct object_entry *new_object(struct object_id *oid)
static struct object_entry *find_object(struct object_id *oid)
{
- unsigned int h = oid->hash[0] << 8 | oid->hash[1];
- struct object_entry *e;
- for (e = object_table[h]; e; e = e->next)
- if (oideq(oid, &e->idx.oid))
- return e;
- return NULL;
+ return hashmap_get_entry_from_hash(&object_table, oidhash(oid), oid,
+ struct object_entry, ent);
}
static struct object_entry *insert_object(struct object_id *oid)
{
- unsigned int h = oid->hash[0] << 8 | oid->hash[1];
- struct object_entry *e = object_table[h];
+ struct object_entry *e;
+ unsigned int hash = oidhash(oid);
- while (e) {
- if (oideq(oid, &e->idx.oid))
- return e;
- e = e->next;
+ e = hashmap_get_entry_from_hash(&object_table, hash, oid,
+ struct object_entry, ent);
+ if (!e) {
+ e = new_object(oid);
+ e->idx.offset = 0;
+ hashmap_entry_init(&e->ent, hash);
+ hashmap_add(&object_table, &e->ent);
}
- e = new_object(oid);
- e->next = object_table[h];
- e->idx.offset = 0;
- object_table[h] = e;
return e;
}
static void invalidate_pack_id(unsigned int id)
{
- unsigned int h;
unsigned long lu;
struct tag *t;
+ struct hashmap_iter iter;
+ struct object_entry *e;
- for (h = 0; h < ARRAY_SIZE(object_table); h++) {
- struct object_entry *e;
-
- for (e = object_table[h]; e; e = e->next)
- if (e->pack_id == id)
- e->pack_id = MAX_PACK_ID;
+ hashmap_for_each_entry(&object_table, &iter, e, ent) {
+ if (e->pack_id == id)
+ e->pack_id = MAX_PACK_ID;
}
for (lu = 0; lu < branch_table_sz; lu++) {
@@ -3511,6 +3520,8 @@ int cmd_main(int argc, const char **argv)
avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
+ hashmap_init(&object_table, object_entry_hashcmp, NULL, 0);
+
/*
* We don't parse most options until after we've seen the set of
* "feature" lines at the start of the stream (which allows the command
diff --git a/fetch-pack.c b/fetch-pack.c
index 0b07b3ee73..8e98b3d4a5 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -1629,9 +1629,9 @@ static void update_shallow(struct fetch_pack_args *args,
if (args->deepen && alternate_shallow_file) {
if (*alternate_shallow_file == '\0') { /* --unshallow */
unlink_or_warn(git_path_shallow(the_repository));
- rollback_lock_file(&shallow_lock);
+ rollback_shallow_file(the_repository, &shallow_lock);
} else
- commit_lock_file(&shallow_lock);
+ commit_shallow_file(the_repository, &shallow_lock);
alternate_shallow_file = NULL;
return;
}
@@ -1655,7 +1655,7 @@ static void update_shallow(struct fetch_pack_args *args,
setup_alternate_shallow(&shallow_lock,
&alternate_shallow_file,
&extra);
- commit_lock_file(&shallow_lock);
+ commit_shallow_file(the_repository, &shallow_lock);
alternate_shallow_file = NULL;
}
oid_array_clear(&extra);
@@ -1693,7 +1693,7 @@ static void update_shallow(struct fetch_pack_args *args,
setup_alternate_shallow(&shallow_lock,
&alternate_shallow_file,
&extra);
- commit_lock_file(&shallow_lock);
+ commit_shallow_file(the_repository, &shallow_lock);
oid_array_clear(&extra);
oid_array_clear(&ref);
alternate_shallow_file = NULL;
@@ -1785,7 +1785,7 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
error(_("remote did not send all necessary objects"));
free_refs(ref_cpy);
ref_cpy = NULL;
- rollback_lock_file(&shallow_lock);
+ rollback_shallow_file(the_repository, &shallow_lock);
goto cleanup;
}
args->connectivity_checked = 1;
diff --git a/fmt-merge-msg.c b/fmt-merge-msg.c
new file mode 100644
index 0000000000..72d32bd73b
--- /dev/null
+++ b/fmt-merge-msg.c
@@ -0,0 +1,656 @@
+#include "config.h"
+#include "refs.h"
+#include "object-store.h"
+#include "diff.h"
+#include "revision.h"
+#include "tag.h"
+#include "string-list.h"
+#include "branch.h"
+#include "fmt-merge-msg.h"
+#include "commit-reach.h"
+
+static int use_branch_desc;
+
+int fmt_merge_msg_config(const char *key, const char *value, void *cb)
+{
+ if (!strcmp(key, "merge.log") || !strcmp(key, "merge.summary")) {
+ int is_bool;
+ merge_log_config = git_config_bool_or_int(key, value, &is_bool);
+ if (!is_bool && merge_log_config < 0)
+ return error("%s: negative length %s", key, value);
+ if (is_bool && merge_log_config)
+ merge_log_config = DEFAULT_MERGE_LOG_LEN;
+ } else if (!strcmp(key, "merge.branchdesc")) {
+ use_branch_desc = git_config_bool(key, value);
+ } else {
+ return git_default_config(key, value, cb);
+ }
+ return 0;
+}
+
+/* merge data per repository where the merged tips came from */
+struct src_data {
+ struct string_list branch, tag, r_branch, generic;
+ int head_status;
+};
+
+struct origin_data {
+ struct object_id oid;
+ unsigned is_local_branch:1;
+};
+
+static void init_src_data(struct src_data *data)
+{
+ data->branch.strdup_strings = 1;
+ data->tag.strdup_strings = 1;
+ data->r_branch.strdup_strings = 1;
+ data->generic.strdup_strings = 1;
+}
+
+static struct string_list srcs = STRING_LIST_INIT_DUP;
+static struct string_list origins = STRING_LIST_INIT_DUP;
+
+struct merge_parents {
+ int alloc, nr;
+ struct merge_parent {
+ struct object_id given;
+ struct object_id commit;
+ unsigned char used;
+ } *item;
+};
+
+/*
+ * I know, I know, this is inefficient, but you won't be pulling and merging
+ * hundreds of heads at a time anyway.
+ */
+static struct merge_parent *find_merge_parent(struct merge_parents *table,
+ struct object_id *given,
+ struct object_id *commit)
+{
+ int i;
+ for (i = 0; i < table->nr; i++) {
+ if (given && !oideq(&table->item[i].given, given))
+ continue;
+ if (commit && !oideq(&table->item[i].commit, commit))
+ continue;
+ return &table->item[i];
+ }
+ return NULL;
+}
+
+static void add_merge_parent(struct merge_parents *table,
+ struct object_id *given,
+ struct object_id *commit)
+{
+ if (table->nr && find_merge_parent(table, given, commit))
+ return;
+ ALLOC_GROW(table->item, table->nr + 1, table->alloc);
+ oidcpy(&table->item[table->nr].given, given);
+ oidcpy(&table->item[table->nr].commit, commit);
+ table->item[table->nr].used = 0;
+ table->nr++;
+}
+
+static int handle_line(char *line, struct merge_parents *merge_parents)
+{
+ int i, len = strlen(line);
+ struct origin_data *origin_data;
+ char *src;
+ const char *origin, *tag_name;
+ struct src_data *src_data;
+ struct string_list_item *item;
+ int pulling_head = 0;
+ struct object_id oid;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ if (len < hexsz + 3 || line[hexsz] != '\t')
+ return 1;
+
+ if (starts_with(line + hexsz + 1, "not-for-merge"))
+ return 0;
+
+ if (line[hexsz + 1] != '\t')
+ return 2;
+
+ i = get_oid_hex(line, &oid);
+ if (i)
+ return 3;
+
+ if (!find_merge_parent(merge_parents, &oid, NULL))
+ return 0; /* subsumed by other parents */
+
+ origin_data = xcalloc(1, sizeof(struct origin_data));
+ oidcpy(&origin_data->oid, &oid);
+
+ if (line[len - 1] == '\n')
+ line[len - 1] = 0;
+ line += hexsz + 2;
+
+ /*
+ * At this point, line points at the beginning of comment e.g.
+ * "branch 'frotz' of git://that/repository.git".
+ * Find the repository name and point it with src.
+ */
+ src = strstr(line, " of ");
+ if (src) {
+ *src = 0;
+ src += 4;
+ pulling_head = 0;
+ } else {
+ src = line;
+ pulling_head = 1;
+ }
+
+ item = unsorted_string_list_lookup(&srcs, src);
+ if (!item) {
+ item = string_list_append(&srcs, src);
+ item->util = xcalloc(1, sizeof(struct src_data));
+ init_src_data(item->util);
+ }
+ src_data = item->util;
+
+ if (pulling_head) {
+ origin = src;
+ src_data->head_status |= 1;
+ } else if (skip_prefix(line, "branch ", &origin)) {
+ origin_data->is_local_branch = 1;
+ string_list_append(&src_data->branch, origin);
+ src_data->head_status |= 2;
+ } else if (skip_prefix(line, "tag ", &tag_name)) {
+ origin = line;
+ string_list_append(&src_data->tag, tag_name);
+ src_data->head_status |= 2;
+ } else if (skip_prefix(line, "remote-tracking branch ", &origin)) {
+ string_list_append(&src_data->r_branch, origin);
+ src_data->head_status |= 2;
+ } else {
+ origin = src;
+ string_list_append(&src_data->generic, line);
+ src_data->head_status |= 2;
+ }
+
+ if (!strcmp(".", src) || !strcmp(src, origin)) {
+ int len = strlen(origin);
+ if (origin[0] == '\'' && origin[len - 1] == '\'')
+ origin = xmemdupz(origin + 1, len - 2);
+ } else
+ origin = xstrfmt("%s of %s", origin, src);
+ if (strcmp(".", src))
+ origin_data->is_local_branch = 0;
+ string_list_append(&origins, origin)->util = origin_data;
+ return 0;
+}
+
+static void print_joined(const char *singular, const char *plural,
+ struct string_list *list, struct strbuf *out)
+{
+ if (list->nr == 0)
+ return;
+ if (list->nr == 1) {
+ strbuf_addf(out, "%s%s", singular, list->items[0].string);
+ } else {
+ int i;
+ strbuf_addstr(out, plural);
+ for (i = 0; i < list->nr - 1; i++)
+ strbuf_addf(out, "%s%s", i > 0 ? ", " : "",
+ list->items[i].string);
+ strbuf_addf(out, " and %s", list->items[list->nr - 1].string);
+ }
+}
+
+static void add_branch_desc(struct strbuf *out, const char *name)
+{
+ struct strbuf desc = STRBUF_INIT;
+
+ if (!read_branch_desc(&desc, name)) {
+ const char *bp = desc.buf;
+ while (*bp) {
+ const char *ep = strchrnul(bp, '\n');
+ if (*ep)
+ ep++;
+ strbuf_addf(out, " : %.*s", (int)(ep - bp), bp);
+ bp = ep;
+ }
+ strbuf_complete_line(out);
+ }
+ strbuf_release(&desc);
+}
+
+#define util_as_integral(elem) ((intptr_t)((elem)->util))
+
+static void record_person_from_buf(int which, struct string_list *people,
+ const char *buffer)
+{
+ char *name_buf, *name, *name_end;
+ struct string_list_item *elem;
+ const char *field;
+
+ field = (which == 'a') ? "\nauthor " : "\ncommitter ";
+ name = strstr(buffer, field);
+ if (!name)
+ return;
+ name += strlen(field);
+ name_end = strchrnul(name, '<');
+ if (*name_end)
+ name_end--;
+ while (isspace(*name_end) && name <= name_end)
+ name_end--;
+ if (name_end < name)
+ return;
+ name_buf = xmemdupz(name, name_end - name + 1);
+
+ elem = string_list_lookup(people, name_buf);
+ if (!elem) {
+ elem = string_list_insert(people, name_buf);
+ elem->util = (void *)0;
+ }
+ elem->util = (void*)(util_as_integral(elem) + 1);
+ free(name_buf);
+}
+
+
+static void record_person(int which, struct string_list *people,
+ struct commit *commit)
+{
+ const char *buffer = get_commit_buffer(commit, NULL);
+ record_person_from_buf(which, people, buffer);
+ unuse_commit_buffer(commit, buffer);
+}
+
+static int cmp_string_list_util_as_integral(const void *a_, const void *b_)
+{
+ const struct string_list_item *a = a_, *b = b_;
+ return util_as_integral(b) - util_as_integral(a);
+}
+
+static void add_people_count(struct strbuf *out, struct string_list *people)
+{
+ if (people->nr == 1)
+ strbuf_addstr(out, people->items[0].string);
+ else if (people->nr == 2)
+ strbuf_addf(out, "%s (%d) and %s (%d)",
+ people->items[0].string,
+ (int)util_as_integral(&people->items[0]),
+ people->items[1].string,
+ (int)util_as_integral(&people->items[1]));
+ else if (people->nr)
+ strbuf_addf(out, "%s (%d) and others",
+ people->items[0].string,
+ (int)util_as_integral(&people->items[0]));
+}
+
+static void credit_people(struct strbuf *out,
+ struct string_list *them,
+ int kind)
+{
+ const char *label;
+ const char *me;
+
+ if (kind == 'a') {
+ label = "By";
+ me = git_author_info(IDENT_NO_DATE);
+ } else {
+ label = "Via";
+ me = git_committer_info(IDENT_NO_DATE);
+ }
+
+ if (!them->nr ||
+ (them->nr == 1 &&
+ me &&
+ skip_prefix(me, them->items->string, &me) &&
+ starts_with(me, " <")))
+ return;
+ strbuf_addf(out, "\n%c %s ", comment_line_char, label);
+ add_people_count(out, them);
+}
+
+static void add_people_info(struct strbuf *out,
+ struct string_list *authors,
+ struct string_list *committers)
+{
+ QSORT(authors->items, authors->nr,
+ cmp_string_list_util_as_integral);
+ QSORT(committers->items, committers->nr,
+ cmp_string_list_util_as_integral);
+
+ credit_people(out, authors, 'a');
+ credit_people(out, committers, 'c');
+}
+
+static void shortlog(const char *name,
+ struct origin_data *origin_data,
+ struct commit *head,
+ struct rev_info *rev,
+ struct fmt_merge_msg_opts *opts,
+ struct strbuf *out)
+{
+ int i, count = 0;
+ struct commit *commit;
+ struct object *branch;
+ struct string_list subjects = STRING_LIST_INIT_DUP;
+ struct string_list authors = STRING_LIST_INIT_DUP;
+ struct string_list committers = STRING_LIST_INIT_DUP;
+ int flags = UNINTERESTING | TREESAME | SEEN | SHOWN | ADDED;
+ struct strbuf sb = STRBUF_INIT;
+ const struct object_id *oid = &origin_data->oid;
+ int limit = opts->shortlog_len;
+
+ branch = deref_tag(the_repository, parse_object(the_repository, oid),
+ oid_to_hex(oid),
+ the_hash_algo->hexsz);
+ if (!branch || branch->type != OBJ_COMMIT)
+ return;
+
+ setup_revisions(0, NULL, rev, NULL);
+ add_pending_object(rev, branch, name);
+ add_pending_object(rev, &head->object, "^HEAD");
+ head->object.flags |= UNINTERESTING;
+ if (prepare_revision_walk(rev))
+ die("revision walk setup failed");
+ while ((commit = get_revision(rev)) != NULL) {
+ struct pretty_print_context ctx = {0};
+
+ if (commit->parents && commit->parents->next) {
+ /* do not list a merge but count committer */
+ if (opts->credit_people)
+ record_person('c', &committers, commit);
+ continue;
+ }
+ if (!count && opts->credit_people)
+ /* the 'tip' committer */
+ record_person('c', &committers, commit);
+ if (opts->credit_people)
+ record_person('a', &authors, commit);
+ count++;
+ if (subjects.nr > limit)
+ continue;
+
+ format_commit_message(commit, "%s", &sb, &ctx);
+ strbuf_ltrim(&sb);
+
+ if (!sb.len)
+ string_list_append(&subjects,
+ oid_to_hex(&commit->object.oid));
+ else
+ string_list_append_nodup(&subjects,
+ strbuf_detach(&sb, NULL));
+ }
+
+ if (opts->credit_people)
+ add_people_info(out, &authors, &committers);
+ if (count > limit)
+ strbuf_addf(out, "\n* %s: (%d commits)\n", name, count);
+ else
+ strbuf_addf(out, "\n* %s:\n", name);
+
+ if (origin_data->is_local_branch && use_branch_desc)
+ add_branch_desc(out, name);
+
+ for (i = 0; i < subjects.nr; i++)
+ if (i >= limit)
+ strbuf_addstr(out, " ...\n");
+ else
+ strbuf_addf(out, " %s\n", subjects.items[i].string);
+
+ clear_commit_marks((struct commit *)branch, flags);
+ clear_commit_marks(head, flags);
+ free_commit_list(rev->commits);
+ rev->commits = NULL;
+ rev->pending.nr = 0;
+
+ string_list_clear(&authors, 0);
+ string_list_clear(&committers, 0);
+ string_list_clear(&subjects, 0);
+}
+
+static void fmt_merge_msg_title(struct strbuf *out,
+ const char *current_branch)
+{
+ int i = 0;
+ char *sep = "";
+
+ strbuf_addstr(out, "Merge ");
+ for (i = 0; i < srcs.nr; i++) {
+ struct src_data *src_data = srcs.items[i].util;
+ const char *subsep = "";
+
+ strbuf_addstr(out, sep);
+ sep = "; ";
+
+ if (src_data->head_status == 1) {
+ strbuf_addstr(out, srcs.items[i].string);
+ continue;
+ }
+ if (src_data->head_status == 3) {
+ subsep = ", ";
+ strbuf_addstr(out, "HEAD");
+ }
+ if (src_data->branch.nr) {
+ strbuf_addstr(out, subsep);
+ subsep = ", ";
+ print_joined("branch ", "branches ", &src_data->branch,
+ out);
+ }
+ if (src_data->r_branch.nr) {
+ strbuf_addstr(out, subsep);
+ subsep = ", ";
+ print_joined("remote-tracking branch ", "remote-tracking branches ",
+ &src_data->r_branch, out);
+ }
+ if (src_data->tag.nr) {
+ strbuf_addstr(out, subsep);
+ subsep = ", ";
+ print_joined("tag ", "tags ", &src_data->tag, out);
+ }
+ if (src_data->generic.nr) {
+ strbuf_addstr(out, subsep);
+ print_joined("commit ", "commits ", &src_data->generic,
+ out);
+ }
+ if (strcmp(".", srcs.items[i].string))
+ strbuf_addf(out, " of %s", srcs.items[i].string);
+ }
+
+ if (!strcmp("master", current_branch))
+ strbuf_addch(out, '\n');
+ else
+ strbuf_addf(out, " into %s\n", current_branch);
+}
+
+static void fmt_tag_signature(struct strbuf *tagbuf,
+ struct strbuf *sig,
+ const char *buf,
+ unsigned long len)
+{
+ const char *tag_body = strstr(buf, "\n\n");
+ if (tag_body) {
+ tag_body += 2;
+ strbuf_add(tagbuf, tag_body, buf + len - tag_body);
+ }
+ strbuf_complete_line(tagbuf);
+ if (sig->len) {
+ strbuf_addch(tagbuf, '\n');
+ strbuf_add_commented_lines(tagbuf, sig->buf, sig->len);
+ }
+}
+
+static void fmt_merge_msg_sigs(struct strbuf *out)
+{
+ int i, tag_number = 0, first_tag = 0;
+ struct strbuf tagbuf = STRBUF_INIT;
+
+ for (i = 0; i < origins.nr; i++) {
+ struct object_id *oid = origins.items[i].util;
+ enum object_type type;
+ unsigned long size, len;
+ char *buf = read_object_file(oid, &type, &size);
+ struct signature_check sigc = { NULL };
+ struct strbuf sig = STRBUF_INIT;
+
+ if (!buf || type != OBJ_TAG)
+ goto next;
+ len = parse_signature(buf, size);
+
+ if (size == len)
+ ; /* merely annotated */
+ else if (check_signature(buf, len, buf + len, size - len, &sigc) &&
+ !sigc.gpg_output)
+ strbuf_addstr(&sig, "gpg verification failed.\n");
+ else
+ strbuf_addstr(&sig, sigc.gpg_output);
+ signature_check_clear(&sigc);
+
+ if (!tag_number++) {
+ fmt_tag_signature(&tagbuf, &sig, buf, len);
+ first_tag = i;
+ } else {
+ if (tag_number == 2) {
+ struct strbuf tagline = STRBUF_INIT;
+ strbuf_addch(&tagline, '\n');
+ strbuf_add_commented_lines(&tagline,
+ origins.items[first_tag].string,
+ strlen(origins.items[first_tag].string));
+ strbuf_insert(&tagbuf, 0, tagline.buf,
+ tagline.len);
+ strbuf_release(&tagline);
+ }
+ strbuf_addch(&tagbuf, '\n');
+ strbuf_add_commented_lines(&tagbuf,
+ origins.items[i].string,
+ strlen(origins.items[i].string));
+ fmt_tag_signature(&tagbuf, &sig, buf, len);
+ }
+ strbuf_release(&sig);
+ next:
+ free(buf);
+ }
+ if (tagbuf.len) {
+ strbuf_addch(out, '\n');
+ strbuf_addbuf(out, &tagbuf);
+ }
+ strbuf_release(&tagbuf);
+}
+
+static void find_merge_parents(struct merge_parents *result,
+ struct strbuf *in, struct object_id *head)
+{
+ struct commit_list *parents;
+ struct commit *head_commit;
+ int pos = 0, i, j;
+
+ parents = NULL;
+ while (pos < in->len) {
+ int len;
+ char *p = in->buf + pos;
+ char *newline = strchr(p, '\n');
+ const char *q;
+ struct object_id oid;
+ struct commit *parent;
+ struct object *obj;
+
+ len = newline ? newline - p : strlen(p);
+ pos += len + !!newline;
+
+ if (parse_oid_hex(p, &oid, &q) ||
+ q[0] != '\t' ||
+ q[1] != '\t')
+ continue; /* skip not-for-merge */
+ /*
+ * Do not use get_merge_parent() here; we do not have
+ * "name" here and we do not want to contaminate its
+ * util field yet.
+ */
+ obj = parse_object(the_repository, &oid);
+ parent = (struct commit *)peel_to_type(NULL, 0, obj, OBJ_COMMIT);
+ if (!parent)
+ continue;
+ commit_list_insert(parent, &parents);
+ add_merge_parent(result, &obj->oid, &parent->object.oid);
+ }
+ head_commit = lookup_commit(the_repository, head);
+ if (head_commit)
+ commit_list_insert(head_commit, &parents);
+ reduce_heads_replace(&parents);
+
+ while (parents) {
+ struct commit *cmit = pop_commit(&parents);
+ for (i = 0; i < result->nr; i++)
+ if (oideq(&result->item[i].commit, &cmit->object.oid))
+ result->item[i].used = 1;
+ }
+
+ for (i = j = 0; i < result->nr; i++) {
+ if (result->item[i].used) {
+ if (i != j)
+ result->item[j] = result->item[i];
+ j++;
+ }
+ }
+ result->nr = j;
+}
+
+
+int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
+ struct fmt_merge_msg_opts *opts)
+{
+ int i = 0, pos = 0;
+ struct object_id head_oid;
+ const char *current_branch;
+ void *current_branch_to_free;
+ struct merge_parents merge_parents;
+
+ memset(&merge_parents, 0, sizeof(merge_parents));
+
+ /* get current branch */
+ current_branch = current_branch_to_free =
+ resolve_refdup("HEAD", RESOLVE_REF_READING, &head_oid, NULL);
+ if (!current_branch)
+ die("No current branch");
+ if (starts_with(current_branch, "refs/heads/"))
+ current_branch += 11;
+
+ find_merge_parents(&merge_parents, in, &head_oid);
+
+ /* get a line */
+ while (pos < in->len) {
+ int len;
+ char *newline, *p = in->buf + pos;
+
+ newline = strchr(p, '\n');
+ len = newline ? newline - p : strlen(p);
+ pos += len + !!newline;
+ i++;
+ p[len] = 0;
+ if (handle_line(p, &merge_parents))
+ die("error in line %d: %.*s", i, len, p);
+ }
+
+ if (opts->add_title && srcs.nr)
+ fmt_merge_msg_title(out, current_branch);
+
+ if (origins.nr)
+ fmt_merge_msg_sigs(out);
+
+ if (opts->shortlog_len) {
+ struct commit *head;
+ struct rev_info rev;
+
+ head = lookup_commit_or_die(&head_oid, "HEAD");
+ repo_init_revisions(the_repository, &rev, NULL);
+ rev.commit_format = CMIT_FMT_ONELINE;
+ rev.ignore_merges = 1;
+ rev.limited = 1;
+
+ strbuf_complete_line(out);
+
+ for (i = 0; i < origins.nr; i++)
+ shortlog(origins.items[i].string,
+ origins.items[i].util,
+ head, &rev, opts, out);
+ }
+
+ strbuf_complete_line(out);
+ free(current_branch_to_free);
+ free(merge_parents.item);
+ return 0;
+}
diff --git a/fmt-merge-msg.h b/fmt-merge-msg.h
index 01e3aa88c5..f2ab0e0085 100644
--- a/fmt-merge-msg.h
+++ b/fmt-merge-msg.h
@@ -1,7 +1,20 @@
#ifndef FMT_MERGE_MSG_H
#define FMT_MERGE_MSG_H
+#include "strbuf.h"
+
+#define DEFAULT_MERGE_LOG_LEN 20
+
+struct fmt_merge_msg_opts {
+ unsigned add_title:1,
+ credit_people:1;
+ int shortlog_len;
+};
+
extern int merge_log_config;
int fmt_merge_msg_config(const char *key, const char *value, void *cb);
+int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
+ struct fmt_merge_msg_opts *);
+
#endif /* FMT_MERGE_MSG_H */
diff --git a/fsck.c b/fsck.c
index 73f30773f2..087a7f1ffc 100644
--- a/fsck.c
+++ b/fsck.c
@@ -1065,7 +1065,7 @@ static int fsck_gitmodules_fn(const char *var, const char *value, void *vdata)
{
struct fsck_gitmodules_data *data = vdata;
const char *subsection, *key;
- int subsection_len;
+ size_t subsection_len;
char *name;
if (parse_config_key(var, "submodule", &subsection, &subsection_len, &key) < 0 ||
diff --git a/fuzz-commit-graph.c b/fuzz-commit-graph.c
index 0157acbf2e..9fd1c04edd 100644
--- a/fuzz-commit-graph.c
+++ b/fuzz-commit-graph.c
@@ -1,8 +1,7 @@
#include "commit-graph.h"
#include "repository.h"
-struct commit_graph *parse_commit_graph(void *graph_map, int fd,
- size_t graph_size);
+struct commit_graph *parse_commit_graph(void *graph_map, size_t graph_size);
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);
@@ -11,7 +10,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
struct commit_graph *g;
initialize_the_repository();
- g = parse_commit_graph((void *)data, -1, size);
+ g = parse_commit_graph((void *)data, size);
repo_clear(the_repository);
free(g);
diff --git a/git-submodule.sh b/git-submodule.sh
index 89f915cae9..08e0439df0 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -48,6 +48,8 @@ depth=
progress=
dissociate=
single_branch=
+jobs=
+recommend_shallow=
die_if_unmatched ()
{
diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl
index 1a02a1242d..0959a782ec 100755
--- a/gitweb/gitweb.perl
+++ b/gitweb/gitweb.perl
@@ -4641,7 +4641,7 @@ sub git_print_log {
# print log
my $skip_blank_line = 0;
foreach my $line (@$log) {
- if ($line =~ m/^\s*([A-Z][-A-Za-z]*-[Bb]y|C[Cc]): /) {
+ if ($line =~ m/^\s*([A-Z][-A-Za-z]*-([Bb]y|[Tt]o)|C[Cc]|(Clos|Fix)es): /) {
if (! $opts{'-remove_signoff'}) {
print "<span class=\"signoff\">" . esc_html($line) . "</span><br/>\n";
$skip_blank_line = 1;
diff --git a/line-log.c b/line-log.c
index 9010e00950..40e1738dbb 100644
--- a/line-log.c
+++ b/line-log.c
@@ -519,7 +519,7 @@ static void fill_line_ends(struct repository *r,
unsigned long *ends = NULL;
char *data = NULL;
- if (diff_populate_filespec(r, spec, 0))
+ if (diff_populate_filespec(r, spec, NULL))
die("Cannot read blob %s", oid_to_hex(&spec->oid));
ALLOC_ARRAY(ends, size);
@@ -1045,12 +1045,12 @@ static int process_diff_filepair(struct rev_info *rev,
return 0;
assert(pair->two->oid_valid);
- diff_populate_filespec(rev->diffopt.repo, pair->two, 0);
+ diff_populate_filespec(rev->diffopt.repo, pair->two, NULL);
file_target.ptr = pair->two->data;
file_target.size = pair->two->size;
if (pair->one->oid_valid) {
- diff_populate_filespec(rev->diffopt.repo, pair->one, 0);
+ diff_populate_filespec(rev->diffopt.repo, pair->one, NULL);
file_parent.ptr = pair->one->data;
file_parent.size = pair->one->size;
} else {
diff --git a/ll-merge.c b/ll-merge.c
index d65a8971db..1ec0b959e0 100644
--- a/ll-merge.c
+++ b/ll-merge.c
@@ -247,7 +247,7 @@ static int read_merge_config(const char *var, const char *value, void *cb)
{
struct ll_merge_driver *fn;
const char *key, *name;
- int namelen;
+ size_t namelen;
if (!strcmp(var, "merge.default"))
return git_config_string(&default_ll_merge, var, value);
diff --git a/log-tree.c b/log-tree.c
index 0064788b25..55a68d0c61 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -81,6 +81,56 @@ const struct name_decoration *get_name_decoration(const struct object *obj)
return lookup_decoration(&name_decoration, obj);
}
+static int match_ref_pattern(const char *refname,
+ const struct string_list_item *item)
+{
+ int matched = 0;
+ if (item->util == NULL) {
+ if (!wildmatch(item->string, refname, 0))
+ matched = 1;
+ } else {
+ const char *rest;
+ if (skip_prefix(refname, item->string, &rest) &&
+ (!*rest || *rest == '/'))
+ matched = 1;
+ }
+ return matched;
+}
+
+static int ref_filter_match(const char *refname,
+ const struct decoration_filter *filter)
+{
+ struct string_list_item *item;
+ const struct string_list *exclude_patterns = filter->exclude_ref_pattern;
+ const struct string_list *include_patterns = filter->include_ref_pattern;
+ const struct string_list *exclude_patterns_config =
+ filter->exclude_ref_config_pattern;
+
+ if (exclude_patterns && exclude_patterns->nr) {
+ for_each_string_list_item(item, exclude_patterns) {
+ if (match_ref_pattern(refname, item))
+ return 0;
+ }
+ }
+
+ if (include_patterns && include_patterns->nr) {
+ for_each_string_list_item(item, include_patterns) {
+ if (match_ref_pattern(refname, item))
+ return 1;
+ }
+ return 0;
+ }
+
+ if (exclude_patterns_config && exclude_patterns_config->nr) {
+ for_each_string_list_item(item, exclude_patterns_config) {
+ if (match_ref_pattern(refname, item))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
static int add_ref_decoration(const char *refname, const struct object_id *oid,
int flags, void *cb_data)
{
@@ -88,9 +138,7 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid,
enum decoration_type type = DECORATION_NONE;
struct decoration_filter *filter = (struct decoration_filter *)cb_data;
- if (filter && !ref_filter_match(refname,
- filter->include_ref_pattern,
- filter->exclude_ref_pattern))
+ if (filter && !ref_filter_match(refname, filter))
return 0;
if (starts_with(refname, git_replace_ref_base)) {
@@ -155,6 +203,9 @@ void load_ref_decorations(struct decoration_filter *filter, int flags)
for_each_string_list_item(item, filter->include_ref_pattern) {
normalize_glob_ref(item, NULL, item->string);
}
+ for_each_string_list_item(item, filter->exclude_ref_config_pattern) {
+ normalize_glob_ref(item, NULL, item->string);
+ }
}
decoration_loaded = 1;
decoration_flags = flags;
diff --git a/log-tree.h b/log-tree.h
index e668628074..8fa79289ec 100644
--- a/log-tree.h
+++ b/log-tree.h
@@ -8,7 +8,9 @@ struct log_info {
};
struct decoration_filter {
- struct string_list *include_ref_pattern, *exclude_ref_pattern;
+ struct string_list *include_ref_pattern;
+ struct string_list *exclude_ref_pattern;
+ struct string_list *exclude_ref_config_pattern;
};
int parse_decorate_color_config(const char *var, const char *slot_name, const char *value);
diff --git a/mailinfo.c b/mailinfo.c
index 742fa376ab..5681d9130d 100644
--- a/mailinfo.c
+++ b/mailinfo.c
@@ -447,19 +447,21 @@ static int convert_to_utf8(struct mailinfo *mi,
struct strbuf *line, const char *charset)
{
char *out;
+ size_t out_len;
if (!mi->metainfo_charset || !charset || !*charset)
return 0;
if (same_encoding(mi->metainfo_charset, charset))
return 0;
- out = reencode_string(line->buf, mi->metainfo_charset, charset);
+ out = reencode_string_len(line->buf, line->len,
+ mi->metainfo_charset, charset, &out_len);
if (!out) {
mi->input_error = -1;
return error("cannot convert from %s to %s",
charset, mi->metainfo_charset);
}
- strbuf_attach(line, out, strlen(out), strlen(out));
+ strbuf_attach(line, out, out_len, out_len);
return 0;
}
@@ -1136,6 +1138,11 @@ static void handle_info(struct mailinfo *mi)
else
continue;
+ if (memchr(hdr->buf, '\0', hdr->len)) {
+ error("a NUL byte in '%s' is not allowed.", header[i]);
+ mi->input_error = -1;
+ }
+
if (!strcmp(header[i], "Subject")) {
if (!mi->keep_subject) {
cleanup_subject(mi, hdr);
diff --git a/midx.c b/midx.c
index a520e26395..9a61d3b37d 100644
--- a/midx.c
+++ b/midx.c
@@ -72,9 +72,9 @@ struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local
FREE_AND_NULL(midx_name);
midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ close(fd);
FLEX_ALLOC_STR(m, object_dir, object_dir);
- m->fd = fd;
m->data = midx_map;
m->data_len = midx_size;
m->local = local;
@@ -190,8 +190,6 @@ void close_midx(struct multi_pack_index *m)
return;
munmap((unsigned char *)m->data, m->data_len);
- close(m->fd);
- m->fd = -1;
for (i = 0; i < m->num_packs; i++) {
if (m->packs[i])
diff --git a/midx.h b/midx.h
index e6fa356b5c..b18cf53bc4 100644
--- a/midx.h
+++ b/midx.h
@@ -12,8 +12,6 @@ struct repository;
struct multi_pack_index {
struct multi_pack_index *next;
- int fd;
-
const unsigned char *data;
size_t data_len;
diff --git a/oidset.c b/oidset.c
index f63ce818f6..15d4e18c37 100644
--- a/oidset.c
+++ b/oidset.c
@@ -36,6 +36,11 @@ void oidset_clear(struct oidset *set)
oidset_init(set, 0);
}
+int oidset_size(struct oidset *set)
+{
+ return kh_size(&set->set);
+}
+
void oidset_parse_file(struct oidset *set, const char *path)
{
FILE *fp;
diff --git a/oidset.h b/oidset.h
index 3a2d9d1115..209ae7a173 100644
--- a/oidset.h
+++ b/oidset.h
@@ -55,6 +55,11 @@ int oidset_insert(struct oidset *set, const struct object_id *oid);
int oidset_remove(struct oidset *set, const struct object_id *oid);
/**
+ * Returns the number of oids in the set.
+ */
+int oidset_size(struct oidset *set);
+
+/**
* Remove all entries from the oidset, freeing any resources associated with
* it.
*/
diff --git a/parse-options.c b/parse-options.c
index 63d6bab60c..c57618d537 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -648,6 +648,7 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
int short_name;
const char *long_name;
const char *source;
+ struct strbuf help = STRBUF_INIT;
int j;
if (newopt[i].type != OPTION_ALIAS)
@@ -659,6 +660,7 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
if (!long_name)
BUG("An alias must have long option name");
+ strbuf_addf(&help, _("alias of --%s"), source);
for (j = 0; j < nr; j++) {
const char *name = options[j].long_name;
@@ -669,15 +671,10 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
if (options[j].type == OPTION_ALIAS)
BUG("No please. Nested aliases are not supported.");
- /*
- * NEEDSWORK: this is a bit inconsistent because
- * usage_with_options() on the original options[] will print
- * help string as "alias of %s" but "git cmd -h" will
- * print the original help string.
- */
memcpy(newopt + i, options + j, sizeof(*newopt));
newopt[i].short_name = short_name;
newopt[i].long_name = long_name;
+ newopt[i].help = strbuf_detach(&help, NULL);
break;
}
diff --git a/parse-options.h b/parse-options.h
index fece5ba628..46af942093 100644
--- a/parse-options.h
+++ b/parse-options.h
@@ -336,5 +336,6 @@ int parse_opt_passthru_argv(const struct option *, const char *, int);
#define OPT_CLEANUP(v) OPT_STRING(0, "cleanup", v, N_("mode"), N_("how to strip spaces and #comments from message"))
#define OPT_PATHSPEC_FROM_FILE(v) OPT_FILENAME(0, "pathspec-from-file", v, N_("read pathspec from file"))
#define OPT_PATHSPEC_FILE_NUL(v) OPT_BOOL(0, "pathspec-file-nul", v, N_("with --pathspec-from-file, pathspec elements are separated with NUL character"))
+#define OPT_AUTOSTASH(v) OPT_BOOL(0, "autostash", v, N_("automatically stash/stash pop before and after"))
#endif
diff --git a/path.c b/path.c
index 9bd717c307..8b2c753191 100644
--- a/path.c
+++ b/path.c
@@ -1535,5 +1535,6 @@ REPO_GIT_PATH_FUNC(merge_msg, "MERGE_MSG")
REPO_GIT_PATH_FUNC(merge_rr, "MERGE_RR")
REPO_GIT_PATH_FUNC(merge_mode, "MERGE_MODE")
REPO_GIT_PATH_FUNC(merge_head, "MERGE_HEAD")
+REPO_GIT_PATH_FUNC(merge_autostash, "MERGE_AUTOSTASH")
REPO_GIT_PATH_FUNC(fetch_head, "FETCH_HEAD")
REPO_GIT_PATH_FUNC(shallow, "shallow")
diff --git a/path.h b/path.h
index 14d6dcad16..1f1bf8f87a 100644
--- a/path.h
+++ b/path.h
@@ -177,11 +177,12 @@ struct path_cache {
const char *merge_rr;
const char *merge_mode;
const char *merge_head;
+ const char *merge_autostash;
const char *fetch_head;
const char *shallow;
};
-#define PATH_CACHE_INIT { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
+#define PATH_CACHE_INIT { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
const char *git_path_cherry_pick_head(struct repository *r);
const char *git_path_revert_head(struct repository *r);
@@ -190,6 +191,7 @@ const char *git_path_merge_msg(struct repository *r);
const char *git_path_merge_rr(struct repository *r);
const char *git_path_merge_mode(struct repository *r);
const char *git_path_merge_head(struct repository *r);
+const char *git_path_merge_autostash(struct repository *r);
const char *git_path_fetch_head(struct repository *r);
const char *git_path_shallow(struct repository *r);
diff --git a/promisor-remote.c b/promisor-remote.c
index 9f338c945f..baaea12fd6 100644
--- a/promisor-remote.c
+++ b/promisor-remote.c
@@ -101,7 +101,7 @@ static void promisor_remote_move_to_tail(struct promisor_remote *r,
static int promisor_remote_config(const char *var, const char *value, void *data)
{
const char *name;
- int namelen;
+ size_t namelen;
const char *subkey;
if (!strcmp(var, "core.partialclonefilter"))
@@ -241,6 +241,9 @@ int promisor_remote_get_direct(struct repository *repo,
int to_free = 0;
int res = -1;
+ if (oid_nr == 0)
+ return 0;
+
promisor_remote_init();
for (r = promisors; r; r = r->next) {
diff --git a/promisor-remote.h b/promisor-remote.h
index 737bac3a33..6343c47d18 100644
--- a/promisor-remote.h
+++ b/promisor-remote.h
@@ -20,6 +20,14 @@ struct promisor_remote {
void promisor_remote_reinit(void);
struct promisor_remote *promisor_remote_find(const char *remote_name);
int has_promisor_remote(void);
+
+/*
+ * Fetches all requested objects from all promisor remotes, trying them one at
+ * a time until all objects are fetched. Returns 0 upon success, and non-zero
+ * otherwise.
+ *
+ * If oid_nr is 0, this function returns 0 (success) immediately.
+ */
int promisor_remote_get_direct(struct repository *repo,
const struct object_id *oids,
int oid_nr);
diff --git a/protocol.c b/protocol.c
index 803bef5c87..d390391eba 100644
--- a/protocol.c
+++ b/protocol.c
@@ -39,7 +39,7 @@ enum protocol_version get_protocol_version_config(void)
return env;
}
- return protocol_v2;
+ return protocol_v0;
}
enum protocol_version determine_protocol_version_server(void)
diff --git a/prune-packed.c b/prune-packed.c
new file mode 100644
index 0000000000..261520b472
--- /dev/null
+++ b/prune-packed.c
@@ -0,0 +1,43 @@
+#include "object-store.h"
+#include "packfile.h"
+#include "progress.h"
+#include "prune-packed.h"
+
+static struct progress *progress;
+
+static int prune_subdir(unsigned int nr, const char *path, void *data)
+{
+ int *opts = data;
+ display_progress(progress, nr + 1);
+ if (!(*opts & PRUNE_PACKED_DRY_RUN))
+ rmdir(path);
+ return 0;
+}
+
+static int prune_object(const struct object_id *oid, const char *path,
+ void *data)
+{
+ int *opts = data;
+
+ if (!has_object_pack(oid))
+ return 0;
+
+ if (*opts & PRUNE_PACKED_DRY_RUN)
+ printf("rm -f %s\n", path);
+ else
+ unlink_or_warn(path);
+ return 0;
+}
+
+void prune_packed_objects(int opts)
+{
+ if (opts & PRUNE_PACKED_VERBOSE)
+ progress = start_delayed_progress(_("Removing duplicate objects"), 256);
+
+ for_each_loose_file_in_objdir(get_object_directory(),
+ prune_object, NULL, prune_subdir, &opts);
+
+ /* Ensure we show 100% before finishing progress */
+ display_progress(progress, 256);
+ stop_progress(&progress);
+}
diff --git a/prune-packed.h b/prune-packed.h
new file mode 100644
index 0000000000..936fa9df23
--- /dev/null
+++ b/prune-packed.h
@@ -0,0 +1,9 @@
+#ifndef PRUNE_PACKED_H
+#define PRUNE_PACKED_H
+
+#define PRUNE_PACKED_DRY_RUN 01
+#define PRUNE_PACKED_VERBOSE 02
+
+void prune_packed_objects(int);
+
+#endif
diff --git a/range-diff.c b/range-diff.c
index f745567cf6..40af086281 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -63,6 +63,8 @@ static int read_patches(const char *range, struct string_list *list,
"--output-indicator-old=<",
"--output-indicator-context=#",
"--no-abbrev-commit",
+ "--pretty=medium",
+ "--notes",
NULL);
if (other_arg)
argv_array_pushv(&cp.args, other_arg->argv);
@@ -106,20 +108,34 @@ static int read_patches(const char *range, struct string_list *list,
continue;
}
+ if (!util) {
+ error(_("could not parse first line of `log` output: "
+ "did not start with 'commit ': '%s'"),
+ line);
+ string_list_clear(list, 1);
+ strbuf_release(&buf);
+ strbuf_release(&contents);
+ finish_command(&cp);
+ return -1;
+ }
+
if (starts_with(line, "diff --git")) {
struct patch patch = { 0 };
struct strbuf root = STRBUF_INIT;
int linenr = 0;
+ int orig_len;
in_header = 0;
strbuf_addch(&buf, '\n');
if (!util->diff_offset)
util->diff_offset = buf.len;
line[len - 1] = '\n';
+ orig_len = len;
len = parse_git_diff_header(&root, &linenr, 0, line,
len, size, &patch);
if (len < 0)
- die(_("could not parse git header '%.*s'"), (int)len, line);
+ die(_("could not parse git header '%.*s'"),
+ orig_len, line);
strbuf_addstr(&buf, " ## ");
if (patch.is_new > 0)
strbuf_addf(&buf, "%s (new)", patch.new_name);
diff --git a/refs.c b/refs.c
index b8759116cd..224ff66c7b 100644
--- a/refs.c
+++ b/refs.c
@@ -321,50 +321,6 @@ int ref_exists(const char *refname)
return refs_ref_exists(get_main_ref_store(the_repository), refname);
}
-static int match_ref_pattern(const char *refname,
- const struct string_list_item *item)
-{
- int matched = 0;
- if (item->util == NULL) {
- if (!wildmatch(item->string, refname, 0))
- matched = 1;
- } else {
- const char *rest;
- if (skip_prefix(refname, item->string, &rest) &&
- (!*rest || *rest == '/'))
- matched = 1;
- }
- return matched;
-}
-
-int ref_filter_match(const char *refname,
- const struct string_list *include_patterns,
- const struct string_list *exclude_patterns)
-{
- struct string_list_item *item;
-
- if (exclude_patterns && exclude_patterns->nr) {
- for_each_string_list_item(item, exclude_patterns) {
- if (match_ref_pattern(refname, item))
- return 0;
- }
- }
-
- if (include_patterns && include_patterns->nr) {
- int found = 0;
- for_each_string_list_item(item, include_patterns) {
- if (match_ref_pattern(refname, item)) {
- found = 1;
- break;
- }
- }
-
- if (!found)
- return 0;
- }
- return 1;
-}
-
static int filter_refs(const char *refname, const struct object_id *oid,
int flags, void *data)
{
diff --git a/refs.h b/refs.h
index 545029c6d8..a92d2c74c8 100644
--- a/refs.h
+++ b/refs.h
@@ -361,18 +361,6 @@ int for_each_rawref(each_ref_fn fn, void *cb_data);
void normalize_glob_ref(struct string_list_item *item, const char *prefix,
const char *pattern);
-/*
- * Returns 0 if refname matches any of the exclude_patterns, or if it doesn't
- * match any of the include_patterns. Returns 1 otherwise.
- *
- * If pattern list is NULL or empty, matching against that list is skipped.
- * This has the effect of matching everything by default, unless the user
- * specifies rules otherwise.
- */
-int ref_filter_match(const char *refname,
- const struct string_list *include_patterns,
- const struct string_list *exclude_patterns);
-
static inline const char *has_glob_specials(const char *pattern)
{
return strpbrk(pattern, "?*[");
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 561c33ac8a..6516c7bc8c 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -2565,16 +2565,18 @@ static void files_transaction_cleanup(struct files_ref_store *refs,
}
}
- if (backend_data->packed_transaction &&
- ref_transaction_abort(backend_data->packed_transaction, &err)) {
- error("error aborting transaction: %s", err.buf);
- strbuf_release(&err);
- }
+ if (backend_data) {
+ if (backend_data->packed_transaction &&
+ ref_transaction_abort(backend_data->packed_transaction, &err)) {
+ error("error aborting transaction: %s", err.buf);
+ strbuf_release(&err);
+ }
- if (backend_data->packed_refs_locked)
- packed_refs_unlock(refs->packed_ref_store);
+ if (backend_data->packed_refs_locked)
+ packed_refs_unlock(refs->packed_ref_store);
- free(backend_data);
+ free(backend_data);
+ }
transaction->state = REF_TRANSACTION_CLOSED;
}
diff --git a/remote.c b/remote.c
index c43196ec06..534c6426f1 100644
--- a/remote.c
+++ b/remote.c
@@ -174,54 +174,43 @@ static void add_merge(struct branch *branch, const char *name)
branch->merge_name[branch->merge_nr++] = name;
}
-static struct branch *make_branch(const char *name, int len)
+static struct branch *make_branch(const char *name, size_t len)
{
struct branch *ret;
int i;
for (i = 0; i < branches_nr; i++) {
- if (len ? (!strncmp(name, branches[i]->name, len) &&
- !branches[i]->name[len]) :
- !strcmp(name, branches[i]->name))
+ if (!strncmp(name, branches[i]->name, len) &&
+ !branches[i]->name[len])
return branches[i];
}
ALLOC_GROW(branches, branches_nr + 1, branches_alloc);
ret = xcalloc(1, sizeof(struct branch));
branches[branches_nr++] = ret;
- if (len)
- ret->name = xstrndup(name, len);
- else
- ret->name = xstrdup(name);
+ ret->name = xstrndup(name, len);
ret->refname = xstrfmt("refs/heads/%s", ret->name);
return ret;
}
-static struct rewrite *make_rewrite(struct rewrites *r, const char *base, int len)
+static struct rewrite *make_rewrite(struct rewrites *r,
+ const char *base, size_t len)
{
struct rewrite *ret;
int i;
for (i = 0; i < r->rewrite_nr; i++) {
- if (len
- ? (len == r->rewrite[i]->baselen &&
- !strncmp(base, r->rewrite[i]->base, len))
- : !strcmp(base, r->rewrite[i]->base))
+ if (len == r->rewrite[i]->baselen &&
+ !strncmp(base, r->rewrite[i]->base, len))
return r->rewrite[i];
}
ALLOC_GROW(r->rewrite, r->rewrite_nr + 1, r->rewrite_alloc);
ret = xcalloc(1, sizeof(struct rewrite));
r->rewrite[r->rewrite_nr++] = ret;
- if (len) {
- ret->base = xstrndup(base, len);
- ret->baselen = len;
- }
- else {
- ret->base = xstrdup(base);
- ret->baselen = strlen(base);
- }
+ ret->base = xstrndup(base, len);
+ ret->baselen = len;
return ret;
}
@@ -316,7 +305,7 @@ static void read_branches_file(struct remote *remote)
static int handle_config(const char *key, const char *value, void *cb)
{
const char *name;
- int namelen;
+ size_t namelen;
const char *subkey;
struct remote *remote;
struct branch *branch;
@@ -470,7 +459,7 @@ static void read_config(void)
const char *head_ref = resolve_ref_unsafe("HEAD", 0, NULL, &flag);
if (head_ref && (flag & REF_ISSYMREF) &&
skip_prefix(head_ref, "refs/heads/", &head_ref)) {
- current_branch = make_branch(head_ref, 0);
+ current_branch = make_branch(head_ref, strlen(head_ref));
}
}
git_config(handle_config, NULL);
@@ -1584,7 +1573,7 @@ struct branch *branch_get(const char *name)
if (!name || !*name || !strcmp(name, "HEAD"))
ret = current_branch;
else
- ret = make_branch(name, 0);
+ ret = make_branch(name, strlen(name));
set_merge(ret);
return ret;
}
diff --git a/reset.c b/reset.c
new file mode 100644
index 0000000000..2f4fbd07c5
--- /dev/null
+++ b/reset.c
@@ -0,0 +1,141 @@
+#include "git-compat-util.h"
+#include "cache-tree.h"
+#include "lockfile.h"
+#include "refs.h"
+#include "reset.h"
+#include "run-command.h"
+#include "tree-walk.h"
+#include "tree.h"
+#include "unpack-trees.h"
+
+int reset_head(struct repository *r, struct object_id *oid, const char *action,
+ const char *switch_to_branch, unsigned flags,
+ const char *reflog_orig_head, const char *reflog_head,
+ const char *default_reflog_action)
+{
+ unsigned detach_head = flags & RESET_HEAD_DETACH;
+ unsigned reset_hard = flags & RESET_HEAD_HARD;
+ unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ unsigned refs_only = flags & RESET_HEAD_REFS_ONLY;
+ unsigned update_orig_head = flags & RESET_ORIG_HEAD;
+ struct object_id head_oid;
+ struct tree_desc desc[2] = { { NULL }, { NULL } };
+ struct lock_file lock = LOCK_INIT;
+ struct unpack_trees_options unpack_tree_opts;
+ struct tree *tree;
+ const char *reflog_action;
+ struct strbuf msg = STRBUF_INIT;
+ size_t prefix_len;
+ struct object_id *orig = NULL, oid_orig,
+ *old_orig = NULL, oid_old_orig;
+ int ret = 0, nr = 0;
+
+ if (switch_to_branch && !starts_with(switch_to_branch, "refs/"))
+ BUG("Not a fully qualified branch: '%s'", switch_to_branch);
+
+ if (!refs_only && repo_hold_locked_index(r, &lock, LOCK_REPORT_ON_ERROR) < 0) {
+ ret = -1;
+ goto leave_reset_head;
+ }
+
+ if ((!oid || !reset_hard) && get_oid("HEAD", &head_oid)) {
+ ret = error(_("could not determine HEAD revision"));
+ goto leave_reset_head;
+ }
+
+ if (!oid)
+ oid = &head_oid;
+
+ if (refs_only)
+ goto reset_head_refs;
+
+ memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts));
+ setup_unpack_trees_porcelain(&unpack_tree_opts, action);
+ unpack_tree_opts.head_idx = 1;
+ unpack_tree_opts.src_index = r->index;
+ unpack_tree_opts.dst_index = r->index;
+ unpack_tree_opts.fn = reset_hard ? oneway_merge : twoway_merge;
+ unpack_tree_opts.update = 1;
+ unpack_tree_opts.merge = 1;
+ init_checkout_metadata(&unpack_tree_opts.meta, switch_to_branch, oid, NULL);
+ if (!detach_head)
+ unpack_tree_opts.reset = 1;
+
+ if (repo_read_index_unmerged(r) < 0) {
+ ret = error(_("could not read index"));
+ goto leave_reset_head;
+ }
+
+ if (!reset_hard && !fill_tree_descriptor(r, &desc[nr++], &head_oid)) {
+ ret = error(_("failed to find tree of %s"),
+ oid_to_hex(&head_oid));
+ goto leave_reset_head;
+ }
+
+ if (!fill_tree_descriptor(r, &desc[nr++], oid)) {
+ ret = error(_("failed to find tree of %s"), oid_to_hex(oid));
+ goto leave_reset_head;
+ }
+
+ if (unpack_trees(nr, desc, &unpack_tree_opts)) {
+ ret = -1;
+ goto leave_reset_head;
+ }
+
+ tree = parse_tree_indirect(oid);
+ prime_cache_tree(r, r->index, tree);
+
+ if (write_locked_index(r->index, &lock, COMMIT_LOCK) < 0) {
+ ret = error(_("could not write index"));
+ goto leave_reset_head;
+ }
+
+reset_head_refs:
+ reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
+ strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : default_reflog_action);
+ prefix_len = msg.len;
+
+ if (update_orig_head) {
+ if (!get_oid("ORIG_HEAD", &oid_old_orig))
+ old_orig = &oid_old_orig;
+ if (!get_oid("HEAD", &oid_orig)) {
+ orig = &oid_orig;
+ if (!reflog_orig_head) {
+ strbuf_addstr(&msg, "updating ORIG_HEAD");
+ reflog_orig_head = msg.buf;
+ }
+ update_ref(reflog_orig_head, "ORIG_HEAD", orig,
+ old_orig, 0, UPDATE_REFS_MSG_ON_ERR);
+ } else if (old_orig)
+ delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ }
+
+ if (!reflog_head) {
+ strbuf_setlen(&msg, prefix_len);
+ strbuf_addstr(&msg, "updating HEAD");
+ reflog_head = msg.buf;
+ }
+ if (!switch_to_branch)
+ ret = update_ref(reflog_head, "HEAD", oid, orig,
+ detach_head ? REF_NO_DEREF : 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ else {
+ ret = update_ref(reflog_head, switch_to_branch, oid,
+ NULL, 0, UPDATE_REFS_MSG_ON_ERR);
+ if (!ret)
+ ret = create_symref("HEAD", switch_to_branch,
+ reflog_head);
+ }
+ if (run_hook)
+ run_hook_le(NULL, "post-checkout",
+ oid_to_hex(orig ? orig : &null_oid),
+ oid_to_hex(oid), "1", NULL);
+
+leave_reset_head:
+ strbuf_release(&msg);
+ rollback_lock_file(&lock);
+ while (nr)
+ free((void *)desc[--nr].buffer);
+ return ret;
+
+}
diff --git a/reset.h b/reset.h
new file mode 100644
index 0000000000..12f83c78e2
--- /dev/null
+++ b/reset.h
@@ -0,0 +1,20 @@
+#ifndef RESET_H
+#define RESET_H
+
+#include "hash.h"
+#include "repository.h"
+
+#define GIT_REFLOG_ACTION_ENVIRONMENT "GIT_REFLOG_ACTION"
+
+#define RESET_HEAD_DETACH (1<<0)
+#define RESET_HEAD_HARD (1<<1)
+#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2)
+#define RESET_HEAD_REFS_ONLY (1<<3)
+#define RESET_ORIG_HEAD (1<<4)
+
+int reset_head(struct repository *r, struct object_id *oid, const char *action,
+ const char *switch_to_branch, unsigned flags,
+ const char *reflog_orig_head, const char *reflog_head,
+ const char *default_reflog_action);
+
+#endif
diff --git a/revision.c b/revision.c
index 5bc96444b6..60cca8c0b9 100644
--- a/revision.c
+++ b/revision.c
@@ -29,6 +29,8 @@
#include "prio-queue.h"
#include "hashmap.h"
#include "utf8.h"
+#include "bloom.h"
+#include "json-writer.h"
volatile show_early_output_fn_t show_early_output;
@@ -624,11 +626,133 @@ static void file_change(struct diff_options *options,
options->flags.has_changes = 1;
}
+static int bloom_filter_atexit_registered;
+static unsigned int count_bloom_filter_maybe;
+static unsigned int count_bloom_filter_definitely_not;
+static unsigned int count_bloom_filter_false_positive;
+static unsigned int count_bloom_filter_not_present;
+static unsigned int count_bloom_filter_length_zero;
+
+static void trace2_bloom_filter_statistics_atexit(void)
+{
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ jw_object_intmax(&jw, "filter_not_present", count_bloom_filter_not_present);
+ jw_object_intmax(&jw, "zero_length_filter", count_bloom_filter_length_zero);
+ jw_object_intmax(&jw, "maybe", count_bloom_filter_maybe);
+ jw_object_intmax(&jw, "definitely_not", count_bloom_filter_definitely_not);
+ jw_object_intmax(&jw, "false_positive", count_bloom_filter_false_positive);
+ jw_end(&jw);
+
+ trace2_data_json("bloom", the_repository, "statistics", &jw);
+
+ jw_release(&jw);
+}
+
+static int forbid_bloom_filters(struct pathspec *spec)
+{
+ if (spec->has_wildcard)
+ return 1;
+ if (spec->nr > 1)
+ return 1;
+ if (spec->magic & ~PATHSPEC_LITERAL)
+ return 1;
+ if (spec->nr && (spec->items[0].magic & ~PATHSPEC_LITERAL))
+ return 1;
+
+ return 0;
+}
+
+static void prepare_to_use_bloom_filter(struct rev_info *revs)
+{
+ struct pathspec_item *pi;
+ char *path_alloc = NULL;
+ const char *path;
+ int last_index;
+ int len;
+
+ if (!revs->commits)
+ return;
+
+ if (forbid_bloom_filters(&revs->prune_data))
+ return;
+
+ repo_parse_commit(revs->repo, revs->commits->item);
+
+ if (!revs->repo->objects->commit_graph)
+ return;
+
+ revs->bloom_filter_settings = revs->repo->objects->commit_graph->bloom_filter_settings;
+ if (!revs->bloom_filter_settings)
+ return;
+
+ pi = &revs->pruning.pathspec.items[0];
+ last_index = pi->len - 1;
+
+ /* remove single trailing slash from path, if needed */
+ if (pi->match[last_index] == '/') {
+ path_alloc = xstrdup(pi->match);
+ path_alloc[last_index] = '\0';
+ path = path_alloc;
+ } else
+ path = pi->match;
+
+ len = strlen(path);
+
+ revs->bloom_key = xmalloc(sizeof(struct bloom_key));
+ fill_bloom_key(path, len, revs->bloom_key, revs->bloom_filter_settings);
+
+ if (trace2_is_enabled() && !bloom_filter_atexit_registered) {
+ atexit(trace2_bloom_filter_statistics_atexit);
+ bloom_filter_atexit_registered = 1;
+ }
+
+ free(path_alloc);
+}
+
+static int check_maybe_different_in_bloom_filter(struct rev_info *revs,
+ struct commit *commit)
+{
+ struct bloom_filter *filter;
+ int result;
+
+ if (!revs->repo->objects->commit_graph)
+ return -1;
+
+ if (commit->generation == GENERATION_NUMBER_INFINITY)
+ return -1;
+
+ filter = get_bloom_filter(revs->repo, commit, 0);
+
+ if (!filter) {
+ count_bloom_filter_not_present++;
+ return -1;
+ }
+
+ if (!filter->len) {
+ count_bloom_filter_length_zero++;
+ return -1;
+ }
+
+ result = bloom_filter_contains(filter,
+ revs->bloom_key,
+ revs->bloom_filter_settings);
+
+ if (result)
+ count_bloom_filter_maybe++;
+ else
+ count_bloom_filter_definitely_not++;
+
+ return result;
+}
+
static int rev_compare_tree(struct rev_info *revs,
- struct commit *parent, struct commit *commit)
+ struct commit *parent, struct commit *commit, int nth_parent)
{
struct tree *t1 = get_commit_tree(parent);
struct tree *t2 = get_commit_tree(commit);
+ int bloom_ret = 1;
if (!t1)
return REV_TREE_NEW;
@@ -653,11 +777,23 @@ static int rev_compare_tree(struct rev_info *revs,
return REV_TREE_SAME;
}
+ if (revs->bloom_key && !nth_parent) {
+ bloom_ret = check_maybe_different_in_bloom_filter(revs, commit);
+
+ if (bloom_ret == 0)
+ return REV_TREE_SAME;
+ }
+
tree_difference = REV_TREE_SAME;
revs->pruning.flags.has_changes = 0;
if (diff_tree_oid(&t1->object.oid, &t2->object.oid, "",
&revs->pruning) < 0)
return REV_TREE_DIFFERENT;
+
+ if (!nth_parent)
+ if (bloom_ret == 1 && tree_difference == REV_TREE_SAME)
+ count_bloom_filter_false_positive++;
+
return tree_difference;
}
@@ -855,7 +991,7 @@ static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit)
die("cannot simplify commit %s (because of %s)",
oid_to_hex(&commit->object.oid),
oid_to_hex(&p->object.oid));
- switch (rev_compare_tree(revs, p, commit)) {
+ switch (rev_compare_tree(revs, p, commit, nth_parent)) {
case REV_TREE_SAME:
if (!revs->simplify_history || !relevant_commit(p)) {
/* Even if a merge with an uninteresting
@@ -3385,6 +3521,8 @@ int prepare_revision_walk(struct rev_info *revs)
FOR_EACH_OBJECT_PROMISOR_ONLY);
}
+ if (revs->pruning.pathspec.nr == 1 && !revs->reflog_info)
+ prepare_to_use_bloom_filter(revs);
if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED)
commit_list_sort_by_date(&revs->commits);
if (revs->no_walk)
@@ -3402,6 +3540,7 @@ int prepare_revision_walk(struct rev_info *revs)
simplify_merges(revs);
if (revs->children.name)
set_children(revs);
+
return 0;
}
diff --git a/revision.h b/revision.h
index c1af164b30..93491b79d4 100644
--- a/revision.h
+++ b/revision.h
@@ -59,6 +59,8 @@ struct repository;
struct rev_info;
struct string_list;
struct saved_parents;
+struct bloom_key;
+struct bloom_filter_settings;
define_shared_commit_slab(revision_sources, char *);
struct rev_cmdline_info {
@@ -296,6 +298,15 @@ struct rev_info {
struct revision_sources *sources;
struct topo_walk_info *topo_walk_info;
+
+ /* Commit graph bloom filter fields */
+ /* The bloom filter key for the pathspec */
+ struct bloom_key *bloom_key;
+ /*
+ * The bloom filter settings used to generate the key.
+ * This is loaded from the commit-graph being used.
+ */
+ struct bloom_filter_settings *bloom_filter_settings;
};
int ref_excluded(struct string_list *, const char *path);
diff --git a/send-pack.c b/send-pack.c
index da4741ce4a..d1b7edc995 100644
--- a/send-pack.c
+++ b/send-pack.c
@@ -190,10 +190,8 @@ static int receive_status(struct packet_reader *reader, struct ref *refs)
if (reader->line[0] == 'o' && reader->line[1] == 'k')
hint->status = REF_STATUS_OK;
- else {
+ else
hint->status = REF_STATUS_REMOTE_REJECT;
- ret = -1;
- }
hint->remote_status = xstrdup_or_null(msg);
/* start our next search from the next ref */
hint = hint->next;
@@ -322,29 +320,6 @@ free_return:
return update_seen;
}
-
-static int atomic_push_failure(struct send_pack_args *args,
- struct ref *remote_refs,
- struct ref *failing_ref)
-{
- struct ref *ref;
- /* Mark other refs as failed */
- for (ref = remote_refs; ref; ref = ref->next) {
- if (!ref->peer_ref && !args->send_mirror)
- continue;
-
- switch (ref->status) {
- case REF_STATUS_EXPECTING_REPORT:
- ref->status = REF_STATUS_ATOMIC_PUSH_FAILED;
- continue;
- default:
- break; /* do nothing */
- }
- }
- return error("atomic push failed for ref %s. status: %d\n",
- failing_ref->name, failing_ref->status);
-}
-
#define NONCE_LEN_LIMIT 256
static void reject_invalid_nonce(const char *nonce, int len)
@@ -489,7 +464,10 @@ int send_pack(struct send_pack_args *args,
if (use_atomic) {
strbuf_release(&req_buf);
strbuf_release(&cap_buf);
- return atomic_push_failure(args, remote_refs, ref);
+ reject_atomic_push(remote_refs, args->send_mirror);
+ error("atomic push failed for ref %s. status: %d\n",
+ ref->name, ref->status);
+ return args->porcelain ? 0 : -1;
}
/* else fallthrough */
default:
diff --git a/sequencer.c b/sequencer.c
index f30bb73c70..9d1b3e7d4f 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -32,6 +32,7 @@
#include "alias.h"
#include "commit-reach.h"
#include "rebase-interactive.h"
+#include "reset.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
@@ -419,25 +420,15 @@ static int write_message(const void *buf, size_t len, const char *filename,
return 0;
}
-/*
- * Reads a file that was presumably written by a shell script, i.e. with an
- * end-of-line marker that needs to be stripped.
- *
- * Note that only the last end-of-line marker is stripped, consistent with the
- * behavior of "$(cat path)" in a shell script.
- *
- * Returns 1 if the file was read, 0 if it could not be read or does not exist.
- */
-static int read_oneliner(struct strbuf *buf,
- const char *path, int skip_if_empty)
+int read_oneliner(struct strbuf *buf,
+ const char *path, unsigned flags)
{
int orig_len = buf->len;
- if (!file_exists(path))
- return 0;
-
if (strbuf_read_file(buf, path, 0) < 0) {
- warning_errno(_("could not read '%s'"), path);
+ if ((flags & READ_ONELINER_WARN_MISSING) ||
+ (errno != ENOENT && errno != ENOTDIR))
+ warning_errno(_("could not read '%s'"), path);
return 0;
}
@@ -447,7 +438,7 @@ static int read_oneliner(struct strbuf *buf,
buf->buf[buf->len] = '\0';
}
- if (skip_if_empty && buf->len == orig_len)
+ if ((flags & READ_ONELINER_SKIP_IF_EMPTY) && buf->len == orig_len)
return 0;
return 1;
@@ -2504,8 +2495,10 @@ static int read_populate_opts(struct replay_opts *opts)
{
if (is_rebase_i(opts)) {
struct strbuf buf = STRBUF_INIT;
+ int ret = 0;
- if (read_oneliner(&buf, rebase_path_gpg_sign_opt(), 1)) {
+ if (read_oneliner(&buf, rebase_path_gpg_sign_opt(),
+ READ_ONELINER_SKIP_IF_EMPTY)) {
if (!starts_with(buf.buf, "-S"))
strbuf_reset(&buf);
else {
@@ -2515,7 +2508,8 @@ static int read_populate_opts(struct replay_opts *opts)
strbuf_reset(&buf);
}
- if (read_oneliner(&buf, rebase_path_allow_rerere_autoupdate(), 1)) {
+ if (read_oneliner(&buf, rebase_path_allow_rerere_autoupdate(),
+ READ_ONELINER_SKIP_IF_EMPTY)) {
if (!strcmp(buf.buf, "--rerere-autoupdate"))
opts->allow_rerere_auto = RERERE_AUTOUPDATE;
else if (!strcmp(buf.buf, "--no-rerere-autoupdate"))
@@ -2544,10 +2538,11 @@ static int read_populate_opts(struct replay_opts *opts)
opts->keep_redundant_commits = 1;
read_strategy_opts(opts, &buf);
- strbuf_release(&buf);
+ strbuf_reset(&buf);
if (read_oneliner(&opts->current_fixups,
- rebase_path_current_fixups(), 1)) {
+ rebase_path_current_fixups(),
+ READ_ONELINER_SKIP_IF_EMPTY)) {
const char *p = opts->current_fixups.buf;
opts->current_fixup_count = 1;
while ((p = strchr(p, '\n'))) {
@@ -2557,12 +2552,16 @@ static int read_populate_opts(struct replay_opts *opts)
}
if (read_oneliner(&buf, rebase_path_squash_onto(), 0)) {
- if (get_oid_hex(buf.buf, &opts->squash_onto) < 0)
- return error(_("unusable squash-onto"));
+ if (get_oid_hex(buf.buf, &opts->squash_onto) < 0) {
+ ret = error(_("unusable squash-onto"));
+ goto done_rebase_i;
+ }
opts->have_squash_onto = 1;
}
- return 0;
+done_rebase_i:
+ strbuf_release(&buf);
+ return ret;
}
if (!file_exists(git_path_opts_file()))
@@ -3677,25 +3676,71 @@ static enum todo_command peek_command(struct todo_list *todo_list, int offset)
return -1;
}
-static int apply_autostash(struct replay_opts *opts)
+void create_autostash(struct repository *r, const char *path,
+ const char *default_reflog_action)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct lock_file lock_file = LOCK_INIT;
+ int fd;
+
+ fd = repo_hold_locked_index(r, &lock_file, 0);
+ refresh_index(r->index, REFRESH_QUIET, NULL, NULL, NULL);
+ if (0 <= fd)
+ repo_update_index_if_able(r, &lock_file);
+ rollback_lock_file(&lock_file);
+
+ if (has_unstaged_changes(r, 1) ||
+ has_uncommitted_changes(r, 1)) {
+ struct child_process stash = CHILD_PROCESS_INIT;
+ struct object_id oid;
+
+ argv_array_pushl(&stash.args,
+ "stash", "create", "autostash", NULL);
+ stash.git_cmd = 1;
+ stash.no_stdin = 1;
+ strbuf_reset(&buf);
+ if (capture_command(&stash, &buf, GIT_MAX_HEXSZ))
+ die(_("Cannot autostash"));
+ strbuf_trim_trailing_newline(&buf);
+ if (get_oid(buf.buf, &oid))
+ die(_("Unexpected stash response: '%s'"),
+ buf.buf);
+ strbuf_reset(&buf);
+ strbuf_add_unique_abbrev(&buf, &oid, DEFAULT_ABBREV);
+
+ if (safe_create_leading_directories_const(path))
+ die(_("Could not create directory for '%s'"),
+ path);
+ write_file(path, "%s", oid_to_hex(&oid));
+ printf(_("Created autostash: %s\n"), buf.buf);
+ if (reset_head(r, NULL, "reset --hard",
+ NULL, RESET_HEAD_HARD, NULL, NULL,
+ default_reflog_action) < 0)
+ die(_("could not reset --hard"));
+
+ if (discard_index(r->index) < 0 ||
+ repo_read_index(r) < 0)
+ die(_("could not read index"));
+ }
+ strbuf_release(&buf);
+}
+
+static int apply_save_autostash_oid(const char *stash_oid, int attempt_apply)
{
- struct strbuf stash_sha1 = STRBUF_INIT;
struct child_process child = CHILD_PROCESS_INIT;
int ret = 0;
- if (!read_oneliner(&stash_sha1, rebase_path_autostash(), 1)) {
- strbuf_release(&stash_sha1);
- return 0;
+ if (attempt_apply) {
+ child.git_cmd = 1;
+ child.no_stdout = 1;
+ child.no_stderr = 1;
+ argv_array_push(&child.args, "stash");
+ argv_array_push(&child.args, "apply");
+ argv_array_push(&child.args, stash_oid);
+ ret = run_command(&child);
}
- strbuf_trim(&stash_sha1);
- child.git_cmd = 1;
- child.no_stdout = 1;
- child.no_stderr = 1;
- argv_array_push(&child.args, "stash");
- argv_array_push(&child.args, "apply");
- argv_array_push(&child.args, stash_sha1.buf);
- if (!run_command(&child))
+ if (attempt_apply && !ret)
fprintf(stderr, _("Applied autostash.\n"));
else {
struct child_process store = CHILD_PROCESS_INIT;
@@ -3706,21 +3751,57 @@ static int apply_autostash(struct replay_opts *opts)
argv_array_push(&store.args, "-m");
argv_array_push(&store.args, "autostash");
argv_array_push(&store.args, "-q");
- argv_array_push(&store.args, stash_sha1.buf);
+ argv_array_push(&store.args, stash_oid);
if (run_command(&store))
- ret = error(_("cannot store %s"), stash_sha1.buf);
+ ret = error(_("cannot store %s"), stash_oid);
else
fprintf(stderr,
- _("Applying autostash resulted in conflicts.\n"
+ _("%s\n"
"Your changes are safe in the stash.\n"
"You can run \"git stash pop\" or"
- " \"git stash drop\" at any time.\n"));
+ " \"git stash drop\" at any time.\n"),
+ attempt_apply ?
+ _("Applying autostash resulted in conflicts.") :
+ _("Autostash exists; creating a new stash entry."));
}
- strbuf_release(&stash_sha1);
return ret;
}
+static int apply_save_autostash(const char *path, int attempt_apply)
+{
+ struct strbuf stash_oid = STRBUF_INIT;
+ int ret = 0;
+
+ if (!read_oneliner(&stash_oid, path,
+ READ_ONELINER_SKIP_IF_EMPTY)) {
+ strbuf_release(&stash_oid);
+ return 0;
+ }
+ strbuf_trim(&stash_oid);
+
+ ret = apply_save_autostash_oid(stash_oid.buf, attempt_apply);
+
+ unlink(path);
+ strbuf_release(&stash_oid);
+ return ret;
+}
+
+int save_autostash(const char *path)
+{
+ return apply_save_autostash(path, 0);
+}
+
+int apply_autostash(const char *path)
+{
+ return apply_save_autostash(path, 1);
+}
+
+int apply_autostash_oid(const char *stash_oid)
+{
+ return apply_save_autostash_oid(stash_oid, 1);
+}
+
static const char *reflog_message(struct replay_opts *opts,
const char *sub_action, const char *fmt, ...)
{
@@ -3776,7 +3857,7 @@ static int checkout_onto(struct repository *r, struct replay_opts *opts,
return error(_("%s: not a valid OID"), orig_head);
if (run_git_checkout(r, opts, oid_to_hex(onto), action)) {
- apply_autostash(opts);
+ apply_autostash(rebase_path_autostash());
sequencer_remove_state(opts);
return error(_("could not detach HEAD"));
}
@@ -4095,7 +4176,7 @@ cleanup_head_ref:
run_command(&hook);
}
}
- apply_autostash(opts);
+ apply_autostash(rebase_path_autostash());
if (!opts->quiet) {
if (!opts->verbose)
@@ -4313,7 +4394,8 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
struct strbuf buf = STRBUF_INIT;
struct object_id oid;
- if (read_oneliner(&buf, rebase_path_stopped_sha(), 1) &&
+ if (read_oneliner(&buf, rebase_path_stopped_sha(),
+ READ_ONELINER_SKIP_IF_EMPTY) &&
!get_oid_committish(buf.buf, &oid))
record_in_rewritten(&oid, peek_command(&todo_list, 0));
strbuf_release(&buf);
@@ -5118,7 +5200,7 @@ int complete_action(struct repository *r, struct replay_opts *opts, unsigned fla
todo_list_add_exec_commands(todo_list, commands);
if (count_commands(todo_list) == 0) {
- apply_autostash(opts);
+ apply_autostash(rebase_path_autostash());
sequencer_remove_state(opts);
return error(_("nothing to do"));
@@ -5129,12 +5211,12 @@ int complete_action(struct repository *r, struct replay_opts *opts, unsigned fla
if (res == -1)
return -1;
else if (res == -2) {
- apply_autostash(opts);
+ apply_autostash(rebase_path_autostash());
sequencer_remove_state(opts);
return -1;
} else if (res == -3) {
- apply_autostash(opts);
+ apply_autostash(rebase_path_autostash());
sequencer_remove_state(opts);
todo_list_release(&new_todo);
diff --git a/sequencer.h b/sequencer.h
index 9611605711..d31c41f018 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -191,6 +191,12 @@ void commit_post_rewrite(struct repository *r,
const struct commit *current_head,
const struct object_id *new_head);
+void create_autostash(struct repository *r, const char *path,
+ const char *default_reflog_action);
+int save_autostash(const char *path);
+int apply_autostash(const char *path);
+int apply_autostash_oid(const char *stash_oid);
+
#define SUMMARY_INITIAL_COMMIT (1 << 0)
#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1)
void print_commit_summary(struct repository *repo,
@@ -198,6 +204,20 @@ void print_commit_summary(struct repository *repo,
const struct object_id *oid,
unsigned int flags);
+#define READ_ONELINER_SKIP_IF_EMPTY (1 << 0)
+#define READ_ONELINER_WARN_MISSING (1 << 1)
+
+/*
+ * Reads a file that was presumably written by a shell script, i.e. with an
+ * end-of-line marker that needs to be stripped.
+ *
+ * Note that only the last end-of-line marker is stripped, consistent with the
+ * behavior of "$(cat path)" in a shell script.
+ *
+ * Returns 1 if the file was read, 0 if it could not be read or does not exist.
+ */
+int read_oneliner(struct strbuf *buf,
+ const char *path, unsigned flags);
int read_author_script(const char *path, char **name, char **email, char **date,
int allow_missing);
void parse_strategy_opts(struct replay_opts *opts, char *raw_opts);
diff --git a/sha1-file.c b/sha1-file.c
index 6926851724..ccd34dd9e8 100644
--- a/sha1-file.c
+++ b/sha1-file.c
@@ -881,9 +881,7 @@ void prepare_alt_odb(struct repository *r)
/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
static int freshen_file(const char *fn)
{
- struct utimbuf t;
- t.actime = t.modtime = time(NULL);
- return !utime(fn, &t);
+ return !utime(fn, NULL);
}
/*
diff --git a/shallow.c b/shallow.c
index 14f7fa6e27..321a27670f 100644
--- a/shallow.c
+++ b/shallow.c
@@ -40,13 +40,6 @@ int register_shallow(struct repository *r, const struct object_id *oid)
int is_repository_shallow(struct repository *r)
{
- /*
- * NEEDSWORK: This function updates
- * r->parsed_objects->{is_shallow,shallow_stat} as a side effect but
- * there is no corresponding function to clear them when the shallow
- * file is updated.
- */
-
FILE *fp;
char buf[1024];
const char *path = r->parsed_objects->alternate_shallow_file;
@@ -79,6 +72,25 @@ int is_repository_shallow(struct repository *r)
return r->parsed_objects->is_shallow;
}
+static void reset_repository_shallow(struct repository *r)
+{
+ r->parsed_objects->is_shallow = -1;
+ stat_validity_clear(r->parsed_objects->shallow_stat);
+}
+
+int commit_shallow_file(struct repository *r, struct lock_file *lk)
+{
+ int res = commit_lock_file(lk);
+ reset_repository_shallow(r);
+ return res;
+}
+
+void rollback_shallow_file(struct repository *r, struct lock_file *lk)
+{
+ rollback_lock_file(lk);
+ reset_repository_shallow(r);
+}
+
/*
* TODO: use "int" elemtype instead of "int *" when/if commit-slab
* supports a "valid" flag.
@@ -410,10 +422,10 @@ void prune_shallow(unsigned options)
if (write_in_full(fd, sb.buf, sb.len) < 0)
die_errno("failed to write to %s",
get_lock_file_path(&shallow_lock));
- commit_lock_file(&shallow_lock);
+ commit_shallow_file(the_repository, &shallow_lock);
} else {
unlink(git_path_shallow(the_repository));
- rollback_lock_file(&shallow_lock);
+ rollback_shallow_file(the_repository, &shallow_lock);
}
strbuf_release(&sb);
}
diff --git a/strbuf.c b/strbuf.c
index bb0065ccaf..6e74901bfa 100644
--- a/strbuf.c
+++ b/strbuf.c
@@ -690,6 +690,16 @@ int strbuf_getwholeline(struct strbuf *sb, FILE *fp, int term)
}
#endif
+int strbuf_appendwholeline(struct strbuf *sb, FILE *fp, int term)
+{
+ struct strbuf line = STRBUF_INIT;
+ if (strbuf_getwholeline(&line, fp, term))
+ return EOF;
+ strbuf_addbuf(sb, &line);
+ strbuf_release(&line);
+ return 0;
+}
+
static int strbuf_getdelim(struct strbuf *sb, FILE *fp, int term)
{
if (strbuf_getwholeline(sb, fp, term))
diff --git a/strbuf.h b/strbuf.h
index ce8e49c0b2..411063ca76 100644
--- a/strbuf.h
+++ b/strbuf.h
@@ -503,6 +503,12 @@ int strbuf_getline(struct strbuf *sb, FILE *file);
int strbuf_getwholeline(struct strbuf *sb, FILE *file, int term);
/**
+ * Like `strbuf_getwholeline`, but appends the line instead of
+ * resetting the buffer first.
+ */
+int strbuf_appendwholeline(struct strbuf *sb, FILE *file, int term);
+
+/**
* Like `strbuf_getwholeline`, but operates on a file descriptor.
* It reads one character at a time, so it is very slow. Do not
* use it unless you need the correct position in the file
diff --git a/submodule-config.c b/submodule-config.c
index 4d1c92d582..e175dfbc38 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -225,7 +225,8 @@ static int name_and_item_from_var(const char *var, struct strbuf *name,
struct strbuf *item)
{
const char *subsection, *key;
- int subsection_len, parse;
+ size_t subsection_len;
+ int parse;
parse = parse_config_key(var, "submodule", &subsection,
&subsection_len, &key);
if (parse < 0 || !subsection)
diff --git a/t/README b/t/README
index d12efcd3a4..cf863837ab 100644
--- a/t/README
+++ b/t/README
@@ -379,6 +379,11 @@ GIT_TEST_COMMIT_GRAPH=<boolean>, when true, forces the commit-graph to
be written after every 'git commit' command, and overrides the
'core.commitGraph' setting to true.
+GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=<boolean>, when true, forces
+commit-graph write to compute and write changed path Bloom filters for
+every 'git commit-graph write', as if the `--changed-paths` option was
+passed in.
+
GIT_TEST_FSMONITOR=$PWD/t7519/fsmonitor-all exercises the fsmonitor
code path for utilizing a file system monitor to speed up detecting
new or changed files.
@@ -547,6 +552,41 @@ Here are the "do's:"
reports "ok" or "not ok" to the end user running the tests. Under
--verbose, they are shown to help debug the tests.
+ - Be careful when you loop
+
+ You may need to verify multiple things in a loop, but the
+ following does not work correctly:
+
+ test_expect_success 'test three things' '
+ for i in one two three
+ do
+ test_something "$i"
+ done &&
+ test_something_else
+ '
+
+ Because the status of the loop itself is the exit status of the
+ test_something in the last round, the loop does not fail when
+ "test_something" for "one" or "two" fails. This is not what you
+ want.
+
+ Instead, you can break out of the loop immediately when you see a
+ failure. Because all test_expect_* snippets are executed inside
+ a function, "return 1" can be used to fail the test immediately
+ upon a failure:
+
+ test_expect_success 'test three things' '
+ for i in one two three
+ do
+ test_something "$i" || return 1
+ done &&
+ test_something_else
+ '
+
+ Note that we still &&-chain the loop to propagate failures from
+ earlier commands.
+
+
And here are the "don'ts:"
- Don't exit() within a <script> part.
diff --git a/t/helper/test-bloom.c b/t/helper/test-bloom.c
new file mode 100644
index 0000000000..77eb27adac
--- /dev/null
+++ b/t/helper/test-bloom.c
@@ -0,0 +1,93 @@
+#include "git-compat-util.h"
+#include "bloom.h"
+#include "test-tool.h"
+#include "commit.h"
+
+struct bloom_filter_settings settings = DEFAULT_BLOOM_FILTER_SETTINGS;
+
+static void add_string_to_filter(const char *data, struct bloom_filter *filter) {
+ struct bloom_key key;
+ int i;
+
+ fill_bloom_key(data, strlen(data), &key, &settings);
+ printf("Hashes:");
+ for (i = 0; i < settings.num_hashes; i++){
+ printf("0x%08x|", key.hashes[i]);
+ }
+ printf("\n");
+ add_key_to_filter(&key, filter, &settings);
+}
+
+static void print_bloom_filter(struct bloom_filter *filter) {
+ int i;
+
+ if (!filter) {
+ printf("No filter.\n");
+ return;
+ }
+ printf("Filter_Length:%d\n", (int)filter->len);
+ printf("Filter_Data:");
+ for (i = 0; i < filter->len; i++) {
+ printf("%02x|", filter->data[i]);
+ }
+ printf("\n");
+}
+
+static void get_bloom_filter_for_commit(const struct object_id *commit_oid)
+{
+ struct commit *c;
+ struct bloom_filter *filter;
+ setup_git_directory();
+ c = lookup_commit(the_repository, commit_oid);
+ filter = get_bloom_filter(the_repository, c, 1);
+ print_bloom_filter(filter);
+}
+
+static const char *bloom_usage = "\n"
+" test-tool bloom get_murmer3 <string>\n"
+" test-tool bloom generate_filter <string> [<string>...]\n"
+" test-tool get_filter_for_commit <commit-hex>\n";
+
+int cmd__bloom(int argc, const char **argv)
+{
+ if (argc < 2)
+ usage(bloom_usage);
+
+ if (!strcmp(argv[1], "get_murmur3")) {
+ uint32_t hashed;
+ if (argc < 3)
+ usage(bloom_usage);
+ hashed = murmur3_seeded(0, argv[2], strlen(argv[2]));
+ printf("Murmur3 Hash with seed=0:0x%08x\n", hashed);
+ }
+
+ if (!strcmp(argv[1], "generate_filter")) {
+ struct bloom_filter filter;
+ int i = 2;
+ filter.len = (settings.bits_per_entry + BITS_PER_WORD - 1) / BITS_PER_WORD;
+ filter.data = xcalloc(filter.len, sizeof(unsigned char));
+
+ if (argc - 1 < i)
+ usage(bloom_usage);
+
+ while (argv[i]) {
+ add_string_to_filter(argv[i], &filter);
+ i++;
+ }
+
+ print_bloom_filter(&filter);
+ }
+
+ if (!strcmp(argv[1], "get_filter_for_commit")) {
+ struct object_id oid;
+ const char *end;
+ if (argc < 3)
+ usage(bloom_usage);
+ if (parse_oid_hex(argv[2], &oid, &end))
+ die("cannot parse oid '%s'", argv[2]);
+ init_bloom_filters();
+ get_bloom_filter_for_commit(&oid);
+ }
+
+ return 0;
+}
diff --git a/t/helper/test-pkt-line.c b/t/helper/test-pkt-line.c
index 282d536384..12ca698e17 100644
--- a/t/helper/test-pkt-line.c
+++ b/t/helper/test-pkt-line.c
@@ -67,7 +67,7 @@ static void unpack_sideband(void)
case PACKET_READ_NORMAL:
band = reader.line[0] & 0xff;
if (band < 1 || band > 2)
- die("unexpected side band %d", band);
+ continue; /* skip non-sideband packets */
fd = band;
write_or_die(fd, reader.line + 1, reader.pktlen - 1);
diff --git a/t/helper/test-read-graph.c b/t/helper/test-read-graph.c
index f8a461767c..6d0c962438 100644
--- a/t/helper/test-read-graph.c
+++ b/t/helper/test-read-graph.c
@@ -7,26 +7,15 @@
int cmd__read_graph(int argc, const char **argv)
{
struct commit_graph *graph = NULL;
- char *graph_name;
- int open_ok;
- int fd;
- struct stat st;
struct object_directory *odb;
setup_git_directory();
odb = the_repository->objects->odb;
- graph_name = get_commit_graph_filename(odb);
-
- open_ok = open_commit_graph(graph_name, &fd, &st);
- if (!open_ok)
- die_errno(_("Could not open commit-graph '%s'"), graph_name);
-
- graph = load_commit_graph_one_fd_st(fd, &st, odb);
+ graph = read_commit_graph_one(the_repository, odb);
if (!graph)
return 1;
- FREE_AND_NULL(graph_name);
printf("header: %08x %d %d %d %d\n",
ntohl(*(uint32_t*)graph->data),
@@ -45,6 +34,10 @@ int cmd__read_graph(int argc, const char **argv)
printf(" commit_metadata");
if (graph->chunk_extra_edges)
printf(" extra_edges");
+ if (graph->chunk_bloom_indexes)
+ printf(" bloom_indexes");
+ if (graph->chunk_bloom_data)
+ printf(" bloom_data");
printf("\n");
UNLEAK(graph);
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index 2ece4d1ebf..590b2efca7 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -15,6 +15,7 @@ struct test_cmd {
static struct test_cmd cmds[] = {
{ "advise", cmd__advise_if_enabled },
+ { "bloom", cmd__bloom },
{ "chmtime", cmd__chmtime },
{ "config", cmd__config },
{ "ctype", cmd__ctype },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index 1cbaec02f3..ddc8e990e9 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -5,6 +5,7 @@
#include "git-compat-util.h"
int cmd__advise_if_enabled(int argc, const char **argv);
+int cmd__bloom(int argc, const char **argv);
int cmd__chmtime(int argc, const char **argv);
int cmd__config(int argc, const char **argv);
int cmd__ctype(int argc, const char **argv);
diff --git a/t/perf/p9300-fast-import-export.sh b/t/perf/p9300-fast-import-export.sh
new file mode 100755
index 0000000000..586161e9ad
--- /dev/null
+++ b/t/perf/p9300-fast-import-export.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+test_description='test fast-import and fast-export performance'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+# Use --no-data here to produce a vastly smaller export file.
+# This is much cheaper to work with but should still exercise
+# fast-import pretty well (we'll still process all commits and
+# trees, which account for 60% or more of objects in most repos).
+#
+# Use --reencode to avoid the default of aborting on non-utf8 commits,
+# which lets this test run against a wider variety of sample repos.
+test_perf 'export (no-blobs)' '
+ git fast-export --reencode=yes --no-data HEAD >export
+'
+
+test_perf 'import (no-blobs)' '
+ git fast-import --force <export
+'
+
+test_done
diff --git a/t/t0040-parse-options.sh b/t/t0040-parse-options.sh
index 3483b72db4..f8178ee4e3 100755
--- a/t/t0040-parse-options.sh
+++ b/t/t0040-parse-options.sh
@@ -54,7 +54,7 @@ Alias
-A, --alias-source <string>
get a string
-Z, --alias-target <string>
- get a string
+ alias of --alias-source
EOF
diff --git a/t/t0095-bloom.sh b/t/t0095-bloom.sh
new file mode 100755
index 0000000000..8f9eef116d
--- /dev/null
+++ b/t/t0095-bloom.sh
@@ -0,0 +1,117 @@
+#!/bin/sh
+
+test_description='Testing the various Bloom filter computations in bloom.c'
+. ./test-lib.sh
+
+test_expect_success 'compute unseeded murmur3 hash for empty string' '
+ cat >expect <<-\EOF &&
+ Murmur3 Hash with seed=0:0x00000000
+ EOF
+ test-tool bloom get_murmur3 "" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'compute unseeded murmur3 hash for test string 1' '
+ cat >expect <<-\EOF &&
+ Murmur3 Hash with seed=0:0x627b0c2c
+ EOF
+ test-tool bloom get_murmur3 "Hello world!" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'compute unseeded murmur3 hash for test string 2' '
+ cat >expect <<-\EOF &&
+ Murmur3 Hash with seed=0:0x2e4ff723
+ EOF
+ test-tool bloom get_murmur3 "The quick brown fox jumps over the lazy dog" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'compute bloom key for empty string' '
+ cat >expect <<-\EOF &&
+ Hashes:0x5615800c|0x5b966560|0x61174ab4|0x66983008|0x6c19155c|0x7199fab0|0x771ae004|
+ Filter_Length:2
+ Filter_Data:11|11|
+ EOF
+ test-tool bloom generate_filter "" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'compute bloom key for whitespace' '
+ cat >expect <<-\EOF &&
+ Hashes:0xf178874c|0x5f3d6eb6|0xcd025620|0x3ac73d8a|0xa88c24f4|0x16510c5e|0x8415f3c8|
+ Filter_Length:2
+ Filter_Data:51|55|
+ EOF
+ test-tool bloom generate_filter " " >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'compute bloom key for test string 1' '
+ cat >expect <<-\EOF &&
+ Hashes:0xb270de9b|0x1bb6f26e|0x84fd0641|0xee431a14|0x57892de7|0xc0cf41ba|0x2a15558d|
+ Filter_Length:2
+ Filter_Data:92|6c|
+ EOF
+ test-tool bloom generate_filter "Hello world!" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'compute bloom key for test string 2' '
+ cat >expect <<-\EOF &&
+ Hashes:0x20ab385b|0xf5237fe2|0xc99bc769|0x9e140ef0|0x728c5677|0x47049dfe|0x1b7ce585|
+ Filter_Length:2
+ Filter_Data:a5|4a|
+ EOF
+ test-tool bloom generate_filter "file.txt" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'get bloom filters for commit with no changes' '
+ git init &&
+ git commit --allow-empty -m "c0" &&
+ cat >expect <<-\EOF &&
+ Filter_Length:0
+ Filter_Data:
+ EOF
+ test-tool bloom get_filter_for_commit "$(git rev-parse HEAD)" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'get bloom filter for commit with 10 changes' '
+ rm actual &&
+ rm expect &&
+ mkdir smallDir &&
+ for i in $(test_seq 0 9)
+ do
+ echo $i >smallDir/$i
+ done &&
+ git add smallDir &&
+ git commit -m "commit with 10 changes" &&
+ cat >expect <<-\EOF &&
+ Filter_Length:25
+ Filter_Data:82|a0|65|47|0c|92|90|c0|a1|40|02|a0|e2|40|e0|04|0a|9a|66|cf|80|19|85|42|23|
+ EOF
+ test-tool bloom get_filter_for_commit "$(git rev-parse HEAD)" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success EXPENSIVE 'get bloom filter for commit with 513 changes' '
+ rm actual &&
+ rm expect &&
+ mkdir bigDir &&
+ for i in $(test_seq 0 512)
+ do
+ echo $i >bigDir/$i
+ done &&
+ git add bigDir &&
+ git commit -m "commit with 513 changes" &&
+ cat >expect <<-\EOF &&
+ Filter_Length:0
+ Filter_Data:
+ EOF
+ test-tool bloom get_filter_for_commit "$(git rev-parse HEAD)" >actual &&
+ test_cmp expect actual
+'
+
+test_done
\ No newline at end of file
diff --git a/t/t1011-read-tree-sparse-checkout.sh b/t/t1011-read-tree-sparse-checkout.sh
index eb44bafb59..63223e13bd 100755
--- a/t/t1011-read-tree-sparse-checkout.sh
+++ b/t/t1011-read-tree-sparse-checkout.sh
@@ -233,18 +233,19 @@ test_expect_success 'read-tree --reset removes outside worktree' '
test_must_be_empty result
'
-test_expect_success 'print errors when failed to update worktree' '
+test_expect_success 'print warnings when some worktree updates disabled' '
echo sub >.git/info/sparse-checkout &&
git checkout -f init &&
mkdir sub &&
touch sub/added sub/addedtoo &&
- test_must_fail git checkout top 2>actual &&
+ # Use -q to suppress "Previous HEAD position" and "Head is now at" msgs
+ git checkout -q top 2>actual &&
cat >expected <<\EOF &&
-error: The following untracked working tree files would be overwritten by checkout:
+warning: The following paths were already present and thus not updated despite sparse patterns:
sub/added
sub/addedtoo
-Please move or remove them before you switch branches.
-Aborting
+
+After fixing the above paths, you may want to run `git sparse-checkout reapply`.
EOF
test_i18ncmp expected actual
'
diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh
index 44a91205d6..dee99eeec3 100755
--- a/t/t1091-sparse-checkout-builtin.sh
+++ b/t/t1091-sparse-checkout-builtin.sh
@@ -277,15 +277,23 @@ test_expect_success 'cone mode: add parent path' '
check_files repo a deep folder1
'
-test_expect_success 'revert to old sparse-checkout on bad update' '
+test_expect_success 'not-up-to-date does not block rest of sparsification' '
+ test_when_finished git -C repo sparse-checkout disable &&
test_when_finished git -C repo reset --hard &&
git -C repo sparse-checkout set deep &&
+
echo update >repo/deep/deeper2/a &&
cp repo/.git/info/sparse-checkout expect &&
- test_must_fail git -C repo sparse-checkout set deep/deeper1 2>err &&
- test_i18ngrep "cannot set sparse-checkout patterns" err &&
- test_cmp repo/.git/info/sparse-checkout expect &&
- check_files repo/deep a deeper1 deeper2
+ test_write_lines "!/deep/*/" "/deep/deeper1/" >>expect &&
+
+ git -C repo sparse-checkout set deep/deeper1 2>err &&
+
+ test_i18ngrep "The following paths are not up to date" err &&
+ test_cmp expect repo/.git/info/sparse-checkout &&
+ check_files repo/deep a deeper1 deeper2 &&
+ check_files repo/deep/deeper1 a deepest &&
+ check_files repo/deep/deeper1/deepest a &&
+ check_files repo/deep/deeper2 a
'
test_expect_success 'revert to old sparse-checkout on empty update' '
@@ -315,19 +323,96 @@ test_expect_success '.gitignore should not warn about cone mode' '
test_i18ngrep ! "disabling cone patterns" err
'
-test_expect_success 'sparse-checkout (init|set|disable) fails with dirty status' '
+test_expect_success 'sparse-checkout (init|set|disable) warns with dirty status' '
git clone repo dirty &&
echo dirty >dirty/folder1/a &&
- test_must_fail git -C dirty sparse-checkout init &&
- test_must_fail git -C dirty sparse-checkout set /folder2/* /deep/deeper1/* &&
- test_must_fail git -C dirty sparse-checkout disable &&
+
+ git -C dirty sparse-checkout init 2>err &&
+ test_i18ngrep "warning.*The following paths are not up to date" err &&
+
+ git -C dirty sparse-checkout set /folder2/* /deep/deeper1/* 2>err &&
+ test_i18ngrep "warning.*The following paths are not up to date" err &&
+ test_path_is_file dirty/folder1/a &&
+
+ git -C dirty sparse-checkout disable 2>err &&
+ test_must_be_empty err &&
+
git -C dirty reset --hard &&
git -C dirty sparse-checkout init &&
git -C dirty sparse-checkout set /folder2/* /deep/deeper1/* &&
- git -C dirty sparse-checkout disable
+ test_path_is_missing dirty/folder1/a &&
+ git -C dirty sparse-checkout disable &&
+ test_path_is_file dirty/folder1/a
+'
+
+test_expect_success 'sparse-checkout (init|set|disable) warns with unmerged status' '
+ git clone repo unmerged &&
+
+ cat >input <<-EOF &&
+ 0 0000000000000000000000000000000000000000 folder1/a
+ 100644 $(git -C unmerged rev-parse HEAD:folder1/a) 1 folder1/a
+ EOF
+ git -C unmerged update-index --index-info <input &&
+
+ git -C unmerged sparse-checkout init 2>err &&
+ test_i18ngrep "warning.*The following paths are unmerged" err &&
+
+ git -C unmerged sparse-checkout set /folder2/* /deep/deeper1/* 2>err &&
+ test_i18ngrep "warning.*The following paths are unmerged" err &&
+ test_path_is_file unmerged/folder1/a &&
+
+ git -C unmerged sparse-checkout disable 2>err &&
+ test_i18ngrep "warning.*The following paths are unmerged" err &&
+
+ git -C unmerged reset --hard &&
+ git -C unmerged sparse-checkout init &&
+ git -C unmerged sparse-checkout set /folder2/* /deep/deeper1/* &&
+ git -C unmerged sparse-checkout disable
+'
+
+test_expect_success 'sparse-checkout reapply' '
+ git clone repo tweak &&
+
+ echo dirty >tweak/deep/deeper2/a &&
+
+ cat >input <<-EOF &&
+ 0 0000000000000000000000000000000000000000 folder1/a
+ 100644 $(git -C tweak rev-parse HEAD:folder1/a) 1 folder1/a
+ EOF
+ git -C tweak update-index --index-info <input &&
+
+ git -C tweak sparse-checkout init --cone 2>err &&
+ test_i18ngrep "warning.*The following paths are not up to date" err &&
+ test_i18ngrep "warning.*The following paths are unmerged" err &&
+
+ git -C tweak sparse-checkout set folder2 deep/deeper1 2>err &&
+ test_i18ngrep "warning.*The following paths are not up to date" err &&
+ test_i18ngrep "warning.*The following paths are unmerged" err &&
+
+ git -C tweak sparse-checkout reapply 2>err &&
+ test_i18ngrep "warning.*The following paths are not up to date" err &&
+ test_path_is_file tweak/deep/deeper2/a &&
+ test_i18ngrep "warning.*The following paths are unmerged" err &&
+ test_path_is_file tweak/folder1/a &&
+
+ git -C tweak checkout HEAD deep/deeper2/a &&
+ git -C tweak sparse-checkout reapply 2>err &&
+ test_i18ngrep ! "warning.*The following paths are not up to date" err &&
+ test_path_is_missing tweak/deep/deeper2/a &&
+ test_i18ngrep "warning.*The following paths are unmerged" err &&
+ test_path_is_file tweak/folder1/a &&
+
+ git -C tweak add folder1/a &&
+ git -C tweak sparse-checkout reapply 2>err &&
+ test_must_be_empty err &&
+ test_path_is_missing tweak/deep/deeper2/a &&
+ test_path_is_missing tweak/folder1/a &&
+
+ git -C tweak sparse-checkout disable
'
test_expect_success 'cone mode: set with core.ignoreCase=true' '
+ rm repo/.git/info/sparse-checkout &&
git -C repo sparse-checkout init --cone &&
git -C repo -c core.ignoreCase=true sparse-checkout set folder1 &&
cat >expect <<-\EOF &&
diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh
index a6224ef65f..e1197ac818 100755
--- a/t/t1400-update-ref.sh
+++ b/t/t1400-update-ref.sh
@@ -1354,15 +1354,6 @@ test_expect_success 'fails with duplicate ref update via symref' '
test_cmp expect actual
'
-run_with_limited_open_files () {
- (ulimit -n 32 && "$@")
-}
-
-test_lazy_prereq ULIMIT_FILE_DESCRIPTORS '
- test_have_prereq !MINGW,!CYGWIN &&
- run_with_limited_open_files true
-'
-
test_expect_success ULIMIT_FILE_DESCRIPTORS 'large transaction creating branches does not burst open file limit' '
(
for i in $(test_seq 33)
@@ -1404,4 +1395,135 @@ test_expect_success 'handle per-worktree refs in refs/bisect' '
! test_cmp main-head worktree-head
'
+test_expect_success 'transaction handles empty commit' '
+ cat >stdin <<-EOF &&
+ start
+ prepare
+ commit
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start prepare commit >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'transaction handles empty commit with missing prepare' '
+ cat >stdin <<-EOF &&
+ start
+ commit
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start commit >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'transaction handles sole commit' '
+ cat >stdin <<-EOF &&
+ commit
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" commit >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'transaction handles empty abort' '
+ cat >stdin <<-EOF &&
+ start
+ prepare
+ abort
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start prepare abort >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'transaction exits on multiple aborts' '
+ cat >stdin <<-EOF &&
+ abort
+ abort
+ EOF
+ test_must_fail git update-ref --stdin <stdin >actual 2>err &&
+ printf "%s: ok\n" abort >expect &&
+ test_cmp expect actual &&
+ grep "fatal: transaction is closed" err
+'
+
+test_expect_success 'transaction exits on start after prepare' '
+ cat >stdin <<-EOF &&
+ prepare
+ start
+ EOF
+ test_must_fail git update-ref --stdin <stdin 2>err >actual &&
+ printf "%s: ok\n" prepare >expect &&
+ test_cmp expect actual &&
+ grep "fatal: prepared transactions can only be closed" err
+'
+
+test_expect_success 'transaction handles empty abort with missing prepare' '
+ cat >stdin <<-EOF &&
+ start
+ abort
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start abort >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'transaction handles sole abort' '
+ cat >stdin <<-EOF &&
+ abort
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" abort >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'transaction can handle commit' '
+ cat >stdin <<-EOF &&
+ start
+ create $a HEAD
+ commit
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start commit >expect &&
+ test_cmp expect actual &&
+ git rev-parse HEAD >expect &&
+ git rev-parse $a >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'transaction can handle abort' '
+ cat >stdin <<-EOF &&
+ start
+ create $b HEAD
+ abort
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start abort >expect &&
+ test_cmp expect actual &&
+ test_path_is_missing .git/$b
+'
+
+test_expect_success 'transaction aborts by default' '
+ cat >stdin <<-EOF &&
+ start
+ create $b HEAD
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start >expect &&
+ test_cmp expect actual &&
+ test_path_is_missing .git/$b
+'
+
+test_expect_success 'transaction with prepare aborts by default' '
+ cat >stdin <<-EOF &&
+ start
+ create $b HEAD
+ prepare
+ EOF
+ git update-ref --stdin <stdin >actual &&
+ printf "%s: ok\n" start prepare >expect &&
+ test_cmp expect actual &&
+ test_path_is_missing .git/$b
+'
+
test_done
diff --git a/t/t2018-checkout-branch.sh b/t/t2018-checkout-branch.sh
index bbca7ef8da..21583154d8 100755
--- a/t/t2018-checkout-branch.sh
+++ b/t/t2018-checkout-branch.sh
@@ -238,4 +238,26 @@ test_expect_success 'checkout -b after clone --no-checkout does a checkout of HE
test_path_is_file dest/a.t
'
+test_expect_success 'checkout -b to a new branch preserves mergeable changes despite sparse-checkout' '
+ test_when_finished "
+ git reset --hard &&
+ git checkout branch1-scratch &&
+ test_might_fail git branch -D branch3 &&
+ git config core.sparseCheckout false &&
+ rm .git/info/sparse-checkout" &&
+
+ test_commit file2 &&
+
+ echo stuff >>file1 &&
+ echo file2 >.git/info/sparse-checkout &&
+ git config core.sparseCheckout true &&
+
+ CURHEAD=$(git rev-parse HEAD) &&
+ do_checkout branch3 $CURHEAD &&
+
+ echo file1 >expect &&
+ git diff --name-only >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t3000-ls-files-others.sh b/t/t3000-ls-files-others.sh
index 0aefadacb0..ffdfb16f58 100755
--- a/t/t3000-ls-files-others.sh
+++ b/t/t3000-ls-files-others.sh
@@ -91,4 +91,125 @@ test_expect_success SYMLINKS 'ls-files --others with symlinked submodule' '
test_cmp expect actual
'
+test_expect_success 'setup nested pathspec search' '
+ test_create_repo nested &&
+ (
+ cd nested &&
+
+ mkdir -p partially_tracked/untracked_dir &&
+ > partially_tracked/content &&
+ > partially_tracked/untracked_dir/file &&
+
+ mkdir -p untracked/deep &&
+ > untracked/deep/path &&
+ > untracked/deep/foo.c &&
+
+ git add partially_tracked/content
+ )
+'
+
+test_expect_success 'ls-files -o --directory with single deep dir pathspec' '
+ (
+ cd nested &&
+
+ git ls-files -o --directory untracked/deep/ >actual &&
+
+ cat <<-EOF >expect &&
+ untracked/deep/
+ EOF
+
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'ls-files -o --directory with multiple dir pathspecs' '
+ (
+ cd nested &&
+
+ git ls-files -o --directory partially_tracked/ untracked/ >actual &&
+
+ cat <<-EOF >expect &&
+ partially_tracked/untracked_dir/
+ untracked/
+ EOF
+
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'ls-files -o --directory with mix dir/file pathspecs' '
+ (
+ cd nested &&
+
+ git ls-files -o --directory partially_tracked/ untracked/deep/path >actual &&
+
+ cat <<-EOF >expect &&
+ partially_tracked/untracked_dir/
+ untracked/deep/path
+ EOF
+
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'ls-files --o --directory with glob filetype match' '
+ (
+ cd nested &&
+
+ # globs kinda defeat --directory, but only for that pathspec
+ git ls-files --others --directory partially_tracked "untracked/*.c" >actual &&
+
+ cat <<-EOF >expect &&
+ partially_tracked/untracked_dir/
+ untracked/deep/foo.c
+ EOF
+
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'ls-files --o --directory with mix of tracked states' '
+ (
+ cd nested &&
+
+ # globs kinda defeat --directory, but only for that pathspec
+ git ls-files --others --directory partially_tracked/ "untracked/?*" >actual &&
+
+ cat <<-EOF >expect &&
+ partially_tracked/untracked_dir/
+ untracked/deep/
+ EOF
+
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'ls-files --o --directory with glob filetype match only' '
+ (
+ cd nested &&
+
+ git ls-files --others --directory "untracked/*.c" >actual &&
+
+ cat <<-EOF >expect &&
+ untracked/deep/foo.c
+ EOF
+
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'ls-files --o --directory to get immediate paths under one dir only' '
+ (
+ cd nested &&
+
+ git ls-files --others --directory "untracked/?*" >actual &&
+
+ cat <<-EOF >expect &&
+ untracked/deep/
+ EOF
+
+ test_cmp expect actual
+ )
+'
+
test_done
diff --git a/t/t3033-merge-toplevel.sh b/t/t3033-merge-toplevel.sh
index d314599428..e29c284b9b 100755
--- a/t/t3033-merge-toplevel.sh
+++ b/t/t3033-merge-toplevel.sh
@@ -142,6 +142,17 @@ test_expect_success 'refuse two-project merge by default' '
test_must_fail git merge five
'
+test_expect_success 'refuse two-project merge by default, quit before --autostash happens' '
+ t3033_reset &&
+ git reset --hard four &&
+ echo change >>one.t &&
+ git diff >expect &&
+ test_must_fail git merge --autostash five 2>err &&
+ test_i18ngrep ! "stash" err &&
+ git diff >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'two-project merge with --allow-unrelated-histories' '
t3033_reset &&
git reset --hard four &&
@@ -149,4 +160,15 @@ test_expect_success 'two-project merge with --allow-unrelated-histories' '
git diff --exit-code five
'
+test_expect_success 'two-project merge with --allow-unrelated-histories with --autostash' '
+ t3033_reset &&
+ git reset --hard four &&
+ echo change >>one.t &&
+ git diff one.t >expect &&
+ git merge --allow-unrelated-histories --autostash five 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ git diff one.t >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh
index bd808f87ed..e024cff65c 100755
--- a/t/t3206-range-diff.sh
+++ b/t/t3206-range-diff.sh
@@ -513,6 +513,16 @@ test_expect_success 'range-diff overrides diff.noprefix internally' '
git -c diff.noprefix=true range-diff HEAD^...
'
+test_expect_success 'basic with modified format.pretty with suffix' '
+ git -c format.pretty="format:commit %H%d%n" range-diff \
+ master..topic master..unmodified
+'
+
+test_expect_success 'basic with modified format.pretty without "commit "' '
+ git -c format.pretty="format:%H%n" range-diff \
+ master..topic master..unmodified
+'
+
test_expect_success 'range-diff compares notes by default' '
git notes add -m "topic note" topic &&
git notes add -m "unmodified note" unmodified &&
diff --git a/t/t3420-rebase-autostash.sh b/t/t3420-rebase-autostash.sh
index b97ea62363..ca331733fb 100755
--- a/t/t3420-rebase-autostash.sh
+++ b/t/t3420-rebase-autostash.sh
@@ -184,6 +184,26 @@ testrebase () {
git checkout feature-branch
'
+ test_expect_success "rebase$type: --quit" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ echo dirty >>file3 &&
+ git diff >expect &&
+ test_must_fail git rebase$type related-onto-branch &&
+ test_path_is_file $dotest/autostash &&
+ test_path_is_missing file3 &&
+ git rebase --quit &&
+ test_when_finished git stash drop &&
+ test_path_is_missing $dotest/autostash &&
+ ! grep dirty file3 &&
+ git stash show -p >actual &&
+ test_cmp expect actual &&
+ git reset --hard &&
+ git checkout feature-branch
+ '
+
test_expect_success "rebase$type: non-conflicting rebase, conflicting stash" '
test_config rebase.autostash true &&
git reset --hard &&
diff --git a/t/t3904-stash-patch.sh b/t/t3904-stash-patch.sh
index 9546b6f8a4..accfe3845c 100755
--- a/t/t3904-stash-patch.sh
+++ b/t/t3904-stash-patch.sh
@@ -89,7 +89,7 @@ test_expect_success 'none of this moved HEAD' '
verify_saved_head
'
-test_expect_failure 'stash -p with split hunk' '
+test_expect_success 'stash -p with split hunk' '
git reset --hard &&
cat >test <<-\EOF &&
aaa
@@ -106,8 +106,8 @@ test_expect_failure 'stash -p with split hunk' '
ccc
EOF
printf "%s\n" s n y q |
- test_might_fail git stash -p 2>error &&
- ! test_must_be_empty error &&
+ git stash -p 2>error &&
+ test_must_be_empty error &&
grep "added line 1" test &&
! grep "added line 2" test
'
diff --git a/t/t4013-diff-various.sh b/t/t4013-diff-various.sh
index dde3f11fec..3f60f7d96c 100755
--- a/t/t4013-diff-various.sh
+++ b/t/t4013-diff-various.sh
@@ -95,6 +95,15 @@ test_expect_success setup '
git commit -m "update mode" &&
git checkout -f master &&
+ GIT_AUTHOR_DATE="2006-06-26 00:06:00 +0000" &&
+ GIT_COMMITTER_DATE="2006-06-26 00:06:00 +0000" &&
+ export GIT_AUTHOR_DATE GIT_COMMITTER_DATE &&
+ git checkout -b note initial &&
+ git update-index --chmod=+x file2 &&
+ git commit -m "update mode (file2)" &&
+ git notes add -m "note" &&
+ git checkout -f master &&
+
# Same merge as master, but with parents reversed. Hide it in a
# pseudo-ref to avoid impacting tests with --all.
commit=$(echo reverse |
@@ -398,6 +407,9 @@ diff --no-index --raw --no-abbrev dir2 dir
diff-tree --pretty --root --stat --compact-summary initial
diff-tree --pretty -R --root --stat --compact-summary initial
+diff-tree --pretty note
+diff-tree --pretty --notes note
+diff-tree --format=%N note
diff-tree --stat --compact-summary initial mode
diff-tree -R --stat --compact-summary initial mode
EOF
diff --git a/t/t4013/diff.diff-tree_--format=%N_note b/t/t4013/diff.diff-tree_--format=%N_note
new file mode 100644
index 0000000000..93042ed539
--- /dev/null
+++ b/t/t4013/diff.diff-tree_--format=%N_note
@@ -0,0 +1,6 @@
+$ git diff-tree --format=%N note
+note
+
+
+:100644 100755 01e79c32a8c99c557f0757da7cb6d65b3414466d 01e79c32a8c99c557f0757da7cb6d65b3414466d M file2
+$
diff --git a/t/t4013/diff.diff-tree_--pretty_--notes_note b/t/t4013/diff.diff-tree_--pretty_--notes_note
new file mode 100644
index 0000000000..4d0bde601c
--- /dev/null
+++ b/t/t4013/diff.diff-tree_--pretty_--notes_note
@@ -0,0 +1,12 @@
+$ git diff-tree --pretty --notes note
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ update mode (file2)
+
+Notes:
+ note
+
+:100644 100755 01e79c32a8c99c557f0757da7cb6d65b3414466d 01e79c32a8c99c557f0757da7cb6d65b3414466d M file2
+$
diff --git a/t/t4013/diff.diff-tree_--pretty_note b/t/t4013/diff.diff-tree_--pretty_note
new file mode 100644
index 0000000000..1fa5967083
--- /dev/null
+++ b/t/t4013/diff.diff-tree_--pretty_note
@@ -0,0 +1,9 @@
+$ git diff-tree --pretty note
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ update mode (file2)
+
+:100644 100755 01e79c32a8c99c557f0757da7cb6d65b3414466d 01e79c32a8c99c557f0757da7cb6d65b3414466d M file2
+$
diff --git a/t/t4013/diff.log_--decorate=full_--all b/t/t4013/diff.log_--decorate=full_--all
index 2afe91f116..3f9b872ece 100644
--- a/t/t4013/diff.log_--decorate=full_--all
+++ b/t/t4013/diff.log_--decorate=full_--all
@@ -5,12 +5,27 @@ Date: Mon Jun 26 00:06:00 2006 +0000
update mode
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note)
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ update mode (file2)
+
+Notes:
+ note
+
commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange)
Author: A U Thor <author@example.com>
Date: Mon Jun 26 00:06:00 2006 +0000
Rearranged lines in dir/sub
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ Notes added by 'git notes add'
+
commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master)
Merge: 9a6d494 c7a2ab9
Author: A U Thor <author@example.com>
diff --git a/t/t4013/diff.log_--decorate_--all b/t/t4013/diff.log_--decorate_--all
index d0f308ab2b..f5e20e1e14 100644
--- a/t/t4013/diff.log_--decorate_--all
+++ b/t/t4013/diff.log_--decorate_--all
@@ -5,12 +5,27 @@ Date: Mon Jun 26 00:06:00 2006 +0000
update mode
+commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note)
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ update mode (file2)
+
+Notes:
+ note
+
commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange)
Author: A U Thor <author@example.com>
Date: Mon Jun 26 00:06:00 2006 +0000
Rearranged lines in dir/sub
+commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits)
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ Notes added by 'git notes add'
+
commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master)
Merge: 9a6d494 c7a2ab9
Author: A U Thor <author@example.com>
diff --git a/t/t4061-diff-indent.sh b/t/t4061-diff-indent.sh
index 2affd7a100..0f7a6d97a8 100755
--- a/t/t4061-diff-indent.sh
+++ b/t/t4061-diff-indent.sh
@@ -17,7 +17,7 @@ compare_diff () {
# Compare blame output using the expectation for a diff as reference.
# Only look for the lines coming from non-boundary commits.
compare_blame () {
- sed -n -e "1,4d" -e "s/^\+//p" <"$1" >.tmp-1
+ sed -n -e "1,4d" -e "s/^+//p" <"$1" >.tmp-1
sed -ne "s/^[^^][^)]*) *//p" <"$2" >.tmp-2
test_cmp .tmp-1 .tmp-2 && rm -f .tmp-1 .tmp-2
}
diff --git a/t/t4067-diff-partial-clone.sh b/t/t4067-diff-partial-clone.sh
index 4831ad35e6..c1ed1c2fc4 100755
--- a/t/t4067-diff-partial-clone.sh
+++ b/t/t4067-diff-partial-clone.sh
@@ -131,4 +131,52 @@ test_expect_success 'diff with rename detection batches blobs' '
test_line_count = 1 done_lines
'
+test_expect_success 'diff does not fetch anything if inexact rename detection is not needed' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ printf "b\nb\nb\nb\nb\n" >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ mv server/b server/c &&
+ git -C server add c &&
+ git -C server commit -a -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure no fetches.
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff --raw -M HEAD^ HEAD &&
+ ! test_path_exists trace
+'
+
+test_expect_success 'diff --break-rewrites fetches only if necessary, and batches blobs if it does' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ printf "b\nb\nb\nb\nb\n" >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ printf "c\nc\nc\nc\nc\n" >server/b &&
+ git -C server commit -a -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure no fetches.
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff --raw -M HEAD^ HEAD &&
+ ! test_path_exists trace &&
+
+ # But with --break-rewrites, ensure that there is exactly 1 negotiation
+ # by checking that there is only 1 "done" line sent. ("done" marks the
+ # end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff --break-rewrites --raw -M HEAD^ HEAD &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
test_done
diff --git a/t/t4124-apply-ws-rule.sh b/t/t4124-apply-ws-rule.sh
index 971a5a7512..0ca29821ec 100755
--- a/t/t4124-apply-ws-rule.sh
+++ b/t/t4124-apply-ws-rule.sh
@@ -52,6 +52,13 @@ test_fix () {
# find touched lines
$DIFF file target | sed -n -e "s/^> //p" >fixed
+ # busybox's diff(1) doesn't output normal format
+ if ! test -s fixed
+ then
+ $DIFF -u file target |
+ grep -v '^+++ target' |
+ sed -ne "/^+/s/+//p" >fixed
+ fi
# the changed lines are all expected to change
fixed_cnt=$(wc -l <fixed)
diff --git a/t/t4202-log.sh b/t/t4202-log.sh
index 5eeb739f3e..f1ea7d97f5 100755
--- a/t/t4202-log.sh
+++ b/t/t4202-log.sh
@@ -742,7 +742,23 @@ test_expect_success 'decorate-refs with glob' '
octopus-a (octopus-a)
reach
EOF
+ cat >expect.no-decorate <<-\EOF &&
+ Merge-tag-reach
+ Merge-tags-octopus-a-and-octopus-b
+ seventh
+ octopus-b
+ octopus-a
+ reach
+ EOF
+ git log -n6 --decorate=short --pretty="tformat:%f%d" \
+ --decorate-refs="heads/octopus*" >actual &&
+ test_cmp expect.decorate actual &&
git log -n6 --decorate=short --pretty="tformat:%f%d" \
+ --decorate-refs-exclude="heads/octopus*" \
+ --decorate-refs="heads/octopus*" >actual &&
+ test_cmp expect.no-decorate actual &&
+ git -c log.excludeDecoration="heads/octopus*" log \
+ -n6 --decorate=short --pretty="tformat:%f%d" \
--decorate-refs="heads/octopus*" >actual &&
test_cmp expect.decorate actual
'
@@ -787,6 +803,9 @@ test_expect_success 'decorate-refs-exclude with glob' '
EOF
git log -n6 --decorate=short --pretty="tformat:%f%d" \
--decorate-refs-exclude="heads/octopus*" >actual &&
+ test_cmp expect.decorate actual &&
+ git -c log.excludeDecoration="heads/octopus*" log \
+ -n6 --decorate=short --pretty="tformat:%f%d" >actual &&
test_cmp expect.decorate actual
'
@@ -801,6 +820,9 @@ test_expect_success 'decorate-refs-exclude without globs' '
EOF
git log -n6 --decorate=short --pretty="tformat:%f%d" \
--decorate-refs-exclude="tags/reach" >actual &&
+ test_cmp expect.decorate actual &&
+ git -c log.excludeDecoration="tags/reach" log \
+ -n6 --decorate=short --pretty="tformat:%f%d" >actual &&
test_cmp expect.decorate actual
'
@@ -816,11 +838,19 @@ test_expect_success 'multiple decorate-refs-exclude' '
git log -n6 --decorate=short --pretty="tformat:%f%d" \
--decorate-refs-exclude="heads/octopus*" \
--decorate-refs-exclude="tags/reach" >actual &&
+ test_cmp expect.decorate actual &&
+ git -c log.excludeDecoration="heads/octopus*" \
+ -c log.excludeDecoration="tags/reach" log \
+ -n6 --decorate=short --pretty="tformat:%f%d" >actual &&
+ test_cmp expect.decorate actual &&
+ git -c log.excludeDecoration="heads/octopus*" log \
+ --decorate-refs-exclude="tags/reach" \
+ -n6 --decorate=short --pretty="tformat:%f%d" >actual &&
test_cmp expect.decorate actual
'
test_expect_success 'decorate-refs and decorate-refs-exclude' '
- cat >expect.decorate <<-\EOF &&
+ cat >expect.no-decorate <<-\EOF &&
Merge-tag-reach (master)
Merge-tags-octopus-a-and-octopus-b
seventh
@@ -831,6 +861,21 @@ test_expect_success 'decorate-refs and decorate-refs-exclude' '
git log -n6 --decorate=short --pretty="tformat:%f%d" \
--decorate-refs="heads/*" \
--decorate-refs-exclude="heads/oc*" >actual &&
+ test_cmp expect.no-decorate actual
+'
+
+test_expect_success 'decorate-refs and log.excludeDecoration' '
+ cat >expect.decorate <<-\EOF &&
+ Merge-tag-reach (master)
+ Merge-tags-octopus-a-and-octopus-b
+ seventh
+ octopus-b (octopus-b)
+ octopus-a (octopus-a)
+ reach (reach)
+ EOF
+ git -c log.excludeDecoration="heads/oc*" log \
+ --decorate-refs="heads/*" \
+ -n6 --decorate=short --pretty="tformat:%f%d" >actual &&
test_cmp expect.decorate actual
'
@@ -846,6 +891,10 @@ test_expect_success 'decorate-refs-exclude and simplify-by-decoration' '
git log -n6 --decorate=short --pretty="tformat:%f%d" \
--decorate-refs-exclude="*octopus*" \
--simplify-by-decoration >actual &&
+ test_cmp expect.decorate actual &&
+ git -c log.excludeDecoration="*octopus*" log \
+ -n6 --decorate=short --pretty="tformat:%f%d" \
+ --simplify-by-decoration >actual &&
test_cmp expect.decorate actual
'
diff --git a/t/t4216-log-bloom.sh b/t/t4216-log-bloom.sh
new file mode 100755
index 0000000000..c7011f33e2
--- /dev/null
+++ b/t/t4216-log-bloom.sh
@@ -0,0 +1,155 @@
+#!/bin/sh
+
+test_description='git log for a path with Bloom filters'
+. ./test-lib.sh
+
+GIT_TEST_COMMIT_GRAPH=0
+GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0
+
+test_expect_success 'setup test - repo, commits, commit graph, log outputs' '
+ git init &&
+ mkdir A A/B A/B/C &&
+ test_commit c1 A/file1 &&
+ test_commit c2 A/B/file2 &&
+ test_commit c3 A/B/C/file3 &&
+ test_commit c4 A/file1 &&
+ test_commit c5 A/B/file2 &&
+ test_commit c6 A/B/C/file3 &&
+ test_commit c7 A/file1 &&
+ test_commit c8 A/B/file2 &&
+ test_commit c9 A/B/C/file3 &&
+ test_commit c10 file_to_be_deleted &&
+ git checkout -b side HEAD~4 &&
+ test_commit side-1 file4 &&
+ git checkout master &&
+ git merge side &&
+ test_commit c11 file5 &&
+ mv file5 file5_renamed &&
+ git add file5_renamed &&
+ git commit -m "rename" &&
+ rm file_to_be_deleted &&
+ git add . &&
+ git commit -m "file removed" &&
+ git commit-graph write --reachable --changed-paths
+'
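+# graph_read_expect <num-commits>: assert that "test-tool read-graph" reports
+# a commit-graph with the given number of commits and the Bloom filter chunks
+# (bloom_indexes, bloom_data) in addition to the usual chunks.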
+graph_read_expect () {
+ NUM_CHUNKS=5
+ cat >expect <<- EOF
+ header: 43475048 1 1 $NUM_CHUNKS 0
+ num_commits: $1
+ chunks: oid_fanout oid_lookup commit_metadata bloom_indexes bloom_data
+ EOF
+ test-tool read-graph >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'commit-graph write wrote out the bloom chunks' '
+ graph_read_expect 15
+'
+
+# Turn off any inherited trace2 settings for this test.
+sane_unset GIT_TRACE2 GIT_TRACE2_PERF GIT_TRACE2_EVENT
+sane_unset GIT_TRACE2_PERF_BRIEF
+sane_unset GIT_TRACE2_CONFIG_PARAMS
+
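+# Run "git log" for the given arguments once with commit-graph access disabled
+# and once enabled, capturing a trace2 performance log for the enabled run.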
+setup () {
+ rm "$TRASH_DIRECTORY/trace.perf"
+ git -c core.commitGraph=false log --pretty="format:%s" $1 >log_wo_bloom &&
+ GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.perf" git -c core.commitGraph=true log --pretty="format:%s" $1 >log_w_bloom
+}
+
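+# Verify that the log output is identical with and without the commit-graph,
+# and that the trace2 statistics show the Bloom filters were consulted.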
+test_bloom_filters_used () {
+ log_args=$1
+ bloom_trace_prefix="statistics:{\"filter_not_present\":0,\"zero_length_filter\":0,\"maybe\""
+ setup "$log_args" &&
+ grep -q "$bloom_trace_prefix" "$TRASH_DIRECTORY/trace.perf" &&
+ test_cmp log_wo_bloom log_w_bloom &&
+ test_path_is_file "$TRASH_DIRECTORY/trace.perf"
+}
+
+test_bloom_filters_not_used () {
+ log_args=$1
+ setup "$log_args" &&
+ !(grep -q "statistics:{\"filter_not_present\":" "$TRASH_DIRECTORY/trace.perf") &&
+ test_cmp log_wo_bloom log_w_bloom
+}
+
+for path in A A/B A/B/C A/file1 A/B/file2 A/B/C/file3 file4 file5 file5_renamed file_to_be_deleted
+do
+ for option in "" \
+ "--all" \
+ "--full-history" \
+ "--full-history --simplify-merges" \
+ "--simplify-merges" \
+ "--simplify-by-decoration" \
+ "--follow" \
+ "--first-parent" \
+ "--topo-order" \
+ "--date-order" \
+ "--author-date-order" \
+ "--ancestry-path side..master"
+ do
+ test_expect_success "git log option: $option for path: $path" '
+ test_bloom_filters_used "$option -- $path"
+ '
+ done
+done
+
+test_expect_success 'git log -- folder works with and without the trailing slash' '
+ test_bloom_filters_used "-- A" &&
+ test_bloom_filters_used "-- A/"
+'
+
+test_expect_success 'git log for path that does not exist' '
+ test_bloom_filters_used "-- path_does_not_exist"
+'
+
+test_expect_success 'git log with --walk-reflogs does not use Bloom filters' '
+ test_bloom_filters_not_used "--walk-reflogs -- A"
+'
+
+test_expect_success 'git log -- multiple path specs does not use Bloom filters' '
+ test_bloom_filters_not_used "-- file4 A/file1"
+'
+
+test_expect_success 'git log with wildcard that resolves to a single path uses Bloom filters' '
+ test_bloom_filters_used "-- *4" &&
+ test_bloom_filters_used "-- *renamed"
+'
+
+test_expect_success 'git log with wildcard that resolves to multiple paths does not use Bloom filters' '
+ test_bloom_filters_not_used "-- *" &&
+ test_bloom_filters_not_used "-- file*"
+'
+
+test_expect_success 'setup - add commit-graph to the chain without Bloom filters' '
+ test_commit c14 A/anotherFile2 &&
+ test_commit c15 A/B/anotherFile2 &&
+ test_commit c16 A/B/C/anotherFile2 &&
+ GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0 git commit-graph write --reachable --split &&
+ test_line_count = 2 .git/objects/info/commit-graphs/commit-graph-chain
+'
+
+test_expect_success 'Do not use Bloom filters if the latest graph does not have Bloom filters.' '
+ test_bloom_filters_not_used "-- A/B"
+'
+
+test_expect_success 'setup - add commit-graph to the chain with Bloom filters' '
+ test_commit c17 A/anotherFile3 &&
+ git commit-graph write --reachable --changed-paths --split &&
+ test_line_count = 3 .git/objects/info/commit-graphs/commit-graph-chain
+'
+
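+# As above, but expect the trace2 statistics to report exactly three commits
+# whose changed-path Bloom filters are not present in the graph chain.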
+test_bloom_filters_used_when_some_filters_are_missing () {
+ log_args=$1
+ bloom_trace_prefix="statistics:{\"filter_not_present\":3,\"zero_length_filter\":0,\"maybe\":8,\"definitely_not\":6"
+ setup "$log_args" &&
+ grep -q "$bloom_trace_prefix" "$TRASH_DIRECTORY/trace.perf" &&
+ test_cmp log_wo_bloom log_w_bloom
+}
+
+test_expect_success 'Use Bloom filters if they exist in the latest but not all commit graphs in the chain.' '
+ test_bloom_filters_used_when_some_filters_are_missing "-- A/B"
+'
+
+test_done
\ No newline at end of file
diff --git a/t/t4254-am-corrupt.sh b/t/t4254-am-corrupt.sh
index fd3bdbfe2c..daf01c309d 100755
--- a/t/t4254-am-corrupt.sh
+++ b/t/t4254-am-corrupt.sh
@@ -3,6 +3,37 @@
test_description='git am with corrupt input'
. ./test-lib.sh
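+# Generate a quoted-printable mbox patch whose Subject header and/or body
+# contain an encoded NUL byte (=00), selected by the "subject"/"body" args.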
+make_mbox_with_nul () {
+ space=' '
+ q_nul_in_subject=
+ q_nul_in_body=
+ while test $# -ne 0
+ do
+ case "$1" in
+ subject) q_nul_in_subject='=00' ;;
+ body) q_nul_in_body='=00' ;;
+ esac &&
+ shift
+ done &&
+ cat <<-EOF
+ From ec7364544f690c560304f5a5de9428ea3b978b26 Mon Sep 17 00:00:00 2001
+ From: A U Thor <author@example.com>
+ Date: Sun, 19 Apr 2020 13:42:07 +0700
+ Subject: [PATCH] =?ISO-8859-1?q?=C4=CB${q_nul_in_subject}=D1=CF=D6?=
+ MIME-Version: 1.0
+ Content-Type: text/plain; charset=ISO-8859-1
+ Content-Transfer-Encoding: quoted-printable
+
+ abc${q_nul_in_body}def
+ ---
+ diff --git a/afile b/afile
+ new file mode 100644
+ index 0000000000..e69de29bb2
+ --$space
+ 2.26.1
+ EOF
+}
+
test_expect_success setup '
# Note the missing "+++" line:
cat >bad-patch.diff <<-\EOF &&
@@ -25,13 +56,27 @@ test_expect_success setup '
# fatal: unable to write file '(null)' mode 100644: Bad address
# Also, it had the unwanted side-effect of deleting f.
test_expect_success 'try to apply corrupted patch' '
- test_must_fail git -c advice.amWorkDir=false am bad-patch.diff 2>actual
-'
-
-test_expect_success 'compare diagnostic; ensure file is still here' '
+ test_when_finished "git am --abort" &&
+ test_must_fail git -c advice.amWorkDir=false am bad-patch.diff 2>actual &&
echo "error: git diff header lacks filename information (line 4)" >expected &&
test_path_is_file f &&
test_i18ncmp expected actual
'
+test_expect_success "NUL in commit message's body" '
+ test_when_finished "git am --abort" &&
+ make_mbox_with_nul body >body.patch &&
+ test_must_fail git am body.patch 2>err &&
+ grep "a NUL byte in commit log message not allowed" err
+'
+
+test_expect_success "NUL in commit message's header" "
+ test_when_finished 'git am --abort' &&
+ make_mbox_with_nul subject >subject.patch &&
+ test_must_fail git mailinfo msg patch <subject.patch 2>err &&
+ grep \"a NUL byte in 'Subject' is not allowed\" err &&
+ test_must_fail git am subject.patch 2>err &&
+ grep \"a NUL byte in 'Subject' is not allowed\" err
+"
+
test_done
diff --git a/t/t5003-archive-zip.sh b/t/t5003-archive-zip.sh
index 106eddbd85..3b76d2eb65 100755
--- a/t/t5003-archive-zip.sh
+++ b/t/t5003-archive-zip.sh
@@ -7,12 +7,12 @@ test_description='git archive --format=zip test'
SUBSTFORMAT=%H%n
test_lazy_prereq UNZIP_SYMLINKS '
- (
- mkdir unzip-symlinks &&
- cd unzip-symlinks &&
- "$GIT_UNZIP" "$TEST_DIRECTORY"/t5003/infozip-symlinks.zip &&
- test -h symlink
- )
+ "$GIT_UNZIP" "$TEST_DIRECTORY"/t5003/infozip-symlinks.zip &&
+ test -h symlink
+'
+
+test_lazy_prereq UNZIP_CONVERT '
+ "$GIT_UNZIP" -a "$TEST_DIRECTORY"/t5003/infozip-symlinks.zip
'
check_zip() {
@@ -39,33 +39,33 @@ check_zip() {
extracted=${dir_with_prefix}a
original=a
- test_expect_success UNZIP " extract ZIP archive with EOL conversion" '
+ test_expect_success UNZIP_CONVERT " extract ZIP archive with EOL conversion" '
(mkdir $dir && cd $dir && "$GIT_UNZIP" -a ../$zipfile)
'
- test_expect_success UNZIP " validate that text files are converted" "
+ test_expect_success UNZIP_CONVERT " validate that text files are converted" "
test_cmp_bin $extracted/text.cr $extracted/text.crlf &&
test_cmp_bin $extracted/text.cr $extracted/text.lf
"
- test_expect_success UNZIP " validate that binary files are unchanged" "
+ test_expect_success UNZIP_CONVERT " validate that binary files are unchanged" "
test_cmp_bin $original/binary.cr $extracted/binary.cr &&
test_cmp_bin $original/binary.crlf $extracted/binary.crlf &&
test_cmp_bin $original/binary.lf $extracted/binary.lf
"
- test_expect_success UNZIP " validate that diff files are converted" "
+ test_expect_success UNZIP_CONVERT " validate that diff files are converted" "
test_cmp_bin $extracted/diff.cr $extracted/diff.crlf &&
test_cmp_bin $extracted/diff.cr $extracted/diff.lf
"
- test_expect_success UNZIP " validate that -diff files are unchanged" "
+ test_expect_success UNZIP_CONVERT " validate that -diff files are unchanged" "
test_cmp_bin $original/nodiff.cr $extracted/nodiff.cr &&
test_cmp_bin $original/nodiff.crlf $extracted/nodiff.crlf &&
test_cmp_bin $original/nodiff.lf $extracted/nodiff.lf
"
- test_expect_success UNZIP " validate that custom diff is unchanged " "
+ test_expect_success UNZIP_CONVERT " validate that custom diff is unchanged " "
test_cmp_bin $original/custom.cr $extracted/custom.cr &&
test_cmp_bin $original/custom.crlf $extracted/custom.crlf &&
test_cmp_bin $original/custom.lf $extracted/custom.lf
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
index 9bf920ae17..39e2918a32 100755
--- a/t/t5318-commit-graph.sh
+++ b/t/t5318-commit-graph.sh
@@ -3,6 +3,8 @@
test_description='commit graph'
. ./test-lib.sh
+GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0
+
test_expect_success 'setup full repo' '
mkdir full &&
cd "$TRASH_DIRECTORY/full" &&
@@ -43,7 +45,7 @@ test_expect_success 'create commits and repack' '
test_expect_success 'exit with correct error on bad input to --stdin-commits' '
cd "$TRASH_DIRECTORY/full" &&
echo HEAD | test_expect_code 1 git commit-graph write --stdin-commits 2>stderr &&
- test_i18ngrep "invalid commit object id" stderr &&
+ test_i18ngrep "unexpected non-hex object ID: HEAD" stderr &&
# valid tree OID, but not a commit OID
git rev-parse HEAD^{tree} | test_expect_code 1 git commit-graph write --stdin-commits 2>stderr &&
test_i18ngrep "invalid commit object id" stderr
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
index 22240fd30b..030a7222b2 100755
--- a/t/t5319-multi-pack-index.sh
+++ b/t/t5319-multi-pack-index.sh
@@ -526,10 +526,10 @@ test_expect_success 'repack with minimum size does not alter existing packs' '
cd dup &&
rm -rf .git/objects/pack &&
mv .git/objects/pack-backup .git/objects/pack &&
- touch -m -t 201901010000 .git/objects/pack/pack-D* &&
- touch -m -t 201901010001 .git/objects/pack/pack-C* &&
- touch -m -t 201901010002 .git/objects/pack/pack-B* &&
- touch -m -t 201901010003 .git/objects/pack/pack-A* &&
+ test-tool chmtime =-5 .git/objects/pack/pack-D* &&
+ test-tool chmtime =-4 .git/objects/pack/pack-C* &&
+ test-tool chmtime =-3 .git/objects/pack/pack-B* &&
+ test-tool chmtime =-2 .git/objects/pack/pack-A* &&
ls .git/objects/pack >expect &&
MINSIZE=$(test-tool path-utils file-size .git/objects/pack/*pack | sort -n | head -n 1) &&
git multi-pack-index repack --batch-size=$MINSIZE &&
diff --git a/t/t5324-split-commit-graph.sh b/t/t5324-split-commit-graph.sh
index 53b2e6b455..594edb7307 100755
--- a/t/t5324-split-commit-graph.sh
+++ b/t/t5324-split-commit-graph.sh
@@ -4,6 +4,7 @@ test_description='split commit graph'
. ./test-lib.sh
GIT_TEST_COMMIT_GRAPH=0
+GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0
test_expect_success 'setup repo' '
git init &&
@@ -210,8 +211,14 @@ test_expect_success 'test merge strategy constants' '
git config core.commitGraph true &&
test_line_count = 2 $graphdir/commit-graph-chain &&
test_commit 15 &&
- git commit-graph write --reachable --split --size-multiple=10 --expire-time=1980-01-01 &&
+ touch $graphdir/to-delete.graph $graphdir/to-keep.graph &&
+ test-tool chmtime =1546362000 $graphdir/to-delete.graph &&
+ test-tool chmtime =1546362001 $graphdir/to-keep.graph &&
+ git commit-graph write --reachable --split --size-multiple=10 \
+ --expire-time="2019-01-01 12:00 -05:00" &&
test_line_count = 1 $graphdir/commit-graph-chain &&
+ test_path_is_missing $graphdir/to-delete.graph &&
+ test_path_is_file $graphdir/to-keep.graph &&
ls $graphdir/graph-*.graph >graph-files &&
test_line_count = 3 graph-files
) &&
@@ -351,4 +358,47 @@ test_expect_success 'split across alternate where alternate is not split' '
test_cmp commit-graph .git/objects/info/commit-graph
'
+test_expect_success '--split=no-merge always writes an incremental' '
+ test_when_finished rm -rf a b &&
+ rm -rf $graphdir $infodir/commit-graph &&
+ git reset --hard commits/2 &&
+ git rev-list HEAD~1 >a &&
+ git rev-list HEAD >b &&
+ git commit-graph write --split --stdin-commits <a &&
+ git commit-graph write --split=no-merge --stdin-commits <b &&
+ test_line_count = 2 $graphdir/commit-graph-chain
+'
+
+test_expect_success '--split=replace replaces the chain' '
+ rm -rf $graphdir $infodir/commit-graph &&
+ git reset --hard commits/3 &&
+ git rev-list -1 HEAD~2 >a &&
+ git rev-list -1 HEAD~1 >b &&
+ git rev-list -1 HEAD >c &&
+ git commit-graph write --split=no-merge --stdin-commits <a &&
+ git commit-graph write --split=no-merge --stdin-commits <b &&
+ git commit-graph write --split=no-merge --stdin-commits <c &&
+ test_line_count = 3 $graphdir/commit-graph-chain &&
+ git commit-graph write --stdin-commits --split=replace <b &&
+ test_path_is_missing $infodir/commit-graph &&
+ test_path_is_file $graphdir/commit-graph-chain &&
+ ls $graphdir/graph-*.graph >graph-files &&
+ test_line_count = 1 graph-files &&
+ verify_chain_files_exist $graphdir &&
+ graph_read_expect 2
+'
+
+test_expect_success ULIMIT_FILE_DESCRIPTORS 'handles file descriptor exhaustion' '
+ git init ulimit &&
+ (
+ cd ulimit &&
+ for i in $(test_seq 64)
+ do
+ test_commit $i &&
+ test_might_fail run_with_limited_open_files git commit-graph write \
+ --split=no-merge --reachable || return 1
+ done
+ )
+'
+
test_done
diff --git a/t/t5504-fetch-receive-strict.sh b/t/t5504-fetch-receive-strict.sh
index 645b4c78d3..a32efe2b6c 100755
--- a/t/t5504-fetch-receive-strict.sh
+++ b/t/t5504-fetch-receive-strict.sh
@@ -65,6 +65,7 @@ test_expect_success 'fetch with transfer.fsckobjects' '
cat >exp <<EOF
To dst
! refs/heads/master:refs/heads/test [remote rejected] (missing necessary objects)
+Done
EOF
test_expect_success 'push without strict' '
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index 9ff041a093..9c6218f568 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -1066,6 +1066,7 @@ test_expect_success 'push --porcelain rejected' '
echo >.git/foo "To testrepo" &&
echo >>.git/foo "! refs/heads/master:refs/heads/master [remote rejected] (branch is currently checked out)" &&
+ echo >>.git/foo "Done" &&
test_must_fail git push >.git/bar --porcelain testrepo refs/heads/master:refs/heads/master &&
test_cmp .git/foo .git/bar
diff --git a/t/t5520-pull.sh b/t/t5520-pull.sh
index 2f86fca042..37535d63a9 100755
--- a/t/t5520-pull.sh
+++ b/t/t5520-pull.sh
@@ -10,11 +10,13 @@ modify () {
}
test_pull_autostash () {
+ expect_parent_num="$1" &&
+ shift &&
git reset --hard before-rebase &&
echo dirty >new_file &&
git add new_file &&
git pull "$@" . copy &&
- test_cmp_rev HEAD^ copy &&
+ test_cmp_rev HEAD^"$expect_parent_num" copy &&
echo dirty >expect &&
test_cmp expect new_file &&
echo "modified again" >expect &&
@@ -26,7 +28,7 @@ test_pull_autostash_fail () {
echo dirty >new_file &&
git add new_file &&
test_must_fail git pull "$@" . copy 2>err &&
- test_i18ngrep "uncommitted changes." err
+ test_i18ngrep "\(uncommitted changes.\)\|\(overwritten by merge:\)" err
}
test_expect_success setup '
@@ -369,22 +371,22 @@ test_expect_success '--rebase fails with multiple branches' '
test_expect_success 'pull --rebase succeeds with dirty working directory and rebase.autostash set' '
test_config rebase.autostash true &&
- test_pull_autostash --rebase
+ test_pull_autostash 1 --rebase
'
test_expect_success 'pull --rebase --autostash & rebase.autostash=true' '
test_config rebase.autostash true &&
- test_pull_autostash --rebase --autostash
+ test_pull_autostash 1 --rebase --autostash
'
test_expect_success 'pull --rebase --autostash & rebase.autostash=false' '
test_config rebase.autostash false &&
- test_pull_autostash --rebase --autostash
+ test_pull_autostash 1 --rebase --autostash
'
test_expect_success 'pull --rebase --autostash & rebase.autostash unset' '
test_unconfig rebase.autostash &&
- test_pull_autostash --rebase --autostash
+ test_pull_autostash 1 --rebase --autostash
'
test_expect_success 'pull --rebase --no-autostash & rebase.autostash=true' '
@@ -402,13 +404,40 @@ test_expect_success 'pull --rebase --no-autostash & rebase.autostash unset' '
test_pull_autostash_fail --rebase --no-autostash
'
-for i in --autostash --no-autostash
-do
- test_expect_success "pull $i (without --rebase) is illegal" '
- test_must_fail git pull $i . copy 2>err &&
- test_i18ngrep "only valid with --rebase" err
- '
-done
+test_expect_success 'pull succeeds with dirty working directory and merge.autostash set' '
+ test_config merge.autostash true &&
+ test_pull_autostash 2
+'
+
+test_expect_success 'pull --autostash & merge.autostash=true' '
+ test_config merge.autostash true &&
+ test_pull_autostash 2 --autostash
+'
+
+test_expect_success 'pull --autostash & merge.autostash=false' '
+ test_config merge.autostash false &&
+ test_pull_autostash 2 --autostash
+'
+
+test_expect_success 'pull --autostash & merge.autostash unset' '
+ test_unconfig merge.autostash &&
+ test_pull_autostash 2 --autostash
+'
+
+test_expect_success 'pull --no-autostash & merge.autostash=true' '
+ test_config merge.autostash true &&
+ test_pull_autostash_fail --no-autostash
+'
+
+test_expect_success 'pull --no-autostash & merge.autostash=false' '
+ test_config merge.autostash false &&
+ test_pull_autostash_fail --no-autostash
+'
+
+test_expect_success 'pull --no-autostash & merge.autostash unset' '
+ test_unconfig merge.autostash &&
+ test_pull_autostash_fail --no-autostash
+'
test_expect_success 'pull.rebase' '
git reset --hard before-rebase &&
@@ -422,7 +451,7 @@ test_expect_success 'pull.rebase' '
test_expect_success 'pull --autostash & pull.rebase=true' '
test_config pull.rebase true &&
- test_pull_autostash --autostash
+ test_pull_autostash 1 --autostash
'
test_expect_success 'pull --no-autostash & pull.rebase=true' '
diff --git a/t/t5537-fetch-shallow.sh b/t/t5537-fetch-shallow.sh
index b57209c84f..d427a2d7f7 100755
--- a/t/t5537-fetch-shallow.sh
+++ b/t/t5537-fetch-shallow.sh
@@ -16,7 +16,7 @@ test_expect_success 'setup' '
commit 3 &&
commit 4 &&
git config --global transfer.fsckObjects true &&
- test_oid_cache <<-EOF
+ test_oid_cache <<-\EOF
perl sha1:s/0034shallow %s/0036unshallow %s/
perl sha256:s/004cshallow %s/004eunshallow %s/
EOF
@@ -25,10 +25,7 @@ test_expect_success 'setup' '
test_expect_success 'setup shallow clone' '
git clone --no-local --depth=2 .git shallow &&
git --git-dir=shallow/.git log --format=%s >actual &&
- cat <<EOF >expect &&
-4
-3
-EOF
+ test_write_lines 4 3 >expect &&
test_cmp expect actual
'
@@ -38,10 +35,7 @@ test_expect_success 'clone from shallow clone' '
cd shallow2 &&
git fsck &&
git log --format=%s >actual &&
- cat <<EOF >expect &&
-4
-3
-EOF
+ test_write_lines 4 3 >expect &&
test_cmp expect actual
)
'
@@ -56,11 +50,7 @@ test_expect_success 'fetch from shallow clone' '
git fetch &&
git fsck &&
git log --format=%s origin/master >actual &&
- cat <<EOF >expect &&
-5
-4
-3
-EOF
+ test_write_lines 5 4 3 >expect &&
test_cmp expect actual
)
'
@@ -75,10 +65,7 @@ test_expect_success 'fetch --depth from shallow clone' '
git fetch --depth=2 &&
git fsck &&
git log --format=%s origin/master >actual &&
- cat <<EOF >expect &&
-6
-5
-EOF
+ test_write_lines 6 5 >expect &&
test_cmp expect actual
)
'
@@ -89,12 +76,7 @@ test_expect_success 'fetch --unshallow from shallow clone' '
git fetch --unshallow &&
git fsck &&
git log --format=%s origin/master >actual &&
- cat <<EOF >expect &&
-6
-5
-4
-3
-EOF
+ test_write_lines 6 5 4 3 >expect &&
test_cmp expect actual
)
'
@@ -111,15 +93,10 @@ test_expect_success 'fetch something upstream has but hidden by clients shallow
git fetch ../.git +refs/heads/master:refs/remotes/top/master &&
git fsck &&
git log --format=%s top/master >actual &&
- cat <<EOF >expect &&
-add-1-back
-4
-3
-EOF
+ test_write_lines add-1-back 4 3 >expect &&
test_cmp expect actual
) &&
git --git-dir=shallow2/.git cat-file blob $(echo 1|git hash-object --stdin) >/dev/null
-
'
test_expect_success 'fetch that requires changes in .git/shallow is filtered' '
@@ -133,14 +110,10 @@ test_expect_success 'fetch that requires changes in .git/shallow is filtered' '
cd notshallow &&
git fetch ../shallow/.git refs/heads/*:refs/remotes/shallow/* &&
git for-each-ref --format="%(refname)" >actual.refs &&
- cat <<EOF >expect.refs &&
-refs/remotes/shallow/no-shallow
-EOF
+ echo refs/remotes/shallow/no-shallow >expect.refs &&
test_cmp expect.refs actual.refs &&
git log --format=%s shallow/no-shallow >actual &&
- cat <<EOF >expect &&
-no-shallow
-EOF
+ echo no-shallow >expect &&
test_cmp expect actual
)
'
@@ -158,21 +131,44 @@ test_expect_success 'fetch --update-shallow' '
git fetch --update-shallow ../shallow/.git refs/heads/*:refs/remotes/shallow/* &&
git fsck &&
git for-each-ref --sort=refname --format="%(refname)" >actual.refs &&
- cat <<EOF >expect.refs &&
-refs/remotes/shallow/master
-refs/remotes/shallow/no-shallow
-refs/tags/heavy-tag
-refs/tags/light-tag
-EOF
+ cat <<-\EOF >expect.refs &&
+ refs/remotes/shallow/master
+ refs/remotes/shallow/no-shallow
+ refs/tags/heavy-tag
+ refs/tags/light-tag
+ EOF
+ test_cmp expect.refs actual.refs &&
+ git log --format=%s shallow/master >actual &&
+ test_write_lines 7 6 5 4 3 >expect &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'fetch --update-shallow (with fetch.writeCommitGraph)' '
+ (
+ cd shallow &&
+ git checkout master &&
+ commit 8 &&
+ git tag -m foo heavy-tag-for-graph HEAD^ &&
+ git tag light-tag-for-graph HEAD^:tracked
+ ) &&
+ test_config -C notshallow fetch.writeCommitGraph true &&
+ (
+ cd notshallow &&
+ git fetch --update-shallow ../shallow/.git refs/heads/*:refs/remotes/shallow/* &&
+ git fsck &&
+ git for-each-ref --sort=refname --format="%(refname)" >actual.refs &&
+ cat <<-EOF >expect.refs &&
+ refs/remotes/shallow/master
+ refs/remotes/shallow/no-shallow
+ refs/tags/heavy-tag
+ refs/tags/heavy-tag-for-graph
+ refs/tags/light-tag
+ refs/tags/light-tag-for-graph
+ EOF
test_cmp expect.refs actual.refs &&
git log --format=%s shallow/master >actual &&
- cat <<EOF >expect &&
-7
-6
-5
-4
-3
-EOF
+ test_write_lines 8 7 6 5 4 3 >expect &&
test_cmp expect actual
)
'
@@ -183,10 +179,7 @@ test_expect_success POSIXPERM,SANITY 'shallow fetch from a read-only repo' '
find read-only.git -print | xargs chmod -w &&
git clone --no-local --depth=2 read-only.git from-read-only &&
git --git-dir=from-read-only/.git log --format=%s >actual &&
- cat >expect <<EOF &&
-add-1-back
-4
-EOF
+ test_write_lines add-1-back 4 >expect &&
test_cmp expect actual
'
diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh
index 23be8ce92d..afc680d5e3 100755
--- a/t/t5541-http-push-smart.sh
+++ b/t/t5541-http-push-smart.sh
@@ -177,6 +177,9 @@ test_expect_success 'push (chunked)' '
test $HEAD = $(git rev-parse --verify HEAD))
'
+## References of remote: atomic1(1) master(2) collateral(2) other(2)
+## References of local : atomic2(2) master(1) collateral(3) other(2) collateral1(3) atomic(1)
+## Atomic push : master(1) collateral(3) atomic(1)
test_expect_success 'push --atomic also prevents branch creation, reports collateral' '
# Setup upstream repo - empty for now
d=$HTTPD_DOCUMENT_ROOT_PATH/atomic-branches.git &&
@@ -189,7 +192,8 @@ test_expect_success 'push --atomic also prevents branch creation, reports collat
test_commit atomic2 &&
git branch collateral &&
git branch other &&
- git push "$up" master collateral other &&
+ git push "$up" atomic1 master collateral other &&
+ git tag -d atomic1 &&
# collateral is a valid push, but should be failed by atomic push
git checkout collateral &&
@@ -224,7 +228,11 @@ test_expect_success 'push --atomic also prevents branch creation, reports collat
# the collateral failure refs should be indicated to the user
grep "^ ! .*rejected.* atomic -> atomic .*atomic push failed" output &&
- grep "^ ! .*rejected.* collateral -> collateral .*atomic push failed" output
+ grep "^ ! .*rejected.* collateral -> collateral .*atomic push failed" output &&
+
+ # never report what we do not push
+ ! grep "^ ! .*rejected.* atomic1 " output &&
+ ! grep "^ ! .*rejected.* other " output
'
test_expect_success 'push --atomic fails on server-side errors' '
diff --git a/t/t5543-atomic-push.sh b/t/t5543-atomic-push.sh
index 7079bcf9a0..620c30d58f 100755
--- a/t/t5543-atomic-push.sh
+++ b/t/t5543-atomic-push.sh
@@ -27,6 +27,12 @@ test_refs () {
test_cmp expect actual
}
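+# Reduce a push status report to its "To <url>" and " ! <ref>" lines,
+# squeezing runs of spaces so the expected output stays stable.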
+fmt_status_report () {
+ sed -n \
+ -e "/^To / { s/ */ /g; p; }" \
+ -e "/^ ! / { s/ */ /g; p; }"
+}
+
test_expect_success 'atomic push works for a single branch' '
mk_repo_pair &&
(
@@ -191,4 +197,87 @@ test_expect_success 'atomic push is not advertised if configured' '
test_refs master HEAD@{1}
'
+# References in upstream : master(1) one(1) foo(1)
+# References in workbench: master(2) foo(1) two(2) bar(2)
+# Atomic push : master(2) two(2) bar(2)
+test_expect_success 'atomic push reports (reject by update hook)' '
+ mk_repo_pair &&
+ (
+ cd workbench &&
+ test_commit one &&
+ git branch foo &&
+ git push up master one foo &&
+ git tag -d one
+ ) &&
+ (
+ mkdir -p upstream/.git/hooks &&
+ cat >upstream/.git/hooks/update <<-EOF &&
+ #!/bin/sh
+
+ if test "\$1" = "refs/heads/bar"
+ then
+ echo >&2 "Pusing to branch bar is prohibited"
+ exit 1
+ fi
+ EOF
+ chmod a+x upstream/.git/hooks/update
+ ) &&
+ (
+ cd workbench &&
+ test_commit two &&
+ git branch bar
+ ) &&
+ test_must_fail git -C workbench \
+ push --atomic up master two bar >out 2>&1 &&
+ fmt_status_report <out >actual &&
+ cat >expect <<-EOF &&
+ To ../upstream
+ ! [remote rejected] master -> master (atomic push failure)
+ ! [remote rejected] two -> two (atomic push failure)
+ ! [remote rejected] bar -> bar (hook declined)
+ EOF
+ test_cmp expect actual
+'
+
+# References in upstream : master(1) one(1) foo(1)
+# References in workbench: master(2) foo(1) two(2) bar(2)
+test_expect_success 'atomic push reports (mirror, but reject by update hook)' '
+ (
+ cd workbench &&
+ git remote remove up &&
+ git remote add up ../upstream
+ ) &&
+ test_must_fail git -C workbench \
+ push --atomic --mirror up >out 2>&1 &&
+ fmt_status_report <out >actual &&
+ cat >expect <<-EOF &&
+ To ../upstream
+ ! [remote rejected] master -> master (atomic push failure)
+ ! [remote rejected] one (atomic push failure)
+ ! [remote rejected] bar -> bar (hook declined)
+ ! [remote rejected] two -> two (atomic push failure)
+ EOF
+ test_cmp expect actual
+'
+
+# References in upstream : master(2) one(1) foo(1)
+# References in workbench: master(1) foo(1) two(2) bar(2)
+test_expect_success 'atomic push reports (reject by non-ff)' '
+ rm upstream/.git/hooks/update &&
+ (
+ cd workbench &&
+ git push up master &&
+ git reset --hard HEAD^
+ ) &&
+ test_must_fail git -C workbench \
+ push --atomic up master foo bar >out 2>&1 &&
+ fmt_status_report <out >actual &&
+ cat >expect <<-EOF &&
+ To ../upstream
+ ! [rejected] master -> master (non-fast-forward)
+ ! [rejected] bar -> bar (atomic push failed)
+ EOF
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t5548-push-porcelain.sh b/t/t5548-push-porcelain.sh
new file mode 100755
index 0000000000..1b19b3ef55
--- /dev/null
+++ b/t/t5548-push-porcelain.sh
@@ -0,0 +1,279 @@
+#!/bin/sh
+#
+# Copyright (c) 2020 Jiang Xin
+#
+test_description='Test git push porcelain output'
+
+. ./test-lib.sh
+
+# Create commits in <repo> and assign each commit's oid to shell variables
+# given in the arguments (A, B, and C). E.g.:
+#
+# create_commits_in <repo> A B C
+#
+# NOTE: Never call this function from a subshell, since variable
+# assignments will disappear when the subshell exits.
+create_commits_in () {
+ repo="$1" &&
+ if ! parent=$(git -C "$repo" rev-parse HEAD^{} --)
+ then
+ parent=
+ fi &&
+ T=$(git -C "$repo" write-tree) &&
+ shift &&
+ while test $# -gt 0
+ do
+ name=$1 &&
+ test_tick &&
+ if test -z "$parent"
+ then
+ oid=$(echo $name | git -C "$repo" commit-tree $T)
+ else
+ oid=$(echo $name | git -C "$repo" commit-tree -p $parent $T)
+ fi &&
+ eval $name=$oid &&
+ parent=$oid &&
+ shift ||
+ return 1
+ done &&
+ git -C "$repo" update-ref refs/heads/master $oid
+}
+
+# Format the output of git-push, git-show-ref and other commands to produce
+# user-friendly and stable text. This makes it easy to prepare the expected
+# text without having to worry about future changes to commit IDs or to the
+# spacing of the output.
+make_user_friendly_and_stable_output () {
+ sed \
+ -e "s/ *\$//" \
+ -e "s/ */ /g" \
+ -e "s/ / /g" \
+ -e "s/$A/<COMMIT-A>/g" \
+ -e "s/$B/<COMMIT-B>/g" \
+ -e "s/$ZERO_OID/<ZERO-OID>/g" \
+ -e "s/$(echo $A | cut -c1-7)[0-9a-f]*/<OID-A>/g" \
+ -e "s/$(echo $B | cut -c1-7)[0-9a-f]*/<OID-B>/g" \
+ -e "s#To $URL_PREFIX/upstream.git#To <URL/of/upstream.git>#"
+}
+
+setup_upstream_and_workbench () {
+ # Upstream after setup : master(B) foo(A) bar(A) baz(A)
+ # Workbench after setup : master(A)
+ test_expect_success "setup upstream repository and workbench" '
+ rm -rf upstream.git workbench &&
+ git init --bare upstream.git &&
+ git init workbench &&
+ create_commits_in workbench A B &&
+ (
+ cd workbench &&
+ # Try to make a stable fixed width for the abbreviated commit ID;
+ # this fixed-width oid will be replaced with "<OID>".
+ git config core.abbrev 7 &&
+ git remote add origin ../upstream.git &&
+ git update-ref refs/heads/master $A &&
+ git push origin \
+ $B:refs/heads/master \
+ $A:refs/heads/foo \
+ $A:refs/heads/bar \
+ $A:refs/heads/baz
+ ) &&
+ git -C "workbench" config advice.pushUpdateRejected false &&
+ upstream=upstream.git
+ '
+}
+
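+# Run the porcelain push tests over the transport named by $1 ("file" or
+# "http"); PROTOCOL labels the tests and URL_PREFIX normalizes the remote URL.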
+run_git_push_porcelain_output_test() {
+ case $1 in
+ http)
+ PROTOCOL="HTTP protocol"
+ URL_PREFIX="http://.*"
+ ;;
+ file)
+ PROTOCOL="builtin protocol"
+ URL_PREFIX="\.\."
+ ;;
+ esac
+
+ # Refs of upstream : master(B) foo(A) bar(A) baz(A)
+ # Refs of workbench: master(A) baz(A) next(A)
+ # git-push : master(A) NULL (B) baz(A) next(A)
+ test_expect_success "porcelain output of successful git-push ($PROTOCOL)" '
+ (
+ cd workbench &&
+ git update-ref refs/heads/master $A &&
+ git update-ref refs/heads/baz $A &&
+ git update-ref refs/heads/next $A &&
+ git push --porcelain --force origin \
+ master \
+ :refs/heads/foo \
+ $B:bar \
+ baz \
+ next
+ ) >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ To <URL/of/upstream.git>
+ = refs/heads/baz:refs/heads/baz [up to date]
+ <COMMIT-B>:refs/heads/bar <OID-A>..<OID-B>
+ - :refs/heads/foo [deleted]
+ + refs/heads/master:refs/heads/master <OID-B>...<OID-A> (forced update)
+ * refs/heads/next:refs/heads/next [new branch]
+ Done
+ EOF
+ test_cmp expect actual &&
+
+ git -C "$upstream" show-ref >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ <COMMIT-B> refs/heads/bar
+ <COMMIT-A> refs/heads/baz
+ <COMMIT-A> refs/heads/master
+ <COMMIT-A> refs/heads/next
+ EOF
+ test_cmp expect actual
+ '
+
+ # Refs of upstream : master(A) bar(B) baz(A) next(A)
+ # Refs of workbench: master(B) bar(A) baz(A) next(A)
+ # git-push : master(B) bar(A) NULL next(A)
+ test_expect_success "atomic push failed ($PROTOCOL)" '
+ (
+ cd workbench &&
+ git update-ref refs/heads/master $B &&
+ git update-ref refs/heads/bar $A &&
+ test_must_fail git push --atomic --porcelain origin \
+ master \
+ bar \
+ :baz \
+ next
+ ) >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ To <URL/of/upstream.git>
+ = refs/heads/next:refs/heads/next [up to date]
+ ! refs/heads/bar:refs/heads/bar [rejected] (non-fast-forward)
+ ! (delete):refs/heads/baz [rejected] (atomic push failed)
+ ! refs/heads/master:refs/heads/master [rejected] (atomic push failed)
+ Done
+ EOF
+ test_cmp expect actual &&
+
+ git -C "$upstream" show-ref >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ <COMMIT-B> refs/heads/bar
+ <COMMIT-A> refs/heads/baz
+ <COMMIT-A> refs/heads/master
+ <COMMIT-A> refs/heads/next
+ EOF
+ test_cmp expect actual
+ '
+ test_expect_success "prepare pre-receive hook ($PROTOCOL)" '
+ write_script "$upstream/hooks/pre-receive" <<-EOF
+ exit 1
+ EOF
+ '
+
+ # Refs of upstream : master(A) bar(B) baz(A) next(A)
+ # Refs of workbench: master(B) bar(A) baz(A) next(A)
+ # git-push : master(B) bar(A) NULL next(A)
+ test_expect_success "pre-receive hook declined ($PROTOCOL)" '
+ (
+ cd workbench &&
+ git update-ref refs/heads/master $B &&
+ git update-ref refs/heads/bar $A &&
+ test_must_fail git push --porcelain --force origin \
+ master \
+ bar \
+ :baz \
+ next
+ ) >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ To <URL/of/upstream.git>
+ = refs/heads/next:refs/heads/next [up to date]
+ ! refs/heads/bar:refs/heads/bar [remote rejected] (pre-receive hook declined)
+ ! :refs/heads/baz [remote rejected] (pre-receive hook declined)
+ ! refs/heads/master:refs/heads/master [remote rejected] (pre-receive hook declined)
+ Done
+ EOF
+ test_cmp expect actual &&
+
+ git -C "$upstream" show-ref >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ <COMMIT-B> refs/heads/bar
+ <COMMIT-A> refs/heads/baz
+ <COMMIT-A> refs/heads/master
+ <COMMIT-A> refs/heads/next
+ EOF
+ test_cmp expect actual
+ '
+
+ test_expect_success "remove pre-receive hook ($PROTOCOL)" '
+ rm "$upstream/hooks/pre-receive"
+ '
+
+ # Refs of upstream : master(A) bar(B) baz(A) next(A)
+ # Refs of workbench: master(B) bar(A) baz(A) next(A)
+ # git-push : master(B) bar(A) NULL next(A)
+ test_expect_success "non-fastforward push ($PROTOCOL)" '
+ (
+ cd workbench &&
+ test_must_fail git push --porcelain origin \
+ master \
+ bar \
+ :baz \
+ next
+ ) >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ To <URL/of/upstream.git>
+ = refs/heads/next:refs/heads/next [up to date]
+ - :refs/heads/baz [deleted]
+ refs/heads/master:refs/heads/master <OID-A>..<OID-B>
+ ! refs/heads/bar:refs/heads/bar [rejected] (non-fast-forward)
+ Done
+ EOF
+ test_cmp expect actual &&
+
+ git -C "$upstream" show-ref >out &&
+ make_user_friendly_and_stable_output <out >actual &&
+ cat >expect <<-EOF &&
+ <COMMIT-B> refs/heads/bar
+ <COMMIT-B> refs/heads/master
+ <COMMIT-A> refs/heads/next
+ EOF
+ test_cmp expect actual
+ '
+}
+
+# Initialize the upstream repository and local workbench.
+setup_upstream_and_workbench
+
+# Run git-push porcelain test on builtin protocol
+run_git_push_porcelain_output_test file
+
+ROOT_PATH="$PWD"
+. "$TEST_DIRECTORY"/lib-gpg.sh
+. "$TEST_DIRECTORY"/lib-httpd.sh
+. "$TEST_DIRECTORY"/lib-terminal.sh
+start_httpd
+
+# Re-initialize the upstream repository and local workbench.
+setup_upstream_and_workbench
+
+test_expect_success "setup for http" '
+ git -C upstream.git config http.receivepack true &&
+ upstream="$HTTPD_DOCUMENT_ROOT_PATH/upstream.git" &&
+ mv upstream.git "$upstream" &&
+
+ git -C workbench remote set-url origin $HTTPD_URL/smart/upstream.git
+'
+
+setup_askpass_helper
+
+# Run git-push porcelain test on HTTP protocol
+run_git_push_porcelain_output_test http
+
+test_done
diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh
index 8f0d81a27e..88002b24af 100755
--- a/t/t5616-partial-clone.sh
+++ b/t/t5616-partial-clone.sh
@@ -49,7 +49,7 @@ test_expect_success 'do partial clone 1' '
test_expect_success 'verify that .promisor file contains refs fetched' '
ls pc1/.git/objects/pack/pack-*.promisor >promisorlist &&
test_line_count = 1 promisorlist &&
- git -C srv.bare rev-list HEAD >headhash &&
+ git -C srv.bare rev-parse --verify HEAD >headhash &&
grep "$(cat headhash) HEAD" $(cat promisorlist) &&
grep "$(cat headhash) refs/heads/master" $(cat promisorlist)
'
diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh
index 7fba3063bf..a34460f7d8 100755
--- a/t/t5703-upload-pack-ref-in-want.sh
+++ b/t/t5703-upload-pack-ref-in-want.sh
@@ -13,10 +13,7 @@ get_actual_refs () {
}
get_actual_commits () {
- sed -n -e '/packfile/,/0000/{
- /packfile/d
- p
- }' <out | test-tool pkt-line unpack-sideband >o.pack &&
+ test-tool pkt-line unpack-sideband <out >o.pack &&
git index-pack o.pack &&
git verify-pack -v o.idx >objs &&
grep commit objs | cut -d" " -f1 | sort >actual_commits
diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh
index 821a0c88cf..1313142564 100755
--- a/t/t6030-bisect-porcelain.sh
+++ b/t/t6030-bisect-porcelain.sh
@@ -148,7 +148,7 @@ test_expect_success 'bisect start: no ".git/BISECT_START" created if junk rev' '
test_must_fail git bisect start $HASH4 foo -- &&
git branch > branch.output &&
grep "* other" branch.output > /dev/null &&
- test_must_fail test -e .git/BISECT_START
+ test_path_is_missing .git/BISECT_START
'
test_expect_success 'bisect start: existing ".git/BISECT_START" not modified if junk rev' '
@@ -166,7 +166,7 @@ test_expect_success 'bisect start: no ".git/BISECT_START" if mistaken rev' '
test_must_fail git bisect start $HASH1 $HASH4 -- &&
git branch > branch.output &&
grep "* other" branch.output > /dev/null &&
- test_must_fail test -e .git/BISECT_START
+ test_path_is_missing .git/BISECT_START
'
test_expect_success 'bisect start: no ".git/BISECT_START" if checkout error' '
@@ -175,7 +175,7 @@ test_expect_success 'bisect start: no ".git/BISECT_START" if checkout error' '
git branch &&
git branch > branch.output &&
grep "* other" branch.output > /dev/null &&
- test_must_fail test -e .git/BISECT_START &&
+ test_path_is_missing .git/BISECT_START &&
test -z "$(git for-each-ref "refs/bisect/*")" &&
git checkout HEAD hello
'
@@ -485,7 +485,7 @@ test_expect_success 'optimized merge base checks' '
git bisect bad &&
git bisect good "$A_HASH" > my_bisect_log4.txt &&
test_i18ngrep "merge base must be tested" my_bisect_log4.txt &&
- test_must_fail test -f ".git/BISECT_ANCESTORS_OK"
+ test_path_is_missing ".git/BISECT_ANCESTORS_OK"
'
# This creates another side branch called "parallel" with some files
diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh
index 190ae149cf..428cff9cf3 100755
--- a/t/t7063-status-untracked-cache.sh
+++ b/t/t7063-status-untracked-cache.sh
@@ -18,7 +18,7 @@ GIT_FORCE_UNTRACKED_CACHE=true
export GIT_FORCE_UNTRACKED_CACHE
sync_mtime () {
- find . -type d -ls >/dev/null
+ find . -type d -exec ls -ld {} + >/dev/null
}
avoid_racy() {
@@ -30,6 +30,30 @@ status_is_clean() {
test_must_be_empty ../status.actual
}
+# Ignore_Untracked_Cache, abbreviated to 3 letters so that people can
+# compare commands side-by-side, e.g.
+# iuc status --porcelain >expect &&
+# git status --porcelain >actual &&
+# test_cmp expect actual
+iuc () {
+ git ls-files -s >../current-index-entries
+ git ls-files -t | sed -ne s/^S.//p >../current-sparse-entries
+
+ GIT_INDEX_FILE=.git/tmp_index
+ export GIT_INDEX_FILE
+ git update-index --index-info <../current-index-entries
+ git update-index --skip-worktree $(cat ../current-sparse-entries)
+
+ git -c core.untrackedCache=false "$@"
+ ret=$?
+
+ rm ../current-index-entries
+ rm $GIT_INDEX_FILE
+ unset GIT_INDEX_FILE
+
+ return $ret
+}
+
test_lazy_prereq UNTRACKED_CACHE '
{ git update-index --test-untracked-cache; ret=$?; } &&
test $ret -ne 1
@@ -95,6 +119,8 @@ test_expect_success 'status first time (empty cache)' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 3
@@ -115,6 +141,8 @@ test_expect_success 'status second time (fully populated cache)' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -136,6 +164,7 @@ test_expect_success 'modify in root directory, one dir invalidation' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
A done/one
A one
@@ -145,6 +174,7 @@ A two
?? four
?? three
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -183,6 +213,7 @@ test_expect_success 'new .gitignore invalidates recursively' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
A done/one
A one
@@ -192,6 +223,7 @@ A two
?? dtwo/
?? three
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -230,6 +262,7 @@ test_expect_success 'new info/exclude invalidates everything' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
A done/one
A one
@@ -237,6 +270,7 @@ A two
?? .gitignore
?? dtwo/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -286,6 +320,7 @@ test_expect_success 'status after the move' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
A done/one
A one
@@ -293,6 +328,7 @@ A one
?? dtwo/
?? two
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -343,6 +379,7 @@ test_expect_success 'status after the move' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
A done/one
A one
@@ -350,6 +387,7 @@ A two
?? .gitignore
?? dtwo/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -390,10 +428,12 @@ test_expect_success 'status after commit' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
?? .gitignore
?? dtwo/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -447,12 +487,14 @@ test_expect_success 'test sparse status with untracked cache' '
avoid_racy &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../status.actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
M done/two
?? .gitignore
?? done/five
?? dtwo/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../status.actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -487,12 +529,14 @@ test_expect_success 'test sparse status again with untracked cache' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../status.actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
M done/two
?? .gitignore
?? done/five
?? dtwo/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../status.actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -514,6 +558,7 @@ test_expect_success 'test sparse status with untracked cache and subdir' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../status.actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
M done/two
?? .gitignore
@@ -521,6 +566,7 @@ test_expect_success 'test sparse status with untracked cache and subdir' '
?? done/sub/
?? dtwo/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../status.actual &&
cat >../trace.expect <<EOF &&
node creation: 2
@@ -560,6 +606,8 @@ test_expect_success 'test sparse status again with untracked cache and subdir' '
: >../trace &&
GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace" \
git status --porcelain >../status.actual &&
+ iuc status --porcelain >../status.iuc &&
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../status.actual &&
cat >../trace.expect <<EOF &&
node creation: 0
@@ -573,6 +621,7 @@ EOF
test_expect_success 'move entry in subdir from untracked to cached' '
git add dtwo/two &&
git status --porcelain >../status.actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
M done/two
A dtwo/two
@@ -580,12 +629,14 @@ A dtwo/two
?? done/five
?? done/sub/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../status.actual
'
test_expect_success 'move entry in subdir from cached to untracked' '
git rm --cached dtwo/two &&
git status --porcelain >../status.actual &&
+ iuc status --porcelain >../status.iuc &&
cat >../status.expect <<EOF &&
M done/two
?? .gitignore
@@ -593,6 +644,7 @@ test_expect_success 'move entry in subdir from cached to untracked' '
?? done/sub/
?? dtwo/
EOF
+ test_cmp ../status.expect ../status.iuc &&
test_cmp ../status.expect ../status.actual
'
diff --git a/t/t7408-submodule-reference.sh b/t/t7408-submodule-reference.sh
index 34ac28c056..a3892f494b 100755
--- a/t/t7408-submodule-reference.sh
+++ b/t/t7408-submodule-reference.sh
@@ -122,8 +122,8 @@ test_expect_success 'missing submodule alternate fails clone and submodule updat
# update of the submodule succeeds
test_must_fail git submodule update --init &&
# and we have no alternates:
- test_must_fail test_alternate_is_used .git/modules/sub/objects/info/alternates sub &&
- test_must_fail test_path_is_file sub/file1
+ test_path_is_missing .git/modules/sub/objects/info/alternates &&
+ test_path_is_missing sub/file1
)
'
@@ -137,7 +137,7 @@ test_expect_success 'ignoring missing submodule alternates passes clone and subm
# update of the submodule succeeds
git submodule update --init &&
# and we have no alternates:
- test_must_fail test_alternate_is_used .git/modules/sub/objects/info/alternates sub &&
+ test_path_is_missing .git/modules/sub/objects/info/alternates &&
test_path_is_file sub/file1
)
'
@@ -182,7 +182,7 @@ check_that_two_of_three_alternates_are_used() {
# immediate submodule has alternate:
test_alternate_is_used .git/modules/subwithsub/objects/info/alternates subwithsub &&
# but nested submodule has no alternate:
- test_must_fail test_alternate_is_used .git/modules/subwithsub/modules/sub/objects/info/alternates subwithsub/sub
+ test_path_is_missing .git/modules/subwithsub/modules/sub/objects/info/alternates
}
diff --git a/t/t7508-status.sh b/t/t7508-status.sh
index 482ce3510e..8e969f3e36 100755
--- a/t/t7508-status.sh
+++ b/t/t7508-status.sh
@@ -1471,7 +1471,7 @@ test_expect_success '"status.branch=true" same as "-b"' '
test_expect_success '"status.branch=true" different from "--no-branch"' '
git status -s --no-branch >expected_nobranch &&
git -c status.branch=true status -s >actual &&
- test_must_fail test_cmp expected_nobranch actual
+ ! test_cmp expected_nobranch actual
'
test_expect_success '"status.branch=true" weaker than "--no-branch"' '
diff --git a/t/t7600-merge.sh b/t/t7600-merge.sh
index 132608879a..5883a6adc3 100755
--- a/t/t7600-merge.sh
+++ b/t/t7600-merge.sh
@@ -29,15 +29,19 @@ Testing basic merge operations/option parsing.
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-gpg.sh
-printf '%s\n' 1 2 3 4 5 6 7 8 9 >file
-printf '%s\n' '1 X' 2 3 4 5 6 7 8 9 >file.1
-printf '%s\n' 1 2 3 4 '5 X' 6 7 8 9 >file.5
-printf '%s\n' 1 2 3 4 5 6 7 8 '9 X' >file.9
-printf '%s\n' 1 2 3 4 5 6 7 8 '9 Y' >file.9y
-printf '%s\n' '1 X' 2 3 4 5 6 7 8 9 >result.1
-printf '%s\n' '1 X' 2 3 4 '5 X' 6 7 8 9 >result.1-5
-printf '%s\n' '1 X' 2 3 4 '5 X' 6 7 8 '9 X' >result.1-5-9
-printf '%s\n' 1 2 3 4 5 6 7 8 '9 Z' >result.9z
+test_write_lines 1 2 3 4 5 6 7 8 9 >file
+cp file file.orig
+test_write_lines '1 X' 2 3 4 5 6 7 8 9 >file.1
+test_write_lines 1 2 '3 X' 4 5 6 7 8 9 >file.3
+test_write_lines 1 2 3 4 '5 X' 6 7 8 9 >file.5
+test_write_lines 1 2 3 4 5 6 7 8 '9 X' >file.9
+test_write_lines 1 2 3 4 5 6 7 8 '9 Y' >file.9y
+test_write_lines '1 X' 2 3 4 5 6 7 8 9 >result.1
+test_write_lines '1 X' 2 3 4 '5 X' 6 7 8 9 >result.1-5
+test_write_lines '1 X' 2 3 4 5 6 7 8 '9 X' >result.1-9
+test_write_lines '1 X' 2 3 4 '5 X' 6 7 8 '9 X' >result.1-5-9
+test_write_lines '1 X' 2 '3 X' 4 '5 X' 6 7 8 '9 X' >result.1-3-5-9
+test_write_lines 1 2 3 4 5 6 7 8 '9 Z' >result.9z
create_merge_msgs () {
echo "Merge tag 'c2'" >msg.1-5 &&
@@ -81,7 +85,7 @@ verify_head () {
}
verify_parents () {
- printf '%s\n' "$@" >parents.expected &&
+ test_write_lines "$@" >parents.expected &&
>parents.actual &&
i=1 &&
while test $i -le $#
@@ -95,7 +99,7 @@ verify_parents () {
}
verify_mergeheads () {
- printf '%s\n' "$@" >mergehead.expected &&
+ test_write_lines "$@" >mergehead.expected &&
while read sha1 rest
do
git rev-parse $sha1
@@ -675,6 +679,134 @@ test_expect_success 'refresh the index before merging' '
git merge c3
'
+test_expect_success 'merge with --autostash' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9 &&
+ git merge --autostash c2 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ git show HEAD:file >merge-result &&
+ test_cmp result.1-5 merge-result &&
+ test_cmp result.1-5-9 file
+'
+
+test_expect_success 'merge with merge.autoStash' '
+ test_config merge.autoStash true &&
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9 &&
+ git merge c2 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ git show HEAD:file >merge-result &&
+ test_cmp result.1-5 merge-result &&
+ test_cmp result.1-5-9 file
+'
+
+test_expect_success 'fast-forward merge with --autostash' '
+ git reset --hard c0 &&
+ git merge-file file file.orig file.5 &&
+ git merge --autostash c1 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ test_cmp result.1-5 file
+'
+
+test_expect_success 'octopus merge with --autostash' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.3 &&
+ git merge --autostash c2 c3 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ git show HEAD:file >merge-result &&
+ test_cmp result.1-5-9 merge-result &&
+ test_cmp result.1-3-5-9 file
+'
+
+test_expect_success 'conflicted merge with --autostash, --abort restores stash' '
+ git reset --hard c3 &&
+ cp file.1 file &&
+ test_must_fail git merge --autostash c7 &&
+ git merge --abort 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ test_cmp file.1 file
+'
+
+test_expect_success 'completed merge (git commit) with --no-commit and --autostash' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9 &&
+ git diff >expect &&
+ git merge --no-commit --autostash c2 &&
+ git stash show -p MERGE_AUTOSTASH >actual &&
+ test_cmp expect actual &&
+ git commit 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ git show HEAD:file >merge-result &&
+ test_cmp result.1-5 merge-result &&
+ test_cmp result.1-5-9 file
+'
+
+test_expect_success 'completed merge (git merge --continue) with --no-commit and --autostash' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9 &&
+ git diff >expect &&
+ git merge --no-commit --autostash c2 &&
+ git stash show -p MERGE_AUTOSTASH >actual &&
+ test_cmp expect actual &&
+ git merge --continue 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ git show HEAD:file >merge-result &&
+ test_cmp result.1-5 merge-result &&
+ test_cmp result.1-5-9 file
+'
+
+test_expect_success 'aborted merge (merge --abort) with --no-commit and --autostash' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9 &&
+ git diff >expect &&
+ git merge --no-commit --autostash c2 &&
+ git stash show -p MERGE_AUTOSTASH >actual &&
+ test_cmp expect actual &&
+ git merge --abort 2>err &&
+ test_i18ngrep "Applied autostash." err &&
+ git diff >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'aborted merge (reset --hard) with --no-commit and --autostash' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9 &&
+ git diff >expect &&
+ git merge --no-commit --autostash c2 &&
+ git stash show -p MERGE_AUTOSTASH >actual &&
+ test_cmp expect actual &&
+ git reset --hard 2>err &&
+ test_i18ngrep "Autostash exists; creating a new stash entry." err &&
+ git diff --exit-code
+'
+
+test_expect_success 'quit merge with --no-commit and --autostash' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9 &&
+ git diff >expect &&
+ git merge --no-commit --autostash c2 &&
+ git stash show -p MERGE_AUTOSTASH >actual &&
+ test_cmp expect actual &&
+ git diff HEAD >expect &&
+ git merge --quit 2>err &&
+ test_i18ngrep "Autostash exists; creating a new stash entry." err &&
+ git diff HEAD >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'merge with conflicted --autostash changes' '
+ git reset --hard c1 &&
+ git merge-file file file.orig file.9y &&
+ git diff >expect &&
+ test_when_finished "test_might_fail git stash drop" &&
+ git merge --autostash c3 2>err &&
+ test_i18ngrep "Applying autostash resulted in conflicts." err &&
+ git show HEAD:file >merge-result &&
+ test_cmp result.1-9 merge-result &&
+ git stash show -p >actual &&
+ test_cmp expect actual
+'
+
cat >expected.branch <<\EOF
Merge branch 'c5-branch' (early part)
EOF
diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh
index 7d7b396c23..991d5bd9c0 100755
--- a/t/t7810-grep.sh
+++ b/t/t7810-grep.sh
@@ -72,6 +72,11 @@ test_expect_success setup '
# Still a no-op.
function dummy() {}
EOF
+ if test_have_prereq FUNNYNAMES
+ then
+ echo unusual >"\"unusual\" pathname" &&
+ echo unusual >"t/nested \"unusual\" pathname"
+ fi &&
git add . &&
test_tick &&
git commit -m initial
@@ -481,6 +486,48 @@ do
git grep --count -h -e b $H -- ab >actual &&
test_cmp expected actual
'
+
+ test_expect_success FUNNYNAMES "grep $L should quote unusual pathnames" '
+ cat >expected <<-EOF &&
+ ${HC}"\"unusual\" pathname":unusual
+ ${HC}"t/nested \"unusual\" pathname":unusual
+ EOF
+ git grep unusual $H >actual &&
+ test_cmp expected actual
+ '
+
+ test_expect_success FUNNYNAMES "grep $L in subdir should quote unusual relative pathnames" '
+ cat >expected <<-EOF &&
+ ${HC}"nested \"unusual\" pathname":unusual
+ EOF
+ (
+ cd t &&
+ git grep unusual $H
+ ) >actual &&
+ test_cmp expected actual
+ '
+
+ test_expect_success FUNNYNAMES "grep -z $L with unusual pathnames" '
+ cat >expected <<-EOF &&
+ ${HC}"unusual" pathname:unusual
+ ${HC}t/nested "unusual" pathname:unusual
+ EOF
+ git grep -z unusual $H >actual &&
+ tr "\0" ":" <actual >actual-replace-null &&
+ test_cmp expected actual-replace-null
+ '
+
+ test_expect_success FUNNYNAMES "grep -z $L in subdir with unusual relative pathnames" '
+ cat >expected <<-EOF &&
+ ${HC}nested "unusual" pathname:unusual
+ EOF
+ (
+ cd t &&
+ git grep -z unusual $H
+ ) >actual &&
+ tr "\0" ":" <actual >actual-replace-null &&
+ test_cmp expected actual-replace-null
+ '
done
cat >expected <<EOF
diff --git a/t/t9141-git-svn-multiple-branches.sh b/t/t9141-git-svn-multiple-branches.sh
index 8e7f7d68b7..bf168a3645 100755
--- a/t/t9141-git-svn-multiple-branches.sh
+++ b/t/t9141-git-svn-multiple-branches.sh
@@ -90,10 +90,10 @@ test_expect_success 'Multiple branch or tag paths require -d' '
) &&
( cd svn_project &&
svn_cmd up &&
- test_must_fail test -d b_one/Nope &&
- test_must_fail test -d b_two/Nope &&
- test_must_fail test -d tags_A/Tagless &&
- test_must_fail test -d tags_B/Tagless
+ test_path_is_missing b_one/Nope &&
+ test_path_is_missing b_two/Nope &&
+ test_path_is_missing tags_A/Tagless &&
+ test_path_is_missing tags_B/Tagless
)
'
diff --git a/t/t9160-git-svn-preserve-empty-dirs.sh b/t/t9160-git-svn-preserve-empty-dirs.sh
index 0ede3cfedb..36c6b1a12f 100755
--- a/t/t9160-git-svn-preserve-empty-dirs.sh
+++ b/t/t9160-git-svn-preserve-empty-dirs.sh
@@ -86,8 +86,8 @@ test_expect_success 'remove non-last entry from directory' '
cd "$GIT_REPO" &&
git checkout HEAD~2
) &&
- test_must_fail test -f "$GIT_REPO"/2/.gitignore &&
- test_must_fail test -f "$GIT_REPO"/3/.gitignore
+ test_path_is_missing "$GIT_REPO"/2/.gitignore &&
+ test_path_is_missing "$GIT_REPO"/3/.gitignore
'
# After re-cloning the repository with --placeholder-file specified, there
diff --git a/t/t9164-git-svn-dcommit-concurrent.sh b/t/t9164-git-svn-dcommit-concurrent.sh
index 90346ff4e9..8466269bf5 100755
--- a/t/t9164-git-svn-dcommit-concurrent.sh
+++ b/t/t9164-git-svn-dcommit-concurrent.sh
@@ -92,7 +92,7 @@ test_expect_success 'check if post-commit hook creates a concurrent commit' '
echo 1 >> file &&
svn_cmd commit -m "changing file" &&
svn_cmd up &&
- test_must_fail test_cmp auto_updated_file au_file_saved
+ ! test_cmp auto_updated_file au_file_saved
)
'
@@ -103,7 +103,7 @@ test_expect_success 'check if pre-commit hook fails' '
echo 2 >> file &&
svn_cmd commit -m "changing file once again" &&
echo 3 >> file &&
- test_must_fail svn_cmd commit -m "this commit should fail" &&
+ ! svn_cmd commit -m "this commit should fail" &&
svn_cmd revert file
)
'
diff --git a/t/t9819-git-p4-case-folding.sh b/t/t9819-git-p4-case-folding.sh
index 600ce1e0b0..b4d93f0c17 100755
--- a/t/t9819-git-p4-case-folding.sh
+++ b/t/t9819-git-p4-case-folding.sh
@@ -30,7 +30,7 @@ test_expect_success 'Check p4 is in case-folding mode' '
cd "$cli" &&
>lc/FILE.TXT &&
p4 add lc/FILE.TXT &&
- test_must_fail p4 submit -d "Cannot add file differing only in case" lc/FILE.TXT
+ ! p4 submit -d "Cannot add file differing only in case" lc/FILE.TXT
)
'
diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh
index 5505e5aa24..3c44af6940 100755
--- a/t/t9902-completion.sh
+++ b/t/t9902-completion.sh
@@ -1638,6 +1638,11 @@ test_expect_success 'complete files' '
echo modify > modified &&
test_completion "git add " "modified" &&
+ mkdir -p some/deep &&
+ touch some/deep/path &&
+ test_completion "git add some/" "some/deep" &&
+ git clean -f some &&
+
touch untracked &&
: TODO .gitignore should not be here &&
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index d9ef356a16..3103be8a32 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -905,7 +905,7 @@ test_expect_code () {
# - not all diff versions understand "-u"
test_cmp() {
- $GIT_TEST_CMP "$@"
+ eval "$GIT_TEST_CMP" '"$@"'
}
# Check that the given config key has the expected value.
diff --git a/t/test-lib.sh b/t/test-lib.sh
index 0bb1105ec3..1b221951a8 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -675,6 +675,18 @@ die () {
fi
}
+file_lineno () {
+ test -z "$GIT_TEST_FRAMEWORK_SELFTEST" && test -n "$BASH" || return 0
+ local i
+ for i in ${!BASH_SOURCE[*]}
+ do
+ case $i,"${BASH_SOURCE[$i]##*/}" in
+ 0,t[0-9]*.sh) echo "t/${BASH_SOURCE[$i]}:$LINENO: ${1+$1: }"; return;;
+ *,t[0-9]*.sh) echo "t/${BASH_SOURCE[$i]}:${BASH_LINENO[$(($i-1))]}: ${1+$1: }"; return;;
+ esac
+ done
+}
+
GIT_EXIT_OK=
trap 'die' EXIT
# Disable '-x' tracing, because with some shells, notably dash, it
@@ -720,7 +732,7 @@ test_failure_ () {
write_junit_xml_testcase "$1" " $junit_insert"
fi
test_failure=$(($test_failure + 1))
- say_color error "not ok $test_count - $1"
+ say_color error "$(file_lineno error)not ok $test_count - $1"
shift
printf '%s\n' "$*" | sed -e 's/^/# /'
test "$immediate" = "" || { finalize_junit_xml; GIT_EXIT_OK=t; exit 1; }
@@ -1654,6 +1666,15 @@ test_lazy_prereq ULIMIT_STACK_SIZE '
run_with_limited_stack true
'
+run_with_limited_open_files () {
+ (ulimit -n 32 && "$@")
+}
+
+test_lazy_prereq ULIMIT_FILE_DESCRIPTORS '
+ test_have_prereq !MINGW,!CYGWIN &&
+ run_with_limited_open_files true
+'
+
build_option () {
git version --build-options |
sed -ne "s/^$1: //p"
diff --git a/transport-helper.c b/transport-helper.c
index 20a7185ec4..a46afcb69d 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -894,6 +894,7 @@ static int push_refs_with_push(struct transport *transport,
case REF_STATUS_REJECT_STALE:
case REF_STATUS_REJECT_ALREADY_EXISTS:
if (atomic) {
+ reject_atomic_push(remote_refs, mirror);
string_list_clear(&cas_options, 0);
return 0;
} else
@@ -1488,3 +1489,25 @@ int bidirectional_transfer_loop(int input, int output)
return tloop_spawnwait_tasks(&state);
}
+
+void reject_atomic_push(struct ref *remote_refs, int mirror_mode)
+{
+ struct ref *ref;
+
+ /* Mark other refs as failed */
+ for (ref = remote_refs; ref; ref = ref->next) {
+ if (!ref->peer_ref && !mirror_mode)
+ continue;
+
+ switch (ref->status) {
+ case REF_STATUS_NONE:
+ case REF_STATUS_OK:
+ case REF_STATUS_EXPECTING_REPORT:
+ ref->status = REF_STATUS_ATOMIC_PUSH_FAILED;
+ continue;
+ default:
+ break; /* do nothing */
+ }
+ }
+ return;
+}
diff --git a/transport.c b/transport.c
index 471c5bd339..15f5ba4e8f 100644
--- a/transport.c
+++ b/transport.c
@@ -715,7 +715,15 @@ static int git_transport_push(struct transport *transport, struct ref *remote_re
close(data->fd[1]);
close(data->fd[0]);
- ret |= finish_connect(data->conn);
+ /*
+ * Atomic push may abort the connection early and close the pipe,
+ * which may cause an error for `finish_connect()`. Ignore this error
+ * for atomic git-push.
+ */
+ if (ret || args.atomic)
+ finish_connect(data->conn);
+ else
+ ret = finish_connect(data->conn);
data->conn = NULL;
data->got_remote_heads = 0;
@@ -1240,20 +1248,6 @@ int transport_push(struct repository *r,
err = push_had_errors(remote_refs);
ret = push_ret | err;
- if ((flags & TRANSPORT_PUSH_ATOMIC) && err) {
- struct ref *it;
- for (it = remote_refs; it; it = it->next)
- switch (it->status) {
- case REF_STATUS_NONE:
- case REF_STATUS_UPTODATE:
- case REF_STATUS_OK:
- it->status = REF_STATUS_ATOMIC_PUSH_FAILED;
- break;
- default:
- break;
- }
- }
-
if (!quiet || err)
transport_print_push_status(transport->url, remote_refs,
verbose | porcelain, porcelain,
diff --git a/transport.h b/transport.h
index e0131daab9..4298c855be 100644
--- a/transport.h
+++ b/transport.h
@@ -265,4 +265,7 @@ int transport_refs_pushed(struct ref *ref);
void transport_print_push_status(const char *dest, struct ref *refs,
int verbose, int porcelain, unsigned int *reject_reasons);
+/* common method used by transport-helper.c and send-pack.c */
+void reject_atomic_push(struct ref *refs, int mirror_mode);
+
#endif
diff --git a/tree-diff.c b/tree-diff.c
index 33ded7f8b3..f3d303c6e5 100644
--- a/tree-diff.c
+++ b/tree-diff.c
@@ -434,6 +434,9 @@ static struct combine_diff_path *ll_diff_tree_paths(
if (diff_can_quit_early(opt))
break;
+ if (opt->max_changes && opt->num_changes > opt->max_changes)
+ break;
+
if (opt->pathspec.nr) {
skip_uninteresting(&t, base, opt);
for (i = 0; i < nparent; i++)
@@ -518,6 +521,7 @@ static struct combine_diff_path *ll_diff_tree_paths(
/* t↓ */
update_tree_entry(&t);
+ opt->num_changes++;
}
/* t > p[imin] */
@@ -535,6 +539,7 @@ static struct combine_diff_path *ll_diff_tree_paths(
skip_emit_tp:
/* ∀ pi=p[imin] pi↓ */
update_tp_entries(tp, nparent);
+ opt->num_changes++;
}
}
@@ -552,6 +557,7 @@ struct combine_diff_path *diff_tree_paths(
const struct object_id **parents_oid, int nparent,
struct strbuf *base, struct diff_options *opt)
{
+ opt->num_changes = 0;
p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt);
/*
diff --git a/unpack-trees.c b/unpack-trees.c
index f618a644ef..6bbf58d28e 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -24,7 +24,7 @@
* situation better. See how "git checkout" and "git merge" replaces
* them using setup_unpack_trees_porcelain(), for example.
*/
-static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
+static const char *unpack_plumbing_errors[NB_UNPACK_TREES_WARNING_TYPES] = {
/* ERROR_WOULD_OVERWRITE */
"Entry '%s' would be overwritten by merge. Cannot merge.",
@@ -43,17 +43,20 @@ static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
/* ERROR_BIND_OVERLAP */
"Entry '%s' overlaps with '%s'. Cannot bind.",
- /* ERROR_SPARSE_NOT_UPTODATE_FILE */
- "Entry '%s' not uptodate. Cannot update sparse checkout.",
+ /* ERROR_WOULD_LOSE_SUBMODULE */
+ "Submodule '%s' cannot checkout new HEAD.",
- /* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
- "Working tree file '%s' would be overwritten by sparse checkout update.",
+ /* NB_UNPACK_TREES_ERROR_TYPES; just a meta value */
+ "",
- /* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
- "Working tree file '%s' would be removed by sparse checkout update.",
+ /* WARNING_SPARSE_NOT_UPTODATE_FILE */
+ "Path '%s' not uptodate; will not remove from working tree.",
- /* ERROR_WOULD_LOSE_SUBMODULE */
- "Submodule '%s' cannot checkout new HEAD.",
+ /* WARNING_SPARSE_UNMERGED_FILE */
+ "Path '%s' unmerged; will not remove from working tree.",
+
+ /* WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN */
+ "Path '%s' already present; will not overwrite with sparse update.",
};
#define ERRORMSG(o,type) \
@@ -168,15 +171,16 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
*/
msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");
- msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
- _("Cannot update sparse checkout: the following entries are not up to date:\n%s");
- msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
- _("The following working tree files would be overwritten by sparse checkout update:\n%s");
- msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
- _("The following working tree files would be removed by sparse checkout update:\n%s");
msgs[ERROR_WOULD_LOSE_SUBMODULE] =
_("Cannot update submodule:\n%s");
+ msgs[WARNING_SPARSE_NOT_UPTODATE_FILE] =
+ _("The following paths are not up to date and were left despite sparse patterns:\n%s");
+ msgs[WARNING_SPARSE_UNMERGED_FILE] =
+ _("The following paths are unmerged and were left despite sparse patterns:\n%s");
+ msgs[WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN] =
+ _("The following paths were already present and thus not updated despite sparse patterns:\n%s");
+
opts->show_all_errors = 1;
/* rejected paths may not have a static buffer */
for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
@@ -226,7 +230,7 @@ static int add_rejected_path(struct unpack_trees_options *o,
/*
* Otherwise, insert in a list for future display by
- * display_error_msgs()
+ * display_(error|warning)_msgs()
*/
string_list_append(&o->unpack_rejects[e], path);
return -1;
@@ -237,13 +241,16 @@ static int add_rejected_path(struct unpack_trees_options *o,
*/
static void display_error_msgs(struct unpack_trees_options *o)
{
- int e, i;
- int something_displayed = 0;
+ int e;
+ unsigned error_displayed = 0;
for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
struct string_list *rejects = &o->unpack_rejects[e];
+
if (rejects->nr > 0) {
+ int i;
struct strbuf path = STRBUF_INIT;
- something_displayed = 1;
+
+ error_displayed = 1;
for (i = 0; i < rejects->nr; i++)
strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
error(ERRORMSG(o, e), super_prefixed(path.buf));
@@ -251,10 +258,36 @@ static void display_error_msgs(struct unpack_trees_options *o)
}
string_list_clear(rejects, 0);
}
- if (something_displayed)
+ if (error_displayed)
fprintf(stderr, _("Aborting\n"));
}
+/*
+ * display all the warning messages stored in a nice way
+ */
+static void display_warning_msgs(struct unpack_trees_options *o)
+{
+ int e;
+ unsigned warning_displayed = 0;
+ for (e = NB_UNPACK_TREES_ERROR_TYPES + 1;
+ e < NB_UNPACK_TREES_WARNING_TYPES; e++) {
+ struct string_list *rejects = &o->unpack_rejects[e];
+
+ if (rejects->nr > 0) {
+ int i;
+ struct strbuf path = STRBUF_INIT;
+
+ warning_displayed = 1;
+ for (i = 0; i < rejects->nr; i++)
+ strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
+ warning(ERRORMSG(o, e), super_prefixed(path.buf));
+ strbuf_release(&path);
+ }
+ string_list_clear(rejects, 0);
+ }
+ if (warning_displayed)
+ fprintf(stderr, _("After fixing the above paths, you may want to run `git sparse-checkout reapply`.\n"));
+}
static int check_submodule_move_head(const struct cache_entry *ce,
const char *old_id,
const char *new_id,
@@ -357,12 +390,12 @@ static void report_collided_checkout(struct index_state *index)
string_list_clear(&list, 0);
}
-static int check_updates(struct unpack_trees_options *o)
+static int check_updates(struct unpack_trees_options *o,
+ struct index_state *index)
{
unsigned cnt = 0;
int errs = 0;
struct progress *progress;
- struct index_state *index = &o->result;
struct checkout state = CHECKOUT_INIT;
int i;
@@ -423,9 +456,8 @@ static int check_updates(struct unpack_trees_options *o)
continue;
oid_array_append(&to_fetch, &ce->oid);
}
- if (to_fetch.nr)
- promisor_remote_get_direct(the_repository,
- to_fetch.oid, to_fetch.nr);
+ promisor_remote_get_direct(the_repository,
+ to_fetch.oid, to_fetch.nr);
oid_array_clear(&to_fetch);
}
for (i = 0; i < index->cache_nr; i++) {
@@ -505,19 +537,39 @@ static int apply_sparse_checkout(struct index_state *istate,
* also stat info may have lost after merged_entry() so calling
* verify_uptodate() again may fail
*/
- if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
+ if (!(ce->ce_flags & CE_UPDATE) &&
+ verify_uptodate_sparse(ce, o)) {
+ ce->ce_flags &= ~CE_SKIP_WORKTREE;
return -1;
+ }
ce->ce_flags |= CE_WT_REMOVE;
ce->ce_flags &= ~CE_UPDATE;
}
if (was_skip_worktree && !ce_skip_worktree(ce)) {
- if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
+ if (verify_absent_sparse(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o))
return -1;
ce->ce_flags |= CE_UPDATE;
}
return 0;
}
+static int warn_conflicted_path(struct index_state *istate,
+ int i,
+ struct unpack_trees_options *o)
+{
+ char *conflicting_path = istate->cache[i]->name;
+ int count = 0;
+
+ add_rejected_path(o, WARNING_SPARSE_UNMERGED_FILE, conflicting_path);
+
+ /* Find out how many higher stage entries are at the same path */
+ while (++count < istate->cache_nr &&
+ !strcmp(conflicting_path,
+ istate->cache[i+count]->name))
+ /* do nothing */;
+ return count;
+}
+
static inline int call_unpack_fn(const struct cache_entry * const *src,
struct unpack_trees_options *o)
{
@@ -1494,6 +1546,20 @@ static void mark_new_skip_worktree(struct pattern_list *pl,
clear_ce_flags(istate, select_flag, skip_wt_flag, pl, show_progress);
}
+static void populate_from_existing_patterns(struct unpack_trees_options *o,
+ struct pattern_list *pl)
+{
+ char *sparse = git_pathdup("info/sparse-checkout");
+
+ pl->use_cone_patterns = core_sparse_checkout_cone;
+ if (add_patterns_from_file_to_list(sparse, "", 0, pl, NULL) < 0)
+ o->skip_sparse_checkout = 1;
+ else
+ o->pl = pl;
+ free(sparse);
+}
+
+
static int verify_absent(const struct cache_entry *,
enum unpack_trees_error_types,
struct unpack_trees_options *);
@@ -1508,22 +1574,18 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
int i, ret;
static struct cache_entry *dfc;
struct pattern_list pl;
+ int free_pattern_list = 0;
if (len > MAX_UNPACK_TREES)
die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
trace_performance_enter();
- memset(&pl, 0, sizeof(pl));
if (!core_apply_sparse_checkout || !o->update)
o->skip_sparse_checkout = 1;
if (!o->skip_sparse_checkout && !o->pl) {
- char *sparse = git_pathdup("info/sparse-checkout");
- pl.use_cone_patterns = core_sparse_checkout_cone;
- if (add_patterns_from_file_to_list(sparse, "", 0, &pl, NULL) < 0)
- o->skip_sparse_checkout = 1;
- else
- o->pl = &pl;
- free(sparse);
+ memset(&pl, 0, sizeof(pl));
+ free_pattern_list = 1;
+ populate_from_existing_patterns(o, &pl);
}
memset(&o->result, 0, sizeof(o->result));
@@ -1619,7 +1681,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
/*
* Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
- * If the will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
+ * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
* so apply_sparse_checkout() won't attempt to remove it from worktree
*/
mark_new_skip_worktree(o->pl, &o->result,
@@ -1639,23 +1701,15 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
* correct CE_NEW_SKIP_WORKTREE
*/
if (ce->ce_flags & CE_ADDED &&
- verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
- if (!o->show_all_errors)
- goto return_failed;
- ret = -1;
- }
+ verify_absent(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o))
+ ret = 1;
+
+ if (apply_sparse_checkout(&o->result, ce, o))
+ ret = 1;
- if (apply_sparse_checkout(&o->result, ce, o)) {
- if (!o->show_all_errors)
- goto return_failed;
- ret = -1;
- }
if (!ce_skip_worktree(ce))
empty_worktree = 0;
-
}
- if (ret < 0)
- goto return_failed;
/*
* Sparse checkout is meant to narrow down checkout area
* but it does not make sense to narrow down to empty working
@@ -1666,9 +1720,18 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
goto done;
}
+ if (ret == 1) {
+ /*
+ * Inability to sparsify or de-sparsify individual
+ * paths is not an error, but just a warning.
+ */
+ if (o->show_all_errors)
+ display_warning_msgs(o);
+ ret = 0;
+ }
}
- ret = check_updates(o) ? (-2) : 0;
+ ret = check_updates(o, &o->result) ? (-2) : 0;
if (o->dst_index) {
move_index_extensions(&o->result, o->src_index);
if (!ret) {
@@ -1691,9 +1754,9 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
o->src_index = NULL;
done:
- trace_performance_leave("unpack_trees");
- if (!o->keep_pattern_list)
+ if (free_pattern_list)
clear_pattern_list(&pl);
+ trace_performance_leave("unpack_trees");
return ret;
return_failed:
@@ -1706,6 +1769,91 @@ return_failed:
goto done;
}
+/*
+ * Update SKIP_WORKTREE bits according to sparsity patterns, and update
+ * working directory to match.
+ *
+ * CE_NEW_SKIP_WORKTREE is used internally.
+ */
+enum update_sparsity_result update_sparsity(struct unpack_trees_options *o)
+{
+ enum update_sparsity_result ret = UPDATE_SPARSITY_SUCCESS;
+ struct pattern_list pl;
+ int i, empty_worktree;
+ unsigned old_show_all_errors;
+ int free_pattern_list = 0;
+
+ old_show_all_errors = o->show_all_errors;
+ o->show_all_errors = 1;
+
+ /* Sanity checks */
+ if (!o->update || o->index_only || o->skip_sparse_checkout)
+ BUG("update_sparsity() is for reflecting sparsity patterns in working directory");
+ if (o->src_index != o->dst_index || o->fn)
+ BUG("update_sparsity() called wrong");
+
+ trace_performance_enter();
+
+ /* If we weren't given patterns, use the recorded ones */
+ if (!o->pl) {
+ memset(&pl, 0, sizeof(pl));
+ free_pattern_list = 1;
+ populate_from_existing_patterns(o, &pl);
+ if (o->skip_sparse_checkout)
+ goto skip_sparse_checkout;
+ }
+
+ /* Set NEW_SKIP_WORKTREE on existing entries. */
+ mark_all_ce_unused(o->src_index);
+ mark_new_skip_worktree(o->pl, o->src_index, 0,
+ CE_NEW_SKIP_WORKTREE, o->verbose_update);
+
+ /* Then loop over entries and update/remove as needed */
+ ret = UPDATE_SPARSITY_SUCCESS;
+ empty_worktree = 1;
+ for (i = 0; i < o->src_index->cache_nr; i++) {
+ struct cache_entry *ce = o->src_index->cache[i];
+
+
+ if (ce_stage(ce)) {
+ /* -1 because for loop will increment by 1 */
+ i += warn_conflicted_path(o->src_index, i, o) - 1;
+ ret = UPDATE_SPARSITY_WARNINGS;
+ continue;
+ }
+
+ if (apply_sparse_checkout(o->src_index, ce, o))
+ ret = UPDATE_SPARSITY_WARNINGS;
+
+ if (!ce_skip_worktree(ce))
+ empty_worktree = 0;
+ }
+
+ /*
+ * Sparse checkout is meant to narrow down checkout area
+ * but it does not make sense to narrow down to empty working
+ * tree. This is usually a mistake in sparse checkout rules.
+ * Do not allow users to do that.
+ */
+ if (o->src_index->cache_nr && empty_worktree) {
+ unpack_failed(o, "Sparse checkout leaves no entry on working directory");
+ ret = UPDATE_SPARSITY_INDEX_UPDATE_FAILURES;
+ goto done;
+ }
+
+skip_sparse_checkout:
+ if (check_updates(o, o->src_index))
+ ret = UPDATE_SPARSITY_WORKTREE_UPDATE_FAILURES;
+
+done:
+ display_warning_msgs(o);
+ o->show_all_errors = old_show_all_errors;
+ if (free_pattern_list)
+ clear_pattern_list(&pl);
+ trace_performance_leave("update_sparsity");
+ return ret;
+}
+
/* Here come the merge functions */
static int reject_merge(const struct cache_entry *ce,
@@ -1790,7 +1938,7 @@ int verify_uptodate(const struct cache_entry *ce,
static int verify_uptodate_sparse(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
- return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
+ return verify_uptodate_1(ce, o, WARNING_SPARSE_NOT_UPTODATE_FILE);
}
/*
@@ -2028,11 +2176,7 @@ static int verify_absent_sparse(const struct cache_entry *ce,
enum unpack_trees_error_types error_type,
struct unpack_trees_options *o)
{
- enum unpack_trees_error_types orphaned_error = error_type;
- if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
- orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;
-
- return verify_absent_1(ce, orphaned_error, o);
+ return verify_absent_1(ce, error_type, o);
}
static int merged_entry(const struct cache_entry *ce,
diff --git a/unpack-trees.h b/unpack-trees.h
index ad41b45a71..9c2f08277e 100644
--- a/unpack-trees.h
+++ b/unpack-trees.h
@@ -22,11 +22,15 @@ enum unpack_trees_error_types {
ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN,
ERROR_WOULD_LOSE_UNTRACKED_REMOVED,
ERROR_BIND_OVERLAP,
- ERROR_SPARSE_NOT_UPTODATE_FILE,
- ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN,
- ERROR_WOULD_LOSE_ORPHANED_REMOVED,
ERROR_WOULD_LOSE_SUBMODULE,
- NB_UNPACK_TREES_ERROR_TYPES
+
+ NB_UNPACK_TREES_ERROR_TYPES,
+
+ WARNING_SPARSE_NOT_UPTODATE_FILE,
+ WARNING_SPARSE_UNMERGED_FILE,
+ WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN,
+
+ NB_UNPACK_TREES_WARNING_TYPES,
};
/*
@@ -59,20 +63,19 @@ struct unpack_trees_options {
quiet,
exiting_early,
show_all_errors,
- dry_run,
- keep_pattern_list;
+ dry_run;
const char *prefix;
int cache_bottom;
struct dir_struct *dir;
struct pathspec *pathspec;
merge_fn_t fn;
- const char *msgs[NB_UNPACK_TREES_ERROR_TYPES];
+ const char *msgs[NB_UNPACK_TREES_WARNING_TYPES];
struct argv_array msgs_to_free;
/*
* Store error messages in an array, each case
* corresponding to a error message type
*/
- struct string_list unpack_rejects[NB_UNPACK_TREES_ERROR_TYPES];
+ struct string_list unpack_rejects[NB_UNPACK_TREES_WARNING_TYPES];
int head_idx;
int merge_size;
@@ -91,6 +94,15 @@ struct unpack_trees_options {
int unpack_trees(unsigned n, struct tree_desc *t,
struct unpack_trees_options *options);
+enum update_sparsity_result {
+ UPDATE_SPARSITY_SUCCESS = 0,
+ UPDATE_SPARSITY_WARNINGS = 1,
+ UPDATE_SPARSITY_INDEX_UPDATE_FAILURES = -1,
+ UPDATE_SPARSITY_WORKTREE_UPDATE_FAILURES = -2
+};
+
+enum update_sparsity_result update_sparsity(struct unpack_trees_options *options);
+
int verify_uptodate(const struct cache_entry *ce,
struct unpack_trees_options *o);
diff --git a/userdiff.c b/userdiff.c
index efbe05e5a5..30ab42df8e 100644
--- a/userdiff.c
+++ b/userdiff.c
@@ -222,7 +222,7 @@ static struct userdiff_driver driver_false = {
{ NULL, 0 }
};
-static struct userdiff_driver *userdiff_find_by_namelen(const char *k, int len)
+static struct userdiff_driver *userdiff_find_by_namelen(const char *k, size_t len)
{
int i;
for (i = 0; i < ndrivers; i++) {
@@ -266,7 +266,7 @@ int userdiff_config(const char *k, const char *v)
{
struct userdiff_driver *drv;
const char *name, *type;
- int namelen;
+ size_t namelen;
if (parse_config_key(k, "diff", &name, &namelen, &type) || !name)
return 0;
diff --git a/wt-status.c b/wt-status.c
index cc6f94504d..98dfa6f73f 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -722,16 +722,14 @@ static void wt_status_collect_untracked(struct wt_status *s)
for (i = 0; i < dir.nr; i++) {
struct dir_entry *ent = dir.entries[i];
- if (index_name_is_other(istate, ent->name, ent->len) &&
- dir_path_match(istate, ent, &s->pathspec, 0, NULL))
+ if (index_name_is_other(istate, ent->name, ent->len))
string_list_insert(&s->untracked, ent->name);
free(ent);
}
for (i = 0; i < dir.ignored_nr; i++) {
struct dir_entry *ent = dir.ignored[i];
- if (index_name_is_other(istate, ent->name, ent->len) &&
- dir_path_match(istate, ent, &s->pathspec, 0, NULL))
+ if (index_name_is_other(istate, ent->name, ent->len))
string_list_insert(&s->ignored, ent->name);
free(ent);
}