-rw-r--r--  .github/workflows/l10n.yml | 105
-rw-r--r--  .github/workflows/main.yml | 3
-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/MyFirstContribution.txt | 42
-rw-r--r--  Documentation/RelNotes/2.33.1.txt | 138
-rw-r--r--  Documentation/RelNotes/2.34.0.txt | 181
-rw-r--r--  Documentation/config/pack.txt | 4
-rw-r--r--  Documentation/git-add.txt | 9
-rw-r--r--  Documentation/git-am.txt | 2
-rw-r--r--  Documentation/git-bugreport.txt | 4
-rw-r--r--  Documentation/git-bundle.txt | 2
-rw-r--r--  Documentation/git-checkout.txt | 5
-rw-r--r--  Documentation/git-config.txt | 3
-rw-r--r--  Documentation/git-cvsserver.txt | 27
-rw-r--r--  Documentation/git-http-backend.txt | 26
-rw-r--r--  Documentation/git-index-pack.txt | 6
-rw-r--r--  Documentation/git-maintenance.txt | 57
-rw-r--r--  Documentation/git-multi-pack-index.txt | 24
-rw-r--r--  Documentation/git-read-tree.txt | 23
-rw-r--r--  Documentation/git-rebase.txt | 3
-rw-r--r--  Documentation/git-receive-pack.txt | 5
-rw-r--r--  Documentation/git-reset.txt | 3
-rw-r--r--  Documentation/git-rm.txt | 6
-rw-r--r--  Documentation/git-sparse-checkout.txt | 10
-rw-r--r--  Documentation/git-status.txt | 2
-rw-r--r--  Documentation/git-upload-pack.txt | 20
-rw-r--r--  Documentation/git-version.txt | 28
-rw-r--r--  Documentation/git.txt | 38
-rw-r--r--  Documentation/technical/api-parse-options.txt | 5
-rw-r--r--  Documentation/technical/api-trace2.txt | 40
-rw-r--r--  Documentation/technical/bitmap-format.txt | 71
-rw-r--r--  Documentation/technical/http-protocol.txt | 3
-rw-r--r--  Documentation/technical/multi-pack-index.txt | 14
-rw-r--r--  Documentation/technical/protocol-v2.txt | 17
-rw-r--r--  INSTALL | 15
-rw-r--r--  Makefile | 119
-rw-r--r--  add-interactive.c | 8
-rw-r--r--  advice.c | 11
-rw-r--r--  apply.c | 21
-rw-r--r--  attr.c | 15
-rw-r--r--  builtin.h | 1
-rw-r--r--  builtin/add.c | 39
-rw-r--r--  builtin/am.c | 8
-rw-r--r--  builtin/bisect--helper.c | 160
-rw-r--r--  builtin/branch.c | 2
-rw-r--r--  builtin/bugreport.c | 46
-rw-r--r--  builtin/bundle.c | 14
-rw-r--r--  builtin/checkout.c | 10
-rw-r--r--  builtin/clone.c | 160
-rw-r--r--  builtin/commit-graph.c | 28
-rw-r--r--  builtin/commit.c | 3
-rw-r--r--  builtin/credential-cache.c | 30
-rw-r--r--  builtin/diff-index.c | 6
-rw-r--r--  builtin/difftool.c | 159
-rw-r--r--  builtin/fetch.c | 76
-rw-r--r--  builtin/for-each-ref.c | 2
-rw-r--r--  builtin/gc.c | 609
-rw-r--r--  builtin/grep.c | 67
-rw-r--r--  builtin/hash-object.c | 2
-rw-r--r--  builtin/help.c | 9
-rw-r--r--  builtin/index-pack.c | 58
-rw-r--r--  builtin/ls-files.c | 8
-rw-r--r--  builtin/merge.c | 8
-rw-r--r--  builtin/multi-pack-index.c | 54
-rw-r--r--  builtin/mv.c | 52
-rw-r--r--  builtin/pack-objects.c | 23
-rw-r--r--  builtin/prune.c | 1
-rw-r--r--  builtin/pull.c | 3
-rw-r--r--  builtin/read-tree.c | 26
-rw-r--r--  builtin/rebase.c | 56
-rw-r--r--  builtin/receive-pack.c | 26
-rw-r--r--  builtin/reflog.c | 13
-rw-r--r--  builtin/repack.c | 17
-rw-r--r--  builtin/reset.c | 10
-rw-r--r--  builtin/rev-parse.c | 4
-rw-r--r--  builtin/revert.c | 3
-rw-r--r--  builtin/rm.c | 10
-rw-r--r--  builtin/sparse-checkout.c | 94
-rw-r--r--  builtin/stash.c | 25
-rw-r--r--  builtin/submodule--helper.c | 631
-rw-r--r--  builtin/update-ref.c | 14
-rw-r--r--  builtin/upload-pack.c | 28
-rw-r--r--  builtin/worktree.c | 1
-rw-r--r--  bulk-checkin.c | 31
-rw-r--r--  bundle.c | 12
-rw-r--r--  bundle.h | 14
-rw-r--r--  cache.h | 72
-rw-r--r--  cbtree.h | 5
-rwxr-xr-x  check_bindir | 13
-rw-r--r--  checkout.c | 2
-rwxr-xr-x  ci/install-dependencies.sh | 2
-rwxr-xr-x  ci/lib.sh | 9
-rwxr-xr-x  ci/run-build-and-tests.sh | 1
-rw-r--r--  commit-graph.c | 3
-rw-r--r--  compat/linux/procinfo.c | 169
-rw-r--r--  compat/nedmalloc/nedmalloc.c | 2
-rw-r--r--  compat/simple-ipc/ipc-unix-socket.c | 14
-rw-r--r--  compat/simple-ipc/ipc-win32.c | 179
-rw-r--r--  compat/vcbuild/README | 2
-rw-r--r--  compat/win32/lazyload.h | 14
-rw-r--r--  config.c | 57
-rw-r--r--  config.h | 5
-rw-r--r--  config.mak.dev | 23
-rw-r--r--  config.mak.uname | 12
-rw-r--r--  connect.c | 2
-rw-r--r--  connected.c | 15
-rw-r--r--  connected.h | 2
-rw-r--r--  contrib/buildsystems/CMakeLists.txt | 7
-rw-r--r--  contrib/coccinelle/xopen.cocci | 13
-rw-r--r--  contrib/credential/gnome-keyring/git-credential-gnome-keyring.c | 2
-rw-r--r--  contrib/credential/libsecret/git-credential-libsecret.c | 2
-rwxr-xr-x  contrib/rerere-train.sh | 2
-rw-r--r--  diff-merges.c | 14
-rw-r--r--  diff-merges.h | 2
-rw-r--r--  diff.c | 12
-rw-r--r--  dir.c | 224
-rw-r--r--  dir.h | 19
-rw-r--r--  entry.c | 12
-rw-r--r--  entry.h | 2
-rw-r--r--  environment.c | 12
-rw-r--r--  fetch-negotiator.c | 1
-rw-r--r--  fetch-pack.c | 12
-rwxr-xr-x  generate-hooklist.sh | 20
-rw-r--r--  gettext.h | 24
-rwxr-xr-x  git-bisect.sh | 89
-rw-r--r--  git-compat-util.h | 9
-rw-r--r--  git-curl-compat.h | 129
-rwxr-xr-x  git-cvsserver.perl | 5
-rwxr-xr-x  git-send-email.perl | 2
-rw-r--r--  git-sh-setup.sh | 6
-rwxr-xr-x  git-submodule.sh | 233
-rw-r--r--  git.c | 2
-rw-r--r--  grep.c | 136
-rw-r--r--  grep.h | 27
-rw-r--r--  hook.c | 42
-rw-r--r--  hook.h | 16
-rw-r--r--  http-backend.c | 6
-rw-r--r--  http.c | 45
-rw-r--r--  http.h | 1
-rw-r--r--  imap-send.c | 2
-rw-r--r--  list.h | 5
-rw-r--r--  lockfile.h | 2
-rw-r--r--  log-tree.h | 2
-rw-r--r--  ls-refs.c | 31
-rw-r--r--  ls-refs.h | 4
-rw-r--r--  merge-ort.c | 78
-rw-r--r--  merge-recursive.c | 57
-rw-r--r--  merge.c | 8
-rw-r--r--  midx.c | 372
-rw-r--r--  midx.h | 6
-rw-r--r--  object-file.c | 98
-rw-r--r--  object-store.h | 7
-rw-r--r--  object.h | 2
-rw-r--r--  oid-array.h | 2
-rw-r--r--  oidset.c | 5
-rw-r--r--  oidset.h | 5
-rw-r--r--  pack-bitmap-write.c | 79
-rw-r--r--  pack-bitmap.c | 528
-rw-r--r--  pack-bitmap.h | 9
-rw-r--r--  pack-revindex.h | 4
-rw-r--r--  pack-write.c | 60
-rw-r--r--  pack.h | 10
-rw-r--r--  packfile.c | 41
-rw-r--r--  packfile.h | 4
-rw-r--r--  parse-options.c | 13
-rw-r--r--  parse-options.h | 3
-rw-r--r--  path.c | 13
-rw-r--r--  path.h | 5
-rw-r--r--  pathspec.c | 5
-rw-r--r--  pkt-line.c | 37
-rw-r--r--  pkt-line.h | 11
-rw-r--r--  protocol-caps.c | 3
-rw-r--r--  protocol-caps.h | 4
-rw-r--r--  range-diff.c | 3
-rw-r--r--  read-cache.c | 60
-rw-r--r--  ref-filter.c | 24
-rw-r--r--  ref-filter.h | 1
-rw-r--r--  reflog-walk.c | 3
-rw-r--r--  refs.c | 63
-rw-r--r--  refs.h | 20
-rw-r--r--  refs/debug.c | 15
-rw-r--r--  refs/files-backend.c | 278
-rw-r--r--  refs/packed-backend.c | 15
-rw-r--r--  refs/ref-cache.c | 87
-rw-r--r--  refs/ref-cache.h | 26
-rw-r--r--  refs/refs-internal.h | 78
-rw-r--r--  remote-curl.c | 8
-rw-r--r--  remote.c | 2
-rw-r--r--  repo-settings.c | 115
-rw-r--r--  repository.c | 24
-rw-r--r--  repository.h | 35
-rw-r--r--  reset.c | 3
-rw-r--r--  revision.c | 20
-rw-r--r--  revision.h | 1
-rw-r--r--  run-command.c | 174
-rw-r--r--  run-command.h | 99
-rw-r--r--  sequencer.c | 60
-rw-r--r--  sequencer.h | 5
-rw-r--r--  serve.c | 198
-rw-r--r--  serve.h | 12
-rw-r--r--  setup.c | 8
-rw-r--r--  shallow.h | 4
-rw-r--r--  simple-ipc.h | 27
-rw-r--r--  sparse-index.c | 76
-rw-r--r--  sparse-index.h | 3
-rw-r--r--  strbuf.h | 2
-rw-r--r--  string-list.c | 8
-rw-r--r--  string-list.h | 5
-rw-r--r--  strvec.h | 8
-rw-r--r--  submodule-config.c | 5
-rw-r--r--  submodule-config.h | 4
-rw-r--r--  submodule.c | 154
-rw-r--r--  submodule.h | 19
-rw-r--r--  t/README | 32
-rw-r--r--  t/helper/test-bitmap.c | 10
-rw-r--r--  t/helper/test-dump-untracked-cache.c | 6
-rw-r--r--  t/helper/test-parse-options.c | 1
-rw-r--r--  t/helper/test-read-midx.c | 16
-rw-r--r--  t/helper/test-run-command.c | 9
-rw-r--r--  t/helper/test-serve-v2.c | 14
-rw-r--r--  t/helper/test-simple-ipc.c | 233
-rw-r--r--  t/helper/test-submodule-nested-repo-config.c | 4
-rw-r--r--  t/lib-bitmap.sh | 240
-rw-r--r--  t/lib-httpd/apache.conf | 7
-rw-r--r--  t/lib-subtest.sh | 95
-rwxr-xr-x  t/perf/aggregate.perl | 4
-rw-r--r--  t/perf/lib-bitmap.sh | 69
-rwxr-xr-x  t/perf/p3400-rebase.sh | 2
-rwxr-xr-x  t/perf/p5310-pack-bitmaps.sh | 65
-rwxr-xr-x  t/perf/p5326-multi-pack-bitmaps.sh | 52
-rwxr-xr-x  t/perf/run | 2
-rwxr-xr-x  t/t0000-basic.sh | 471
-rwxr-xr-x  t/t0004-unwritable.sh | 3
-rwxr-xr-x  t/t0011-hashmap.sh | 2
-rwxr-xr-x  t/t0012-help.sh | 16
-rwxr-xr-x  t/t0016-oidmap.sh | 2
-rwxr-xr-x  t/t0017-env-helper.sh | 1
-rwxr-xr-x  t/t0018-advice.sh | 1
-rwxr-xr-x  t/t0030-stripspace.sh | 1
-rwxr-xr-x  t/t0040-parse-options.sh | 5
-rwxr-xr-x  t/t0063-string-list.sh | 1
-rwxr-xr-x  t/t0090-cache-tree.sh | 1
-rwxr-xr-x  t/t0091-bugreport.sh | 1
-rwxr-xr-x  t/t0301-credential-cache.sh | 32
-rwxr-xr-x  t/t0410-partial-clone.sh | 12
-rwxr-xr-x  t/t1013-read-tree-submodule.sh | 1
-rwxr-xr-x  t/t1091-sparse-checkout-builtin.sh | 88
-rwxr-xr-x  t/t1092-sparse-checkout-compatibility.sh | 163
-rwxr-xr-x  t/t1400-update-ref.sh | 34
-rwxr-xr-x  t/t1430-bad-ref-name.sh | 2
-rwxr-xr-x  t/t1600-index.sh | 13
-rwxr-xr-x  t/t1700-split-index.sh | 34
-rwxr-xr-x  t/t2021-checkout-overwrite.sh | 1
-rwxr-xr-x  t/t2500-untracked-overwriting.sh | 244
-rwxr-xr-x  t/t3404-rebase-interactive.sh | 1
-rwxr-xr-x  t/t3407-rebase-abort.sh | 105
-rwxr-xr-x  t/t3435-rebase-gpg-sign.sh | 1
-rwxr-xr-x  t/t3510-cherry-pick-sequence.sh | 1
-rwxr-xr-x  t/t3602-rm-sparse-checkout.sh | 40
-rwxr-xr-x  t/t3705-add-sparse-checkout.sh | 68
-rwxr-xr-x  t/t3903-stash.sh | 58
-rwxr-xr-x  t/t4060-diff-submodule-option-diff-format.sh | 159
-rwxr-xr-x  t/t4108-apply-threeway.sh | 45
-rwxr-xr-x  t/t4151-am-abort.sh | 39
-rwxr-xr-x  t/t5304-prune.sh | 1
-rwxr-xr-x  t/t5310-pack-bitmaps.sh | 233
-rwxr-xr-x  t/t5312-prune-corruption.sh | 48
-rwxr-xr-x  t/t5319-multi-pack-index.sh | 75
-rwxr-xr-x  t/t5326-multi-pack-bitmaps.sh | 316
-rwxr-xr-x  t/t5516-fetch-push.sh | 19
-rwxr-xr-x  t/t5551-http-fetch-smart.sh | 33
-rwxr-xr-x  t/t5555-http-smart-common.sh | 161
-rw-r--r--  t/t5562/invoke-with-content-length.pl | 16
-rwxr-xr-x  t/t5600-clone-fail-cleanup.sh | 4
-rwxr-xr-x  t/t5701-git-serve.sh | 75
-rwxr-xr-x  t/t5702-protocol-v2.sh | 13
-rwxr-xr-x  t/t5704-protocol-violations.sh | 15
-rwxr-xr-x  t/t6030-bisect-porcelain.sh | 18
-rwxr-xr-x  t/t6415-merge-dir-to-symlink.sh | 6
-rwxr-xr-x  t/t6424-merge-unrelated-index-changes.sh | 1
-rwxr-xr-x  t/t6430-merge-recursive.sh | 4
-rwxr-xr-x  t/t6436-merge-overwrite.sh | 3
-rwxr-xr-x  t/t7002-mv-sparse-checkout.sh | 189
-rwxr-xr-x  t/t7112-reset-submodule.sh | 1
-rwxr-xr-x  t/t7201-co.sh | 1
-rwxr-xr-x  t/t7519-status-fsmonitor.sh | 70
-rwxr-xr-x  t/t7600-merge.sh | 1
-rwxr-xr-x  t/t7700-repack.sh | 18
-rwxr-xr-x  t/t7800-difftool.sh | 74
-rwxr-xr-x  t/t7814-grep-recurse-submodules.sh | 106
-rwxr-xr-x  t/t7900-maintenance.sh | 119
-rwxr-xr-x  t/t9001-send-email.sh | 15
-rwxr-xr-x  t/t9400-git-cvsserver-server.sh | 9
-rw-r--r--  t/test-lib-functions.sh | 107
-rw-r--r--  t/test-lib.sh | 33
-rw-r--r--  trace.h | 2
-rw-r--r--  trace2.c | 31
-rw-r--r--  trace2.h | 27
-rw-r--r--  trace2/tr2_tgt.h | 5
-rw-r--r--  trace2/tr2_tgt_event.c | 22
-rw-r--r--  trace2/tr2_tgt_normal.c | 14
-rw-r--r--  trace2/tr2_tgt_perf.c | 15
-rw-r--r--  trace2/tr2_tls.c | 1
-rw-r--r--  transport-helper.c | 18
-rw-r--r--  transport-internal.h | 2
-rw-r--r--  transport.c | 40
-rw-r--r--  transport.h | 4
-rw-r--r--  tree-diff.c | 4
-rw-r--r--  unicode-width.h | 44
-rw-r--r--  unpack-trees.c | 69
-rw-r--r--  unpack-trees.h | 14
-rw-r--r--  upload-pack.c | 32
-rw-r--r--  upload-pack.h | 14
-rw-r--r--  wrapper.c | 12
-rw-r--r--  write-or-die.c | 12
315 files changed, 9532 insertions, 3908 deletions
diff --git a/.github/workflows/l10n.yml b/.github/workflows/l10n.yml
new file mode 100644
index 0000000000..27f72f0ff3
--- /dev/null
+++ b/.github/workflows/l10n.yml
@@ -0,0 +1,105 @@
+name: git-l10n
+
+on: [push, pull_request_target]
+
+jobs:
+ git-po-helper:
+ if: >-
+ endsWith(github.repository, '/git-po') ||
+ contains(github.head_ref, 'l10n') ||
+ contains(github.ref, 'l10n')
+ runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
+ steps:
+ - name: Setup base and head objects
+ id: setup-tips
+ run: |
+ if test "${{ github.event_name }}" = "pull_request_target"
+ then
+ base=${{ github.event.pull_request.base.sha }}
+ head=${{ github.event.pull_request.head.sha }}
+ else
+ base=${{ github.event.before }}
+ head=${{ github.event.after }}
+ fi
+ echo "::set-output name=base::$base"
+ echo "::set-output name=head::$head"
+ - name: Run partial clone
+ run: |
+ git -c init.defaultBranch=master init --bare .
+ git remote add \
+ --mirror=fetch \
+ origin \
+ https://github.com/${{ github.repository }}
+ # Fetch tips that may be unreachable from github.ref:
+ # - For a forced push, "$base" may be unreachable.
+ # - For a "pull_request_target" event, "$head" may be unreachable.
+ args=
+ for commit in \
+ ${{ steps.setup-tips.outputs.base }} \
+ ${{ steps.setup-tips.outputs.head }}
+ do
+ case $commit in
+ *[^0]*)
+ args="$args $commit"
+ ;;
+ *)
+ # Should not fetch ZERO-OID.
+ ;;
+ esac
+ done
+ git -c protocol.version=2 fetch \
+ --progress \
+ --no-tags \
+ --no-write-fetch-head \
+ --filter=blob:none \
+ origin \
+ ${{ github.ref }} \
+ $args
+ - uses: actions/setup-go@v2
+ with:
+ go-version: '>=1.16'
+ - name: Install git-po-helper
+ run: go install github.com/git-l10n/git-po-helper@main
+ - name: Install other dependencies
+ run: |
+ sudo apt-get update -q &&
+ sudo apt-get install -q -y gettext
+ - name: Run git-po-helper
+ id: check-commits
+ run: |
+ exit_code=0
+ git-po-helper check-commits \
+ --github-action-event="${{ github.event_name }}" -- \
+ ${{ steps.setup-tips.outputs.base }}..${{ steps.setup-tips.outputs.head }} \
+ >git-po-helper.out 2>&1 || exit_code=$?
+ if test $exit_code -ne 0 || grep -q WARNING git-po-helper.out
+ then
+ # Remove ANSI colors which are proper for console logs but not
+ # proper for PR comment.
+ echo "COMMENT_BODY<<EOF" >>$GITHUB_ENV
+ perl -pe 's/\e\[[0-9;]*m//g; s/\bEOF$//g' git-po-helper.out >>$GITHUB_ENV
+ echo "EOF" >>$GITHUB_ENV
+ fi
+ cat git-po-helper.out
+ exit $exit_code
+ - name: Create comment in pull request for report
+ uses: mshick/add-pr-comment@v1
+ if: >-
+ always() &&
+ github.event_name == 'pull_request_target' &&
+ env.COMMENT_BODY != ''
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ repo-token-user-login: 'github-actions[bot]'
+ message: >
+ ${{ steps.check-commits.outcome == 'failure' && 'Errors and warnings' || 'Warnings' }}
+ found by [git-po-helper](https://github.com/git-l10n/git-po-helper#readme) in workflow
+ [#${{ github.run_number }}](${{ env.GITHUB_SERVER_URL }}/${{ github.repository }}/actions/runs/${{ github.run_id }}):
+
+ ```
+
+ ${{ env.COMMENT_BODY }}
+
+ ```
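A contributor can run roughly the same check locally before pushing. This is only a sketch of the workflow's last two steps above (it assumes Go and gettext are already installed; `my-l10n-topic` is a made-up branch name):

----
$ go install github.com/git-l10n/git-po-helper@main
$ git-po-helper check-commits master..my-l10n-topic
----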
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index b053b01c66..4728168478 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -232,6 +232,9 @@ jobs:
- jobname: linux-gcc-default
cc: gcc
pool: ubuntu-latest
+ - jobname: linux-leaks
+ cc: gcc
+ pool: ubuntu-latest
env:
CC: ${{matrix.vector.cc}}
jobname: ${{matrix.vector.jobname}}
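The new `linux-leaks` job corresponds to the leak-sanitizer CI build mentioned in the release notes below. Locally, a comparable (though not necessarily identical) build can be attempted with the Makefile's `SANITIZE` knob; this is a sketch, not the exact CI invocation:

----
$ make SANITIZE=leak
$ make SANITIZE=leak test
----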
diff --git a/.gitignore b/.gitignore
index 311841f9be..6be9de41ae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -190,6 +190,7 @@
/gitweb/static/gitweb.min.*
/config-list.h
/command-list.h
+/hook-list.h
*.tar.gz
*.dsc
*.deb
diff --git a/Documentation/MyFirstContribution.txt b/Documentation/MyFirstContribution.txt
index 015cf24631..b20bc8e914 100644
--- a/Documentation/MyFirstContribution.txt
+++ b/Documentation/MyFirstContribution.txt
@@ -1029,22 +1029,42 @@ kidding - be patient!)
[[v2-git-send-email]]
=== Sending v2
-Skip ahead to <<reviewing,Responding to Reviews>> for information on how to
-handle comments from reviewers. Continue this section when your topic branch is
-shaped the way you want it to look for your patchset v2.
+This section will focus on how to send a v2 of your patchset. To learn what
+should go into v2, skip ahead to <<reviewing,Responding to Reviews>> for
+information on how to handle comments from reviewers.
+
+We'll reuse our `psuh` topic branch for v2. Before we make any changes, we'll
+mark the tip of our v1 branch for easy reference:
-When you're ready with the next iteration of your patch, the process is fairly
-similar.
+----
+$ git checkout psuh
+$ git branch psuh-v1
+----
-First, generate your v2 patches again:
+Refine your patch series by using `git rebase -i` to adjust commits based upon
+reviewer comments. Once the patch series is ready for submission, generate your
+patches again, but with some new flags:
----
-$ git format-patch -v2 --cover-letter -o psuh/ master..psuh
+$ git format-patch -v2 --cover-letter -o psuh/ --range-diff master..psuh-v1 master..
----
-This will add your v2 patches, all named like `v2-000n-my-commit-subject.patch`,
-to the `psuh/` directory. You may notice that they are sitting alongside the v1
-patches; that's fine, but be careful when you are ready to send them.
+The `--range-diff master..psuh-v1` parameter tells `format-patch` to include a
+range-diff between `psuh-v1` and `psuh` in the cover letter (see
+linkgit:git-range-diff[1]). This helps tell reviewers about the differences
+between your v1 and v2 patches.
+
+The `-v2` parameter tells `format-patch` to output your patches
+as version "2". For instance, you may notice that your v2 patches are
+all named like `v2-000n-my-commit-subject.patch`. `-v2` will also format
+your patches by prefixing them with "[PATCH v2]" instead of "[PATCH]",
+and your range-diff will be prefaced with "Range-diff against v1".
+
+After you run this command, `format-patch` will output the patches to the `psuh/`
+directory, alongside the v1 patches. Using a single directory makes it easy to
+refer to the old v1 patches while proofreading the v2 patches, but you will need
+to be careful to send out only the v2 patches. We will use a pattern like
+"psuh/v2-*.patch" (not "psuh/*.patch", which would match v1 and v2 patches).
Edit your cover letter again. Now is a good time to mention what's different
between your last version and now, if it's something significant. You do not
@@ -1082,7 +1102,7 @@ to the command:
----
$ git send-email --to=target@example.com
--in-reply-to="<foo.12345.author@example.com>"
- psuh/v2*
+ psuh/v2-*.patch
----
[[single-patch]]
diff --git a/Documentation/RelNotes/2.33.1.txt b/Documentation/RelNotes/2.33.1.txt
new file mode 100644
index 0000000000..b71738e654
--- /dev/null
+++ b/Documentation/RelNotes/2.33.1.txt
@@ -0,0 +1,138 @@
+Git 2.33.1 Release Notes
+========================
+
+This primarily is to backport various fixes accumulated during the
+development towards Git 2.34, the next feature release.
+
+
+Fixes since v2.33
+-----------------
+
+ * The unicode character width table (used for output alignment) has
+ been updated.
+
+ * Input validation of "git pack-objects --stdin-packs" has been
+ corrected.
+
+ * Bugfix for common ancestor negotiation recently introduced in "git
+ push" codepath.
+
+ * "git pull" had various corner cases that were not well thought out
+ around its --rebase backend, e.g. "git pull --ff-only" did not stop
+ but went ahead and rebased when the history on other side is not a
+ descendant of our history. The series tries to fix them up.
+
+ * "git apply" miscounted the bytes and failed to read to the end of
+ binary hunks.
+
+ * "git range-diff" code clean-up.
+
+ * "git commit --fixup" now works with "--edit" again, after it was
+ broken in v2.32.
+
+ * Use upload-artifacts v1 (instead of v2) for 32-bit linux, as the
+ new version has a blocker bug for that architecture.
+
+ * Checking out all the paths from HEAD during the last conflicted
+ step in "git rebase" and continuing would cause the step to be
+ skipped (which is expected), but leaves MERGE_MSG file behind in
+ $GIT_DIR and confuses the next "git commit", which has been
+ corrected.
+
+ * Various bugs in "git rebase -r" have been fixed.
+
+ * mmap() imitation used to call xmalloc() that dies upon malloc()
+ failure, which has been corrected to just return an error to the
+ caller to be handled.
+
+ * "git diff --relative" segfaulted and/or produced incorrect result
+ when there are unmerged paths.
+
+ * The delayed checkout code path in "git checkout" etc. were chatty
+ even when --quiet and/or --no-progress options were given.
+
+ * "git branch -D <branch>" used to refuse to remove a broken branch
+ ref that points at a missing commit, which has been corrected.
+
+ * Build update for Apple clang.
+
+ * The parser for the "--nl" option of "git column" has been
+ corrected.
+
+ * "git upload-pack" which runs on the other side of "git fetch"
+ forgot to take the ref namespaces into account when handling
+ want-ref requests.
+
+ * The sparse-index support can corrupt the index structure by storing
+ a stale and/or uninitialized data, which has been corrected.
+
+ * Buggy tests could damage repositories outside the throw-away test
+ area we created. We now by default export GIT_CEILING_DIRECTORIES
+ to limit the damage from such a stray test.
+
+ * Even when running "git send-email" without its own threaded
+ discussion support, a threading related header in one message is
+ carried over to the subsequent message to result in an unwanted
+ threading, which has been corrected.
+
+ * The output from "git fast-export", when its anonymization feature
+ is in use, showed an annotated tag incorrectly.
+
+ * Recent "diff -m" changes broke "gitk", which has been corrected.
+
+ * "git maintenance" scheduler fix for macOS.
+
+ * A pathname in an advice message has been made cut-and-paste ready.
+
+ * The "git apply -3" code path learned not to bother the lower level
+ merge machinery when the three-way merge can be trivially resolved
+ without the content level merge.
+
+ * The code that optionally creates the *.rev reverse index file has
+ been optimized to avoid needless computation when it is not writing
+ the file out.
+
+ * "git range-diff -I... <range> <range>" segfaulted, which has been
+ corrected.
+
+ * The order in which various files that make up a single (conceptual)
+ packfile has been reevaluated and straightened up. This matters in
+ correctness, as an incomplete set of files must not be shown to a
+ running Git.
+
+ * The "mode" word is useless in a call to open(2) that does not
+ create a new file. Such a call in the files backend of the ref
+ subsystem has been cleaned up.
+
+ * "git update-ref --stdin" failed to flush its output as needed,
+ which potentially led the conversation to a deadlock.
+
+ * When "git am --abort" fails to abort correctly, it still exited
+ with exit status of 0, which has been corrected.
+
+ * Correct nr and alloc members of strvec struct to be of type size_t.
+
+ * "git stash", where the tentative change involves changing a
+ directory to a file (or vice versa), was confused, which has been
+ corrected.
+
+ * "git clone" from a repository whose HEAD is unborn into a bare
+ repository didn't follow the branch name the other side used, which
+ is corrected.
+
+ * "git cvsserver" had a long-standing bug in its authentication code,
+ which has finally been corrected (it is unclear and is a separate
+ question if anybody is seriously using it, though).
+
+ * "git difftool --dir-diff" mishandled symbolic links.
+
+ * Sensitive data in the HTTP trace were supposed to be redacted, but
+ we failed to do so in HTTP/2 requests.
+
+ * "make clean" has been updated to remove leftover .depend/
+ directories, even when it is not told to use them to compute header
+ dependencies.
+
+ * Protocol v0 clients can get stuck parsing a malformed feature line.
+
+Also contains various documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.34.0.txt b/Documentation/RelNotes/2.34.0.txt
index d21a63ed83..080687f202 100644
--- a/Documentation/RelNotes/2.34.0.txt
+++ b/Documentation/RelNotes/2.34.0.txt
@@ -34,6 +34,34 @@ UI, Workflows & Features
interactive when the help.autocorrect configuration variable is set
to 'prompt'.
+ * "git maintenance" scheduler learned to use systemd timers as a
+ possible backend.
+
+ * "git diff --submodule=diff" showed failure from run_command() when
+ trying to run diff inside a submodule, when the user manually
+ removes the submodule directory.
+
+ * "git bundle unbundle" learned to show progress display.
+
+ * In cone mode, the sparse-index code path learned to remove ignored
+ files (like build artifacts) outside the sparse cone, allowing the
+ entire directory outside the sparse cone to be removed, which is
+ especially useful when the sparse patterns change.
+
+ * Taking advantage of the CGI interface, http-backend has been
+ updated to enable protocol v2 automatically when the other side
+ asks for it.
+
+ * The credential-cache helper has been adjusted to Windows.
+
+ * The error in "git help no-such-git-command" is handled better.
+
+ * The unicode character width table (used for output alignment) has
+ been updated.
+
+ * The ref iteration code used to optionally allow dangling refs to be
+ shown, which has been tightened up.
+
Performance, Internal Implementation, Development Support etc.
@@ -74,123 +102,184 @@ Performance, Internal Implementation, Development Support etc.
* Callers from older advice_config[] based API has been updated to
use the newer advice_if_enabled() and advice_enabled() API.
+ * Teach "test_pause" and "debug" helpers to allow using the HOME and
+ TERM environment variables the user usually uses.
+
+ * "make INSTALL_STRIP=-s install" allows the installation step to use
+ "install -s" to strip the binaries as they get installed.
+
+ * Code that handles large number of refs in the "git fetch" code
+ path has been optimized.
+
+ * The reachability bitmap file used to be generated only for a single
+ pack, but now we've learned to generate bitmaps for history that
+ span across multiple packfiles.
+
+ * The code to make "git grep" recurse into submodules has been
+ updated to migrate away from the "add submodule's object store as
+ an alternate object store" mechanism (which is suboptimal).
+
+ * The tracing of process ancestry information has been enhanced.
+
+ * Reduce number of write(2) system calls while sending the
+ ref advertisement.
+
+ * Update the build procedure to use the "-pedantic" build when
+ DEVELOPER makefile macro is in effect.
+
+ * Large part of "git submodule add" gets rewritten in C.
+
+ * The run-command API has been updated so that the callers can easily
+ ask the file descriptors open for packfiles to be closed immediately
+ before spawning commands that may trigger auto-gc.
+
+ * An oddball OPTION_ARGUMENT feature has been removed from the
+ parse-options API.
+
+ * The mergesort implementation used to sort linked list has been
+ optimized.
+
+ * Remove external declaration of functions that no longer exist.
+
+ * "git multi-pack-index write --bitmap" learns to propagate the
+ hashcache from original bitmap to resulting bitmap.
+
+ * CI learns to run the leak sanitizer builds.
+
Fixes since v2.33
-----------------
* Input validation of "git pack-objects --stdin-packs" has been
corrected.
- (merge 561fa03529 ab/pack-stdin-packs-fix later to maint).
* Bugfix for common ancestor negotiation recently introduced in "git
push" code path.
- (merge 82823118b9 jt/push-negotiation-fixes later to maint).
* "git pull" had various corner cases that were not well thought out
around its --rebase backend, e.g. "git pull --ff-only" did not stop
but went ahead and rebased when the history on other side is not a
descendant of our history. The series tries to fix them up.
- (merge 6f843a3355 en/pull-conflicting-options later to maint).
* "git apply" miscounted the bytes and failed to read to the end of
binary hunks.
- (merge 46d723ce57 jk/apply-binary-hunk-parsing-fix later to maint).
* "git range-diff" code clean-up.
- (merge c4d5907324 jk/range-diff-fixes later to maint).
* "git commit --fixup" now works with "--edit" again, after it was
broken in v2.32.
- (merge 8ef6aad664 jk/commit-edit-fixup-fix later to maint).
* Use upload-artifacts v1 (instead of v2) for 32-bit linux, as the
new version has a blocker bug for that architecture.
- (merge 3cf9bb36bf cb/ci-use-upload-artifacts-v1 later to maint).
* Checking out all the paths from HEAD during the last conflicted
step in "git rebase" and continuing would cause the step to be
skipped (which is expected), but leaves MERGE_MSG file behind in
$GIT_DIR and confuses the next "git commit", which has been
corrected.
- (merge e5ee33e855 pw/rebase-skip-final-fix later to maint).
* Various bugs in "git rebase -r" have been fixed.
- (merge f2563c9ef3 pw/rebase-r-fixes later to maint).
* mmap() imitation used to call xmalloc() that dies upon malloc()
failure, which has been corrected to just return an error to the
caller to be handled.
- (merge 95b4ff3931 rs/git-mmap-uses-malloc later to maint).
* "git diff --relative" segfaulted and/or produced incorrect result
when there are unmerged paths.
- (merge 8174627b3d dd/diff-files-unmerged-fix later to maint).
* The delayed checkout code path in "git checkout" etc. were chatty
even when --quiet and/or --no-progress options were given.
- (merge 7a132c628e mt/quiet-with-delayed-checkout later to maint).
* "git branch -D <branch>" used to refuse to remove a broken branch
ref that points at a missing commit, which has been corrected.
- (merge 597a977489 rs/branch-allow-deleting-dangling later to maint).
* Build update for Apple clang.
- (merge f32c5d3716 cb/makefile-apple-clang later to maint).
* The parser for the "--nl" option of "git column" has been
corrected.
- (merge c93ca46cf5 sg/column-nl later to maint).
* "git upload-pack" which runs on the other side of "git fetch"
forgot to take the ref namespaces into account when handling
want-ref requests.
- (merge 53a66ec37c ka/want-ref-in-namespace later to maint).
* The sparse-index support can corrupt the index structure by storing
a stale and/or uninitialized data, which has been corrected.
- (merge d9e9b44d7a jh/sparse-index-resize-fix later to maint).
* Buggy tests could damage repositories outside the throw-away test
area we created. We now by default export GIT_CEILING_DIRECTORIES
to limit the damage from such a stray test.
- (merge 614c3d8f2e sg/set-ceiling-during-tests later to maint).
* Even when running "git send-email" without its own threaded
discussion support, a threading related header in one message is
carried over to the subsequent message to result in an unwanted
threading, which has been corrected.
- (merge e082113484 mh/send-email-reset-in-reply-to later to maint).
* The output from "git fast-export", when its anonymization feature
is in use, showed an annotated tag incorrectly.
- (merge 2f040a9671 tk/fast-export-anonymized-tag-fix later to maint).
* Doc update plus improved error reporting.
- (merge 1e93770888 jk/log-warn-on-bogus-encoding later to maint).
+
+ * Recent "diff -m" changes broke "gitk", which has been corrected.
+
+ * Regression fix.
+
+ * The "git apply -3" code path learned not to bother the lower level
+ merge machinery when the three-way merge can be trivially resolved
+ without the content level merge. This fixes a regression caused by
+ recent "-3way first and fall back to direct application" change.
+
+ * The code that optionally creates the *.rev reverse index file has
+ been optimized to avoid needless computation when it is not writing
+ the file out.
+
+ * "git range-diff -I... <range> <range>" segfaulted, which has been
+ corrected.
+
+ * The order in which various files that make up a single (conceptual)
+ packfile has been reevaluated and straightened up. This matters in
+ correctness, as an incomplete set of files must not be shown to a
+ running Git.
+
+ * The "mode" word is useless in a call to open(2) that does not
+ create a new file. Such a call in the files backend of the ref
+ subsystem has been cleaned up.
+
+ * "git update-ref --stdin" failed to flush its output as needed,
+ which potentially led the conversation to a deadlock.
+
+ * When "git am --abort" fails to abort correctly, it still exited
+ with exit status of 0, which has been corrected.
+
+ * Correct nr and alloc members of strvec struct to be of type size_t.
+
+ * "git stash", where the tentative change involves changing a
+ directory to a file (or vice versa), was confused, which has been
+ corrected.
+
+ * "git clone" from a repository whose HEAD is unborn into a bare
+ repository didn't follow the branch name the other side used, which
+ is corrected.
+
+ * "git cvsserver" had a long-standing bug in its authentication code,
+ which has finally been corrected (it is unclear and is a separate
+ question if anybody is seriously using it, though).
+
+ * "git difftool --dir-diff" mishandled symbolic links.
+
+ * Sensitive data in the HTTP trace were supposed to be redacted, but
+ we failed to do so in HTTP/2 requests.
+
+ * "make clean" has been updated to remove leftover .depend/
+ directories, even when it is not told to use them to compute header
+ dependencies.
+
+ * Protocol v0 clients can get stuck parsing a malformed feature line.
* Other code cleanup, docfix, build fix, etc.
- (merge 1d9c8daef8 ab/bundle-doc later to maint).
- (merge 81483fe613 en/merge-strategy-docs later to maint).
- (merge 626beebdf8 js/log-protocol-version later to maint).
- (merge 00e302da76 cb/builtin-merge-format-string-fix later to maint).
- (merge ad51ae4dc0 cb/ci-freebsd-update later to maint).
- (merge be6444d1ca fc/completion-updates later to maint).
- (merge ff7b83f562 ti/tcsh-completion-regression-fix later to maint).
- (merge 325b06deda sg/make-fix-ar-invocation later to maint).
- (merge bd72824c60 me/t5582-cleanup later to maint).
- (merge f6a5af0f62 ga/send-email-sendmail-cmd later to maint).
- (merge f58c7468cd ab/ls-remote-packet-trace later to maint).
- (merge 0160f7e725 ab/rebase-fatal-fatal-fix later to maint).
- (merge a16eb6b1ff js/maintenance-launchctl-fix later to maint).
- (merge c21b2511c2 jk/t5323-no-pack-test-fix later to maint).
- (merge 5146c2f148 mh/credential-leakfix later to maint).
- (merge 1549577338 dd/t6300-wo-gpg-fix later to maint).
- (merge 66e905b7dd rs/xopen-reports-open-failures later to maint).
- (merge 469888e6a5 es/walken-tutorial-fix later to maint).
- (merge 88682b016d ba/object-info later to maint).
- (merge b45c172e51 ab/gc-log-rephrase later to maint).
- (merge ccdd5d1eb1 ab/mailmap-leakfix later to maint).
- (merge 6540b71614 cb/remote-ndebug-fix later to maint).
- (merge e4f8d27585 rs/show-branch-simplify later to maint).
- (merge e124ecf7f7 rs/archive-use-object-id later to maint).
- (merge cebead1ebf cb/ci-build-pedantic later to maint).
+ (merge f188160be9 ab/bundle-remove-verbose-option later to maint).
+ (merge 8c6b4332b4 rs/close-pack-leakfix later to maint).
+ (merge 51b04c05b7 bs/difftool-msg-tweak later to maint).
+ (merge dd20e4a6db ab/make-compdb-fix later to maint).
+ (merge 6ffb990dc4 os/status-docfix later to maint).
+ (merge 100c2da2d3 rs/p3400-lose-tac later to maint).
+ (merge 76f3b69896 tb/aggregate-ignore-leading-whitespaces later to maint).
diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt
index 763f7af7c4..ad7f73a1ea 100644
--- a/Documentation/config/pack.txt
+++ b/Documentation/config/pack.txt
@@ -159,6 +159,10 @@ pack.writeBitmapHashCache::
between an older, bitmapped pack and objects that have been
pushed since the last gc). The downside is that it consumes 4
bytes per object of disk space. Defaults to true.
++
+When writing a multi-pack reachability bitmap, no new namehashes are
+computed; instead, any namehashes stored in an existing bitmap are
+permuted into their appropriate location when writing a new bitmap.
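As a rough illustration of the interaction described above, one possible sequence is to enable the hash cache, repack with a single-pack bitmap, and then write a multi-pack bitmap that inherits the stored namehashes:

----
$ git config pack.writeBitmapHashCache true
$ git repack -Adb
$ git multi-pack-index write --bitmap
----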
pack.writeReverseIndex::
When true, git will write a corresponding .rev file (see:
diff --git a/Documentation/git-add.txt b/Documentation/git-add.txt
index be5e3ac54b..11eb70f16c 100644
--- a/Documentation/git-add.txt
+++ b/Documentation/git-add.txt
@@ -9,7 +9,7 @@ SYNOPSIS
--------
[verse]
'git add' [--verbose | -v] [--dry-run | -n] [--force | -f] [--interactive | -i] [--patch | -p]
- [--edit | -e] [--[no-]all | --[no-]ignore-removal | [--update | -u]]
+ [--edit | -e] [--[no-]all | --[no-]ignore-removal | [--update | -u]] [--sparse]
[--intent-to-add | -N] [--refresh] [--ignore-errors] [--ignore-missing] [--renormalize]
[--chmod=(+|-)x] [--pathspec-from-file=<file> [--pathspec-file-nul]]
[--] [<pathspec>...]
@@ -79,6 +79,13 @@ in linkgit:gitglossary[7].
--force::
Allow adding otherwise ignored files.
+--sparse::
+ Allow updating index entries outside of the sparse-checkout cone.
+ Normally, `git add` refuses to update index entries whose paths do
+ not fit within the sparse-checkout cone, since those files might
+ be removed from the working tree without warning. See
+ linkgit:git-sparse-checkout[1] for more details.
+
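A minimal sketch of the behaviour described above, assuming a cone-mode checkout whose cone is `src/` and a hypothetical path `docs/README` outside it:

----
$ git sparse-checkout set src
$ git add docs/README            # refused: path is outside the sparse-checkout cone
$ git add --sparse docs/README   # updates the index entry anyway
----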
-i::
--interactive::
Add modified contents in the working tree interactively to
diff --git a/Documentation/git-am.txt b/Documentation/git-am.txt
index 8714dfcb76..0a4a984dfd 100644
--- a/Documentation/git-am.txt
+++ b/Documentation/git-am.txt
@@ -178,6 +178,8 @@ default. You can use `--no-utf8` to override this.
--abort::
Restore the original branch and abort the patching operation.
+ Revert contents of files involved in the am operation to their
+ pre-am state.
--quit::
Abort the patching operation but keep HEAD and the index
diff --git a/Documentation/git-bugreport.txt b/Documentation/git-bugreport.txt
index 66e88c2e31..d8817bf3ce 100644
--- a/Documentation/git-bugreport.txt
+++ b/Documentation/git-bugreport.txt
@@ -40,8 +40,8 @@ OPTIONS
-------
-o <path>::
--output-directory <path>::
- Place the resulting bug report file in `<path>` instead of the root of
- the Git repository.
+ Place the resulting bug report file in `<path>` instead of the current
+ directory.
-s <format>::
--suffix <format>::
diff --git a/Documentation/git-bundle.txt b/Documentation/git-bundle.txt
index ac0d003835..71b5ecabd1 100644
--- a/Documentation/git-bundle.txt
+++ b/Documentation/git-bundle.txt
@@ -13,7 +13,7 @@ SYNOPSIS
[--version=<version>] <file> <git-rev-list-args>
'git bundle' verify [-q | --quiet] <file>
'git bundle' list-heads <file> [<refname>...]
-'git bundle' unbundle <file> [<refname>...]
+'git bundle' unbundle [--progress] <file> [<refname>...]
DESCRIPTION
-----------
diff --git a/Documentation/git-checkout.txt b/Documentation/git-checkout.txt
index b1a6fe4499..d473c9bf38 100644
--- a/Documentation/git-checkout.txt
+++ b/Documentation/git-checkout.txt
@@ -118,8 +118,9 @@ OPTIONS
-f::
--force::
When switching branches, proceed even if the index or the
- working tree differs from `HEAD`. This is used to throw away
- local changes.
+ working tree differs from `HEAD`, and even if there are untracked
+ files in the way. This is used to throw away local changes and
+ any untracked files or directories that are in the way.
+
When checking out paths from the index, do not fail upon unmerged
entries; instead, unmerged entries are ignored.
diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt
index 2dc4bae6da..992225f612 100644
--- a/Documentation/git-config.txt
+++ b/Documentation/git-config.txt
@@ -71,6 +71,9 @@ codes are:
On success, the command returns the exit code 0.
+A list of all available configuration variables can be obtained using the
+`git help --config` command.
+
[[OPTIONS]]
OPTIONS
-------
diff --git a/Documentation/git-cvsserver.txt b/Documentation/git-cvsserver.txt
index f2e4a47ebe..4dc57ed254 100644
--- a/Documentation/git-cvsserver.txt
+++ b/Documentation/git-cvsserver.txt
@@ -99,7 +99,7 @@ looks like
------
-Only anonymous access is provided by pserve by default. To commit you
+Only anonymous access is provided by pserver by default. To commit you
will have to create pserver accounts, simply add a gitcvs.authdb
setting in the config file of the repositories you want the cvsserver
to allow writes to, for example:
@@ -114,21 +114,20 @@ The format of these files is username followed by the encrypted password,
for example:
------
- myuser:$1Oyx5r9mdGZ2
- myuser:$1$BA)@$vbnMJMDym7tA32AamXrm./
+ myuser:sqkNi8zPf01HI
+ myuser:$1$9K7FzU28$VfF6EoPYCJEYcVQwATgOP/
+ myuser:$5$.NqmNH1vwfzGpV8B$znZIcumu1tNLATgV2l6e1/mY8RzhUDHMOaVOeL1cxV3
------
You can use the 'htpasswd' facility that comes with Apache to make these
-files, but Apache's MD5 crypt method differs from the one used by most C
-library's crypt() function, so don't use the -m option.
+files, but only with the -d option (or -B if your system supports it).
-Alternatively you can produce the password with perl's crypt() operator:
------
- perl -e 'my ($user, $pass) = @ARGV; printf "%s:%s\n", $user, crypt($user, $pass)' $USER password
------
+Preferably use the system specific utility that manages password hash
+creation in your platform (e.g. mkpasswd in Linux, encrypt in OpenBSD or
+pwhash in NetBSD) and paste it in the right location.
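For example, a crypt-format hash produced by Apache's `htpasswd` (note the `-d` option, as mentioned above) can be written to the file named by `gitcvs.authdb`; the paths here are only placeholders:

----
$ htpasswd -d -c /var/git/authdb myuser
$ git --git-dir=/var/git/project.git config gitcvs.authdb /var/git/authdb
----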
Then provide your password via the pserver method, for example:
------
- cvs -d:pserver:someuser:somepassword <at> server/path/repo.git co <HEAD_name>
+ cvs -d:pserver:someuser:somepassword@server:/path/repo.git co <HEAD_name>
------
No special setup is needed for SSH access, other than having Git tools
in the PATH. If you have clients that do not accept the CVS_SERVER
@@ -138,7 +137,7 @@ Note: Newer CVS versions (>= 1.12.11) also support specifying
CVS_SERVER directly in CVSROOT like
------
-cvs -d ":ext;CVS_SERVER=git cvsserver:user@server/path/repo.git" co <HEAD_name>
+ cvs -d ":ext;CVS_SERVER=git cvsserver:user@server/path/repo.git" co <HEAD_name>
------
This has the advantage that it will be saved in your 'CVS/Root' files and
you don't need to worry about always setting the correct environment
@@ -186,8 +185,8 @@ allowing access over SSH.
+
--
------
- export CVSROOT=:ext:user@server:/var/git/project.git
- export CVS_SERVER="git cvsserver"
+ export CVSROOT=:ext:user@server:/var/git/project.git
+ export CVS_SERVER="git cvsserver"
------
--
4. For SSH clients that will make commits, make sure their server-side
@@ -203,7 +202,7 @@ allowing access over SSH.
`project-master` directory:
+
------
- cvs co -d project-master master
+ cvs co -d project-master master
------
[[dbbackend]]
diff --git a/Documentation/git-http-backend.txt b/Documentation/git-http-backend.txt
index 558966aa83..0c5c0dde19 100644
--- a/Documentation/git-http-backend.txt
+++ b/Documentation/git-http-backend.txt
@@ -16,7 +16,9 @@ A simple CGI program to serve the contents of a Git repository to Git
clients accessing the repository over http:// and https:// protocols.
The program supports clients fetching using both the smart HTTP protocol
and the backwards-compatible dumb HTTP protocol, as well as clients
-pushing using the smart HTTP protocol.
+pushing using the smart HTTP protocol. It also supports Git's
+more-efficient "v2" protocol if properly configured; see the
+discussion of `GIT_PROTOCOL` in the ENVIRONMENT section below.
It verifies that the directory has the magic file
"git-daemon-export-ok", and it will refuse to export any Git directory
@@ -77,6 +79,18 @@ Apache 2.x::
SetEnv GIT_PROJECT_ROOT /var/www/git
SetEnv GIT_HTTP_EXPORT_ALL
ScriptAlias /git/ /usr/libexec/git-core/git-http-backend/
+
+# This is not strictly necessary using Apache and a modern version of
+# git-http-backend, as the webserver will pass along the header in the
+# environment as HTTP_GIT_PROTOCOL, and http-backend will copy that into
+# GIT_PROTOCOL. But you may need this line (or something similar if you
+# are using a different webserver), or if you want to support older Git
+# versions that did not do that copying.
+#
+# Having the webserver set up GIT_PROTOCOL is perfectly fine even with
+# modern versions (and will take precedence over HTTP_GIT_PROTOCOL,
+# which means it can be used to override the client's request).
+SetEnvIf Git-Protocol ".*" GIT_PROTOCOL=$0
----------------------------------------------------------------
+
To enable anonymous read access but authenticated write access,
@@ -264,6 +278,16 @@ a repository with an extremely large number of refs. The value can be
specified with a unit (e.g., `100M` for 100 megabytes). The default is
10 megabytes.
+Clients may probe for optional protocol capabilities (like the v2
+protocol) using the `Git-Protocol` HTTP header. In order to support
+these, the contents of that header must appear in the `GIT_PROTOCOL`
+environment variable. Most webservers will pass this header to the CGI
+via the `HTTP_GIT_PROTOCOL` variable, and `git-http-backend` will
+automatically copy that to `GIT_PROTOCOL`. However, some webservers may
+be more selective about which headers they'll pass, in which case they
+need to be configured explicitly (see the mention of `Git-Protocol` in
+the Apache config from the earlier EXAMPLES section).
+
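One way to confirm that the header actually reaches the backend is to request protocol v2 from a client and look for it in the packet trace; the URL below is a placeholder:

----
$ GIT_TRACE_PACKET=1 git -c protocol.version=2 \
	ls-remote https://git.example.com/repo.git 2>&1 | grep 'version 2'
----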
The backend process sets GIT_COMMITTER_NAME to '$REMOTE_USER' and
GIT_COMMITTER_EMAIL to '$\{REMOTE_USER}@http.$\{REMOTE_ADDR\}',
ensuring that any reflogs created by 'git-receive-pack' contain some
diff --git a/Documentation/git-index-pack.txt b/Documentation/git-index-pack.txt
index 7fa74b9e79..1f1e359225 100644
--- a/Documentation/git-index-pack.txt
+++ b/Documentation/git-index-pack.txt
@@ -82,6 +82,12 @@ OPTIONS
--strict::
Die, if the pack contains broken objects or links.
+--progress-title::
+ For internal use only.
++
+Set the title of the progress bar. The title is "Receiving objects" by
+default and "Indexing objects" when `--stdin` is specified.
+
--check-self-contained-and-connected::
Die if the pack contains broken links. For internal use only.
diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt
index 1e738ad398..e2cfb68ab5 100644
--- a/Documentation/git-maintenance.txt
+++ b/Documentation/git-maintenance.txt
@@ -179,6 +179,17 @@ OPTIONS
`maintenance.<task>.enabled` configured as `true` are considered.
See the 'TASKS' section for the list of accepted `<task>` values.
+--scheduler=auto|crontab|systemd-timer|launchctl|schtasks::
+ When combined with the `start` subcommand, specify the scheduler
+ for running the hourly, daily and weekly executions of
+ `git maintenance run`.
+ Possible values for `<scheduler>` are `auto`, `crontab`
+ (POSIX), `systemd-timer` (Linux), `launchctl` (macOS), and
+ `schtasks` (Windows). When `auto` is specified, the
+ appropriate platform-specific scheduler is used; on Linux,
+ `systemd-timer` is used if available, otherwise
+ `crontab`. Default is `auto`.
+
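For example, to opt into the systemd backend explicitly instead of relying on `auto`, and then confirm that the timers were registered:

----
$ git maintenance start --scheduler=systemd-timer
$ systemctl --user list-timers
----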
TROUBLESHOOTING
---------------
@@ -277,6 +288,52 @@ schedule to ensure you are executing the correct binaries in your
schedule.
+BACKGROUND MAINTENANCE ON LINUX SYSTEMD SYSTEMS
+-----------------------------------------------
+
+While Linux supports `cron`, depending on the distribution, `cron` may
+be an optional package not necessarily installed. On modern Linux
+distributions, systemd timers are superseding it.
+
+If user systemd timers are available, they will be used as a replacement
+of `cron`.
+
+In this case, `git maintenance start` will create user systemd timer units
+and start the timers. The current list of user-scheduled tasks can be found
+by running `systemctl --user list-timers`. The timers written by `git
+maintenance start` are similar to this:
+
+-----------------------------------------------------------------------
+$ systemctl --user list-timers
+NEXT LEFT LAST PASSED UNIT ACTIVATES
+Thu 2021-04-29 19:00:00 CEST 42min left Thu 2021-04-29 18:00:11 CEST 17min ago git-maintenance@hourly.timer git-maintenance@hourly.service
+Fri 2021-04-30 00:00:00 CEST 5h 42min left Thu 2021-04-29 00:00:11 CEST 18h ago git-maintenance@daily.timer git-maintenance@daily.service
+Mon 2021-05-03 00:00:00 CEST 3 days left Mon 2021-04-26 00:00:11 CEST 3 days ago git-maintenance@weekly.timer git-maintenance@weekly.service
+-----------------------------------------------------------------------
+
+One timer is registered for each `--schedule=<frequency>` option.
+
+The definition of the systemd units can be inspected in the following files:
+
+-----------------------------------------------------------------------
+~/.config/systemd/user/git-maintenance@.timer
+~/.config/systemd/user/git-maintenance@.service
+~/.config/systemd/user/timers.target.wants/git-maintenance@hourly.timer
+~/.config/systemd/user/timers.target.wants/git-maintenance@daily.timer
+~/.config/systemd/user/timers.target.wants/git-maintenance@weekly.timer
+-----------------------------------------------------------------------
+
+`git maintenance start` will overwrite these files and start the timer
+again with `systemctl --user`, so any customization should be done by
+creating a drop-in file, i.e. a `.conf` suffixed file in the
+`~/.config/systemd/user/git-maintenance@.service.d` directory.
+
+`git maintenance stop` will stop the user systemd timers and delete
+the above mentioned files.
+
+For more details, see `systemd.timer(5)`.
+
+
BACKGROUND MAINTENANCE ON MACOS SYSTEMS
---------------------------------------
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
index ffd601bc17..3b0b55cd75 100644
--- a/Documentation/git-multi-pack-index.txt
+++ b/Documentation/git-multi-pack-index.txt
@@ -9,8 +9,7 @@ git-multi-pack-index - Write and verify multi-pack-indexes
SYNOPSIS
--------
[verse]
-'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress]
- [--preferred-pack=<pack>] <subcommand>
+'git multi-pack-index' [--object-dir=<dir>] [--[no-]bitmap] <sub-command>
DESCRIPTION
-----------
@@ -23,10 +22,13 @@ OPTIONS
Use given directory for the location of Git objects. We check
`<dir>/packs/multi-pack-index` for the current MIDX file, and
`<dir>/packs` for the pack-files to index.
++
+`<dir>` must be an alternate of the current repository.
--[no-]progress::
Turn progress on/off explicitly. If neither is specified, progress is
- shown if standard error is connected to a terminal.
+ shown if standard error is connected to a terminal. Supported by
+ sub-commands `write`, `verify`, `expire`, and `repack`.
The following subcommands are available:
@@ -37,9 +39,12 @@ write::
--
--preferred-pack=<pack>::
Optionally specify the tie-breaking pack used when
- multiple packs contain the same object. If not given,
- ties are broken in favor of the pack with the lowest
- mtime.
+ multiple packs contain the same object. `<pack>` must
+ contain at least one object. If not given, ties are
+ broken in favor of the pack with the lowest mtime.
+
+ --[no-]bitmap::
+ Control whether or not a multi-pack bitmap is written.
--
verify::
@@ -81,6 +86,13 @@ EXAMPLES
$ git multi-pack-index write
-----------------------------------------------
+* Write a MIDX file for the packfiles in the current .git folder with a
+corresponding bitmap.
++
+-------------------------------------------------------------
+$ git multi-pack-index write --preferred-pack=<pack> --bitmap
+-------------------------------------------------------------
+
* Write a MIDX file for the packfiles in an alternate object store.
+
-----------------------------------------------
diff --git a/Documentation/git-read-tree.txt b/Documentation/git-read-tree.txt
index 5fa8bab64c..8c3aceb832 100644
--- a/Documentation/git-read-tree.txt
+++ b/Documentation/git-read-tree.txt
@@ -10,8 +10,7 @@ SYNOPSIS
--------
[verse]
'git read-tree' [[-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>]
- [-u [--exclude-per-directory=<gitignore>] | -i]]
- [--index-output=<file>] [--no-sparse-checkout]
+ [-u | -i]] [--index-output=<file>] [--no-sparse-checkout]
(--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])
@@ -39,8 +38,9 @@ OPTIONS
--reset::
Same as -m, except that unmerged entries are discarded instead
- of failing. When used with `-u`, updates leading to loss of
- working tree changes will not abort the operation.
+ of failing. When used with `-u`, updates leading to loss of
+ working tree changes or untracked files or directories will not
+ abort the operation.
-u::
After a successful merge, update the files in the work
@@ -88,21 +88,6 @@ OPTIONS
The command will refuse to overwrite entries that already
existed in the original index file.
---exclude-per-directory=<gitignore>::
- When running the command with `-u` and `-m` options, the
- merge result may need to overwrite paths that are not
- tracked in the current branch. The command usually
- refuses to proceed with the merge to avoid losing such a
- path. However this safety valve sometimes gets in the
- way. For example, it often happens that the other
- branch added a file that used to be a generated file in
- your branch, and the safety valve triggers when you try
- to switch to that branch after you ran `make` but before
- running `make clean` to remove the generated file. This
- option tells the command to read per-directory exclude
- file (usually '.gitignore') and allows such an untracked
- but explicitly ignored file to be overwritten.
-
--index-output=<file>::
Instead of writing the results out to `$GIT_INDEX_FILE`,
write the resulting index in the named file. While the
diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt
index 506345cb0e..c116dbf4bb 100644
--- a/Documentation/git-rebase.txt
+++ b/Documentation/git-rebase.txt
@@ -446,7 +446,8 @@ When --fork-point is active, 'fork_point' will be used instead of
ends up being empty, the <upstream> will be used as a fallback.
+
If <upstream> is given on the command line, then the default is
-`--no-fork-point`, otherwise the default is `--fork-point`.
+`--no-fork-point`, otherwise the default is `--fork-point`. See also
+`rebase.forkpoint` in linkgit:git-config[1].
+
If your branch was based on <upstream> but <upstream> was rewound and
your branch contains commits which were dropped, this option can be used
diff --git a/Documentation/git-receive-pack.txt b/Documentation/git-receive-pack.txt
index 25702ed730..014a78409b 100644
--- a/Documentation/git-receive-pack.txt
+++ b/Documentation/git-receive-pack.txt
@@ -41,6 +41,11 @@ OPTIONS
<directory>::
The repository to sync into.
+--http-backend-info-refs::
+ Used by linkgit:git-http-backend[1] to serve up
+ `$GIT_URL/info/refs?service=git-receive-pack` requests. See
+ `--http-backend-info-refs` in linkgit:git-upload-pack[1].
+
PRE-RECEIVE HOOK
----------------
Before any ref is updated, if $GIT_DIR/hooks/pre-receive file exists
diff --git a/Documentation/git-reset.txt b/Documentation/git-reset.txt
index 252e2d4e47..6f7685f53d 100644
--- a/Documentation/git-reset.txt
+++ b/Documentation/git-reset.txt
@@ -69,7 +69,8 @@ linkgit:git-add[1]).
--hard::
Resets the index and working tree. Any changes to tracked files in the
- working tree since `<commit>` are discarded.
+ working tree since `<commit>` are discarded. Any untracked files or
+ directories in the way of writing any tracked files are simply deleted.
--merge::
Resets the index and updates the files in the working tree that are
diff --git a/Documentation/git-rm.txt b/Documentation/git-rm.txt
index 26e9b28470..81bc23f3cd 100644
--- a/Documentation/git-rm.txt
+++ b/Documentation/git-rm.txt
@@ -72,6 +72,12 @@ For more details, see the 'pathspec' entry in linkgit:gitglossary[7].
--ignore-unmatch::
Exit with a zero status even if no files matched.
+--sparse::
+ Allow updating index entries outside of the sparse-checkout cone.
+ Normally, `git rm` refuses to update index entries whose paths do
+ not fit within the sparse-checkout cone. See
+ linkgit:git-sparse-checkout[1] for more.
+
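Analogous to `git add --sparse` above, a sketch with a hypothetical path `docs/old.txt` that lies outside the sparse-checkout cone:

----
$ git rm docs/old.txt            # refused: path is outside the sparse-checkout cone
$ git rm --sparse docs/old.txt
----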
-q::
--quiet::
`git rm` normally outputs one line (in the form of an `rm` command)
diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt
index fdcf43f87c..42056ee9ff 100644
--- a/Documentation/git-sparse-checkout.txt
+++ b/Documentation/git-sparse-checkout.txt
@@ -210,6 +210,16 @@ case-insensitive check. This corrects for case mismatched filenames in the
'git sparse-checkout set' command to reflect the expected cone in the working
directory.
+When changing the sparse-checkout patterns in cone mode, Git will inspect each
+tracked directory that is not within the sparse-checkout cone to see if it
+contains any untracked files. If all of those files are ignored due to the
+`.gitignore` patterns, then the directory will be deleted. If any of the
+untracked files within that directory is not ignored, then no deletions will
+occur within that directory and a warning message will appear. If these files
+are important, then reset your sparse-checkout definition so they are included,
+use `git add` and `git commit` to store them, then remove any remaining files
+manually to ensure Git can behave optimally.
+
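A hedged sketch of the recovery steps suggested above, using a hypothetical `legacy/` directory that still contains a non-ignored untracked file:

----
$ git sparse-checkout set src        # legacy/ is left behind with a warning
$ git sparse-checkout add legacy     # bring it back into the cone
$ git add legacy
$ git commit -m 'keep files from legacy/'
$ git sparse-checkout set src        # now the directory can be removed cleanly
----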
SUBMODULES
----------
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index 83f38e3198..c33a3d8d53 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -363,7 +363,7 @@ Field Meaning
Unmerged entries have the following format; the first character is
a "u" to distinguish from ordinary changed entries.
- u <xy> <sub> <m1> <m2> <m3> <mW> <h1> <h2> <h3> <path>
+ u <XY> <sub> <m1> <m2> <m3> <mW> <h1> <h2> <h3> <path>
....
Field Meaning
diff --git a/Documentation/git-upload-pack.txt b/Documentation/git-upload-pack.txt
index 9822c1eb1a..8f87b23ea8 100644
--- a/Documentation/git-upload-pack.txt
+++ b/Documentation/git-upload-pack.txt
@@ -36,14 +36,26 @@ OPTIONS
This fits with the HTTP POST request processing model where
a program may read the request, write a response, and must exit.
---advertise-refs::
- Only the initial ref advertisement is output, and the program exits
- immediately. This fits with the HTTP GET request model, where
- no request content is received but a response must be produced.
+--http-backend-info-refs::
+ Used by linkgit:git-http-backend[1] to serve up
+ `$GIT_URL/info/refs?service=git-upload-pack` requests. See
+ "Smart Clients" in link:technical/http-protocol.html[the HTTP
+ transfer protocols] documentation and "HTTP Transport" in
+ link:technical/protocol-v2.html[the Git Wire Protocol, Version
+ 2] documentation. Also understood by
+ linkgit:git-receive-pack[1].
<directory>::
The repository to sync from.
+ENVIRONMENT
+-----------
+
+`GIT_PROTOCOL`::
+ Internal variable used for handshaking the wire protocol. Server
+ admins may need to configure some transports to allow this
+ variable to be passed. See the discussion in linkgit:git[1].
+
SEE ALSO
--------
linkgit:gitnamespaces[7]
diff --git a/Documentation/git-version.txt b/Documentation/git-version.txt
new file mode 100644
index 0000000000..80fa7754a6
--- /dev/null
+++ b/Documentation/git-version.txt
@@ -0,0 +1,28 @@
+git-version(1)
+==============
+
+NAME
+----
+git-version - Display version information about Git
+
+SYNOPSIS
+--------
+[verse]
+'git version' [--build-options]
+
+DESCRIPTION
+-----------
+With no options given, the version of 'git' is printed on the standard output.
+
+Note that `git --version` is identical to `git version` because the
+former is internally converted into the latter.
+
+OPTIONS
+-------
+--build-options::
+ Include additional information about how git was built for diagnostic
+ purposes.
+
+GIT
+---
+Part of the linkgit:git[1] suite
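A brief usage sketch of the command documented above:

------------
$ git version                   # print just the version string
$ git version --build-options   # also print build information for diagnostics
$ git --version --build-options # equivalent; --version is converted to
                                # "git version"
------------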
diff --git a/Documentation/git.txt b/Documentation/git.txt
index 6dd241ef83..d63c65e67d 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -41,6 +41,10 @@ OPTIONS
-------
--version::
Prints the Git suite version that the 'git' program came from.
++
+This option is internally converted to `git version ...` and accepts
+the same options as the linkgit:git-version[1] command. If `--help` is
+also given, it takes precedence over `--version`.
--help::
Prints the synopsis and a list of the most commonly used
@@ -863,15 +867,16 @@ for full details.
end user, to be recorded in the body of the reflog.
`GIT_REF_PARANOIA`::
- If set to `1`, include broken or badly named refs when iterating
- over lists of refs. In a normal, non-corrupted repository, this
- does nothing. However, enabling it may help git to detect and
- abort some operations in the presence of broken refs. Git sets
- this variable automatically when performing destructive
- operations like linkgit:git-prune[1]. You should not need to set
- it yourself unless you want to be paranoid about making sure
- an operation has touched every ref (e.g., because you are
- cloning a repository to make a backup).
+ If set to `0`, ignore broken or badly named refs when iterating
+ over lists of refs. Normally Git will try to include any such
+ refs, which may cause some operations to fail. This is usually
+ preferable, as potentially destructive operations (e.g.,
+ linkgit:git-prune[1]) are better off aborting rather than
+ ignoring broken refs (and thus considering the history they
+ point to as not worth saving). The default value is `1` (i.e.,
+ be paranoid about detecting and aborting all operations). You
+ should not normally need to set this to `0`, but it may be
+ useful when trying to salvage data from a corrupted repository.
`GIT_ALLOW_PROTOCOL`::
If set to a colon-separated list of protocols, behave as if
@@ -894,6 +899,21 @@ for full details.
Contains a colon ':' separated list of keys with optional values
'key[=value]'. Presence of unknown keys and values must be
ignored.
++
+Note that servers may need to be configured to allow this variable to
+pass over some transports. It will be propagated automatically when
+accessing local repositories (i.e., `file://` or a filesystem path), as
+well as over the `git://` protocol. For git-over-http, it should work
+automatically in most configurations, but see the discussion in
+linkgit:git-http-backend[1]. For git-over-ssh, the ssh server may need
+to be configured to allow clients to pass this variable (e.g., by using
+`AcceptEnv GIT_PROTOCOL` with OpenSSH).
++
+This configuration is optional. If the variable is not propagated, then
+clients will fall back to the original "v0" protocol (but may miss out
+on some performance improvements or features). This variable currently
+only affects clones and fetches; it is not yet used for pushes (but may
+be in the future).
`GIT_OPTIONAL_LOCKS`::
If set to `0`, Git will complete any requested operation without
diff --git a/Documentation/technical/api-parse-options.txt b/Documentation/technical/api-parse-options.txt
index 5a60bbfa7f..acfd5dc1d8 100644
--- a/Documentation/technical/api-parse-options.txt
+++ b/Documentation/technical/api-parse-options.txt
@@ -198,11 +198,6 @@ There are some macros to easily define options:
The filename will be prefixed by passing the filename along with
the prefix argument of `parse_options()` to `prefix_filename()`.
-`OPT_ARGUMENT(long, &int_var, description)`::
- Introduce a long-option argument that will be kept in `argv[]`.
- If this option was seen, `int_var` will be set to one (except
- if a `NULL` pointer was passed).
-
`OPT_NUMBER_CALLBACK(&var, description, func_ptr)`::
Recognize numerical options like -123 and feed the integer as
if it was an argument to the function given by `func_ptr`.
diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt
index b9f3198fbe..ef7fe02a8f 100644
--- a/Documentation/technical/api-trace2.txt
+++ b/Documentation/technical/api-trace2.txt
@@ -613,6 +613,46 @@ stopping after the waitpid() and includes OS process creation overhead).
So this time will be slightly larger than the atexit time reported by
the child process itself.
+`"child_ready"`::
+ This event is generated after the current process has started
+ a background process and released all handles to it.
++
+------------
+{
+ "event":"child_ready",
+ ...
+ "child_id":2,
+ "pid":14708, # child PID
+ "ready":"ready", # child ready state
+ "t_rel":0.110605 # observed run-time of child process
+}
+------------
++
+Note that the session-id of the child process is not available to
+the current/spawning process, so the child's PID is reported here as
+a hint for post-processing. (But it is only a hint because the child
+process may be a shell script which doesn't have a session-id.)
++
+This event is generated after the child is started in the background
+and given a little time to boot up and start working. If the child
+starts up normally while the parent is still waiting, the "ready"
+field will have the value "ready".
+If the child is too slow to start and the parent times out, the field
+will have the value "timeout".
+If the child starts but the parent is unable to probe it, the field
+will have the value "error".
++
+After the parent process emits this event, it will release all of its
+handles to the child process and treat the child as a background
+daemon. So even if the child does eventually finish booting up,
+the parent will not emit an updated event.
++
+Note that the `t_rel` field contains the observed run time in seconds
+when the parent released the child process into the background.
+The child is assumed to be a long-running daemon process and may
+outlive the parent process. So the parent's child event times should
+not be compared to the child's atexit times.
+
`"exec"`::
This event is generated before git attempts to `exec()`
another command rather than starting a child process.
diff --git a/Documentation/technical/bitmap-format.txt b/Documentation/technical/bitmap-format.txt
index f8c18a0f7a..04b3ec2178 100644
--- a/Documentation/technical/bitmap-format.txt
+++ b/Documentation/technical/bitmap-format.txt
@@ -1,6 +1,44 @@
GIT bitmap v1 format
====================
+== Pack and multi-pack bitmaps
+
+Bitmaps store reachability information about the set of objects in a packfile,
+or a multi-pack index (MIDX). The former set is simply the objects in that
+packfile; the latter is the union of the objects in the packs contained in
+the MIDX.
+
+A bitmap may belong to either one pack, or the repository's multi-pack index (if
+it exists). A repository may have at most one bitmap.
+
+An object is uniquely described by its bit position within a bitmap:
+
+ - If the bitmap belongs to a packfile, the __n__th bit corresponds to
+ the __n__th object in pack order. For a function `offset` which maps
+ objects to their byte offset within a pack, pack order is defined as
+ follows:
+
+ o1 <= o2 <==> offset(o1) <= offset(o2)
+
+ - If the bitmap belongs to a MIDX, the __n__th bit corresponds to the
+ __n__th object in MIDX order. With an additional function `pack` which
+ maps objects to the pack they were selected from by the MIDX, MIDX order
+ is defined as follows:
+
+ o1 <= o2 <==> pack(o1) <= pack(o2) /\ offset(o1) <= offset(o2)
+
+ The ordering between packs is done according to the MIDX's .rev file.
+ Notably, the preferred pack sorts ahead of all other packs.
+
+The on-disk representation (described below) of a bitmap is the same regardless
+of whether that bitmap belongs to a packfile or a MIDX. The only
+difference is the interpretation of the bits, which is described above.
+
+Certain bitmap extensions are supported (see: Appendix B). No extensions are
+required for bitmaps corresponding to packfiles. For bitmaps that correspond to
+MIDXs, both the bit-cache and rev-cache extensions are required.
+
+== On-disk format
+
- A header appears at the beginning:
4-byte signature: {'B', 'I', 'T', 'M'}
@@ -14,17 +52,19 @@ GIT bitmap v1 format
The following flags are supported:
- BITMAP_OPT_FULL_DAG (0x1) REQUIRED
- This flag must always be present. It implies that the bitmap
- index has been generated for a packfile with full closure
- (i.e. where every single object in the packfile can find
- its parent links inside the same packfile). This is a
- requirement for the bitmap index format, also present in JGit,
- that greatly reduces the complexity of the implementation.
+ This flag must always be present. It implies that the
+ bitmap index has been generated for a packfile or
+ multi-pack index (MIDX) with full closure (i.e. where
+ every single object in the packfile/MIDX can find its
+ parent links inside the same packfile/MIDX). This is a
+ requirement for the bitmap index format, also present in
+ JGit, that greatly reduces the complexity of the
+ implementation.
- BITMAP_OPT_HASH_CACHE (0x4)
If present, the end of the bitmap file contains
`N` 32-bit name-hash values, one per object in the
- pack. The format and meaning of the name-hash is
+ pack/MIDX. The format and meaning of the name-hash is
described below.
4-byte entry count (network byte order)
@@ -33,7 +73,8 @@ GIT bitmap v1 format
20-byte checksum
- The SHA1 checksum of the pack this bitmap index belongs to.
+ The SHA1 checksum of the pack/MIDX this bitmap index
+ belongs to.
- 4 EWAH bitmaps that act as type indexes
@@ -50,7 +91,7 @@ GIT bitmap v1 format
- Tags
In each bitmap, the `n`th bit is set to true if the `n`th object
- in the packfile is of that type.
+ in the packfile or multi-pack index is of that type.
The obvious consequence is that the OR of all 4 bitmaps will result
in a full set (all bits set), and the AND of all 4 bitmaps will
@@ -62,8 +103,9 @@ GIT bitmap v1 format
Each entry contains the following:
- 4-byte object position (network byte order)
- The position **in the index for the packfile** where the
- bitmap for this commit is found.
+ The position **in the index for the packfile or
+ multi-pack index** where the bitmap for this commit is
+ found.
- 1-byte XOR-offset
The xor offset used to compress this bitmap. For an entry
@@ -146,10 +188,11 @@ Name-hash cache
---------------
If the BITMAP_OPT_HASH_CACHE flag is set, the end of the bitmap contains
-a cache of 32-bit values, one per object in the pack. The value at
+a cache of 32-bit values, one per object in the pack/MIDX. The value at
position `i` is the hash of the pathname at which the `i`th object
-(counting in index order) in the pack can be found. This can be fed
-into the delta heuristics to compare objects with similar pathnames.
+(counting in index or multi-pack index order) in the pack/MIDX can be found.
+This can be fed into the delta heuristics to compare objects with similar
+pathnames.
The hash algorithm used is:
diff --git a/Documentation/technical/http-protocol.txt b/Documentation/technical/http-protocol.txt
index 96d89ea9b2..cc5126cfed 100644
--- a/Documentation/technical/http-protocol.txt
+++ b/Documentation/technical/http-protocol.txt
@@ -225,6 +225,9 @@ The client may send Extra Parameters (see
Documentation/technical/pack-protocol.txt) as a colon-separated string
in the Git-Protocol HTTP header.
+Uses the `--http-backend-info-refs` option to
+linkgit:git-upload-pack[1].
+
Dumb Server Response
^^^^^^^^^^^^^^^^^^^^
Dumb servers MUST respond with the dumb server reply format.
diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt
index fb688976c4..86f40f2490 100644
--- a/Documentation/technical/multi-pack-index.txt
+++ b/Documentation/technical/multi-pack-index.txt
@@ -36,7 +36,9 @@ Design Details
directory of an alternate. It refers only to packfiles in that
same directory.
-- The core.multiPackIndex config setting must be on to consume MIDX files.
+- The core.multiPackIndex config setting must be on (which is the
+ default) to consume MIDX files. Setting it to `false` prevents
+ Git from reading a MIDX file, even if one exists.
- The file format includes parameters for the object ID hash
function, so a future change of hash algorithm does not require
@@ -71,14 +73,10 @@ Future Work
still reducing the number of binary searches required for object
lookups.
-- The reachability bitmap is currently paired directly with a single
- packfile, using the pack-order as the object order to hopefully
- compress the bitmaps well using run-length encoding. This could be
- extended to pair a reachability bitmap with a multi-pack-index. If
- the multi-pack-index is extended to store a "stable object order"
+- If the multi-pack-index is extended to store a "stable object order"
(a function Order(hash) = integer that is constant for a given hash,
- even as the multi-pack-index is updated) then a reachability bitmap
- could point to a multi-pack-index and be updated independently.
+ even as the multi-pack-index is updated) then MIDX bitmaps could be
+ updated independently of the MIDX.
- Packfiles can be marked as "special" using empty files that share
the initial name but replace ".pack" with ".keep" or ".promisor".
diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/technical/protocol-v2.txt
index 1040d85319..21e8258ccf 100644
--- a/Documentation/technical/protocol-v2.txt
+++ b/Documentation/technical/protocol-v2.txt
@@ -42,7 +42,8 @@ Initial Client Request
In general a client can request to speak protocol v2 by sending
`version=2` through the respective side-channel for the transport being
used which inevitably sets `GIT_PROTOCOL`. More information can be
-found in `pack-protocol.txt` and `http-protocol.txt`. In all cases the
+found in `pack-protocol.txt` and `http-protocol.txt`, as well as the
+`GIT_PROTOCOL` definition in `git.txt`. In all cases the
response from the server is the capability advertisement.
Git Transport
@@ -58,6 +59,8 @@ SSH and File Transport
When using either the ssh:// or file:// transport, the GIT_PROTOCOL
environment variable must be set explicitly to include "version=2".
+The server may need to be configured to allow this environment variable
+to pass.
HTTP Transport
~~~~~~~~~~~~~~
@@ -81,6 +84,12 @@ A v2 server would reply:
Subsequent requests are then made directly to the service
`$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack).
+Uses the `--http-backend-info-refs` option to
+linkgit:git-upload-pack[1].
+
+The server may need to be configured to pass this header's contents via
+the `GIT_PROTOCOL` variable. See the discussion in `git-http-backend.txt`.
+
Capability Advertisement
------------------------
@@ -190,7 +199,11 @@ ls-refs takes in the following arguments:
Show peeled tags.
ref-prefix <prefix>
When specified, only references having a prefix matching one of
- the provided prefixes are displayed.
+ the provided prefixes are displayed. Multiple instances may be
+ given, in which case references matching any prefix will be
+ shown. Note that this is purely for optimization; a server MAY
+ show refs not matching the prefix if it chooses, and clients
+ should filter the result themselves.
If the 'unborn' feature is advertised the following argument can be
included in the client's request.
diff --git a/INSTALL b/INSTALL
index 66389ce059..4140a3f5c8 100644
--- a/INSTALL
+++ b/INSTALL
@@ -138,12 +138,15 @@ Issues of note:
BLK_SHA1. Also included is a version optimized for PowerPC
(PPC_SHA1).
- - "libcurl" library is used by git-http-fetch, git-fetch, and, if
- the curl version >= 7.34.0, for git-imap-send. You might also
- want the "curl" executable for debugging purposes. If you do not
- use http:// or https:// repositories, and do not want to put
- patches into an IMAP mailbox, you do not have to have them
- (use NO_CURL).
+ - "libcurl" library is used for fetching and pushing
+ repositories over http:// or https://, as well as by
+ git-imap-send if the curl version is >= 7.34.0. If you do
+ not need that functionality, use NO_CURL to build without
+ it.
+
+ Git requires version "7.19.4" or later of "libcurl" to build
+ without NO_CURL. This version requirement may be bumped in
+ the future.
- "expat" library; git-http-push uses it for remote lock
management over DAV. Similar to "curl" above, this is optional
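As a concrete illustration of the NO_CURL switch mentioned above (any non-empty value works; "YesPlease" is commonly used in Git's build instructions):

------------
# Build and install without libcurl; http:// and https:// remote
# support will not be included.
make NO_CURL=YesPlease
make NO_CURL=YesPlease install
------------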
diff --git a/Makefile b/Makefile
index 429c276058..877492386e 100644
--- a/Makefile
+++ b/Makefile
@@ -409,15 +409,6 @@ all::
# Define NEEDS_LIBRT if your platform requires linking with librt (glibc version
# before 2.17) for clock_gettime and CLOCK_MONOTONIC.
#
-# Define USE_PARENS_AROUND_GETTEXT_N to "yes" if your compiler happily
-# compiles the following initialization:
-#
-# static const char s[] = ("FOO");
-#
-# and define it to "no" if you need to remove the parentheses () around the
-# constant. The default is "auto", which means to use parentheses if your
-# compiler is detected to support it.
-#
# Define HAVE_BSD_SYSCTL if your platform has a BSD-compatible sysctl function.
#
# Define HAVE_GETDELIM if your system has the getdelim() function.
@@ -465,6 +456,9 @@ all::
# the global variable _wpgmptr containing the absolute path of the current
# executable (this is the case on Windows).
#
+# INSTALL_STRIP can be set to "-s" to strip binaries during installation,
+# if your $(INSTALL) command supports the option.
+#
# Define GENERATE_COMPILATION_DATABASE to "yes" to generate JSON compilation
# database entries during compilation if your compiler supports it, using the
# `-MJ` flag. The JSON entries will be placed in the `compile_commands/`
@@ -495,10 +489,9 @@ all::
# setting this flag the exceptions are removed, and all of
# -Wextra is used.
#
-# pedantic:
+# no-pedantic:
#
-# Enable -pedantic compilation. This also disables
-# USE_PARENS_AROUND_GETTEXT_N to produce only relevant warnings.
+# Disable -pedantic compilation.
GIT-VERSION-FILE: FORCE
@$(SHELL_PATH) ./GIT-VERSION-GEN
@@ -824,6 +817,10 @@ XDIFF_LIB = xdiff/lib.a
GENERATED_H += command-list.h
GENERATED_H += config-list.h
+GENERATED_H += hook-list.h
+
+.PHONY: generated-hdrs
+generated-hdrs: $(GENERATED_H)
LIB_H := $(sort $(patsubst ./%,%,$(shell git ls-files '*.h' ':!t/' ':!Documentation/' 2>/dev/null || \
$(FIND) . \
@@ -909,6 +906,7 @@ LIB_OBJS += hash-lookup.o
LIB_OBJS += hashmap.o
LIB_OBJS += help.o
LIB_OBJS += hex.o
+LIB_OBJS += hook.o
LIB_OBJS += ident.o
LIB_OBJS += json-writer.o
LIB_OBJS += kwset.o
@@ -1221,6 +1219,9 @@ PTHREAD_CFLAGS =
SPARSE_FLAGS ?=
SP_EXTRA_FLAGS = -Wno-universal-initializer
+# For informing GIT-BUILD-OPTIONS of the SANITIZE=leak target
+SANITIZE_LEAK =
+
# For the 'coccicheck' target; setting SPATCH_BATCH_SIZE higher will
# usually result in less CPU usage at the cost of higher peak memory.
# Setting it to 0 will feed all files in a single spatch invocation.
@@ -1265,6 +1266,7 @@ BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS
endif
ifneq ($(filter leak,$(SANITIZERS)),)
BASIC_CFLAGS += -DSUPPRESS_ANNOTATED_LEAKS
+SANITIZE_LEAK = YesCompiledWithIt
endif
ifneq ($(filter address,$(SANITIZERS)),)
NO_REGEX = NeededForASAN
@@ -1285,6 +1287,7 @@ endif
ifeq ($(COMPUTE_HEADER_DEPENDENCIES),auto)
dep_check = $(shell $(CC) $(ALL_CFLAGS) \
+ -Wno-pedantic \
-c -MF /dev/null -MQ /dev/null -MMD -MP \
-x c /dev/null -o /dev/null 2>&1; \
echo $$?)
@@ -1310,6 +1313,7 @@ endif
ifeq ($(GENERATE_COMPILATION_DATABASE),yes)
compdb_check = $(shell $(CC) $(ALL_CFLAGS) \
+ -Wno-pedantic \
-c -MJ /dev/null \
-x c /dev/null -o /dev/null 2>&1; \
echo $$?)
@@ -1347,14 +1351,6 @@ ifneq (,$(SOCKLEN_T))
BASIC_CFLAGS += -Dsocklen_t=$(SOCKLEN_T)
endif
-ifeq (yes,$(USE_PARENS_AROUND_GETTEXT_N))
- BASIC_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=1
-else
-ifeq (no,$(USE_PARENS_AROUND_GETTEXT_N))
- BASIC_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
-endif
-endif
-
ifeq ($(uname_S),Darwin)
ifndef NO_FINK
ifeq ($(shell test -d /sw/lib && echo y),y)
@@ -1436,15 +1432,8 @@ else
REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES)
PROGRAM_OBJS += http-fetch.o
PROGRAMS += $(REMOTE_CURL_NAMES)
- curl_check := $(shell (echo 070908; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p)
- ifeq "$(curl_check)" "070908"
- ifndef NO_EXPAT
- PROGRAM_OBJS += http-push.o
- else
- EXCLUDED_PROGRAMS += git-http-push
- endif
- else
- EXCLUDED_PROGRAMS += git-http-push
+ ifndef NO_EXPAT
+ PROGRAM_OBJS += http-push.o
endif
curl_check := $(shell (echo 072200; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p)
ifeq "$(curl_check)" "072200"
@@ -2227,8 +2216,9 @@ git$X: git.o GIT-LDFLAGS $(BUILTIN_OBJS) $(GITLIBS)
$(filter %.o,$^) $(LIBS)
help.sp help.s help.o: command-list.h
+hook.sp hook.s hook.o: hook-list.h
-builtin/help.sp builtin/help.s builtin/help.o: config-list.h GIT-PREFIX
+builtin/help.sp builtin/help.s builtin/help.o: config-list.h hook-list.h GIT-PREFIX
builtin/help.sp builtin/help.s builtin/help.o: EXTRA_CPPFLAGS = \
'-DGIT_HTML_PATH="$(htmldir_relative_SQ)"' \
'-DGIT_MAN_PATH="$(mandir_relative_SQ)"' \
@@ -2251,15 +2241,17 @@ $(BUILT_INS): git$X
config-list.h: generate-configlist.sh
config-list.h: Documentation/*config.txt Documentation/config/*.txt
- $(QUIET_GEN)$(SHELL_PATH) ./generate-configlist.sh \
- >$@+ && mv $@+ $@
+ $(QUIET_GEN)$(SHELL_PATH) ./generate-configlist.sh >$@
command-list.h: generate-cmdlist.sh command-list.txt
command-list.h: $(wildcard Documentation/git*.txt)
$(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh \
$(patsubst %,--exclude-program %,$(EXCLUDED_PROGRAMS)) \
- command-list.txt >$@+ && mv $@+ $@
+ command-list.txt >$@
+
+hook-list.h: generate-hooklist.sh Documentation/githooks.txt
+ $(QUIET_GEN)$(SHELL_PATH) ./generate-hooklist.sh >$@
SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
$(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\
@@ -2481,7 +2473,6 @@ dep_args = -MF $(dep_file) -MQ $@ -MMD -MP
endif
ifneq ($(COMPUTE_HEADER_DEPENDENCIES),yes)
-dep_dirs =
missing_dep_dirs =
dep_args =
endif
@@ -2522,13 +2513,6 @@ ifneq ($(dep_files_present),)
include $(dep_files_present)
endif
else
-# Dependencies on header files, for platforms that do not support
-# the gcc -MMD option.
-#
-# Dependencies on automatically generated headers such as command-list.h
-# should _not_ be included here, since they are necessary even when
-# building an object for the first time.
-
$(OBJECTS): $(LIB_H) $(GENERATED_H)
endif
@@ -2750,19 +2734,25 @@ FIND_SOURCE_FILES = ( \
| sed -e 's|^\./||' \
)
-$(ETAGS_TARGET): FORCE
- $(QUIET_GEN)$(RM) "$(ETAGS_TARGET)+" && \
- $(FIND_SOURCE_FILES) | xargs etags -a -o "$(ETAGS_TARGET)+" && \
- mv "$(ETAGS_TARGET)+" "$(ETAGS_TARGET)"
+FOUND_SOURCE_FILES = $(shell $(FIND_SOURCE_FILES))
-tags: FORCE
- $(QUIET_GEN)$(RM) tags+ && \
- $(FIND_SOURCE_FILES) | xargs ctags -a -o tags+ && \
- mv tags+ tags
+$(ETAGS_TARGET): $(FOUND_SOURCE_FILES)
+ $(QUIET_GEN)$(RM) $@+ && \
+ echo $(FOUND_SOURCE_FILES) | xargs etags -a -o $@+ && \
+ mv $@+ $@
-cscope:
- $(RM) cscope*
- $(FIND_SOURCE_FILES) | xargs cscope -b
+tags: $(FOUND_SOURCE_FILES)
+ $(QUIET_GEN)$(RM) $@+ && \
+ echo $(FOUND_SOURCE_FILES) | xargs ctags -a -o $@+ && \
+ mv $@+ $@
+
+cscope.out: $(FOUND_SOURCE_FILES)
+ $(QUIET_GEN)$(RM) $@+ && \
+ echo $(FOUND_SOURCE_FILES) | xargs cscope -f$@+ -b && \
+ mv $@+ $@
+
+.PHONY: cscope
+cscope: cscope.out
### Detect prefix changes
TRACK_PREFIX = $(bindir_SQ):$(gitexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):\
@@ -2812,6 +2802,7 @@ GIT-BUILD-OPTIONS: FORCE
@echo NO_UNIX_SOCKETS=\''$(subst ','\'',$(subst ','\'',$(NO_UNIX_SOCKETS)))'\' >>$@+
@echo PAGER_ENV=\''$(subst ','\'',$(subst ','\'',$(PAGER_ENV)))'\' >>$@+
@echo DC_SHA1=\''$(subst ','\'',$(subst ','\'',$(DC_SHA1)))'\' >>$@+
+ @echo SANITIZE_LEAK=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_LEAK)))'\' >>$@+
@echo X=\'$(X)\' >>$@+
ifdef TEST_OUTPUT_DIRECTORY
@echo TEST_OUTPUT_DIRECTORY=\''$(subst ','\'',$(subst ','\'',$(TEST_OUTPUT_DIRECTORY)))'\' >>$@+
@@ -2919,7 +2910,7 @@ $(SP_OBJ): %.sp: %.c GIT-CFLAGS FORCE
.PHONY: sparse $(SP_OBJ)
sparse: $(SP_OBJ)
-EXCEPT_HDRS := command-list.h config-list.h unicode-width.h compat/% xdiff/%
+EXCEPT_HDRS := $(GENERATED_H) unicode-width.h compat/% xdiff/%
ifndef GCRYPT_SHA256
EXCEPT_HDRS += sha256/gcrypt.h
endif
@@ -2941,7 +2932,8 @@ hdr-check: $(HCO)
style:
git clang-format --style file --diff --extensions c,h
-check: config-list.h command-list.h
+.PHONY: check
+check: $(GENERATED_H)
@if sparse; \
then \
echo >&2 "Use 'make sparse' instead"; \
@@ -2951,7 +2943,7 @@ check: config-list.h command-list.h
exit 1; \
fi
-FOUND_C_SOURCES = $(filter %.c,$(shell $(FIND_SOURCE_FILES)))
+FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES))
COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES))
%.cocci.patch: %.cocci $(COCCI_SOURCES)
@@ -3004,7 +2996,8 @@ mergetools_instdir = $(prefix)/$(mergetoolsdir)
endif
mergetools_instdir_SQ = $(subst ','\'',$(mergetools_instdir))
-install_bindir_programs := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)) $(BINDIR_PROGRAMS_NO_X)
+install_bindir_xprograms := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X))
+install_bindir_programs := $(install_bindir_xprograms) $(BINDIR_PROGRAMS_NO_X)
.PHONY: profile-install profile-fast-install
profile-install: profile
@@ -3013,12 +3006,17 @@ profile-install: profile
profile-fast-install: profile-fast
$(MAKE) install
+INSTALL_STRIP =
+
install: all
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
- $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
+ $(INSTALL) $(INSTALL_STRIP) $(PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
+ $(INSTALL) $(SCRIPTS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
$(INSTALL) -m 644 $(SCRIPT_LIB) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
- $(INSTALL) $(install_bindir_programs) '$(DESTDIR_SQ)$(bindir_SQ)'
+ $(INSTALL) $(INSTALL_STRIP) $(install_bindir_xprograms) '$(DESTDIR_SQ)$(bindir_SQ)'
+ $(INSTALL) $(BINDIR_PROGRAMS_NO_X) '$(DESTDIR_SQ)$(bindir_SQ)'
+
ifdef MSVC
# We DO NOT install the individual foo.o.pdb files because they
# have already been rolled up into the exe's pdb file.
@@ -3099,8 +3097,7 @@ endif
ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \
- done && \
- ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X"
+ done
.PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf
.PHONY: quick-install-doc quick-install-man quick-install-html
@@ -3276,7 +3273,7 @@ endif
.PHONY: all install profile-clean cocciclean clean strip
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: FORCE cscope
+.PHONY: FORCE
### Check documentation
#
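The INSTALL_STRIP knob added above is intended to be given on the make command line; an illustrative invocation (the prefix is an assumption):

------------
# Strip installed binaries, provided the $(INSTALL) tool accepts -s.
make prefix=/opt/git INSTALL_STRIP=-s install
------------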
diff --git a/add-interactive.c b/add-interactive.c
index 36ebdbdf7e..6498ae196f 100644
--- a/add-interactive.c
+++ b/add-interactive.c
@@ -102,8 +102,12 @@ struct prefix_item_list {
int *selected; /* for multi-selections */
size_t min_length, max_length;
};
-#define PREFIX_ITEM_LIST_INIT \
- { STRING_LIST_INIT_DUP, STRING_LIST_INIT_NODUP, NULL, 1, 4 }
+#define PREFIX_ITEM_LIST_INIT { \
+ .items = STRING_LIST_INIT_DUP, \
+ .sorted = STRING_LIST_INIT_NODUP, \
+ .min_length = 1, \
+ .max_length = 4, \
+}
static void prefix_item_list_clear(struct prefix_item_list *list)
{
diff --git a/advice.c b/advice.c
index 2f5499a5e1..1dfc91d176 100644
--- a/advice.c
+++ b/advice.c
@@ -224,15 +224,16 @@ void advise_on_updating_sparse_paths(struct string_list *pathspec_list)
if (!pathspec_list->nr)
return;
- fprintf(stderr, _("The following pathspecs didn't match any"
- " eligible path, but they do match index\n"
- "entries outside the current sparse checkout:\n"));
+ fprintf(stderr, _("The following paths and/or pathspecs matched paths that exist\n"
+ "outside of your sparse-checkout definition, so will not be\n"
+ "updated in the index:\n"));
for_each_string_list_item(item, pathspec_list)
fprintf(stderr, "%s\n", item->string);
advise_if_enabled(ADVICE_UPDATE_SPARSE_PATH,
- _("Disable or modify the sparsity rules if you intend"
- " to update such entries."));
+ _("If you intend to update such entries, try one of the following:\n"
+ "* Use the --sparse option.\n"
+ "* Disable or modify the sparsity rules."));
}
void detach_advice(const char *new_name)
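This advice pairs with the `--sparse` option that this series adds to `git add`; an illustrative round trip (the path is a placeholder):

------------
$ git add outside/cone/file.c           # refused; the advice above is printed
$ git add --sparse outside/cone/file.c  # explicitly update the sparse index entry
------------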
diff --git a/apply.c b/apply.c
index 4ed4b27169..43a0aebf4e 100644
--- a/apply.c
+++ b/apply.c
@@ -3468,6 +3468,21 @@ static int load_preimage(struct apply_state *state,
return 0;
}
+static int resolve_to(struct image *image, const struct object_id *result_id)
+{
+ unsigned long size;
+ enum object_type type;
+
+ clear_image(image);
+
+ image->buf = read_object_file(result_id, &type, &size);
+ if (!image->buf || type != OBJ_BLOB)
+ die("unable to read blob object %s", oid_to_hex(result_id));
+ image->len = size;
+
+ return 0;
+}
+
static int three_way_merge(struct apply_state *state,
struct image *image,
char *path,
@@ -3479,6 +3494,12 @@ static int three_way_merge(struct apply_state *state,
mmbuffer_t result = { NULL };
int status;
+ /* resolve trivial cases first */
+ if (oideq(base, ours))
+ return resolve_to(image, theirs);
+ else if (oideq(base, theirs) || oideq(ours, theirs))
+ return resolve_to(image, ours);
+
read_mmblob(&base_file, base);
read_mmblob(&our_file, ours);
read_mmblob(&their_file, theirs);
diff --git a/attr.c b/attr.c
index d029e681f2..79adaa50ea 100644
--- a/attr.c
+++ b/attr.c
@@ -14,6 +14,7 @@
#include "utf8.h"
#include "quote.h"
#include "thread-utils.h"
+#include "dir.h"
const char git_attr__true[] = "(builtin)true";
const char git_attr__false[] = "\0(builtin)false";
@@ -744,6 +745,20 @@ static struct attr_stack *read_attr_from_index(struct index_state *istate,
if (!istate)
return NULL;
+ /*
+ * The .gitattributes file only applies to files within its
+ * parent directory. In the case of cone-mode sparse-checkout,
+ * the .gitattributes file is sparse if and only if all paths
+ * within that directory are also sparse. Thus, don't load the
+ * .gitattributes file since it will not matter.
+ *
+ * In the case of a sparse index, it is critical that we don't go
+ * looking for a .gitattributes file, as doing so would cause the
+ * index to expand.
+ */
+ if (!path_in_cone_mode_sparse_checkout(path, istate))
+ return NULL;
+
buf = read_blob_data_from_index(istate, path, NULL);
if (!buf)
return NULL;
diff --git a/builtin.h b/builtin.h
index 16ecd5586f..8a58743ed6 100644
--- a/builtin.h
+++ b/builtin.h
@@ -225,7 +225,6 @@ int cmd_submodule__helper(int argc, const char **argv, const char *prefix);
int cmd_switch(int argc, const char **argv, const char *prefix);
int cmd_symbolic_ref(int argc, const char **argv, const char *prefix);
int cmd_tag(int argc, const char **argv, const char *prefix);
-int cmd_tar_tree(int argc, const char **argv, const char *prefix);
int cmd_unpack_file(int argc, const char **argv, const char *prefix);
int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
int cmd_update_index(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index 2244311d48..ef6b619c45 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -30,6 +30,7 @@ static int patch_interactive, add_interactive, edit_interactive;
static int take_worktree_changes;
static int add_renormalize;
static int pathspec_file_nul;
+static int include_sparse;
static const char *pathspec_from_file;
static int legacy_stash_p; /* support for the scripted `git stash` */
@@ -46,7 +47,9 @@ static int chmod_pathspec(struct pathspec *pathspec, char flip, int show_only)
struct cache_entry *ce = active_cache[i];
int err;
- if (ce_skip_worktree(ce))
+ if (!include_sparse &&
+ (ce_skip_worktree(ce) ||
+ !path_in_sparse_checkout(ce->name, &the_index)))
continue;
if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
@@ -94,6 +97,10 @@ static void update_callback(struct diff_queue_struct *q,
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
const char *path = p->one->path;
+
+ if (!include_sparse && !path_in_sparse_checkout(path, &the_index))
+ continue;
+
switch (fix_unmerged_status(p, data)) {
default:
die(_("unexpected diff status %c"), p->status);
@@ -147,7 +154,9 @@ static int renormalize_tracked_files(const struct pathspec *pathspec, int flags)
for (i = 0; i < active_nr; i++) {
struct cache_entry *ce = active_cache[i];
- if (ce_skip_worktree(ce))
+ if (!include_sparse &&
+ (ce_skip_worktree(ce) ||
+ !path_in_sparse_checkout(ce->name, &the_index)))
continue;
if (ce_stage(ce))
continue; /* do not touch unmerged paths */
@@ -190,8 +199,6 @@ static int refresh(int verbose, const struct pathspec *pathspec)
struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP;
int flags = REFRESH_IGNORE_SKIP_WORKTREE |
(verbose ? REFRESH_IN_PORCELAIN : REFRESH_QUIET);
- struct pattern_list pl = { 0 };
- int sparse_checkout_enabled = !get_sparse_checkout_patterns(&pl);
seen = xcalloc(pathspec->nr, 1);
refresh_index(&the_index, flags, pathspec, seen,
@@ -199,12 +206,9 @@ static int refresh(int verbose, const struct pathspec *pathspec)
for (i = 0; i < pathspec->nr; i++) {
if (!seen[i]) {
const char *path = pathspec->items[i].original;
- int dtype = DT_REG;
if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
- (sparse_checkout_enabled &&
- !path_matches_pattern_list(path, strlen(path), NULL,
- &dtype, &pl, &the_index))) {
+ !path_in_sparse_checkout(path, &the_index)) {
string_list_append(&only_match_skip_worktree,
pathspec->items[i].original);
} else {
@@ -382,6 +386,7 @@ static struct option builtin_add_options[] = {
OPT_BOOL( 0 , "refresh", &refresh_only, N_("don't add, only refresh the index")),
OPT_BOOL( 0 , "ignore-errors", &ignore_add_errors, N_("just skip files which cannot be added because of errors")),
OPT_BOOL( 0 , "ignore-missing", &ignore_missing, N_("check if - even missing - files are ignored in dry run")),
+ OPT_BOOL(0, "sparse", &include_sparse, N_("allow updating entries outside of the sparse-checkout cone")),
OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x",
N_("override the executable bit of the listed files")),
OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo,
@@ -447,6 +452,7 @@ static void check_embedded_repo(const char *path)
static int add_files(struct dir_struct *dir, int flags)
{
int i, exit_status = 0;
+ struct string_list matched_sparse_paths = STRING_LIST_INIT_NODUP;
if (dir->ignored_nr) {
fprintf(stderr, _(ignore_error));
@@ -460,6 +466,12 @@ static int add_files(struct dir_struct *dir, int flags)
}
for (i = 0; i < dir->nr; i++) {
+ if (!include_sparse &&
+ !path_in_sparse_checkout(dir->entries[i]->name, &the_index)) {
+ string_list_append(&matched_sparse_paths,
+ dir->entries[i]->name);
+ continue;
+ }
if (add_file_to_index(&the_index, dir->entries[i]->name, flags)) {
if (!ignore_add_errors)
die(_("adding files failed"));
@@ -468,6 +480,14 @@ static int add_files(struct dir_struct *dir, int flags)
check_embedded_repo(dir->entries[i]->name);
}
}
+
+ if (matched_sparse_paths.nr) {
+ advise_on_updating_sparse_paths(&matched_sparse_paths);
+ exit_status = 1;
+ }
+
+ string_list_clear(&matched_sparse_paths, 0);
+
return exit_status;
}
@@ -632,7 +652,8 @@ int cmd_add(int argc, const char **argv, const char *prefix)
if (seen[i])
continue;
- if (matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) {
+ if (!include_sparse &&
+ matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) {
string_list_append(&only_match_skip_worktree,
pathspec.items[i].original);
continue;
diff --git a/builtin/am.c b/builtin/am.c
index ff7dd33fcd..8677ea2348 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -11,6 +11,7 @@
#include "parse-options.h"
#include "dir.h"
#include "run-command.h"
+#include "hook.h"
#include "quote.h"
#include "tempfile.h"
#include "lockfile.h"
@@ -1848,7 +1849,6 @@ next:
*/
if (!state->rebasing) {
am_destroy(state);
- close_object_store(the_repository->objects);
run_auto_maintenance(state->quiet);
}
}
@@ -1918,7 +1918,8 @@ static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
opts.dst_index = &the_index;
opts.update = 1;
opts.merge = 1;
- opts.reset = reset;
+ opts.reset = reset ? UNPACK_RESET_PROTECT_UNTRACKED : 0;
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
opts.fn = twoway_merge;
init_tree_desc(&t[0], head->buffer, head->size);
init_tree_desc(&t[1], remote->buffer, remote->size);
@@ -2106,7 +2107,8 @@ static void am_abort(struct am_state *state)
if (!has_orig_head)
oidcpy(&orig_head, the_hash_algo->empty_tree);
- clean_index(&curr_head, &orig_head);
+ if (clean_index(&curr_head, &orig_head))
+ die(_("failed to clean index"));
if (has_orig_head)
update_ref("am --abort", "HEAD", &orig_head,
diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c
index f184eaeac6..28a2e6a575 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect--helper.c
@@ -18,10 +18,10 @@ static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG")
static GIT_PATH_FUNC(git_path_head_name, "head-name")
static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES")
static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT")
+static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
static const char * const git_bisect_helper_usage[] = {
N_("git bisect--helper --bisect-reset [<commit>]"),
- N_("git bisect--helper --bisect-next-check <good_term> <bad_term> [<term>]"),
N_("git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]"),
N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]"
" [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"),
@@ -30,6 +30,8 @@ static const char * const git_bisect_helper_usage[] = {
N_("git bisect--helper --bisect-state (good|old) [<rev>...]"),
N_("git bisect--helper --bisect-replay <filename>"),
N_("git bisect--helper --bisect-skip [(<rev>|<range>)...]"),
+ N_("git bisect--helper --bisect-visualize"),
+ N_("git bisect--helper --bisect-run <cmd>..."),
NULL
};
@@ -143,6 +145,19 @@ static int append_to_file(const char *path, const char *format, ...)
return res;
}
+static int print_file_to_stdout(const char *path)
+{
+ int fd = open(path, O_RDONLY);
+ int ret = 0;
+
+ if (fd < 0)
+ return error_errno(_("cannot open file '%s' for reading"), path);
+ if (copy_fd(fd, 1) < 0)
+ ret = error_errno(_("failed to read '%s'"), path);
+ close(fd);
+ return ret;
+}
+
static int check_term_format(const char *term, const char *orig_term)
{
int res;
@@ -1036,6 +1051,125 @@ static enum bisect_error bisect_skip(struct bisect_terms *terms, const char **ar
return res;
}
+static int bisect_visualize(struct bisect_terms *terms, const char **argv, int argc)
+{
+ struct strvec args = STRVEC_INIT;
+ int flags = RUN_COMMAND_NO_STDIN, res = 0;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (bisect_next_check(terms, NULL) != 0)
+ return BISECT_FAILED;
+
+ if (!argc) {
+ if ((getenv("DISPLAY") || getenv("SESSIONNAME") || getenv("MSYSTEM") ||
+ getenv("SECURITYSESSIONID")) && exists_in_PATH("gitk")) {
+ strvec_push(&args, "gitk");
+ } else {
+ strvec_push(&args, "log");
+ flags |= RUN_GIT_CMD;
+ }
+ } else {
+ if (argv[0][0] == '-') {
+ strvec_push(&args, "log");
+ flags |= RUN_GIT_CMD;
+ } else if (strcmp(argv[0], "tig") && !starts_with(argv[0], "git"))
+ flags |= RUN_GIT_CMD;
+
+ strvec_pushv(&args, argv);
+ }
+
+ strvec_pushl(&args, "--bisect", "--", NULL);
+
+ strbuf_read_file(&sb, git_path_bisect_names(), 0);
+ sq_dequote_to_strvec(sb.buf, &args);
+ strbuf_release(&sb);
+
+ res = run_command_v_opt(args.v, flags);
+ strvec_clear(&args);
+ return res;
+}
+
+static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
+{
+ int res = BISECT_OK;
+ struct strbuf command = STRBUF_INIT;
+ struct strvec args = STRVEC_INIT;
+ struct strvec run_args = STRVEC_INIT;
+ const char *new_state;
+ int temporary_stdout_fd, saved_stdout;
+
+ if (bisect_next_check(terms, NULL))
+ return BISECT_FAILED;
+
+ if (argc)
+ sq_quote_argv(&command, argv);
+ else {
+ error(_("bisect run failed: no command provided."));
+ return BISECT_FAILED;
+ }
+
+ strvec_push(&run_args, command.buf);
+
+ while (1) {
+ strvec_clear(&args);
+
+ printf(_("running %s\n"), command.buf);
+ res = run_command_v_opt(run_args.v, RUN_USING_SHELL);
+
+ if (res < 0 || 128 <= res) {
+ error(_("bisect run failed: exit code %d from"
+ " '%s' is < 0 or >= 128"), res, command.buf);
+ strbuf_release(&command);
+ return res;
+ }
+
+ if (res == 125)
+ new_state = "skip";
+ else if (!res)
+ new_state = terms->term_good;
+ else
+ new_state = terms->term_bad;
+
+ temporary_stdout_fd = open(git_path_bisect_run(), O_CREAT | O_WRONLY | O_TRUNC, 0666);
+
+ if (temporary_stdout_fd < 0)
+ return error_errno(_("cannot open file '%s' for writing"), git_path_bisect_run());
+
+ fflush(stdout);
+ saved_stdout = dup(1);
+ dup2(temporary_stdout_fd, 1);
+
+ res = bisect_state(terms, &new_state, 1);
+
+ fflush(stdout);
+ dup2(saved_stdout, 1);
+ close(saved_stdout);
+ close(temporary_stdout_fd);
+
+ print_file_to_stdout(git_path_bisect_run());
+
+ if (res == BISECT_ONLY_SKIPPED_LEFT)
+ error(_("bisect run cannot continue any more"));
+ else if (res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) {
+ printf(_("bisect run success"));
+ res = BISECT_OK;
+ } else if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) {
+ printf(_("bisect found first bad commit"));
+ res = BISECT_OK;
+ } else if (res) {
+ error(_("bisect run failed: 'git bisect--helper --bisect-state"
+ " %s' exited with error code %d"), args.v[0], res);
+ } else {
+ continue;
+ }
+
+ strbuf_release(&command);
+ strvec_clear(&args);
+ strvec_clear(&run_args);
+ return res;
+ }
+}
+
int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
{
enum {
@@ -1048,7 +1182,9 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
BISECT_STATE,
BISECT_LOG,
BISECT_REPLAY,
- BISECT_SKIP
+ BISECT_SKIP,
+ BISECT_VISUALIZE,
+ BISECT_RUN,
} cmdmode = 0;
int res = 0, nolog = 0;
struct option options[] = {
@@ -1070,6 +1206,10 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
N_("replay the bisection process from the given file"), BISECT_REPLAY),
OPT_CMDMODE(0, "bisect-skip", &cmdmode,
N_("skip some commits for checkout"), BISECT_SKIP),
+ OPT_CMDMODE(0, "bisect-visualize", &cmdmode,
+ N_("visualize the bisection"), BISECT_VISUALIZE),
+ OPT_CMDMODE(0, "bisect-run", &cmdmode,
+ N_("use <cmd>... to automatically bisect."), BISECT_RUN),
OPT_BOOL(0, "no-log", &nolog,
N_("no log for BISECT_WRITE")),
OPT_END()
@@ -1089,12 +1229,6 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
return error(_("--bisect-reset requires either no argument or a commit"));
res = bisect_reset(argc ? argv[0] : NULL);
break;
- case BISECT_NEXT_CHECK:
- if (argc != 2 && argc != 3)
- return error(_("--bisect-next-check requires 2 or 3 arguments"));
- set_terms(&terms, argv[1], argv[0]);
- res = bisect_next_check(&terms, argc == 3 ? argv[2] : NULL);
- break;
case BISECT_TERMS:
if (argc > 1)
return error(_("--bisect-terms requires 0 or 1 argument"));
@@ -1131,6 +1265,16 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
get_terms(&terms);
res = bisect_skip(&terms, argv, argc);
break;
+ case BISECT_VISUALIZE:
+ get_terms(&terms);
+ res = bisect_visualize(&terms, argv, argc);
+ break;
+ case BISECT_RUN:
+ if (!argc)
+ return error(_("bisect run failed: no command provided."));
+ get_terms(&terms);
+ res = bisect_run(&terms, argv, argc);
+ break;
default:
BUG("unknown subcommand %d", cmdmode);
}
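The run mode above is driven through the usual `git bisect run` porcelain. A sketch of a typical session follows (the known-good revision and the test command are placeholders); the exit-code rules match the checks in bisect_run(): 0 marks the commit good, 125 skips it, any other status below 128 marks it bad, and 128 or higher (or a negative status) aborts the run.

------------
git bisect start
git bisect bad HEAD
git bisect good v2.32.0
git bisect run sh -c 'make -j8 >/dev/null 2>&1 && ./run-my-test.sh'
git bisect reset
------------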
diff --git a/builtin/branch.c b/builtin/branch.c
index 03c7b7253a..0b7ed82654 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -427,7 +427,7 @@ static void print_ref_list(struct ref_filter *filter, struct ref_sorting *sortin
memset(&array, 0, sizeof(array));
- filter_refs(&array, filter, filter->kind | FILTER_REFS_INCLUDE_BROKEN);
+ filter_refs(&array, filter, filter->kind);
if (filter->verbose)
maxwidth = calc_maxwidth(&array, strlen(remote_prefix));
diff --git a/builtin/bugreport.c b/builtin/bugreport.c
index 06ed10dc92..9de32bc96e 100644
--- a/builtin/bugreport.c
+++ b/builtin/bugreport.c
@@ -3,7 +3,8 @@
#include "strbuf.h"
#include "help.h"
#include "compat/compiler.h"
-#include "run-command.h"
+#include "hook.h"
+#include "hook-list.h"
static void get_system_info(struct strbuf *sys_info)
@@ -41,39 +42,7 @@ static void get_system_info(struct strbuf *sys_info)
static void get_populated_hooks(struct strbuf *hook_info, int nongit)
{
- /*
- * NEEDSWORK: Doesn't look like there is a list of all possible hooks;
- * so below is a transcription of `git help hooks`. Later, this should
- * be replaced with some programmatically generated list (generated from
- * doc or else taken from some library which tells us about all the
- * hooks)
- */
- static const char *hook[] = {
- "applypatch-msg",
- "pre-applypatch",
- "post-applypatch",
- "pre-commit",
- "pre-merge-commit",
- "prepare-commit-msg",
- "commit-msg",
- "post-commit",
- "pre-rebase",
- "post-checkout",
- "post-merge",
- "pre-push",
- "pre-receive",
- "update",
- "post-receive",
- "post-update",
- "push-to-checkout",
- "pre-auto-gc",
- "post-rewrite",
- "sendemail-validate",
- "fsmonitor-watchman",
- "p4-pre-submit",
- "post-index-change",
- };
- int i;
+ const char **p;
if (nongit) {
strbuf_addstr(hook_info,
@@ -81,9 +50,12 @@ static void get_populated_hooks(struct strbuf *hook_info, int nongit)
return;
}
- for (i = 0; i < ARRAY_SIZE(hook); i++)
- if (find_hook(hook[i]))
- strbuf_addf(hook_info, "%s\n", hook[i]);
+ for (p = hook_name_list; *p; p++) {
+ const char *hook = *p;
+
+ if (hook_exists(hook))
+ strbuf_addf(hook_info, "%s\n", hook);
+ }
}
static const char * const bugreport_usage[] = {
diff --git a/builtin/bundle.c b/builtin/bundle.c
index 053a51bea1..5a85d7cd0f 100644
--- a/builtin/bundle.c
+++ b/builtin/bundle.c
@@ -39,8 +39,6 @@ static const char * const builtin_bundle_unbundle_usage[] = {
NULL
};
-static int verbose;
-
static int parse_options_cmd_bundle(int argc,
const char **argv,
const char* prefix,
@@ -162,10 +160,15 @@ static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix)
struct bundle_header header = BUNDLE_HEADER_INIT;
int bundle_fd = -1;
int ret;
+ int progress = isatty(2);
+
struct option options[] = {
+ OPT_BOOL(0, "progress", &progress,
+ N_("show progress meter")),
OPT_END()
};
char *bundle_file;
+ struct strvec extra_index_pack_args = STRVEC_INIT;
argc = parse_options_cmd_bundle(argc, argv, prefix,
builtin_bundle_unbundle_usage, options, &bundle_file);
@@ -177,7 +180,11 @@ static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix)
}
if (!startup_info->have_repository)
die(_("Need a repository to unbundle."));
- ret = !!unbundle(the_repository, &header, bundle_fd, 0) ||
+ if (progress)
+ strvec_pushl(&extra_index_pack_args, "-v", "--progress-title",
+ _("Unbundling objects"), NULL);
+ ret = !!unbundle(the_repository, &header, bundle_fd,
+ &extra_index_pack_args) ||
list_bundle_refs(&header, argc, argv);
bundle_header_release(&header);
cleanup:
@@ -188,7 +195,6 @@ cleanup:
int cmd_bundle(int argc, const char **argv, const char *prefix)
{
struct option options[] = {
- OPT__VERBOSE(&verbose, N_("be verbose; must be placed before a subcommand")),
OPT_END()
};
int result;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 8c69dcdf72..cbf73b8c9f 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -646,7 +646,9 @@ static int reset_tree(struct tree *tree, const struct checkout_opts *o,
opts.head_idx = -1;
opts.update = worktree;
opts.skip_unmerged = !worktree;
- opts.reset = 1;
+ opts.reset = o->force ? UNPACK_RESET_OVERWRITE_UNTRACKED :
+ UNPACK_RESET_PROTECT_UNTRACKED;
+ opts.preserve_ignored = (!o->force && !o->overwrite_ignore);
opts.merge = 1;
opts.fn = oneway_merge;
opts.verbose_update = o->show_progress;
@@ -746,11 +748,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
new_branch_info->commit ?
&new_branch_info->commit->object.oid :
&new_branch_info->oid, NULL);
- if (opts->overwrite_ignore) {
- topts.dir = xcalloc(1, sizeof(*topts.dir));
- topts.dir->flags |= DIR_SHOW_IGNORED;
- setup_standard_excludes(topts.dir);
- }
+ topts.preserve_ignored = !opts->overwrite_ignore;
tree = parse_tree_indirect(old_branch_info->commit ?
&old_branch_info->commit->object.oid :
the_hash_algo->empty_tree);
diff --git a/builtin/clone.c b/builtin/clone.c
index b93bcd460e..559acf9e03 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -217,120 +217,6 @@ static char *get_repo_path(const char *repo, int *is_bundle)
return canon;
}
-static char *guess_dir_name(const char *repo, int is_bundle, int is_bare)
-{
- const char *end = repo + strlen(repo), *start, *ptr;
- size_t len;
- char *dir;
-
- /*
- * Skip scheme.
- */
- start = strstr(repo, "://");
- if (start == NULL)
- start = repo;
- else
- start += 3;
-
- /*
- * Skip authentication data. The stripping does happen
- * greedily, such that we strip up to the last '@' inside
- * the host part.
- */
- for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) {
- if (*ptr == '@')
- start = ptr + 1;
- }
-
- /*
- * Strip trailing spaces, slashes and /.git
- */
- while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1])))
- end--;
- if (end - start > 5 && is_dir_sep(end[-5]) &&
- !strncmp(end - 4, ".git", 4)) {
- end -= 5;
- while (start < end && is_dir_sep(end[-1]))
- end--;
- }
-
- /*
- * Strip trailing port number if we've got only a
- * hostname (that is, there is no dir separator but a
- * colon). This check is required such that we do not
- * strip URI's like '/foo/bar:2222.git', which should
- * result in a dir '2222' being guessed due to backwards
- * compatibility.
- */
- if (memchr(start, '/', end - start) == NULL
- && memchr(start, ':', end - start) != NULL) {
- ptr = end;
- while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':')
- ptr--;
- if (start < ptr && ptr[-1] == ':')
- end = ptr - 1;
- }
-
- /*
- * Find last component. To remain backwards compatible we
- * also regard colons as path separators, such that
- * cloning a repository 'foo:bar.git' would result in a
- * directory 'bar' being guessed.
- */
- ptr = end;
- while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':')
- ptr--;
- start = ptr;
-
- /*
- * Strip .{bundle,git}.
- */
- len = end - start;
- strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git");
-
- if (!len || (len == 1 && *start == '/'))
- die(_("No directory name could be guessed.\n"
- "Please specify a directory on the command line"));
-
- if (is_bare)
- dir = xstrfmt("%.*s.git", (int)len, start);
- else
- dir = xstrndup(start, len);
- /*
- * Replace sequences of 'control' characters and whitespace
- * with one ascii space, remove leading and trailing spaces.
- */
- if (*dir) {
- char *out = dir;
- int prev_space = 1 /* strip leading whitespace */;
- for (end = dir; *end; ++end) {
- char ch = *end;
- if ((unsigned char)ch < '\x20')
- ch = '\x20';
- if (isspace(ch)) {
- if (prev_space)
- continue;
- prev_space = 1;
- } else
- prev_space = 0;
- *out++ = ch;
- }
- *out = '\0';
- if (out > dir && prev_space)
- out[-1] = '\0';
- }
- return dir;
-}
-
-static void strip_trailing_slashes(char *dir)
-{
- char *end = dir + strlen(dir);
-
- while (dir < end - 1 && is_dir_sep(end[-1]))
- end--;
- *end = '\0';
-}
-
static int add_one_reference(struct string_list_item *item, void *cb_data)
{
struct strbuf err = STRBUF_INIT;
@@ -657,7 +543,7 @@ static void write_followtags(const struct ref *refs, const char *msg)
}
}
-static int iterate_ref_map(void *cb_data, struct object_id *oid)
+static const struct object_id *iterate_ref_map(void *cb_data)
{
struct ref **rm = cb_data;
struct ref *ref = *rm;
@@ -668,13 +554,11 @@ static int iterate_ref_map(void *cb_data, struct object_id *oid)
*/
while (ref && !ref->peer_ref)
ref = ref->next;
- /* Returning -1 notes "end of list" to the caller. */
if (!ref)
- return -1;
+ return NULL;
- oidcpy(oid, &ref->old_oid);
*rm = ref->next;
- return 0;
+ return &ref->old_oid;
}
static void update_remote_refs(const struct ref *refs,
@@ -803,6 +687,7 @@ static int checkout(int submodule_progress)
opts.update = 1;
opts.merge = 1;
opts.clone = 1;
+ opts.preserve_ignored = 0;
opts.fn = oneway_merge;
opts.verbose_update = (option_verbosity >= 0);
opts.src_index = &the_index;
@@ -1041,8 +926,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (argc == 2)
dir = xstrdup(argv[1]);
else
- dir = guess_dir_name(repo_name, is_bundle, option_bare);
- strip_trailing_slashes(dir);
+ dir = git_url_basename(repo_name, is_bundle, option_bare);
+ strip_dir_trailing_slashes(dir);
dest_exists = path_exists(dir);
if (dest_exists && !is_empty_dir(dir))
@@ -1345,6 +1230,9 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
our_head_points_at = remote_head_points_at;
}
else {
+ const char *branch;
+ char *ref;
+
if (option_branch)
die(_("Remote branch %s not found in upstream %s"),
option_branch, remote_name);
@@ -1355,24 +1243,22 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
remote_head_points_at = NULL;
remote_head = NULL;
option_no_checkout = 1;
- if (!option_bare) {
- const char *branch;
- char *ref;
-
- if (transport_ls_refs_options.unborn_head_target &&
- skip_prefix(transport_ls_refs_options.unborn_head_target,
- "refs/heads/", &branch)) {
- ref = transport_ls_refs_options.unborn_head_target;
- transport_ls_refs_options.unborn_head_target = NULL;
- create_symref("HEAD", ref, reflog_msg.buf);
- } else {
- branch = git_default_branch_name(0);
- ref = xstrfmt("refs/heads/%s", branch);
- }
- install_branch_config(0, branch, remote_name, ref);
- free(ref);
+ if (transport_ls_refs_options.unborn_head_target &&
+ skip_prefix(transport_ls_refs_options.unborn_head_target,
+ "refs/heads/", &branch)) {
+ ref = transport_ls_refs_options.unborn_head_target;
+ transport_ls_refs_options.unborn_head_target = NULL;
+ create_symref("HEAD", ref, reflog_msg.buf);
+ } else {
+ branch = git_default_branch_name(0);
+ ref = xstrfmt("refs/heads/%s", branch);
}
+
+ if (!option_bare)
+ install_branch_config(0, branch, remote_name, ref);
+
+ free(ref);
}
write_refspec_config(src_ref_prefix, our_head_points_at,
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index 21fc6e934b..3c3de3a156 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -50,8 +50,6 @@ static struct option common_opts[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
N_("dir"),
N_("the object directory to store the graph")),
- OPT_BOOL(0, "progress", &opts.progress,
- N_("force progress reporting")),
OPT_END()
};
@@ -60,28 +58,6 @@ static struct option *add_common_options(struct option *to)
return parse_options_concat(common_opts, to);
}
-static struct object_directory *find_odb(struct repository *r,
- const char *obj_dir)
-{
- struct object_directory *odb;
- char *obj_dir_real = real_pathdup(obj_dir, 1);
- struct strbuf odb_path_real = STRBUF_INIT;
-
- prepare_alt_odb(r);
- for (odb = r->objects->odb; odb; odb = odb->next) {
- strbuf_realpath(&odb_path_real, odb->path, 1);
- if (!strcmp(obj_dir_real, odb_path_real.buf))
- break;
- }
-
- free(obj_dir_real);
- strbuf_release(&odb_path_real);
-
- if (!odb)
- die(_("could not find object directory matching %s"), obj_dir);
- return odb;
-}
-
static int graph_verify(int argc, const char **argv)
{
struct commit_graph *graph = NULL;
@@ -95,6 +71,8 @@ static int graph_verify(int argc, const char **argv)
static struct option builtin_commit_graph_verify_options[] = {
OPT_BOOL(0, "shallow", &opts.shallow,
N_("if the commit-graph is split, only verify the tip file")),
+ OPT_BOOL(0, "progress", &opts.progress,
+ N_("force progress reporting")),
OPT_END(),
};
struct option *options = add_common_options(builtin_commit_graph_verify_options);
@@ -246,6 +224,8 @@ static int graph_write(int argc, const char **argv)
OPT_CALLBACK_F(0, "max-new-filters", &write_opts.max_new_filters,
NULL, N_("maximum number of changed-path Bloom filters to compute"),
0, write_option_max_new_filters),
+ OPT_BOOL(0, "progress", &opts.progress,
+ N_("force progress reporting")),
OPT_END(),
};
struct option *options = add_common_options(builtin_commit_graph_write_options);
diff --git a/builtin/commit.c b/builtin/commit.c
index e7320f66f9..883c16256c 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -19,6 +19,7 @@
#include "revision.h"
#include "wt-status.h"
#include "run-command.h"
+#include "hook.h"
#include "refs.h"
#include "log-tree.h"
#include "strbuf.h"
@@ -1051,7 +1052,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
return 0;
}
- if (!no_verify && find_hook("pre-commit")) {
+ if (!no_verify && hook_exists("pre-commit")) {
/*
* Re-read the index as pre-commit hook could have updated it,
* and write it out as a tree. We must do this before we invoke
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
index e8a7415747..78c02ad531 100644
--- a/builtin/credential-cache.c
+++ b/builtin/credential-cache.c
@@ -11,6 +11,32 @@
#define FLAG_SPAWN 0x1
#define FLAG_RELAY 0x2
+#ifdef GIT_WINDOWS_NATIVE
+
+static int connection_closed(int error)
+{
+ return (error == EINVAL);
+}
+
+static int connection_fatally_broken(int error)
+{
+ return (error != ENOENT) && (error != ENETDOWN);
+}
+
+#else
+
+static int connection_closed(int error)
+{
+ return (error == ECONNRESET);
+}
+
+static int connection_fatally_broken(int error)
+{
+ return (error != ENOENT) && (error != ECONNREFUSED);
+}
+
+#endif
+
static int send_request(const char *socket, const struct strbuf *out)
{
int got_data = 0;
@@ -28,7 +54,7 @@ static int send_request(const char *socket, const struct strbuf *out)
int r;
r = read_in_full(fd, in, sizeof(in));
- if (r == 0 || (r < 0 && errno == ECONNRESET))
+ if (r == 0 || (r < 0 && connection_closed(errno)))
break;
if (r < 0)
die_errno("read error from cache daemon");
@@ -75,7 +101,7 @@ static void do_cache(const char *socket, const char *action, int timeout,
}
if (send_request(socket, &buf) < 0) {
- if (errno != ENOENT && errno != ECONNREFUSED)
+ if (connection_fatally_broken(errno))
die_errno("unable to connect to cache daemon");
if (flags & FLAG_SPAWN) {
spawn_daemon(socket);
diff --git a/builtin/diff-index.c b/builtin/diff-index.c
index cf09559e42..5fd23ab5b6 100644
--- a/builtin/diff-index.c
+++ b/builtin/diff-index.c
@@ -29,10 +29,10 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix)
prefix = precompose_argv_prefix(argc, argv, prefix);
/*
- * We need no diff for merges options, and we need to avoid conflict
- * with our own meaning of "-m".
+ * We need (some of) diff for merges options (e.g., --cc), and we need
+ * to avoid conflict with our own meaning of "-m".
*/
- diff_merges_suppress_options_parsing();
+ diff_merges_suppress_m_parsing();
argc = setup_revisions(argc, argv, &rev, NULL);
for (i = 1; i < argc; i++) {
diff --git a/builtin/difftool.c b/builtin/difftool.c
index 6a9242a803..4931c10845 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -252,16 +252,6 @@ static void changed_files(struct hashmap *result, const char *index_path,
strbuf_release(&buf);
}
-static NORETURN void exit_cleanup(const char *tmpdir, int exit_code)
-{
- struct strbuf buf = STRBUF_INIT;
- strbuf_addstr(&buf, tmpdir);
- remove_dir_recursively(&buf, 0);
- if (exit_code)
- warning(_("failed: %d"), exit_code);
- exit(exit_code);
-}
-
static int ensure_leading_directories(char *path)
{
switch (safe_create_leading_directories(path)) {
@@ -330,19 +320,44 @@ static int checkout_path(unsigned mode, struct object_id *oid,
return ret;
}
+static void write_file_in_directory(struct strbuf *dir, size_t dir_len,
+ const char *path, const char *content)
+{
+ add_path(dir, dir_len, path);
+ ensure_leading_directories(dir->buf);
+ unlink(dir->buf);
+ write_file(dir->buf, "%s", content);
+}
+
+/* Write the file contents for the left and right sides of the difftool
+ * dir-diff representation for submodules and symlinks. Symlinks and submodules
+ * are written as regular text files so that external diff tools can diff them
+ * as text files, resulting in behavior that is analogous to what "git diff"
+ * displays for symlink and submodule diffs.
+ */
+static void write_standin_files(struct pair_entry *entry,
+ struct strbuf *ldir, size_t ldir_len,
+ struct strbuf *rdir, size_t rdir_len)
+{
+ if (*entry->left)
+ write_file_in_directory(ldir, ldir_len, entry->path, entry->left);
+ if (*entry->right)
+ write_file_in_directory(rdir, rdir_len, entry->path, entry->right);
+}
+
static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
- int argc, const char **argv)
+ struct child_process *child)
{
- char tmpdir[PATH_MAX];
struct strbuf info = STRBUF_INIT, lpath = STRBUF_INIT;
struct strbuf rpath = STRBUF_INIT, buf = STRBUF_INIT;
struct strbuf ldir = STRBUF_INIT, rdir = STRBUF_INIT;
struct strbuf wtdir = STRBUF_INIT;
- char *lbase_dir, *rbase_dir;
+ struct strbuf tmpdir = STRBUF_INIT;
+ char *lbase_dir = NULL, *rbase_dir = NULL;
size_t ldir_len, rdir_len, wtdir_len;
const char *workdir, *tmp;
int ret = 0, i;
- FILE *fp;
+ FILE *fp = NULL;
struct hashmap working_tree_dups = HASHMAP_INIT(working_tree_entry_cmp,
NULL);
struct hashmap submodules = HASHMAP_INIT(pair_cmp, NULL);
@@ -351,8 +366,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
struct pair_entry *entry;
struct index_state wtindex;
struct checkout lstate, rstate;
- int rc, flags = RUN_GIT_CMD, err = 0;
- struct child_process child = CHILD_PROCESS_INIT;
+ int flags = RUN_GIT_CMD, err = 0;
const char *helper_argv[] = { "difftool--helper", NULL, NULL, NULL };
struct hashmap wt_modified, tmp_modified;
int indices_loaded = 0;
@@ -361,11 +375,15 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
/* Setup temp directories */
tmp = getenv("TMPDIR");
- xsnprintf(tmpdir, sizeof(tmpdir), "%s/git-difftool.XXXXXX", tmp ? tmp : "/tmp");
- if (!mkdtemp(tmpdir))
- return error("could not create '%s'", tmpdir);
- strbuf_addf(&ldir, "%s/left/", tmpdir);
- strbuf_addf(&rdir, "%s/right/", tmpdir);
+ strbuf_add_absolute_path(&tmpdir, tmp ? tmp : "/tmp");
+ strbuf_trim_trailing_dir_sep(&tmpdir);
+ strbuf_addstr(&tmpdir, "/git-difftool.XXXXXX");
+ if (!mkdtemp(tmpdir.buf)) {
+ ret = error("could not create '%s'", tmpdir.buf);
+ goto finish;
+ }
+ strbuf_addf(&ldir, "%s/left/", tmpdir.buf);
+ strbuf_addf(&rdir, "%s/right/", tmpdir.buf);
strbuf_addstr(&wtdir, workdir);
if (!wtdir.len || !is_dir_sep(wtdir.buf[wtdir.len - 1]))
strbuf_addch(&wtdir, '/');
@@ -387,19 +405,15 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
rdir_len = rdir.len;
wtdir_len = wtdir.len;
- child.no_stdin = 1;
- child.git_cmd = 1;
- child.use_shell = 0;
- child.clean_on_exit = 1;
- child.dir = prefix;
- child.out = -1;
- strvec_pushl(&child.args, "diff", "--raw", "--no-abbrev", "-z",
- NULL);
- for (i = 0; i < argc; i++)
- strvec_push(&child.args, argv[i]);
- if (start_command(&child))
+ child->no_stdin = 1;
+ child->git_cmd = 1;
+ child->use_shell = 0;
+ child->clean_on_exit = 1;
+ child->dir = prefix;
+ child->out = -1;
+ if (start_command(child))
die("could not obtain raw diff");
- fp = xfdopen(child.out, "r");
+ fp = xfdopen(child->out, "r");
/* Build index info for left and right sides of the diff */
i = 0;
@@ -410,9 +424,9 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
const char *src_path, *dst_path;
if (starts_with(info.buf, "::"))
- die(N_("combined diff formats('-c' and '--cc') are "
+ die(N_("combined diff formats ('-c' and '--cc') are "
"not supported in\n"
- "directory diff mode('-d' and '--dir-diff')."));
+ "directory diff mode ('-d' and '--dir-diff')."));
if (parse_index_info(info.buf, &lmode, &rmode, &loid, &roid,
&status))
@@ -525,7 +539,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
fclose(fp);
fp = NULL;
- if (finish_command(&child)) {
+ if (finish_command(child)) {
ret = error("error occurred running diff --raw");
goto finish;
}
@@ -540,38 +554,19 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
*/
hashmap_for_each_entry(&submodules, &iter, entry,
entry /* member name */) {
- if (*entry->left) {
- add_path(&ldir, ldir_len, entry->path);
- ensure_leading_directories(ldir.buf);
- write_file(ldir.buf, "%s", entry->left);
- }
- if (*entry->right) {
- add_path(&rdir, rdir_len, entry->path);
- ensure_leading_directories(rdir.buf);
- write_file(rdir.buf, "%s", entry->right);
- }
+ write_standin_files(entry, &ldir, ldir_len, &rdir, rdir_len);
}
/*
- * Symbolic links require special treatment.The standard "git diff"
+ * Symbolic links require special treatment. The standard "git diff"
* shows only the link itself, not the contents of the link target.
* This loop replicates that behavior.
*/
hashmap_for_each_entry(&symlinks2, &iter, entry,
entry /* member name */) {
- if (*entry->left) {
- add_path(&ldir, ldir_len, entry->path);
- ensure_leading_directories(ldir.buf);
- write_file(ldir.buf, "%s", entry->left);
- }
- if (*entry->right) {
- add_path(&rdir, rdir_len, entry->path);
- ensure_leading_directories(rdir.buf);
- write_file(rdir.buf, "%s", entry->right);
- }
- }
- strbuf_release(&buf);
+ write_standin_files(entry, &ldir, ldir_len, &rdir, rdir_len);
+ }
strbuf_setlen(&ldir, ldir_len);
helper_argv[1] = ldir.buf;
@@ -583,7 +578,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
flags = 0;
} else
setenv("GIT_DIFFTOOL_DIRDIFF", "true", 1);
- rc = run_command_v_opt(helper_argv, flags);
+ ret = run_command_v_opt(helper_argv, flags);
/* TODO: audit for interaction with sparse-index. */
ensure_full_index(&wtindex);
@@ -617,7 +612,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
if (!indices_loaded) {
struct lock_file lock = LOCK_INIT;
strbuf_reset(&buf);
- strbuf_addf(&buf, "%s/wtindex", tmpdir);
+ strbuf_addf(&buf, "%s/wtindex", tmpdir.buf);
if (hold_lock_file_for_update(&lock, buf.buf, 0) < 0 ||
write_locked_index(&wtindex, &lock, COMMIT_LOCK)) {
ret = error("could not write %s", buf.buf);
@@ -647,11 +642,14 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
}
if (err) {
- warning(_("temporary files exist in '%s'."), tmpdir);
+ warning(_("temporary files exist in '%s'."), tmpdir.buf);
warning(_("you may want to cleanup or recover these."));
- exit(1);
- } else
- exit_cleanup(tmpdir, rc);
+ ret = 1;
+ } else {
+ remove_dir_recursively(&tmpdir, 0);
+ if (ret)
+ warning(_("failed: %d"), ret);
+ }
finish:
if (fp)
@@ -663,30 +661,29 @@ finish:
strbuf_release(&rdir);
strbuf_release(&wtdir);
strbuf_release(&buf);
+ strbuf_release(&tmpdir);
- return ret;
+ return (ret < 0) ? 1 : ret;
}
static int run_file_diff(int prompt, const char *prefix,
- int argc, const char **argv)
+ struct child_process *child)
{
- struct strvec args = STRVEC_INIT;
const char *env[] = {
"GIT_PAGER=", "GIT_EXTERNAL_DIFF=git-difftool--helper", NULL,
NULL
};
- int i;
if (prompt > 0)
env[2] = "GIT_DIFFTOOL_PROMPT=true";
else if (!prompt)
env[2] = "GIT_DIFFTOOL_NO_PROMPT=true";
+ child->git_cmd = 1;
+ child->dir = prefix;
+ strvec_pushv(&child->env_array, env);
- strvec_push(&args, "diff");
- for (i = 0; i < argc; i++)
- strvec_push(&args, argv[i]);
- return run_command_v_opt_cd_env(args.v, RUN_GIT_CMD, prefix, env);
+ return run_command(child);
}
int cmd_difftool(int argc, const char **argv, const char *prefix)
@@ -713,12 +710,13 @@ int cmd_difftool(int argc, const char **argv, const char *prefix)
"`--tool`")),
OPT_BOOL(0, "trust-exit-code", &trust_exit_code,
N_("make 'git-difftool' exit when an invoked diff "
- "tool returns a non - zero exit code")),
+ "tool returns a non-zero exit code")),
OPT_STRING('x', "extcmd", &extcmd, N_("command"),
N_("specify a custom command for viewing diffs")),
- OPT_ARGUMENT("no-index", &no_index, N_("passed to `diff`")),
+ OPT_BOOL(0, "no-index", &no_index, N_("passed to `diff`")),
OPT_END()
};
+ struct child_process child = CHILD_PROCESS_INIT;
git_config(difftool_config, NULL);
symlinks = has_symlinks;
@@ -768,7 +766,14 @@ int cmd_difftool(int argc, const char **argv, const char *prefix)
* will invoke a separate instance of 'git-difftool--helper' for
* each file that changed.
*/
+ strvec_push(&child.args, "diff");
+ if (no_index)
+ strvec_push(&child.args, "--no-index");
+ if (dir_diff)
+ strvec_pushl(&child.args, "--raw", "--no-abbrev", "-z", NULL);
+ strvec_pushv(&child.args, argv);
+
if (dir_diff)
- return run_dir_diff(extcmd, symlinks, prefix, argc, argv);
- return run_file_diff(prompt, prefix, argc, argv);
+ return run_dir_diff(extcmd, symlinks, prefix, &child);
+ return run_file_diff(prompt, prefix, &child);
}
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 38fab0369f..f7abbc31ff 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -854,13 +854,11 @@ static int update_local_ref(struct ref *ref,
int summary_width)
{
struct commit *current = NULL, *updated;
- enum object_type type;
struct branch *current_branch = branch_get(NULL);
const char *pretty_ref = prettify_refname(ref->name);
int fast_forward = 0;
- type = oid_object_info(the_repository, &ref->new_oid, NULL);
- if (type < 0)
+ if (!repo_has_object_file(the_repository, &ref->new_oid))
die(_("object %s not found"), oid_to_hex(&ref->new_oid));
if (oideq(&ref->old_oid, &ref->new_oid)) {
@@ -972,7 +970,7 @@ static int update_local_ref(struct ref *ref,
}
}
-static int iterate_ref_map(void *cb_data, struct object_id *oid)
+static const struct object_id *iterate_ref_map(void *cb_data)
{
struct ref **rm = cb_data;
struct ref *ref = *rm;
@@ -980,10 +978,9 @@ static int iterate_ref_map(void *cb_data, struct object_id *oid)
while (ref && ref->status == REF_STATUS_REJECT_SHALLOW)
ref = ref->next;
if (!ref)
- return -1; /* end of the list */
+ return NULL;
*rm = ref->next;
- oidcpy(oid, &ref->old_oid);
- return 0;
+ return &ref->old_oid;
}
struct fetch_head {
@@ -1082,7 +1079,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
int connectivity_checked, struct ref *ref_map)
{
struct fetch_head fetch_head;
- struct commit *commit;
int url_len, i, rc = 0;
struct strbuf note = STRBUF_INIT, err = STRBUF_INIT;
struct ref_transaction *transaction = NULL;
@@ -1130,6 +1126,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
want_status <= FETCH_HEAD_IGNORE;
want_status++) {
for (rm = ref_map; rm; rm = rm->next) {
+ struct commit *commit = NULL;
struct ref *ref = NULL;
if (rm->status == REF_STATUS_REJECT_SHALLOW) {
@@ -1139,11 +1136,23 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
continue;
}
- commit = lookup_commit_reference_gently(the_repository,
- &rm->old_oid,
- 1);
- if (!commit)
- rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
+ /*
+ * References in "refs/tags/" are often going to point
+ * to annotated tags, which are not part of the
+ * commit-graph. We thus only try to look up refs in
+ * the graph which are not in that namespace to not
+ * regress performance in repositories with many
+ * annotated tags.
+ */
+ if (!starts_with(rm->name, "refs/tags/"))
+ commit = lookup_commit_in_graph(the_repository, &rm->old_oid);
+ if (!commit) {
+ commit = lookup_commit_reference_gently(the_repository,
+ &rm->old_oid,
+ 1);
+ if (!commit)
+ rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
+ }
if (rm->fetch_head_status != want_status)
continue;
@@ -1289,37 +1298,35 @@ static int check_exist_and_connected(struct ref *ref_map)
return check_connected(iterate_ref_map, &rm, &opt);
}
-static int fetch_refs(struct transport *transport, struct ref *ref_map)
+static int fetch_and_consume_refs(struct transport *transport, struct ref *ref_map)
{
- int ret = check_exist_and_connected(ref_map);
+ int connectivity_checked = 1;
+ int ret;
+
+ /*
+ * We don't need to perform a fetch if we can already satisfy all
+ * refs.
+ */
+ ret = check_exist_and_connected(ref_map);
if (ret) {
trace2_region_enter("fetch", "fetch_refs", the_repository);
ret = transport_fetch_refs(transport, ref_map);
trace2_region_leave("fetch", "fetch_refs", the_repository);
+ if (ret)
+ goto out;
+ connectivity_checked = transport->smart_options ?
+ transport->smart_options->connectivity_checked : 0;
}
- if (!ret)
- /*
- * Keep the new pack's ".keep" file around to allow the caller
- * time to update refs to reference the new objects.
- */
- return 0;
- transport_unlock_pack(transport);
- return ret;
-}
-/* Update local refs based on the ref values fetched from a remote */
-static int consume_refs(struct transport *transport, struct ref *ref_map)
-{
- int connectivity_checked = transport->smart_options
- ? transport->smart_options->connectivity_checked : 0;
- int ret;
trace2_region_enter("fetch", "consume_refs", the_repository);
ret = store_updated_refs(transport->url,
transport->remote->name,
connectivity_checked,
ref_map);
- transport_unlock_pack(transport);
trace2_region_leave("fetch", "consume_refs", the_repository);
+
+out:
+ transport_unlock_pack(transport);
return ret;
}
@@ -1508,8 +1515,7 @@ static void backfill_tags(struct transport *transport, struct ref *ref_map)
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
transport_set_option(transport, TRANS_OPT_DEPTH, "0");
transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
- if (!fetch_refs(transport, ref_map))
- consume_refs(transport, ref_map);
+ fetch_and_consume_refs(transport, ref_map);
if (gsecondary) {
transport_disconnect(gsecondary);
@@ -1600,7 +1606,7 @@ static int do_fetch(struct transport *transport,
transport->url);
}
}
- if (fetch_refs(transport, ref_map) || consume_refs(transport, ref_map)) {
+ if (fetch_and_consume_refs(transport, ref_map)) {
free_refs(ref_map);
retcode = 1;
goto cleanup;
@@ -2142,8 +2148,6 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
NULL);
}
- close_object_store(the_repository->objects);
-
if (enable_auto_gc)
run_auto_maintenance(verbosity < 0);
diff --git a/builtin/for-each-ref.c b/builtin/for-each-ref.c
index 89cb6307d4..642b4b888f 100644
--- a/builtin/for-each-ref.c
+++ b/builtin/for-each-ref.c
@@ -77,7 +77,7 @@ int cmd_for_each_ref(int argc, const char **argv, const char *prefix)
filter.name_patterns = argv;
filter.match_as_path = 1;
- filter_refs(&array, &filter, FILTER_REFS_ALL | FILTER_REFS_INCLUDE_BROKEN);
+ filter_refs(&array, &filter, FILTER_REFS_ALL);
ref_array_sort(sorting, &array);
if (!maxcount || array.nr < maxcount)
diff --git a/builtin/gc.c b/builtin/gc.c
index 43c36024cb..6b3de3dd51 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -663,8 +663,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
gc_before_repack();
if (!repository_format_precious_objects) {
- close_object_store(the_repository->objects);
- if (run_command_v_opt(repack.v, RUN_GIT_CMD))
+ if (run_command_v_opt(repack.v,
+ RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE))
die(FAILED_RUN, repack.v[0]);
if (prune_expire) {
@@ -848,7 +848,7 @@ static int run_write_commit_graph(struct maintenance_run_opts *opts)
{
struct child_process child = CHILD_PROCESS_INIT;
- child.git_cmd = 1;
+ child.git_cmd = child.close_object_store = 1;
strvec_pushl(&child.args, "commit-graph", "write",
"--split", "--reachable", NULL);
@@ -864,7 +864,6 @@ static int maintenance_task_commit_graph(struct maintenance_run_opts *opts)
if (!the_repository->settings.core_commit_graph)
return 0;
- close_object_store(the_repository->objects);
if (run_write_commit_graph(opts)) {
error(_("failed to write commit-graph"));
return 1;
@@ -913,7 +912,7 @@ static int maintenance_task_gc(struct maintenance_run_opts *opts)
{
struct child_process child = CHILD_PROCESS_INIT;
- child.git_cmd = 1;
+ child.git_cmd = child.close_object_store = 1;
strvec_push(&child.args, "gc");
if (opts->auto_flag)
@@ -923,7 +922,6 @@ static int maintenance_task_gc(struct maintenance_run_opts *opts)
else
strvec_push(&child.args, "--no-quiet");
- close_object_store(the_repository->objects);
return run_command(&child);
}
@@ -1097,14 +1095,12 @@ static int multi_pack_index_expire(struct maintenance_run_opts *opts)
{
struct child_process child = CHILD_PROCESS_INIT;
- child.git_cmd = 1;
+ child.git_cmd = child.close_object_store = 1;
strvec_pushl(&child.args, "multi-pack-index", "expire", NULL);
if (opts->quiet)
strvec_push(&child.args, "--no-progress");
- close_object_store(the_repository->objects);
-
if (run_command(&child))
return error(_("'git multi-pack-index expire' failed"));
@@ -1155,7 +1151,7 @@ static int multi_pack_index_repack(struct maintenance_run_opts *opts)
{
struct child_process child = CHILD_PROCESS_INIT;
- child.git_cmd = 1;
+ child.git_cmd = child.close_object_store = 1;
strvec_pushl(&child.args, "multi-pack-index", "repack", NULL);
if (opts->quiet)
@@ -1164,8 +1160,6 @@ static int multi_pack_index_repack(struct maintenance_run_opts *opts)
strvec_pushf(&child.args, "--batch-size=%"PRIuMAX,
(uintmax_t)get_auto_pack_size());
- close_object_store(the_repository->objects);
-
if (run_command(&child))
return error(_("'git multi-pack-index repack' failed"));
@@ -1529,6 +1523,93 @@ static const char *get_frequency(enum schedule_priority schedule)
}
}
+/*
+ * `get_schedule_cmd` reads the GIT_TEST_MAINT_SCHEDULER environment variable
+ * to mock the schedulers that `git maintenance start` relies on.
+ *
+ * For testing purposes, GIT_TEST_MAINT_SCHEDULER can be set to a comma-separated
+ * list of colon-separated key/value pairs where each pair contains a scheduler
+ * and its corresponding mock.
+ *
+ * * If $GIT_TEST_MAINT_SCHEDULER is not set, return false and leave the
+ * arguments unmodified.
+ *
+ * * If $GIT_TEST_MAINT_SCHEDULER is set, return true.
+ * In this case, the *cmd value is read as input.
+ *
+ * * If the input value *cmd is the key of one of the comma-separated list
+ * items, then *is_available is set to true and *cmd is replaced with the
+ * mock command.
+ *
+ * * If the input value *cmd isn't the key of any of the comma-separated list
+ * items, then *is_available is set to false.
+ *
+ * Ex.:
+ * GIT_TEST_MAINT_SCHEDULER not set
+ * +-------+-------------------------------------------------+
+ * | Input |                     Output                      |
+ * | *cmd  | return code |       *cmd        | *is_available |
+ * +-------+-------------+-------------------+---------------+
+ * | "foo" |    false    | "foo" (unchanged) |  (unchanged)  |
+ * +-------+-------------+-------------------+---------------+
+ *
+ * GIT_TEST_MAINT_SCHEDULER set to "foo:./mock_foo.sh,bar:./mock_bar.sh"
+ * +-------+-------------------------------------------------+
+ * | Input |                     Output                      |
+ * | *cmd  | return code |       *cmd        | *is_available |
+ * +-------+-------------+-------------------+---------------+
+ * | "foo" |    true     |  "./mock_foo.sh"  |     true      |
+ * | "qux" |    true     | "qux" (unchanged) |     false     |
+ * +-------+-------------+-------------------+---------------+
+ */
+static int get_schedule_cmd(const char **cmd, int *is_available)
+{
+ char *testing = xstrdup_or_null(getenv("GIT_TEST_MAINT_SCHEDULER"));
+ struct string_list_item *item;
+ struct string_list list = STRING_LIST_INIT_NODUP;
+
+ if (!testing)
+ return 0;
+
+ if (is_available)
+ *is_available = 0;
+
+ string_list_split_in_place(&list, testing, ',', -1);
+ for_each_string_list_item(item, &list) {
+ struct string_list pair = STRING_LIST_INIT_NODUP;
+
+ if (string_list_split_in_place(&pair, item->string, ':', 2) != 2)
+ continue;
+
+ if (!strcmp(*cmd, pair.items[0].string)) {
+ *cmd = pair.items[1].string;
+ if (is_available)
+ *is_available = 1;
+ string_list_clear(&list, 0);
+ UNLEAK(testing);
+ return 1;
+ }
+ }
+
+ string_list_clear(&list, 0);
+ free(testing);
+ return 1;
+}
+
+static int is_launchctl_available(void)
+{
+ const char *cmd = "launchctl";
+ int is_available;
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+#ifdef __APPLE__
+ return 1;
+#else
+ return 0;
+#endif
+}
+
static char *launchctl_service_name(const char *frequency)
{
struct strbuf label = STRBUF_INIT;
@@ -1555,19 +1636,17 @@ static char *launchctl_get_uid(void)
return xstrfmt("gui/%d", getuid());
}
-static int launchctl_boot_plist(int enable, const char *filename, const char *cmd)
+static int launchctl_boot_plist(int enable, const char *filename)
{
+ const char *cmd = "launchctl";
int result;
struct child_process child = CHILD_PROCESS_INIT;
char *uid = launchctl_get_uid();
+ get_schedule_cmd(&cmd, NULL);
strvec_split(&child.args, cmd);
- if (enable)
- strvec_push(&child.args, "bootstrap");
- else
- strvec_push(&child.args, "bootout");
- strvec_push(&child.args, uid);
- strvec_push(&child.args, filename);
+ strvec_pushl(&child.args, enable ? "bootstrap" : "bootout", uid,
+ filename, NULL);
child.no_stderr = 1;
child.no_stdout = 1;
@@ -1581,30 +1660,28 @@ static int launchctl_boot_plist(int enable, const char *filename, const char *cm
return result;
}
-static int launchctl_remove_plist(enum schedule_priority schedule, const char *cmd)
+static int launchctl_remove_plist(enum schedule_priority schedule)
{
const char *frequency = get_frequency(schedule);
char *name = launchctl_service_name(frequency);
char *filename = launchctl_service_filename(name);
- int result = launchctl_boot_plist(0, filename, cmd);
+ int result = launchctl_boot_plist(0, filename);
unlink(filename);
free(filename);
free(name);
return result;
}
-static int launchctl_remove_plists(const char *cmd)
+static int launchctl_remove_plists(void)
{
- return launchctl_remove_plist(SCHEDULE_HOURLY, cmd) ||
- launchctl_remove_plist(SCHEDULE_DAILY, cmd) ||
- launchctl_remove_plist(SCHEDULE_WEEKLY, cmd);
+ return launchctl_remove_plist(SCHEDULE_HOURLY) ||
+ launchctl_remove_plist(SCHEDULE_DAILY) ||
+ launchctl_remove_plist(SCHEDULE_WEEKLY);
}
static int launchctl_list_contains_plist(const char *name, const char *cmd)
{
- int result;
struct child_process child = CHILD_PROCESS_INIT;
- char *uid = launchctl_get_uid();
strvec_split(&child.args, cmd);
strvec_pushl(&child.args, "list", name, NULL);
@@ -1615,15 +1692,11 @@ static int launchctl_list_contains_plist(const char *name, const char *cmd)
if (start_command(&child))
die(_("failed to start launchctl"));
- result = finish_command(&child);
-
- free(uid);
-
/* Returns failure if 'name' doesn't exist. */
- return !result;
+ return !finish_command(&child);
}
-static int launchctl_schedule_plist(const char *exec_path, enum schedule_priority schedule, const char *cmd)
+static int launchctl_schedule_plist(const char *exec_path, enum schedule_priority schedule)
{
int i, fd;
const char *preamble, *repeat;
@@ -1634,7 +1707,9 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
static unsigned long lock_file_timeout_ms = ULONG_MAX;
struct strbuf plist = STRBUF_INIT, plist2 = STRBUF_INIT;
struct stat st;
+ const char *cmd = "launchctl";
+ get_schedule_cmd(&cmd, NULL);
preamble = "<?xml version=\"1.0\"?>\n"
"<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
"<plist version=\"1.0\">"
@@ -1715,8 +1790,8 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
die_errno(_("could not write '%s'"), filename);
/* bootout might fail if not already running, so ignore */
- launchctl_boot_plist(0, filename, cmd);
- if (launchctl_boot_plist(1, filename, cmd))
+ launchctl_boot_plist(0, filename);
+ if (launchctl_boot_plist(1, filename))
die(_("failed to bootstrap service %s"), filename);
}
@@ -1727,21 +1802,35 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
return 0;
}
-static int launchctl_add_plists(const char *cmd)
+static int launchctl_add_plists(void)
{
const char *exec_path = git_exec_path();
- return launchctl_schedule_plist(exec_path, SCHEDULE_HOURLY, cmd) ||
- launchctl_schedule_plist(exec_path, SCHEDULE_DAILY, cmd) ||
- launchctl_schedule_plist(exec_path, SCHEDULE_WEEKLY, cmd);
+ return launchctl_schedule_plist(exec_path, SCHEDULE_HOURLY) ||
+ launchctl_schedule_plist(exec_path, SCHEDULE_DAILY) ||
+ launchctl_schedule_plist(exec_path, SCHEDULE_WEEKLY);
}
-static int launchctl_update_schedule(int run_maintenance, int fd, const char *cmd)
+static int launchctl_update_schedule(int run_maintenance, int fd)
{
if (run_maintenance)
- return launchctl_add_plists(cmd);
+ return launchctl_add_plists();
else
- return launchctl_remove_plists(cmd);
+ return launchctl_remove_plists();
+}
+
+static int is_schtasks_available(void)
+{
+ const char *cmd = "schtasks";
+ int is_available;
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+#ifdef GIT_WINDOWS_NATIVE
+ return 1;
+#else
+ return 0;
+#endif
}
static char *schtasks_task_name(const char *frequency)
@@ -1751,13 +1840,15 @@ static char *schtasks_task_name(const char *frequency)
return strbuf_detach(&label, NULL);
}
-static int schtasks_remove_task(enum schedule_priority schedule, const char *cmd)
+static int schtasks_remove_task(enum schedule_priority schedule)
{
+ const char *cmd = "schtasks";
int result;
struct strvec args = STRVEC_INIT;
const char *frequency = get_frequency(schedule);
char *name = schtasks_task_name(frequency);
+ get_schedule_cmd(&cmd, NULL);
strvec_split(&args, cmd);
strvec_pushl(&args, "/delete", "/tn", name, "/f", NULL);
@@ -1768,15 +1859,16 @@ static int schtasks_remove_task(enum schedule_priority schedule, const char *cmd
return result;
}
-static int schtasks_remove_tasks(const char *cmd)
+static int schtasks_remove_tasks(void)
{
- return schtasks_remove_task(SCHEDULE_HOURLY, cmd) ||
- schtasks_remove_task(SCHEDULE_DAILY, cmd) ||
- schtasks_remove_task(SCHEDULE_WEEKLY, cmd);
+ return schtasks_remove_task(SCHEDULE_HOURLY) ||
+ schtasks_remove_task(SCHEDULE_DAILY) ||
+ schtasks_remove_task(SCHEDULE_WEEKLY);
}
-static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule, const char *cmd)
+static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule)
{
+ const char *cmd = "schtasks";
int result;
struct child_process child = CHILD_PROCESS_INIT;
const char *xml;
@@ -1785,6 +1877,8 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
char *name = schtasks_task_name(frequency);
struct strbuf tfilename = STRBUF_INIT;
+ get_schedule_cmd(&cmd, NULL);
+
strbuf_addf(&tfilename, "%s/schedule_%s_XXXXXX",
get_git_common_dir(), frequency);
tfile = xmks_tempfile(tfilename.buf);
@@ -1889,28 +1983,52 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
return result;
}
-static int schtasks_schedule_tasks(const char *cmd)
+static int schtasks_schedule_tasks(void)
{
const char *exec_path = git_exec_path();
- return schtasks_schedule_task(exec_path, SCHEDULE_HOURLY, cmd) ||
- schtasks_schedule_task(exec_path, SCHEDULE_DAILY, cmd) ||
- schtasks_schedule_task(exec_path, SCHEDULE_WEEKLY, cmd);
+ return schtasks_schedule_task(exec_path, SCHEDULE_HOURLY) ||
+ schtasks_schedule_task(exec_path, SCHEDULE_DAILY) ||
+ schtasks_schedule_task(exec_path, SCHEDULE_WEEKLY);
}
-static int schtasks_update_schedule(int run_maintenance, int fd, const char *cmd)
+static int schtasks_update_schedule(int run_maintenance, int fd)
{
if (run_maintenance)
- return schtasks_schedule_tasks(cmd);
+ return schtasks_schedule_tasks();
else
- return schtasks_remove_tasks(cmd);
+ return schtasks_remove_tasks();
+}
+
+static int is_crontab_available(void)
+{
+ const char *cmd = "crontab";
+ int is_available;
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+ strvec_split(&child.args, cmd);
+ strvec_push(&child.args, "-l");
+ child.no_stdin = 1;
+ child.no_stdout = 1;
+ child.no_stderr = 1;
+ child.silent_exec_failure = 1;
+
+ if (start_command(&child))
+ return 0;
+ /* Ignore the exit code, as an empty crontab will return an error. */
+ finish_command(&child);
+ return 1;
}
#define BEGIN_LINE "# BEGIN GIT MAINTENANCE SCHEDULE"
#define END_LINE "# END GIT MAINTENANCE SCHEDULE"
-static int crontab_update_schedule(int run_maintenance, int fd, const char *cmd)
+static int crontab_update_schedule(int run_maintenance, int fd)
{
+ const char *cmd = "crontab";
int result = 0;
int in_old_region = 0;
struct child_process crontab_list = CHILD_PROCESS_INIT;
@@ -1918,6 +2036,7 @@ static int crontab_update_schedule(int run_maintenance, int fd, const char *cmd)
FILE *cron_list, *cron_in;
struct strbuf line = STRBUF_INIT;
+ get_schedule_cmd(&cmd, NULL);
strvec_split(&crontab_list.args, cmd);
strvec_push(&crontab_list.args, "-l");
crontab_list.in = -1;
@@ -1994,66 +2113,376 @@ done_editing:
return result;
}
+static int real_is_systemd_timer_available(void)
+{
+ struct child_process child = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&child.args, "systemctl", "--user", "list-timers", NULL);
+ child.no_stdin = 1;
+ child.no_stdout = 1;
+ child.no_stderr = 1;
+ child.silent_exec_failure = 1;
+
+ if (start_command(&child))
+ return 0;
+ if (finish_command(&child))
+ return 0;
+ return 1;
+}
+
+static int is_systemd_timer_available(void)
+{
+ const char *cmd = "systemctl";
+ int is_available;
+
+ if (get_schedule_cmd(&cmd, &is_available))
+ return is_available;
+
+ return real_is_systemd_timer_available();
+}
+
+static char *xdg_config_home_systemd(const char *filename)
+{
+ return xdg_config_home_for("systemd/user", filename);
+}
+
+static int systemd_timer_enable_unit(int enable,
+ enum schedule_priority schedule)
+{
+ const char *cmd = "systemctl";
+ struct child_process child = CHILD_PROCESS_INIT;
+ const char *frequency = get_frequency(schedule);
+
+ /*
+ * Disabling the systemd unit while it is already disabled makes
+ * systemctl print an error.
+ * Let's ignore it since it means we are already in the expected state:
+ * the unit is disabled.
+ *
+ * On the other hand, enabling a systemd unit which is already enabled
+ * produces no error.
+ */
+ if (!enable)
+ child.no_stderr = 1;
+
+ get_schedule_cmd(&cmd, NULL);
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, "--user", enable ? "enable" : "disable",
+ "--now", NULL);
+ strvec_pushf(&child.args, "git-maintenance@%s.timer", frequency);
+
+ if (start_command(&child))
+ return error(_("failed to start systemctl"));
+ if (finish_command(&child))
+ /*
+ * Disabling an already disabled systemd unit makes
+ * systemctl fail.
+ * Let's ignore this failure.
+ *
+ * Enabling an enabled systemd unit doesn't fail.
+ */
+ if (enable)
+ return error(_("failed to run systemctl"));
+ return 0;
+}
+
+static int systemd_timer_delete_unit_templates(void)
+{
+ int ret = 0;
+ char *filename = xdg_config_home_systemd("git-maintenance@.timer");
+ if (unlink(filename) && !is_missing_file_error(errno))
+ ret = error_errno(_("failed to delete '%s'"), filename);
+ FREE_AND_NULL(filename);
+
+ filename = xdg_config_home_systemd("git-maintenance@.service");
+ if (unlink(filename) && !is_missing_file_error(errno))
+ ret = error_errno(_("failed to delete '%s'"), filename);
+
+ free(filename);
+ return ret;
+}
+
+static int systemd_timer_delete_units(void)
+{
+ return systemd_timer_enable_unit(0, SCHEDULE_HOURLY) ||
+ systemd_timer_enable_unit(0, SCHEDULE_DAILY) ||
+ systemd_timer_enable_unit(0, SCHEDULE_WEEKLY) ||
+ systemd_timer_delete_unit_templates();
+}
+
+static int systemd_timer_write_unit_templates(const char *exec_path)
+{
+ char *filename;
+ FILE *file;
+ const char *unit;
+
+ filename = xdg_config_home_systemd("git-maintenance@.timer");
+ if (safe_create_leading_directories(filename)) {
+ error(_("failed to create directories for '%s'"), filename);
+ goto error;
+ }
+ file = fopen_or_warn(filename, "w");
+ if (file == NULL)
+ goto error;
+
+ unit = "# This file was created and is maintained by Git.\n"
+ "# Any edits made in this file might be replaced in the future\n"
+ "# by a Git command.\n"
+ "\n"
+ "[Unit]\n"
+ "Description=Optimize Git repositories data\n"
+ "\n"
+ "[Timer]\n"
+ "OnCalendar=%i\n"
+ "Persistent=true\n"
+ "\n"
+ "[Install]\n"
+ "WantedBy=timers.target\n";
+ if (fputs(unit, file) == EOF) {
+ error(_("failed to write to '%s'"), filename);
+ fclose(file);
+ goto error;
+ }
+ if (fclose(file) == EOF) {
+ error_errno(_("failed to flush '%s'"), filename);
+ goto error;
+ }
+ free(filename);
+
+ filename = xdg_config_home_systemd("git-maintenance@.service");
+ file = fopen_or_warn(filename, "w");
+ if (file == NULL)
+ goto error;
+
+ unit = "# This file was created and is maintained by Git.\n"
+ "# Any edits made in this file might be replaced in the future\n"
+ "# by a Git command.\n"
+ "\n"
+ "[Unit]\n"
+ "Description=Optimize Git repositories data\n"
+ "\n"
+ "[Service]\n"
+ "Type=oneshot\n"
+ "ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%i\n"
+ "LockPersonality=yes\n"
+ "MemoryDenyWriteExecute=yes\n"
+ "NoNewPrivileges=yes\n"
+ "RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6\n"
+ "RestrictNamespaces=yes\n"
+ "RestrictRealtime=yes\n"
+ "RestrictSUIDSGID=yes\n"
+ "SystemCallArchitectures=native\n"
+ "SystemCallFilter=@system-service\n";
+ if (fprintf(file, unit, exec_path, exec_path) < 0) {
+ error(_("failed to write to '%s'"), filename);
+ fclose(file);
+ goto error;
+ }
+ if (fclose(file) == EOF) {
+ error_errno(_("failed to flush '%s'"), filename);
+ goto error;
+ }
+ free(filename);
+ return 0;
+
+error:
+ free(filename);
+ systemd_timer_delete_unit_templates();
+ return -1;
+}
+
+static int systemd_timer_setup_units(void)
+{
+ const char *exec_path = git_exec_path();
+
+ int ret = systemd_timer_write_unit_templates(exec_path) ||
+ systemd_timer_enable_unit(1, SCHEDULE_HOURLY) ||
+ systemd_timer_enable_unit(1, SCHEDULE_DAILY) ||
+ systemd_timer_enable_unit(1, SCHEDULE_WEEKLY);
+ if (ret)
+ systemd_timer_delete_units();
+ return ret;
+}
+
+static int systemd_timer_update_schedule(int run_maintenance, int fd)
+{
+ if (run_maintenance)
+ return systemd_timer_setup_units();
+ else
+ return systemd_timer_delete_units();
+}
+
+enum scheduler {
+ SCHEDULER_INVALID = -1,
+ SCHEDULER_AUTO,
+ SCHEDULER_CRON,
+ SCHEDULER_SYSTEMD,
+ SCHEDULER_LAUNCHCTL,
+ SCHEDULER_SCHTASKS,
+};
+
+static const struct {
+ const char *name;
+ int (*is_available)(void);
+ int (*update_schedule)(int run_maintenance, int fd);
+} scheduler_fn[] = {
+ [SCHEDULER_CRON] = {
+ .name = "crontab",
+ .is_available = is_crontab_available,
+ .update_schedule = crontab_update_schedule,
+ },
+ [SCHEDULER_SYSTEMD] = {
+ .name = "systemctl",
+ .is_available = is_systemd_timer_available,
+ .update_schedule = systemd_timer_update_schedule,
+ },
+ [SCHEDULER_LAUNCHCTL] = {
+ .name = "launchctl",
+ .is_available = is_launchctl_available,
+ .update_schedule = launchctl_update_schedule,
+ },
+ [SCHEDULER_SCHTASKS] = {
+ .name = "schtasks",
+ .is_available = is_schtasks_available,
+ .update_schedule = schtasks_update_schedule,
+ },
+};
+
+static enum scheduler parse_scheduler(const char *value)
+{
+ if (!value)
+ return SCHEDULER_INVALID;
+ else if (!strcasecmp(value, "auto"))
+ return SCHEDULER_AUTO;
+ else if (!strcasecmp(value, "cron") || !strcasecmp(value, "crontab"))
+ return SCHEDULER_CRON;
+ else if (!strcasecmp(value, "systemd") ||
+ !strcasecmp(value, "systemd-timer"))
+ return SCHEDULER_SYSTEMD;
+ else if (!strcasecmp(value, "launchctl"))
+ return SCHEDULER_LAUNCHCTL;
+ else if (!strcasecmp(value, "schtasks"))
+ return SCHEDULER_SCHTASKS;
+ else
+ return SCHEDULER_INVALID;
+}
+
+static int maintenance_opt_scheduler(const struct option *opt, const char *arg,
+ int unset)
+{
+ enum scheduler *scheduler = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+
+ *scheduler = parse_scheduler(arg);
+ if (*scheduler == SCHEDULER_INVALID)
+ return error(_("unrecognized --scheduler argument '%s'"), arg);
+ return 0;
+}
+
+struct maintenance_start_opts {
+ enum scheduler scheduler;
+};
+
+static enum scheduler resolve_scheduler(enum scheduler scheduler)
+{
+ if (scheduler != SCHEDULER_AUTO)
+ return scheduler;
+
#if defined(__APPLE__)
-static const char platform_scheduler[] = "launchctl";
+ return SCHEDULER_LAUNCHCTL;
+
#elif defined(GIT_WINDOWS_NATIVE)
-static const char platform_scheduler[] = "schtasks";
+ return SCHEDULER_SCHTASKS;
+
+#elif defined(__linux__)
+ if (is_systemd_timer_available())
+ return SCHEDULER_SYSTEMD;
+ else if (is_crontab_available())
+ return SCHEDULER_CRON;
+ else
+ die(_("neither systemd timers nor crontab are available"));
+
#else
-static const char platform_scheduler[] = "crontab";
+ return SCHEDULER_CRON;
#endif
+}
-static int update_background_schedule(int enable)
+static void validate_scheduler(enum scheduler scheduler)
{
- int result;
- const char *scheduler = platform_scheduler;
- const char *cmd = scheduler;
- char *testing;
+ if (scheduler == SCHEDULER_INVALID)
+ BUG("invalid scheduler");
+ if (scheduler == SCHEDULER_AUTO)
+ BUG("resolve_scheduler should have been called before");
+
+ if (!scheduler_fn[scheduler].is_available())
+ die(_("%s scheduler is not available"),
+ scheduler_fn[scheduler].name);
+}
+
+static int update_background_schedule(const struct maintenance_start_opts *opts,
+ int enable)
+{
+ unsigned int i;
+ int result = 0;
struct lock_file lk;
char *lock_path = xstrfmt("%s/schedule", the_repository->objects->odb->path);
- testing = xstrdup_or_null(getenv("GIT_TEST_MAINT_SCHEDULER"));
- if (testing) {
- char *sep = strchr(testing, ':');
- if (!sep)
- die("GIT_TEST_MAINT_SCHEDULER unparseable: %s", testing);
- *sep = '\0';
- scheduler = testing;
- cmd = sep + 1;
+ if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) {
+ free(lock_path);
+ return error(_("another process is scheduling background maintenance"));
}
- if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) {
- result = error(_("another process is scheduling background maintenance"));
- goto cleanup;
+ for (i = 1; i < ARRAY_SIZE(scheduler_fn); i++) {
+ if (enable && opts->scheduler == i)
+ continue;
+ if (!scheduler_fn[i].is_available())
+ continue;
+ scheduler_fn[i].update_schedule(0, get_lock_file_fd(&lk));
}
- if (!strcmp(scheduler, "launchctl"))
- result = launchctl_update_schedule(enable, get_lock_file_fd(&lk), cmd);
- else if (!strcmp(scheduler, "schtasks"))
- result = schtasks_update_schedule(enable, get_lock_file_fd(&lk), cmd);
- else if (!strcmp(scheduler, "crontab"))
- result = crontab_update_schedule(enable, get_lock_file_fd(&lk), cmd);
- else
- die("unknown background scheduler: %s", scheduler);
+ if (enable)
+ result = scheduler_fn[opts->scheduler].update_schedule(
+ 1, get_lock_file_fd(&lk));
rollback_lock_file(&lk);
-cleanup:
free(lock_path);
- free(testing);
return result;
}
-static int maintenance_start(void)
+static const char *const builtin_maintenance_start_usage[] = {
+ N_("git maintenance start [--scheduler=<scheduler>]"),
+ NULL
+};
+
+static int maintenance_start(int argc, const char **argv, const char *prefix)
{
+ struct maintenance_start_opts opts = { 0 };
+ struct option options[] = {
+ OPT_CALLBACK_F(
+ 0, "scheduler", &opts.scheduler, N_("scheduler"),
+ N_("scheduler to trigger git maintenance run"),
+ PARSE_OPT_NONEG, maintenance_opt_scheduler),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_maintenance_start_usage, 0);
+ if (argc)
+ usage_with_options(builtin_maintenance_start_usage, options);
+
+ opts.scheduler = resolve_scheduler(opts.scheduler);
+ validate_scheduler(opts.scheduler);
+
if (maintenance_register())
warning(_("failed to add repo to global config"));
-
- return update_background_schedule(1);
+ return update_background_schedule(&opts, 1);
}
static int maintenance_stop(void)
{
- return update_background_schedule(0);
+ return update_background_schedule(NULL, 0);
}
static const char builtin_maintenance_usage[] = N_("git maintenance <subcommand> [<options>]");
@@ -2067,7 +2496,7 @@ int cmd_maintenance(int argc, const char **argv, const char *prefix)
if (!strcmp(argv[1], "run"))
return maintenance_run(argc - 1, argv + 1, prefix);
if (!strcmp(argv[1], "start"))
- return maintenance_start();
+ return maintenance_start(argc - 1, argv + 1, prefix);
if (!strcmp(argv[1], "stop"))
return maintenance_stop();
if (!strcmp(argv[1], "register"))
diff --git a/builtin/grep.c b/builtin/grep.c
index 7d2f8e5adb..8af5249a7b 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -65,6 +65,9 @@ static int todo_done;
/* Has all work items been added? */
static int all_work_added;
+static struct repository **repos_to_free;
+static size_t repos_to_free_nr, repos_to_free_alloc;
+
/* This lock protects all the variables above. */
static pthread_mutex_t grep_mutex;
@@ -168,6 +171,19 @@ static void work_done(struct work_item *w)
grep_unlock();
}
+static void free_repos(void)
+{
+ int i;
+
+ for (i = 0; i < repos_to_free_nr; i++) {
+ repo_clear(repos_to_free[i]);
+ free(repos_to_free[i]);
+ }
+ FREE_AND_NULL(repos_to_free);
+ repos_to_free_nr = 0;
+ repos_to_free_alloc = 0;
+}
+
static void *run(void *arg)
{
int hit = 0;
@@ -333,7 +349,7 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
struct grep_source gs;
grep_source_name(opt, filename, tree_name_len, &pathbuf);
- grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
+ grep_source_init_oid(&gs, pathbuf.buf, path, oid, opt->repo);
strbuf_release(&pathbuf);
if (num_threads > 1) {
@@ -359,7 +375,7 @@ static int grep_file(struct grep_opt *opt, const char *filename)
struct grep_source gs;
grep_source_name(opt, filename, 0, &buf);
- grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
+ grep_source_init_file(&gs, buf.buf, filename);
strbuf_release(&buf);
if (num_threads > 1) {
@@ -415,19 +431,21 @@ static int grep_submodule(struct grep_opt *opt,
const struct object_id *oid,
const char *filename, const char *path, int cached)
{
- struct repository subrepo;
+ struct repository *subrepo;
struct repository *superproject = opt->repo;
- const struct submodule *sub;
struct grep_opt subopt;
- int hit;
-
- sub = submodule_from_path(superproject, null_oid(), path);
+ int hit = 0;
if (!is_submodule_active(superproject, path))
return 0;
- if (repo_submodule_init(&subrepo, superproject, sub))
+ subrepo = xmalloc(sizeof(*subrepo));
+ if (repo_submodule_init(subrepo, superproject, path, null_oid())) {
+ free(subrepo);
return 0;
+ }
+ ALLOC_GROW(repos_to_free, repos_to_free_nr + 1, repos_to_free_alloc);
+ repos_to_free[repos_to_free_nr++] = subrepo;
/*
* NEEDSWORK: repo_read_gitmodules() might call
@@ -438,53 +456,49 @@ static int grep_submodule(struct grep_opt *opt,
* subrepo's odbs to the in-memory alternates list.
*/
obj_read_lock();
- repo_read_gitmodules(&subrepo, 0);
+ repo_read_gitmodules(subrepo, 0);
/*
- * NEEDSWORK: This adds the submodule's object directory to the list of
- * alternates for the single in-memory object store. This has some bad
- * consequences for memory (processed objects will never be freed) and
- * performance (this increases the number of pack files git has to pay
- * attention to, to the sum of the number of pack files in all the
- * repositories processed so far). This can be removed once the object
- * store is no longer global and instead is a member of the repository
- * object.
+ * All code paths tested by test code no longer need submodule ODBs to
+ * be added as alternates, but add them to the list just in case.
+ * Submodule ODBs added through add_submodule_odb_by_path() will be
+ * lazily registered as alternates when needed (and except in an
+ * unexpected code interaction, it won't be needed).
*/
- add_to_alternates_memory(subrepo.objects->odb->path);
+ add_submodule_odb_by_path(subrepo->objects->odb->path);
obj_read_unlock();
memcpy(&subopt, opt, sizeof(subopt));
- subopt.repo = &subrepo;
+ subopt.repo = subrepo;
if (oid) {
- struct object *object;
+ enum object_type object_type;
struct tree_desc tree;
void *data;
unsigned long size;
struct strbuf base = STRBUF_INIT;
obj_read_lock();
- object = parse_object_or_die(oid, NULL);
+ object_type = oid_object_info(subrepo, oid, NULL);
obj_read_unlock();
- data = read_object_with_reference(&subrepo,
- &object->oid, tree_type,
+ data = read_object_with_reference(subrepo,
+ oid, tree_type,
&size, NULL);
if (!data)
- die(_("unable to read tree (%s)"), oid_to_hex(&object->oid));
+ die(_("unable to read tree (%s)"), oid_to_hex(oid));
strbuf_addstr(&base, filename);
strbuf_addch(&base, '/');
init_tree_desc(&tree, data, size);
hit = grep_tree(&subopt, pathspec, &tree, &base, base.len,
- object->type == OBJ_COMMIT);
+ object_type == OBJ_COMMIT);
strbuf_release(&base);
free(data);
} else {
hit = grep_cache(&subopt, pathspec, cached);
}
- repo_clear(&subrepo);
return hit;
}
@@ -1182,5 +1196,6 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
run_pager(&opt, prefix);
clear_pathspec(&pathspec);
free_grep_patterns(&opt);
+ free_repos();
return !hit;
}
diff --git a/builtin/hash-object.c b/builtin/hash-object.c
index 2e6e2ddd0c..c7b3ad74c6 100644
--- a/builtin/hash-object.c
+++ b/builtin/hash-object.c
@@ -115,7 +115,7 @@ int cmd_hash_object(int argc, const char **argv, const char *prefix)
prefix = setup_git_directory_gently(&nongit);
if (vpath && prefix)
- vpath = xstrdup(prefix_filename(prefix, vpath));
+ vpath = prefix_filename(prefix, vpath);
git_config(git_default_config, NULL);
diff --git a/builtin/help.c b/builtin/help.c
index 0a40d8cf09..75cd2fb407 100644
--- a/builtin/help.c
+++ b/builtin/help.c
@@ -498,11 +498,14 @@ static void get_html_page_path(struct strbuf *page_path, const char *page)
if (!html_path)
html_path = to_free = system_path(GIT_HTML_PATH);
- /* Check that we have a git documentation directory. */
+ /*
+ * Check that the page we're looking for exists.
+ */
if (!strstr(html_path, "://")) {
- if (stat(mkpath("%s/git.html", html_path), &st)
+ if (stat(mkpath("%s/%s.html", html_path, page), &st)
|| !S_ISREG(st.st_mode))
- die("'%s': not a documentation directory.", html_path);
+ die("'%s/%s.html': documentation file not found.",
+ html_path, page);
}
strbuf_init(page_path, 0);
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 6cc4890217..7ce69c087e 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -122,6 +122,7 @@ static int strict;
static int do_fsck_object;
static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static int verbose;
+static const char *progress_title;
static int show_resolving_progress;
static int show_stat;
static int check_self_contained_and_connected;
@@ -187,9 +188,7 @@ static void init_thread(void)
pthread_key_create(&key, NULL);
CALLOC_ARRAY(thread_data, nr_threads);
for (i = 0; i < nr_threads; i++) {
- thread_data[i].pack_fd = open(curr_pack, O_RDONLY);
- if (thread_data[i].pack_fd == -1)
- die_errno(_("unable to open %s"), curr_pack);
+ thread_data[i].pack_fd = xopen(curr_pack, O_RDONLY);
}
threads_active = 1;
@@ -1153,6 +1152,7 @@ static void parse_pack_objects(unsigned char *hash)
if (verbose)
progress = start_progress(
+ progress_title ? progress_title :
from_stdin ? _("Receiving objects") : _("Indexing objects"),
nr_objects);
for (i = 0; i < nr_objects; i++) {
@@ -1477,6 +1477,22 @@ static void write_special_file(const char *suffix, const char *msg,
strbuf_release(&name_buf);
}
+static void rename_tmp_packfile(const char **final_name,
+ const char *curr_name,
+ struct strbuf *name, unsigned char *hash,
+ const char *ext, int make_read_only_if_same)
+{
+ if (*final_name != curr_name) {
+ if (!*final_name)
+ *final_name = odb_pack_name(name, hash, ext);
+ if (finalize_object_file(curr_name, *final_name))
+ die(_("unable to rename temporary '*.%s' file to '%s"),
+ ext, *final_name);
+ } else if (make_read_only_if_same) {
+ chmod(*final_name, 0444);
+ }
+}
+
static void final(const char *final_pack_name, const char *curr_pack_name,
const char *final_index_name, const char *curr_index_name,
const char *final_rev_index_name, const char *curr_rev_index_name,
@@ -1505,31 +1521,13 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
write_special_file("promisor", promisor_msg, final_pack_name,
hash, NULL);
- if (final_pack_name != curr_pack_name) {
- if (!final_pack_name)
- final_pack_name = odb_pack_name(&pack_name, hash, "pack");
- if (finalize_object_file(curr_pack_name, final_pack_name))
- die(_("cannot store pack file"));
- } else if (from_stdin)
- chmod(final_pack_name, 0444);
-
- if (final_index_name != curr_index_name) {
- if (!final_index_name)
- final_index_name = odb_pack_name(&index_name, hash, "idx");
- if (finalize_object_file(curr_index_name, final_index_name))
- die(_("cannot store index file"));
- } else
- chmod(final_index_name, 0444);
-
- if (curr_rev_index_name) {
- if (final_rev_index_name != curr_rev_index_name) {
- if (!final_rev_index_name)
- final_rev_index_name = odb_pack_name(&rev_index_name, hash, "rev");
- if (finalize_object_file(curr_rev_index_name, final_rev_index_name))
- die(_("cannot store reverse index file"));
- } else
- chmod(final_rev_index_name, 0444);
- }
+ rename_tmp_packfile(&final_pack_name, curr_pack_name, &pack_name,
+ hash, "pack", from_stdin);
+ if (curr_rev_index_name)
+ rename_tmp_packfile(&final_rev_index_name, curr_rev_index_name,
+ &rev_index_name, hash, "rev", 1);
+ rename_tmp_packfile(&final_index_name, curr_index_name, &index_name,
+ hash, "idx", 1);
if (do_fsck_object) {
struct packed_git *p;
@@ -1802,6 +1800,10 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
input_len = sizeof(*hdr);
} else if (!strcmp(arg, "-v")) {
verbose = 1;
+ } else if (!strcmp(arg, "--progress-title")) {
+ if (progress_title || (i+1) >= argc)
+ usage(index_pack_usage);
+ progress_title = argv[++i];
} else if (!strcmp(arg, "--show-resolving-progress")) {
show_resolving_progress = 1;
} else if (!strcmp(arg, "--report-end-of-input")) {
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index 29a26ad8ae..a2000ed6bf 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -209,10 +209,8 @@ static void show_submodule(struct repository *superproject,
struct dir_struct *dir, const char *path)
{
struct repository subrepo;
- const struct submodule *sub = submodule_from_path(superproject,
- null_oid(), path);
- if (repo_submodule_init(&subrepo, superproject, sub))
+ if (repo_submodule_init(&subrepo, superproject, path, null_oid()))
return;
if (repo_read_index(&subrepo) < 0)
@@ -614,7 +612,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
struct option builtin_ls_files_options[] = {
/* Think twice before adding "--nul" synonym to this */
OPT_SET_INT('z', NULL, &line_terminator,
- N_("paths are separated with NUL character"), '\0'),
+ N_("separate paths with the NUL character"), '\0'),
OPT_BOOL('t', NULL, &show_tag,
N_("identify the file status with tags")),
OPT_BOOL('v', NULL, &show_valid_bit,
@@ -651,7 +649,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
N_("skip files matching pattern"),
PARSE_OPT_NONEG, option_parse_exclude),
OPT_CALLBACK_F('X', "exclude-from", &dir, N_("file"),
- N_("exclude patterns are read from <file>"),
+ N_("read exclude patterns from <file>"),
PARSE_OPT_NONEG, option_parse_exclude_from),
OPT_STRING(0, "exclude-per-directory", &dir.exclude_per_dir, N_("file"),
N_("read additional per-directory exclude patterns in <file>")),
diff --git a/builtin/merge.c b/builtin/merge.c
index d2c52b6e97..cc4a910c69 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -13,6 +13,7 @@
#include "builtin.h"
#include "lockfile.h"
#include "run-command.h"
+#include "hook.h"
#include "diff.h"
#include "diff-merges.h"
#include "refs.h"
@@ -469,7 +470,6 @@ static void finish(struct commit *head_commit,
* We ignore errors in 'gc --auto', since the
* user should see them.
*/
- close_object_store(the_repository->objects);
run_auto_maintenance(verbosity < 0);
}
}
@@ -681,6 +681,7 @@ static int read_tree_trivial(struct object_id *common, struct object_id *head,
opts.verbose_update = 1;
opts.trivial_merges_only = 1;
opts.merge = 1;
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
trees[nr_trees] = parse_tree_indirect(common);
if (!trees[nr_trees++])
return -1;
@@ -849,7 +850,7 @@ static void prepare_to_commit(struct commit_list *remoteheads)
* and write it out as a tree. We must do this before we invoke
* the editor and after we invoke run_status above.
*/
- if (find_hook("pre-merge-commit"))
+ if (hook_exists("pre-merge-commit"))
discard_cache();
read_cache_from(index_file);
strbuf_addbuf(&msg, &merge_msg);
@@ -1276,6 +1277,9 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
if (argc == 2 && !strcmp(argv[1], "-h"))
usage_with_options(builtin_merge_usage, builtin_merge_options);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
/*
* Check if we are _not_ on a detached HEAD, i.e. if there is a
* current branch.
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 649aa5f9ab..6426bbdace 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -52,7 +52,6 @@ static struct opts_multi_pack_index {
static struct option common_opts[] = {
OPT_FILENAME(0, "object-dir", &opts.object_dir,
N_("object directory containing set of packfile and pack-index pairs")),
- OPT_BIT(0, "progress", &opts.flags, N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
@@ -61,6 +60,23 @@ static struct option *add_common_options(struct option *prev)
return parse_options_concat(common_opts, prev);
}
+static int git_multi_pack_index_write_config(const char *var, const char *value,
+ void *cb)
+{
+ if (!strcmp(var, "pack.writebitmaphashcache")) {
+ if (git_config_bool(var, value))
+ opts.flags |= MIDX_WRITE_BITMAP_HASH_CACHE;
+ else
+ opts.flags &= ~MIDX_WRITE_BITMAP_HASH_CACHE;
+ }
+
+ /*
+ * We should never make a fall-back call to 'git_default_config', since
+ * this was already called in 'cmd_multi_pack_index()'.
+ */
+ return 0;
+}
+
static int cmd_multi_pack_index_write(int argc, const char **argv)
{
struct option *options;
@@ -68,13 +84,23 @@ static int cmd_multi_pack_index_write(int argc, const char **argv)
OPT_STRING(0, "preferred-pack", &opts.preferred_pack,
N_("preferred-pack"),
N_("pack for reuse when computing a multi-pack bitmap")),
+ OPT_BIT(0, "bitmap", &opts.flags, N_("write multi-pack bitmap"),
+ MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX),
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
+ opts.flags |= MIDX_WRITE_BITMAP_HASH_CACHE;
+
+ git_config(git_multi_pack_index_write_config, NULL);
+
options = add_common_options(builtin_multi_pack_index_write_options);
trace2_cmd_mode(argv[0]);
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, NULL,
options, builtin_multi_pack_index_write_usage,
PARSE_OPT_KEEP_UNKNOWN);
@@ -90,10 +116,18 @@ static int cmd_multi_pack_index_write(int argc, const char **argv)
static int cmd_multi_pack_index_verify(int argc, const char **argv)
{
- struct option *options = common_opts;
+ struct option *options;
+ static struct option builtin_multi_pack_index_verify_options[] = {
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+ };
+ options = add_common_options(builtin_multi_pack_index_verify_options);
trace2_cmd_mode(argv[0]);
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, NULL,
options, builtin_multi_pack_index_verify_usage,
PARSE_OPT_KEEP_UNKNOWN);
@@ -106,10 +140,18 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv)
static int cmd_multi_pack_index_expire(int argc, const char **argv)
{
- struct option *options = common_opts;
+ struct option *options;
+ static struct option builtin_multi_pack_index_expire_options[] = {
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+ };
+ options = add_common_options(builtin_multi_pack_index_expire_options);
trace2_cmd_mode(argv[0]);
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, NULL,
options, builtin_multi_pack_index_expire_usage,
PARSE_OPT_KEEP_UNKNOWN);
@@ -126,6 +168,8 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv)
static struct option builtin_multi_pack_index_repack_options[] = {
OPT_MAGNITUDE(0, "batch-size", &opts.batch_size,
N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")),
+ OPT_BIT(0, "progress", &opts.flags,
+ N_("force progress reporting"), MIDX_PROGRESS),
OPT_END(),
};
@@ -133,6 +177,8 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv)
trace2_cmd_mode(argv[0]);
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, NULL,
options,
builtin_multi_pack_index_repack_usage,
@@ -154,8 +200,6 @@ int cmd_multi_pack_index(int argc, const char **argv,
git_config(git_default_config, NULL);
- if (isatty(2))
- opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, prefix,
builtin_multi_pack_index_options,
builtin_multi_pack_index_usage,
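With --progress moved from the common option list into each subcommand, the MIDX_* flag bits are layered in a fixed order so the most specific source wins: the compiled-in default, then pack.writebitmaphashcache via the config callback, then the isatty(2) heuristic, and finally --[no-]progress and --bitmap, parsed last. Condensed from the write subcommand above:
	opts.flags |= MIDX_WRITE_BITMAP_HASH_CACHE;		/* default on */
	git_config(git_multi_pack_index_write_config, NULL);	/* config may clear it */
	if (isatty(2))
		opts.flags |= MIDX_PROGRESS;			/* progress when stderr is a tty */
	argc = parse_options(argc, argv, NULL, options,
			     builtin_multi_pack_index_write_usage,
			     PARSE_OPT_KEEP_UNKNOWN);		/* command-line flags win */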
diff --git a/builtin/mv.c b/builtin/mv.c
index c2f96c8e89..83a465ba83 100644
--- a/builtin/mv.c
+++ b/builtin/mv.c
@@ -118,21 +118,23 @@ static int index_range_of_same_dir(const char *src, int length,
int cmd_mv(int argc, const char **argv, const char *prefix)
{
int i, flags, gitmodules_modified = 0;
- int verbose = 0, show_only = 0, force = 0, ignore_errors = 0;
+ int verbose = 0, show_only = 0, force = 0, ignore_errors = 0, ignore_sparse = 0;
struct option builtin_mv_options[] = {
OPT__VERBOSE(&verbose, N_("be verbose")),
OPT__DRY_RUN(&show_only, N_("dry run")),
OPT__FORCE(&force, N_("force move/rename even if target exists"),
PARSE_OPT_NOCOMPLETE),
OPT_BOOL('k', NULL, &ignore_errors, N_("skip move/rename errors")),
+ OPT_BOOL(0, "sparse", &ignore_sparse, N_("allow updating entries outside of the sparse-checkout cone")),
OPT_END(),
};
const char **source, **destination, **dest_path, **submodule_gitfile;
- enum update_mode { BOTH = 0, WORKING_DIRECTORY, INDEX } *modes;
+ enum update_mode { BOTH = 0, WORKING_DIRECTORY, INDEX, SPARSE } *modes;
struct stat st;
struct string_list src_for_dst = STRING_LIST_INIT_NODUP;
struct lock_file lock_file = LOCK_INIT;
struct cache_entry *ce;
+ struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP;
git_config(git_default_config, NULL);
@@ -176,14 +178,17 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
const char *src = source[i], *dst = destination[i];
int length, src_is_dir;
const char *bad = NULL;
+ int skip_sparse = 0;
if (show_only)
printf(_("Checking rename of '%s' to '%s'\n"), src, dst);
length = strlen(src);
- if (lstat(src, &st) < 0)
- bad = _("bad source");
- else if (!strncmp(src, dst, length) &&
+ if (lstat(src, &st) < 0) {
+ /* only error if existence is expected. */
+ if (modes[i] != SPARSE)
+ bad = _("bad source");
+ } else if (!strncmp(src, dst, length) &&
(dst[length] == 0 || dst[length] == '/')) {
bad = _("can not move directory into itself");
} else if ((src_is_dir = S_ISDIR(st.st_mode))
@@ -212,11 +217,12 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
dst_len = strlen(dst);
for (j = 0; j < last - first; j++) {
- const char *path = active_cache[first + j]->name;
+ const struct cache_entry *ce = active_cache[first + j];
+ const char *path = ce->name;
source[argc + j] = path;
destination[argc + j] =
prefix_path(dst, dst_len, path + length + 1);
- modes[argc + j] = INDEX;
+ modes[argc + j] = ce_skip_worktree(ce) ? SPARSE : INDEX;
submodule_gitfile[argc + j] = NULL;
}
argc += last - first;
@@ -244,14 +250,36 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
bad = _("multiple sources for the same target");
else if (is_dir_sep(dst[strlen(dst) - 1]))
bad = _("destination directory does not exist");
- else
+ else {
+ /*
+ * We check if the paths are in the sparse-checkout
+ * definition as a very final check, since that
+ * allows us to point the user to the --sparse
+ * option as a way to have a successful run.
+ */
+ if (!ignore_sparse &&
+ !path_in_sparse_checkout(src, &the_index)) {
+ string_list_append(&only_match_skip_worktree, src);
+ skip_sparse = 1;
+ }
+ if (!ignore_sparse &&
+ !path_in_sparse_checkout(dst, &the_index)) {
+ string_list_append(&only_match_skip_worktree, dst);
+ skip_sparse = 1;
+ }
+
+ if (skip_sparse)
+ goto remove_entry;
+
string_list_insert(&src_for_dst, dst);
+ }
if (!bad)
continue;
if (!ignore_errors)
die(_("%s, source=%s, destination=%s"),
bad, src, dst);
+remove_entry:
if (--argc > 0) {
int n = argc - i;
memmove(source + i, source + i + 1,
@@ -266,6 +294,12 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
}
}
+ if (only_match_skip_worktree.nr) {
+ advise_on_updating_sparse_paths(&only_match_skip_worktree);
+ if (!ignore_errors)
+ return 1;
+ }
+
for (i = 0; i < argc; i++) {
const char *src = source[i], *dst = destination[i];
enum update_mode mode = modes[i];
@@ -274,7 +308,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
printf(_("Renaming %s to %s\n"), src, dst);
if (show_only)
continue;
- if (mode != INDEX && rename(src, dst) < 0) {
+ if (mode != INDEX && mode != SPARSE && rename(src, dst) < 0) {
if (ignore_errors)
continue;
die_errno(_("renaming '%s' failed"), src);
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index ec8503563a..1a3dd445f8 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1124,6 +1124,11 @@ static void write_reused_pack(struct hashfile *f)
break;
offset += ewah_bit_ctz64(word >> offset);
+ /*
+ * Can use bit positions directly, even for MIDX
+ * bitmaps. See comment in try_partial_reuse()
+ * for why.
+ */
write_reused_pack_one(pos + offset, f, &w_curs);
display_progress(progress_state, ++written);
}
@@ -1217,6 +1222,7 @@ static void write_pack_file(void)
if (!pack_to_stdout) {
struct stat st;
struct strbuf tmpname = STRBUF_INIT;
+ char *idx_tmp_name = NULL;
/*
* Packs are runtime accessed in their mtime
@@ -1237,7 +1243,8 @@ static void write_pack_file(void)
warning_errno(_("failed utime() on %s"), pack_tmp_name);
}
- strbuf_addf(&tmpname, "%s-", base_name);
+ strbuf_addf(&tmpname, "%s-%s.", base_name,
+ hash_to_hex(hash));
if (write_bitmap_index) {
bitmap_writer_set_checksum(hash);
@@ -1245,23 +1252,29 @@ static void write_pack_file(void)
&to_pack, written_list, nr_written);
}
- finish_tmp_packfile(&tmpname, pack_tmp_name,
+ stage_tmp_packfiles(&tmpname, pack_tmp_name,
written_list, nr_written,
- &pack_idx_opts, hash);
+ &pack_idx_opts, hash, &idx_tmp_name);
if (write_bitmap_index) {
- strbuf_addf(&tmpname, "%s.bitmap", hash_to_hex(hash));
+ size_t tmpname_len = tmpname.len;
+ strbuf_addstr(&tmpname, "bitmap");
stop_progress(&progress_state);
bitmap_writer_show_progress(progress);
bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
- bitmap_writer_build(&to_pack);
+ if (bitmap_writer_build(&to_pack) < 0)
+ die(_("failed to write bitmap index"));
bitmap_writer_finish(written_list, nr_written,
tmpname.buf, write_bitmap_options);
write_bitmap_index = 0;
+ strbuf_setlen(&tmpname, tmpname_len);
}
+ rename_tmp_packfile_idx(&tmpname, &idx_tmp_name);
+
+ free(idx_tmp_name);
strbuf_release(&tmpname);
free(pack_tmp_name);
puts(hash_to_hex(hash));
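finish_tmp_packfile() is effectively split here: stage_tmp_packfiles() writes the pack and its index but leaves the .idx under a temporary name (returned via idx_tmp_name), and rename_tmp_packfile_idx() moves it onto the shared "pack-<hash>." stem only after the optional .bitmap has been written. The apparent intent, matching the exts[] reordering in builtin/repack.c below, is that the .idx, the file that makes a pack discoverable, appears last. Condensed:
	strbuf_addf(&tmpname, "%s-%s.", base_name, hash_to_hex(hash));
	stage_tmp_packfiles(&tmpname, pack_tmp_name, written_list, nr_written,
			    &pack_idx_opts, hash, &idx_tmp_name);
	/* ... write "<tmpname>bitmap" here when bitmaps are requested ... */
	rename_tmp_packfile_idx(&tmpname, &idx_tmp_name);	/* .idx shows up last */
	free(idx_tmp_name);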
diff --git a/builtin/prune.c b/builtin/prune.c
index 02c6ab7cba..485c9a3c56 100644
--- a/builtin/prune.c
+++ b/builtin/prune.c
@@ -143,7 +143,6 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
expire = TIME_MAX;
save_commit_buffer = 0;
read_replace_refs = 0;
- ref_paranoia = 1;
repo_init_revisions(the_repository, &revs, prefix);
argc = parse_options(argc, argv, prefix, options, prune_usage, 0);
diff --git a/builtin/pull.c b/builtin/pull.c
index b311ea6b9d..cf6c56e2d8 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -26,6 +26,7 @@
#include "wt-status.h"
#include "commit-reach.h"
#include "sequencer.h"
+#include "packfile.h"
/**
* Parses the value of --rebase. If value is a false value, returns
@@ -577,7 +578,7 @@ static int run_fetch(const char *repo, const char **refspecs)
strvec_pushv(&args, refspecs);
} else if (*refspecs)
BUG("refspecs without repo?");
- ret = run_command_v_opt(args.v, RUN_GIT_CMD);
+ ret = run_command_v_opt(args.v, RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE);
strvec_clear(&args);
return ret;
}
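This hunk, the cmd_merge() hunk above, and the receive-pack.c hunk below belong to the same cleanup: instead of calling close_object_store() by hand before spawning a child that may repack or prune, the caller asks the run-command API to do it. Both spellings, condensed from the hunks:
	/* as a run_command_v_opt() flag ... */
	ret = run_command_v_opt(args.v, RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE);

	/* ... or as a child_process field */
	struct child_process proc = CHILD_PROCESS_INIT;
	proc.git_cmd = 1;
	proc.close_object_store = 1;
	if (!start_command(&proc))
		finish_command(&proc);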
diff --git a/builtin/read-tree.c b/builtin/read-tree.c
index 485e7b0479..2109c4c9e5 100644
--- a/builtin/read-tree.c
+++ b/builtin/read-tree.c
@@ -38,7 +38,7 @@ static int list_tree(struct object_id *oid)
}
static const char * const read_tree_usage[] = {
- N_("git read-tree [(-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>) [-u [--exclude-per-directory=<gitignore>] | -i]] [--no-sparse-checkout] [--index-output=<file>] (--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])"),
+ N_("git read-tree [(-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>) [-u | -i]] [--no-sparse-checkout] [--index-output=<file>] (--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])"),
NULL
};
@@ -53,24 +53,16 @@ static int index_output_cb(const struct option *opt, const char *arg,
static int exclude_per_directory_cb(const struct option *opt, const char *arg,
int unset)
{
- struct dir_struct *dir;
struct unpack_trees_options *opts;
BUG_ON_OPT_NEG(unset);
opts = (struct unpack_trees_options *)opt->value;
- if (opts->dir)
- die("more than one --exclude-per-directory given.");
-
- dir = xcalloc(1, sizeof(*opts->dir));
- dir->flags |= DIR_SHOW_IGNORED;
- dir->exclude_per_dir = arg;
- opts->dir = dir;
- /* We do not need to nor want to do read-directory
- * here; we are merely interested in reusing the
- * per directory ignore stack mechanism.
- */
+ if (!opts->update)
+ die("--exclude-per-directory is meaningless unless -u");
+ if (strcmp(arg, ".gitignore"))
+ die("--exclude-per-directory argument must be .gitignore");
return 0;
}
@@ -174,6 +166,9 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
if (1 < opts.merge + opts.reset + prefix_set)
die("Which one? -m, --reset, or --prefix?");
+ if (opts.reset)
+ opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED;
+
/*
* NEEDSWORK
*
@@ -209,8 +204,9 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
if ((opts.update || opts.index_only) && !opts.merge)
die("%s is meaningless without -m, --reset, or --prefix",
opts.update ? "-u" : "-i");
- if ((opts.dir && !opts.update))
- die("--exclude-per-directory is meaningless unless -u");
+ if (opts.update && !opts.reset)
+ opts.preserve_ignored = 0;
+ /* otherwise, opts.preserve_ignored is irrelevant */
if (opts.merge && !opts.index_only)
setup_work_tree();
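The old boolean opts.reset is being split into explicit modes: UNPACK_RESET_PROTECT_UNTRACKED resets but refuses to clobber untracked files, UNPACK_RESET_OVERWRITE_UNTRACKED keeps the historical 'reset --hard' behaviour, and the separate preserve_ignored bit says whether ignored files survive. A condensed one-way merge using the new knobs (essentially the reset_tree() hunk in builtin/stash.c further down):
	struct unpack_trees_options opts;

	memset(&opts, 0, sizeof(opts));
	opts.src_index = &the_index;
	opts.dst_index = &the_index;
	opts.fn = oneway_merge;
	opts.merge = 1;
	opts.update = 1;				/* touch the working tree */
	opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;	/* but do not clobber untracked files */
	opts.preserve_ignored = 0;			/* ignored files may still be overwritten */
	if (unpack_trees(nr_trees, t, &opts))
		return -1;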
diff --git a/builtin/rebase.c b/builtin/rebase.c
index eb01f4d790..8c6393f6d7 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -560,6 +560,9 @@ int cmd_rebase__interactive(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options,
builtin_rebase_interactive_usage, PARSE_OPT_KEEP_ARGV0);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
if (!is_null_oid(&squash_onto))
opts.squash_onto = &squash_onto;
@@ -741,7 +744,6 @@ static int finish_rebase(struct rebase_options *opts)
delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
unlink(git_path_auto_merge(the_repository));
apply_autostash(state_dir_path("autostash", opts));
- close_object_store(the_repository->objects);
/*
* We ignore errors in 'git maintenance run --auto', since the
* user should see them.
@@ -763,17 +765,6 @@ static int finish_rebase(struct rebase_options *opts)
return ret;
}
-static struct commit *peel_committish(const char *name)
-{
- struct object *obj;
- struct object_id oid;
-
- if (get_oid(name, &oid))
- return NULL;
- obj = parse_object(the_repository, &oid);
- return (struct commit *)peel_to_type(name, 0, obj, OBJ_COMMIT);
-}
-
static void add_var(struct strbuf *buf, const char *name, const char *value)
{
if (!value)
@@ -1431,6 +1422,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
usage_with_options(builtin_rebase_usage,
builtin_rebase_options);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
options.allow_empty_message = 1;
git_config(rebase_config, &options);
/* options.gpg_sign_opt will be either "-S" or NULL */
@@ -1575,7 +1569,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
die(_("could not move back to %s"),
oid_to_hex(&options.orig_head));
remove_branch_state(the_repository, 0);
- ret = !!finish_rebase(&options);
+ ret = finish_rebase(&options);
goto cleanup;
}
case ACTION_QUIT: {
@@ -1584,11 +1578,11 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
struct replay_opts replay = REPLAY_OPTS_INIT;
replay.action = REPLAY_INTERACTIVE_REBASE;
- ret = !!sequencer_remove_state(&replay);
+ ret = sequencer_remove_state(&replay);
} else {
strbuf_reset(&buf);
strbuf_addstr(&buf, options.state_dir);
- ret = !!remove_dir_recursively(&buf, 0);
+ ret = remove_dir_recursively(&buf, 0);
if (ret)
error(_("could not remove '%s'"),
options.state_dir);
@@ -1846,7 +1840,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (!strcmp(options.upstream_name, "-"))
options.upstream_name = "@{-1}";
}
- options.upstream = peel_committish(options.upstream_name);
+ options.upstream =
+ lookup_commit_reference_by_name(options.upstream_name);
if (!options.upstream)
die(_("invalid upstream '%s'"), options.upstream_name);
options.upstream_arg = options.upstream_name;
@@ -1889,7 +1884,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
options.onto = lookup_commit_or_die(&merge_base,
options.onto_name);
} else {
- options.onto = peel_committish(options.onto_name);
+ options.onto =
+ lookup_commit_reference_by_name(options.onto_name);
if (!options.onto)
die(_("Does not point to a valid commit '%s'"),
options.onto_name);
@@ -1914,13 +1910,15 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
die_if_checked_out(buf.buf, 1);
options.head_name = xstrdup(buf.buf);
/* If not is it a valid ref (branch or commit)? */
- } else if (!get_oid(branch_name, &options.orig_head) &&
- lookup_commit_reference(the_repository,
- &options.orig_head))
+ } else {
+ struct commit *commit =
+ lookup_commit_reference_by_name(branch_name);
+ if (!commit)
+ die(_("no such branch/commit '%s'"),
+ branch_name);
+ oidcpy(&options.orig_head, &commit->object.oid);
options.head_name = NULL;
- else
- die(_("no such branch/commit '%s'"),
- branch_name);
+ }
} else if (argc == 0) {
/* Do not need to switch branches, we are already on it. */
options.head_name =
@@ -1960,7 +1958,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (require_clean_work_tree(the_repository, "rebase",
_("Please commit or stash them."), 1, 1)) {
- ret = 1;
+ ret = -1;
goto cleanup;
}
@@ -1995,7 +1993,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
NULL, buf.buf,
DEFAULT_REFLOG_ACTION) < 0) {
- ret = !!error(_("could not switch to "
+ ret = error(_("could not switch to "
"%s"),
options.switch_to);
goto cleanup;
@@ -2010,7 +2008,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
else
printf(_("Current branch %s is up to date.\n"),
branch_name);
- ret = !!finish_rebase(&options);
+ ret = finish_rebase(&options);
goto cleanup;
} else if (!(options.flags & REBASE_NO_QUIET))
; /* be quiet */
@@ -2088,7 +2086,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
RESET_HEAD_REFS_ONLY, "HEAD", msg.buf,
DEFAULT_REFLOG_ACTION);
strbuf_release(&msg);
- ret = !!finish_rebase(&options);
+ ret = finish_rebase(&options);
goto cleanup;
}
@@ -2102,7 +2100,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
options.revisions = revisions.buf;
run_rebase:
- ret = !!run_specific_rebase(&options, action);
+ ret = run_specific_rebase(&options, action);
cleanup:
strbuf_release(&buf);
@@ -2113,5 +2111,5 @@ cleanup:
free(options.strategy);
strbuf_release(&options.git_format_patch_opt);
free(squash_onto_name);
- return ret;
+ return !!ret;
}
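The rebase hunks drop the scattered '!!' coercions: helpers now return error()'s -1 (or other non-zero codes) unchanged, and cmd_rebase() normalizes to a 0-or-1 exit status exactly once, at the very end. The convention, sketched with stand-in helpers:
	int ret = 0;

	if (do_step() < 0) {			/* do_step() is a stand-in */
		ret = error(_("step failed"));	/* error() returns -1 */
		goto cleanup;
	}
	ret = further_work();			/* also a stand-in; may be non-zero */
cleanup:
	return !!ret;				/* any non-zero ret exits with 1 */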
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 2d1f97e1ca..25cc0c907e 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -7,6 +7,7 @@
#include "pkt-line.h"
#include "sideband.h"
#include "run-command.h"
+#include "hook.h"
#include "exec-cmd.h"
#include "commit.h"
#include "object.h"
@@ -1306,7 +1307,7 @@ static void refuse_unconfigured_deny_delete_current(void)
rp_error("%s", _(refuse_unconfigured_deny_delete_current_msg));
}
-static int command_singleton_iterator(void *cb_data, struct object_id *oid);
+static const struct object_id *command_singleton_iterator(void *cb_data);
static int update_shallow_ref(struct command *cmd, struct shallow_info *si)
{
struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT;
@@ -1463,7 +1464,7 @@ static const char *update_worktree(unsigned char *sha1, const struct worktree *w
strvec_pushf(&env, "GIT_DIR=%s", absolute_path(git_dir));
- if (!find_hook(push_to_checkout_hook))
+ if (!hook_exists(push_to_checkout_hook))
retval = push_to_deploy(sha1, &env, work_tree);
else
retval = push_to_checkout(sha1, &env, work_tree);
@@ -1731,16 +1732,15 @@ static void check_aliased_updates(struct command *commands)
string_list_clear(&ref_list, 0);
}
-static int command_singleton_iterator(void *cb_data, struct object_id *oid)
+static const struct object_id *command_singleton_iterator(void *cb_data)
{
struct command **cmd_list = cb_data;
struct command *cmd = *cmd_list;
if (!cmd || is_null_oid(&cmd->new_oid))
- return -1; /* end of list */
+ return NULL;
*cmd_list = NULL; /* this returns only one */
- oidcpy(oid, &cmd->new_oid);
- return 0;
+ return &cmd->new_oid;
}
static void set_connectivity_errors(struct command *commands,
@@ -1770,7 +1770,7 @@ struct iterate_data {
struct shallow_info *si;
};
-static int iterate_receive_command_list(void *cb_data, struct object_id *oid)
+static const struct object_id *iterate_receive_command_list(void *cb_data)
{
struct iterate_data *data = cb_data;
struct command **cmd_list = &data->cmds;
@@ -1781,13 +1781,11 @@ static int iterate_receive_command_list(void *cb_data, struct object_id *oid)
/* to be checked in update_shallow_ref() */
continue;
if (!is_null_oid(&cmd->new_oid) && !cmd->skip_update) {
- oidcpy(oid, &cmd->new_oid);
*cmd_list = cmd->next;
- return 0;
+ return &cmd->new_oid;
}
}
- *cmd_list = NULL;
- return -1; /* end of list */
+ return NULL;
}
static void reject_updates_to_hidden(struct command *commands)
@@ -2477,7 +2475,8 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
struct option options[] = {
OPT__QUIET(&quiet, N_("quiet")),
OPT_HIDDEN_BOOL(0, "stateless-rpc", &stateless_rpc, NULL),
- OPT_HIDDEN_BOOL(0, "advertise-refs", &advertise_refs, NULL),
+ OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs, NULL),
+ OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"),
OPT_HIDDEN_BOOL(0, "reject-thin-pack-for-testing", &reject_thin, NULL),
OPT_END()
};
@@ -2580,10 +2579,9 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
proc.no_stdin = 1;
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
- proc.git_cmd = 1;
+ proc.git_cmd = proc.close_object_store = 1;
proc.argv = argv_gc_auto;
- close_object_store(the_repository->objects);
if (!start_command(&proc)) {
if (use_sideband)
copy_to_sideband(proc.err, -1, NULL);
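The connectivity-check callbacks change shape in this series: an oid iterator used to fill an out-parameter and return -1 at end of list; it now simply returns a pointer to the next object ID, or NULL when the list is exhausted. Condensed from command_singleton_iterator() and iterate_receive_command_list() above (next_oid_fn is an illustrative name):
	static const struct object_id *next_oid_fn(void *cb_data)
	{
		struct command **cmd_list = cb_data;
		struct command *cmd = *cmd_list;

		if (!cmd)
			return NULL;		/* end of list */
		*cmd_list = cmd->next;
		return &cmd->new_oid;
	}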
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 09541d1c80..bd4c669918 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -629,8 +629,9 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
free_worktrees(worktrees);
for (i = 0; i < collected.nr; i++) {
struct collected_reflog *e = collected.e[i];
+
set_reflog_expiry_param(&cb.cmd, explicit_expiry, e->reflog);
- status |= reflog_expire(e->reflog, &e->oid, flags,
+ status |= reflog_expire(e->reflog, flags,
reflog_expiry_prepare,
should_expire_reflog_ent,
reflog_expiry_cleanup,
@@ -642,13 +643,12 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
for (; i < argc; i++) {
char *ref;
- struct object_id oid;
- if (!dwim_log(argv[i], strlen(argv[i]), &oid, &ref)) {
+ if (!dwim_log(argv[i], strlen(argv[i]), NULL, &ref)) {
status |= error(_("%s points nowhere!"), argv[i]);
continue;
}
set_reflog_expiry_param(&cb.cmd, explicit_expiry, ref);
- status |= reflog_expire(ref, &oid, flags,
+ status |= reflog_expire(ref, flags,
reflog_expiry_prepare,
should_expire_reflog_ent,
reflog_expiry_cleanup,
@@ -700,7 +700,6 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
for ( ; i < argc; i++) {
const char *spec = strstr(argv[i], "@{");
- struct object_id oid;
char *ep, *ref;
int recno;
@@ -709,7 +708,7 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
continue;
}
- if (!dwim_log(argv[i], spec - argv[i], &oid, &ref)) {
+ if (!dwim_log(argv[i], spec - argv[i], NULL, &ref)) {
status |= error(_("no reflog for '%s'"), argv[i]);
continue;
}
@@ -724,7 +723,7 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
cb.cmd.expire_total = 0;
}
- status |= reflog_expire(ref, &oid, flags,
+ status |= reflog_expire(ref, flags,
reflog_expiry_prepare,
should_expire_reflog_ent,
reflog_expiry_cleanup,
diff --git a/builtin/repack.c b/builtin/repack.c
index 5f9bc74adc..cb9f4bfed3 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -208,10 +208,10 @@ static struct {
unsigned optional:1;
} exts[] = {
{".pack"},
- {".idx"},
{".rev", 1},
{".bitmap", 1},
{".promisor", 1},
+ {".idx"},
};
static unsigned populate_pack_exts(char *name)
@@ -515,6 +515,10 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (!(pack_everything & ALL_INTO_ONE) ||
!is_bare_repository())
write_bitmaps = 0;
+ } else if (write_bitmaps &&
+ git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0) &&
+ git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 0)) {
+ write_bitmaps = 0;
}
if (pack_kept_objects < 0)
pack_kept_objects = write_bitmaps > 0;
@@ -582,15 +586,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strvec_pushf(&cmd.args,
"--unpack-unreachable=%s",
unpack_unreachable);
- strvec_push(&cmd.env_array, "GIT_REF_PARANOIA=1");
} else if (pack_everything & LOOSEN_UNREACHABLE) {
strvec_push(&cmd.args,
"--unpack-unreachable");
} else if (keep_unreachable) {
strvec_push(&cmd.args, "--keep-unreachable");
strvec_push(&cmd.args, "--pack-loose-unreachable");
- } else {
- strvec_push(&cmd.env_array, "GIT_REF_PARANOIA=1");
}
}
} else if (geometry) {
@@ -725,8 +726,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
update_server_info(0);
remove_temporary_files();
- if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0))
- write_midx_file(get_object_directory(), NULL, 0);
+ if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) {
+ unsigned flags = 0;
+ if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 0))
+ flags |= MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX;
+ write_midx_file(get_object_directory(), NULL, flags);
+ }
string_list_clear(&names, 0);
string_list_clear(&rollback, 0);
diff --git a/builtin/reset.c b/builtin/reset.c
index 51c9e2f43f..7393595349 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -67,12 +67,18 @@ static int reset_index(const char *ref, const struct object_id *oid, int reset_t
case KEEP:
case MERGE:
opts.update = 1;
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
break;
case HARD:
opts.update = 1;
- /* fallthrough */
+ opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED;
+ break;
+ case MIXED:
+ opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
+ /* but opts.update=0, so working tree not updated */
+ break;
default:
- opts.reset = 1;
+ BUG("invalid reset_type passed to reset_index");
}
read_cache_unmerged();
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 22c4e1a4ff..8480a59f57 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -863,8 +863,8 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
continue;
}
if (!strcmp(arg, "--bisect")) {
- for_each_fullref_in("refs/bisect/bad", show_reference, NULL, 0);
- for_each_fullref_in("refs/bisect/good", anti_reference, NULL, 0);
+ for_each_fullref_in("refs/bisect/bad", show_reference, NULL);
+ for_each_fullref_in("refs/bisect/good", anti_reference, NULL);
continue;
}
if (opt_with_value(arg, "--branches", &arg)) {
diff --git a/builtin/revert.c b/builtin/revert.c
index 2e13660e4b..51776abea6 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -136,6 +136,9 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
PARSE_OPT_KEEP_ARGV0 |
PARSE_OPT_KEEP_UNKNOWN);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
/* implies allow_empty */
if (opts->keep_redundant_commits)
opts->allow_empty = 1;
diff --git a/builtin/rm.c b/builtin/rm.c
index 3b44b807e5..3d0967cdc1 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -237,6 +237,7 @@ static int check_local_mod(struct object_id *head, int index_only)
static int show_only = 0, force = 0, index_only = 0, recursive = 0, quiet = 0;
static int ignore_unmatch = 0, pathspec_file_nul;
+static int include_sparse;
static char *pathspec_from_file;
static struct option builtin_rm_options[] = {
@@ -247,6 +248,7 @@ static struct option builtin_rm_options[] = {
OPT_BOOL('r', NULL, &recursive, N_("allow recursive removal")),
OPT_BOOL( 0 , "ignore-unmatch", &ignore_unmatch,
N_("exit with a zero status even if nothing matched")),
+ OPT_BOOL(0, "sparse", &include_sparse, N_("allow updating entries outside of the sparse-checkout cone")),
OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
OPT_END(),
@@ -298,7 +300,10 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
ensure_full_index(&the_index);
for (i = 0; i < active_nr; i++) {
const struct cache_entry *ce = active_cache[i];
- if (ce_skip_worktree(ce))
+
+ if (!include_sparse &&
+ (ce_skip_worktree(ce) ||
+ !path_in_sparse_checkout(ce->name, &the_index)))
continue;
if (!ce_path_match(&the_index, ce, &pathspec, seen))
continue;
@@ -322,7 +327,8 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
seen_any = 1;
else if (ignore_unmatch)
continue;
- else if (matches_skip_worktree(&pathspec, i, &skip_worktree_seen))
+ else if (!include_sparse &&
+ matches_skip_worktree(&pathspec, i, &skip_worktree_seen))
string_list_append(&only_match_skip_worktree, original);
else
die(_("pathspec '%s' did not match any files"), original);
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 8ba9f13787..d0f5c4702b 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -100,6 +100,98 @@ static int sparse_checkout_list(int argc, const char **argv)
return 0;
}
+static void clean_tracked_sparse_directories(struct repository *r)
+{
+ int i, was_full = 0;
+ struct strbuf path = STRBUF_INIT;
+ size_t pathlen;
+ struct string_list_item *item;
+ struct string_list sparse_dirs = STRING_LIST_INIT_DUP;
+
+ /*
+ * If we are not using cone mode patterns, then we cannot
+ * delete directories outside of the sparse cone.
+ */
+ if (!r || !r->index || !r->worktree)
+ return;
+ if (init_sparse_checkout_patterns(r->index) ||
+ !r->index->sparse_checkout_patterns->use_cone_patterns)
+ return;
+
+ /*
+ * Use the sparse index as a data structure to assist finding
+ * directories that are safe to delete. This conversion to a
+ * sparse index will not delete directories that contain
+ * conflicted entries or submodules.
+ */
+ if (!r->index->sparse_index) {
+ /*
+ * If something, such as a merge conflict or other concern,
+ * prevents us from converting to a sparse index, then do
+ * not try deleting files.
+ */
+ if (convert_to_sparse(r->index, SPARSE_INDEX_MEMORY_ONLY))
+ return;
+ was_full = 1;
+ }
+
+ strbuf_addstr(&path, r->worktree);
+ strbuf_complete(&path, '/');
+ pathlen = path.len;
+
+ /*
+ * Collect directories that have gone out of scope but also
+ * exist on disk, so there is some work to be done. We need to
+ * store the entries in a list before exploring, since that might
+ * expand the sparse-index again.
+ */
+ for (i = 0; i < r->index->cache_nr; i++) {
+ struct cache_entry *ce = r->index->cache[i];
+
+ if (S_ISSPARSEDIR(ce->ce_mode) &&
+ repo_file_exists(r, ce->name))
+ string_list_append(&sparse_dirs, ce->name);
+ }
+
+ for_each_string_list_item(item, &sparse_dirs) {
+ struct dir_struct dir = DIR_INIT;
+ struct pathspec p = { 0 };
+ struct strvec s = STRVEC_INIT;
+
+ strbuf_setlen(&path, pathlen);
+ strbuf_addstr(&path, item->string);
+
+ dir.flags |= DIR_SHOW_IGNORED_TOO;
+
+ setup_standard_excludes(&dir);
+ strvec_push(&s, path.buf);
+
+ parse_pathspec(&p, PATHSPEC_GLOB, 0, NULL, s.v);
+ fill_directory(&dir, r->index, &p);
+
+ if (dir.nr) {
+ warning(_("directory '%s' contains untracked files,"
+ " but is not in the sparse-checkout cone"),
+ item->string);
+ } else if (remove_dir_recursively(&path, 0)) {
+ /*
+ * Removal is "best effort". If something blocks
+ * the deletion, then continue with a warning.
+ */
+ warning(_("failed to remove directory '%s'"),
+ item->string);
+ }
+
+ dir_clear(&dir);
+ }
+
+ string_list_clear(&sparse_dirs, 0);
+ strbuf_release(&path);
+
+ if (was_full)
+ ensure_full_index(r->index);
+}
+
static int update_working_directory(struct pattern_list *pl)
{
enum update_sparsity_result result;
@@ -141,6 +233,8 @@ static int update_working_directory(struct pattern_list *pl)
else
rollback_lock_file(&lock_file);
+ clean_tracked_sparse_directories(r);
+
r->index->sparse_checkout_patterns = NULL;
return result;
}
diff --git a/builtin/stash.c b/builtin/stash.c
index 8f42360ca9..cc93ace422 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -256,8 +256,10 @@ static int reset_tree(struct object_id *i_tree, int update, int reset)
opts.src_index = &the_index;
opts.dst_index = &the_index;
opts.merge = 1;
- opts.reset = reset;
+ opts.reset = reset ? UNPACK_RESET_PROTECT_UNTRACKED : 0;
opts.update = update;
+ if (update)
+ opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
opts.fn = oneway_merge;
if (unpack_trees(nr_trees, t, &opts))
@@ -313,6 +315,17 @@ static int reset_head(void)
return run_command(&cp);
}
+static int is_path_a_directory(const char *path)
+{
+ /*
+ * This function differs from abspath.c:is_directory() in that
+ * here we use lstat() instead of stat(); we do not want to
+ * follow symbolic links here.
+ */
+ struct stat st;
+ return (!lstat(path, &st) && S_ISDIR(st.st_mode));
+}
+
static void add_diff_to_buf(struct diff_queue_struct *q,
struct diff_options *options,
void *data)
@@ -320,6 +333,9 @@ static void add_diff_to_buf(struct diff_queue_struct *q,
int i;
for (i = 0; i < q->nr; i++) {
+ if (is_path_a_directory(q->queue[i]->one->path))
+ continue;
+
strbuf_addstr(data, q->queue[i]->one->path);
/* NUL-terminate: will be fed to update-index -z */
@@ -521,9 +537,6 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
}
}
- if (info->has_u && restore_untracked(&info->u_tree))
- return error(_("could not restore untracked files from stash"));
-
init_merge_options(&o, the_repository);
o.branch1 = "Updated upstream";
@@ -558,6 +571,9 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
unstage_changes_unless_new(&c_tree);
}
+ if (info->has_u && restore_untracked(&info->u_tree))
+ return error(_("could not restore untracked files from stash"));
+
if (!quiet) {
struct child_process cp = CHILD_PROCESS_INIT;
@@ -1519,6 +1535,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
} else {
struct child_process cp = CHILD_PROCESS_INIT;
cp.git_cmd = 1;
+ /* BUG: this nukes untracked files in the way */
strvec_pushl(&cp.args, "reset", "--hard", "-q",
"--no-recurse-submodules", NULL);
if (run_command(&cp)) {
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 4da9781b99..6298cbdd4e 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -199,34 +199,28 @@ static char *relative_url(const char *remote_url,
return strbuf_detach(&sb, NULL);
}
-static int resolve_relative_url(int argc, const char **argv, const char *prefix)
+static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet)
{
- char *remoteurl = NULL;
+ char *remoteurl, *resolved_url;
char *remote = get_default_remote();
- const char *up_path = NULL;
- char *res;
- const char *url;
- struct strbuf sb = STRBUF_INIT;
-
- if (argc != 2 && argc != 3)
- die("resolve-relative-url only accepts one or two arguments");
-
- url = argv[1];
- strbuf_addf(&sb, "remote.%s.url", remote);
- free(remote);
+ struct strbuf remotesb = STRBUF_INIT;
- if (git_config_get_string(sb.buf, &remoteurl))
- /* the repository is its own authoritative upstream */
+ strbuf_addf(&remotesb, "remote.%s.url", remote);
+ if (git_config_get_string(remotesb.buf, &remoteurl)) {
+ if (!quiet)
+ warning(_("could not look up configuration '%s'. "
+ "Assuming this repository is its own "
+ "authoritative upstream."),
+ remotesb.buf);
remoteurl = xgetcwd();
+ }
+ resolved_url = relative_url(remoteurl, rel_url, up_path);
- if (argc == 3)
- up_path = argv[2];
-
- res = relative_url(remoteurl, url, up_path);
- puts(res);
- free(res);
+ free(remote);
free(remoteurl);
- return 0;
+ strbuf_release(&remotesb);
+
+ return resolved_url;
}
static int resolve_relative_url_test(int argc, const char **argv, const char *prefix)
@@ -313,7 +307,7 @@ struct module_list {
const struct cache_entry **entries;
int alloc, nr;
};
-#define MODULE_LIST_INIT { NULL, 0, 0 }
+#define MODULE_LIST_INIT { 0 }
static int module_list_compute(int argc, const char **argv,
const char *prefix,
@@ -590,31 +584,11 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
return 0;
}
-static char *compute_submodule_clone_url(const char *rel_url)
-{
- char *remoteurl, *relurl;
- char *remote = get_default_remote();
- struct strbuf remotesb = STRBUF_INIT;
-
- strbuf_addf(&remotesb, "remote.%s.url", remote);
- if (git_config_get_string(remotesb.buf, &remoteurl)) {
- warning(_("could not look up configuration '%s'. Assuming this repository is its own authoritative upstream."), remotesb.buf);
- remoteurl = xgetcwd();
- }
- relurl = relative_url(remoteurl, rel_url, NULL);
-
- free(remote);
- free(remoteurl);
- strbuf_release(&remotesb);
-
- return relurl;
-}
-
struct init_cb {
const char *prefix;
unsigned int flags;
};
-#define INIT_CB_INIT { NULL, 0 }
+#define INIT_CB_INIT { 0 }
static void init_submodule(const char *path, const char *prefix,
unsigned int flags)
@@ -660,7 +634,7 @@ static void init_submodule(const char *path, const char *prefix,
if (starts_with_dot_dot_slash(url) ||
starts_with_dot_slash(url)) {
char *oldurl = url;
- url = compute_submodule_clone_url(oldurl);
+ url = resolve_relative_url(oldurl, NULL, 0);
free(oldurl);
}
@@ -743,7 +717,7 @@ struct status_cb {
const char *prefix;
unsigned int flags;
};
-#define STATUS_CB_INIT { NULL, 0 }
+#define STATUS_CB_INIT { 0 }
static void print_status(unsigned int flags, char state, const char *path,
const struct object_id *oid, const char *displaypath)
@@ -937,13 +911,13 @@ struct module_cb {
char status;
const char *sm_path;
};
-#define MODULE_CB_INIT { 0, 0, NULL, NULL, '\0', NULL }
+#define MODULE_CB_INIT { 0 }
struct module_cb_list {
struct module_cb **entries;
int alloc, nr;
};
-#define MODULE_CB_LIST_INIT { NULL, 0, 0 }
+#define MODULE_CB_LIST_INIT { 0 }
struct summary_cb {
int argc;
@@ -954,7 +928,7 @@ struct summary_cb {
unsigned int files: 1;
int summary_limit;
};
-#define SUMMARY_CB_INIT { 0, NULL, NULL, 0, 0, 0, 0 }
+#define SUMMARY_CB_INIT { 0 }
enum diff_cmd {
DIFF_INDEX,
@@ -1360,7 +1334,7 @@ struct sync_cb {
const char *prefix;
unsigned int flags;
};
-#define SYNC_CB_INIT { NULL, 0 }
+#define SYNC_CB_INIT { 0 }
static void sync_submodule(const char *path, const char *prefix,
unsigned int flags)
@@ -1380,20 +1354,10 @@ static void sync_submodule(const char *path, const char *prefix,
if (sub && sub->url) {
if (starts_with_dot_dot_slash(sub->url) ||
starts_with_dot_slash(sub->url)) {
- char *remote_url, *up_path;
- char *remote = get_default_remote();
- strbuf_addf(&sb, "remote.%s.url", remote);
-
- if (git_config_get_string(sb.buf, &remote_url))
- remote_url = xgetcwd();
-
- up_path = get_up_path(path);
- sub_origin_url = relative_url(remote_url, sub->url, up_path);
- super_config_url = relative_url(remote_url, sub->url, NULL);
-
- free(remote);
+ char *up_path = get_up_path(path);
+ sub_origin_url = resolve_relative_url(sub->url, up_path, 1);
+ super_config_url = resolve_relative_url(sub->url, NULL, 1);
free(up_path);
- free(remote_url);
} else {
sub_origin_url = xstrdup(sub->url);
super_config_url = xstrdup(sub->url);
@@ -1516,7 +1480,7 @@ struct deinit_cb {
const char *prefix;
unsigned int flags;
};
-#define DEINIT_CB_INIT { NULL, 0 }
+#define DEINIT_CB_INIT { 0 }
static void deinit_submodule(const char *path, const char *prefix,
unsigned int flags)
@@ -1683,8 +1647,9 @@ struct submodule_alternate_setup {
} error_mode;
struct string_list *reference;
};
-#define SUBMODULE_ALTERNATE_SETUP_INIT { NULL, \
- SUBMODULE_ALTERNATE_ERROR_IGNORE, NULL }
+#define SUBMODULE_ALTERNATE_SETUP_INIT { \
+ .error_mode = SUBMODULE_ALTERNATE_ERROR_IGNORE, \
+}
static const char alternate_error_advice[] = N_(
"An alternate computed from a superproject's alternate is invalid.\n"
@@ -1704,18 +1669,24 @@ static int add_possible_reference_from_superproject(
* standard layout with .git/(modules/<name>)+/objects
*/
if (strip_suffix(odb->path, "/objects", &len)) {
+ struct repository alternate;
char *sm_alternate;
struct strbuf sb = STRBUF_INIT;
struct strbuf err = STRBUF_INIT;
strbuf_add(&sb, odb->path, len);
+ repo_init(&alternate, sb.buf, NULL);
+
/*
* We need to end the new path with '/' to mark it as a dir,
* otherwise a submodule name containing '/' will be broken
* as the last part of a missing submodule reference would
* be taken as a file name.
*/
- strbuf_addf(&sb, "/modules/%s/", sas->submodule_name);
+ strbuf_reset(&sb);
+ submodule_name_to_gitdir(&sb, &alternate, sas->submodule_name);
+ strbuf_addch(&sb, '/');
+ repo_clear(&alternate);
sm_alternate = compute_alternate_path(sb.buf, &err);
if (sm_alternate) {
@@ -1785,7 +1756,7 @@ static int clone_submodule(struct module_clone_data *clone_data)
struct strbuf sb = STRBUF_INIT;
struct child_process cp = CHILD_PROCESS_INIT;
- strbuf_addf(&sb, "%s/modules/%s", get_git_dir(), clone_data->name);
+ submodule_name_to_gitdir(&sb, the_repository, clone_data->name);
sm_gitdir = absolute_pathdup(sb.buf);
strbuf_reset(&sb);
@@ -2045,6 +2016,20 @@ struct submodule_update_clone {
.max_jobs = 1, \
}
+struct update_data {
+ const char *recursive_prefix;
+ const char *sm_path;
+ const char *displaypath;
+ struct object_id oid;
+ struct object_id suboid;
+ struct submodule_update_strategy update_strategy;
+ int depth;
+ unsigned int force: 1;
+ unsigned int quiet: 1;
+ unsigned int nofetch: 1;
+ unsigned int just_cloned: 1;
+};
+#define UPDATE_DATA_INIT { .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT }
static void next_submodule_warn_missing(struct submodule_update_clone *suc,
struct strbuf *out, const char *displaypath)
@@ -2134,7 +2119,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
if (repo_config_get_string_tmp(the_repository, sb.buf, &url)) {
if (starts_with_dot_slash(sub->url) ||
starts_with_dot_dot_slash(sub->url)) {
- url = compute_submodule_clone_url(sub->url);
+ url = resolve_relative_url(sub->url, NULL, 0);
need_free_url = 1;
} else
url = sub->url;
@@ -2298,6 +2283,181 @@ static int git_update_clone_config(const char *var, const char *value,
return 0;
}
+static int is_tip_reachable(const char *path, struct object_id *oid)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf rev = STRBUF_INIT;
+ char *hex = oid_to_hex(oid);
+
+ cp.git_cmd = 1;
+ cp.dir = xstrdup(path);
+ cp.no_stderr = 1;
+ strvec_pushl(&cp.args, "rev-list", "-n", "1", hex, "--not", "--all", NULL);
+
+ prepare_submodule_repo_env(&cp.env_array);
+
+ if (capture_command(&cp, &rev, GIT_MAX_HEXSZ + 1) || rev.len)
+ return 0;
+
+ return 1;
+}
+
+static int fetch_in_submodule(const char *module_path, int depth, int quiet, struct object_id *oid)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ prepare_submodule_repo_env(&cp.env_array);
+ cp.git_cmd = 1;
+ cp.dir = xstrdup(module_path);
+
+ strvec_push(&cp.args, "fetch");
+ if (quiet)
+ strvec_push(&cp.args, "--quiet");
+ if (depth)
+ strvec_pushf(&cp.args, "--depth=%d", depth);
+ if (oid) {
+ char *hex = oid_to_hex(oid);
+ char *remote = get_default_remote();
+ strvec_pushl(&cp.args, remote, hex, NULL);
+ }
+
+ return run_command(&cp);
+}
+
+static int run_update_command(struct update_data *ud, int subforce)
+{
+ struct strvec args = STRVEC_INIT;
+ struct strvec child_env = STRVEC_INIT;
+ char *oid = oid_to_hex(&ud->oid);
+ int must_die_on_failure = 0;
+ int git_cmd;
+
+ switch (ud->update_strategy.type) {
+ case SM_UPDATE_CHECKOUT:
+ git_cmd = 1;
+ strvec_pushl(&args, "checkout", "-q", NULL);
+ if (subforce)
+ strvec_push(&args, "-f");
+ break;
+ case SM_UPDATE_REBASE:
+ git_cmd = 1;
+ strvec_push(&args, "rebase");
+ if (ud->quiet)
+ strvec_push(&args, "--quiet");
+ must_die_on_failure = 1;
+ break;
+ case SM_UPDATE_MERGE:
+ git_cmd = 1;
+ strvec_push(&args, "merge");
+ if (ud->quiet)
+ strvec_push(&args, "--quiet");
+ must_die_on_failure = 1;
+ break;
+ case SM_UPDATE_COMMAND:
+ git_cmd = 0;
+ strvec_push(&args, ud->update_strategy.command);
+ must_die_on_failure = 1;
+ break;
+ default:
+ BUG("unexpected update strategy type: %s",
+ submodule_strategy_to_string(&ud->update_strategy));
+ }
+ strvec_push(&args, oid);
+
+ prepare_submodule_repo_env(&child_env);
+ if (run_command_v_opt_cd_env(args.v, git_cmd ? RUN_GIT_CMD : RUN_USING_SHELL,
+ ud->sm_path, child_env.v)) {
+ switch (ud->update_strategy.type) {
+ case SM_UPDATE_CHECKOUT:
+ printf(_("Unable to checkout '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
+ break;
+ case SM_UPDATE_REBASE:
+ printf(_("Unable to rebase '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
+ break;
+ case SM_UPDATE_MERGE:
+ printf(_("Unable to merge '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
+ break;
+ case SM_UPDATE_COMMAND:
+ printf(_("Execution of '%s %s' failed in submodule path '%s'"),
+ ud->update_strategy.command, oid, ud->displaypath);
+ break;
+ default:
+ BUG("unexpected update strategy type: %s",
+ submodule_strategy_to_string(&ud->update_strategy));
+ }
+ /*
+ * NEEDSWORK: We are currently printing to stdout with error
+ * return so that the shell caller handles the error output
+ * properly. Once we start handling the error messages within
+ * C, we should use die() instead.
+ */
+ if (must_die_on_failure)
+ return 2;
+ /*
+ * This signifies to the caller in shell that the command
+ * failed without dying
+ */
+ return 1;
+ }
+
+ switch (ud->update_strategy.type) {
+ case SM_UPDATE_CHECKOUT:
+ printf(_("Submodule path '%s': checked out '%s'\n"),
+ ud->displaypath, oid);
+ break;
+ case SM_UPDATE_REBASE:
+ printf(_("Submodule path '%s': rebased into '%s'\n"),
+ ud->displaypath, oid);
+ break;
+ case SM_UPDATE_MERGE:
+ printf(_("Submodule path '%s': merged in '%s'\n"),
+ ud->displaypath, oid);
+ break;
+ case SM_UPDATE_COMMAND:
+ printf(_("Submodule path '%s': '%s %s'\n"),
+ ud->displaypath, ud->update_strategy.command, oid);
+ break;
+ default:
+ BUG("unexpected update strategy type: %s",
+ submodule_strategy_to_string(&ud->update_strategy));
+ }
+
+ return 0;
+}
+
+static int do_run_update_procedure(struct update_data *ud)
+{
+ int subforce = is_null_oid(&ud->suboid) || ud->force;
+
+ if (!ud->nofetch) {
+ /*
+ * Run fetch only if `oid` isn't present or it
+ * is not reachable from a ref.
+ */
+ if (!is_tip_reachable(ud->sm_path, &ud->oid) &&
+ fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, NULL) &&
+ !ud->quiet)
+ fprintf_ln(stderr,
+ _("Unable to fetch in submodule path '%s'; "
+ "trying to directly fetch %s:"),
+ ud->displaypath, oid_to_hex(&ud->oid));
+ /*
+ * Now we tried the usual fetch, but `oid` may
+ * not be reachable from any of the refs.
+ */
+ if (!is_tip_reachable(ud->sm_path, &ud->oid) &&
+ fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, &ud->oid))
+ die(_("Fetched in submodule path '%s', but it did not "
+ "contain %s. Direct fetching of that commit failed."),
+ ud->displaypath, oid_to_hex(&ud->oid));
+ }
+
+ return run_update_command(ud, subforce);
+}
+
static void update_submodule(struct update_clone_data *ucd)
{
fprintf(stdout, "dummy %s %d\t%s\n",
@@ -2395,6 +2555,73 @@ static int update_clone(int argc, const char **argv, const char *prefix)
return update_submodules(&suc);
}
+static int run_update_procedure(int argc, const char **argv, const char *prefix)
+{
+ int force = 0, quiet = 0, nofetch = 0, just_cloned = 0;
+ char *prefixed_path, *update = NULL;
+ struct update_data update_data = UPDATE_DATA_INIT;
+
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("suppress output for update by rebase or merge")),
+ OPT__FORCE(&force, N_("force checkout updates"), 0),
+ OPT_BOOL('N', "no-fetch", &nofetch,
+ N_("don't fetch new objects from the remote site")),
+ OPT_BOOL(0, "just-cloned", &just_cloned,
+ N_("overrides update mode in case the repository is a fresh clone")),
+ OPT_INTEGER(0, "depth", &update_data.depth, N_("depth for shallow fetch")),
+ OPT_STRING(0, "prefix", &prefix,
+ N_("path"),
+ N_("path into the working tree")),
+ OPT_STRING(0, "update", &update,
+ N_("string"),
+ N_("rebase, merge, checkout or none")),
+ OPT_STRING(0, "recursive-prefix", &update_data.recursive_prefix, N_("path"),
+ N_("path into the working tree, across nested "
+ "submodule boundaries")),
+ OPT_CALLBACK_F(0, "oid", &update_data.oid, N_("sha1"),
+ N_("SHA1 expected by superproject"), PARSE_OPT_NONEG,
+ parse_opt_object_id),
+ OPT_CALLBACK_F(0, "suboid", &update_data.suboid, N_("subsha1"),
+ N_("SHA1 of submodule's HEAD"), PARSE_OPT_NONEG,
+ parse_opt_object_id),
+ OPT_END()
+ };
+
+ const char *const usage[] = {
+ N_("git submodule--helper run-update-procedure [<options>] <path>"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (argc != 1)
+ usage_with_options(usage, options);
+
+ update_data.force = !!force;
+ update_data.quiet = !!quiet;
+ update_data.nofetch = !!nofetch;
+ update_data.just_cloned = !!just_cloned;
+ update_data.sm_path = argv[0];
+
+ if (update_data.recursive_prefix)
+ prefixed_path = xstrfmt("%s%s", update_data.recursive_prefix, update_data.sm_path);
+ else
+ prefixed_path = xstrdup(update_data.sm_path);
+
+ update_data.displaypath = get_submodule_displaypath(prefixed_path, prefix);
+
+ determine_submodule_update_strategy(the_repository, update_data.just_cloned,
+ update_data.sm_path, update,
+ &update_data.update_strategy);
+
+ free(prefixed_path);
+
+ if (!oideq(&update_data.oid, &update_data.suboid) || update_data.force)
+ return do_run_update_procedure(&update_data);
+
+ return 3;
+}
+
static int resolve_relative_path(int argc, const char **argv, const char *prefix)
{
struct strbuf sb = STRBUF_INIT;
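run-update-procedure replaces a chunk of git-submodule.sh, so its return value is a small protocol for the remaining shell driver rather than a plain success/failure. Summarized from run_update_command() and run_update_procedure() above:
	/*
	 * Return values of 'submodule--helper run-update-procedure':
	 *   0 - the configured update (checkout, rebase, merge or !command) succeeded
	 *   1 - the update failed, but the caller may continue with other submodules
	 *   2 - the update failed and the caller must abort
	 *   3 - nothing to do: the submodule is already at the commit the
	 *       superproject expects and --force was not given
	 */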
@@ -2540,7 +2767,6 @@ static int push_check(int argc, const char **argv, const char *prefix)
static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
{
- const struct submodule *sub;
const char *path;
const char *cw;
struct repository subrepo;
@@ -2550,11 +2776,7 @@ static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
path = argv[1];
- sub = submodule_from_path(the_repository, null_oid(), path);
- if (!sub)
- BUG("We could get the submodule handle before?");
-
- if (repo_submodule_init(&subrepo, the_repository, sub))
+ if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
die(_("could not get a repository handle for submodule '%s'"), path);
if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
@@ -2765,7 +2987,7 @@ struct add_data {
const char *prefix;
const char *branch;
const char *reference_path;
- const char *sm_path;
+ char *sm_path;
const char *sm_name;
const char *repo;
const char *realrepo;
@@ -2864,6 +3086,10 @@ static int add_submodule(const struct add_data *add_data)
prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.dir = add_data->sm_path;
+ /*
+ * NOTE: we only get here if add_data->force is true, so
+ * passing --force to checkout is reasonable.
+ */
strvec_pushl(&cp.args, "checkout", "-f", "-q", NULL);
if (add_data->branch) {
@@ -2877,61 +3103,244 @@ static int add_submodule(const struct add_data *add_data)
return 0;
}
-static int add_clone(int argc, const char **argv, const char *prefix)
+static int config_submodule_in_gitmodules(const char *name, const char *var, const char *value)
+{
+ char *key;
+ int ret;
+
+ if (!is_writing_gitmodules_ok())
+ die(_("please make sure that the .gitmodules file is in the working tree"));
+
+ key = xstrfmt("submodule.%s.%s", name, var);
+ ret = config_set_in_gitmodules_file_gently(key, value);
+ free(key);
+
+ return ret;
+}
+
+static void configure_added_submodule(struct add_data *add_data)
+{
+ char *key;
+ char *val = NULL;
+ struct child_process add_submod = CHILD_PROCESS_INIT;
+ struct child_process add_gitmodules = CHILD_PROCESS_INIT;
+
+ key = xstrfmt("submodule.%s.url", add_data->sm_name);
+ git_config_set_gently(key, add_data->realrepo);
+ free(key);
+
+ add_submod.git_cmd = 1;
+ strvec_pushl(&add_submod.args, "add",
+ "--no-warn-embedded-repo", NULL);
+ if (add_data->force)
+ strvec_push(&add_submod.args, "--force");
+ strvec_pushl(&add_submod.args, "--", add_data->sm_path, NULL);
+
+ if (run_command(&add_submod))
+ die(_("Failed to add submodule '%s'"), add_data->sm_path);
+
+ if (config_submodule_in_gitmodules(add_data->sm_name, "path", add_data->sm_path) ||
+ config_submodule_in_gitmodules(add_data->sm_name, "url", add_data->repo))
+ die(_("Failed to register submodule '%s'"), add_data->sm_path);
+
+ if (add_data->branch) {
+ if (config_submodule_in_gitmodules(add_data->sm_name,
+ "branch", add_data->branch))
+ die(_("Failed to register submodule '%s'"), add_data->sm_path);
+ }
+
+ add_gitmodules.git_cmd = 1;
+ strvec_pushl(&add_gitmodules.args,
+ "add", "--force", "--", ".gitmodules", NULL);
+
+ if (run_command(&add_gitmodules))
+ die(_("Failed to register submodule '%s'"), add_data->sm_path);
+
+ /*
+ * NEEDSWORK: In a multi-working-tree world this needs to be
+ * set in the per-worktree config.
+ */
+ /*
+ * NEEDSWORK: In the longer run, we need to get rid of this
+ * pattern of querying "submodule.active" before calling
+ * is_submodule_active(), since that function needs to find
+ * out the value of "submodule.active" again anyway.
+ */
+ if (!git_config_get_string("submodule.active", &val) && val) {
+ /*
+ * If the submodule being added isn't already covered by the
+ * current configured pathspec, set the submodule's active flag
+ */
+ if (!is_submodule_active(the_repository, add_data->sm_path)) {
+ key = xstrfmt("submodule.%s.active", add_data->sm_name);
+ git_config_set_gently(key, "true");
+ free(key);
+ }
+ } else {
+ key = xstrfmt("submodule.%s.active", add_data->sm_name);
+ git_config_set_gently(key, "true");
+ free(key);
+ }
+}
+
+static void die_on_index_match(const char *path, int force)
+{
+ struct pathspec ps;
+ const char *args[] = { path, NULL };
+ parse_pathspec(&ps, 0, PATHSPEC_PREFER_CWD, NULL, args);
+
+ if (read_cache_preload(NULL) < 0)
+ die(_("index file corrupt"));
+
+ if (ps.nr) {
+ int i;
+ char *ps_matched = xcalloc(ps.nr, 1);
+
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(&the_index);
+
+ /*
+ * Since there is only one pathspec, we just need
+ * to check ps_matched[0] to know if a cache
+ * entry matched.
+ */
+ for (i = 0; i < active_nr; i++) {
+ ce_path_match(&the_index, active_cache[i], &ps,
+ ps_matched);
+
+ if (ps_matched[0]) {
+ if (!force)
+ die(_("'%s' already exists in the index"),
+ path);
+ if (!S_ISGITLINK(active_cache[i]->ce_mode))
+ die(_("'%s' already exists in the index "
+ "and is not a submodule"), path);
+ break;
+ }
+ }
+ free(ps_matched);
+ }
+}
+
+static void die_on_repo_without_commits(const char *path)
{
- int force = 0, quiet = 0, dissociate = 0, progress = 0;
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, path);
+ if (is_nonbare_repository_dir(&sb)) {
+ struct object_id oid;
+ if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ die(_("'%s' does not have a commit checked out"), path);
+ }
+}
+
+static int module_add(int argc, const char **argv, const char *prefix)
+{
+ int force = 0, quiet = 0, progress = 0, dissociate = 0;
struct add_data add_data = ADD_DATA_INIT;
struct option options[] = {
- OPT_STRING('b', "branch", &add_data.branch,
- N_("branch"),
- N_("branch of repository to checkout on cloning")),
- OPT_STRING(0, "prefix", &prefix,
- N_("path"),
- N_("alternative anchor for relative paths")),
- OPT_STRING(0, "path", &add_data.sm_path,
- N_("path"),
- N_("where the new submodule will be cloned to")),
- OPT_STRING(0, "name", &add_data.sm_name,
- N_("string"),
- N_("name of the new submodule")),
- OPT_STRING(0, "url", &add_data.realrepo,
- N_("string"),
- N_("url where to clone the submodule from")),
- OPT_STRING(0, "reference", &add_data.reference_path,
- N_("repo"),
- N_("reference repository")),
- OPT_BOOL(0, "dissociate", &dissociate,
- N_("use --reference only while cloning")),
- OPT_INTEGER(0, "depth", &add_data.depth,
- N_("depth for shallow clones")),
- OPT_BOOL(0, "progress", &progress,
- N_("force cloning progress")),
+ OPT_STRING('b', "branch", &add_data.branch, N_("branch"),
+ N_("branch of repository to add as submodule")),
OPT__FORCE(&force, N_("allow adding an otherwise ignored submodule path"),
PARSE_OPT_NOCOMPLETE),
- OPT__QUIET(&quiet, "suppress output for cloning a submodule"),
+ OPT__QUIET(&quiet, N_("print only error messages")),
+ OPT_BOOL(0, "progress", &progress, N_("force cloning progress")),
+ OPT_STRING(0, "reference", &add_data.reference_path, N_("repository"),
+ N_("reference repository")),
+ OPT_BOOL(0, "dissociate", &dissociate, N_("borrow the objects from reference repositories")),
+ OPT_STRING(0, "name", &add_data.sm_name, N_("name"),
+ N_("sets the submodule’s name to the given string "
+ "instead of defaulting to its path")),
+ OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")),
OPT_END()
};
const char *const usage[] = {
- N_("git submodule--helper add-clone [<options>...] "
- "--url <url> --path <path> --name <name>"),
+ N_("git submodule--helper add [<options>] [--] <repository> [<path>]"),
NULL
};
argc = parse_options(argc, argv, prefix, options, usage, 0);
- if (argc != 0)
+ if (!is_writing_gitmodules_ok())
+ die(_("please make sure that the .gitmodules file is in the working tree"));
+
+ if (prefix && *prefix &&
+ add_data.reference_path && !is_absolute_path(add_data.reference_path))
+ add_data.reference_path = xstrfmt("%s%s", prefix, add_data.reference_path);
+
+ if (argc == 0 || argc > 2)
usage_with_options(usage, options);
+ add_data.repo = argv[0];
+ if (argc == 1)
+ add_data.sm_path = git_url_basename(add_data.repo, 0, 0);
+ else
+ add_data.sm_path = xstrdup(argv[1]);
+
+ if (prefix && *prefix && !is_absolute_path(add_data.sm_path))
+ add_data.sm_path = xstrfmt("%s%s", prefix, add_data.sm_path);
+
+ if (starts_with_dot_dot_slash(add_data.repo) ||
+ starts_with_dot_slash(add_data.repo)) {
+ if (prefix)
+ die(_("Relative path can only be used from the toplevel "
+ "of the working tree"));
+
+ /* dereference source url relative to parent's url */
+ add_data.realrepo = resolve_relative_url(add_data.repo, NULL, 1);
+ } else if (is_dir_sep(add_data.repo[0]) || strchr(add_data.repo, ':')) {
+ add_data.realrepo = add_data.repo;
+ } else {
+ die(_("repo URL: '%s' must be absolute or begin with ./|../"),
+ add_data.repo);
+ }
+
+ /*
+ * normalize path:
+ * multiple //; leading ./; /./; /../;
+ */
+ normalize_path_copy(add_data.sm_path, add_data.sm_path);
+ strip_dir_trailing_slashes(add_data.sm_path);
+
+ die_on_index_match(add_data.sm_path, force);
+ die_on_repo_without_commits(add_data.sm_path);
+
+ if (!force) {
+ int exit_code = -1;
+ struct strbuf sb = STRBUF_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+ cp.no_stdout = 1;
+ strvec_pushl(&cp.args, "add", "--dry-run", "--ignore-missing",
+ "--no-warn-embedded-repo", add_data.sm_path, NULL);
+ if ((exit_code = pipe_command(&cp, NULL, 0, NULL, 0, &sb, 0))) {
+ strbuf_complete_line(&sb);
+ fputs(sb.buf, stderr);
+ free(add_data.sm_path);
+ return exit_code;
+ }
+ strbuf_release(&sb);
+ }
+
+ if (!add_data.sm_name)
+ add_data.sm_name = add_data.sm_path;
+
+ if (check_submodule_name(add_data.sm_name))
+ die(_("'%s' is not a valid submodule name"), add_data.sm_name);
+
add_data.prefix = prefix;
- add_data.progress = !!progress;
- add_data.dissociate = !!dissociate;
add_data.force = !!force;
add_data.quiet = !!quiet;
+ add_data.progress = !!progress;
+ add_data.dissociate = !!dissociate;
- if (add_submodule(&add_data))
+ if (add_submodule(&add_data)) {
+ free(add_data.sm_path);
return 1;
+ }
+ configure_added_submodule(&add_data);
+ free(add_data.sm_path);
return 0;
}
@@ -2948,12 +3357,12 @@ static struct cmd_struct commands[] = {
{"list", module_list, 0},
{"name", module_name, 0},
{"clone", module_clone, 0},
- {"add-clone", add_clone, 0},
+ {"add", module_add, SUPPORT_SUPER_PREFIX},
{"update-module-mode", module_update_module_mode, 0},
{"update-clone", update_clone, 0},
+ {"run-update-procedure", run_update_procedure, 0},
{"ensure-core-worktree", ensure_core_worktree, 0},
{"relative-path", resolve_relative_path, 0},
- {"resolve-relative-url", resolve_relative_url, 0},
{"resolve-relative-url-test", resolve_relative_url_test, 0},
{"foreach", module_foreach, SUPPORT_SUPER_PREFIX},
{"init", module_init, SUPPORT_SUPER_PREFIX},
diff --git a/builtin/update-ref.c b/builtin/update-ref.c
index 6029a80544..a84e7b47a2 100644
--- a/builtin/update-ref.c
+++ b/builtin/update-ref.c
@@ -302,6 +302,12 @@ static void parse_cmd_verify(struct ref_transaction *transaction,
strbuf_release(&err);
}
+static void report_ok(const char *command)
+{
+ fprintf(stdout, "%s: ok\n", command);
+ fflush(stdout);
+}
+
static void parse_cmd_option(struct ref_transaction *transaction,
const char *next, const char *end)
{
@@ -317,7 +323,7 @@ static void parse_cmd_start(struct ref_transaction *transaction,
{
if (*next != line_termination)
die("start: extra input: %s", next);
- puts("start: ok");
+ report_ok("start");
}
static void parse_cmd_prepare(struct ref_transaction *transaction,
@@ -328,7 +334,7 @@ static void parse_cmd_prepare(struct ref_transaction *transaction,
die("prepare: extra input: %s", next);
if (ref_transaction_prepare(transaction, &error))
die("prepare: %s", error.buf);
- puts("prepare: ok");
+ report_ok("prepare");
}
static void parse_cmd_abort(struct ref_transaction *transaction,
@@ -339,7 +345,7 @@ static void parse_cmd_abort(struct ref_transaction *transaction,
die("abort: extra input: %s", next);
if (ref_transaction_abort(transaction, &error))
die("abort: %s", error.buf);
- puts("abort: ok");
+ report_ok("abort");
}
static void parse_cmd_commit(struct ref_transaction *transaction,
@@ -350,7 +356,7 @@ static void parse_cmd_commit(struct ref_transaction *transaction,
die("commit: extra input: %s", next);
if (ref_transaction_commit(transaction, &error))
die("commit: %s", error.buf);
- puts("commit: ok");
+ report_ok("commit");
ref_transaction_free(transaction);
}
diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c
index 6da8fa2607..125af53885 100644
--- a/builtin/upload-pack.c
+++ b/builtin/upload-pack.c
@@ -16,16 +16,18 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix)
{
const char *dir;
int strict = 0;
- struct upload_pack_options opts = { 0 };
- struct serve_options serve_opts = SERVE_OPTIONS_INIT;
+ int advertise_refs = 0;
+ int stateless_rpc = 0;
+ int timeout = 0;
struct option options[] = {
- OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+ OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
N_("quit after a single request/response exchange")),
- OPT_BOOL(0, "advertise-refs", &opts.advertise_refs,
- N_("exit immediately after initial ref advertisement")),
+ OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs,
+ N_("serve up the info/refs for git-http-backend")),
+ OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"),
OPT_BOOL(0, "strict", &strict,
N_("do not try <directory>/.git/ if <directory> is no Git directory")),
- OPT_INTEGER(0, "timeout", &opts.timeout,
+ OPT_INTEGER(0, "timeout", &timeout,
N_("interrupt transfer after <n> seconds of inactivity")),
OPT_END()
};
@@ -38,9 +40,6 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix)
if (argc != 1)
usage_with_options(upload_pack_usage, options);
- if (opts.timeout)
- opts.daemon_mode = 1;
-
setup_path();
dir = argv[0];
@@ -50,21 +49,22 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix)
switch (determine_protocol_version_server()) {
case protocol_v2:
- serve_opts.advertise_capabilities = opts.advertise_refs;
- serve_opts.stateless_rpc = opts.stateless_rpc;
- serve(&serve_opts);
+ if (advertise_refs)
+ protocol_v2_advertise_capabilities();
+ else
+ protocol_v2_serve_loop(stateless_rpc);
break;
case protocol_v1:
/*
* v1 is just the original protocol with a version string,
* so just fall through after writing the version string.
*/
- if (opts.advertise_refs || !opts.stateless_rpc)
+ if (advertise_refs || !stateless_rpc)
packet_write_fmt(1, "version 1\n");
/* fallthrough */
case protocol_v0:
- upload_pack(&opts);
+ upload_pack(advertise_refs, stateless_rpc, timeout);
break;
case protocol_unknown_version:
BUG("unknown protocol version");
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 0d0a80da61..d22ece93e1 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -8,6 +8,7 @@
#include "branch.h"
#include "refs.h"
#include "run-command.h"
+#include "hook.h"
#include "sigchain.h"
#include "submodule.h"
#include "utf8.h"
diff --git a/bulk-checkin.c b/bulk-checkin.c
index b023d9959a..8785b2ac80 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -23,9 +23,25 @@ static struct bulk_checkin_state {
uint32_t nr_written;
} state;
+static void finish_tmp_packfile(struct strbuf *basename,
+ const char *pack_tmp_name,
+ struct pack_idx_entry **written_list,
+ uint32_t nr_written,
+ struct pack_idx_option *pack_idx_opts,
+ unsigned char hash[])
+{
+ char *idx_tmp_name = NULL;
+
+ stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written,
+ pack_idx_opts, hash, &idx_tmp_name);
+ rename_tmp_packfile_idx(basename, &idx_tmp_name);
+
+ free(idx_tmp_name);
+}
+
static void finish_bulk_checkin(struct bulk_checkin_state *state)
{
- struct object_id oid;
+ unsigned char hash[GIT_MAX_RAWSZ];
struct strbuf packname = STRBUF_INIT;
int i;
@@ -37,19 +53,20 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
+ finalize_hashfile(state->f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = finalize_hashfile(state->f, oid.hash, 0);
- fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
- state->nr_written, oid.hash,
+ int fd = finalize_hashfile(state->f, hash, 0);
+ fixup_pack_header_footer(fd, hash, state->pack_tmp_name,
+ state->nr_written, hash,
state->offset);
close(fd);
}
- strbuf_addf(&packname, "%s/pack/pack-", get_object_directory());
+ strbuf_addf(&packname, "%s/pack/pack-%s.", get_object_directory(),
+ hash_to_hex(hash));
finish_tmp_packfile(&packname, state->pack_tmp_name,
state->written, state->nr_written,
- &state->pack_idx_opts, oid.hash);
+ &state->pack_idx_opts, hash);
for (i = 0; i < state->nr_written; i++)
free(state->written[i]);
diff --git a/bundle.c b/bundle.c
index ab63f40226..a0bb687b0f 100644
--- a/bundle.c
+++ b/bundle.c
@@ -569,18 +569,18 @@ err:
}
int unbundle(struct repository *r, struct bundle_header *header,
- int bundle_fd, int flags)
+ int bundle_fd, struct strvec *extra_index_pack_args)
{
- const char *argv_index_pack[] = {"index-pack",
- "--fix-thin", "--stdin", NULL, NULL};
struct child_process ip = CHILD_PROCESS_INIT;
+ strvec_pushl(&ip.args, "index-pack", "--fix-thin", "--stdin", NULL);
- if (flags & BUNDLE_VERBOSE)
- argv_index_pack[3] = "-v";
+ if (extra_index_pack_args) {
+ strvec_pushv(&ip.args, extra_index_pack_args->v);
+ strvec_clear(extra_index_pack_args);
+ }
if (verify_bundle(r, header, 0))
return -1;
- ip.argv = argv_index_pack;
ip.in = bundle_fd;
ip.no_stdout = 1;
ip.git_cmd = 1;
diff --git a/bundle.h b/bundle.h
index 1927d8cd6a..06009fe6b1 100644
--- a/bundle.h
+++ b/bundle.h
@@ -26,9 +26,19 @@ int create_bundle(struct repository *r, const char *path,
int argc, const char **argv, struct strvec *pack_options,
int version);
int verify_bundle(struct repository *r, struct bundle_header *header, int verbose);
-#define BUNDLE_VERBOSE 1
+
+/**
+ * Unbundle after reading the header with read_bundle_header().
+ *
+ * We'll invoke "git index-pack --stdin --fix-thin" for you on the
+ * provided `bundle_fd` from read_bundle_header().
+ *
+ * Provide "extra_index_pack_args" to pass any extra arguments
+ * (e.g. "-v" for verbose/progress), NULL otherwise. The provided
+ * "extra_index_pack_args" (if any) will be strvec_clear()'d for you.
+ */
int unbundle(struct repository *r, struct bundle_header *header,
- int bundle_fd, int flags);
+ int bundle_fd, struct strvec *extra_index_pack_args);
int list_bundle_refs(struct bundle_header *header,
int argc, const char **argv);
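As a usage sketch of the new unbundle() signature (hypothetical call site and variable names; the real callers are updated elsewhere in this series), a caller that wants index-pack progress output would now do something like:

	struct strvec extra = STRVEC_INIT;

	if (progress)
		strvec_push(&extra, "-v");

	/*
	 * "header" and "bundle_fd" are assumed to come from
	 * read_bundle_header(); pass NULL instead of &extra when no
	 * extra index-pack arguments are wanted.  unbundle() calls
	 * strvec_clear() on "extra" for us.
	 */
	if (unbundle(the_repository, &header, bundle_fd,
		     progress ? &extra : NULL))
		die(_("could not unbundle"));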
diff --git a/cache.h b/cache.h
index d23de69368..d092820c94 100644
--- a/cache.h
+++ b/cache.h
@@ -958,7 +958,6 @@ extern char *apply_default_ignorewhitespace;
extern const char *git_attributes_file;
extern const char *git_hooks_path;
extern int zlib_compression_level;
-extern int core_compression_level;
extern int pack_compression_level;
extern size_t packed_git_window_size;
extern size_t packed_git_limit;
@@ -996,14 +995,6 @@ extern int core_apply_sparse_checkout;
extern int core_sparse_checkout_cone;
/*
- * Include broken refs in all ref iterations, which will
- * generally choke dangerous operations rather than letting
- * them silently proceed without taking the broken ref into
- * account.
- */
-extern int ref_paranoia;
-
-/*
* Returns the boolean value of $GIT_OPTIONAL_LOCKS (or the default value).
*/
int use_optional_locks(void);
@@ -1211,49 +1202,6 @@ enum scld_error safe_create_leading_directories(char *path);
enum scld_error safe_create_leading_directories_const(const char *path);
enum scld_error safe_create_leading_directories_no_share(char *path);
-/*
- * Callback function for raceproof_create_file(). This function is
- * expected to do something that makes dirname(path) permanent despite
- * the fact that other processes might be cleaning up empty
- * directories at the same time. Usually it will create a file named
- * path, but alternatively it could create another file in that
- * directory, or even chdir() into that directory. The function should
- * return 0 if the action was completed successfully. On error, it
- * should return a nonzero result and set errno.
- * raceproof_create_file() treats two errno values specially:
- *
- * - ENOENT -- dirname(path) does not exist. In this case,
- * raceproof_create_file() tries creating dirname(path)
- * (and any parent directories, if necessary) and calls
- * the function again.
- *
- * - EISDIR -- the file already exists and is a directory. In this
- * case, raceproof_create_file() removes the directory if
- * it is empty (and recursively any empty directories that
- * it contains) and calls the function again.
- *
- * Any other errno causes raceproof_create_file() to fail with the
- * callback's return value and errno.
- *
- * Obviously, this function should be OK with being called again if it
- * fails with ENOENT or EISDIR. In other scenarios it will not be
- * called again.
- */
-typedef int create_file_fn(const char *path, void *cb);
-
-/*
- * Create a file in dirname(path) by calling fn, creating leading
- * directories if necessary. Retry a few times in case we are racing
- * with another process that is trying to clean up the directory that
- * contains path. See the documentation for create_file_fn for more
- * details.
- *
- * Return the value and set the errno that resulted from the most
- * recent call of fn. fn is always called at least once, and will be
- * called more than once if it returns ENOENT or EISDIR.
- */
-int raceproof_create_file(const char *path, create_file_fn fn, void *cb);
-
int mkdir_in_gitdir(const char *path);
char *interpolate_path(const char *path, int real_home);
/* NEEDSWORK: remove this synonym once in-flight topics have migrated */
@@ -1299,6 +1247,13 @@ int looks_like_command_line_option(const char *str);
/**
* Return a newly allocated string with the evaluation of
+ * "$XDG_CONFIG_HOME/$subdir/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise
+ * "$HOME/.config/$subdir/$filename". Return NULL upon error.
+ */
+char *xdg_config_home_for(const char *subdir, const char *filename);
+
+/**
+ * Return a newly allocated string with the evaluation of
* "$XDG_CONFIG_HOME/git/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise
* "$HOME/.config/git/$filename". Return NULL upon error.
*/
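A quick illustration of the new helper (hedged sketch; the subdirectory and filename are only examples, and git_attributes_file is used merely as a plausible consumer):

	char *path = xdg_config_home_for("git", "attributes");

	if (path) {
		/*
		 * "$XDG_CONFIG_HOME/git/attributes", or
		 * "$HOME/.config/git/attributes" when $XDG_CONFIG_HOME
		 * is unset or empty; NULL if no usable home directory
		 * was found.
		 */
		git_attributes_file = path;
	}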
@@ -1662,7 +1617,9 @@ struct cache_def {
int track_flags;
int prefix_len_stat_func;
};
-#define CACHE_DEF_INIT { STRBUF_INIT, 0, 0, 0 }
+#define CACHE_DEF_INIT { \
+ .path = STRBUF_INIT, \
+}
static inline void cache_def_clear(struct cache_def *cache)
{
strbuf_release(&cache->path);
@@ -1719,13 +1676,6 @@ int update_server_info(int);
const char *get_log_output_encoding(void);
const char *get_commit_output_encoding(void);
-/*
- * This is a hack for test programs like test-dump-untracked-cache to
- * ensure that they do not modify the untracked cache when reading it.
- * Do not use it otherwise!
- */
-extern int ignore_untracked_cache_config;
-
int committer_ident_sufficiently_given(void);
int author_ident_sufficiently_given(void);
@@ -1738,6 +1688,8 @@ extern const char *git_mailmap_blob;
void maybe_flush_or_die(FILE *, const char *);
__attribute__((format (printf, 2, 3)))
void fprintf_or_die(FILE *, const char *fmt, ...);
+void fwrite_or_die(FILE *f, const void *buf, size_t count);
+void fflush_or_die(FILE *f);
#define COPY_READ_ERROR (-2)
#define COPY_WRITE_ERROR (-3)
diff --git a/cbtree.h b/cbtree.h
index a04a312c3f..dedbb8e2a4 100644
--- a/cbtree.h
+++ b/cbtree.h
@@ -37,11 +37,12 @@ enum cb_next {
CB_BREAK = 1
};
-#define CBTREE_INIT { .root = NULL }
+#define CBTREE_INIT { 0 }
static inline void cb_init(struct cb_tree *t)
{
- t->root = NULL;
+ struct cb_tree blank = CBTREE_INIT;
+ memcpy(t, &blank, sizeof(*t));
}
struct cb_node *cb_lookup(struct cb_tree *, const uint8_t *k, size_t klen);
diff --git a/check_bindir b/check_bindir
deleted file mode 100755
index 623eadcbb7..0000000000
--- a/check_bindir
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-bindir="$1"
-gitexecdir="$2"
-gitcmd="$3"
-if test "$bindir" != "$gitexecdir" && test -x "$gitcmd"
-then
- echo
- echo "!! You have installed git-* commands to new gitexecdir."
- echo "!! Old version git-* commands still remain in bindir."
- echo "!! Mixing two versions of Git will lead to problems."
- echo "!! Please remove old version commands in bindir now."
- echo
-fi
diff --git a/checkout.c b/checkout.c
index 6586e30ca5..2e39dae684 100644
--- a/checkout.c
+++ b/checkout.c
@@ -14,7 +14,7 @@ struct tracking_name_data {
struct object_id *default_dst_oid;
};
-#define TRACKING_NAME_DATA_INIT { NULL, NULL, NULL, 0, NULL, NULL, NULL }
+#define TRACKING_NAME_DATA_INIT { 0 }
static int check_tracking_name(struct remote *remote, void *cb_data)
{
diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh
index 5772081b6e..1d0e48f451 100755
--- a/ci/install-dependencies.sh
+++ b/ci/install-dependencies.sh
@@ -12,7 +12,7 @@ UBUNTU_COMMON_PKGS="make libssl-dev libcurl4-openssl-dev libexpat-dev
libemail-valid-perl libio-socket-ssl-perl libnet-smtp-ssl-perl"
case "$jobname" in
-linux-clang|linux-gcc)
+linux-clang|linux-gcc|linux-leaks)
sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
sudo apt-get -q update
sudo apt-get -q -y install language-pack-is libsvn-perl apache2 \
diff --git a/ci/lib.sh b/ci/lib.sh
index 476c3f369f..82cb17f8ee 100755
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -183,7 +183,7 @@ export GIT_TEST_CLONE_2GB=true
export SKIP_DASHED_BUILT_INS=YesPlease
case "$jobname" in
-linux-clang|linux-gcc)
+linux-clang|linux-gcc|linux-leaks)
if [ "$jobname" = linux-gcc ]
then
export CC=gcc-8
@@ -233,4 +233,11 @@ linux-musl)
;;
esac
+case "$jobname" in
+linux-leaks)
+ export SANITIZE=leak
+ export GIT_TEST_PASSING_SANITIZE_LEAK=true
+ ;;
+esac
+
MAKEFLAGS="$MAKEFLAGS CC=${CC:-cc}"
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index f3aba5d6cb..cc62616d80 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -28,6 +28,7 @@ linux-gcc)
export GIT_TEST_COMMIT_GRAPH=1
export GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=1
export GIT_TEST_MULTI_PACK_INDEX=1
+ export GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=1
export GIT_TEST_ADD_I_USE_BUILTIN=1
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master
export GIT_TEST_WRITE_REV_INDEX=1
diff --git a/commit-graph.c b/commit-graph.c
index 00614acd65..2706683acf 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -713,6 +713,7 @@ static void close_commit_graph_one(struct commit_graph *g)
if (!g)
return;
+ clear_commit_graph_data_slab(&commit_graph_data_slab);
close_commit_graph_one(g->base_graph);
free_commit_graph(g);
}
@@ -2125,7 +2126,7 @@ static void sort_and_scan_merged_commits(struct write_commit_graph_context *ctx)
ctx->num_extra_edges = 0;
for (i = 0; i < ctx->commits.nr; i++) {
- display_progress(ctx->progress, i);
+ display_progress(ctx->progress, i + 1);
if (i && oideq(&ctx->commits.list[i - 1]->object.oid,
&ctx->commits.list[i]->object.oid)) {
diff --git a/compat/linux/procinfo.c b/compat/linux/procinfo.c
index 578fed4cd3..bc2f9382a1 100644
--- a/compat/linux/procinfo.c
+++ b/compat/linux/procinfo.c
@@ -4,51 +4,172 @@
#include "strvec.h"
#include "trace2.h"
-static void get_ancestry_names(struct strvec *names)
+/*
+ * We need more complex parsing in stat_parent_pid() and
+ * parse_proc_stat() below than a dumb fscanf(). That's because while
+ * the statcomm field is surrounded by parentheses, the process itself
+ * is free to insert any arbitrary byte sequence into its name. That
+ * can include newlines, spaces, closing parentheses etc.
+ *
+ * See do_task_stat() in fs/proc/array.c in linux.git; this is in
+ * contrast with the escaped version of the name found in
+ * /proc/%d/status.
+ *
+ * So instead of using fscanf() we'll read N bytes from it, look for
+ * the first "(", and then the last ")", anything in-between is our
+ * process name.
+ *
+ * How much N do we need? On Linux /proc/sys/kernel/pid_max is 2^15 by
+ * default, but it can be raised to values of up to 2^22. So
+ * that's 7 digits for a PID. We have 2 PIDs in the first four fields
+ * we're interested in, so 2 * 7 = 14.
+ *
+ * We then have 3 spaces between those four values, and we'd like to
+ * get to the space between the 4th and the 5th (the "pgrp" field) to
+ * make sure we read the entire "ppid" field. So that brings us up to
+ * 14 + 3 + 1 = 18. Add the two parentheses around the "comm" value
+ * and it's 20. The "state" value itself is then one character (now at
+ * 21).
+ *
+ * Finally the maximum length of the "comm" name itself is 15
+ * characters, e.g. a setting of "123456789abcdefg" will be truncated
+ * to "123456789abcdef". See PR_SET_NAME in prctl(2). So all in all
+ * we'd need to read 21 + 15 = 36 bytes.
+ *
+ * Let's just read 2^6 (64) instead for good measure. If PID_MAX ever
+ * grows past 2^22 we'll be future-proof. We'll then anchor at the
+ * last ")" we find to locate the parent PID.
+ */
+#define STAT_PARENT_PID_READ_N 64
+
+static int parse_proc_stat(struct strbuf *sb, struct strbuf *name,
+ int *statppid)
{
+ const char *comm_lhs = strchr(sb->buf, '(');
+ const char *comm_rhs = strrchr(sb->buf, ')');
+ const char *ppid_lhs, *ppid_rhs;
+ char *p;
+ pid_t ppid;
+
+ if (!comm_lhs || !comm_rhs)
+ goto bad_kernel;
+
+ /*
+ * We're at the ")", that's followed by " X ", where X is a
+ * single "state" character. So advance by 4 bytes.
+ */
+ ppid_lhs = comm_rhs + 4;
+
+ /*
+ * Read until the space between the "ppid" and "pgrp" fields
+ * to make sure we're anchored after the untruncated "ppid"
+ * field.
+ */
+ ppid_rhs = strchr(ppid_lhs, ' ');
+ if (!ppid_rhs)
+ goto bad_kernel;
+
+ ppid = strtol(ppid_lhs, &p, 10);
+ if (ppid_rhs == p) {
+ const char *comm = comm_lhs + 1;
+ size_t commlen = comm_rhs - comm;
+
+ strbuf_add(name, comm, commlen);
+ *statppid = ppid;
+
+ return 0;
+ }
+
+bad_kernel:
/*
- * NEEDSWORK: We could gather the entire pstree into an array to match
- * functionality with compat/win32/trace2_win32_process_info.c.
- * To do so, we may want to examine /proc/<pid>/stat. For now, just
- * gather the immediate parent name which is readily accessible from
- * /proc/$(getppid())/comm.
+ * We were able to read our STAT_PARENT_PID_READ_N bytes from
+ * /proc/%d/stat, but the content is bad. Broken kernel?
+ * Should not happen, but handle it gracefully.
*/
+ return -1;
+}
+
+static int stat_parent_pid(pid_t pid, struct strbuf *name, int *statppid)
+{
struct strbuf procfs_path = STRBUF_INIT;
- struct strbuf name = STRBUF_INIT;
+ struct strbuf sb = STRBUF_INIT;
+ FILE *fp;
+ int ret = -1;
/* try to use procfs if it's present. */
- strbuf_addf(&procfs_path, "/proc/%d/comm", getppid());
- if (strbuf_read_file(&name, procfs_path.buf, 0)) {
- strbuf_release(&procfs_path);
- strbuf_trim_trailing_newline(&name);
- strvec_push(names, strbuf_detach(&name, NULL));
- }
+ strbuf_addf(&procfs_path, "/proc/%d/stat", pid);
+ fp = fopen(procfs_path.buf, "r");
+ if (!fp)
+ goto cleanup;
+
+ /*
+ * We could be more strict here and assert that we read at
+ * least STAT_PARENT_PID_READ_N. My reading of procfs(5) is
+ * that on any modern kernel (at least since 2.6.0 released in
+ * 2003) even if all the mandatory numeric fields were zero'd
+ * out we'd get at least 100 bytes, but let's just check that
+ * we got anything at all and trust the parse_proc_stat()
+ * function to handle its "Bad Kernel?" error checking.
+ */
+ if (!strbuf_fread(&sb, STAT_PARENT_PID_READ_N, fp))
+ goto cleanup;
+ if (parse_proc_stat(&sb, name, statppid) < 0)
+ goto cleanup;
+
+ ret = 0;
+cleanup:
+ if (fp)
+ fclose(fp);
+ strbuf_release(&procfs_path);
+ strbuf_release(&sb);
+
+ return ret;
+}
+
+static void push_ancestry_name(struct strvec *names, pid_t pid)
+{
+ struct strbuf name = STRBUF_INIT;
+ int ppid;
+
+ if (stat_parent_pid(pid, &name, &ppid) < 0)
+ goto cleanup;
+
+ strvec_push(names, name.buf);
+
+ /*
+ * Both errors and reaching the end of the process chain are
+ * reported as fields of 0 by proc(5)
+ */
+ if (ppid)
+ push_ancestry_name(names, ppid);
+cleanup:
+ strbuf_release(&name);
return;
- /* NEEDSWORK: add non-procfs-linux implementations here */
}
void trace2_collect_process_info(enum trace2_process_info_reason reason)
{
- if (!trace2_is_enabled())
- return;
+ struct strvec names = STRVEC_INIT;
- /* someday we may want to write something extra here, but not today */
- if (reason == TRACE2_PROCESS_INFO_EXIT)
+ if (!trace2_is_enabled())
return;
- if (reason == TRACE2_PROCESS_INFO_STARTUP) {
+ switch (reason) {
+ case TRACE2_PROCESS_INFO_EXIT:
/*
- * NEEDSWORK: we could do the entire ptree in an array instead,
- * see compat/win32/trace2_win32_process_info.c.
+ * The Windows version of this calls its
+ * get_peak_memory_info() here. We may want to insert
+ * similar process-end statistics here in the future.
*/
- struct strvec names = STRVEC_INIT;
-
- get_ancestry_names(&names);
+ break;
+ case TRACE2_PROCESS_INFO_STARTUP:
+ push_ancestry_name(&names, getppid());
if (names.nr)
trace2_cmd_ancestry(names.v);
strvec_clear(&names);
+ break;
}
return;
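To make the parsing rules above concrete, here is a hypothetical stat line and how parse_proc_stat() would slice it (the values are invented for illustration):

	/*
	 * Suppose the 64-byte read from /proc/4321/stat starts with:
	 *
	 *     4321 (git (qu)ux) S 1234 4321 4321 0 -1 ...
	 *
	 * comm_lhs points at the first "(" and comm_rhs at the *last*
	 * ")", so the extracted name is "git (qu)ux" even though the
	 * name itself contains parentheses.  ppid_lhs is comm_rhs + 4
	 * (skipping " S ", the one-character state field), ppid_rhs is
	 * the next space, and strtol() yields a parent PID of 1234.
	 */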
diff --git a/compat/nedmalloc/nedmalloc.c b/compat/nedmalloc/nedmalloc.c
index 1cc31c3502..edb438a777 100644
--- a/compat/nedmalloc/nedmalloc.c
+++ b/compat/nedmalloc/nedmalloc.c
@@ -510,7 +510,7 @@ static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *me
assert(idx<=THREADCACHEMAXBINS);
if(tck==*binsptr)
{
- fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", tck);
+ fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", (void *)tck);
abort();
}
#ifdef FULLSANITYCHECKS
diff --git a/compat/simple-ipc/ipc-unix-socket.c b/compat/simple-ipc/ipc-unix-socket.c
index 1927e6ef4b..4e28857a0a 100644
--- a/compat/simple-ipc/ipc-unix-socket.c
+++ b/compat/simple-ipc/ipc-unix-socket.c
@@ -168,7 +168,8 @@ void ipc_client_close_connection(struct ipc_client_connection *connection)
int ipc_client_send_command_to_connection(
struct ipc_client_connection *connection,
- const char *message, struct strbuf *answer)
+ const char *message, size_t message_len,
+ struct strbuf *answer)
{
int ret = 0;
@@ -176,7 +177,7 @@ int ipc_client_send_command_to_connection(
trace2_region_enter("ipc-client", "send-command", NULL);
- if (write_packetized_from_buf_no_flush(message, strlen(message),
+ if (write_packetized_from_buf_no_flush(message, message_len,
connection->fd) < 0 ||
packet_flush_gently(connection->fd) < 0) {
ret = error(_("could not send IPC command"));
@@ -197,7 +198,8 @@ done:
int ipc_client_send_command(const char *path,
const struct ipc_client_connect_options *options,
- const char *message, struct strbuf *answer)
+ const char *message, size_t message_len,
+ struct strbuf *answer)
{
int ret = -1;
enum ipc_active_state state;
@@ -208,7 +210,9 @@ int ipc_client_send_command(const char *path,
if (state != IPC_STATE__LISTENING)
return ret;
- ret = ipc_client_send_command_to_connection(connection, message, answer);
+ ret = ipc_client_send_command_to_connection(connection,
+ message, message_len,
+ answer);
ipc_client_close_connection(connection);
@@ -503,7 +507,7 @@ static int worker_thread__do_io(
if (ret >= 0) {
ret = worker_thread_data->server_data->application_cb(
worker_thread_data->server_data->application_data,
- buf.buf, do_io_reply_callback, &reply_data);
+ buf.buf, buf.len, do_io_reply_callback, &reply_data);
packet_flush_gently(reply_data.fd);
}
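With the new message_len parameter the caller passes the payload length explicitly, so the message no longer has to be a NUL-terminated string. A hedged sketch (option setup and the strbuf "buf" holding the payload are assumed):

	struct ipc_client_connect_options options = { 0 };
	struct strbuf answer = STRBUF_INIT;

	options.wait_if_busy = 1;
	if (ipc_client_send_command(path, &options,
				    buf.buf, buf.len, &answer) < 0)
		die(_("could not send IPC command"));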
diff --git a/compat/simple-ipc/ipc-win32.c b/compat/simple-ipc/ipc-win32.c
index 8dc7bda087..20ea7b65e0 100644
--- a/compat/simple-ipc/ipc-win32.c
+++ b/compat/simple-ipc/ipc-win32.c
@@ -3,6 +3,8 @@
#include "strbuf.h"
#include "pkt-line.h"
#include "thread-utils.h"
+#include "accctrl.h"
+#include "aclapi.h"
#ifndef SUPPORTS_SIMPLE_IPC
/*
@@ -49,6 +51,9 @@ static enum ipc_active_state get_active_state(wchar_t *pipe_path)
if (GetLastError() == ERROR_FILE_NOT_FOUND)
return IPC_STATE__PATH_NOT_FOUND;
+ trace2_data_intmax("ipc-debug", NULL, "getstate/waitpipe/gle",
+ (intmax_t)GetLastError());
+
return IPC_STATE__OTHER_ERROR;
}
@@ -109,9 +114,15 @@ static enum ipc_active_state connect_to_server(
t_start_ms = (DWORD)(getnanotime() / 1000000);
if (!WaitNamedPipeW(wpath, timeout_ms)) {
- if (GetLastError() == ERROR_SEM_TIMEOUT)
+ DWORD gleWait = GetLastError();
+
+ if (gleWait == ERROR_SEM_TIMEOUT)
return IPC_STATE__NOT_LISTENING;
+ trace2_data_intmax("ipc-debug", NULL,
+ "connect/waitpipe/gle",
+ (intmax_t)gleWait);
+
return IPC_STATE__OTHER_ERROR;
}
@@ -133,17 +144,31 @@ static enum ipc_active_state connect_to_server(
break; /* try again */
default:
+ trace2_data_intmax("ipc-debug", NULL,
+ "connect/createfile/gle",
+ (intmax_t)gle);
+
return IPC_STATE__OTHER_ERROR;
}
}
if (!SetNamedPipeHandleState(hPipe, &mode, NULL, NULL)) {
+ gle = GetLastError();
+ trace2_data_intmax("ipc-debug", NULL,
+ "connect/setpipestate/gle",
+ (intmax_t)gle);
+
CloseHandle(hPipe);
return IPC_STATE__OTHER_ERROR;
}
*pfd = _open_osfhandle((intptr_t)hPipe, O_RDWR|O_BINARY);
if (*pfd < 0) {
+ gle = GetLastError();
+ trace2_data_intmax("ipc-debug", NULL,
+ "connect/openosfhandle/gle",
+ (intmax_t)gle);
+
CloseHandle(hPipe);
return IPC_STATE__OTHER_ERROR;
}
@@ -208,7 +233,8 @@ void ipc_client_close_connection(struct ipc_client_connection *connection)
int ipc_client_send_command_to_connection(
struct ipc_client_connection *connection,
- const char *message, struct strbuf *answer)
+ const char *message, size_t message_len,
+ struct strbuf *answer)
{
int ret = 0;
@@ -216,7 +242,7 @@ int ipc_client_send_command_to_connection(
trace2_region_enter("ipc-client", "send-command", NULL);
- if (write_packetized_from_buf_no_flush(message, strlen(message),
+ if (write_packetized_from_buf_no_flush(message, message_len,
connection->fd) < 0 ||
packet_flush_gently(connection->fd) < 0) {
ret = error(_("could not send IPC command"));
@@ -239,7 +265,8 @@ done:
int ipc_client_send_command(const char *path,
const struct ipc_client_connect_options *options,
- const char *message, struct strbuf *response)
+ const char *message, size_t message_len,
+ struct strbuf *response)
{
int ret = -1;
enum ipc_active_state state;
@@ -250,7 +277,9 @@ int ipc_client_send_command(const char *path,
if (state != IPC_STATE__LISTENING)
return ret;
- ret = ipc_client_send_command_to_connection(connection, message, response);
+ ret = ipc_client_send_command_to_connection(connection,
+ message, message_len,
+ response);
ipc_client_close_connection(connection);
@@ -458,7 +487,7 @@ static int do_io(struct ipc_server_thread_data *server_thread_data)
if (ret >= 0) {
ret = server_thread_data->server_data->application_cb(
server_thread_data->server_data->application_data,
- buf.buf, do_io_reply_callback, &reply_data);
+ buf.buf, buf.len, do_io_reply_callback, &reply_data);
packet_flush_gently(reply_data.fd);
@@ -565,11 +594,132 @@ finished:
return NULL;
}
+/*
+ * We need to build a Windows "SECURITY_ATTRIBUTES" object and use it
+ * to apply an ACL when we create the initial instance of the Named
+ * Pipe. The construction is somewhat involved and consists of
+ * several sequential steps and intermediate objects.
+ *
+ * We use this structure to hold these intermediate pointers so that
+ * we can free them as a group. (It is unclear from the docs whether
+ * some of these intermediate pointers can be freed before we are
+ * finished using the "lpSA" member.)
+ */
+struct my_sa_data
+{
+ PSID pEveryoneSID;
+ PACL pACL;
+ PSECURITY_DESCRIPTOR pSD;
+ LPSECURITY_ATTRIBUTES lpSA;
+};
+
+static void init_sa(struct my_sa_data *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+static void release_sa(struct my_sa_data *d)
+{
+ if (d->pEveryoneSID)
+ FreeSid(d->pEveryoneSID);
+ if (d->pACL)
+ LocalFree(d->pACL);
+ if (d->pSD)
+ LocalFree(d->pSD);
+ if (d->lpSA)
+ LocalFree(d->lpSA);
+
+ memset(d, 0, sizeof(*d));
+}
+
+/*
+ * Create SECURITY_ATTRIBUTES to apply to the initial named pipe. The
+ * creator of the first server instance gets to set the ACLs on it.
+ *
+ * We allow the well-known group `EVERYONE` to have read+write access
+ * to the named pipe so that clients can send queries to the daemon
+ * and receive the response.
+ *
+ * Normally, this is not necessary since the daemon is usually
+ * automatically started by a foreground command like `git status`,
+ * but in those cases where an elevated Git command started the daemon
+ * (such that the daemon itself runs with elevation), we need to add
+ * the ACL so that non-elevated commands can write to it.
+ *
+ * The following document was helpful:
+ * https://docs.microsoft.com/en-us/windows/win32/secauthz/creating-a-security-descriptor-for-a-new-object-in-c--
+ *
+ * Returns d->lpSA set to a SA or NULL.
+ */
+static LPSECURITY_ATTRIBUTES get_sa(struct my_sa_data *d)
+{
+ SID_IDENTIFIER_AUTHORITY sid_auth_world = SECURITY_WORLD_SID_AUTHORITY;
+#define NR_EA (1)
+ EXPLICIT_ACCESS ea[NR_EA];
+ DWORD dwResult;
+
+ if (!AllocateAndInitializeSid(&sid_auth_world, 1,
+ SECURITY_WORLD_RID, 0,0,0,0,0,0,0,
+ &d->pEveryoneSID)) {
+ DWORD gle = GetLastError();
+ trace2_data_intmax("ipc-debug", NULL, "alloc-world-sid/gle",
+ (intmax_t)gle);
+ goto fail;
+ }
+
+ memset(ea, 0, NR_EA * sizeof(EXPLICIT_ACCESS));
+
+ ea[0].grfAccessPermissions = GENERIC_READ | GENERIC_WRITE;
+ ea[0].grfAccessMode = SET_ACCESS;
+ ea[0].grfInheritance = NO_INHERITANCE;
+ ea[0].Trustee.MultipleTrusteeOperation = NO_MULTIPLE_TRUSTEE;
+ ea[0].Trustee.TrusteeForm = TRUSTEE_IS_SID;
+ ea[0].Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
+ ea[0].Trustee.ptstrName = (LPTSTR)d->pEveryoneSID;
+
+ dwResult = SetEntriesInAcl(NR_EA, ea, NULL, &d->pACL);
+ if (dwResult != ERROR_SUCCESS) {
+ DWORD gle = GetLastError();
+ trace2_data_intmax("ipc-debug", NULL, "set-acl-entry/gle",
+ (intmax_t)gle);
+ trace2_data_intmax("ipc-debug", NULL, "set-acl-entry/dw",
+ (intmax_t)dwResult);
+ goto fail;
+ }
+
+ d->pSD = (PSECURITY_DESCRIPTOR)LocalAlloc(
+ LPTR, SECURITY_DESCRIPTOR_MIN_LENGTH);
+ if (!InitializeSecurityDescriptor(d->pSD, SECURITY_DESCRIPTOR_REVISION)) {
+ DWORD gle = GetLastError();
+ trace2_data_intmax("ipc-debug", NULL, "init-sd/gle", (intmax_t)gle);
+ goto fail;
+ }
+
+ if (!SetSecurityDescriptorDacl(d->pSD, TRUE, d->pACL, FALSE)) {
+ DWORD gle = GetLastError();
+ trace2_data_intmax("ipc-debug", NULL, "set-sd-dacl/gle", (intmax_t)gle);
+ goto fail;
+ }
+
+ d->lpSA = (LPSECURITY_ATTRIBUTES)LocalAlloc(LPTR, sizeof(SECURITY_ATTRIBUTES));
+ d->lpSA->nLength = sizeof(SECURITY_ATTRIBUTES);
+ d->lpSA->lpSecurityDescriptor = d->pSD;
+ d->lpSA->bInheritHandle = FALSE;
+
+ return d->lpSA;
+
+fail:
+ release_sa(d);
+ return NULL;
+}
+
static HANDLE create_new_pipe(wchar_t *wpath, int is_first)
{
HANDLE hPipe;
DWORD dwOpenMode, dwPipeMode;
- LPSECURITY_ATTRIBUTES lpsa = NULL;
+ struct my_sa_data my_sa_data;
+
+ init_sa(&my_sa_data);
dwOpenMode = PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND |
FILE_FLAG_OVERLAPPED;
@@ -585,20 +735,15 @@ static HANDLE create_new_pipe(wchar_t *wpath, int is_first)
* set the ACL / Security Attributes on the named
* pipe; subsequent instances inherit and cannot
* change them.
- *
- * TODO Should we allow the application layer to
- * specify security attributes, such as `LocalService`
- * or `LocalSystem`, when we create the named pipe?
- * This question is probably not important when the
- * daemon is started by a foreground user process and
- * only needs to talk to the current user, but may be
- * if the daemon is run via the Control Panel as a
- * System Service.
*/
+ get_sa(&my_sa_data);
}
hPipe = CreateNamedPipeW(wpath, dwOpenMode, dwPipeMode,
- PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0, lpsa);
+ PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0,
+ my_sa_data.lpSA);
+
+ release_sa(&my_sa_data);
return hPipe;
}
diff --git a/compat/vcbuild/README b/compat/vcbuild/README
index 51fb083dbb..29ec1d0f10 100644
--- a/compat/vcbuild/README
+++ b/compat/vcbuild/README
@@ -92,7 +92,7 @@ The Steps of Build Git with VS2008
the git operations.
3. Inside Git's directory run the command:
- make command-list.h config-list.h
+ make generated-hdrs
to generate the header file needed to compile git.
4. Then either build Git with the GNU Make Makefile in the Git projects
diff --git a/compat/win32/lazyload.h b/compat/win32/lazyload.h
index 9e631c8593..2b3637135f 100644
--- a/compat/win32/lazyload.h
+++ b/compat/win32/lazyload.h
@@ -15,10 +15,12 @@
* source, target);
*/
+typedef void (*FARVOIDPROC)(void);
+
struct proc_addr {
const char *const dll;
const char *const function;
- FARPROC pfunction;
+ FARVOIDPROC pfunction;
unsigned initialized : 1;
};
@@ -26,7 +28,8 @@ struct proc_addr {
#define DECLARE_PROC_ADDR(dll, rettype, function, ...) \
static struct proc_addr proc_addr_##function = \
{ #dll, #function, NULL, 0 }; \
- static rettype (WINAPI *function)(__VA_ARGS__)
+ typedef rettype (WINAPI *proc_type_##function)(__VA_ARGS__); \
+ static proc_type_##function function
/*
* Loads a function from a DLL (once-only).
@@ -35,9 +38,9 @@ struct proc_addr {
* This function is not thread-safe.
*/
#define INIT_PROC_ADDR(function) \
- (function = get_proc_addr(&proc_addr_##function))
+ (function = (proc_type_##function)get_proc_addr(&proc_addr_##function))
-static inline void *get_proc_addr(struct proc_addr *proc)
+static inline FARVOIDPROC get_proc_addr(struct proc_addr *proc)
{
/* only do this once */
if (!proc->initialized) {
@@ -46,7 +49,8 @@ static inline void *get_proc_addr(struct proc_addr *proc)
hnd = LoadLibraryExA(proc->dll, NULL,
LOAD_LIBRARY_SEARCH_SYSTEM32);
if (hnd)
- proc->pfunction = GetProcAddress(hnd, proc->function);
+ proc->pfunction = (FARVOIDPROC)GetProcAddress(hnd,
+ proc->function);
}
/* set ENOSYS if DLL or function was not found */
if (!proc->pfunction)
diff --git a/config.c b/config.c
index cb4a8058bf..2dcbe901b6 100644
--- a/config.c
+++ b/config.c
@@ -76,7 +76,6 @@ static struct key_value_info *current_config_kvi;
*/
static enum config_scope current_parsing_scope;
-static int core_compression_seen;
static int pack_compression_seen;
static int zlib_compression_seen;
@@ -426,7 +425,7 @@ static inline int iskeychar(int c)
* baselen - pointer to size_t which will hold the length of the
* section + subsection part, can be NULL
*/
-static int git_config_parse_key_1(const char *key, char **store_key, size_t *baselen_, int quiet)
+int git_config_parse_key(const char *key, char **store_key, size_t *baselen_)
{
size_t i, baselen;
int dot;
@@ -438,14 +437,12 @@ static int git_config_parse_key_1(const char *key, char **store_key, size_t *bas
*/
if (last_dot == NULL || last_dot == key) {
- if (!quiet)
- error(_("key does not contain a section: %s"), key);
+ error(_("key does not contain a section: %s"), key);
return -CONFIG_NO_SECTION_OR_NAME;
}
if (!last_dot[1]) {
- if (!quiet)
- error(_("key does not contain variable name: %s"), key);
+ error(_("key does not contain variable name: %s"), key);
return -CONFIG_NO_SECTION_OR_NAME;
}
@@ -456,8 +453,7 @@ static int git_config_parse_key_1(const char *key, char **store_key, size_t *bas
/*
* Validate the key and while at it, lower case it for matching.
*/
- if (store_key)
- *store_key = xmallocz(strlen(key));
+ *store_key = xmallocz(strlen(key));
dot = 0;
for (i = 0; key[i]; i++) {
@@ -468,39 +464,24 @@ static int git_config_parse_key_1(const char *key, char **store_key, size_t *bas
if (!dot || i > baselen) {
if (!iskeychar(c) ||
(i == baselen + 1 && !isalpha(c))) {
- if (!quiet)
- error(_("invalid key: %s"), key);
+ error(_("invalid key: %s"), key);
goto out_free_ret_1;
}
c = tolower(c);
} else if (c == '\n') {
- if (!quiet)
- error(_("invalid key (newline): %s"), key);
+ error(_("invalid key (newline): %s"), key);
goto out_free_ret_1;
}
- if (store_key)
- (*store_key)[i] = c;
+ (*store_key)[i] = c;
}
return 0;
out_free_ret_1:
- if (store_key) {
- FREE_AND_NULL(*store_key);
- }
+ FREE_AND_NULL(*store_key);
return -CONFIG_INVALID_KEY;
}
-int git_config_parse_key(const char *key, char **store_key, size_t *baselen)
-{
- return git_config_parse_key_1(key, store_key, baselen, 0);
-}
-
-int git_config_key_is_valid(const char *key)
-{
- return !git_config_parse_key_1(key, NULL, NULL, 1);
-}
-
static int config_parse_pair(const char *key, const char *value,
config_fn_t fn, void *data)
{
@@ -1400,8 +1381,6 @@ static int git_default_core_config(const char *var, const char *value, void *cb)
level = Z_DEFAULT_COMPRESSION;
else if (level < 0 || level > Z_BEST_COMPRESSION)
die(_("bad zlib compression level %d"), level);
- core_compression_level = level;
- core_compression_seen = 1;
if (!zlib_compression_seen)
zlib_compression_level = level;
if (!pack_compression_seen)
@@ -1796,6 +1775,7 @@ int git_config_from_mem(config_fn_t fn,
int git_config_from_blob_oid(config_fn_t fn,
const char *name,
+ struct repository *repo,
const struct object_id *oid,
void *data)
{
@@ -1804,7 +1784,7 @@ int git_config_from_blob_oid(config_fn_t fn,
unsigned long size;
int ret;
- buf = read_object_file(oid, &type, &size);
+ buf = repo_read_object_file(repo, oid, &type, &size);
if (!buf)
return error(_("unable to load config blob object '%s'"), name);
if (type != OBJ_BLOB) {
@@ -1820,14 +1800,15 @@ int git_config_from_blob_oid(config_fn_t fn,
}
static int git_config_from_blob_ref(config_fn_t fn,
+ struct repository *repo,
const char *name,
void *data)
{
struct object_id oid;
- if (get_oid(name, &oid) < 0)
+ if (repo_get_oid(repo, name, &oid) < 0)
return error(_("unable to resolve config blob '%s'"), name);
- return git_config_from_blob_oid(fn, name, &oid, data);
+ return git_config_from_blob_oid(fn, name, repo, &oid, data);
}
char *git_system_config(void)
@@ -1958,12 +1939,16 @@ int config_with_options(config_fn_t fn, void *data,
* If we have a specific filename, use it. Otherwise, follow the
* regular lookup sequence.
*/
- if (config_source && config_source->use_stdin)
+ if (config_source && config_source->use_stdin) {
return git_config_from_stdin(fn, data);
- else if (config_source && config_source->file)
+ } else if (config_source && config_source->file) {
return git_config_from_file(fn, config_source->file, data);
- else if (config_source && config_source->blob)
- return git_config_from_blob_ref(fn, config_source->blob, data);
+ } else if (config_source && config_source->blob) {
+ struct repository *repo = config_source->repo ?
+ config_source->repo : the_repository;
+ return git_config_from_blob_ref(fn, repo, config_source->blob,
+ data);
+ }
return do_git_config_sequence(opts, fn, data);
}
diff --git a/config.h b/config.h
index a2200f3111..f119de0130 100644
--- a/config.h
+++ b/config.h
@@ -49,6 +49,8 @@ const char *config_scope_name(enum config_scope scope);
struct git_config_source {
unsigned int use_stdin:1;
const char *file;
+ /* The repository if blob is not NULL; leave blank for the_repository */
+ struct repository *repo;
const char *blob;
enum config_scope scope;
};
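A hedged sketch of how a caller could direct the blob lookup at another repository (submodule_repo, my_config_fn and cb_data are assumed to exist; leaving .repo NULL keeps the old the_repository behaviour):

	struct git_config_source src = { 0 };
	struct config_options opts = { 0 };

	src.blob = "HEAD:.gitmodules";
	src.repo = submodule_repo;

	config_with_options(my_config_fn, cb_data, &src, &opts);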
@@ -136,6 +138,7 @@ int git_config_from_mem(config_fn_t fn,
const char *buf, size_t len,
void *data, const struct config_options *opts);
int git_config_from_blob_oid(config_fn_t fn, const char *name,
+ struct repository *repo,
const struct object_id *oid, void *data);
void git_config_push_parameter(const char *text);
void git_config_push_env(const char *spec);
@@ -256,7 +259,6 @@ int git_config_set_gently(const char *, const char *);
void git_config_set(const char *, const char *);
int git_config_parse_key(const char *, char **, size_t *);
-int git_config_key_is_valid(const char *key);
/*
* The following macros specify flag bits that alter the behavior
@@ -606,7 +608,6 @@ int git_config_get_maybe_bool(const char *key, int *dest);
int git_config_get_pathname(const char *key, const char **dest);
int git_config_get_index_threads(int *dest);
-int git_config_get_untracked_cache(void);
int git_config_get_split_index(void);
int git_config_get_max_percent_split_change(void);
int git_config_get_fsmonitor(void);
diff --git a/config.mak.dev b/config.mak.dev
index 022fb58218..7673fed114 100644
--- a/config.mak.dev
+++ b/config.mak.dev
@@ -1,13 +1,24 @@
+ifndef COMPILER_FEATURES
+COMPILER_FEATURES := $(shell ./detect-compiler $(CC))
+endif
+
ifeq ($(filter no-error,$(DEVOPTS)),)
DEVELOPER_CFLAGS += -Werror
SPARSE_FLAGS += -Wsparse-error
endif
-ifneq ($(filter pedantic,$(DEVOPTS)),)
+
+DEVELOPER_CFLAGS += -Wall
+ifeq ($(filter no-pedantic,$(DEVOPTS)),)
DEVELOPER_CFLAGS += -pedantic
-# don't warn for each N_ use
-DEVELOPER_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
+ifneq ($(or $(filter gcc5,$(COMPILER_FEATURES)),$(filter clang4,$(COMPILER_FEATURES))),)
+DEVELOPER_CFLAGS += -Wpedantic
+ifneq ($(filter gcc10,$(COMPILER_FEATURES)),)
+ifeq ($(uname_S),MINGW)
+DEVELOPER_CFLAGS += -Wno-pedantic-ms-format
+endif
+endif
+endif
endif
-DEVELOPER_CFLAGS += -Wall
DEVELOPER_CFLAGS += -Wdeclaration-after-statement
DEVELOPER_CFLAGS += -Wformat-security
DEVELOPER_CFLAGS += -Wold-style-definition
@@ -18,10 +29,6 @@ DEVELOPER_CFLAGS += -Wunused
DEVELOPER_CFLAGS += -Wvla
DEVELOPER_CFLAGS += -fno-common
-ifndef COMPILER_FEATURES
-COMPILER_FEATURES := $(shell ./detect-compiler $(CC))
-endif
-
ifneq ($(filter clang4,$(COMPILER_FEATURES)),)
DEVELOPER_CFLAGS += -Wtautological-constant-out-of-range-compare
endif
diff --git a/config.mak.uname b/config.mak.uname
index 76516aaa9a..3236a4918a 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -11,6 +11,10 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not')
uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+ifneq ($(findstring MINGW,$(uname_S)),)
+ uname_S := MINGW
+endif
+
ifdef MSVC
# avoid the MingW and Cygwin configuration sections
uname_S := Windows
@@ -588,7 +592,7 @@ ifeq ($(uname_S),NONSTOP_KERNEL)
SANE_TOOL_PATH = /usr/coreutils/bin:/usr/local/bin
SHELL_PATH = /usr/coreutils/bin/bash
endif
-ifneq (,$(findstring MINGW,$(uname_S)))
+ifeq ($(uname_S),MINGW)
pathsep = ;
HAVE_ALLOCA_H = YesPlease
NO_PREAD = YesPlease
@@ -735,9 +739,9 @@ vcxproj:
echo '</Project>') >git-remote-http/LinkOrCopyRemoteHttp.targets
git add -f git/LinkOrCopyBuiltins.targets git-remote-http/LinkOrCopyRemoteHttp.targets
- # Add command-list.h and config-list.h
- $(MAKE) MSVC=1 SKIP_VCPKG=1 prefix=/mingw64 config-list.h command-list.h
- git add -f config-list.h command-list.h
+ # Add generated headers
+ $(MAKE) MSVC=1 SKIP_VCPKG=1 prefix=/mingw64 $(GENERATED_H)
+ git add -f $(GENERATED_H)
# Add scripts
rm -f perl/perl.mak
diff --git a/connect.c b/connect.c
index aff13a270e..eaf7d6d261 100644
--- a/connect.c
+++ b/connect.c
@@ -557,6 +557,8 @@ const char *parse_feature_value(const char *feature_list, const char *feature, i
if (!*value || isspace(*value)) {
if (lenp)
*lenp = 0;
+ if (offset)
+ *offset = found + len - feature_list;
return value;
}
/* feature with a value (e.g., "agent=git/1.2.3") */
diff --git a/connected.c b/connected.c
index b5f9523a5f..cf68e37a97 100644
--- a/connected.c
+++ b/connected.c
@@ -24,7 +24,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
struct child_process rev_list = CHILD_PROCESS_INIT;
FILE *rev_list_in;
struct check_connected_options defaults = CHECK_CONNECTED_INIT;
- struct object_id oid;
+ const struct object_id *oid;
int err = 0;
struct packed_git *new_pack = NULL;
struct transport *transport;
@@ -34,7 +34,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
opt = &defaults;
transport = opt->transport;
- if (fn(cb_data, &oid)) {
+ oid = fn(cb_data);
+ if (!oid) {
if (opt->err_fd)
close(opt->err_fd);
return err;
@@ -73,7 +74,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
for (p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_promisor)
continue;
- if (find_pack_entry_one(oid.hash, p))
+ if (find_pack_entry_one(oid->hash, p))
goto promisor_pack_found;
}
/*
@@ -83,7 +84,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
goto no_promisor_pack_found;
promisor_pack_found:
;
- } while (!fn(cb_data, &oid));
+ } while ((oid = fn(cb_data)) != NULL);
return 0;
}
@@ -133,12 +134,12 @@ no_promisor_pack_found:
* are sure the ref is good and not sending it to
* rev-list for verification.
*/
- if (new_pack && find_pack_entry_one(oid.hash, new_pack))
+ if (new_pack && find_pack_entry_one(oid->hash, new_pack))
continue;
- if (fprintf(rev_list_in, "%s\n", oid_to_hex(&oid)) < 0)
+ if (fprintf(rev_list_in, "%s\n", oid_to_hex(oid)) < 0)
break;
- } while (!fn(cb_data, &oid));
+ } while ((oid = fn(cb_data)) != NULL);
if (ferror(rev_list_in) || fflush(rev_list_in)) {
if (errno != EPIPE && errno != EINVAL)
diff --git a/connected.h b/connected.h
index 8d5a6b3ad6..6e59c92aa3 100644
--- a/connected.h
+++ b/connected.h
@@ -9,7 +9,7 @@ struct transport;
- * When called after returning the name for the last object, return -1
- * to signal EOF, otherwise return 0.
+ * When called after returning the name for the last object, return NULL
+ * to signal EOF, otherwise return the next object ID.
*/
-typedef int (*oid_iterate_fn)(void *, struct object_id *oid);
+typedef const struct object_id *(*oid_iterate_fn)(void *);
/*
* Named-arguments struct for check_connected. All arguments are
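Under the new calling convention an iterator returns object IDs directly and signals end-of-iteration with NULL; a minimal hypothetical callback (names invented for illustration) could look like:

	struct oid_iteration {
		struct oid_array oids;
		size_t pos;
	};

	static const struct object_id *next_oid(void *cb_data)
	{
		struct oid_iteration *it = cb_data;

		if (it->pos >= it->oids.nr)
			return NULL;	/* no more objects */
		return &it->oids.oid[it->pos++];
	}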
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 171b4124af..fd1399c440 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -624,6 +624,13 @@ if(NOT EXISTS ${CMAKE_BINARY_DIR}/config-list.h)
OUTPUT_FILE ${CMAKE_BINARY_DIR}/config-list.h)
endif()
+if(NOT EXISTS ${CMAKE_BINARY_DIR}/hook-list.h)
+ message("Generating hook-list.h")
+ execute_process(COMMAND ${SH_EXE} ${CMAKE_SOURCE_DIR}/generate-hooklist.sh
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ OUTPUT_FILE ${CMAKE_BINARY_DIR}/hook-list.h)
+endif()
+
include_directories(${CMAKE_BINARY_DIR})
#build
diff --git a/contrib/coccinelle/xopen.cocci b/contrib/coccinelle/xopen.cocci
index 814d7b8a1a..b71db67019 100644
--- a/contrib/coccinelle/xopen.cocci
+++ b/contrib/coccinelle/xopen.cocci
@@ -2,15 +2,18 @@
identifier fd;
identifier die_fn =~ "^(die|die_errno)$";
@@
-(
- fd =
+ int fd =
- open
+ xopen
(...);
-|
- int fd =
+- if ( \( fd < 0 \| fd == -1 \) ) { die_fn(...); }
+
+@@
+expression fd;
+identifier die_fn =~ "^(die|die_errno)$";
+@@
+ fd =
- open
+ xopen
(...);
-)
- if ( \( fd < 0 \| fd == -1 \) ) { die_fn(...); }
diff --git a/contrib/credential/gnome-keyring/git-credential-gnome-keyring.c b/contrib/credential/gnome-keyring/git-credential-gnome-keyring.c
index d389bfadce..5927e27ae6 100644
--- a/contrib/credential/gnome-keyring/git-credential-gnome-keyring.c
+++ b/contrib/credential/gnome-keyring/git-credential-gnome-keyring.c
@@ -138,7 +138,7 @@ struct credential {
char *password;
};
-#define CREDENTIAL_INIT { NULL, NULL, 0, NULL, NULL, NULL }
+#define CREDENTIAL_INIT { 0 }
typedef int (*credential_op_cb)(struct credential *);
diff --git a/contrib/credential/libsecret/git-credential-libsecret.c b/contrib/credential/libsecret/git-credential-libsecret.c
index e6598b6383..2c5d76d789 100644
--- a/contrib/credential/libsecret/git-credential-libsecret.c
+++ b/contrib/credential/libsecret/git-credential-libsecret.c
@@ -41,7 +41,7 @@ struct credential {
char *password;
};
-#define CREDENTIAL_INIT { NULL, NULL, 0, NULL, NULL, NULL }
+#define CREDENTIAL_INIT { 0 }
typedef int (*credential_op_cb)(struct credential *);
diff --git a/contrib/rerere-train.sh b/contrib/rerere-train.sh
index eeee45dd34..75125d6ae0 100755
--- a/contrib/rerere-train.sh
+++ b/contrib/rerere-train.sh
@@ -91,7 +91,7 @@ do
git checkout -q $commit -- .
git rerere
fi
- git reset -q --hard
+ git reset -q --hard # Might nuke untracked files...
done
if test -z "$branch"
diff --git a/diff-merges.c b/diff-merges.c
index d897fd8a29..5060ccd890 100644
--- a/diff-merges.c
+++ b/diff-merges.c
@@ -6,7 +6,7 @@ typedef void (*diff_merges_setup_func_t)(struct rev_info *);
static void set_separate(struct rev_info *revs);
static diff_merges_setup_func_t set_to_default = set_separate;
-static int suppress_parsing;
+static int suppress_m_parsing;
static void suppress(struct rev_info *revs)
{
@@ -91,9 +91,9 @@ int diff_merges_config(const char *value)
return 0;
}
-void diff_merges_suppress_options_parsing(void)
+void diff_merges_suppress_m_parsing(void)
{
- suppress_parsing = 1;
+ suppress_m_parsing = 1;
}
int diff_merges_parse_opts(struct rev_info *revs, const char **argv)
@@ -102,10 +102,7 @@ int diff_merges_parse_opts(struct rev_info *revs, const char **argv)
const char *optarg;
const char *arg = argv[0];
- if (suppress_parsing)
- return 0;
-
- if (!strcmp(arg, "-m")) {
+ if (!suppress_m_parsing && !strcmp(arg, "-m")) {
set_to_default(revs);
} else if (!strcmp(arg, "-c")) {
set_combined(revs);
@@ -153,9 +150,6 @@ void diff_merges_set_dense_combined_if_unset(struct rev_info *revs)
void diff_merges_setup_revs(struct rev_info *revs)
{
- if (suppress_parsing)
- return;
-
if (revs->combine_merges == 0)
revs->dense_combined_merges = 0;
if (revs->separate_merges == 0)
diff --git a/diff-merges.h b/diff-merges.h
index b5d57f6563..19639689bb 100644
--- a/diff-merges.h
+++ b/diff-merges.h
@@ -11,7 +11,7 @@ struct rev_info;
int diff_merges_config(const char *value);
-void diff_merges_suppress_options_parsing(void);
+void diff_merges_suppress_m_parsing(void);
int diff_merges_parse_opts(struct rev_info *revs, const char **argv);
diff --git a/diff.c b/diff.c
index a8113f1707..861282db1c 100644
--- a/diff.c
+++ b/diff.c
@@ -26,6 +26,7 @@
#include "parse-options.h"
#include "help.h"
#include "promisor-remote.h"
+#include "dir.h"
#ifdef NO_FAST_WORKING_DIRECTORY
#define FAST_WORKING_DIRECTORY 0
@@ -774,13 +775,13 @@ struct emitted_diff_symbol {
int indent_width; /* The visual width of the indentation */
enum diff_symbol s;
};
-#define EMITTED_DIFF_SYMBOL_INIT {NULL}
+#define EMITTED_DIFF_SYMBOL_INIT { 0 }
struct emitted_diff_symbols {
struct emitted_diff_symbol *buf;
int nr, alloc;
};
-#define EMITTED_DIFF_SYMBOLS_INIT {NULL, 0, 0}
+#define EMITTED_DIFF_SYMBOLS_INIT { 0 }
static void append_emitted_diff_symbol(struct diff_options *o,
struct emitted_diff_symbol *e)
@@ -3907,6 +3908,13 @@ static int reuse_worktree_file(struct index_state *istate,
if (!want_file && would_convert_to_git(istate, name))
return 0;
+ /*
+ * If this path does not match our sparse-checkout definition,
+ * then the file will not be in the working directory.
+ */
+ if (!path_in_sparse_checkout(name, istate))
+ return 0;
+
len = strlen(name);
pos = index_name_pos(istate, name, len);
if (pos < 0)
diff --git a/dir.c b/dir.c
index 03c4d21267..a4306ab874 100644
--- a/dir.c
+++ b/dir.c
@@ -1294,7 +1294,7 @@ int match_pathname(const char *pathname, int pathlen,
* then our prefix match is all we need; we
* do not need to call fnmatch at all.
*/
- if (!patternlen && !namelen)
+ if (!patternlen && (!namelen || (flags & PATTERN_FLAG_MUSTBEDIR)))
return 1;
}
@@ -1303,6 +1303,44 @@ int match_pathname(const char *pathname, int pathlen,
WM_PATHNAME) == 0;
}
+static int path_matches_dir_pattern(const char *pathname,
+ int pathlen,
+ struct strbuf **path_parent,
+ int *dtype,
+ struct path_pattern *pattern,
+ struct index_state *istate)
+{
+ if (!*path_parent) {
+ char *slash;
+ CALLOC_ARRAY(*path_parent, 1);
+ strbuf_add(*path_parent, pathname, pathlen);
+ slash = find_last_dir_sep((*path_parent)->buf);
+
+ if (slash)
+ strbuf_setlen(*path_parent, slash - (*path_parent)->buf);
+ else
+ strbuf_setlen(*path_parent, 0);
+ }
+
+ /*
+ * If the parent directory matches the pattern, then we do not
+ * need to check for dtype.
+ */
+ if ((*path_parent)->len &&
+ match_pathname((*path_parent)->buf, (*path_parent)->len,
+ pattern->base,
+ pattern->baselen ? pattern->baselen - 1 : 0,
+ pattern->pattern, pattern->nowildcardlen,
+ pattern->patternlen, pattern->flags))
+ return 1;
+
+ *dtype = resolve_dtype(*dtype, istate, pathname, pathlen);
+ if (*dtype != DT_DIR)
+ return 0;
+
+ return 1;
+}
+
/*
* Scan the given exclude list in reverse to see whether pathname
* should be ignored. The first match (i.e. the last on the list), if
@@ -1318,6 +1356,7 @@ static struct path_pattern *last_matching_pattern_from_list(const char *pathname
{
struct path_pattern *res = NULL; /* undecided */
int i;
+ struct strbuf *path_parent = NULL;
if (!pl->nr)
return NULL; /* undefined */
@@ -1327,11 +1366,10 @@ static struct path_pattern *last_matching_pattern_from_list(const char *pathname
const char *exclude = pattern->pattern;
int prefix = pattern->nowildcardlen;
- if (pattern->flags & PATTERN_FLAG_MUSTBEDIR) {
- *dtype = resolve_dtype(*dtype, istate, pathname, pathlen);
- if (*dtype != DT_DIR)
- continue;
- }
+ if (pattern->flags & PATTERN_FLAG_MUSTBEDIR &&
+ !path_matches_dir_pattern(pathname, pathlen, &path_parent,
+ dtype, pattern, istate))
+ continue;
if (pattern->flags & PATTERN_FLAG_NODIR) {
if (match_basename(basename,
@@ -1355,6 +1393,12 @@ static struct path_pattern *last_matching_pattern_from_list(const char *pathname
break;
}
}
+
+ if (path_parent) {
+ strbuf_release(path_parent);
+ free(path_parent);
+ }
+
return res;
}
@@ -1439,6 +1483,58 @@ done:
return result;
}
+int init_sparse_checkout_patterns(struct index_state *istate)
+{
+ if (!core_apply_sparse_checkout)
+ return 1;
+ if (istate->sparse_checkout_patterns)
+ return 0;
+
+ CALLOC_ARRAY(istate->sparse_checkout_patterns, 1);
+
+ if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0) {
+ FREE_AND_NULL(istate->sparse_checkout_patterns);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int path_in_sparse_checkout_1(const char *path,
+ struct index_state *istate,
+ int require_cone_mode)
+{
+ const char *base;
+ int dtype = DT_REG;
+
+ /*
+ * We default to accepting a path if there are no patterns or
+ * they are of the wrong type.
+ */
+ if (init_sparse_checkout_patterns(istate) ||
+ (require_cone_mode &&
+ !istate->sparse_checkout_patterns->use_cone_patterns))
+ return 1;
+
+ base = strrchr(path, '/');
+ return path_matches_pattern_list(path, strlen(path), base ? base + 1 : path,
+ &dtype,
+ istate->sparse_checkout_patterns,
+ istate) > 0;
+}
+
+int path_in_sparse_checkout(const char *path,
+ struct index_state *istate)
+{
+ return path_in_sparse_checkout_1(path, istate, 0);
+}
+
+int path_in_cone_mode_sparse_checkout(const char *path,
+ struct index_state *istate)
+{
+ return path_in_sparse_checkout_1(path, istate, 1);
+}
+
static struct path_pattern *last_matching_pattern_from_lists(
struct dir_struct *dir, struct index_state *istate,
const char *pathname, int pathlen,
@@ -2970,6 +3066,120 @@ int is_empty_dir(const char *path)
return ret;
}
+char *git_url_basename(const char *repo, int is_bundle, int is_bare)
+{
+ const char *end = repo + strlen(repo), *start, *ptr;
+ size_t len;
+ char *dir;
+
+ /*
+ * Skip scheme.
+ */
+ start = strstr(repo, "://");
+ if (start == NULL)
+ start = repo;
+ else
+ start += 3;
+
+ /*
+ * Skip authentication data. The stripping does happen
+ * greedily, such that we strip up to the last '@' inside
+ * the host part.
+ */
+ for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) {
+ if (*ptr == '@')
+ start = ptr + 1;
+ }
+
+ /*
+ * Strip trailing spaces, slashes and /.git
+ */
+ while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1])))
+ end--;
+ if (end - start > 5 && is_dir_sep(end[-5]) &&
+ !strncmp(end - 4, ".git", 4)) {
+ end -= 5;
+ while (start < end && is_dir_sep(end[-1]))
+ end--;
+ }
+
+ /*
+ * Strip trailing port number if we've got only a
+ * hostname (that is, there is no dir separator but a
+ * colon). This check is required such that we do not
+ * strip URIs like '/foo/bar:2222.git', which should
+ * result in a dir '2222' being guessed due to backwards
+ * compatibility.
+ */
+ if (memchr(start, '/', end - start) == NULL
+ && memchr(start, ':', end - start) != NULL) {
+ ptr = end;
+ while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':')
+ ptr--;
+ if (start < ptr && ptr[-1] == ':')
+ end = ptr - 1;
+ }
+
+ /*
+ * Find last component. To remain backwards compatible we
+ * also regard colons as path separators, such that
+ * cloning a repository 'foo:bar.git' would result in a
+ * directory 'bar' being guessed.
+ */
+ ptr = end;
+ while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':')
+ ptr--;
+ start = ptr;
+
+ /*
+ * Strip .{bundle,git}.
+ */
+ len = end - start;
+ strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git");
+
+ if (!len || (len == 1 && *start == '/'))
+ die(_("No directory name could be guessed.\n"
+ "Please specify a directory on the command line"));
+
+ if (is_bare)
+ dir = xstrfmt("%.*s.git", (int)len, start);
+ else
+ dir = xstrndup(start, len);
+ /*
+ * Replace sequences of 'control' characters and whitespace
+ * with one ascii space, remove leading and trailing spaces.
+ */
+ if (*dir) {
+ char *out = dir;
+ int prev_space = 1 /* strip leading whitespace */;
+ for (end = dir; *end; ++end) {
+ char ch = *end;
+ if ((unsigned char)ch < '\x20')
+ ch = '\x20';
+ if (isspace(ch)) {
+ if (prev_space)
+ continue;
+ prev_space = 1;
+ } else
+ prev_space = 0;
+ *out++ = ch;
+ }
+ *out = '\0';
+ if (out > dir && prev_space)
+ out[-1] = '\0';
+ }
+ return dir;
+}
+
+void strip_dir_trailing_slashes(char *dir)
+{
+ char *end = dir + strlen(dir);
+
+ while (dir < end - 1 && is_dir_sep(end[-1]))
+ end--;
+ *end = '\0';
+}
+
static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up)
{
DIR *dir;
@@ -3633,7 +3843,7 @@ static void connect_wt_gitdir_in_nested(const char *sub_worktree,
strbuf_reset(&sub_wt);
strbuf_reset(&sub_gd);
strbuf_addf(&sub_wt, "%s/%s", sub_worktree, sub->path);
- strbuf_addf(&sub_gd, "%s/modules/%s", sub_gitdir, sub->name);
+ submodule_name_to_gitdir(&sub_gd, &subrepo, sub->name);
connect_work_tree_and_git_dir(sub_wt.buf, sub_gd.buf, 1);
}
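
git_url_basename(), added above, derives the "humanish" directory name for a repository URL; the doc comment added to dir.h below lists the canonical examples. A small illustration of the expected results, assuming the behaviour described by that comment:

    /* Illustrative calls; results follow the dir.h doc comment. */
    char *a = git_url_basename("https://example.com/user/repo.git", 0, 0); /* "repo" */
    char *b = git_url_basename("host.xz:foo/.git", 0, 0);                  /* "foo" */
    char *c = git_url_basename("/path/to/repo.git", 0, 1);                 /* "repo.git" (bare) */
    free(a); free(b); free(c);

Note that the function die()s rather than returning NULL when no directory name can be guessed.
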
diff --git a/dir.h b/dir.h
index b3e1a54a97..83f46c0fb4 100644
--- a/dir.h
+++ b/dir.h
@@ -394,6 +394,14 @@ enum pattern_match_result path_matches_pattern_list(const char *pathname,
const char *basename, int *dtype,
struct pattern_list *pl,
struct index_state *istate);
+
+int init_sparse_checkout_patterns(struct index_state *state);
+
+int path_in_sparse_checkout(const char *path,
+ struct index_state *istate);
+int path_in_cone_mode_sparse_checkout(const char *path,
+ struct index_state *istate);
+
struct dir_entry *dir_add_ignored(struct dir_struct *dir,
struct index_state *istate,
const char *pathname, int len);
@@ -453,6 +461,17 @@ static inline int is_dot_or_dotdot(const char *name)
int is_empty_dir(const char *dir);
+/*
+ * Retrieve the "humanish" basename of the given Git URL.
+ *
+ * For example:
+ * /path/to/repo.git => "repo"
+ * host.xz:foo/.git => "foo"
+ * http://example.com/user/bar.baz => "bar.baz"
+ */
+char *git_url_basename(const char *repo, int is_bundle, int is_bare);
+void strip_dir_trailing_slashes(char *dir);
+
void setup_standard_excludes(struct dir_struct *dir);
char *get_sparse_checkout_filename(void);
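
The two path_in_*_sparse_checkout() helpers declared above lazily load the patterns via init_sparse_checkout_patterns() and default to "in" when sparse checkout is disabled or the patterns cannot be used. A hypothetical caller, just to show the calling convention:

    /* Hypothetical: report index entries that fall outside the
     * sparse-checkout definition instead of acting on them. */
    static void report_out_of_cone(struct index_state *istate)
    {
        int i;
        for (i = 0; i < istate->cache_nr; i++) {
            const struct cache_entry *ce = istate->cache[i];
            if (!path_in_sparse_checkout(ce->name, istate))
                warning("'%s' is outside the sparse-checkout definition",
                        ce->name);
        }
    }
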
diff --git a/entry.c b/entry.c
index 044e8ec92c..9b0f968a70 100644
--- a/entry.c
+++ b/entry.c
@@ -163,24 +163,21 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts,
int show_progress)
{
int errs = 0;
- unsigned delayed_object_count;
+ unsigned processed_paths = 0;
off_t filtered_bytes = 0;
struct string_list_item *filter, *path;
- struct progress *progress;
+ struct progress *progress = NULL;
struct delayed_checkout *dco = state->delayed_checkout;
if (!state->delayed_checkout)
return errs;
dco->state = CE_RETRY;
- delayed_object_count = dco->paths.nr;
- progress = show_progress
- ? start_delayed_progress(_("Filtering content"), delayed_object_count)
- : NULL;
+ if (show_progress)
+ progress = start_delayed_progress(_("Filtering content"), dco->paths.nr);
while (dco->filters.nr > 0) {
for_each_string_list_item(filter, &dco->filters) {
struct string_list available_paths = STRING_LIST_INIT_NODUP;
- display_progress(progress, delayed_object_count - dco->paths.nr);
if (!async_query_available_blobs(filter->string, &available_paths)) {
/* Filter reported an error */
@@ -227,6 +224,7 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts,
ce = index_file_exists(state->istate, path->string,
strlen(path->string), 0);
if (ce) {
+ display_progress(progress, ++processed_paths);
errs |= checkout_entry(ce, state, NULL, nr_checkouts);
filtered_bytes += ce->ce_stat_data.sd_size;
display_throughput(progress, filtered_bytes);
diff --git a/entry.h b/entry.h
index 7c889e58fd..2254c62727 100644
--- a/entry.h
+++ b/entry.h
@@ -16,7 +16,7 @@ struct checkout {
clone:1,
refresh_cache:1;
};
-#define CHECKOUT_INIT { NULL, "" }
+#define CHECKOUT_INIT { .base_dir = "" }
#define TEMPORARY_FILENAME_LENGTH 25
/*
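
CHECKOUT_INIT above, like EMITTED_DIFF_SYMBOL_INIT, EMITTED_DIFF_SYMBOLS_INIT, LOCK_INIT and LIST_HEAD_INIT elsewhere in this series, switches from positional initializers to "{ 0 }" or designated initializers. A standalone illustration of why (not code from the patch):

    struct example {
        const char *base_dir;  /* must default to "" */
        int base_dir_len;
        unsigned force:1;
    };

    /* The positional form silently initializes the wrong field if a
     * member is ever added before base_dir; the designated form keeps
     * naming the field it sets, and everything else is zeroed. */
    #define EXAMPLE_INIT_OLD { NULL, 0 }
    #define EXAMPLE_INIT     { .base_dir = "" }
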
diff --git a/environment.c b/environment.c
index d6b22ede7e..9da7f3c1a1 100644
--- a/environment.c
+++ b/environment.c
@@ -31,7 +31,6 @@ int prefer_symlink_refs;
int is_bare_repository_cfg = -1; /* unspecified */
int warn_ambiguous_refs = 1;
int warn_on_object_refname_ambiguity = 1;
-int ref_paranoia = -1;
int repository_format_precious_objects;
int repository_format_worktree_config;
const char *git_commit_encoding;
@@ -41,7 +40,6 @@ char *apply_default_ignorewhitespace;
const char *git_attributes_file;
const char *git_hooks_path;
int zlib_compression_level = Z_BEST_SPEED;
-int core_compression_level;
int pack_compression_level = Z_DEFAULT_COMPRESSION;
int fsync_object_files;
size_t packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE;
@@ -96,13 +94,6 @@ int auto_comment_line_char;
/* Parallel index stat data preload? */
int core_preload_index = 1;
-/*
- * This is a hack for test programs like test-dump-untracked-cache to
- * ensure that they do not modify the untracked cache when reading it.
- * Do not use it otherwise!
- */
-int ignore_untracked_cache_config;
-
/* This is set by setup_git_dir_gently() and/or git_default_config() */
char *git_work_tree_cfg;
@@ -330,8 +321,7 @@ char *get_graft_file(struct repository *r)
static void set_git_dir_1(const char *path)
{
- if (setenv(GIT_DIR_ENVIRONMENT, path, 1))
- die(_("could not set GIT_DIR to '%s'"), path);
+ xsetenv(GIT_DIR_ENVIRONMENT, path, 1);
setup_git_env(path);
}
diff --git a/fetch-negotiator.c b/fetch-negotiator.c
index 57ed5784e1..273390229f 100644
--- a/fetch-negotiator.c
+++ b/fetch-negotiator.c
@@ -19,7 +19,6 @@ void fetch_negotiator_init(struct repository *r,
return;
case FETCH_NEGOTIATION_DEFAULT:
- default:
default_negotiator_init(negotiator);
return;
}
diff --git a/fetch-pack.c b/fetch-pack.c
index 0bf7ed7e47..a9604f35a3 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -119,6 +119,11 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid,
{
enum object_type type;
struct object_info info = { .typep = &type };
+ struct commit *commit;
+
+ commit = lookup_commit_in_graph(the_repository, oid);
+ if (commit)
+ return commit;
while (1) {
if (oid_object_info_extended(the_repository, oid, &info,
@@ -1912,16 +1917,15 @@ static void update_shallow(struct fetch_pack_args *args,
oid_array_clear(&ref);
}
-static int iterate_ref_map(void *cb_data, struct object_id *oid)
+static const struct object_id *iterate_ref_map(void *cb_data)
{
struct ref **rm = cb_data;
struct ref *ref = *rm;
if (!ref)
- return -1; /* end of the list */
+ return NULL;
*rm = ref->next;
- oidcpy(oid, &ref->old_oid);
- return 0;
+ return &ref->old_oid;
}
struct ref *fetch_pack(struct fetch_pack_args *args,
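
The first hunk lets deref_without_lazy_fetch() consult the commit graph via lookup_commit_in_graph() before doing a full object lookup. The second changes iterate_ref_map() to a simpler iterator convention: return a pointer to the next object ID, or NULL when the list is exhausted, instead of filling an out-parameter and signalling the end with -1. A hypothetical consumer of the new shape (the real consumer sits on the other side of the callback and is not part of this hunk):

    typedef const struct object_id *(*oid_iterate_fn)(void *cb_data);

    /* Hypothetical: keep asking the iterator for the next object ID
     * until it returns NULL. */
    static void print_all_oids(oid_iterate_fn next, void *cb_data)
    {
        const struct object_id *oid;
        while ((oid = next(cb_data)))
            printf("%s\n", oid_to_hex(oid));
    }
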
diff --git a/generate-hooklist.sh b/generate-hooklist.sh
new file mode 100755
index 0000000000..2f9f54eb54
--- /dev/null
+++ b/generate-hooklist.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+#
+# Usage: ./generate-hooklist.sh >hook-list.h
+
+cat <<EOF
+/* Automatically generated by generate-hooklist.sh */
+
+static const char *hook_name_list[] = {
+EOF
+
+sed -n \
+ -e '/^~~~~*$/ {x; s/^.*$/ "&",/; p;}' \
+ -e 'x' \
+ <Documentation/githooks.txt |
+ LC_ALL=C sort
+
+cat <<EOF
+ NULL,
+};
+EOF
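
generate-hooklist.sh scrapes the hook names from Documentation/githooks.txt (each hook appears there as a section title underlined with "~~~~"), sorts them with LC_ALL=C, and emits a C array. The generated hook-list.h might look roughly like this (an illustrative excerpt; only a few well-known hook names are shown):

    /* Automatically generated by generate-hooklist.sh */

    static const char *hook_name_list[] = {
        "applypatch-msg",
        "commit-msg",
        "pre-commit",
        "pre-push",
        /* ...the remaining hooks from githooks.txt... */
        NULL,
    };
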
diff --git a/gettext.h b/gettext.h
index c8b34fd612..d209911ebb 100644
--- a/gettext.h
+++ b/gettext.h
@@ -55,31 +55,7 @@ const char *Q_(const char *msgid, const char *plu, unsigned long n)
}
/* Mark msgid for translation but do not translate it. */
-#if !USE_PARENS_AROUND_GETTEXT_N
#define N_(msgid) msgid
-#else
-/*
- * Strictly speaking, this will lead to invalid C when
- * used this way:
- * static const char s[] = N_("FOO");
- * which will expand to
- * static const char s[] = ("FOO");
- * and in valid C, the initializer on the right hand side must
- * be without the parentheses. But many compilers do accept it
- * as a language extension and it will allow us to catch mistakes
- * like:
- * static const char *msgs[] = {
- * N_("one")
- * N_("two"),
- * N_("three"),
- * NULL
- * };
- * (notice the missing comma on one of the lines) by forcing
- * a compilation error, because parenthesised ("one") ("two")
- * will not get silently turned into ("onetwo").
- */
-#define N_(msgid) (msgid)
-#endif
const char *get_preferred_languages(void);
int is_utf8_locale(void);
diff --git a/git-bisect.sh b/git-bisect.sh
index 6a7afaea8d..405cf76f2a 100755
--- a/git-bisect.sh
+++ b/git-bisect.sh
@@ -34,94 +34,9 @@ Please use "git help bisect" to get the full man page.'
OPTIONS_SPEC=
. git-sh-setup
-_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
-_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40"
TERM_BAD=bad
TERM_GOOD=good
-bisect_visualize() {
- git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit
-
- if test $# = 0
- then
- if test -n "${DISPLAY+set}${SESSIONNAME+set}${MSYSTEM+set}${SECURITYSESSIONID+set}" &&
- type gitk >/dev/null 2>&1
- then
- set gitk
- else
- set git log
- fi
- else
- case "$1" in
- git*|tig) ;;
- -*) set git log "$@" ;;
- *) set git "$@" ;;
- esac
- fi
-
- eval '"$@"' --bisect -- $(cat "$GIT_DIR/BISECT_NAMES")
-}
-
-bisect_run () {
- git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit
-
- test -n "$*" || die "$(gettext "bisect run failed: no command provided.")"
-
- while true
- do
- command="$@"
- eval_gettextln "running \$command"
- "$@"
- res=$?
-
- # Check for really bad run error.
- if [ $res -lt 0 -o $res -ge 128 ]
- then
- eval_gettextln "bisect run failed:
-exit code \$res from '\$command' is < 0 or >= 128" >&2
- exit $res
- fi
-
- # Find current state depending on run success or failure.
- # A special exit code of 125 means cannot test.
- if [ $res -eq 125 ]
- then
- state='skip'
- elif [ $res -gt 0 ]
- then
- state="$TERM_BAD"
- else
- state="$TERM_GOOD"
- fi
-
- git bisect--helper --bisect-state $state >"$GIT_DIR/BISECT_RUN"
- res=$?
-
- cat "$GIT_DIR/BISECT_RUN"
-
- if sane_grep "first $TERM_BAD commit could be any of" "$GIT_DIR/BISECT_RUN" \
- >/dev/null
- then
- gettextln "bisect run cannot continue any more" >&2
- exit $res
- fi
-
- if [ $res -ne 0 ]
- then
- eval_gettextln "bisect run failed:
-'bisect-state \$state' exited with error code \$res" >&2
- exit $res
- fi
-
- if sane_grep "is the first $TERM_BAD commit" "$GIT_DIR/BISECT_RUN" >/dev/null
- then
- gettextln "bisect run success"
- exit 0;
- fi
-
- done
-}
-
get_terms () {
if test -s "$GIT_DIR/BISECT_TERMS"
then
@@ -152,7 +67,7 @@ case "$#" in
# Not sure we want "next" at the UI level anymore.
git bisect--helper --bisect-next "$@" || exit ;;
visualize|view)
- bisect_visualize "$@" ;;
+ git bisect--helper --bisect-visualize "$@" || exit;;
reset)
git bisect--helper --bisect-reset "$@" ;;
replay)
@@ -160,7 +75,7 @@ case "$#" in
log)
git bisect--helper --bisect-log || exit ;;
run)
- bisect_run "$@" ;;
+ git bisect--helper --bisect-run "$@" || exit;;
terms)
git bisect--helper --bisect-terms "$@" || exit;;
*)
diff --git a/git-compat-util.h b/git-compat-util.h
index b46605300a..141bb86351 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -160,6 +160,9 @@
# endif
#define WIN32_LEAN_AND_MEAN /* stops windows.h including winsock.h */
#include <winsock2.h>
+#ifndef NO_UNIX_SOCKETS
+#include <afunix.h>
+#endif
#include <windows.h>
#define GIT_WINDOWS_NATIVE
#endif
@@ -875,6 +878,8 @@ void *xmemdupz(const void *data, size_t len);
char *xstrndup(const char *str, size_t len);
void *xrealloc(void *ptr, size_t size);
void *xcalloc(size_t nmemb, size_t size);
+void xsetenv(const char *name, const char *value, int overwrite);
+void xunsetenv(const char *name);
void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
const char *mmap_os_err(void);
void *xmmap_gently(void *start, size_t length, int prot, int flags, int fd, off_t offset);
@@ -1253,10 +1258,6 @@ int warn_on_fopen_errors(const char *path);
*/
int open_nofollow(const char *path, int flags);
-#if !defined(USE_PARENS_AROUND_GETTEXT_N) && defined(__GNUC__)
-#define USE_PARENS_AROUND_GETTEXT_N 1
-#endif
-
#ifndef SHELL_PATH
# define SHELL_PATH "/bin/sh"
#endif
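
The new xsetenv()/xunsetenv() declarations pair with the environment.c change above, where an explicit setenv()-plus-die() sequence became a single xsetenv() call. The wrapper implementations are not part of this excerpt; following the pattern of the other x*() helpers they presumably look roughly like this (an assumption, not the actual wrapper code):

    /* Presumed shape of the wrappers: die with errno context when the
     * underlying libc call fails, so callers need no error handling. */
    void xsetenv(const char *name, const char *value, int overwrite)
    {
        if (setenv(name, value, overwrite) < 0)
            die_errno("could not setenv '%s'", name);
    }

    void xunsetenv(const char *name)
    {
        if (unsetenv(name) < 0)
            die_errno("could not unsetenv '%s'", name);
    }
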
diff --git a/git-curl-compat.h b/git-curl-compat.h
new file mode 100644
index 0000000000..56a83b6bbd
--- /dev/null
+++ b/git-curl-compat.h
@@ -0,0 +1,129 @@
+#ifndef GIT_CURL_COMPAT_H
+#define GIT_CURL_COMPAT_H
+#include <curl/curl.h>
+
+/**
+ * This header centralizes the declaration of our libcurl dependencies
+ * to make it easy to discover the oldest versions we support, and to
+ * inform decisions about removing support for older libcurl in the
+ * future.
+ *
+ * The oldest supported version of curl is documented in the "INSTALL"
+ * document.
+ *
+ * The source of truth for what versions have which symbols is
+ * https://github.com/curl/curl/blob/master/docs/libcurl/symbols-in-versions;
+ * the release dates are taken from curl.git (at
+ * https://github.com/curl/curl/).
+ *
+ * For each X symbol we need from curl we define our own
+ * GIT_CURL_HAVE_X. If multiple similar symbols with the same prefix
+ * were defined in the same version we pick one and check for that name.
+ *
+ * We may also define a missing CURL_* symbol to its known value, if
+ * doing so is sufficient to add support for it to older versions that
+ * don't have it.
+ *
+ * Keep any symbols in date order of when their support was
+ * introduced, oldest first, in the official cURL library.
+ */
+
+/**
+ * CURL_SOCKOPT_OK was added in 7.21.5, released in April 2011.
+ */
+#if LIBCURL_VERSION_NUM < 0x071505
+#define CURL_SOCKOPT_OK 0
+#endif
+
+/**
+ * CURLOPT_TCP_KEEPALIVE was added in 7.25.0, released in March 2012.
+ */
+#if LIBCURL_VERSION_NUM >= 0x071900
+#define GIT_CURL_HAVE_CURLOPT_TCP_KEEPALIVE 1
+#endif
+
+
+/**
+ * CURLOPT_LOGIN_OPTIONS was added in 7.34.0, released in December
+ * 2013.
+ *
+ * If we start requiring 7.34.0 we might also be able to remove the
+ * code conditional on USE_CURL_FOR_IMAP_SEND in imap-send.c, see
+ * 1e16b255b95 (git-imap-send: use libcurl for implementation,
+ * 2014-11-09) and the check it added for "072200" in the Makefile.
+ *
+ */
+#if LIBCURL_VERSION_NUM >= 0x072200
+#define GIT_CURL_HAVE_CURLOPT_LOGIN_OPTIONS 1
+#endif
+
+/**
+ * CURL_SSLVERSION_TLSv1_[012] was added in 7.34.0, released in
+ * December 2013.
+ */
+#if LIBCURL_VERSION_NUM >= 0x072200
+#define GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_0
+#endif
+
+/**
+ * CURLOPT_PINNEDPUBLICKEY was added in 7.39.0, released in November
+ * 2014. CURLE_SSL_PINNEDPUBKEYNOTMATCH was added in that same version.
+ */
+#if LIBCURL_VERSION_NUM >= 0x072c00
+#define GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY 1
+#define GIT_CURL_HAVE_CURLE_SSL_PINNEDPUBKEYNOTMATCH 1
+#endif
+
+/**
+ * CURL_HTTP_VERSION_2 was added in 7.43.0, released in June 2015.
+ *
+ * The CURL_HTTP_VERSION_2 alias (but not CURL_HTTP_VERSION_2_0) has
+ * always been a macro, not an enum field (checked on curl version
+ * 7.78.0)
+ */
+#if LIBCURL_VERSION_NUM >= 0x072b00
+#define GIT_CURL_HAVE_CURL_HTTP_VERSION_2 1
+#endif
+
+/**
+ * CURLSSLOPT_NO_REVOKE was added in 7.44.0, released in August 2015.
+ *
+ * CURLSSLOPT_NO_REVOKE has always been a macro, not an enum field
+ * (checked on curl version 7.78.0).
+ */
+#if LIBCURL_VERSION_NUM >= 0x072c00
+#define GIT_CURL_HAVE_CURLSSLOPT_NO_REVOKE 1
+#endif
+
+/**
+ * CURLOPT_PROXY_CAINFO was added in 7.52.0, released in December 2016.
+ */
+#if LIBCURL_VERSION_NUM >= 0x073400
+#define GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO 1
+#endif
+
+/**
+ * CURLOPT_PROXY_{KEYPASSWD,SSLCERT,SSLKEY} were added in 7.52.0,
+ * released in December 2016.
+ */
+#if LIBCURL_VERSION_NUM >= 0x073400
+#define GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD 1
+#endif
+
+/**
+ * CURL_SSLVERSION_TLSv1_3 was added in 7.52.0, released in December
+ * 2016.
+ */
+#if LIBCURL_VERSION_NUM >= 0x073400
+#define GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_3 1
+#endif
+
+/**
+ * CURLSSLSET_{NO_BACKENDS,OK,TOO_LATE,UNKNOWN_BACKEND} were added in
+ * 7.56.0, released in October 2017.
+ */
+#if LIBCURL_VERSION_NUM >= 0x073800
+#define GIT_CURL_HAVE_CURLSSLSET_NO_BACKENDS
+#endif
+
+#endif
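
The http.c and imap-send.c hunks below show how these macros are meant to be consumed: call sites test GIT_CURL_HAVE_* instead of comparing LIBCURL_VERSION_NUM directly. A condensed sketch of that pattern (modeled on the pinned-public-key hunk in http.c, not copied from it):

    #include "git-curl-compat.h"

    static void maybe_pin_public_key(CURL *c, const char *pinnedkey)
    {
    #ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY
        if (pinnedkey)
            curl_easy_setopt(c, CURLOPT_PINNEDPUBLICKEY, pinnedkey);
    #else
        if (pinnedkey)
            warning(_("Public key pinning not supported with cURL < 7.39.0"));
    #endif
    }
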
diff --git a/git-cvsserver.perl b/git-cvsserver.perl
index ed035f32c2..64319bed43 100755
--- a/git-cvsserver.perl
+++ b/git-cvsserver.perl
@@ -222,10 +222,11 @@ if ($state->{method} eq 'pserver') {
open my $passwd, "<", $authdb or die $!;
while (<$passwd>) {
if (m{^\Q$user\E:(.*)}) {
- if (crypt($user, descramble($password)) eq $1) {
+ my $hash = crypt(descramble($password), $1);
+ if (defined $hash and $hash eq $1) {
$auth_ok = 1;
}
- };
+ }
}
close $passwd;
diff --git a/git-send-email.perl b/git-send-email.perl
index fd79849530..5262d88ee3 100755
--- a/git-send-email.perl
+++ b/git-send-email.perl
@@ -376,7 +376,7 @@ sub read_config {
@$target = @values;
}
else {
- my $v = $known_keys->{$key}->[0];
+ my $v = $known_keys->{$key}->[-1];
next unless defined $v;
next if $configured->{$setting}++;
$$target = $v;
diff --git a/git-sh-setup.sh b/git-sh-setup.sh
index 10d9764185..cee053cdc3 100644
--- a/git-sh-setup.sh
+++ b/git-sh-setup.sh
@@ -223,9 +223,6 @@ require_clean_work_tree () {
"rewrite branches")
gettextln "Cannot rewrite branches: You have unstaged changes." >&2
;;
- "pull with rebase")
- gettextln "Cannot pull with rebase: You have unstaged changes." >&2
- ;;
*)
eval_gettextln "Cannot \$action: You have unstaged changes." >&2
;;
@@ -242,9 +239,6 @@ require_clean_work_tree () {
rebase)
gettextln "Cannot rebase: Your index contains uncommitted changes." >&2
;;
- "pull with rebase")
- gettextln "Cannot pull with rebase: Your index contains uncommitted changes." >&2
- ;;
*)
eval_gettextln "Cannot \$action: Your index contains uncommitted changes." >&2
;;
diff --git a/git-submodule.sh b/git-submodule.sh
index dbd2ec2050..652861aa66 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -63,11 +63,6 @@ isnumber()
n=$(($1 + 0)) 2>/dev/null && test "$n" = "$1"
}
-# Given a full hex object ID, is this the zero OID?
-is_zero_oid () {
- echo "$1" | sane_egrep '^0+$' >/dev/null 2>&1
-}
-
# Sanitize the local git environment for use within a submodule. We
# can't simply use clear_local_git_env since we want to preserve some
# of the settings from GIT_CONFIG_PARAMETERS.
@@ -145,130 +140,12 @@ cmd_add()
shift
done
- if ! git submodule--helper config --check-writeable >/dev/null 2>&1
- then
- die "fatal: $(eval_gettext "please make sure that the .gitmodules file is in the working tree")"
- fi
-
- if test -n "$reference_path"
+ if test -z "$1"
then
- is_absolute_path "$reference_path" ||
- reference_path="$wt_prefix$reference_path"
-
- reference="--reference=$reference_path"
- fi
-
- repo=$1
- sm_path=$2
-
- if test -z "$sm_path"; then
- sm_path=$(printf '%s\n' "$repo" |
- sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
- fi
-
- if test -z "$repo" || test -z "$sm_path"; then
usage
fi
- is_absolute_path "$sm_path" || sm_path="$wt_prefix$sm_path"
-
- # assure repo is absolute or relative to parent
- case "$repo" in
- ./*|../*)
- test -z "$wt_prefix" ||
- die "fatal: $(gettext "Relative path can only be used from the toplevel of the working tree")"
-
- # dereference source url relative to parent's url
- realrepo=$(git submodule--helper resolve-relative-url "$repo") || exit
- ;;
- *:*|/*)
- # absolute url
- realrepo=$repo
- ;;
- *)
- die "fatal: $(eval_gettext "repo URL: '\$repo' must be absolute or begin with ./|../")"
- ;;
- esac
-
- # normalize path:
- # multiple //; leading ./; /./; /../; trailing /
- sm_path=$(printf '%s/\n' "$sm_path" |
- sed -e '
- s|//*|/|g
- s|^\(\./\)*||
- s|/\(\./\)*|/|g
- :start
- s|\([^/]*\)/\.\./||
- tstart
- s|/*$||
- ')
- if test -z "$force"
- then
- git ls-files --error-unmatch "$sm_path" > /dev/null 2>&1 &&
- die "fatal: $(eval_gettext "'\$sm_path' already exists in the index")"
- else
- git ls-files -s "$sm_path" | sane_grep -v "^160000" > /dev/null 2>&1 &&
- die "fatal: $(eval_gettext "'\$sm_path' already exists in the index and is not a submodule")"
- fi
-
- if test -d "$sm_path" &&
- test -z $(git -C "$sm_path" rev-parse --show-cdup 2>/dev/null)
- then
- git -C "$sm_path" rev-parse --verify -q HEAD >/dev/null ||
- die "fatal: $(eval_gettext "'\$sm_path' does not have a commit checked out")"
- fi
-
- if test -z "$force"
- then
- dryerr=$(git add --dry-run --ignore-missing --no-warn-embedded-repo "$sm_path" 2>&1 >/dev/null)
- res=$?
- if test $res -ne 0
- then
- echo >&2 "$dryerr"
- exit $res
- fi
- fi
-
- if test -n "$custom_name"
- then
- sm_name="$custom_name"
- else
- sm_name="$sm_path"
- fi
-
- if ! git submodule--helper check-name "$sm_name"
- then
- die "fatal: $(eval_gettext "'$sm_name' is not a valid submodule name")"
- fi
-
- git submodule--helper add-clone ${GIT_QUIET:+--quiet} ${force:+"--force"} ${progress:+"--progress"} ${branch:+--branch "$branch"} --prefix "$wt_prefix" --path "$sm_path" --name "$sm_name" --url "$realrepo" ${reference:+"$reference"} ${dissociate:+"--dissociate"} ${depth:+"$depth"} || exit
- git config submodule."$sm_name".url "$realrepo"
-
- git add --no-warn-embedded-repo $force "$sm_path" ||
- die "fatal: $(eval_gettext "Failed to add submodule '\$sm_path'")"
-
- git submodule--helper config submodule."$sm_name".path "$sm_path" &&
- git submodule--helper config submodule."$sm_name".url "$repo" &&
- if test -n "$branch"
- then
- git submodule--helper config submodule."$sm_name".branch "$branch"
- fi &&
- git add --force .gitmodules ||
- die "fatal: $(eval_gettext "Failed to register submodule '\$sm_path'")"
-
- # NEEDSWORK: In a multi-working-tree world, this needs to be
- # set in the per-worktree config.
- if git config --get submodule.active >/dev/null
- then
- # If the submodule being adding isn't already covered by the
- # current configured pathspec, set the submodule's active flag
- if ! git submodule--helper is-active "$sm_path"
- then
- git config submodule."$sm_name".active "true"
- fi
- else
- git config submodule."$sm_name".active "true"
- fi
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper add ${GIT_QUIET:+--quiet} ${force:+--force} ${progress:+"--progress"} ${branch:+--branch "$branch"} ${reference_path:+--reference "$reference_path"} ${dissociate:+--dissociate} ${custom_name:+--name "$custom_name"} ${depth:+"$depth"} -- "$@"
}
#
@@ -369,13 +246,6 @@ cmd_deinit()
git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@"
}
-is_tip_reachable () (
- sanitize_submodule_env &&
- cd "$1" &&
- rev=$(git rev-list -n 1 "$2" --not --all 2>/dev/null) &&
- test -z "$rev"
-)
-
# usage: fetch_in_submodule <module_path> [<depth>] [<sha1>]
# Because arguments are positional, use an empty string to omit <depth>
# but include <sha1>.
@@ -519,14 +389,13 @@ cmd_update()
git submodule--helper ensure-core-worktree "$sm_path" || exit 1
- update_module=$(git submodule--helper update-module-mode $just_cloned "$sm_path" $update)
-
displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
if test $just_cloned -eq 1
then
subsha1=
else
+ just_cloned=
subsha1=$(sanitize_submodule_env; cd "$sm_path" &&
git rev-parse --verify HEAD) ||
die "fatal: $(eval_gettext "Unable to find current revision in submodule path '\$displaypath'")"
@@ -547,70 +416,38 @@ cmd_update()
die "fatal: $(eval_gettext "Unable to find current \${remote_name}/\${branch} revision in submodule path '\$sm_path'")"
fi
- if test "$subsha1" != "$sha1" || test -n "$force"
- then
- subforce=$force
- # If we don't already have a -f flag and the submodule has never been checked out
- if test -z "$subsha1" && test -z "$force"
- then
- subforce="-f"
- fi
-
- if test -z "$nofetch"
- then
- # Run fetch only if $sha1 isn't present or it
- # is not reachable from a ref.
- is_tip_reachable "$sm_path" "$sha1" ||
- fetch_in_submodule "$sm_path" $depth ||
- say "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'; trying to directly fetch \$sha1:")"
-
- # Now we tried the usual fetch, but $sha1 may
- # not be reachable from any of the refs
- is_tip_reachable "$sm_path" "$sha1" ||
- fetch_in_submodule "$sm_path" "$depth" "$sha1" ||
- die "fatal: $(eval_gettext "Fetched in submodule path '\$displaypath', but it did not contain \$sha1. Direct fetching of that commit failed.")"
- fi
-
- must_die_on_failure=
- case "$update_module" in
- checkout)
- command="git checkout $subforce -q"
- die_msg="fatal: $(eval_gettext "Unable to checkout '\$sha1' in submodule path '\$displaypath'")"
- say_msg="$(eval_gettext "Submodule path '\$displaypath': checked out '\$sha1'")"
- ;;
- rebase)
- command="git rebase ${GIT_QUIET:+--quiet}"
- die_msg="fatal: $(eval_gettext "Unable to rebase '\$sha1' in submodule path '\$displaypath'")"
- say_msg="$(eval_gettext "Submodule path '\$displaypath': rebased into '\$sha1'")"
- must_die_on_failure=yes
- ;;
- merge)
- command="git merge ${GIT_QUIET:+--quiet}"
- die_msg="fatal: $(eval_gettext "Unable to merge '\$sha1' in submodule path '\$displaypath'")"
- say_msg="$(eval_gettext "Submodule path '\$displaypath': merged in '\$sha1'")"
- must_die_on_failure=yes
- ;;
- !*)
- command="${update_module#!}"
- die_msg="fatal: $(eval_gettext "Execution of '\$command \$sha1' failed in submodule path '\$displaypath'")"
- say_msg="$(eval_gettext "Submodule path '\$displaypath': '\$command \$sha1'")"
- must_die_on_failure=yes
- ;;
- *)
- die "fatal: $(eval_gettext "Invalid update mode '$update_module' for submodule path '$path'")"
- esac
-
- if (sanitize_submodule_env; cd "$sm_path" && $command "$sha1")
- then
- say "$say_msg"
- elif test -n "$must_die_on_failure"
- then
- die_with_status 2 "$die_msg"
- else
- err="${err};$die_msg"
- continue
- fi
- fi
+ out=$(git submodule--helper run-update-procedure \
+ ${wt_prefix:+--prefix "$wt_prefix"} \
+ ${GIT_QUIET:+--quiet} \
+ ${force:+--force} \
+ ${just_cloned:+--just-cloned} \
+ ${nofetch:+--no-fetch} \
+ ${depth:+"$depth"} \
+ ${update:+--update "$update"} \
+ ${prefix:+--recursive-prefix "$prefix"} \
+ ${sha1:+--oid "$sha1"} \
+ ${subsha1:+--suboid "$subsha1"} \
+ "--" \
+ "$sm_path")
+
+ # exit codes for run-update-procedure:
+ # 0: update was successful, say command output
+ # 1: update procedure failed, but should not die
+ # 2 or 128: subcommand died during execution
+ # 3: no update procedure was run
+ res="$?"
+ case $res in
+ 0)
+ say "$out"
+ ;;
+ 1)
+ err="${err};fatal: $out"
+ continue
+ ;;
+ 2|128)
+ die_with_status $res "fatal: $out"
+ ;;
+ esac
if test -n "$recursive"
then
diff --git a/git.c b/git.c
index 18bed9a996..60c2784be4 100644
--- a/git.c
+++ b/git.c
@@ -561,7 +561,7 @@ static struct cmd_struct commands[] = {
{ "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT },
{ "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT },
{ "mktree", cmd_mktree, RUN_SETUP },
- { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP_GENTLY },
+ { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP },
{ "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE },
{ "name-rev", cmd_name_rev, RUN_SETUP },
{ "notes", cmd_notes, RUN_SETUP },
diff --git a/grep.c b/grep.c
index 424a39591b..14fe8a0fd2 100644
--- a/grep.c
+++ b/grep.c
@@ -867,7 +867,7 @@ void free_grep_patterns(struct grep_opt *opt)
free_pattern_expr(opt->pattern_expression);
}
-static char *end_of_line(char *cp, unsigned long *left)
+static const char *end_of_line(const char *cp, unsigned long *left)
{
unsigned long l = *left;
while (l && *cp != '\n') {
@@ -908,7 +908,8 @@ static void show_name(struct grep_opt *opt, const char *name)
opt->output(opt, opt->null_following_name ? "\0" : "\n", 1);
}
-static int patmatch(struct grep_pat *p, char *line, char *eol,
+static int patmatch(struct grep_pat *p,
+ const char *line, const char *eol,
regmatch_t *match, int eflags)
{
int hit;
@@ -922,20 +923,16 @@ static int patmatch(struct grep_pat *p, char *line, char *eol,
return hit;
}
-static int strip_timestamp(char *bol, char **eol_p)
+static void strip_timestamp(const char *bol, const char **eol_p)
{
- char *eol = *eol_p;
- int ch;
+ const char *eol = *eol_p;
while (bol < --eol) {
if (*eol != '>')
continue;
*eol_p = ++eol;
- ch = *eol;
- *eol = '\0';
- return ch;
+ break;
}
- return 0;
}
static struct {
@@ -947,12 +944,12 @@ static struct {
{ "reflog ", 7 },
};
-static int match_one_pattern(struct grep_pat *p, char *bol, char *eol,
+static int match_one_pattern(struct grep_pat *p,
+ const char *bol, const char *eol,
enum grep_context ctx,
regmatch_t *pmatch, int eflags)
{
int hit = 0;
- int saved_ch = 0;
const char *start = bol;
if ((p->token != GREP_PATTERN) &&
@@ -971,7 +968,7 @@ static int match_one_pattern(struct grep_pat *p, char *bol, char *eol,
switch (p->field) {
case GREP_HEADER_AUTHOR:
case GREP_HEADER_COMMITTER:
- saved_ch = strip_timestamp(bol, &eol);
+ strip_timestamp(bol, &eol);
break;
default:
break;
@@ -1021,8 +1018,6 @@ static int match_one_pattern(struct grep_pat *p, char *bol, char *eol,
goto again;
}
}
- if (p->token == GREP_PATTERN_HEAD && saved_ch)
- *eol = saved_ch;
if (hit) {
pmatch[0].rm_so += bol - start;
pmatch[0].rm_eo += bol - start;
@@ -1030,8 +1025,9 @@ static int match_one_pattern(struct grep_pat *p, char *bol, char *eol,
return hit;
}
-static int match_expr_eval(struct grep_opt *opt, struct grep_expr *x, char *bol,
- char *eol, enum grep_context ctx, ssize_t *col,
+static int match_expr_eval(struct grep_opt *opt, struct grep_expr *x,
+ const char *bol, const char *eol,
+ enum grep_context ctx, ssize_t *col,
ssize_t *icol, int collect_hits)
{
int h = 0;
@@ -1098,7 +1094,8 @@ static int match_expr_eval(struct grep_opt *opt, struct grep_expr *x, char *bol,
return h;
}
-static int match_expr(struct grep_opt *opt, char *bol, char *eol,
+static int match_expr(struct grep_opt *opt,
+ const char *bol, const char *eol,
enum grep_context ctx, ssize_t *col,
ssize_t *icol, int collect_hits)
{
@@ -1106,7 +1103,8 @@ static int match_expr(struct grep_opt *opt, char *bol, char *eol,
return match_expr_eval(opt, x, bol, eol, ctx, col, icol, collect_hits);
}
-static int match_line(struct grep_opt *opt, char *bol, char *eol,
+static int match_line(struct grep_opt *opt,
+ const char *bol, const char *eol,
ssize_t *col, ssize_t *icol,
enum grep_context ctx, int collect_hits)
{
@@ -1138,7 +1136,8 @@ static int match_line(struct grep_opt *opt, char *bol, char *eol,
return hit;
}
-static int match_next_pattern(struct grep_pat *p, char *bol, char *eol,
+static int match_next_pattern(struct grep_pat *p,
+ const char *bol, const char *eol,
enum grep_context ctx,
regmatch_t *pmatch, int eflags)
{
@@ -1159,7 +1158,8 @@ static int match_next_pattern(struct grep_pat *p, char *bol, char *eol,
return 1;
}
-static int next_match(struct grep_opt *opt, char *bol, char *eol,
+static int next_match(struct grep_opt *opt,
+ const char *bol, const char *eol,
enum grep_context ctx, regmatch_t *pmatch, int eflags)
{
struct grep_pat *p;
@@ -1215,7 +1215,8 @@ static void show_line_header(struct grep_opt *opt, const char *name,
}
}
-static void show_line(struct grep_opt *opt, char *bol, char *eol,
+static void show_line(struct grep_opt *opt,
+ const char *bol, const char *eol,
const char *name, unsigned lno, ssize_t cno, char sign)
{
int rest = eol - bol;
@@ -1246,7 +1247,6 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol,
if (opt->color || opt->only_matching) {
regmatch_t match;
enum grep_context ctx = GREP_CONTEXT_BODY;
- int ch = *eol;
int eflags = 0;
if (opt->color) {
@@ -1261,7 +1261,6 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol,
else if (sign == '=')
line_color = opt->colors[GREP_COLOR_FUNCTION];
}
- *eol = '\0';
while (next_match(opt, bol, eol, ctx, &match, eflags)) {
if (match.rm_so == match.rm_eo)
break;
@@ -1279,7 +1278,6 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol,
rest -= match.rm_eo;
eflags = REG_NOTBOL;
}
- *eol = ch;
}
if (!opt->only_matching) {
output_color(opt, bol, rest, line_color);
@@ -1307,7 +1305,8 @@ static inline void grep_attr_unlock(void)
pthread_mutex_unlock(&grep_attr_mutex);
}
-static int match_funcname(struct grep_opt *opt, struct grep_source *gs, char *bol, char *eol)
+static int match_funcname(struct grep_opt *opt, struct grep_source *gs,
+ const char *bol, const char *eol)
{
xdemitconf_t *xecfg = opt->priv;
if (xecfg && !xecfg->find_func) {
@@ -1334,10 +1333,10 @@ static int match_funcname(struct grep_opt *opt, struct grep_source *gs, char *bo
}
static void show_funcname_line(struct grep_opt *opt, struct grep_source *gs,
- char *bol, unsigned lno)
+ const char *bol, unsigned lno)
{
while (bol > gs->buf) {
- char *eol = --bol;
+ const char *eol = --bol;
while (bol > gs->buf && bol[-1] != '\n')
bol--;
@@ -1356,7 +1355,7 @@ static void show_funcname_line(struct grep_opt *opt, struct grep_source *gs,
static int is_empty_line(const char *bol, const char *eol);
static void show_pre_context(struct grep_opt *opt, struct grep_source *gs,
- char *bol, char *end, unsigned lno)
+ const char *bol, const char *end, unsigned lno)
{
unsigned cur = lno, from = 1, funcname_lno = 0, orig_from;
int funcname_needed = !!opt->funcname, comment_needed = 0;
@@ -1376,8 +1375,8 @@ static void show_pre_context(struct grep_opt *opt, struct grep_source *gs,
/* Rewind. */
while (bol > gs->buf && cur > from) {
- char *next_bol = bol;
- char *eol = --bol;
+ const char *next_bol = bol;
+ const char *eol = --bol;
while (bol > gs->buf && bol[-1] != '\n')
bol--;
@@ -1408,7 +1407,7 @@ static void show_pre_context(struct grep_opt *opt, struct grep_source *gs,
/* Back forward. */
while (cur < lno) {
- char *eol = bol, sign = (cur == funcname_lno) ? '=' : '-';
+ const char *eol = bol, sign = (cur == funcname_lno) ? '=' : '-';
while (*eol != '\n')
eol++;
@@ -1436,12 +1435,12 @@ static int should_lookahead(struct grep_opt *opt)
static int look_ahead(struct grep_opt *opt,
unsigned long *left_p,
unsigned *lno_p,
- char **bol_p)
+ const char **bol_p)
{
unsigned lno = *lno_p;
- char *bol = *bol_p;
+ const char *bol = *bol_p;
struct grep_pat *p;
- char *sp, *last_bol;
+ const char *sp, *last_bol;
regoff_t earliest = -1;
for (p = opt->pattern_list; p; p = p->next) {
@@ -1543,8 +1542,8 @@ static int is_empty_line(const char *bol, const char *eol)
static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int collect_hits)
{
- char *bol;
- char *peek_bol = NULL;
+ const char *bol;
+ const char *peek_bol = NULL;
unsigned long left;
unsigned lno = 1;
unsigned last_hit = 0;
@@ -1626,7 +1625,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle
bol = gs->buf;
left = gs->size;
while (left) {
- char *eol, ch;
+ const char *eol;
int hit;
ssize_t cno;
ssize_t col = -1, icol = -1;
@@ -1647,14 +1646,11 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle
&& look_ahead(opt, &left, &lno, &bol))
break;
eol = end_of_line(bol, &left);
- ch = *eol;
- *eol = 0;
if ((ctx == GREP_CONTEXT_HEAD) && (eol == bol))
ctx = GREP_CONTEXT_BODY;
hit = match_line(opt, bol, eol, &col, &icol, ctx, collect_hits);
- *eol = ch;
if (collect_hits)
goto next_line;
@@ -1713,7 +1709,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle
}
if (show_function && (!peek_bol || peek_bol < bol)) {
unsigned long peek_left = left;
- char *peek_eol = eol;
+ const char *peek_eol = eol;
/*
* Trailing empty lines are not interesting.
@@ -1825,14 +1821,25 @@ int grep_source(struct grep_opt *opt, struct grep_source *gs)
return grep_source_1(opt, gs, 0);
}
-int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size)
+static void grep_source_init_buf(struct grep_source *gs,
+ const char *buf,
+ unsigned long size)
+{
+ gs->type = GREP_SOURCE_BUF;
+ gs->name = NULL;
+ gs->path = NULL;
+ gs->buf = buf;
+ gs->size = size;
+ gs->driver = NULL;
+ gs->identifier = NULL;
+}
+
+int grep_buffer(struct grep_opt *opt, const char *buf, unsigned long size)
{
struct grep_source gs;
int r;
- grep_source_init(&gs, GREP_SOURCE_BUF, NULL, NULL, NULL);
- gs.buf = buf;
- gs.size = size;
+ grep_source_init_buf(&gs, buf, size);
r = grep_source(opt, &gs);
@@ -1840,28 +1847,30 @@ int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size)
return r;
}
-void grep_source_init(struct grep_source *gs, enum grep_source_type type,
- const char *name, const char *path,
- const void *identifier)
+void grep_source_init_file(struct grep_source *gs, const char *name,
+ const char *path)
{
- gs->type = type;
+ gs->type = GREP_SOURCE_FILE;
gs->name = xstrdup_or_null(name);
gs->path = xstrdup_or_null(path);
gs->buf = NULL;
gs->size = 0;
gs->driver = NULL;
+ gs->identifier = xstrdup(path);
+}
- switch (type) {
- case GREP_SOURCE_FILE:
- gs->identifier = xstrdup(identifier);
- break;
- case GREP_SOURCE_OID:
- gs->identifier = oiddup(identifier);
- break;
- case GREP_SOURCE_BUF:
- gs->identifier = NULL;
- break;
- }
+void grep_source_init_oid(struct grep_source *gs, const char *name,
+ const char *path, const struct object_id *oid,
+ struct repository *repo)
+{
+ gs->type = GREP_SOURCE_OID;
+ gs->name = xstrdup_or_null(name);
+ gs->path = xstrdup_or_null(path);
+ gs->buf = NULL;
+ gs->size = 0;
+ gs->driver = NULL;
+ gs->identifier = oiddup(oid);
+ gs->repo = repo;
}
void grep_source_clear(struct grep_source *gs)
@@ -1877,7 +1886,9 @@ void grep_source_clear_data(struct grep_source *gs)
switch (gs->type) {
case GREP_SOURCE_FILE:
case GREP_SOURCE_OID:
- FREE_AND_NULL(gs->buf);
+ /* these types own the buffer */
+ free((char *)gs->buf);
+ gs->buf = NULL;
gs->size = 0;
break;
case GREP_SOURCE_BUF:
@@ -1890,7 +1901,8 @@ static int grep_source_load_oid(struct grep_source *gs)
{
enum object_type type;
- gs->buf = read_object_file(gs->identifier, &type, &gs->size);
+ gs->buf = repo_read_object_file(gs->repo, gs->identifier, &type,
+ &gs->size);
if (!gs->buf)
return error(_("'%s': unable to read %s"),
gs->name,
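
The running theme of these grep.c changes is const-correctness: the old code temporarily wrote a NUL at eol (saving and restoring the overwritten byte, see the removed saved_ch/ch variables) so line-oriented helpers could treat the buffer as a C string; the new code passes explicit bol/eol bounds and leaves the buffer untouched. A minimal illustration of the two styles (not the actual matcher code):

    /* Old style: temporarily NUL-terminate, then restore the byte. */
    static int old_style_match(char *bol, char *eol)
    {
        char saved = *eol;
        int hit;
        *eol = '\0';
        hit = !!strstr(bol, "needle");
        *eol = saved;
        return hit;
    }

    /* New style: keep the buffer const and pass explicit bounds. */
    static int new_style_match(const char *bol, const char *eol)
    {
        return !!memmem(bol, eol - bol, "needle", strlen("needle"));
    }
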
diff --git a/grep.h b/grep.h
index 72f82b1e30..3c75ed1fd8 100644
--- a/grep.h
+++ b/grep.h
@@ -120,7 +120,20 @@ struct grep_opt {
struct grep_pat *header_list;
struct grep_pat **header_tail;
struct grep_expr *pattern_expression;
+
+ /*
+ * NEEDSWORK: See if we can remove this field, because the repository
+ * should probably be per-source. That is, grep.c functions using this
+ * field should probably start using "repo" in "struct grep_source"
+ * instead.
+ *
+ * This is potentially the cause of at least one bug - "git grep"
+ * using the textconv attributes from the superproject on the
+ * submodules. See the failing "git grep --textconv" tests in
+ * t7814-grep-recurse-submodules.sh for more information.
+ */
struct repository *repo;
+
const char *prefix;
int prefix_length;
regex_t regexp;
@@ -176,7 +189,7 @@ void append_grep_pattern(struct grep_opt *opt, const char *pat, const char *orig
void append_header_grep_pattern(struct grep_opt *, enum grep_header_field, const char *);
void compile_grep_patterns(struct grep_opt *opt);
void free_grep_patterns(struct grep_opt *opt);
-int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size);
+int grep_buffer(struct grep_opt *opt, const char *buf, unsigned long size);
struct grep_source {
char *name;
@@ -187,17 +200,20 @@ struct grep_source {
GREP_SOURCE_BUF,
} type;
void *identifier;
+ struct repository *repo; /* if GREP_SOURCE_OID */
- char *buf;
+ const char *buf;
unsigned long size;
char *path; /* for attribute lookups */
struct userdiff_driver *driver;
};
-void grep_source_init(struct grep_source *gs, enum grep_source_type type,
- const char *name, const char *path,
- const void *identifier);
+void grep_source_init_file(struct grep_source *gs, const char *name,
+ const char *path);
+void grep_source_init_oid(struct grep_source *gs, const char *name,
+ const char *path, const struct object_id *oid,
+ struct repository *repo);
void grep_source_clear_data(struct grep_source *gs);
void grep_source_clear(struct grep_source *gs);
void grep_source_load_driver(struct grep_source *gs,
@@ -207,7 +223,6 @@ void grep_source_load_driver(struct grep_source *gs,
int grep_source(struct grep_opt *opt, struct grep_source *gs);
struct grep_opt *grep_opt_dup(const struct grep_opt *opt);
-int grep_threads_ok(const struct grep_opt *opt);
/*
* Mutex used around access to the attributes machinery if
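
grep_source_init() is split into type-specific initializers, so the formerly untyped "identifier" argument is now type-checked (a path for files, a struct object_id plus repository for objects). A hypothetical wrapper, just to show the new call shapes next to the removed one:

    static void init_examples(struct grep_source *gs_file,
                              struct grep_source *gs_oid,
                              const char *name, const char *path,
                              const struct object_id *oid,
                              struct repository *repo)
    {
        /* old, removed convention:
         *   grep_source_init(gs_file, GREP_SOURCE_FILE, name, path, path);
         *   grep_source_init(gs_oid,  GREP_SOURCE_OID,  name, path, oid);
         */
        grep_source_init_file(gs_file, name, path);
        grep_source_init_oid(gs_oid, name, path, oid, repo);
    }
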
diff --git a/hook.c b/hook.c
new file mode 100644
index 0000000000..55e1145a4b
--- /dev/null
+++ b/hook.c
@@ -0,0 +1,42 @@
+#include "cache.h"
+#include "hook.h"
+#include "run-command.h"
+
+const char *find_hook(const char *name)
+{
+ static struct strbuf path = STRBUF_INIT;
+
+ strbuf_reset(&path);
+ strbuf_git_path(&path, "hooks/%s", name);
+ if (access(path.buf, X_OK) < 0) {
+ int err = errno;
+
+#ifdef STRIP_EXTENSION
+ strbuf_addstr(&path, STRIP_EXTENSION);
+ if (access(path.buf, X_OK) >= 0)
+ return path.buf;
+ if (errno == EACCES)
+ err = errno;
+#endif
+
+ if (err == EACCES && advice_enabled(ADVICE_IGNORED_HOOK)) {
+ static struct string_list advise_given = STRING_LIST_INIT_DUP;
+
+ if (!string_list_lookup(&advise_given, name)) {
+ string_list_insert(&advise_given, name);
+ advise(_("The '%s' hook was ignored because "
+ "it's not set as executable.\n"
+ "You can disable this warning with "
+ "`git config advice.ignoredHook false`."),
+ path.buf);
+ }
+ }
+ return NULL;
+ }
+ return path.buf;
+}
+
+int hook_exists(const char *name)
+{
+ return !!find_hook(name);
+}
diff --git a/hook.h b/hook.h
new file mode 100644
index 0000000000..6aa36fc7ff
--- /dev/null
+++ b/hook.h
@@ -0,0 +1,16 @@
+#ifndef HOOK_H
+#define HOOK_H
+
+/*
+ * Returns the path to the hook file, or NULL if the hook is missing
+ * or disabled. Note that this points to static storage that will be
+ * overwritten by further calls to find_hook and run_hook_*.
+ */
+const char *find_hook(const char *name);
+
+/**
+ * A boolean version of find_hook()
+ */
+int hook_exists(const char *hookname);
+
+#endif
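
The new hook.[ch] pair provides find_hook() and hook_exists(). A tiny usage sketch (a hypothetical caller; note the static-storage caveat from the header comment above):

    #include "cache.h"
    #include "hook.h"

    /* Hypothetical caller; "path" points at static storage, so use it
     * before the next find_hook()/run_hook_*() call. */
    static void report_hooks(void)
    {
        const char *path = find_hook("pre-commit");

        if (path)
            warning("pre-commit hook at %s", path);
        if (!hook_exists("commit-msg"))
            warning("no commit-msg hook configured");
    }
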
diff --git a/http-backend.c b/http-backend.c
index b329bf63f0..e7c0eeab23 100644
--- a/http-backend.c
+++ b/http-backend.c
@@ -534,7 +534,7 @@ static void get_info_refs(struct strbuf *hdr, char *arg)
if (service_name) {
const char *argv[] = {NULL /* service name */,
- "--stateless-rpc", "--advertise-refs",
+ "--http-backend-info-refs",
".", NULL};
struct rpc_service *svc = select_service(hdr, service_name);
@@ -739,6 +739,7 @@ static int bad_request(struct strbuf *hdr, const struct service_cmd *c)
int cmd_main(int argc, const char **argv)
{
char *method = getenv("REQUEST_METHOD");
+ const char *proto_header;
char *dir;
struct service_cmd *cmd = NULL;
char *cmd_arg = NULL;
@@ -789,6 +790,9 @@ int cmd_main(int argc, const char **argv)
http_config();
max_request_buffer = git_env_ulong("GIT_HTTP_MAX_REQUEST_BUFFER",
max_request_buffer);
+ proto_header = getenv("HTTP_GIT_PROTOCOL");
+ if (proto_header)
+ setenv(GIT_PROTOCOL_ENVIRONMENT, proto_header, 0);
cmd->imp(&hdr, cmd_arg);
return 0;
diff --git a/http.c b/http.c
index a0f169d2fe..53cf7ad570 100644
--- a/http.c
+++ b/http.c
@@ -1,4 +1,5 @@
#include "git-compat-util.h"
+#include "git-curl-compat.h"
#include "http.h"
#include "config.h"
#include "pack.h"
@@ -47,19 +48,19 @@ static struct {
{ "sslv2", CURL_SSLVERSION_SSLv2 },
{ "sslv3", CURL_SSLVERSION_SSLv3 },
{ "tlsv1", CURL_SSLVERSION_TLSv1 },
-#if LIBCURL_VERSION_NUM >= 0x072200
+#ifdef GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_0
{ "tlsv1.0", CURL_SSLVERSION_TLSv1_0 },
{ "tlsv1.1", CURL_SSLVERSION_TLSv1_1 },
{ "tlsv1.2", CURL_SSLVERSION_TLSv1_2 },
#endif
-#if LIBCURL_VERSION_NUM >= 0x073400
+#ifdef GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_3
{ "tlsv1.3", CURL_SSLVERSION_TLSv1_3 },
#endif
};
static const char *ssl_key;
static const char *ssl_capath;
static const char *curl_no_proxy;
-#if LIBCURL_VERSION_NUM >= 0x072c00
+#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY
static const char *ssl_pinnedkey;
#endif
static const char *ssl_cainfo;
@@ -373,10 +374,10 @@ static int http_options(const char *var, const char *value, void *cb)
}
if (!strcmp("http.pinnedpubkey", var)) {
-#if LIBCURL_VERSION_NUM >= 0x072c00
+#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY
return git_config_pathname(&ssl_pinnedkey, var, value);
#else
- warning(_("Public key pinning not supported with cURL < 7.44.0"));
+ warning(_("Public key pinning not supported with cURL < 7.39.0"));
return 0;
#endif
}
@@ -500,7 +501,7 @@ static int has_cert_password(void)
return 1;
}
-#if LIBCURL_VERSION_NUM >= 0x073400
+#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD
static int has_proxy_cert_password(void)
{
if (http_proxy_ssl_cert == NULL || proxy_ssl_cert_password_required != 1)
@@ -516,7 +517,7 @@ static int has_proxy_cert_password(void)
}
#endif
-#if LIBCURL_VERSION_NUM >= 0x071900
+#ifdef GIT_CURL_HAVE_CURLOPT_TCP_KEEPALIVE
static void set_curl_keepalive(CURL *c)
{
curl_easy_setopt(c, CURLOPT_TCP_KEEPALIVE, 1);
@@ -536,7 +537,7 @@ static int sockopt_callback(void *client, curl_socket_t fd, curlsocktype type)
if (rc < 0)
warning_errno("unable to set SO_KEEPALIVE on socket");
- return 0; /* CURL_SOCKOPT_OK only exists since curl 7.21.5 */
+ return CURL_SOCKOPT_OK;
}
static void set_curl_keepalive(CURL *c)
@@ -550,8 +551,8 @@ static void redact_sensitive_header(struct strbuf *header)
const char *sensitive_header;
if (trace_curl_redact &&
- (skip_prefix(header->buf, "Authorization:", &sensitive_header) ||
- skip_prefix(header->buf, "Proxy-Authorization:", &sensitive_header))) {
+ (skip_iprefix(header->buf, "Authorization:", &sensitive_header) ||
+ skip_iprefix(header->buf, "Proxy-Authorization:", &sensitive_header))) {
/* The first token is the type, which is OK to log */
while (isspace(*sensitive_header))
sensitive_header++;
@@ -561,7 +562,7 @@ static void redact_sensitive_header(struct strbuf *header)
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
} else if (trace_curl_redact &&
- skip_prefix(header->buf, "Cookie:", &sensitive_header)) {
+ skip_iprefix(header->buf, "Cookie:", &sensitive_header)) {
struct strbuf redacted_header = STRBUF_INIT;
const char *cookie;
@@ -732,7 +733,7 @@ static long get_curl_allowed_protocols(int from_user)
return allowed_protocols;
}
-#if LIBCURL_VERSION_NUM >=0x072f00
+#ifdef GIT_CURL_HAVE_CURL_HTTP_VERSION_2
static int get_curl_http_version_opt(const char *version_string, long *opt)
{
int i;
@@ -774,7 +775,7 @@ static CURL *get_curl_handle(void)
curl_easy_setopt(result, CURLOPT_SSL_VERIFYHOST, 2);
}
-#if LIBCURL_VERSION_NUM >= 0x072f00 // 7.47.0
+#ifdef GIT_CURL_HAVE_CURL_HTTP_VERSION_2
if (curl_http_version) {
long opt;
if (!get_curl_http_version_opt(curl_http_version, &opt)) {
@@ -805,7 +806,7 @@ static CURL *get_curl_handle(void)
if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) &&
!http_schannel_check_revoke) {
-#if LIBCURL_VERSION_NUM >= 0x072c00
+#ifdef GIT_CURL_HAVE_CURLSSLOPT_NO_REVOKE
curl_easy_setopt(result, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NO_REVOKE);
#else
warning(_("CURLSSLOPT_NO_REVOKE not supported with cURL < 7.44.0"));
@@ -845,20 +846,20 @@ static CURL *get_curl_handle(void)
curl_easy_setopt(result, CURLOPT_SSLKEY, ssl_key);
if (ssl_capath != NULL)
curl_easy_setopt(result, CURLOPT_CAPATH, ssl_capath);
-#if LIBCURL_VERSION_NUM >= 0x072c00
+#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY
if (ssl_pinnedkey != NULL)
curl_easy_setopt(result, CURLOPT_PINNEDPUBLICKEY, ssl_pinnedkey);
#endif
if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) &&
!http_schannel_use_ssl_cainfo) {
curl_easy_setopt(result, CURLOPT_CAINFO, NULL);
-#if LIBCURL_VERSION_NUM >= 0x073400
+#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO
curl_easy_setopt(result, CURLOPT_PROXY_CAINFO, NULL);
#endif
} else if (ssl_cainfo != NULL || http_proxy_ssl_ca_info != NULL) {
if (ssl_cainfo != NULL)
curl_easy_setopt(result, CURLOPT_CAINFO, ssl_cainfo);
-#if LIBCURL_VERSION_NUM >= 0x073400
+#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO
if (http_proxy_ssl_ca_info != NULL)
curl_easy_setopt(result, CURLOPT_PROXY_CAINFO, http_proxy_ssl_ca_info);
#endif
@@ -927,7 +928,6 @@ static CURL *get_curl_handle(void)
*/
curl_easy_setopt(result, CURLOPT_PROXY, "");
} else if (curl_http_proxy) {
-#if LIBCURL_VERSION_NUM >= 0x071800
if (starts_with(curl_http_proxy, "socks5h"))
curl_easy_setopt(result,
CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME);
@@ -940,8 +940,7 @@ static CURL *get_curl_handle(void)
else if (starts_with(curl_http_proxy, "socks"))
curl_easy_setopt(result,
CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4);
-#endif
-#if LIBCURL_VERSION_NUM >= 0x073400
+#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD
else if (starts_with(curl_http_proxy, "https")) {
curl_easy_setopt(result, CURLOPT_PROXYTYPE, CURLPROXY_HTTPS);
@@ -1006,7 +1005,7 @@ void http_init(struct remote *remote, const char *url, int proactive_auth)
free(normalized_url);
string_list_clear(&config.vars, 1);
-#if LIBCURL_VERSION_NUM >= 0x073800
+#ifdef GIT_CURL_HAVE_CURLSSLSET_NO_BACKENDS
if (http_ssl_backend) {
const curl_ssl_backend **backends;
struct strbuf buf = STRBUF_INIT;
@@ -1490,6 +1489,10 @@ static int handle_curl_result(struct slot_results *results)
*/
credential_reject(&cert_auth);
return HTTP_NOAUTH;
+#ifdef GIT_CURL_HAVE_CURLE_SSL_PINNEDPUBKEYNOTMATCH
+ } else if (results->curl_result == CURLE_SSL_PINNEDPUBKEYNOTMATCH) {
+ return HTTP_NOMATCHPUBLICKEY;
+#endif
} else if (missing_target(results))
return HTTP_MISSING_TARGET;
else if (results->http_code == 401) {
diff --git a/http.h b/http.h
index 3db5a0cf32..df1590e53a 100644
--- a/http.h
+++ b/http.h
@@ -154,6 +154,7 @@ struct http_get_options {
#define HTTP_START_FAILED 3
#define HTTP_REAUTH 4
#define HTTP_NOAUTH 5
+#define HTTP_NOMATCHPUBLICKEY 6
/*
* Requests a URL and stores the result in a strbuf.
diff --git a/imap-send.c b/imap-send.c
index 49a5f8aa59..e6090a0346 100644
--- a/imap-send.c
+++ b/imap-send.c
@@ -1441,7 +1441,7 @@ static CURL *setup_curl(struct imap_server_conf *srvc, struct credential *cred)
curl_easy_setopt(curl, CURLOPT_PORT, server.port);
if (server.auth_method) {
-#if LIBCURL_VERSION_NUM < 0x072200
+#ifndef GIT_CURL_HAVE_CURLOPT_LOGIN_OPTIONS
warning("No LOGIN_OPTIONS support in this cURL version");
#else
struct strbuf auth = STRBUF_INIT;
diff --git a/list.h b/list.h
index eb601192f4..362a4cd7f5 100644
--- a/list.h
+++ b/list.h
@@ -46,7 +46,10 @@ struct list_head {
#define INIT_LIST_HEAD(ptr) \
(ptr)->next = (ptr)->prev = (ptr)
-#define LIST_HEAD_INIT(name) { &(name), &(name) }
+#define LIST_HEAD_INIT(name) { \
+ .next = &(name), \
+ .prev = &(name), \
+}
/* Add new element at the head of the list. */
static inline void list_add(struct list_head *newp, struct list_head *head)
diff --git a/lockfile.h b/lockfile.h
index db93e6ba73..90af4e66b2 100644
--- a/lockfile.h
+++ b/lockfile.h
@@ -121,7 +121,7 @@ struct lock_file {
struct tempfile *tempfile;
};
-#define LOCK_INIT { NULL }
+#define LOCK_INIT { 0 }
/* String appended to a filename to derive the lockfile name: */
#define LOCK_SUFFIX ".lock"
diff --git a/log-tree.h b/log-tree.h
index 1e8c91dbf2..e7e4641cf8 100644
--- a/log-tree.h
+++ b/log-tree.h
@@ -14,10 +14,8 @@ struct decoration_filter {
};
int parse_decorate_color_config(const char *var, const char *slot_name, const char *value);
-void init_log_tree_opt(struct rev_info *);
int log_tree_diff_flush(struct rev_info *);
int log_tree_commit(struct rev_info *, struct commit *);
-int log_tree_opt_parse(struct rev_info *, const char **, int);
void show_log(struct rev_info *opt);
void format_decorations_extended(struct strbuf *sb, const struct commit *commit,
int use_color,
diff --git a/ls-refs.c b/ls-refs.c
index 84021416ca..54078323dc 100644
--- a/ls-refs.c
+++ b/ls-refs.c
@@ -41,6 +41,12 @@ static void ensure_config_read(void)
}
/*
+ * If we see this many or more "ref-prefix" lines from the client, we consider
+ * it "too many" and will avoid using the prefix feature entirely.
+ */
+#define TOO_MANY_PREFIXES 65536
+
+/*
* Check if one of the prefixes is a prefix of the ref.
* If no prefixes were provided, all refs match.
*/
@@ -107,7 +113,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
}
strbuf_addch(&data->buf, '\n');
- packet_write(1, data->buf.buf, data->buf.len);
+ packet_fwrite(stdout, data->buf.buf, data->buf.len);
return 0;
}
@@ -139,8 +145,7 @@ static int ls_refs_config(const char *var, const char *value, void *data)
return parse_hide_refs_config(var, value, "uploadpack");
}
-int ls_refs(struct repository *r, struct strvec *keys,
- struct packet_reader *request)
+int ls_refs(struct repository *r, struct packet_reader *request)
{
struct ls_refs_data data;
@@ -159,21 +164,33 @@ int ls_refs(struct repository *r, struct strvec *keys,
data.peel = 1;
else if (!strcmp("symrefs", arg))
data.symrefs = 1;
- else if (skip_prefix(arg, "ref-prefix ", &out))
- strvec_push(&data.prefixes, out);
+ else if (skip_prefix(arg, "ref-prefix ", &out)) {
+ if (data.prefixes.nr < TOO_MANY_PREFIXES)
+ strvec_push(&data.prefixes, out);
+ }
else if (!strcmp("unborn", arg))
data.unborn = allow_unborn;
+ else
+ die(_("unexpected line: '%s'"), arg);
}
if (request->status != PACKET_READ_FLUSH)
die(_("expected flush after ls-refs arguments"));
+ /*
+ * If we saw too many prefixes, we must avoid using them at all; as
+ * soon as we have any prefix, they are meant to form a comprehensive
+ * list.
+ */
+ if (data.prefixes.nr >= TOO_MANY_PREFIXES)
+ strvec_clear(&data.prefixes);
+
send_possibly_unborn_head(&data);
if (!data.prefixes.nr)
strvec_push(&data.prefixes, "");
for_each_fullref_in_prefixes(get_git_namespace(), data.prefixes.v,
- send_ref, &data, 0);
- packet_flush(1);
+ send_ref, &data);
+ packet_fflush(stdout);
strvec_clear(&data.prefixes);
strbuf_release(&data.buf);
return 0;
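
The TOO_MANY_PREFIXES cap above has two halves: stop storing prefixes once the limit is hit, then discard the whole list if the limit was reached, since a partial prefix list would silently hide refs. A standalone sketch of that shape (a toy array stands in for the strvec):

    #include <stddef.h>
    #include <stdio.h>

    #define TOY_TOO_MANY 4   /* the real limit above is 65536 */

    static const char *prefixes[TOY_TOO_MANY];
    static size_t prefixes_nr;

    static void maybe_add_prefix(const char *prefix)
    {
            /* Past the cap we stop collecting; the count stays pinned. */
            if (prefixes_nr < TOY_TOO_MANY)
                    prefixes[prefixes_nr++] = prefix;
    }

    static void finalize_prefixes(void)
    {
            /*
             * Hitting the cap means the list may be incomplete, and an
             * incomplete filter would hide refs; fall back to "no filter".
             */
            if (prefixes_nr >= TOY_TOO_MANY)
                    prefixes_nr = 0;
    }

    int main(void)
    {
            const char *args[] = { "refs/heads/", "refs/tags/", "refs/a",
                                   "refs/b", "refs/c" };
            size_t i;

            for (i = 0; i < sizeof(args) / sizeof(*args); i++)
                    maybe_add_prefix(args[i]);
            finalize_prefixes();
            printf("prefixes kept: %zu\n", prefixes_nr);   /* 0: fell back */
            return 0;
    }
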
diff --git a/ls-refs.h b/ls-refs.h
index a99e4be0bd..e2243a547b 100644
--- a/ls-refs.h
+++ b/ls-refs.h
@@ -2,10 +2,8 @@
#define LS_REFS_H
struct repository;
-struct strvec;
struct packet_reader;
-int ls_refs(struct repository *r, struct strvec *keys,
- struct packet_reader *request);
+int ls_refs(struct repository *r, struct packet_reader *request);
int ls_refs_advertise(struct repository *r, struct strbuf *value);
#endif /* LS_REFS_H */
diff --git a/merge-ort.c b/merge-ort.c
index 515dc39b7f..e5456f4722 100644
--- a/merge-ort.c
+++ b/merge-ort.c
@@ -32,6 +32,7 @@
#include "promisor-remote.h"
#include "revision.h"
#include "strmap.h"
+#include "submodule-config.h"
#include "submodule.h"
#include "tree.h"
#include "unpack-trees.h"
@@ -1511,7 +1512,6 @@ static int find_first_merges(struct repository *repo,
xsnprintf(merged_revision, sizeof(merged_revision), "^%s",
oid_to_hex(&a->object.oid));
repo_init_revisions(repo, &revs, NULL);
- rev_opts.submodule = path;
/* FIXME: can't handle linked worktrees in submodules yet */
revs.single_worktree = path != NULL;
setup_revisions(ARRAY_SIZE(rev_args)-1, rev_args, &revs, &rev_opts);
@@ -1521,7 +1521,7 @@ static int find_first_merges(struct repository *repo,
die("revision walk setup failed");
while ((commit = get_revision(&revs)) != NULL) {
struct object *o = &(commit->object);
- if (in_merge_bases(b, commit))
+ if (repo_in_merge_bases(repo, b, commit))
add_object_array(o, NULL, &merges);
}
reset_revision_walk();
@@ -1536,7 +1536,7 @@ static int find_first_merges(struct repository *repo,
contains_another = 0;
for (j = 0; j < merges.nr; j++) {
struct commit *m2 = (struct commit *) merges.objects[j].item;
- if (i != j && in_merge_bases(m2, m1)) {
+ if (i != j && repo_in_merge_bases(repo, m2, m1)) {
contains_another = 1;
break;
}
@@ -1557,10 +1557,12 @@ static int merge_submodule(struct merge_options *opt,
const struct object_id *b,
struct object_id *result)
{
+ struct repository subrepo;
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
struct commit *commit_o, *commit_a, *commit_b;
int parent_count;
struct object_array merges;
- struct strbuf sb = STRBUF_INIT;
int i;
int search = !opt->priv->call_depth;
@@ -1576,6 +1578,10 @@ static int merge_submodule(struct merge_options *opt,
if (is_null_oid(b))
return 0;
+ /*
+ * NEEDSWORK: Remove this when all submodule object accesses are
+ * through explicitly specified repositories.
+ */
if (add_submodule_odb(path)) {
path_msg(opt, path, 0,
_("Failed to merge submodule %s (not checked out)"),
@@ -1583,39 +1589,48 @@ static int merge_submodule(struct merge_options *opt,
return 0;
}
- if (!(commit_o = lookup_commit_reference(opt->repo, o)) ||
- !(commit_a = lookup_commit_reference(opt->repo, a)) ||
- !(commit_b = lookup_commit_reference(opt->repo, b))) {
+ if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) {
+ path_msg(opt, path, 0,
+ _("Failed to merge submodule %s (not checked out)"),
+ path);
+ return 0;
+ }
+
+ if (!(commit_o = lookup_commit_reference(&subrepo, o)) ||
+ !(commit_a = lookup_commit_reference(&subrepo, a)) ||
+ !(commit_b = lookup_commit_reference(&subrepo, b))) {
path_msg(opt, path, 0,
_("Failed to merge submodule %s (commits not present)"),
path);
- return 0;
+ goto cleanup;
}
/* check whether both changes are forward */
- if (!in_merge_bases(commit_o, commit_a) ||
- !in_merge_bases(commit_o, commit_b)) {
+ if (!repo_in_merge_bases(&subrepo, commit_o, commit_a) ||
+ !repo_in_merge_bases(&subrepo, commit_o, commit_b)) {
path_msg(opt, path, 0,
_("Failed to merge submodule %s "
"(commits don't follow merge-base)"),
path);
- return 0;
+ goto cleanup;
}
/* Case #1: a is contained in b or vice versa */
- if (in_merge_bases(commit_a, commit_b)) {
+ if (repo_in_merge_bases(&subrepo, commit_a, commit_b)) {
oidcpy(result, b);
path_msg(opt, path, 1,
_("Note: Fast-forwarding submodule %s to %s"),
path, oid_to_hex(b));
- return 1;
+ ret = 1;
+ goto cleanup;
}
- if (in_merge_bases(commit_b, commit_a)) {
+ if (repo_in_merge_bases(&subrepo, commit_b, commit_a)) {
oidcpy(result, a);
path_msg(opt, path, 1,
_("Note: Fast-forwarding submodule %s to %s"),
path, oid_to_hex(a));
- return 1;
+ ret = 1;
+ goto cleanup;
}
/*
@@ -1627,10 +1642,10 @@ static int merge_submodule(struct merge_options *opt,
/* Skip the search if makes no sense to the calling context. */
if (!search)
- return 0;
+ goto cleanup;
/* find commit which merges them */
- parent_count = find_first_merges(opt->repo, path, commit_a, commit_b,
+ parent_count = find_first_merges(&subrepo, path, commit_a, commit_b,
&merges);
switch (parent_count) {
case 0:
@@ -1664,7 +1679,9 @@ static int merge_submodule(struct merge_options *opt,
}
object_array_clear(&merges);
- return 0;
+cleanup:
+ repo_clear(&subrepo);
+ return ret;
}
static void initialize_attr_index(struct merge_options *opt)
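
The merge_submodule() rewrite above replaces scattered early returns with a single cleanup: label so the struct repository opened by repo_submodule_init() is always released by repo_clear(). A minimal standalone sketch of that control-flow shape (the resource type and helpers are hypothetical stand-ins, not Git API):

    #include <stdio.h>

    struct toy_resource { int open; };

    static int toy_open(struct toy_resource *r)  { r->open = 1; return 0; }
    static void toy_close(struct toy_resource *r) { r->open = 0; }

    static int merge_with_cleanup(int fail_early)
    {
            struct toy_resource res;
            int ret = 0;               /* "no merge" until proven otherwise */

            if (toy_open(&res))
                    return 0;          /* nothing acquired, plain return is fine */

            if (fail_early)
                    goto cleanup;      /* error paths keep ret == 0 */

            ret = 1;                   /* success path */

    cleanup:
            toy_close(&res);           /* every exit after toy_open() lands here */
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", merge_with_cleanup(1), merge_with_cleanup(0));
            return 0;
    }
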
@@ -4045,11 +4062,7 @@ static int checkout(struct merge_options *opt,
unpack_opts.quiet = 0; /* FIXME: sequencer might want quiet? */
unpack_opts.verbose_update = (opt->verbosity > 2);
unpack_opts.fn = twoway_merge;
- if (1/* FIXME: opts->overwrite_ignore*/) {
- CALLOC_ARRAY(unpack_opts.dir, 1);
- unpack_opts.dir->flags |= DIR_SHOW_IGNORED;
- setup_standard_excludes(unpack_opts.dir);
- }
+ unpack_opts.preserve_ignored = 0; /* FIXME: !opts->overwrite_ignore */
parse_tree(prev);
init_tree_desc(&trees[0], prev->buffer, prev->size);
parse_tree(next);
@@ -4057,8 +4070,6 @@ static int checkout(struct merge_options *opt,
ret = unpack_trees(2, trees, &unpack_opts);
clear_unpack_trees_porcelain(&unpack_opts);
- dir_clear(unpack_opts.dir);
- FREE_AND_NULL(unpack_opts.dir);
return ret;
}
@@ -4074,6 +4085,21 @@ static int record_conflicted_index_entries(struct merge_options *opt)
if (strmap_empty(&opt->priv->conflicted))
return 0;
+ /*
+ * We are in a conflicted state. These conflicts might be inside
+ * sparse-directory entries, so check if any entries are outside
+ * of the sparse-checkout cone preemptively.
+ *
+ * We set original_cache_nr below, but that might change if
+ * index_name_pos() calls ask for paths within sparse directories.
+ */
+ strmap_for_each_entry(&opt->priv->conflicted, &iter, e) {
+ if (!path_in_sparse_checkout(e->key, index)) {
+ ensure_full_index(index);
+ break;
+ }
+ }
+
/* If any entries have skip_worktree set, we'll have to check 'em out */
state.force = 1;
state.quiet = 1;
@@ -4081,7 +4107,7 @@ static int record_conflicted_index_entries(struct merge_options *opt)
state.istate = index;
original_cache_nr = index->cache_nr;
- /* Put every entry from paths into plist, then sort */
+ /* Append every entry from conflicted into index, then sort */
strmap_for_each_entry(&opt->priv->conflicted, &iter, e) {
const char *path = e->key;
struct conflict_info *ci = e->value;
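
The new loop in record_conflicted_index_entries() above probes each conflicted path and expands the sparse index at most once, before original_cache_nr is relied upon. A standalone sketch of that check-once/expand-once pattern (toy helpers stand in for path_in_sparse_checkout() and ensure_full_index()):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_index { int expanded; };

    /* Toy rule: only paths under "src/" are inside the sparse cone. */
    static int toy_in_cone(const char *path)
    {
            return !strncmp(path, "src/", 4);
    }

    static void expand_if_needed(struct toy_index *idx,
                                 const char **paths, size_t nr)
    {
            size_t i;

            for (i = 0; i < nr; i++) {
                    if (!toy_in_cone(paths[i])) {
                            idx->expanded = 1;  /* one expansion covers all later lookups */
                            break;
                    }
            }
    }

    int main(void)
    {
            const char *conflicted[] = { "src/a.c", "docs/readme" };
            struct toy_index idx = { 0 };

            expand_if_needed(&idx, conflicted, 2);
            printf("expanded: %d\n", idx.expanded);
            return 0;
    }
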
diff --git a/merge-recursive.c b/merge-recursive.c
index 840599fd53..c553751889 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -24,6 +24,7 @@
#include "repository.h"
#include "revision.h"
#include "string-list.h"
+#include "submodule-config.h"
#include "submodule.h"
#include "tag.h"
#include "tree-walk.h"
@@ -408,8 +409,11 @@ static int unpack_trees_start(struct merge_options *opt,
memset(&opt->priv->unpack_opts, 0, sizeof(opt->priv->unpack_opts));
if (opt->priv->call_depth)
opt->priv->unpack_opts.index_only = 1;
- else
+ else {
opt->priv->unpack_opts.update = 1;
+ /* FIXME: should only do this if !overwrite_ignore */
+ opt->priv->unpack_opts.preserve_ignored = 0;
+ }
opt->priv->unpack_opts.merge = 1;
opt->priv->unpack_opts.head_idx = 2;
opt->priv->unpack_opts.fn = threeway_merge;
@@ -1110,7 +1114,6 @@ static int find_first_merges(struct repository *repo,
xsnprintf(merged_revision, sizeof(merged_revision), "^%s",
oid_to_hex(&a->object.oid));
repo_init_revisions(repo, &revs, NULL);
- rev_opts.submodule = path;
/* FIXME: can't handle linked worktrees in submodules yet */
revs.single_worktree = path != NULL;
setup_revisions(ARRAY_SIZE(rev_args)-1, rev_args, &revs, &rev_opts);
@@ -1120,7 +1123,7 @@ static int find_first_merges(struct repository *repo,
die("revision walk setup failed");
while ((commit = get_revision(&revs)) != NULL) {
struct object *o = &(commit->object);
- if (in_merge_bases(b, commit))
+ if (repo_in_merge_bases(repo, b, commit))
add_object_array(o, NULL, &merges);
}
reset_revision_walk();
@@ -1135,7 +1138,7 @@ static int find_first_merges(struct repository *repo,
contains_another = 0;
for (j = 0; j < merges.nr; j++) {
struct commit *m2 = (struct commit *) merges.objects[j].item;
- if (i != j && in_merge_bases(m2, m1)) {
+ if (i != j && repo_in_merge_bases(repo, m2, m1)) {
contains_another = 1;
break;
}
@@ -1171,6 +1174,8 @@ static int merge_submodule(struct merge_options *opt,
const struct object_id *base, const struct object_id *a,
const struct object_id *b)
{
+ struct repository subrepo;
+ int ret = 0;
struct commit *commit_base, *commit_a, *commit_b;
int parent_count;
struct object_array merges;
@@ -1194,27 +1199,36 @@ static int merge_submodule(struct merge_options *opt,
if (is_null_oid(b))
return 0;
+ /*
+ * NEEDSWORK: Remove this when all submodule object accesses are
+ * through explicitly specified repositories.
+ */
if (add_submodule_odb(path)) {
output(opt, 1, _("Failed to merge submodule %s (not checked out)"), path);
return 0;
}
- if (!(commit_base = lookup_commit_reference(opt->repo, base)) ||
- !(commit_a = lookup_commit_reference(opt->repo, a)) ||
- !(commit_b = lookup_commit_reference(opt->repo, b))) {
- output(opt, 1, _("Failed to merge submodule %s (commits not present)"), path);
+ if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) {
+ output(opt, 1, _("Failed to merge submodule %s (not checked out)"), path);
return 0;
}
+ if (!(commit_base = lookup_commit_reference(&subrepo, base)) ||
+ !(commit_a = lookup_commit_reference(&subrepo, a)) ||
+ !(commit_b = lookup_commit_reference(&subrepo, b))) {
+ output(opt, 1, _("Failed to merge submodule %s (commits not present)"), path);
+ goto cleanup;
+ }
+
/* check whether both changes are forward */
- if (!in_merge_bases(commit_base, commit_a) ||
- !in_merge_bases(commit_base, commit_b)) {
+ if (!repo_in_merge_bases(&subrepo, commit_base, commit_a) ||
+ !repo_in_merge_bases(&subrepo, commit_base, commit_b)) {
output(opt, 1, _("Failed to merge submodule %s (commits don't follow merge-base)"), path);
- return 0;
+ goto cleanup;
}
/* Case #1: a is contained in b or vice versa */
- if (in_merge_bases(commit_a, commit_b)) {
+ if (repo_in_merge_bases(&subrepo, commit_a, commit_b)) {
oidcpy(result, b);
if (show(opt, 3)) {
output(opt, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
@@ -1224,9 +1238,10 @@ static int merge_submodule(struct merge_options *opt,
else
; /* no output */
- return 1;
+ ret = 1;
+ goto cleanup;
}
- if (in_merge_bases(commit_b, commit_a)) {
+ if (repo_in_merge_bases(&subrepo, commit_b, commit_a)) {
oidcpy(result, a);
if (show(opt, 3)) {
output(opt, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
@@ -1236,7 +1251,8 @@ static int merge_submodule(struct merge_options *opt,
else
; /* no output */
- return 1;
+ ret = 1;
+ goto cleanup;
}
/*
@@ -1248,10 +1264,10 @@ static int merge_submodule(struct merge_options *opt,
/* Skip the search if makes no sense to the calling context. */
if (!search)
- return 0;
+ goto cleanup;
/* find commit which merges them */
- parent_count = find_first_merges(opt->repo, &merges, path,
+ parent_count = find_first_merges(&subrepo, &merges, path,
commit_a, commit_b);
switch (parent_count) {
case 0:
@@ -1278,7 +1294,9 @@ static int merge_submodule(struct merge_options *opt,
}
object_array_clear(&merges);
- return 0;
+cleanup:
+ repo_clear(&subrepo);
+ return ret;
}
static int merge_mode_and_contents(struct merge_options *opt,
@@ -3747,6 +3765,9 @@ int merge_recursive(struct merge_options *opt,
assert(opt->ancestor == NULL ||
!strcmp(opt->ancestor, "constructed merge base"));
+ prepare_repo_settings(opt->repo);
+ opt->repo->settings.command_requires_full_index = 1;
+
if (merge_start(opt, repo_get_commit_tree(opt->repo, h1)))
return -1;
clean = merge_recursive_internal(opt, h1, h2, merge_bases, result);
diff --git a/merge.c b/merge.c
index 6e736881d9..2382ff66d3 100644
--- a/merge.c
+++ b/merge.c
@@ -53,7 +53,6 @@ int checkout_fast_forward(struct repository *r,
struct unpack_trees_options opts;
struct tree_desc t[MAX_UNPACK_TREES];
int i, nr_trees = 0;
- struct dir_struct dir = DIR_INIT;
struct lock_file lock_file = LOCK_INIT;
refresh_index(r->index, REFRESH_QUIET, NULL, NULL, NULL);
@@ -80,11 +79,7 @@ int checkout_fast_forward(struct repository *r,
}
memset(&opts, 0, sizeof(opts));
- if (overwrite_ignore) {
- dir.flags |= DIR_SHOW_IGNORED;
- setup_standard_excludes(&dir);
- opts.dir = &dir;
- }
+ opts.preserve_ignored = !overwrite_ignore;
opts.head_idx = 1;
opts.src_index = r->index;
@@ -101,7 +96,6 @@ int checkout_fast_forward(struct repository *r,
clear_unpack_trees_porcelain(&opts);
return -1;
}
- dir_clear(&dir);
clear_unpack_trees_porcelain(&opts);
if (write_locked_index(r->index, &lock_file, COMMIT_LOCK))
diff --git a/midx.c b/midx.c
index 321c6fdd2f..4a01583fe8 100644
--- a/midx.c
+++ b/midx.c
@@ -13,6 +13,10 @@
#include "repository.h"
#include "chunk-format.h"
#include "pack.h"
+#include "pack-bitmap.h"
+#include "refs.h"
+#include "revision.h"
+#include "list-objects.h"
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1
@@ -48,12 +52,12 @@ static uint8_t oid_version(void)
}
}
-static const unsigned char *get_midx_checksum(struct multi_pack_index *m)
+const unsigned char *get_midx_checksum(struct multi_pack_index *m)
{
return m->data + m->data_len - the_hash_algo->rawsz;
}
-static char *get_midx_filename(const char *object_dir)
+char *get_midx_filename(const char *object_dir)
{
return xstrfmt("%s/pack/multi-pack-index", object_dir);
}
@@ -195,6 +199,8 @@ void close_midx(struct multi_pack_index *m)
if (!m)
return;
+ close_midx(m->next);
+
munmap((unsigned char *)m->data, m->data_len);
for (i = 0; i < m->num_packs; i++) {
@@ -203,6 +209,7 @@ void close_midx(struct multi_pack_index *m)
}
FREE_AND_NULL(m->packs);
FREE_AND_NULL(m->pack_names);
+ free(m);
}
int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id)
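
close_midx() above now recurses down the ->next chain and frees each node, so callers can hand it the head of the MIDX list and forget about the rest. The same teardown shape, as a tiny standalone sketch (allocation-failure checks omitted for brevity):

    #include <stdlib.h>

    struct toy_node { struct toy_node *next; };

    /* Release the whole chain, then the node itself; NULL is a no-op. */
    static void close_all(struct toy_node *n)
    {
            if (!n)
                    return;
            close_all(n->next);
            free(n);
    }

    int main(void)
    {
            struct toy_node *head = calloc(1, sizeof(*head));
            head->next = calloc(1, sizeof(*head->next));
            close_all(head);
            return 0;
    }
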
@@ -276,14 +283,18 @@ uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
(off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
}
-static int nth_midxed_pack_entry(struct repository *r,
- struct multi_pack_index *m,
- struct pack_entry *e,
- uint32_t pos)
+int fill_midx_entry(struct repository * r,
+ const struct object_id *oid,
+ struct pack_entry *e,
+ struct multi_pack_index *m)
{
+ uint32_t pos;
uint32_t pack_int_id;
struct packed_git *p;
+ if (!bsearch_midx(oid, m, &pos))
+ return 0;
+
if (pos >= m->num_objects)
return 0;
@@ -303,15 +314,9 @@ static int nth_midxed_pack_entry(struct repository *r,
if (!is_pack_valid(p))
return 0;
- if (p->num_bad_objects) {
- uint32_t i;
- struct object_id oid;
- nth_midxed_object_oid(&oid, m, pos);
- for (i = 0; i < p->num_bad_objects; i++)
- if (hasheq(oid.hash,
- p->bad_object_sha1 + the_hash_algo->rawsz * i))
- return 0;
- }
+ if (oidset_size(&p->bad_objects) &&
+ oidset_contains(&p->bad_objects, oid))
+ return 0;
e->offset = nth_midxed_offset(m, pos);
e->p = p;
@@ -319,19 +324,6 @@ static int nth_midxed_pack_entry(struct repository *r,
return 1;
}
-int fill_midx_entry(struct repository * r,
- const struct object_id *oid,
- struct pack_entry *e,
- struct multi_pack_index *m)
-{
- uint32_t pos;
-
- if (!bsearch_midx(oid, m, &pos))
- return 0;
-
- return nth_midxed_pack_entry(r, m, e, pos);
-}
-
/* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
static int cmp_idx_or_pack_name(const char *idx_or_pack_name,
const char *idx_name)
@@ -882,7 +874,7 @@ static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
strbuf_release(&buf);
}
-static void clear_midx_files_ext(struct repository *r, const char *ext,
+static void clear_midx_files_ext(const char *object_dir, const char *ext,
unsigned char *keep_hash);
static int midx_checksum_valid(struct multi_pack_index *m)
@@ -890,7 +882,171 @@ static int midx_checksum_valid(struct multi_pack_index *m)
return hashfile_checksum_valid(m->data, m->data_len);
}
-static int write_midx_internal(const char *object_dir, struct multi_pack_index *m,
+static void prepare_midx_packing_data(struct packing_data *pdata,
+ struct write_midx_context *ctx)
+{
+ uint32_t i;
+
+ memset(pdata, 0, sizeof(struct packing_data));
+ prepare_packing_data(the_repository, pdata);
+
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]];
+ struct object_entry *to = packlist_alloc(pdata, &from->oid);
+
+ oe_set_in_pack(pdata, to,
+ ctx->info[ctx->pack_perm[from->pack_int_id]].p);
+ }
+}
+
+static int add_ref_to_pending(const char *refname,
+ const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct rev_info *revs = (struct rev_info*)cb_data;
+ struct object *object;
+
+ if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
+ warning("symbolic ref is dangling: %s", refname);
+ return 0;
+ }
+
+ object = parse_object_or_die(oid, refname);
+ if (object->type != OBJ_COMMIT)
+ return 0;
+
+ add_pending_object(revs, object, "");
+ if (bitmap_is_preferred_refname(revs->repo, refname))
+ object->flags |= NEEDS_BITMAP;
+ return 0;
+}
+
+struct bitmap_commit_cb {
+ struct commit **commits;
+ size_t commits_nr, commits_alloc;
+
+ struct write_midx_context *ctx;
+};
+
+static const struct object_id *bitmap_oid_access(size_t index,
+ const void *_entries)
+{
+ const struct pack_midx_entry *entries = _entries;
+ return &entries[index].oid;
+}
+
+static void bitmap_show_commit(struct commit *commit, void *_data)
+{
+ struct bitmap_commit_cb *data = _data;
+ int pos = oid_pos(&commit->object.oid, data->ctx->entries,
+ data->ctx->entries_nr,
+ bitmap_oid_access);
+ if (pos < 0)
+ return;
+
+ ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc);
+ data->commits[data->commits_nr++] = commit;
+}
+
+static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p,
+ struct write_midx_context *ctx)
+{
+ struct rev_info revs;
+ struct bitmap_commit_cb cb = {0};
+
+ cb.ctx = ctx;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ setup_revisions(0, NULL, &revs, NULL);
+ for_each_ref(add_ref_to_pending, &revs);
+
+ /*
+ * Skipping promisor objects here is intentional, since it only excludes
+ * them from the list of reachable commits that we want to select from
+ * when computing the selection of MIDX'd commits to receive bitmaps.
+ *
+ * Reachability bitmaps do require that their objects be closed under
+ * reachability, but fetching any objects missing from promisors at this
+	 * point is too late. But if one of those objects can be reached from
+	 * another object that is included in the bitmap, then we will
+ * complain later that we don't have reachability closure (and fail
+ * appropriately).
+ */
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+
+ traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb);
+ if (indexed_commits_nr_p)
+ *indexed_commits_nr_p = cb.commits_nr;
+
+ return cb.commits;
+}
+
+static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash,
+ struct write_midx_context *ctx,
+ unsigned flags)
+{
+ struct packing_data pdata;
+ struct pack_idx_entry **index;
+ struct commit **commits = NULL;
+ uint32_t i, commits_nr;
+ uint16_t options = 0;
+ char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name, hash_to_hex(midx_hash));
+ int ret;
+
+ if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
+ options |= BITMAP_OPT_HASH_CACHE;
+
+ prepare_midx_packing_data(&pdata, ctx);
+
+ commits = find_commits_for_midx_bitmap(&commits_nr, ctx);
+
+ /*
+ * Build the MIDX-order index based on pdata.objects (which is already
+ * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
+ * this order).
+ */
+ ALLOC_ARRAY(index, pdata.nr_objects);
+ for (i = 0; i < pdata.nr_objects; i++)
+ index[i] = &pdata.objects[i].idx;
+
+ bitmap_writer_show_progress(flags & MIDX_PROGRESS);
+ bitmap_writer_build_type_index(&pdata, index, pdata.nr_objects);
+
+ /*
+ * bitmap_writer_finish expects objects in lex order, but pack_order
+	 * gives us exactly that. Use it directly instead of re-sorting the
+ * array.
+ *
+ * This changes the order of objects in 'index' between
+ * bitmap_writer_build_type_index and bitmap_writer_finish.
+ *
+ * The same re-ordering takes place in the single-pack bitmap code via
+ * write_idx_file(), which is called by finish_tmp_packfile(), which
+ * happens between bitmap_writer_build_type_index() and
+ * bitmap_writer_finish().
+ */
+ for (i = 0; i < pdata.nr_objects; i++)
+ index[ctx->pack_order[i]] = &pdata.objects[i].idx;
+
+ bitmap_writer_select_commits(commits, commits_nr, -1);
+ ret = bitmap_writer_build(&pdata);
+ if (ret < 0)
+ goto cleanup;
+
+ bitmap_writer_set_checksum(midx_hash);
+ bitmap_writer_finish(index, pdata.nr_objects, bitmap_name, options);
+
+cleanup:
+ free(index);
+ free(bitmap_name);
+ return ret;
+}
+
+static int write_midx_internal(const char *object_dir,
struct string_list *packs_to_drop,
const char *preferred_pack_name,
unsigned flags)
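
The comment in write_midx_bitmap() above describes reordering index[] between bitmap_writer_build_type_index() and bitmap_writer_finish() by scattering entry i into slot pack_order[i]. That permutation scatter, reduced to a standalone toy:

    #include <stdio.h>

    int main(void)
    {
            /* Source entries in their original order. */
            const char *objects[] = { "a", "b", "c", "d" };
            /* Toy permutation playing the role of ctx->pack_order. */
            const size_t pack_order[] = { 2, 0, 3, 1 };
            const char *index[4];
            size_t i;

            /* Mirrors: index[ctx->pack_order[i]] = &pdata.objects[i].idx */
            for (i = 0; i < 4; i++)
                    index[pack_order[i]] = objects[i];

            for (i = 0; i < 4; i++)
                    printf("slot %zu -> %s\n", i, index[i]);
            return 0;
    }
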
@@ -901,20 +1057,26 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
struct hashfile *f = NULL;
struct lock_file lk;
struct write_midx_context ctx = { 0 };
+ struct multi_pack_index *cur;
int pack_name_concat_len = 0;
int dropped_packs = 0;
int result = 0;
struct chunkfile *cf;
+ /* Ensure the given object_dir is local, or a known alternate. */
+ find_odb(the_repository, object_dir);
+
midx_name = get_midx_filename(object_dir);
if (safe_create_leading_directories(midx_name))
die_errno(_("unable to create leading directories of %s"),
midx_name);
- if (m)
- ctx.m = m;
- else
- ctx.m = load_multi_pack_index(object_dir, 1);
+ for (cur = get_multi_pack_index(the_repository); cur; cur = cur->next) {
+ if (!strcmp(object_dir, cur->object_dir)) {
+ ctx.m = cur;
+ break;
+ }
+ }
if (ctx.m && !midx_checksum_valid(ctx.m)) {
warning(_("ignoring existing multi-pack-index; checksum mismatch"));
@@ -932,8 +1094,27 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
ctx.info[ctx.nr].orig_pack_int_id = i;
ctx.info[ctx.nr].pack_name = xstrdup(ctx.m->pack_names[i]);
- ctx.info[ctx.nr].p = NULL;
+ ctx.info[ctx.nr].p = ctx.m->packs[i];
ctx.info[ctx.nr].expired = 0;
+
+ if (flags & MIDX_WRITE_REV_INDEX) {
+ /*
+			 * If generating a reverse index, we need to have
+			 * packed_git's loaded to compare their
+ * mtimes and object count.
+ */
+ if (prepare_midx_pack(the_repository, ctx.m, i)) {
+ error(_("could not load pack"));
+ result = 1;
+ goto cleanup;
+ }
+
+ if (open_pack_index(ctx.m->packs[i]))
+ die(_("could not open index for %s"),
+ ctx.m->packs[i]->pack_name);
+ ctx.info[ctx.nr].p = ctx.m->packs[i];
+ }
+
ctx.nr++;
}
}
@@ -947,18 +1128,89 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
stop_progress(&ctx.progress);
- if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop)
- goto cleanup;
+ if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop) {
+ struct bitmap_index *bitmap_git;
+ int bitmap_exists;
+ int want_bitmap = flags & MIDX_WRITE_BITMAP;
+
+ bitmap_git = prepare_midx_bitmap_git(ctx.m);
+ bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
+ free_bitmap_index(bitmap_git);
+
+ if (bitmap_exists || !want_bitmap) {
+ /*
+ * The correct MIDX already exists, and so does a
+ * corresponding bitmap (or one wasn't requested).
+ */
+ if (!want_bitmap)
+ clear_midx_files_ext(object_dir, ".bitmap",
+ NULL);
+ goto cleanup;
+ }
+ }
- ctx.preferred_pack_idx = -1;
if (preferred_pack_name) {
+ int found = 0;
for (i = 0; i < ctx.nr; i++) {
if (!cmp_idx_or_pack_name(preferred_pack_name,
ctx.info[i].pack_name)) {
ctx.preferred_pack_idx = i;
+ found = 1;
break;
}
}
+
+ if (!found)
+ warning(_("unknown preferred pack: '%s'"),
+ preferred_pack_name);
+ } else if (ctx.nr &&
+ (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
+ struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
+ ctx.preferred_pack_idx = 0;
+
+ if (packs_to_drop && packs_to_drop->nr)
+ BUG("cannot write a MIDX bitmap during expiration");
+
+ /*
+ * set a preferred pack when writing a bitmap to ensure that
+ * the pack from which the first object is selected in pseudo
+ * pack-order has all of its objects selected from that pack
+ * (and not another pack containing a duplicate)
+ */
+ for (i = 1; i < ctx.nr; i++) {
+ struct packed_git *p = ctx.info[i].p;
+
+ if (!oldest->num_objects || p->mtime < oldest->mtime) {
+ oldest = p;
+ ctx.preferred_pack_idx = i;
+ }
+ }
+
+ if (!oldest->num_objects) {
+ /*
+			 * If all packs are empty, unset the preferred index.
+ * This is acceptable since there will be no duplicate
+ * objects to resolve, so the preferred value doesn't
+ * matter.
+ */
+ ctx.preferred_pack_idx = -1;
+ }
+ } else {
+ /*
+ * otherwise don't mark any pack as preferred to avoid
+ * interfering with expiration logic below
+ */
+ ctx.preferred_pack_idx = -1;
+ }
+
+ if (ctx.preferred_pack_idx > -1) {
+ struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
+ if (!preferred->num_objects) {
+ error(_("cannot select preferred pack %s with no objects"),
+ preferred->pack_name);
+ result = 1;
+ goto cleanup;
+ }
}
ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
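
When no preferred pack is named but a rev-index or bitmap is requested, the hunk above picks the oldest pack that still contains objects, and unsets the preference if every pack is empty. A standalone sketch of that selection rule (a toy struct, not Git's packed_git):

    #include <stdio.h>
    #include <time.h>

    struct toy_pack {
            const char *name;
            time_t mtime;
            unsigned num_objects;
    };

    /* Return the index of the oldest non-empty pack, or -1 if all are empty. */
    static int pick_preferred(const struct toy_pack *packs, int nr)
    {
            int preferred = 0, i;

            if (nr <= 0)
                    return -1;
            for (i = 1; i < nr; i++) {
                    /* An empty favorite is always displaced; otherwise prefer older. */
                    if (!packs[preferred].num_objects ||
                        packs[i].mtime < packs[preferred].mtime)
                            preferred = i;
            }
            return packs[preferred].num_objects ? preferred : -1;
    }

    int main(void)
    {
            struct toy_pack packs[] = {
                    { "pack-a", 300, 10 },
                    { "pack-b", 100, 0 },
                    { "pack-c", 200, 5 },
            };
            printf("preferred: %d\n", pick_preferred(packs, 3));   /* 2 */
            return 0;
    }
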
@@ -1029,11 +1281,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
ctx.info, ctx.nr,
sizeof(*ctx.info),
idx_or_pack_name_cmp);
-
- if (!preferred)
- warning(_("unknown preferred pack: '%s'"),
- preferred_pack_name);
- else {
+ if (preferred) {
uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
if (perm == PACK_EXPIRED)
warning(_("preferred pack '%s' is expired"),
@@ -1048,9 +1296,6 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
- if (ctx.m)
- close_midx(ctx.m);
-
if (ctx.nr - dropped_packs == 0) {
error(_("no pack files to index."));
result = 1;
@@ -1081,15 +1326,27 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
free_chunkfile(cf);
- if (flags & MIDX_WRITE_REV_INDEX)
+ if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))
ctx.pack_order = midx_pack_order(&ctx);
if (flags & MIDX_WRITE_REV_INDEX)
write_midx_reverse_index(midx_name, midx_hash, &ctx);
- clear_midx_files_ext(the_repository, ".rev", midx_hash);
+ if (flags & MIDX_WRITE_BITMAP) {
+ if (write_midx_bitmap(midx_name, midx_hash, &ctx, flags) < 0) {
+ error(_("could not write multi-pack bitmap"));
+ result = 1;
+ goto cleanup;
+ }
+ }
+
+ if (ctx.m)
+ close_object_store(the_repository->objects);
commit_lock_file(&lk);
+ clear_midx_files_ext(object_dir, ".bitmap", midx_hash);
+ clear_midx_files_ext(object_dir, ".rev", midx_hash);
+
cleanup:
for (i = 0; i < ctx.nr; i++) {
if (ctx.info[i].p) {
@@ -1104,6 +1361,7 @@ cleanup:
free(ctx.pack_perm);
free(ctx.pack_order);
free(midx_name);
+
return result;
}
@@ -1111,8 +1369,7 @@ int write_midx_file(const char *object_dir,
const char *preferred_pack_name,
unsigned flags)
{
- return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
- flags);
+ return write_midx_internal(object_dir, NULL, preferred_pack_name, flags);
}
struct clear_midx_data {
@@ -1135,7 +1392,7 @@ static void clear_midx_file_ext(const char *full_path, size_t full_path_len,
die_errno(_("failed to remove %s"), full_path);
}
-static void clear_midx_files_ext(struct repository *r, const char *ext,
+static void clear_midx_files_ext(const char *object_dir, const char *ext,
unsigned char *keep_hash)
{
struct clear_midx_data data;
@@ -1146,7 +1403,7 @@ static void clear_midx_files_ext(struct repository *r, const char *ext,
hash_to_hex(keep_hash), ext);
data.ext = ext;
- for_each_file_in_pack_dir(r->objects->odb->path,
+ for_each_file_in_pack_dir(object_dir,
clear_midx_file_ext,
&data);
@@ -1165,7 +1422,8 @@ void clear_midx_file(struct repository *r)
if (remove_path(midx))
die(_("failed to clear multi-pack-index at %s"), midx);
- clear_midx_files_ext(r, ".rev", NULL);
+ clear_midx_files_ext(r->objects->odb->path, ".bitmap", NULL);
+ clear_midx_files_ext(r->objects->odb->path, ".rev", NULL);
free(midx);
}
@@ -1390,8 +1648,10 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
free(count);
- if (packs_to_drop.nr)
- result = write_midx_internal(object_dir, m, &packs_to_drop, NULL, flags);
+ if (packs_to_drop.nr) {
+ result = write_midx_internal(object_dir, &packs_to_drop, NULL, flags);
+ m = NULL;
+ }
string_list_clear(&packs_to_drop, 0);
return result;
@@ -1580,7 +1840,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size,
goto cleanup;
}
- result = write_midx_internal(object_dir, m, NULL, NULL, flags);
+ result = write_midx_internal(object_dir, NULL, NULL, flags);
m = NULL;
cleanup:
diff --git a/midx.h b/midx.h
index 8684cf0fef..541d9ac728 100644
--- a/midx.h
+++ b/midx.h
@@ -8,6 +8,8 @@ struct pack_entry;
struct repository;
#define GIT_TEST_MULTI_PACK_INDEX "GIT_TEST_MULTI_PACK_INDEX"
+#define GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP \
+ "GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP"
struct multi_pack_index {
struct multi_pack_index *next;
@@ -41,7 +43,11 @@ struct multi_pack_index {
#define MIDX_PROGRESS (1 << 0)
#define MIDX_WRITE_REV_INDEX (1 << 1)
+#define MIDX_WRITE_BITMAP (1 << 2)
+#define MIDX_WRITE_BITMAP_HASH_CACHE (1 << 3)
+const unsigned char *get_midx_checksum(struct multi_pack_index *m);
+char *get_midx_filename(const char *object_dir);
char *get_midx_rev_filename(struct multi_pack_index *m);
struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local);
diff --git a/object-file.c b/object-file.c
index a8be899481..112d9b4bad 100644
--- a/object-file.c
+++ b/object-file.c
@@ -32,6 +32,7 @@
#include "packfile.h"
#include "object-store.h"
#include "promisor-remote.h"
+#include "submodule.h"
/* The maximum size for an object header. */
#define MAX_HEADER_LEN 32
@@ -414,74 +415,6 @@ enum scld_error safe_create_leading_directories_const(const char *path)
return result;
}
-int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
-{
- /*
- * The number of times we will try to remove empty directories
- * in the way of path. This is only 1 because if another
- * process is racily creating directories that conflict with
- * us, we don't want to fight against them.
- */
- int remove_directories_remaining = 1;
-
- /*
- * The number of times that we will try to create the
- * directories containing path. We are willing to attempt this
- * more than once, because another process could be trying to
- * clean up empty directories at the same time as we are
- * trying to create them.
- */
- int create_directories_remaining = 3;
-
- /* A scratch copy of path, filled lazily if we need it: */
- struct strbuf path_copy = STRBUF_INIT;
-
- int ret, save_errno;
-
- /* Sanity check: */
- assert(*path);
-
-retry_fn:
- ret = fn(path, cb);
- save_errno = errno;
- if (!ret)
- goto out;
-
- if (errno == EISDIR && remove_directories_remaining-- > 0) {
- /*
- * A directory is in the way. Maybe it is empty; try
- * to remove it:
- */
- if (!path_copy.len)
- strbuf_addstr(&path_copy, path);
-
- if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
- goto retry_fn;
- } else if (errno == ENOENT && create_directories_remaining-- > 0) {
- /*
- * Maybe the containing directory didn't exist, or
- * maybe it was just deleted by a process that is
- * racing with us to clean up empty directories. Try
- * to create it:
- */
- enum scld_error scld_result;
-
- if (!path_copy.len)
- strbuf_addstr(&path_copy, path);
-
- do {
- scld_result = safe_create_leading_directories(path_copy.buf);
- if (scld_result == SCLD_OK)
- goto retry_fn;
- } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
- }
-
-out:
- strbuf_release(&path_copy);
- errno = save_errno;
- return ret;
-}
-
static void fill_loose_path(struct strbuf *buf, const struct object_id *oid)
{
int i;
@@ -820,6 +753,27 @@ out:
return ref_git;
}
+struct object_directory *find_odb(struct repository *r, const char *obj_dir)
+{
+ struct object_directory *odb;
+ char *obj_dir_real = real_pathdup(obj_dir, 1);
+ struct strbuf odb_path_real = STRBUF_INIT;
+
+ prepare_alt_odb(r);
+ for (odb = r->objects->odb; odb; odb = odb->next) {
+ strbuf_realpath(&odb_path_real, odb->path, 1);
+ if (!strcmp(obj_dir_real, odb_path_real.buf))
+ break;
+ }
+
+ free(obj_dir_real);
+ strbuf_release(&odb_path_real);
+
+ if (!odb)
+ die(_("could not find object directory matching %s"), obj_dir);
+ return odb;
+}
+
static void fill_alternate_refs_command(struct child_process *cmd,
const char *repo_path)
{
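
find_odb() above matches the caller-supplied directory against each known object directory only after resolving both sides to canonical real paths, so differently spelled or symlinked paths still match. The core comparison, as a standalone POSIX sketch (assumes realpath(3) and a defined PATH_MAX, as on Linux):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Compare two directories by canonical path. */
    static int same_directory(const char *a, const char *b)
    {
            char ra[PATH_MAX], rb[PATH_MAX];

            if (!realpath(a, ra) || !realpath(b, rb))
                    return 0;   /* unresolvable paths never match */
            return !strcmp(ra, rb);
    }

    int main(int argc, char **argv)
    {
            if (argc == 3)
                    printf("%s\n", same_directory(argv[1], argv[2])
                           ? "match" : "different");
            return 0;
    }
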
@@ -1592,6 +1546,10 @@ static int do_oid_object_info_extended(struct repository *r,
break;
}
+ if (register_all_submodule_odb_as_alternates())
+ /* We added some alternates; retry */
+ continue;
+
/* Check if it is a missing object */
if (fetch_if_missing && repo_has_promisor_remote(r) &&
!already_retried &&
@@ -1616,7 +1574,7 @@ static int do_oid_object_info_extended(struct repository *r,
return 0;
rtype = packed_object_info(r, e.p, e.offset, oi);
if (rtype < 0) {
- mark_bad_packed_object(e.p, real->hash);
+ mark_bad_packed_object(e.p, real);
return do_oid_object_info_extended(r, real, oi, 0);
} else if (oi->whence == OI_PACKED) {
oi->u.packed.offset = e.offset;
@@ -1725,7 +1683,7 @@ void *read_object_file_extended(struct repository *r,
die(_("loose object %s (stored in %s) is corrupt"),
oid_to_hex(repl), path);
- if ((p = has_packed_and_bad(r, repl->hash)) != NULL)
+ if ((p = has_packed_and_bad(r, repl)) != NULL)
die(_("packed object %s (stored in %s) is corrupt"),
oid_to_hex(repl), p->pack_name);
obj_read_unlock();
diff --git a/object-store.h b/object-store.h
index b4dc6668aa..1e647a5be3 100644
--- a/object-store.h
+++ b/object-store.h
@@ -10,6 +10,7 @@
#include "khash.h"
#include "dir.h"
#include "oidtree.h"
+#include "oidset.h"
struct object_directory {
struct object_directory *next;
@@ -38,6 +39,7 @@ KHASH_INIT(odb_path_map, const char * /* key: odb_path */,
void prepare_alt_odb(struct repository *r);
char *compute_alternate_path(const char *path, struct strbuf *err);
+struct object_directory *find_odb(struct repository *r, const char *obj_dir);
typedef int alt_odb_fn(struct object_directory *, void *);
int foreach_alt_odb(alt_odb_fn, void*);
typedef void alternate_ref_fn(const struct object_id *oid, void *);
@@ -75,9 +77,8 @@ struct packed_git {
const void *index_data;
size_t index_size;
uint32_t num_objects;
- uint32_t num_bad_objects;
uint32_t crc_offset;
- unsigned char *bad_object_sha1;
+ struct oidset bad_objects;
int index_version;
time_t mtime;
int pack_fd;
@@ -370,7 +371,7 @@ struct object_info {
* Initializer for a "struct object_info" that wants no items. You may
* also memset() the memory to all-zeroes.
*/
-#define OBJECT_INFO_INIT {NULL}
+#define OBJECT_INFO_INIT { 0 }
/* Invoke lookup_replace_object() on the given hash */
#define OBJECT_INFO_LOOKUP_REPLACE 1
diff --git a/object.h b/object.h
index 549f2d256b..cb556ab775 100644
--- a/object.h
+++ b/object.h
@@ -55,7 +55,7 @@ struct object_array {
} *objects;
};
-#define OBJECT_ARRAY_INIT { 0, 0, NULL }
+#define OBJECT_ARRAY_INIT { 0 }
/*
* object flag allocation:
diff --git a/oid-array.h b/oid-array.h
index 72bca78b7d..f60f9af674 100644
--- a/oid-array.h
+++ b/oid-array.h
@@ -56,7 +56,7 @@ struct oid_array {
int sorted;
};
-#define OID_ARRAY_INIT { NULL, 0, 0, 0 }
+#define OID_ARRAY_INIT { 0 }
/**
* Add an item to the set. The object ID will be placed at the end of the array
diff --git a/oidset.c b/oidset.c
index 5aac633c1f..b36a2bae86 100644
--- a/oidset.c
+++ b/oidset.c
@@ -36,11 +36,6 @@ void oidset_clear(struct oidset *set)
oidset_init(set, 0);
}
-int oidset_size(struct oidset *set)
-{
- return kh_size(&set->set);
-}
-
void oidset_parse_file(struct oidset *set, const char *path)
{
oidset_parse_file_carefully(set, path, NULL, NULL);
diff --git a/oidset.h b/oidset.h
index 01f6560283..ba4a5a2cd3 100644
--- a/oidset.h
+++ b/oidset.h
@@ -57,7 +57,10 @@ int oidset_remove(struct oidset *set, const struct object_id *oid);
/**
* Returns the number of oids in the set.
*/
-int oidset_size(struct oidset *set);
+static inline int oidset_size(const struct oidset *set)
+{
+ return kh_size(&set->set);
+}
/**
* Remove all entries from the oidset, freeing any resources associated with
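
oidset_size() above moves from oidset.c into the header as a static inline, so the trivial size query can be inlined at call sites such as the fill_midx_entry() hunk earlier in this patch. The general pattern, as a tiny standalone sketch:

    /* The definition lives in the header; "static inline" avoids
     * multiple-definition errors across translation units that include it. */
    struct toy_set { unsigned nr; };

    static inline unsigned toy_set_size(const struct toy_set *s)
    {
            return s->nr;
    }

    int main(void)
    {
            struct toy_set s = { 3 };
            return toy_set_size(&s) == 3 ? 0 : 1;
    }
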
diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c
index 88d9e696a5..9c55c1531e 100644
--- a/pack-bitmap-write.c
+++ b/pack-bitmap-write.c
@@ -48,7 +48,7 @@ void bitmap_writer_show_progress(int show)
}
/**
- * Build the initial type index for the packfile
+ * Build the initial type index for the packfile or multi-pack-index
*/
void bitmap_writer_build_type_index(struct packing_data *to_pack,
struct pack_idx_entry **index,
@@ -125,15 +125,20 @@ static inline void push_bitmapped_commit(struct commit *commit)
writer.selected_nr++;
}
-static uint32_t find_object_pos(const struct object_id *oid)
+static uint32_t find_object_pos(const struct object_id *oid, int *found)
{
struct object_entry *entry = packlist_find(writer.to_pack, oid);
if (!entry) {
- die("Failed to write bitmap index. Packfile doesn't have full closure "
+ if (found)
+ *found = 0;
+ warning("Failed to write bitmap index. Packfile doesn't have full closure "
"(object %s is missing)", oid_to_hex(oid));
+ return 0;
}
+ if (found)
+ *found = 1;
return oe_in_pack_pos(writer.to_pack, entry);
}
@@ -331,9 +336,10 @@ static void bitmap_builder_clear(struct bitmap_builder *bb)
bb->commits_nr = bb->commits_alloc = 0;
}
-static void fill_bitmap_tree(struct bitmap *bitmap,
- struct tree *tree)
+static int fill_bitmap_tree(struct bitmap *bitmap,
+ struct tree *tree)
{
+ int found;
uint32_t pos;
struct tree_desc desc;
struct name_entry entry;
@@ -342,9 +348,11 @@ static void fill_bitmap_tree(struct bitmap *bitmap,
* If our bit is already set, then there is nothing to do. Both this
* tree and all of its children will be set.
*/
- pos = find_object_pos(&tree->object.oid);
+ pos = find_object_pos(&tree->object.oid, &found);
+ if (!found)
+ return -1;
if (bitmap_get(bitmap, pos))
- return;
+ return 0;
bitmap_set(bitmap, pos);
if (parse_tree(tree) < 0)
@@ -355,11 +363,15 @@ static void fill_bitmap_tree(struct bitmap *bitmap,
while (tree_entry(&desc, &entry)) {
switch (object_type(entry.mode)) {
case OBJ_TREE:
- fill_bitmap_tree(bitmap,
- lookup_tree(the_repository, &entry.oid));
+ if (fill_bitmap_tree(bitmap,
+ lookup_tree(the_repository, &entry.oid)) < 0)
+ return -1;
break;
case OBJ_BLOB:
- bitmap_set(bitmap, find_object_pos(&entry.oid));
+ pos = find_object_pos(&entry.oid, &found);
+ if (!found)
+ return -1;
+ bitmap_set(bitmap, pos);
break;
default:
/* Gitlink, etc; not reachable */
@@ -368,15 +380,18 @@ static void fill_bitmap_tree(struct bitmap *bitmap,
}
free_tree_buffer(tree);
+ return 0;
}
-static void fill_bitmap_commit(struct bb_commit *ent,
- struct commit *commit,
- struct prio_queue *queue,
- struct prio_queue *tree_queue,
- struct bitmap_index *old_bitmap,
- const uint32_t *mapping)
+static int fill_bitmap_commit(struct bb_commit *ent,
+ struct commit *commit,
+ struct prio_queue *queue,
+ struct prio_queue *tree_queue,
+ struct bitmap_index *old_bitmap,
+ const uint32_t *mapping)
{
+ int found;
+ uint32_t pos;
if (!ent->bitmap)
ent->bitmap = bitmap_new();
@@ -401,11 +416,16 @@ static void fill_bitmap_commit(struct bb_commit *ent,
* Mark ourselves and queue our tree. The commit
* walk ensures we cover all parents.
*/
- bitmap_set(ent->bitmap, find_object_pos(&c->object.oid));
+ pos = find_object_pos(&c->object.oid, &found);
+ if (!found)
+ return -1;
+ bitmap_set(ent->bitmap, pos);
prio_queue_put(tree_queue, get_commit_tree(c));
for (p = c->parents; p; p = p->next) {
- int pos = find_object_pos(&p->item->object.oid);
+ pos = find_object_pos(&p->item->object.oid, &found);
+ if (!found)
+ return -1;
if (!bitmap_get(ent->bitmap, pos)) {
bitmap_set(ent->bitmap, pos);
prio_queue_put(queue, p->item);
@@ -413,8 +433,12 @@ static void fill_bitmap_commit(struct bb_commit *ent,
}
}
- while (tree_queue->nr)
- fill_bitmap_tree(ent->bitmap, prio_queue_get(tree_queue));
+ while (tree_queue->nr) {
+ if (fill_bitmap_tree(ent->bitmap,
+ prio_queue_get(tree_queue)) < 0)
+ return -1;
+ }
+ return 0;
}
static void store_selected(struct bb_commit *ent, struct commit *commit)
@@ -432,7 +456,7 @@ static void store_selected(struct bb_commit *ent, struct commit *commit)
kh_value(writer.bitmaps, hash_pos) = stored;
}
-void bitmap_writer_build(struct packing_data *to_pack)
+int bitmap_writer_build(struct packing_data *to_pack)
{
struct bitmap_builder bb;
size_t i;
@@ -441,6 +465,7 @@ void bitmap_writer_build(struct packing_data *to_pack)
struct prio_queue tree_queue = { NULL };
struct bitmap_index *old_bitmap;
uint32_t *mapping;
+ int closed = 1; /* until proven otherwise */
writer.bitmaps = kh_init_oid_map();
writer.to_pack = to_pack;
@@ -463,8 +488,11 @@ void bitmap_writer_build(struct packing_data *to_pack)
struct commit *child;
int reused = 0;
- fill_bitmap_commit(ent, commit, &queue, &tree_queue,
- old_bitmap, mapping);
+ if (fill_bitmap_commit(ent, commit, &queue, &tree_queue,
+ old_bitmap, mapping) < 0) {
+ closed = 0;
+ break;
+ }
if (ent->selected) {
store_selected(ent, commit);
@@ -492,6 +520,7 @@ void bitmap_writer_build(struct packing_data *to_pack)
clear_prio_queue(&queue);
clear_prio_queue(&tree_queue);
bitmap_builder_clear(&bb);
+ free_bitmap_index(old_bitmap);
free(mapping);
trace2_region_leave("pack-bitmap-write", "building_bitmaps_total",
@@ -499,7 +528,9 @@ void bitmap_writer_build(struct packing_data *to_pack)
stop_progress(&writer.progress);
- compute_xor_offsets();
+ if (closed)
+ compute_xor_offsets();
+ return closed ? 0 : -1;
}
/**
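
Instead of die()ing when an object is missing, find_object_pos() above now reports failure through a *found out-parameter, and fill_bitmap_tree(), fill_bitmap_commit() and bitmap_writer_build() each turn that into a -1 return so the multi-pack caller can skip bitmap writing gracefully. A standalone sketch of that propagation shape:

    #include <stdio.h>

    /* Stand-in lookup: negative keys play the role of missing objects. */
    static unsigned find_pos(int key, int *found)
    {
            if (key < 0) {
                    *found = 0;
                    return 0;        /* value is meaningless when !*found */
            }
            *found = 1;
            return (unsigned)key;
    }

    static int fill(const int *keys, int nr)
    {
            int i, found;

            for (i = 0; i < nr; i++) {
                    unsigned pos = find_pos(keys[i], &found);
                    if (!found)
                            return -1;   /* caller decides whether that is fatal */
                    (void)pos;           /* a real caller would set a bit here */
            }
            return 0;
    }

    int main(void)
    {
            int ok[] = { 1, 2, 3 }, bad[] = { 1, -1, 3 };

            printf("%d %d\n", fill(ok, 3), fill(bad, 3));   /* 0 -1 */
            return 0;
    }
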
diff --git a/pack-bitmap.c b/pack-bitmap.c
index d999616c9e..33a3732992 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -13,6 +13,7 @@
#include "repository.h"
#include "object-store.h"
#include "list-objects-filter-options.h"
+#include "midx.h"
#include "config.h"
/*
@@ -35,8 +36,15 @@ struct stored_bitmap {
* the active bitmap index is the largest one.
*/
struct bitmap_index {
- /* Packfile to which this bitmap index belongs to */
+ /*
+ * The pack or multi-pack index (MIDX) that this bitmap index belongs
+ * to.
+ *
+ * Exactly one of these must be non-NULL; this specifies the object
+ * order used to interpret this bitmap.
+ */
struct packed_git *pack;
+ struct multi_pack_index *midx;
/*
* Mark the first `reuse_objects` in the packfile as reused:
@@ -71,6 +79,9 @@ struct bitmap_index {
/* If not NULL, this is a name-hash cache pointing into map. */
uint32_t *hashes;
+ /* The checksum of the packfile or MIDX; points into map. */
+ const unsigned char *checksum;
+
/*
* Extended index.
*
@@ -136,6 +147,13 @@ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index)
return b;
}
+static uint32_t bitmap_num_objects(struct bitmap_index *index)
+{
+ if (index->midx)
+ return index->midx->num_objects;
+ return index->pack->num_objects;
+}
+
static int load_bitmap_header(struct bitmap_index *index)
{
struct bitmap_disk_header *header = (void *)index->map;
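
bitmap_num_objects() above is the first of several small helpers that hide whether a bitmap_index is backed by a single pack or by a MIDX; exactly one of the two pointers is non-NULL. The dispatch pattern, as a standalone sketch with toy types:

    #include <stdio.h>

    struct toy_pack { unsigned num_objects; };
    struct toy_midx { unsigned num_objects; };

    /* Exactly one backing store is set; helpers hide which one. */
    struct toy_bitmap_index {
            struct toy_pack *pack;
            struct toy_midx *midx;
    };

    static unsigned toy_num_objects(const struct toy_bitmap_index *b)
    {
            if (b->midx)
                    return b->midx->num_objects;
            return b->pack->num_objects;
    }

    int main(void)
    {
            struct toy_midx m = { 42 };
            struct toy_bitmap_index b = { .pack = NULL, .midx = &m };

            printf("%u\n", toy_num_objects(&b));
            return 0;
    }
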
@@ -154,7 +172,7 @@ static int load_bitmap_header(struct bitmap_index *index)
/* Parse known bitmap format options */
{
uint32_t flags = ntohs(header->options);
- size_t cache_size = st_mult(index->pack->num_objects, sizeof(uint32_t));
+ size_t cache_size = st_mult(bitmap_num_objects(index), sizeof(uint32_t));
unsigned char *index_end = index->map + index->map_size - the_hash_algo->rawsz;
if ((flags & BITMAP_OPT_FULL_DAG) == 0)
@@ -170,6 +188,7 @@ static int load_bitmap_header(struct bitmap_index *index)
}
index->entry_count = ntohl(header->entry_count);
+ index->checksum = header->checksum;
index->map_pos += header_size;
return 0;
}
@@ -218,6 +237,15 @@ static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos)
#define MAX_XOR_OFFSET 160
+static int nth_bitmap_object_oid(struct bitmap_index *index,
+ struct object_id *oid,
+ uint32_t n)
+{
+ if (index->midx)
+ return nth_midxed_object_oid(oid, index->midx, n) ? 0 : -1;
+ return nth_packed_object_id(oid, index->pack, n);
+}
+
static int load_bitmap_entries_v1(struct bitmap_index *index)
{
uint32_t i;
@@ -237,7 +265,7 @@ static int load_bitmap_entries_v1(struct bitmap_index *index)
xor_offset = read_u8(index->map, &index->map_pos);
flags = read_u8(index->map, &index->map_pos);
- if (nth_packed_object_id(&oid, index->pack, commit_idx_pos) < 0)
+ if (nth_bitmap_object_oid(index, &oid, commit_idx_pos) < 0)
return error("corrupt ewah bitmap: commit index %u out of range",
(unsigned)commit_idx_pos);
@@ -262,7 +290,14 @@ static int load_bitmap_entries_v1(struct bitmap_index *index)
return 0;
}
-static char *pack_bitmap_filename(struct packed_git *p)
+char *midx_bitmap_filename(struct multi_pack_index *midx)
+{
+ return xstrfmt("%s-%s.bitmap",
+ get_midx_filename(midx->object_dir),
+ hash_to_hex(get_midx_checksum(midx)));
+}
+
+char *pack_bitmap_filename(struct packed_git *p)
{
size_t len;
@@ -271,6 +306,57 @@ static char *pack_bitmap_filename(struct packed_git *p)
return xstrfmt("%.*s.bitmap", (int)len, p->pack_name);
}
+static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
+ struct multi_pack_index *midx)
+{
+ struct stat st;
+ char *idx_name = midx_bitmap_filename(midx);
+ int fd = git_open(idx_name);
+
+ free(idx_name);
+
+ if (fd < 0)
+ return -1;
+
+ if (fstat(fd, &st)) {
+ close(fd);
+ return -1;
+ }
+
+ if (bitmap_git->pack || bitmap_git->midx) {
+ /* ignore extra bitmap file; we can only handle one */
+ warning("ignoring extra bitmap file: %s",
+ get_midx_filename(midx->object_dir));
+ close(fd);
+ return -1;
+ }
+
+ bitmap_git->midx = midx;
+ bitmap_git->map_size = xsize_t(st.st_size);
+ bitmap_git->map_pos = 0;
+ bitmap_git->map = xmmap(NULL, bitmap_git->map_size, PROT_READ,
+ MAP_PRIVATE, fd, 0);
+ close(fd);
+
+ if (load_bitmap_header(bitmap_git) < 0)
+ goto cleanup;
+
+ if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum))
+ goto cleanup;
+
+ if (load_midx_revindex(bitmap_git->midx) < 0) {
+ warning(_("multi-pack bitmap is missing required reverse index"));
+ goto cleanup;
+ }
+ return 0;
+
+cleanup:
+ munmap(bitmap_git->map, bitmap_git->map_size);
+ bitmap_git->map_size = 0;
+ bitmap_git->map = NULL;
+ return -1;
+}
+
static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git *packfile)
{
int fd;
@@ -292,7 +378,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
return -1;
}
- if (bitmap_git->pack) {
+ if (bitmap_git->pack || bitmap_git->midx) {
+ /* ignore extra bitmap file; we can only handle one */
warning("ignoring extra bitmap file: %s", packfile->pack_name);
close(fd);
return -1;
@@ -319,13 +406,39 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
return 0;
}
-static int load_pack_bitmap(struct bitmap_index *bitmap_git)
+static int load_reverse_index(struct bitmap_index *bitmap_git)
+{
+ if (bitmap_is_midx(bitmap_git)) {
+ uint32_t i;
+ int ret;
+
+ /*
+ * The multi-pack-index's .rev file is already loaded via
+ * open_pack_bitmap_1().
+ *
+ * But we still need to open the individual pack .rev files,
+ * since we will need to make use of them in pack-objects.
+ */
+ for (i = 0; i < bitmap_git->midx->num_packs; i++) {
+ if (prepare_midx_pack(the_repository, bitmap_git->midx, i))
+ die(_("load_reverse_index: could not open pack"));
+ ret = load_pack_revindex(bitmap_git->midx->packs[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+ }
+ return load_pack_revindex(bitmap_git->pack);
+}
+
+static int load_bitmap(struct bitmap_index *bitmap_git)
{
assert(bitmap_git->map);
bitmap_git->bitmaps = kh_init_oid_map();
bitmap_git->ext_index.positions = kh_init_oid_pos();
- if (load_pack_revindex(bitmap_git->pack))
+
+ if (load_reverse_index(bitmap_git))
goto failed;
if (!(bitmap_git->commits = read_bitmap_1(bitmap_git)) ||
@@ -369,11 +482,46 @@ static int open_pack_bitmap(struct repository *r,
return ret;
}
+static int open_midx_bitmap(struct repository *r,
+ struct bitmap_index *bitmap_git)
+{
+ struct multi_pack_index *midx;
+
+ assert(!bitmap_git->map);
+
+ for (midx = get_multi_pack_index(r); midx; midx = midx->next) {
+ if (!open_midx_bitmap_1(bitmap_git, midx))
+ return 0;
+ }
+ return -1;
+}
+
+static int open_bitmap(struct repository *r,
+ struct bitmap_index *bitmap_git)
+{
+ assert(!bitmap_git->map);
+
+ if (!open_midx_bitmap(r, bitmap_git))
+ return 0;
+ return open_pack_bitmap(r, bitmap_git);
+}
+
struct bitmap_index *prepare_bitmap_git(struct repository *r)
{
struct bitmap_index *bitmap_git = xcalloc(1, sizeof(*bitmap_git));
- if (!open_pack_bitmap(r, bitmap_git) && !load_pack_bitmap(bitmap_git))
+ if (!open_bitmap(r, bitmap_git) && !load_bitmap(bitmap_git))
+ return bitmap_git;
+
+ free_bitmap_index(bitmap_git);
+ return NULL;
+}
+
+struct bitmap_index *prepare_midx_bitmap_git(struct multi_pack_index *midx)
+{
+ struct bitmap_index *bitmap_git = xcalloc(1, sizeof(*bitmap_git));
+
+ if (!open_midx_bitmap_1(bitmap_git, midx) && !load_bitmap(bitmap_git))
return bitmap_git;
free_bitmap_index(bitmap_git);
@@ -404,7 +552,7 @@ static inline int bitmap_position_extended(struct bitmap_index *bitmap_git,
if (pos < kh_end(positions)) {
int bitmap_pos = kh_value(positions, pos);
- return bitmap_pos + bitmap_git->pack->num_objects;
+ return bitmap_pos + bitmap_num_objects(bitmap_git);
}
return -1;
@@ -423,10 +571,26 @@ static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git,
return pos;
}
+static int bitmap_position_midx(struct bitmap_index *bitmap_git,
+ const struct object_id *oid)
+{
+ uint32_t want, got;
+ if (!bsearch_midx(oid, bitmap_git->midx, &want))
+ return -1;
+
+ if (midx_to_pack_pos(bitmap_git->midx, want, &got) < 0)
+ return -1;
+ return got;
+}
+
static int bitmap_position(struct bitmap_index *bitmap_git,
const struct object_id *oid)
{
- int pos = bitmap_position_packfile(bitmap_git, oid);
+ int pos;
+ if (bitmap_is_midx(bitmap_git))
+ pos = bitmap_position_midx(bitmap_git, oid);
+ else
+ pos = bitmap_position_packfile(bitmap_git, oid);
return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, oid);
}
@@ -456,7 +620,7 @@ static int ext_index_add_object(struct bitmap_index *bitmap_git,
bitmap_pos = kh_value(eindex->positions, hash_pos);
}
- return bitmap_pos + bitmap_git->pack->num_objects;
+ return bitmap_pos + bitmap_num_objects(bitmap_git);
}
struct bitmap_show_data {
@@ -673,7 +837,7 @@ static void show_extended_objects(struct bitmap_index *bitmap_git,
for (i = 0; i < eindex->count; ++i) {
struct object *obj;
- if (!bitmap_get(objects, bitmap_git->pack->num_objects + i))
+ if (!bitmap_get(objects, bitmap_num_objects(bitmap_git) + i))
continue;
obj = eindex->objects[i];
@@ -737,6 +901,7 @@ static void show_objects_for_type(
continue;
for (offset = 0; offset < BITS_IN_EWORD; ++offset) {
+ struct packed_git *pack;
struct object_id oid;
uint32_t hash = 0, index_pos;
off_t ofs;
@@ -746,14 +911,28 @@ static void show_objects_for_type(
offset += ewah_bit_ctz64(word >> offset);
- index_pos = pack_pos_to_index(bitmap_git->pack, pos + offset);
- ofs = pack_pos_to_offset(bitmap_git->pack, pos + offset);
- nth_packed_object_id(&oid, bitmap_git->pack, index_pos);
+ if (bitmap_is_midx(bitmap_git)) {
+ struct multi_pack_index *m = bitmap_git->midx;
+ uint32_t pack_id;
+
+ index_pos = pack_pos_to_midx(m, pos + offset);
+ ofs = nth_midxed_offset(m, index_pos);
+ nth_midxed_object_oid(&oid, m, index_pos);
+
+ pack_id = nth_midxed_pack_int_id(m, index_pos);
+ pack = bitmap_git->midx->packs[pack_id];
+ } else {
+ index_pos = pack_pos_to_index(bitmap_git->pack, pos + offset);
+ ofs = pack_pos_to_offset(bitmap_git->pack, pos + offset);
+ nth_bitmap_object_oid(bitmap_git, &oid, index_pos);
+
+ pack = bitmap_git->pack;
+ }
if (bitmap_git->hashes)
hash = get_be32(bitmap_git->hashes + index_pos);
- show_reach(&oid, object_type, 0, hash, bitmap_git->pack, ofs);
+ show_reach(&oid, object_type, 0, hash, pack, ofs);
}
}
}
@@ -765,8 +944,13 @@ static int in_bitmapped_pack(struct bitmap_index *bitmap_git,
struct object *object = roots->item;
roots = roots->next;
- if (find_pack_entry_one(object->oid.hash, bitmap_git->pack) > 0)
- return 1;
+ if (bitmap_is_midx(bitmap_git)) {
+ if (bsearch_midx(&object->oid, bitmap_git->midx, NULL))
+ return 1;
+ } else {
+ if (find_pack_entry_one(object->oid.hash, bitmap_git->pack) > 0)
+ return 1;
+ }
}
return 0;
@@ -832,7 +1016,7 @@ static void filter_bitmap_exclude_type(struct bitmap_index *bitmap_git,
* them individually.
*/
for (i = 0; i < eindex->count; i++) {
- uint32_t pos = i + bitmap_git->pack->num_objects;
+ uint32_t pos = i + bitmap_num_objects(bitmap_git);
if (eindex->objects[i]->type == type &&
bitmap_get(to_filter, pos) &&
!bitmap_get(tips, pos))
@@ -853,23 +1037,35 @@ static void filter_bitmap_blob_none(struct bitmap_index *bitmap_git,
static unsigned long get_size_by_pos(struct bitmap_index *bitmap_git,
uint32_t pos)
{
- struct packed_git *pack = bitmap_git->pack;
unsigned long size;
struct object_info oi = OBJECT_INFO_INIT;
oi.sizep = &size;
- if (pos < pack->num_objects) {
- off_t ofs = pack_pos_to_offset(pack, pos);
+ if (pos < bitmap_num_objects(bitmap_git)) {
+ struct packed_git *pack;
+ off_t ofs;
+
+ if (bitmap_is_midx(bitmap_git)) {
+ uint32_t midx_pos = pack_pos_to_midx(bitmap_git->midx, pos);
+ uint32_t pack_id = nth_midxed_pack_int_id(bitmap_git->midx, midx_pos);
+
+ pack = bitmap_git->midx->packs[pack_id];
+ ofs = nth_midxed_offset(bitmap_git->midx, midx_pos);
+ } else {
+ pack = bitmap_git->pack;
+ ofs = pack_pos_to_offset(pack, pos);
+ }
+
if (packed_object_info(the_repository, pack, ofs, &oi) < 0) {
struct object_id oid;
- nth_packed_object_id(&oid, pack,
- pack_pos_to_index(pack, pos));
+ nth_bitmap_object_oid(bitmap_git, &oid,
+ pack_pos_to_index(pack, pos));
die(_("unable to get size of %s"), oid_to_hex(&oid));
}
} else {
struct eindex *eindex = &bitmap_git->ext_index;
- struct object *obj = eindex->objects[pos - pack->num_objects];
+ struct object *obj = eindex->objects[pos - bitmap_num_objects(bitmap_git)];
if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)
die(_("unable to get size of %s"), oid_to_hex(&obj->oid));
}
@@ -911,7 +1107,7 @@ static void filter_bitmap_blob_limit(struct bitmap_index *bitmap_git,
}
for (i = 0; i < eindex->count; i++) {
- uint32_t pos = i + bitmap_git->pack->num_objects;
+ uint32_t pos = i + bitmap_num_objects(bitmap_git);
if (eindex->objects[i]->type == OBJ_BLOB &&
bitmap_get(to_filter, pos) &&
!bitmap_get(tips, pos) &&
@@ -1041,7 +1237,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
/* try to open a bitmapped pack, but don't parse it yet
* because we may not need to use it */
CALLOC_ARRAY(bitmap_git, 1);
- if (open_pack_bitmap(revs->repo, bitmap_git) < 0)
+ if (open_bitmap(revs->repo, bitmap_git) < 0)
goto cleanup;
for (i = 0; i < revs->pending.nr; ++i) {
@@ -1085,7 +1281,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
* from disk. this is the point of no return; after this the rev_list
* becomes invalidated and we must perform the revwalk through bitmaps
*/
- if (load_pack_bitmap(bitmap_git) < 0)
+ if (load_bitmap(bitmap_git) < 0)
goto cleanup;
object_array_clear(&revs->pending);
@@ -1128,22 +1324,49 @@ cleanup:
return NULL;
}
-static void try_partial_reuse(struct bitmap_index *bitmap_git,
- size_t pos,
- struct bitmap *reuse,
- struct pack_window **w_curs)
+/*
+ * -1 means "stop trying further objects"; 0 means we may or may not have
+ * reused, but you can keep feeding bits.
+ */
+static int try_partial_reuse(struct packed_git *pack,
+ size_t pos,
+ struct bitmap *reuse,
+ struct pack_window **w_curs)
{
- off_t offset, header;
+ off_t offset, delta_obj_offset;
enum object_type type;
unsigned long size;
- if (pos >= bitmap_git->pack->num_objects)
- return; /* not actually in the pack */
+ /*
+ * try_partial_reuse() is called either on (a) objects in the
+ * bitmapped pack (in the case of a single-pack bitmap) or (b)
+ * objects in the preferred pack of a multi-pack bitmap.
+ * Importantly, the latter can pretend as if only a single pack
+ * exists because:
+ *
+ * - The first pack->num_objects bits of a MIDX bitmap are
+ * reserved for the preferred pack, and
+ *
+ * - Ties due to duplicate objects are always resolved in
+ * favor of the preferred pack.
+ *
+ * Therefore we do not need to ever ask the MIDX for its copy of
+ * an object by OID, since it will always select it from the
+ * preferred pack. Likewise, the selected copy of the base
+ * object for any deltas will reside in the same pack.
+ *
+ * This means that we can reuse pos when looking up the bit in
+ * the reuse bitmap, too, since bits corresponding to the
+ * preferred pack precede all bits from other packs.
+ */
+
+ if (pos >= pack->num_objects)
+ return -1; /* not actually in the pack or MIDX preferred pack */
- offset = header = pack_pos_to_offset(bitmap_git->pack, pos);
- type = unpack_object_header(bitmap_git->pack, w_curs, &offset, &size);
+ offset = delta_obj_offset = pack_pos_to_offset(pack, pos);
+ type = unpack_object_header(pack, w_curs, &offset, &size);
if (type < 0)
- return; /* broken packfile, punt */
+ return -1; /* broken packfile, punt */
if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA) {
off_t base_offset;
@@ -1157,12 +1380,12 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git,
* and the normal slow path will complain about it in
* more detail.
*/
- base_offset = get_delta_base(bitmap_git->pack, w_curs,
- &offset, type, header);
+ base_offset = get_delta_base(pack, w_curs, &offset, type,
+ delta_obj_offset);
if (!base_offset)
- return;
- if (offset_to_pack_pos(bitmap_git->pack, base_offset, &base_pos) < 0)
- return;
+ return 0;
+ if (offset_to_pack_pos(pack, base_offset, &base_pos) < 0)
+ return 0;
/*
* We assume delta dependencies always point backwards. This
@@ -1174,7 +1397,7 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git,
* odd parameters.
*/
if (base_pos >= pos)
- return;
+ return 0;
/*
* And finally, if we're not sending the base as part of our
@@ -1185,13 +1408,22 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git,
* object_entry code path handle it.
*/
if (!bitmap_get(reuse, base_pos))
- return;
+ return 0;
}
/*
* If we got here, then the object is OK to reuse. Mark it.
*/
bitmap_set(reuse, pos);
+ return 0;
+}
+
+static uint32_t midx_preferred_pack(struct bitmap_index *bitmap_git)
+{
+ struct multi_pack_index *m = bitmap_git->midx;
+ if (!m)
+ BUG("midx_preferred_pack: requires non-empty MIDX");
+ return nth_midxed_pack_int_id(m, pack_pos_to_midx(bitmap_git->midx, 0));
}
int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git,
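
midx_preferred_pack() relies on the MIDX pseudo-pack order placing every object of the preferred pack first, so the pack owning pseudo-pack position 0 is by definition the preferred one. The new int return of try_partial_reuse() follows a simple contract, sketched here with a hypothetical driver (the real caller walks EWAH words, as shown in the next hunk):

static void scan_reusable_prefix(struct packed_git *pack, struct bitmap *reuse,
				 struct pack_window **w_curs, size_t nr_bits)
{
	size_t pos;

	for (pos = 0; pos < nr_bits; pos++) {
		/* -1: past the (preferred) pack or broken data; stop.   */
		/*  0: bit may or may not have been set; keep scanning. */
		if (try_partial_reuse(pack, pos, reuse, w_curs) < 0)
			break;
	}
}
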
@@ -1199,20 +1431,37 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git,
uint32_t *entries,
struct bitmap **reuse_out)
{
+ struct packed_git *pack;
struct bitmap *result = bitmap_git->result;
struct bitmap *reuse;
struct pack_window *w_curs = NULL;
size_t i = 0;
uint32_t offset;
+ uint32_t objects_nr;
assert(result);
+ load_reverse_index(bitmap_git);
+
+ if (bitmap_is_midx(bitmap_git))
+ pack = bitmap_git->midx->packs[midx_preferred_pack(bitmap_git)];
+ else
+ pack = bitmap_git->pack;
+ objects_nr = pack->num_objects;
+
while (i < result->word_alloc && result->words[i] == (eword_t)~0)
i++;
- /* Don't mark objects not in the packfile */
- if (i > bitmap_git->pack->num_objects / BITS_IN_EWORD)
- i = bitmap_git->pack->num_objects / BITS_IN_EWORD;
+ /*
+ * Don't mark objects not in the packfile or preferred pack. This bitmap
+ * marks objects eligible for reuse, but the pack-reuse code only
+ * understands how to reuse a single pack. Since the preferred pack is
+ * guaranteed to have all bases for its deltas (in a multi-pack bitmap),
+ * we use it instead of another pack. In single-pack bitmaps, the choice
+ * is made for us.
+ */
+ if (i > objects_nr / BITS_IN_EWORD)
+ i = objects_nr / BITS_IN_EWORD;
reuse = bitmap_word_alloc(i);
memset(reuse->words, 0xFF, i * sizeof(eword_t));
@@ -1226,10 +1475,23 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git,
break;
offset += ewah_bit_ctz64(word >> offset);
- try_partial_reuse(bitmap_git, pos + offset, reuse, &w_curs);
+ if (try_partial_reuse(pack, pos + offset,
+ reuse, &w_curs) < 0) {
+ /*
+ * try_partial_reuse indicated we couldn't reuse
+ * any bits, so there is no point in trying more
+ * bits in the current word, or any other words
+ * in result.
+ *
+ * Jump out of both loops to avoid future
+ * unnecessary calls to try_partial_reuse.
+ */
+ goto done;
+ }
}
}
+done:
unuse_pack(&w_curs);
*entries = bitmap_popcount(reuse);
@@ -1243,7 +1505,7 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git,
* need to be handled separately.
*/
bitmap_and_not(result, reuse);
- *packfile_out = bitmap_git->pack;
+ *packfile_out = pack;
*reuse_out = reuse;
return 0;
}
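
A sketch of how a caller such as pack-objects might consume this result; variable names are illustrative, and a negative return is assumed to mean nothing was reusable:

static void maybe_reuse_existing_pack(struct bitmap_index *bitmap_git)
{
	struct packed_git *reuse_packfile;
	uint32_t reuse_objects_nr;
	struct bitmap *reuse_bitmap;

	if (reuse_partial_packfile_from_bitmap(bitmap_git, &reuse_packfile,
					       &reuse_objects_nr,
					       &reuse_bitmap) < 0)
		return; /* fall back to enumerating objects one by one */

	/*
	 * reuse_packfile is the bitmapped pack (or the MIDX's preferred
	 * pack); reuse_objects_nr bits of reuse_bitmap mark objects whose
	 * on-disk representation can be copied verbatim.
	 */
}
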
@@ -1296,7 +1558,7 @@ static uint32_t count_object_type(struct bitmap_index *bitmap_git,
for (i = 0; i < eindex->count; ++i) {
if (eindex->objects[i]->type == type &&
- bitmap_get(objects, bitmap_git->pack->num_objects + i))
+ bitmap_get(objects, bitmap_num_objects(bitmap_git) + i))
count++;
}
@@ -1325,10 +1587,52 @@ void count_bitmap_commit_list(struct bitmap_index *bitmap_git,
struct bitmap_test_data {
struct bitmap_index *bitmap_git;
struct bitmap *base;
+ struct bitmap *commits;
+ struct bitmap *trees;
+ struct bitmap *blobs;
+ struct bitmap *tags;
struct progress *prg;
size_t seen;
};
+static void test_bitmap_type(struct bitmap_test_data *tdata,
+ struct object *obj, int pos)
+{
+ enum object_type bitmap_type = OBJ_NONE;
+ int bitmaps_nr = 0;
+
+ if (bitmap_get(tdata->commits, pos)) {
+ bitmap_type = OBJ_COMMIT;
+ bitmaps_nr++;
+ }
+ if (bitmap_get(tdata->trees, pos)) {
+ bitmap_type = OBJ_TREE;
+ bitmaps_nr++;
+ }
+ if (bitmap_get(tdata->blobs, pos)) {
+ bitmap_type = OBJ_BLOB;
+ bitmaps_nr++;
+ }
+ if (bitmap_get(tdata->tags, pos)) {
+ bitmap_type = OBJ_TAG;
+ bitmaps_nr++;
+ }
+
+ if (bitmap_type == OBJ_NONE)
+ die("object %s not found in type bitmaps",
+ oid_to_hex(&obj->oid));
+
+ if (bitmaps_nr > 1)
+ die("object %s does not have a unique type",
+ oid_to_hex(&obj->oid));
+
+ if (bitmap_type != obj->type)
+ die("object %s: real type %s, expected: %s",
+ oid_to_hex(&obj->oid),
+ type_name(obj->type),
+ type_name(bitmap_type));
+}
+
static void test_show_object(struct object *object, const char *name,
void *data)
{
@@ -1338,6 +1642,7 @@ static void test_show_object(struct object *object, const char *name,
bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));
+ test_bitmap_type(tdata, object, bitmap_pos);
bitmap_set(tdata->base, bitmap_pos);
display_progress(tdata->prg, ++tdata->seen);
@@ -1352,6 +1657,7 @@ static void test_show_commit(struct commit *commit, void *data)
&commit->object.oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid));
+ test_bitmap_type(tdata, &commit->object, bitmap_pos);
bitmap_set(tdata->base, bitmap_pos);
display_progress(tdata->prg, ++tdata->seen);
@@ -1399,6 +1705,10 @@ void test_bitmap_walk(struct rev_info *revs)
tdata.bitmap_git = bitmap_git;
tdata.base = bitmap_new();
+ tdata.commits = ewah_to_bitmap(bitmap_git->commits);
+ tdata.trees = ewah_to_bitmap(bitmap_git->trees);
+ tdata.blobs = ewah_to_bitmap(bitmap_git->blobs);
+ tdata.tags = ewah_to_bitmap(bitmap_git->tags);
tdata.prg = start_progress("Verifying bitmap entries", result_popcnt);
tdata.seen = 0;
@@ -1432,6 +1742,33 @@ int test_bitmap_commits(struct repository *r)
return 0;
}
+int test_bitmap_hashes(struct repository *r)
+{
+ struct bitmap_index *bitmap_git = prepare_bitmap_git(r);
+ struct object_id oid;
+ uint32_t i, index_pos;
+
+ if (!bitmap_git->hashes)
+ goto cleanup;
+
+ for (i = 0; i < bitmap_num_objects(bitmap_git); i++) {
+ if (bitmap_is_midx(bitmap_git))
+ index_pos = pack_pos_to_midx(bitmap_git->midx, i);
+ else
+ index_pos = pack_pos_to_index(bitmap_git->pack, i);
+
+ nth_bitmap_object_oid(bitmap_git, &oid, index_pos);
+
+ printf("%s %"PRIu32"\n",
+ oid_to_hex(&oid), get_be32(bitmap_git->hashes + index_pos));
+ }
+
+cleanup:
+ free_bitmap_index(bitmap_git);
+
+ return 0;
+}
+
int rebuild_bitmap(const uint32_t *reposition,
struct ewah_bitmap *source,
struct bitmap *dest)
@@ -1469,19 +1806,32 @@ uint32_t *create_bitmap_mapping(struct bitmap_index *bitmap_git,
uint32_t i, num_objects;
uint32_t *reposition;
- num_objects = bitmap_git->pack->num_objects;
+ if (!bitmap_is_midx(bitmap_git))
+ load_reverse_index(bitmap_git);
+ else if (load_midx_revindex(bitmap_git->midx) < 0)
+ BUG("rebuild_existing_bitmaps: missing required rev-cache "
+ "extension");
+
+ num_objects = bitmap_num_objects(bitmap_git);
CALLOC_ARRAY(reposition, num_objects);
for (i = 0; i < num_objects; ++i) {
struct object_id oid;
struct object_entry *oe;
+ uint32_t index_pos;
- nth_packed_object_id(&oid, bitmap_git->pack,
- pack_pos_to_index(bitmap_git->pack, i));
+ if (bitmap_is_midx(bitmap_git))
+ index_pos = pack_pos_to_midx(bitmap_git->midx, i);
+ else
+ index_pos = pack_pos_to_index(bitmap_git->pack, i);
+ nth_bitmap_object_oid(bitmap_git, &oid, index_pos);
oe = packlist_find(mapping, &oid);
- if (oe)
+ if (oe) {
reposition[i] = oe_in_pack_pos(mapping, oe) + 1;
+ if (bitmap_git->hashes && !oe->hash)
+ oe->hash = get_be32(bitmap_git->hashes + index_pos);
+ }
}
return reposition;
@@ -1503,6 +1853,19 @@ void free_bitmap_index(struct bitmap_index *b)
free(b->ext_index.hashes);
bitmap_free(b->result);
bitmap_free(b->haves);
+ if (bitmap_is_midx(b)) {
+ /*
+ * Multi-pack bitmaps need to have resources associated with
+ * their on-disk reverse indexes unmapped so that stale .rev and
+ * .bitmap files can be removed.
+ *
+ * Unlike pack-based bitmaps, multi-pack bitmaps can be read and
+ * written in the same 'git multi-pack-index write --bitmap'
+ * process. Close resources so they can be removed safely on
+ * platforms like Windows.
+ */
+ close_midx_revindex(b->midx);
+ }
free(b);
}
@@ -1517,7 +1880,6 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git,
enum object_type object_type)
{
struct bitmap *result = bitmap_git->result;
- struct packed_git *pack = bitmap_git->pack;
off_t total = 0;
struct ewah_iterator it;
eword_t filter;
@@ -1534,15 +1896,35 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git,
continue;
for (offset = 0; offset < BITS_IN_EWORD; offset++) {
- size_t pos;
-
if ((word >> offset) == 0)
break;
offset += ewah_bit_ctz64(word >> offset);
- pos = base + offset;
- total += pack_pos_to_offset(pack, pos + 1) -
- pack_pos_to_offset(pack, pos);
+
+ if (bitmap_is_midx(bitmap_git)) {
+ uint32_t pack_pos;
+ uint32_t midx_pos = pack_pos_to_midx(bitmap_git->midx, base + offset);
+ off_t offset = nth_midxed_offset(bitmap_git->midx, midx_pos);
+
+ uint32_t pack_id = nth_midxed_pack_int_id(bitmap_git->midx, midx_pos);
+ struct packed_git *pack = bitmap_git->midx->packs[pack_id];
+
+ if (offset_to_pack_pos(pack, offset, &pack_pos) < 0) {
+ struct object_id oid;
+ nth_midxed_object_oid(&oid, bitmap_git->midx, midx_pos);
+
+ die(_("could not find %s in pack %s at offset %"PRIuMAX),
+ oid_to_hex(&oid),
+ pack->pack_name,
+ (uintmax_t)offset);
+ }
+
+ total += pack_pos_to_offset(pack, pack_pos + 1) - offset;
+ } else {
+ size_t pos = base + offset;
+ total += pack_pos_to_offset(bitmap_git->pack, pos + 1) -
+ pack_pos_to_offset(bitmap_git->pack, pos);
+ }
}
}
@@ -1552,7 +1934,6 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git,
static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git)
{
struct bitmap *result = bitmap_git->result;
- struct packed_git *pack = bitmap_git->pack;
struct eindex *eindex = &bitmap_git->ext_index;
off_t total = 0;
struct object_info oi = OBJECT_INFO_INIT;
@@ -1564,7 +1945,7 @@ static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git)
for (i = 0; i < eindex->count; i++) {
struct object *obj = eindex->objects[i];
- if (!bitmap_get(result, pack->num_objects + i))
+ if (!bitmap_get(result, bitmap_num_objects(bitmap_git) + i))
continue;
if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)
@@ -1594,7 +1975,28 @@ off_t get_disk_usage_from_bitmap(struct bitmap_index *bitmap_git,
return total;
}
+int bitmap_is_midx(struct bitmap_index *bitmap_git)
+{
+ return !!bitmap_git->midx;
+}
+
const struct string_list *bitmap_preferred_tips(struct repository *r)
{
return repo_config_get_value_multi(r, "pack.preferbitmaptips");
}
+
+int bitmap_is_preferred_refname(struct repository *r, const char *refname)
+{
+ const struct string_list *preferred_tips = bitmap_preferred_tips(r);
+ struct string_list_item *item;
+
+ if (!preferred_tips)
+ return 0;
+
+ for_each_string_list_item(item, preferred_tips) {
+ if (starts_with(refname, item->string))
+ return 1;
+ }
+
+ return 0;
+}
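
bitmap_is_preferred_refname() treats each pack.preferBitmapTips value as a refname prefix. A small sketch of consulting it while iterating refs; the callback and counter are hypothetical:

struct preferred_tip_count {
	struct repository *repo;
	int nr;
};

static int count_preferred_tip(const char *refname,
			       const struct object_id *oid,
			       int flags, void *cb_data)
{
	struct preferred_tip_count *data = cb_data;

	if (bitmap_is_preferred_refname(data->repo, refname))
		data->nr++; /* e.g. "refs/tags/v" matches refs/tags/v2.33.0 */
	return 0;
}

/* usage sketch: for_each_ref(count_preferred_tip, &data); */
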
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 99d733eb26..ed46d27077 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -44,6 +44,7 @@ typedef int (*show_reachable_fn)(
struct bitmap_index;
struct bitmap_index *prepare_bitmap_git(struct repository *r);
+struct bitmap_index *prepare_midx_bitmap_git(struct multi_pack_index *midx);
void count_bitmap_commit_list(struct bitmap_index *, uint32_t *commits,
uint32_t *trees, uint32_t *blobs, uint32_t *tags);
void traverse_bitmap_commit_list(struct bitmap_index *,
@@ -51,6 +52,7 @@ void traverse_bitmap_commit_list(struct bitmap_index *,
show_reachable_fn show_reachable);
void test_bitmap_walk(struct rev_info *revs);
int test_bitmap_commits(struct repository *r);
+int test_bitmap_hashes(struct repository *r);
struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
struct list_objects_filter_options *filter,
int filter_provided_objects);
@@ -87,12 +89,17 @@ struct ewah_bitmap *bitmap_for_commit(struct bitmap_index *bitmap_git,
struct commit *commit);
void bitmap_writer_select_commits(struct commit **indexed_commits,
unsigned int indexed_commits_nr, int max_bitmaps);
-void bitmap_writer_build(struct packing_data *to_pack);
+int bitmap_writer_build(struct packing_data *to_pack);
void bitmap_writer_finish(struct pack_idx_entry **index,
uint32_t index_nr,
const char *filename,
uint16_t options);
+char *midx_bitmap_filename(struct multi_pack_index *midx);
+char *pack_bitmap_filename(struct packed_git *p);
+
+int bitmap_is_midx(struct bitmap_index *bitmap_git);
const struct string_list *bitmap_preferred_tips(struct repository *r);
+int bitmap_is_preferred_refname(struct repository *r, const char *refname);
#endif
diff --git a/pack-revindex.h b/pack-revindex.h
index 479b8f2f9c..74f4eae668 100644
--- a/pack-revindex.h
+++ b/pack-revindex.h
@@ -109,7 +109,7 @@ off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos);
* If the reverse index has not yet been loaded, or the position is out of
* bounds, this function aborts.
*
- * This function runs in time O(log N) with the number of objects in the MIDX.
+ * This function runs in constant time.
*/
uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos);
@@ -120,7 +120,7 @@ uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos);
* If the reverse index has not yet been loaded, or the position is out of
* bounds, this function aborts.
*
- * This function runs in constant time.
+ * This function runs in time O(log N) with the number of objects in the MIDX.
*/
int midx_to_pack_pos(struct multi_pack_index *midx, uint32_t at, uint32_t *pos);
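
The corrected complexity notes reflect that the MIDX reverse index is laid out in pseudo-pack order: going from a pack position to a MIDX position is one array read, while the inverse direction has to search. A simplified analogy only, not the real data structures (the actual lookup binary-searches by the entries' pack/offset key):

static uint32_t forward_lookup(const uint32_t *order, uint32_t pos)
{
	return order[pos];			/* O(1) */
}

static int inverse_lookup(const uint32_t *sorted_keys, uint32_t nr,
			  uint32_t key, uint32_t *pos)
{
	uint32_t lo = 0, hi = nr;		/* O(log nr) binary search */

	while (lo < hi) {
		uint32_t mi = lo + (hi - lo) / 2;

		if (sorted_keys[mi] == key) {
			*pos = mi;
			return 0;
		}
		if (sorted_keys[mi] < key)
			lo = mi + 1;
		else
			hi = mi;
	}
	return -1;
}
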
diff --git a/pack-write.c b/pack-write.c
index 2767b78619..a5846f3a34 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -222,6 +222,9 @@ const char *write_rev_file(const char *rev_name,
uint32_t i;
const char *ret;
+ if (!(flags & WRITE_REV) && !(flags & WRITE_REV_VERIFY))
+ return NULL;
+
ALLOC_ARRAY(pack_order, nr_objects);
for (i = 0; i < nr_objects; i++)
pack_order[i] = i;
@@ -458,49 +461,48 @@ struct hashfile *create_tmp_packfile(char **pack_tmp_name)
return hashfd(fd, *pack_tmp_name);
}
-void finish_tmp_packfile(struct strbuf *name_buffer,
+static void rename_tmp_packfile(struct strbuf *name_prefix, const char *source,
+ const char *ext)
+{
+ size_t name_prefix_len = name_prefix->len;
+
+ strbuf_addstr(name_prefix, ext);
+ if (rename(source, name_prefix->buf))
+ die_errno("unable to rename temporary file to '%s'",
+ name_prefix->buf);
+ strbuf_setlen(name_prefix, name_prefix_len);
+}
+
+void rename_tmp_packfile_idx(struct strbuf *name_buffer,
+ char **idx_tmp_name)
+{
+ rename_tmp_packfile(name_buffer, *idx_tmp_name, "idx");
+}
+
+void stage_tmp_packfiles(struct strbuf *name_buffer,
const char *pack_tmp_name,
struct pack_idx_entry **written_list,
uint32_t nr_written,
struct pack_idx_option *pack_idx_opts,
- unsigned char hash[])
+ unsigned char hash[],
+ char **idx_tmp_name)
{
- const char *idx_tmp_name, *rev_tmp_name = NULL;
- int basename_len = name_buffer->len;
+ const char *rev_tmp_name = NULL;
if (adjust_shared_perm(pack_tmp_name))
die_errno("unable to make temporary pack file readable");
- idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
- pack_idx_opts, hash);
- if (adjust_shared_perm(idx_tmp_name))
+ *idx_tmp_name = (char *)write_idx_file(NULL, written_list, nr_written,
+ pack_idx_opts, hash);
+ if (adjust_shared_perm(*idx_tmp_name))
die_errno("unable to make temporary index file readable");
rev_tmp_name = write_rev_file(NULL, written_list, nr_written, hash,
pack_idx_opts->flags);
- strbuf_addf(name_buffer, "%s.pack", hash_to_hex(hash));
-
- if (rename(pack_tmp_name, name_buffer->buf))
- die_errno("unable to rename temporary pack file");
-
- strbuf_setlen(name_buffer, basename_len);
-
- strbuf_addf(name_buffer, "%s.idx", hash_to_hex(hash));
- if (rename(idx_tmp_name, name_buffer->buf))
- die_errno("unable to rename temporary index file");
-
- strbuf_setlen(name_buffer, basename_len);
-
- if (rev_tmp_name) {
- strbuf_addf(name_buffer, "%s.rev", hash_to_hex(hash));
- if (rename(rev_tmp_name, name_buffer->buf))
- die_errno("unable to rename temporary reverse-index file");
- }
-
- strbuf_setlen(name_buffer, basename_len);
-
- free((void *)idx_tmp_name);
+ rename_tmp_packfile(name_buffer, pack_tmp_name, "pack");
+ if (rev_tmp_name)
+ rename_tmp_packfile(name_buffer, rev_tmp_name, "rev");
}
void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought)
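
Splitting finish_tmp_packfile() lets a caller stage the .pack and .rev files first and move the .idx into place last, so readers never see an index that points at a half-installed pack. A hedged sketch of the intended sequence; the prefix construction and error handling are illustrative:

static void stage_then_install(struct strbuf *name_buffer,
			       const char *pack_tmp_name,
			       struct pack_idx_entry **written_list,
			       uint32_t nr_written,
			       struct pack_idx_option *opts,
			       unsigned char *hash)
{
	char *idx_tmp_name = NULL;

	/* name_buffer must end with "pack-<hash>." before staging */
	strbuf_addf(name_buffer, "pack-%s.", hash_to_hex(hash));

	stage_tmp_packfiles(name_buffer, pack_tmp_name, written_list,
			    nr_written, opts, hash, &idx_tmp_name);

	/* ... write .promisor/.keep or other companion files here ... */

	rename_tmp_packfile_idx(name_buffer, &idx_tmp_name);
	free(idx_tmp_name);
}
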
diff --git a/pack.h b/pack.h
index fa13954526..b22bfc4a18 100644
--- a/pack.h
+++ b/pack.h
@@ -110,6 +110,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
int read_pack_header(int fd, struct pack_header *);
struct hashfile *create_tmp_packfile(char **pack_tmp_name);
-void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
+void stage_tmp_packfiles(struct strbuf *name_buffer,
+ const char *pack_tmp_name,
+ struct pack_idx_entry **written_list,
+ uint32_t nr_written,
+ struct pack_idx_option *pack_idx_opts,
+ unsigned char hash[],
+ char **idx_tmp_name);
+void rename_tmp_packfile_idx(struct strbuf *basename,
+ char **idx_tmp_name);
#endif
diff --git a/packfile.c b/packfile.c
index 4d0d625238..89402cfc69 100644
--- a/packfile.c
+++ b/packfile.c
@@ -339,6 +339,7 @@ void close_pack(struct packed_git *p)
close_pack_fd(p);
close_pack_index(p);
close_pack_revindex(p);
+ oidset_clear(&p->bad_objects);
}
void close_object_store(struct raw_object_store *o)
@@ -860,7 +861,7 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
if (!strcmp(file_name, "multi-pack-index"))
return;
if (starts_with(file_name, "multi-pack-index") &&
- ends_with(file_name, ".rev"))
+ (ends_with(file_name, ".bitmap") || ends_with(file_name, ".rev")))
return;
if (ends_with(file_name, ".idx") ||
ends_with(file_name, ".rev") ||
@@ -1161,31 +1162,19 @@ int unpack_object_header(struct packed_git *p,
return type;
}
-void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1)
+void mark_bad_packed_object(struct packed_git *p, const struct object_id *oid)
{
- unsigned i;
- const unsigned hashsz = the_hash_algo->rawsz;
- for (i = 0; i < p->num_bad_objects; i++)
- if (hasheq(sha1, p->bad_object_sha1 + hashsz * i))
- return;
- p->bad_object_sha1 = xrealloc(p->bad_object_sha1,
- st_mult(GIT_MAX_RAWSZ,
- st_add(p->num_bad_objects, 1)));
- hashcpy(p->bad_object_sha1 + hashsz * p->num_bad_objects, sha1);
- p->num_bad_objects++;
+ oidset_insert(&p->bad_objects, oid);
}
const struct packed_git *has_packed_and_bad(struct repository *r,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
struct packed_git *p;
- unsigned i;
for (p = r->objects->packed_git; p; p = p->next)
- for (i = 0; i < p->num_bad_objects; i++)
- if (hasheq(sha1,
- p->bad_object_sha1 + the_hash_algo->rawsz * i))
- return p;
+ if (oidset_contains(&p->bad_objects, oid))
+ return p;
return NULL;
}
@@ -1272,7 +1261,7 @@ static int retry_bad_packed_offset(struct repository *r,
if (offset_to_pack_pos(p, obj_offset, &pos) < 0)
return OBJ_BAD;
nth_packed_object_id(&oid, p, pack_pos_to_index(p, pos));
- mark_bad_packed_object(p, oid.hash);
+ mark_bad_packed_object(p, &oid);
type = oid_object_info(r, &oid, NULL);
if (type <= OBJ_NONE)
return OBJ_BAD;
@@ -1722,7 +1711,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
nth_packed_object_id(&oid, p, index_pos);
error("bad packed object CRC for %s",
oid_to_hex(&oid));
- mark_bad_packed_object(p, oid.hash);
+ mark_bad_packed_object(p, &oid);
data = NULL;
goto out;
}
@@ -1811,7 +1800,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
" at offset %"PRIuMAX" from %s",
oid_to_hex(&base_oid), (uintmax_t)obj_offset,
p->pack_name);
- mark_bad_packed_object(p, base_oid.hash);
+ mark_bad_packed_object(p, &base_oid);
base = read_object(r, &base_oid, &type, &base_size);
external_base = base;
}
@@ -2016,13 +2005,9 @@ static int fill_pack_entry(const struct object_id *oid,
{
off_t offset;
- if (p->num_bad_objects) {
- unsigned i;
- for (i = 0; i < p->num_bad_objects; i++)
- if (hasheq(oid->hash,
- p->bad_object_sha1 + the_hash_algo->rawsz * i))
- return 0;
- }
+ if (oidset_size(&p->bad_objects) &&
+ oidset_contains(&p->bad_objects, oid))
+ return 0;
offset = find_pack_entry_one(oid->hash, p);
if (!offset)
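
With bad_objects now an oidset, membership checks are constant time and duplicates are handled automatically. A minimal sketch of the lookup pattern used above; the helper name is hypothetical:

static int is_known_bad(struct packed_git *p, const struct object_id *oid)
{
	if (!oidset_size(&p->bad_objects))
		return 0;	/* common case: nothing recorded for this pack */
	return oidset_contains(&p->bad_objects, oid);
}
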
diff --git a/packfile.h b/packfile.h
index 3ae117a8ae..186146779d 100644
--- a/packfile.h
+++ b/packfile.h
@@ -159,8 +159,8 @@ int packed_object_info(struct repository *r,
struct packed_git *pack,
off_t offset, struct object_info *);
-void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
-const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
+void mark_bad_packed_object(struct packed_git *, const struct object_id *);
+const struct packed_git *has_packed_and_bad(struct repository *, const struct object_id *);
#define ON_DISK_KEEP_PACKS 1
#define IN_CORE_KEEP_PACKS 2
diff --git a/parse-options.c b/parse-options.c
index 2abff136a1..55c5821b08 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -310,19 +310,6 @@ static enum parse_opt_result parse_long_opt(
again:
if (!skip_prefix(arg, long_name, &rest))
rest = NULL;
- if (options->type == OPTION_ARGUMENT) {
- if (!rest)
- continue;
- if (*rest == '=')
- return error(_("%s takes no value"),
- optname(options, flags));
- if (*rest)
- continue;
- if (options->value)
- *(int *)options->value = options->defval;
- p->out[p->cpidx++] = arg - 2;
- return PARSE_OPT_DONE;
- }
if (!rest) {
/* abbreviated? */
if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN) &&
diff --git a/parse-options.h b/parse-options.h
index 0e9271dde5..13405472ee 100644
--- a/parse-options.h
+++ b/parse-options.h
@@ -8,7 +8,6 @@
enum parse_opt_type {
/* special types */
OPTION_END,
- OPTION_ARGUMENT,
OPTION_GROUP,
OPTION_NUMBER,
OPTION_ALIAS,
@@ -155,8 +154,6 @@ struct option {
#define OPT_INTEGER_F(s, l, v, h, f) { OPTION_INTEGER, (s), (l), (v), N_("n"), (h), (f) }
#define OPT_END() { OPTION_END }
-#define OPT_ARGUMENT(l, v, h) { OPTION_ARGUMENT, 0, (l), (v), NULL, \
- (h), PARSE_OPT_NOARG, NULL, 1 }
#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
#define OPT_BIT(s, l, v, h, b) OPT_BIT_F(s, l, v, h, b, 0)
#define OPT_BITOP(s, l, v, h, set, clear) { OPTION_BITOP, (s), (l), (v), NULL, (h), \
diff --git a/path.c b/path.c
index 0bc788ea40..2c895471d9 100644
--- a/path.c
+++ b/path.c
@@ -1510,21 +1510,28 @@ int looks_like_command_line_option(const char *str)
return str && str[0] == '-';
}
-char *xdg_config_home(const char *filename)
+char *xdg_config_home_for(const char *subdir, const char *filename)
{
const char *home, *config_home;
+ assert(subdir);
assert(filename);
config_home = getenv("XDG_CONFIG_HOME");
if (config_home && *config_home)
- return mkpathdup("%s/git/%s", config_home, filename);
+ return mkpathdup("%s/%s/%s", config_home, subdir, filename);
home = getenv("HOME");
if (home)
- return mkpathdup("%s/.config/git/%s", home, filename);
+ return mkpathdup("%s/.config/%s/%s", home, subdir, filename);
+
return NULL;
}
+char *xdg_config_home(const char *filename)
+{
+ return xdg_config_home_for("git", filename);
+}
+
char *xdg_cache_home(const char *filename)
{
const char *home, *cache_home;
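
xdg_config_home_for() generalizes the old helper to any subdirectory under $XDG_CONFIG_HOME (falling back to ~/.config). A short usage sketch; the filenames are only examples:

static void print_xdg_paths(void)
{
	char *git_ignore = xdg_config_home("ignore");             /* .../git/ignore */
	char *other_conf = xdg_config_home_for("foo", "config");  /* .../foo/config */

	if (git_ignore)
		printf("%s\n", git_ignore);
	if (other_conf)
		printf("%s\n", other_conf);

	free(git_ignore);
	free(other_conf);
}
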
diff --git a/path.h b/path.h
index 251c78d980..b68691a86b 100644
--- a/path.h
+++ b/path.h
@@ -181,10 +181,7 @@ struct path_cache {
const char *shallow;
};
-#define PATH_CACHE_INIT \
- { \
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL \
- }
+#define PATH_CACHE_INIT { 0 }
const char *git_path_squash_msg(struct repository *r);
const char *git_path_merge_msg(struct repository *r);
diff --git a/pathspec.c b/pathspec.c
index 44306fdaca..ddeeba7911 100644
--- a/pathspec.c
+++ b/pathspec.c
@@ -39,7 +39,8 @@ void add_pathspec_matches_against_index(const struct pathspec *pathspec,
return;
for (i = 0; i < istate->cache_nr; i++) {
const struct cache_entry *ce = istate->cache[i];
- if (sw_action == PS_IGNORE_SKIP_WORKTREE && ce_skip_worktree(ce))
+ if (sw_action == PS_IGNORE_SKIP_WORKTREE &&
+ (ce_skip_worktree(ce) || !path_in_sparse_checkout(ce->name, istate)))
continue;
ce_path_match(istate, ce, pathspec, seen);
}
@@ -70,7 +71,7 @@ char *find_pathspecs_matching_skip_worktree(const struct pathspec *pathspec)
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
- if (ce_skip_worktree(ce))
+ if (ce_skip_worktree(ce) || !path_in_sparse_checkout(ce->name, istate))
ce_path_match(istate, ce, pathspec, seen);
}
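
Both hunks above apply the same test: an entry is out of scope if it is marked skip-worktree or falls outside the sparse-checkout patterns. A sketch of that shared condition; the helper name is hypothetical:

static int entry_outside_checkout(const struct cache_entry *ce,
				  struct index_state *istate)
{
	return ce_skip_worktree(ce) ||
	       !path_in_sparse_checkout(ce->name, istate);
}
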
diff --git a/pkt-line.c b/pkt-line.c
index 9f63eae2e6..de4a94b437 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -243,6 +243,43 @@ void packet_write(int fd_out, const char *buf, size_t size)
die("%s", err.buf);
}
+void packet_fwrite(FILE *f, const char *buf, size_t size)
+{
+ size_t packet_size;
+ char header[4];
+
+ if (size > LARGE_PACKET_DATA_MAX)
+ die(_("packet write failed - data exceeds max packet size"));
+
+ packet_trace(buf, size, 1);
+ packet_size = size + 4;
+
+ set_packet_header(header, packet_size);
+ fwrite_or_die(f, header, 4);
+ fwrite_or_die(f, buf, size);
+}
+
+void packet_fwrite_fmt(FILE *fh, const char *fmt, ...)
+{
+ static struct strbuf buf = STRBUF_INIT;
+ va_list args;
+
+ strbuf_reset(&buf);
+
+ va_start(args, fmt);
+ format_packet(&buf, "", fmt, args);
+ va_end(args);
+
+ fwrite_or_die(fh, buf.buf, buf.len);
+}
+
+void packet_fflush(FILE *f)
+{
+ packet_trace("0000", 4, 1);
+ fwrite_or_die(f, "0000", 4);
+ fflush_or_die(f);
+}
+
void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
{
va_list args;
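
A usage sketch for the stdio pkt-line writers; the payload strings are only examples, and as the header comment in pkt-line.h notes, the FILE* must be flushed before any later write(2) on the same descriptor:

static void send_example_response(FILE *out)
{
	const char *cap = "agent=example";

	packet_fwrite_fmt(out, "version %d\n", 2);
	packet_fwrite(out, cap, strlen(cap));
	packet_fflush(out);	/* emits "0000" and fflush()es the stream */
}
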
diff --git a/pkt-line.h b/pkt-line.h
index 5af5f45687..82b95e4bdd 100644
--- a/pkt-line.h
+++ b/pkt-line.h
@@ -36,6 +36,17 @@ int write_packetized_from_fd_no_flush(int fd_in, int fd_out);
int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out);
/*
+ * Stdio versions of packet_write functions. When mixing these with fd
+ * based functions, take care to call fflush(3) before doing fd writes or
+ * closing the fd.
+ */
+void packet_fwrite(FILE *f, const char *buf, size_t size);
+void packet_fwrite_fmt(FILE *f, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+
+/* packet_fflush writes a flush packet and flushes the stdio buffer of f */
+void packet_fflush(FILE *f);
+
+/*
* Read a packetized line into the buffer, which must be at least size bytes
* long. The return value specifies the number of bytes read into the buffer.
*
diff --git a/protocol-caps.c b/protocol-caps.c
index 901b6795e4..bbde91810a 100644
--- a/protocol-caps.c
+++ b/protocol-caps.c
@@ -75,8 +75,7 @@ static void send_info(struct repository *r, struct packet_writer *writer,
strbuf_release(&send_buffer);
}
-int cap_object_info(struct repository *r, struct strvec *keys,
- struct packet_reader *request)
+int cap_object_info(struct repository *r, struct packet_reader *request)
{
struct requested_info info;
struct packet_writer writer;
diff --git a/protocol-caps.h b/protocol-caps.h
index 0a9f49df11..15c4550360 100644
--- a/protocol-caps.h
+++ b/protocol-caps.h
@@ -2,9 +2,7 @@
#define PROTOCOL_CAPS_H
struct repository;
-struct strvec;
struct packet_reader;
-int cap_object_info(struct repository *r, struct strvec *keys,
- struct packet_reader *request);
+int cap_object_info(struct repository *r, struct packet_reader *request);
#endif /* PROTOCOL_CAPS_H */
diff --git a/range-diff.c b/range-diff.c
index e731525e66..cac89a2f4f 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -482,6 +482,7 @@ static void output(struct string_list *a, struct string_list *b,
else
diff_setup(&opts);
+ opts.no_free = 1;
if (!opts.output_format)
opts.output_format = DIFF_FORMAT_PATCH;
opts.flags.suppress_diff_headers = 1;
@@ -542,6 +543,8 @@ static void output(struct string_list *a, struct string_list *b,
strbuf_release(&buf);
strbuf_release(&dashes);
strbuf_release(&indent);
+ opts.no_free = 0;
+ diff_free(&opts);
}
int show_range_diff(const char *range1, const char *range2,
diff --git a/read-cache.c b/read-cache.c
index 9048ef9e90..a78b88a41b 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1944,13 +1944,22 @@ static void tweak_untracked_cache(struct index_state *istate)
prepare_repo_settings(r);
- if (r->settings.core_untracked_cache == UNTRACKED_CACHE_REMOVE) {
+ switch (r->settings.core_untracked_cache) {
+ case UNTRACKED_CACHE_REMOVE:
remove_untracked_cache(istate);
- return;
- }
-
- if (r->settings.core_untracked_cache == UNTRACKED_CACHE_WRITE)
+ break;
+ case UNTRACKED_CACHE_WRITE:
add_untracked_cache(istate);
+ break;
+ case UNTRACKED_CACHE_KEEP:
+ /*
+ * Either an explicit "core.untrackedCache=keep", the
+ * default if "core.untrackedCache" isn't configured,
+ * or a fallback on an unknown "core.untrackedCache"
+ * value.
+ */
+ break;
+ }
}
static void tweak_split_index(struct index_state *istate)
@@ -2391,9 +2400,21 @@ int read_index_from(struct index_state *istate, const char *path,
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
trace2_region_enter_printf("index", "shared/do_read_index",
the_repository, "%s", base_path);
- ret = do_read_index(split_index->base, base_path, 1);
+ ret = do_read_index(split_index->base, base_path, 0);
trace2_region_leave_printf("index", "shared/do_read_index",
the_repository, "%s", base_path);
+ if (!ret) {
+ char *path_copy = xstrdup(path);
+ const char *base_path2 = xstrfmt("%s/sharedindex.%s",
+ dirname(path_copy),
+ base_oid_hex);
+ free(path_copy);
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path2);
+ ret = do_read_index(split_index->base, base_path2, 1);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path2);
+ }
if (!oideq(&split_index->base_oid, &split_index->base->oid))
die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
@@ -2812,11 +2833,8 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
}
}
- if (!istate->version) {
+ if (!istate->version)
istate->version = get_index_format_default(the_repository);
- if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
- init_split_index(istate);
- }
/* demote version 3 to version 2 when the latter suffices */
if (istate->version == 3 || istate->version == 2)
@@ -3069,7 +3087,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l
int ret;
int was_full = !istate->sparse_index;
- ret = convert_to_sparse(istate);
+ ret = convert_to_sparse(istate, 0);
if (ret) {
warning(_("failed to convert to a sparse-index"));
@@ -3182,7 +3200,7 @@ static int write_shared_index(struct index_state *istate,
int ret, was_full = !istate->sparse_index;
move_cache_to_base_index(istate);
- convert_to_sparse(istate);
+ convert_to_sparse(istate, 0);
trace2_region_enter_printf("index", "shared/do_write_index",
the_repository, "%s", get_tempfile_path(*temp));
@@ -3243,7 +3261,7 @@ static int too_many_not_shared_entries(struct index_state *istate)
int write_locked_index(struct index_state *istate, struct lock_file *lock,
unsigned flags)
{
- int new_shared_index, ret;
+ int new_shared_index, ret, test_split_index_env;
struct split_index *si = istate->split_index;
if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
@@ -3258,7 +3276,10 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
if (istate->fsmonitor_last_update)
fill_fsmonitor_bitmap(istate);
- if (!si || alternate_index_output ||
+ test_split_index_env = git_env_bool("GIT_TEST_SPLIT_INDEX", 0);
+
+ if ((!si && !test_split_index_env) ||
+ alternate_index_output ||
(istate->cache_changed & ~EXTMASK)) {
if (si)
oidclr(&si->base_oid);
@@ -3266,10 +3287,15 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
goto out;
}
- if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
- int v = si->base_oid.hash[0];
- if ((v & 15) < 6)
+ if (test_split_index_env) {
+ if (!si) {
+ si = init_split_index(istate);
istate->cache_changed |= SPLIT_INDEX_ORDERED;
+ } else {
+ int v = si->base_oid.hash[0];
+ if ((v & 15) < 6)
+ istate->cache_changed |= SPLIT_INDEX_ORDERED;
+ }
}
if (too_many_not_shared_entries(istate))
istate->cache_changed |= SPLIT_INDEX_ORDERED;
diff --git a/ref-filter.c b/ref-filter.c
index 93ce2a6ef2..add429be79 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -633,7 +633,7 @@ static struct {
*/
};
-#define REF_FORMATTING_STATE_INIT { 0, NULL }
+#define REF_FORMATTING_STATE_INIT { 0 }
struct ref_formatting_stack {
struct ref_formatting_stack *prev;
@@ -2100,8 +2100,7 @@ static int filter_pattern_match(struct ref_filter *filter, const char *refname)
*/
static int for_each_fullref_in_pattern(struct ref_filter *filter,
each_ref_fn cb,
- void *cb_data,
- int broken)
+ void *cb_data)
{
if (!filter->match_as_path) {
/*
@@ -2109,7 +2108,7 @@ static int for_each_fullref_in_pattern(struct ref_filter *filter,
* prefixes like "refs/heads/" etc. are stripped off,
* so we have to look at everything:
*/
- return for_each_fullref_in("", cb, cb_data, broken);
+ return for_each_fullref_in("", cb, cb_data);
}
if (filter->ignore_case) {
@@ -2118,16 +2117,16 @@ static int for_each_fullref_in_pattern(struct ref_filter *filter,
* so just return everything and let the caller
* sort it out.
*/
- return for_each_fullref_in("", cb, cb_data, broken);
+ return for_each_fullref_in("", cb, cb_data);
}
if (!filter->name_patterns[0]) {
/* no patterns; we have to look at everything */
- return for_each_fullref_in("", cb, cb_data, broken);
+ return for_each_fullref_in("", cb, cb_data);
}
return for_each_fullref_in_prefixes(NULL, filter->name_patterns,
- cb, cb_data, broken);
+ cb, cb_data);
}
/*
@@ -2405,13 +2404,10 @@ int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int
{
struct ref_filter_cbdata ref_cbdata;
int ret = 0;
- unsigned int broken = 0;
ref_cbdata.array = array;
ref_cbdata.filter = filter;
- if (type & FILTER_REFS_INCLUDE_BROKEN)
- broken = 1;
filter->kind = type & FILTER_REFS_KIND_MASK;
init_contains_cache(&ref_cbdata.contains_cache);
@@ -2428,13 +2424,13 @@ int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int
* of filter_ref_kind().
*/
if (filter->kind == FILTER_REFS_BRANCHES)
- ret = for_each_fullref_in("refs/heads/", ref_filter_handler, &ref_cbdata, broken);
+ ret = for_each_fullref_in("refs/heads/", ref_filter_handler, &ref_cbdata);
else if (filter->kind == FILTER_REFS_REMOTES)
- ret = for_each_fullref_in("refs/remotes/", ref_filter_handler, &ref_cbdata, broken);
+ ret = for_each_fullref_in("refs/remotes/", ref_filter_handler, &ref_cbdata);
else if (filter->kind == FILTER_REFS_TAGS)
- ret = for_each_fullref_in("refs/tags/", ref_filter_handler, &ref_cbdata, broken);
+ ret = for_each_fullref_in("refs/tags/", ref_filter_handler, &ref_cbdata);
else if (filter->kind & FILTER_REFS_ALL)
- ret = for_each_fullref_in_pattern(filter, ref_filter_handler, &ref_cbdata, broken);
+ ret = for_each_fullref_in_pattern(filter, ref_filter_handler, &ref_cbdata);
if (!ret && (filter->kind & FILTER_REFS_DETACHED_HEAD))
head_ref(ref_filter_handler, &ref_cbdata);
}
diff --git a/ref-filter.h b/ref-filter.h
index c15dee8d6b..b636f4389d 100644
--- a/ref-filter.h
+++ b/ref-filter.h
@@ -13,7 +13,6 @@
#define QUOTE_PYTHON 4
#define QUOTE_TCL 8
-#define FILTER_REFS_INCLUDE_BROKEN 0x0001
#define FILTER_REFS_TAGS 0x0002
#define FILTER_REFS_BRANCHES 0x0004
#define FILTER_REFS_REMOTES 0x0008
diff --git a/reflog-walk.c b/reflog-walk.c
index e9cd328369..8ac4b284b6 100644
--- a/reflog-walk.c
+++ b/reflog-walk.c
@@ -158,10 +158,9 @@ int add_reflog_for_walk(struct reflog_walk_info *info,
}
reflogs = read_complete_reflog(branch);
if (!reflogs || reflogs->nr == 0) {
- struct object_id oid;
char *b;
int ret = dwim_log(branch, strlen(branch),
- &oid, &b);
+ NULL, &b);
if (ret > 1)
free(b);
else if (ret == 1) {
diff --git a/refs.c b/refs.c
index 8b9f7c3a80..7f019c2377 100644
--- a/refs.c
+++ b/refs.c
@@ -10,6 +10,7 @@
#include "refs.h"
#include "refs/refs-internal.h"
#include "run-command.h"
+#include "hook.h"
#include "object-store.h"
#include "object.h"
#include "tag.h"
@@ -33,11 +34,6 @@ static struct ref_storage_be *find_ref_storage_backend(const char *name)
return NULL;
}
-int ref_storage_backend_exists(const char *name)
-{
- return find_ref_storage_backend(name) != NULL;
-}
-
/*
* How to handle various characters in refnames:
* 0: An acceptable character for refs
@@ -698,7 +694,7 @@ int repo_dwim_log(struct repository *r, const char *str, int len,
strbuf_addf(&path, *p, len, str);
ref = refs_resolve_ref_unsafe(refs, path.buf,
RESOLVE_REF_READING,
- &hash, NULL);
+ oid ? &hash : NULL, NULL);
if (!ref)
continue;
if (refs_reflog_exists(refs, path.buf))
@@ -710,7 +706,8 @@ int repo_dwim_log(struct repository *r, const char *str, int len,
continue;
if (!logs_found++) {
*log = xstrdup(it);
- oidcpy(oid, &hash);
+ if (oid)
+ oidcpy(oid, &hash);
}
if (!warn_ambiguous_refs)
break;
@@ -1413,14 +1410,21 @@ int head_ref(each_ref_fn fn, void *cb_data)
struct ref_iterator *refs_ref_iterator_begin(
struct ref_store *refs,
- const char *prefix, int trim, int flags)
+ const char *prefix, int trim,
+ enum do_for_each_ref_flags flags)
{
struct ref_iterator *iter;
- if (ref_paranoia < 0)
- ref_paranoia = git_env_bool("GIT_REF_PARANOIA", 0);
- if (ref_paranoia)
- flags |= DO_FOR_EACH_INCLUDE_BROKEN;
+ if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN)) {
+ static int ref_paranoia = -1;
+
+ if (ref_paranoia < 0)
+ ref_paranoia = git_env_bool("GIT_REF_PARANOIA", 1);
+ if (ref_paranoia) {
+ flags |= DO_FOR_EACH_INCLUDE_BROKEN;
+ flags |= DO_FOR_EACH_OMIT_DANGLING_SYMREFS;
+ }
+ }
iter = refs->be->iterator_begin(refs, prefix, flags);
@@ -1479,7 +1483,8 @@ static int do_for_each_ref_helper(struct repository *r,
}
static int do_for_each_ref(struct ref_store *refs, const char *prefix,
- each_ref_fn fn, int trim, int flags, void *cb_data)
+ each_ref_fn fn, int trim,
+ enum do_for_each_ref_flags flags, void *cb_data)
{
struct ref_iterator *iter;
struct do_for_each_ref_help hp = { fn, cb_data };
@@ -1514,25 +1519,16 @@ int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data)
return refs_for_each_ref_in(get_main_ref_store(the_repository), prefix, fn, cb_data);
}
-int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data, unsigned int broken)
+int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data)
{
- unsigned int flag = 0;
-
- if (broken)
- flag = DO_FOR_EACH_INCLUDE_BROKEN;
return do_for_each_ref(get_main_ref_store(the_repository),
- prefix, fn, 0, flag, cb_data);
+ prefix, fn, 0, 0, cb_data);
}
int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix,
- each_ref_fn fn, void *cb_data,
- unsigned int broken)
+ each_ref_fn fn, void *cb_data)
{
- unsigned int flag = 0;
-
- if (broken)
- flag = DO_FOR_EACH_INCLUDE_BROKEN;
- return do_for_each_ref(refs, prefix, fn, 0, flag, cb_data);
+ return do_for_each_ref(refs, prefix, fn, 0, 0, cb_data);
}
int for_each_replace_ref(struct repository *r, each_repo_ref_fn fn, void *cb_data)
@@ -1624,8 +1620,7 @@ static void find_longest_prefixes(struct string_list *out,
int for_each_fullref_in_prefixes(const char *namespace,
const char **patterns,
- each_ref_fn fn, void *cb_data,
- unsigned int broken)
+ each_ref_fn fn, void *cb_data)
{
struct string_list prefixes = STRING_LIST_INIT_DUP;
struct string_list_item *prefix;
@@ -1640,7 +1635,7 @@ int for_each_fullref_in_prefixes(const char *namespace,
for_each_string_list_item(prefix, &prefixes) {
strbuf_addstr(&buf, prefix->string);
- ret = for_each_fullref_in(buf.buf, fn, cb_data, broken);
+ ret = for_each_fullref_in(buf.buf, fn, cb_data);
if (ret)
break;
strbuf_setlen(&buf, namespace_len);
@@ -1681,7 +1676,7 @@ int refs_read_raw_ref(struct ref_store *ref_store,
}
return ref_store->be->read_raw_ref(ref_store, refname, oid, referent,
- type);
+ type, &errno);
}
/* This function needs to return a meaningful errno on failure */
@@ -2370,19 +2365,19 @@ int delete_reflog(const char *refname)
}
int refs_reflog_expire(struct ref_store *refs,
- const char *refname, const struct object_id *oid,
+ const char *refname,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
reflog_expiry_cleanup_fn cleanup_fn,
void *policy_cb_data)
{
- return refs->be->reflog_expire(refs, refname, oid, flags,
+ return refs->be->reflog_expire(refs, refname, flags,
prepare_fn, should_prune_fn,
cleanup_fn, policy_cb_data);
}
-int reflog_expire(const char *refname, const struct object_id *oid,
+int reflog_expire(const char *refname,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
@@ -2390,7 +2385,7 @@ int reflog_expire(const char *refname, const struct object_id *oid,
void *policy_cb_data)
{
return refs_reflog_expire(get_main_ref_store(the_repository),
- refname, oid, flags,
+ refname, flags,
prepare_fn, should_prune_fn,
cleanup_fn, policy_cb_data);
}
diff --git a/refs.h b/refs.h
index 48970dfc7e..d5099d4984 100644
--- a/refs.h
+++ b/refs.h
@@ -342,10 +342,8 @@ int for_each_ref(each_ref_fn fn, void *cb_data);
int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data);
int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix,
- each_ref_fn fn, void *cb_data,
- unsigned int broken);
-int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data,
- unsigned int broken);
+ each_ref_fn fn, void *cb_data);
+int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data);
/**
* iterate all refs in "patterns" by partitioning patterns into disjoint sets
@@ -354,8 +352,7 @@ int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data,
* callers should be prepared to ignore references that they did not ask for.
*/
int for_each_fullref_in_prefixes(const char *namespace, const char **patterns,
- each_ref_fn fn, void *cb_data,
- unsigned int broken);
+ each_ref_fn fn, void *cb_data);
/**
* iterate refs from the respective area.
*/
@@ -796,7 +793,7 @@ enum expire_reflog_flags {
* expiration policy that is desired.
*
* reflog_expiry_prepare_fn -- Called once after the reference is
- * locked.
+ * locked. Called with the OID of the locked reference.
*
* reflog_expiry_should_prune_fn -- Called once for each entry in the
* existing reflog. It should return true iff that entry should be
@@ -816,28 +813,25 @@ typedef int reflog_expiry_should_prune_fn(struct object_id *ooid,
typedef void reflog_expiry_cleanup_fn(void *cb_data);
/*
- * Expire reflog entries for the specified reference. oid is the old
- * value of the reference. flags is a combination of the constants in
+ * Expire reflog entries for the specified reference.
+ * flags is a combination of the constants in
* enum expire_reflog_flags. The three function pointers are described
* above. On success, return zero.
*/
int refs_reflog_expire(struct ref_store *refs,
const char *refname,
- const struct object_id *oid,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
reflog_expiry_cleanup_fn cleanup_fn,
void *policy_cb_data);
-int reflog_expire(const char *refname, const struct object_id *oid,
+int reflog_expire(const char *refname,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
reflog_expiry_cleanup_fn cleanup_fn,
void *policy_cb_data);
-int ref_storage_backend_exists(const char *name);
-
struct ref_store *get_main_ref_store(struct repository *r);
/**
diff --git a/refs/debug.c b/refs/debug.c
index 1a7a9e11cf..8667c64023 100644
--- a/refs/debug.c
+++ b/refs/debug.c
@@ -239,15 +239,14 @@ debug_ref_iterator_begin(struct ref_store *ref_store, const char *prefix,
static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname,
struct object_id *oid, struct strbuf *referent,
- unsigned int *type)
+ unsigned int *type, int *failure_errno)
{
struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
int res = 0;
oidcpy(oid, null_oid());
- errno = 0;
res = drefs->refs->be->read_raw_ref(drefs->refs, refname, oid, referent,
- type);
+ type, failure_errno);
if (res == 0) {
trace_printf_key(&trace_refs, "read_raw_ref: %s: %s (=> %s) type %x: %d\n",
@@ -255,7 +254,7 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname,
} else {
trace_printf_key(&trace_refs,
"read_raw_ref: %s: %d (errno %d)\n", refname,
- res, errno);
+ res, *failure_errno);
}
return res;
}
@@ -365,8 +364,8 @@ struct debug_reflog_expiry_should_prune {
};
static void debug_reflog_expiry_prepare(const char *refname,
- const struct object_id *oid,
- void *cb_data)
+ const struct object_id *oid,
+ void *cb_data)
{
struct debug_reflog_expiry_should_prune *prune = cb_data;
trace_printf_key(&trace_refs, "reflog_expire_prepare: %s\n", refname);
@@ -392,7 +391,7 @@ static void debug_reflog_expiry_cleanup(void *cb_data)
}
static int debug_reflog_expire(struct ref_store *ref_store, const char *refname,
- const struct object_id *oid, unsigned int flags,
+ unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
reflog_expiry_cleanup_fn cleanup_fn,
@@ -405,7 +404,7 @@ static int debug_reflog_expire(struct ref_store *ref_store, const char *refname,
.should_prune = should_prune_fn,
.cb_data = policy_cb_data,
};
- int res = drefs->refs->be->reflog_expire(drefs->refs, refname, oid,
+ int res = drefs->refs->be->reflog_expire(drefs->refs, refname,
flags, &debug_reflog_expiry_prepare,
&debug_reflog_expiry_should_prune_fn,
&debug_reflog_expiry_cleanup,
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 677b7e4cdd..6a6ead0b99 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -227,7 +227,7 @@ static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dir
pos = search_ref_dir(dir, prefix, prefix_len);
if (pos >= 0)
continue;
- child_entry = create_dir_entry(dir->cache, prefix, prefix_len, 1);
+ child_entry = create_dir_entry(dir->cache, prefix, prefix_len);
add_entry_to_dir(dir, child_entry);
}
}
@@ -278,7 +278,7 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
strbuf_addch(&refname, '/');
add_entry_to_dir(dir,
create_dir_entry(dir->cache, refname.buf,
- refname.len, 1));
+ refname.len));
} else {
if (!refs_resolve_ref_unsafe(&refs->base,
refname.buf,
@@ -336,14 +336,14 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
* lazily):
*/
add_entry_to_dir(get_ref_dir(refs->loose->root),
- create_dir_entry(refs->loose, "refs/", 5, 1));
+ create_dir_entry(refs->loose, "refs/", 5));
}
return refs->loose;
}
-static int files_read_raw_ref(struct ref_store *ref_store,
- const char *refname, struct object_id *oid,
- struct strbuf *referent, unsigned int *type)
+static int files_read_raw_ref(struct ref_store *ref_store, const char *refname,
+ struct object_id *oid, struct strbuf *referent,
+ unsigned int *type, int *failure_errno)
{
struct files_ref_store *refs =
files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
@@ -354,7 +354,6 @@ static int files_read_raw_ref(struct ref_store *ref_store,
struct stat st;
int fd;
int ret = -1;
- int save_errno;
int remaining_retries = 3;
*type = 0;
@@ -459,10 +458,9 @@ stat_ref:
ret = parse_loose_ref_contents(buf, oid, referent, type);
out:
- save_errno = errno;
+ *failure_errno = errno;
strbuf_release(&sb_path);
strbuf_release(&sb_contents);
- errno = save_errno;
return ret;
}
@@ -531,7 +529,6 @@ static void unlock_ref(struct ref_lock *lock)
static int lock_raw_ref(struct files_ref_store *refs,
const char *refname, int mustexist,
const struct string_list *extras,
- const struct string_list *skip,
struct ref_lock **lock_p,
struct strbuf *referent,
unsigned int *type,
@@ -541,6 +538,7 @@ static int lock_raw_ref(struct files_ref_store *refs,
struct strbuf ref_file = STRBUF_INIT;
int attempts_remaining = 3;
int ret = TRANSACTION_GENERIC_ERROR;
+ int failure_errno;
assert(err);
files_assert_main_repository(refs, "lock_raw_ref");
@@ -568,7 +566,7 @@ retry:
* reason to expect this error to be transitory.
*/
if (refs_verify_refname_available(&refs->base, refname,
- extras, skip, err)) {
+ extras, NULL, err)) {
if (mustexist) {
/*
* To the user the relevant error is
@@ -611,7 +609,9 @@ retry:
if (hold_lock_file_for_update_timeout(
&lock->lk, ref_file.buf, LOCK_NO_DEREF,
get_files_ref_lock_timeout_ms()) < 0) {
- if (errno == ENOENT && --attempts_remaining > 0) {
+ int myerr = errno;
+ errno = 0;
+ if (myerr == ENOENT && --attempts_remaining > 0) {
/*
* Maybe somebody just deleted one of the
* directories leading to ref_file. Try
@@ -619,7 +619,7 @@ retry:
*/
goto retry;
} else {
- unable_to_lock_message(ref_file.buf, errno, err);
+ unable_to_lock_message(ref_file.buf, myerr, err);
goto error_return;
}
}
@@ -629,9 +629,9 @@ retry:
* fear that its value will change.
*/
- if (files_read_raw_ref(&refs->base, refname,
- &lock->old_oid, referent, type)) {
- if (errno == ENOENT) {
+ if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent,
+ type, &failure_errno)) {
+ if (failure_errno == ENOENT) {
if (mustexist) {
/* Garden variety missing reference. */
strbuf_addf(err, "unable to resolve reference '%s'",
@@ -655,7 +655,7 @@ retry:
* reference named "refs/foo/bar/baz".
*/
}
- } else if (errno == EISDIR) {
+ } else if (failure_errno == EISDIR) {
/*
* There is a directory in the way. It might have
* contained references that have been deleted. If
@@ -673,7 +673,7 @@ retry:
REMOVE_DIR_EMPTY_ONLY)) {
if (refs_verify_refname_available(
&refs->base, refname,
- extras, skip, err)) {
+ extras, NULL, err)) {
/*
* The error message set by
* verify_refname_available() is OK.
@@ -693,13 +693,13 @@ retry:
goto error_return;
}
}
- } else if (errno == EINVAL && (*type & REF_ISBROKEN)) {
+ } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) {
strbuf_addf(err, "unable to resolve reference '%s': "
"reference broken", refname);
goto error_return;
} else {
strbuf_addf(err, "unable to resolve reference '%s': %s",
- refname, strerror(errno));
+ refname, strerror(failure_errno));
goto error_return;
}
@@ -710,7 +710,7 @@ retry:
*/
if (refs_verify_refname_available(
refs->packed_ref_store, refname,
- extras, skip, err))
+ extras, NULL, err))
goto error_return;
}
@@ -744,6 +744,11 @@ static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
continue;
+ if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) &&
+ (iter->iter0->flags & REF_ISSYMREF) &&
+ (iter->iter0->flags & REF_ISBROKEN))
+ continue;
+
if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
!ref_resolves_to_object(iter->iter0->refname,
iter->iter0->oid,
@@ -854,39 +859,112 @@ static struct ref_iterator *files_ref_iterator_begin(
}
/*
- * Verify that the reference locked by lock has the value old_oid
- * (unless it is NULL). Fail if the reference doesn't exist and
- * mustexist is set. Return 0 on success. On error, write an error
- * message to err, set errno, and return a negative value.
+ * Callback function for raceproof_create_file(). This function is
+ * expected to do something that makes dirname(path) permanent despite
+ * the fact that other processes might be cleaning up empty
+ * directories at the same time. Usually it will create a file named
+ * path, but alternatively it could create another file in that
+ * directory, or even chdir() into that directory. The function should
+ * return 0 if the action was completed successfully. On error, it
+ * should return a nonzero result and set errno.
+ * raceproof_create_file() treats two errno values specially:
+ *
+ * - ENOENT -- dirname(path) does not exist. In this case,
+ * raceproof_create_file() tries creating dirname(path)
+ * (and any parent directories, if necessary) and calls
+ * the function again.
+ *
+ * - EISDIR -- the file already exists and is a directory. In this
+ * case, raceproof_create_file() removes the directory if
+ * it is empty (and recursively any empty directories that
+ * it contains) and calls the function again.
+ *
+ * Any other errno causes raceproof_create_file() to fail with the
+ * callback's return value and errno.
+ *
+ * Obviously, this function should be OK with being called again if it
+ * fails with ENOENT or EISDIR. In other scenarios it will not be
+ * called again.
+ */
+typedef int create_file_fn(const char *path, void *cb);
+
+/*
+ * Create a file in dirname(path) by calling fn, creating leading
+ * directories if necessary. Retry a few times in case we are racing
+ * with another process that is trying to clean up the directory that
+ * contains path. See the documentation for create_file_fn for more
+ * details.
+ *
+ * Return the value and set the errno that resulted from the most
+ * recent call of fn. fn is always called at least once, and will be
+ * called more than once if it returns ENOENT or EISDIR.
*/
-static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
- const struct object_id *old_oid, int mustexist,
- struct strbuf *err)
+static int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
{
- assert(err);
+ /*
+ * The number of times we will try to remove empty directories
+ * in the way of path. This is only 1 because if another
+ * process is racily creating directories that conflict with
+ * us, we don't want to fight against them.
+ */
+ int remove_directories_remaining = 1;
- if (refs_read_ref_full(ref_store, lock->ref_name,
- mustexist ? RESOLVE_REF_READING : 0,
- &lock->old_oid, NULL)) {
- if (old_oid) {
- int save_errno = errno;
- strbuf_addf(err, "can't verify ref '%s'", lock->ref_name);
- errno = save_errno;
- return -1;
- } else {
- oidclr(&lock->old_oid);
- return 0;
- }
- }
- if (old_oid && !oideq(&lock->old_oid, old_oid)) {
- strbuf_addf(err, "ref '%s' is at %s but expected %s",
- lock->ref_name,
- oid_to_hex(&lock->old_oid),
- oid_to_hex(old_oid));
- errno = EBUSY;
- return -1;
+ /*
+ * The number of times that we will try to create the
+ * directories containing path. We are willing to attempt this
+ * more than once, because another process could be trying to
+ * clean up empty directories at the same time as we are
+ * trying to create them.
+ */
+ int create_directories_remaining = 3;
+
+ /* A scratch copy of path, filled lazily if we need it: */
+ struct strbuf path_copy = STRBUF_INIT;
+
+ int ret, save_errno;
+
+ /* Sanity check: */
+ assert(*path);
+
+retry_fn:
+ ret = fn(path, cb);
+ save_errno = errno;
+ if (!ret)
+ goto out;
+
+ if (errno == EISDIR && remove_directories_remaining-- > 0) {
+ /*
+ * A directory is in the way. Maybe it is empty; try
+ * to remove it:
+ */
+ if (!path_copy.len)
+ strbuf_addstr(&path_copy, path);
+
+ if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
+ goto retry_fn;
+ } else if (errno == ENOENT && create_directories_remaining-- > 0) {
+ /*
+ * Maybe the containing directory didn't exist, or
+ * maybe it was just deleted by a process that is
+ * racing with us to clean up empty directories. Try
+ * to create it:
+ */
+ enum scld_error scld_result;
+
+ if (!path_copy.len)
+ strbuf_addstr(&path_copy, path);
+
+ do {
+ scld_result = safe_create_leading_directories(path_copy.buf);
+ if (scld_result == SCLD_OK)
+ goto retry_fn;
+ } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
}
- return 0;
+
+out:
+ strbuf_release(&path_copy);
+ errno = save_errno;
+ return ret;
}
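
/*
 * Editor's illustrative sketch (not part of this commit): a minimal
 * create_file_fn callback and caller for raceproof_create_file().
 * The callback name, the open() flags, and example_create_marker()
 * are assumptions made for illustration only; the real caller added
 * by this patch is create_reflock(), which uses the lockfile API.
 */
static int create_marker_file(const char *path, void *cb)
{
	int *fd = cb;

	/* On failure, open() has already set errno for our caller. */
	*fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0666);
	return *fd < 0 ? -1 : 0;
}

static int example_create_marker(const char *path)
{
	int fd = -1;

	if (raceproof_create_file(path, create_marker_file, &fd))
		return error_errno("could not create '%s'", path);
	close(fd);
	return 0;
}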
static int remove_empty_directories(struct strbuf *path)
@@ -910,64 +988,27 @@ static int create_reflock(const char *path, void *cb)
/*
* Locks a ref returning the lock on success and NULL on failure.
- * On failure errno is set to something meaningful.
*/
static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
- const char *refname,
- const struct object_id *old_oid,
- const struct string_list *extras,
- const struct string_list *skip,
- unsigned int flags, int *type,
+ const char *refname, int *type,
struct strbuf *err)
{
struct strbuf ref_file = STRBUF_INIT;
struct ref_lock *lock;
- int last_errno = 0;
- int mustexist = (old_oid && !is_null_oid(old_oid));
- int resolve_flags = RESOLVE_REF_NO_RECURSE;
- int resolved;
files_assert_main_repository(refs, "lock_ref_oid_basic");
assert(err);
CALLOC_ARRAY(lock, 1);
- if (mustexist)
- resolve_flags |= RESOLVE_REF_READING;
- if (flags & REF_DELETING)
- resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME;
-
files_ref_path(refs, &ref_file, refname);
- resolved = !!refs_resolve_ref_unsafe(&refs->base,
- refname, resolve_flags,
- &lock->old_oid, type);
- if (!resolved && errno == EISDIR) {
- /*
- * we are trying to lock foo but we used to
- * have foo/bar which now does not exist;
- * it is normal for the empty directory 'foo'
- * to remain.
- */
- if (remove_empty_directories(&ref_file)) {
- last_errno = errno;
- if (!refs_verify_refname_available(
- &refs->base,
- refname, extras, skip, err))
- strbuf_addf(err, "there are still refs under '%s'",
- refname);
- goto error_return;
- }
- resolved = !!refs_resolve_ref_unsafe(&refs->base,
- refname, resolve_flags,
- &lock->old_oid, type);
- }
- if (!resolved) {
- last_errno = errno;
- if (last_errno != ENOTDIR ||
- !refs_verify_refname_available(&refs->base, refname,
- extras, skip, err))
+ if (!refs_resolve_ref_unsafe(&refs->base, refname,
+ RESOLVE_REF_NO_RECURSE,
+ &lock->old_oid, type)) {
+ if (!refs_verify_refname_available(&refs->base, refname,
+ NULL, NULL, err))
strbuf_addf(err, "unable to resolve reference '%s': %s",
- refname, strerror(last_errno));
+ refname, strerror(errno));
goto error_return;
}
@@ -980,23 +1021,20 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
*/
if (is_null_oid(&lock->old_oid) &&
refs_verify_refname_available(refs->packed_ref_store, refname,
- extras, skip, err)) {
- last_errno = ENOTDIR;
+ NULL, NULL, err))
goto error_return;
- }
lock->ref_name = xstrdup(refname);
if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
- last_errno = errno;
unable_to_lock_message(ref_file.buf, errno, err);
goto error_return;
}
- if (verify_lock(&refs->base, lock, old_oid, mustexist, err)) {
- last_errno = errno;
- goto error_return;
- }
+ if (refs_read_ref_full(&refs->base, lock->ref_name,
+ 0,
+ &lock->old_oid, NULL))
+ oidclr(&lock->old_oid);
goto out;
error_return:
@@ -1005,7 +1043,6 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
out:
strbuf_release(&ref_file);
- errno = last_errno;
return lock;
}
@@ -1415,8 +1452,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
logmoved = log;
- lock = lock_ref_oid_basic(refs, newrefname, NULL, NULL, NULL,
- REF_NO_DEREF, NULL, &err);
+ lock = lock_ref_oid_basic(refs, newrefname, NULL, &err);
if (!lock) {
if (copy)
error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
@@ -1438,8 +1474,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
goto out;
rollback:
- lock = lock_ref_oid_basic(refs, oldrefname, NULL, NULL, NULL,
- REF_NO_DEREF, NULL, &err);
+ lock = lock_ref_oid_basic(refs, oldrefname, NULL, &err);
if (!lock) {
error("unable to lock %s for rollback: %s", oldrefname, err.buf);
strbuf_release(&err);
@@ -1569,7 +1604,7 @@ static int log_ref_setup(struct files_ref_store *refs,
goto error;
}
} else {
- *logfd = open(logfile, O_APPEND | O_WRONLY, 0666);
+ *logfd = open(logfile, O_APPEND | O_WRONLY);
if (*logfd < 0) {
if (errno == ENOENT || errno == EISDIR) {
/*
@@ -1846,9 +1881,7 @@ static int files_create_symref(struct ref_store *ref_store,
struct ref_lock *lock;
int ret;
- lock = lock_ref_oid_basic(refs, refname, NULL,
- NULL, NULL, REF_NO_DEREF, NULL,
- &err);
+ lock = lock_ref_oid_basic(refs, refname, NULL, &err);
if (!lock) {
error("%s", err.buf);
strbuf_release(&err);
@@ -2416,7 +2449,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
}
ret = lock_raw_ref(refs, update->refname, mustexist,
- affected_refnames, NULL,
+ affected_refnames,
&lock, &referent,
&update->type, err);
if (ret) {
@@ -3037,7 +3070,7 @@ static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
}
static int files_reflog_expire(struct ref_store *ref_store,
- const char *refname, const struct object_id *oid,
+ const char *refname,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
@@ -3054,6 +3087,7 @@ static int files_reflog_expire(struct ref_store *ref_store,
int status = 0;
int type;
struct strbuf err = STRBUF_INIT;
+ const struct object_id *oid;
memset(&cb, 0, sizeof(cb));
cb.flags = flags;
@@ -3065,14 +3099,26 @@ static int files_reflog_expire(struct ref_store *ref_store,
* reference itself, plus we might need to update the
* reference if --updateref was specified:
*/
- lock = lock_ref_oid_basic(refs, refname, oid,
- NULL, NULL, REF_NO_DEREF,
- &type, &err);
+ lock = lock_ref_oid_basic(refs, refname, &type, &err);
if (!lock) {
error("cannot lock ref '%s': %s", refname, err.buf);
strbuf_release(&err);
return -1;
}
+ oid = &lock->old_oid;
+
+ /*
+ * When refs are deleted, their reflog is deleted before the
+ * ref itself is deleted. This is because there is no separate
+ * lock for reflog; instead we take a lock on the ref with
+ * lock_ref_oid_basic().
+ *
+ * If a race happens and the reflog doesn't exist after we've
+ * acquired the lock, that's OK. We've got nothing more to do;
+ * we were asked to delete the reflog, but someone else
+ * deleted it! The caller doesn't care who deleted it,
+ * just that it is deleted. So we can return successfully.
+ */
if (!refs_reflog_exists(ref_store, refname)) {
unlock_ref(lock);
return 0;
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index f8aa97d799..47247a1491 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -724,9 +724,9 @@ static struct snapshot *get_snapshot(struct packed_ref_store *refs)
return refs->snapshot;
}
-static int packed_read_raw_ref(struct ref_store *ref_store,
- const char *refname, struct object_id *oid,
- struct strbuf *referent, unsigned int *type)
+static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
+ struct object_id *oid, struct strbuf *referent,
+ unsigned int *type, int *failure_errno)
{
struct packed_ref_store *refs =
packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
@@ -739,7 +739,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store,
if (!rec) {
/* refname is not a packed reference. */
- errno = ENOENT;
+ *failure_errno = ENOENT;
return -1;
}
@@ -1600,6 +1600,7 @@ static int packed_for_each_reflog_ent(struct ref_store *ref_store,
const char *refname,
each_reflog_ent_fn fn, void *cb_data)
{
+ BUG("packed reference store does not support reflogs");
return 0;
}
@@ -1608,12 +1609,14 @@ static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
each_reflog_ent_fn fn,
void *cb_data)
{
+ BUG("packed reference store does not support reflogs");
return 0;
}
static int packed_reflog_exists(struct ref_store *ref_store,
const char *refname)
{
+ BUG("packed reference store does not support reflogs");
return 0;
}
@@ -1627,17 +1630,19 @@ static int packed_create_reflog(struct ref_store *ref_store,
static int packed_delete_reflog(struct ref_store *ref_store,
const char *refname)
{
+ BUG("packed reference store does not support reflogs");
return 0;
}
static int packed_reflog_expire(struct ref_store *ref_store,
- const char *refname, const struct object_id *oid,
+ const char *refname,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
reflog_expiry_cleanup_fn cleanup_fn,
void *policy_cb_data)
{
+ BUG("packed reference store does not support reflogs");
return 0;
}
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index 49d732f6db..a5ad8a39fb 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -49,7 +49,7 @@ struct ref_cache *create_ref_cache(struct ref_store *refs,
ret->ref_store = refs;
ret->fill_ref_dir = fill_ref_dir;
- ret->root = create_dir_entry(ret, "", 0, 1);
+ ret->root = create_dir_entry(ret, "", 0);
return ret;
}
@@ -86,14 +86,13 @@ static void clear_ref_dir(struct ref_dir *dir)
}
struct ref_entry *create_dir_entry(struct ref_cache *cache,
- const char *dirname, size_t len,
- int incomplete)
+ const char *dirname, size_t len)
{
struct ref_entry *direntry;
FLEX_ALLOC_MEM(direntry, name, dirname, len);
direntry->u.subdir.cache = cache;
- direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
+ direntry->flag = REF_DIR | REF_INCOMPLETE;
return direntry;
}
@@ -144,30 +143,19 @@ int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
/*
* Search for a directory entry directly within dir (without
* recursing). Sort dir if necessary. subdirname must be a directory
- * name (i.e., end in '/'). If mkdir is set, then create the
- * directory if it is missing; otherwise, return NULL if the desired
+ * name (i.e., end in '/'). Returns NULL if the desired
* directory cannot be found. dir must already be complete.
*/
static struct ref_dir *search_for_subdir(struct ref_dir *dir,
- const char *subdirname, size_t len,
- int mkdir)
+ const char *subdirname, size_t len)
{
int entry_index = search_ref_dir(dir, subdirname, len);
struct ref_entry *entry;
- if (entry_index == -1) {
- if (!mkdir)
- return NULL;
- /*
- * Since dir is complete, the absence of a subdir
- * means that the subdir really doesn't exist;
- * therefore, create an empty record for it but mark
- * the record complete.
- */
- entry = create_dir_entry(dir->cache, subdirname, len, 0);
- add_entry_to_dir(dir, entry);
- } else {
- entry = dir->entries[entry_index];
- }
+
+ if (entry_index == -1)
+ return NULL;
+
+ entry = dir->entries[entry_index];
return get_ref_dir(entry);
}
@@ -176,18 +164,17 @@ static struct ref_dir *search_for_subdir(struct ref_dir *dir,
* tree that should hold refname. If refname is a directory name
* (i.e., it ends in '/'), then return that ref_dir itself. dir must
* represent the top-level directory and must already be complete.
- * Sort ref_dirs and recurse into subdirectories as necessary. If
- * mkdir is set, then create any missing directories; otherwise,
+ * Sort ref_dirs and recurse into subdirectories as necessary. Will
* return NULL if the desired directory cannot be found.
*/
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
- const char *refname, int mkdir)
+ const char *refname)
{
const char *slash;
for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) {
size_t dirnamelen = slash - refname + 1;
struct ref_dir *subdir;
- subdir = search_for_subdir(dir, refname, dirnamelen, mkdir);
+ subdir = search_for_subdir(dir, refname, dirnamelen);
if (!subdir) {
dir = NULL;
break;
@@ -202,7 +189,7 @@ struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
{
int entry_index;
struct ref_entry *entry;
- dir = find_containing_dir(dir, refname, 0);
+ dir = find_containing_dir(dir, refname);
if (!dir)
return NULL;
entry_index = search_ref_dir(dir, refname, strlen(refname));
@@ -212,50 +199,6 @@ struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
return (entry->flag & REF_DIR) ? NULL : entry;
}
-int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
-{
- int refname_len = strlen(refname);
- int entry_index;
- struct ref_entry *entry;
- int is_dir = refname[refname_len - 1] == '/';
- if (is_dir) {
- /*
- * refname represents a reference directory. Remove
- * the trailing slash; otherwise we will get the
- * directory *representing* refname rather than the
- * one *containing* it.
- */
- char *dirname = xmemdupz(refname, refname_len - 1);
- dir = find_containing_dir(dir, dirname, 0);
- free(dirname);
- } else {
- dir = find_containing_dir(dir, refname, 0);
- }
- if (!dir)
- return -1;
- entry_index = search_ref_dir(dir, refname, refname_len);
- if (entry_index == -1)
- return -1;
- entry = dir->entries[entry_index];
-
- MOVE_ARRAY(&dir->entries[entry_index],
- &dir->entries[entry_index + 1], dir->nr - entry_index - 1);
- dir->nr--;
- if (dir->sorted > entry_index)
- dir->sorted--;
- free_ref_entry(entry);
- return dir->nr;
-}
-
-int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref)
-{
- dir = find_containing_dir(dir, ref->name, 1);
- if (!dir)
- return -1;
- add_entry_to_dir(dir, ref);
- return 0;
-}
-
/*
* Emit a warning and return true iff ref1 and ref2 have the same name
* and the same oid. Die if they have the same name but different
@@ -522,7 +465,7 @@ struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
dir = get_ref_dir(cache->root);
if (prefix && *prefix)
- dir = find_containing_dir(dir, prefix, 0);
+ dir = find_containing_dir(dir, prefix);
if (!dir)
/* There's nothing to iterate over. */
return empty_ref_iterator_begin();
diff --git a/refs/ref-cache.h b/refs/ref-cache.h
index 3bfb89d2b3..5c042ae718 100644
--- a/refs/ref-cache.h
+++ b/refs/ref-cache.h
@@ -169,8 +169,7 @@ struct ref_dir *get_ref_dir(struct ref_entry *entry);
* "refs/heads/") or "" for the top-level directory.
*/
struct ref_entry *create_dir_entry(struct ref_cache *cache,
- const char *dirname, size_t len,
- int incomplete);
+ const char *dirname, size_t len);
struct ref_entry *create_ref_entry(const char *refname,
const struct object_id *oid, int flag);
@@ -200,29 +199,6 @@ void free_ref_cache(struct ref_cache *cache);
void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry);
/*
- * Remove the entry with the given name from dir, recursing into
- * subdirectories as necessary. If refname is the name of a directory
- * (i.e., ends with '/'), then remove the directory and its contents.
- * If the removal was successful, return the number of entries
- * remaining in the directory entry that contained the deleted entry.
- * If the name was not found, return -1. Please note that this
- * function only deletes the entry from the cache; it does not delete
- * it from the filesystem or ensure that other cache entries (which
- * might be symbolic references to the removed entry) are updated.
- * Nor does it remove any containing dir entries that might be made
- * empty by the removal. dir must represent the top-level directory
- * and must already be complete.
- */
-int remove_entry_from_dir(struct ref_dir *dir, const char *refname);
-
-/*
- * Add a ref_entry to the ref_dir (unsorted), recursing into
- * subdirectories as necessary. dir must represent the top-level
- * directory. Return 0 on success.
- */
-int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref);
-
-/*
* Find the value entry with the given name in dir, sorting ref_dirs
* and recursing into subdirectories as necessary. If the name is not
* found or it corresponds to a directory entry, return NULL.
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 3155708345..72746407fc 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -245,8 +245,36 @@ int refs_rename_ref_available(struct ref_store *refs,
/* We allow "recursive" symbolic refs. Only within reason, though */
#define SYMREF_MAXDEPTH 5
-/* Include broken references in a do_for_each_ref*() iteration: */
-#define DO_FOR_EACH_INCLUDE_BROKEN 0x01
+/*
+ * These flags are passed to refs_ref_iterator_begin() (and do_for_each_ref(),
+ * which feeds it).
+ */
+enum do_for_each_ref_flags {
+ /*
+ * Include broken references in a do_for_each_ref*() iteration, which
+ * would normally be omitted. This includes refs that point to
+ * missing objects (a true repository corruption), refs with illegal
+ * names (which we prefer not to expose to callers), and
+ * dangling symbolic refs (i.e., those that point to a non-existent
+ * ref; this is not a corruption, but as they have no valid oid, we
+ * omit them from normal iteration results).
+ */
+ DO_FOR_EACH_INCLUDE_BROKEN = (1 << 0),
+
+ /*
+ * Only include per-worktree refs in a do_for_each_ref*() iteration.
+ * Normally this will be used with a files ref_store, since that's
+ * where all reference backends will presumably store their
+ * per-worktree refs.
+ */
+ DO_FOR_EACH_PER_WORKTREE_ONLY = (1 << 1),
+
+ /*
+ * Omit dangling symrefs from output; this only has an effect with
+ * INCLUDE_BROKEN, since they are otherwise not included at all.
+ */
+ DO_FOR_EACH_OMIT_DANGLING_SYMREFS = (1 << 2),
+};
/*
* Reference iterators
@@ -349,16 +377,12 @@ int is_empty_ref_iterator(struct ref_iterator *ref_iterator);
* Return an iterator that goes over each reference in `refs` for
* which the refname begins with prefix. If trim is non-zero, then
* trim that many characters off the beginning of each refname.
- * The output is ordered by refname. The following flags are supported:
- *
- * DO_FOR_EACH_INCLUDE_BROKEN: include broken references in
- * the iteration.
- *
- * DO_FOR_EACH_PER_WORKTREE_ONLY: only produce REF_TYPE_PER_WORKTREE refs.
+ * The output is ordered by refname.
*/
struct ref_iterator *refs_ref_iterator_begin(
struct ref_store *refs,
- const char *prefix, int trim, int flags);
+ const char *prefix, int trim,
+ enum do_for_each_ref_flags flags);
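
/*
 * Editor's illustrative sketch (not part of this commit): combining
 * the flags above when starting an iteration. Per the enum comment,
 * DO_FOR_EACH_OMIT_DANGLING_SYMREFS only has an effect when
 * DO_FOR_EACH_INCLUDE_BROKEN is also given. The function name is a
 * placeholder, not a real caller in this patch.
 */
static inline struct ref_iterator *example_iterate_broken_heads(
	struct ref_store *refs)
{
	enum do_for_each_ref_flags flags =
		DO_FOR_EACH_INCLUDE_BROKEN |
		DO_FOR_EACH_OMIT_DANGLING_SYMREFS;

	return refs_ref_iterator_begin(refs, "refs/heads/", 0, flags);
}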
/*
* A callback function used to instruct merge_ref_iterator how to
@@ -446,10 +470,8 @@ void base_ref_iterator_free(struct ref_iterator *iter);
/*
* backend-specific implementation of ref_iterator_advance. For symrefs, the
* function should set REF_ISSYMREF, and it should also dereference the symref
- * to provide the OID referent. If DO_FOR_EACH_INCLUDE_BROKEN is set, symrefs
- * with non-existent referents and refs pointing to non-existent object names
- * should also be returned. If DO_FOR_EACH_PER_WORKTREE_ONLY, only
- * REF_TYPE_PER_WORKTREE refs should be returned.
+ * to provide the OID referent. It should respect do_for_each_ref_flags
+ * that were passed to refs_ref_iterator_begin().
*/
typedef int ref_iterator_advance_fn(struct ref_iterator *ref_iterator);
@@ -498,14 +520,6 @@ int do_for_each_repo_ref_iterator(struct repository *r,
struct ref_iterator *iter,
each_repo_ref_fn fn, void *cb_data);
-/*
- * Only include per-worktree refs in a do_for_each_ref*() iteration.
- * Normally this will be used with a files ref_store, since that's
- * where all reference backends will presumably store their
- * per-worktree refs.
- */
-#define DO_FOR_EACH_PER_WORKTREE_ONLY 0x02
-
struct ref_store;
/* refs backends */
@@ -593,7 +607,7 @@ typedef int create_reflog_fn(struct ref_store *ref_store, const char *refname,
int force_create, struct strbuf *err);
typedef int delete_reflog_fn(struct ref_store *ref_store, const char *refname);
typedef int reflog_expire_fn(struct ref_store *ref_store,
- const char *refname, const struct object_id *oid,
+ const char *refname,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
@@ -620,11 +634,15 @@ typedef int reflog_expire_fn(struct ref_store *ref_store,
* properly-formatted or even safe reference name. NEITHER INPUT NOR
* OUTPUT REFERENCE NAMES ARE VALIDATED WITHIN THIS FUNCTION.
*
- * Return 0 on success. If the ref doesn't exist, set errno to ENOENT
- * and return -1. If the ref exists but is neither a symbolic ref nor
- * an object ID, it is broken; set REF_ISBROKEN in type, set errno to
- * EINVAL, and return -1. If there is another error reading the ref,
- * set errno appropriately and return -1.
+ * Return 0 on success, or -1 on failure. If the ref exists but is neither a
+ * symbolic ref nor an object ID, it is broken. In this case set REF_ISBROKEN in
+ * type, and return -1 (failure_errno should not be ENOENT).
+ *
+ * failure_errno provides errno codes that are interpreted beyond error
+ * reporting. The following error codes have special meaning:
+ * * ENOENT: the ref doesn't exist
+ * * EISDIR: ref name is a directory
+ * * ENOTDIR: ref prefix is not a directory
*
* Backend-specific flags might be set in type as well, regardless of
* outcome.
@@ -638,9 +656,9 @@ typedef int reflog_expire_fn(struct ref_store *ref_store,
* - in all other cases, referent will be untouched, and therefore
* refname will still be valid and unchanged.
*/
-typedef int read_raw_ref_fn(struct ref_store *ref_store,
- const char *refname, struct object_id *oid,
- struct strbuf *referent, unsigned int *type);
+typedef int read_raw_ref_fn(struct ref_store *ref_store, const char *refname,
+ struct object_id *oid, struct strbuf *referent,
+ unsigned int *type, int *failure_errno);
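
/*
 * Editor's illustrative sketch (not part of this commit): how a caller
 * might consult failure_errno instead of the global errno. The helper
 * name is a placeholder; the "be->read_raw_ref" indirection is the
 * backend vtable used elsewhere in this file.
 */
static inline int example_raw_ref_exists(struct ref_store *refs,
					 const char *refname)
{
	struct object_id oid;
	struct strbuf referent = STRBUF_INIT;
	unsigned int type;
	int failure_errno = 0;
	int ret;

	ret = refs->be->read_raw_ref(refs, refname, &oid, &referent,
				     &type, &failure_errno);
	strbuf_release(&referent);
	if (!ret)
		return 1;				/* ref exists */
	return failure_errno == ENOENT ? 0 : -1;	/* missing vs. error */
}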
struct ref_storage_be {
struct ref_storage_be *next;
diff --git a/remote-curl.c b/remote-curl.c
index 598cff7cde..5975103b96 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -499,6 +499,10 @@ static struct discovery *discover_refs(const char *service, int for_push)
show_http_message(&type, &charset, &buffer);
die(_("Authentication failed for '%s'"),
transport_anonymize_url(url.buf));
+ case HTTP_NOMATCHPUBLICKEY:
+ show_http_message(&type, &charset, &buffer);
+ die(_("unable to access '%s' with http.pinnedPubkey configuration: %s"),
+ transport_anonymize_url(url.buf), curl_errorstr);
default:
show_http_message(&type, &charset, &buffer);
die(_("unable to access '%s': %s"),
@@ -1478,8 +1482,8 @@ int cmd_main(int argc, const char **argv)
options.verbosity = 1;
options.progress = !!isatty(2);
options.thin = 1;
- string_list_init(&options.deepen_not, 1);
- string_list_init(&options.push_options, 1);
+ string_list_init_dup(&options.deepen_not);
+ string_list_init_dup(&options.push_options);
/*
* Just report "remote-curl" here (folding all the various aliases
diff --git a/remote.c b/remote.c
index 31e141b01f..f958543d70 100644
--- a/remote.c
+++ b/remote.c
@@ -2403,7 +2403,7 @@ struct reflog_commit_array {
size_t nr, alloc;
};
-#define REFLOG_COMMIT_ARRAY_INIT { NULL, 0, 0 }
+#define REFLOG_COMMIT_ARRAY_INIT { 0 }
/* Append a commit to the array. */
static void append_commit(struct reflog_commit_array *arr,
diff --git a/repo-settings.c b/repo-settings.c
index 0cfe8b787d..b93e91a212 100644
--- a/repo-settings.c
+++ b/repo-settings.c
@@ -3,40 +3,77 @@
#include "repository.h"
#include "midx.h"
-#define UPDATE_DEFAULT_BOOL(s,v) do { if (s == -1) { s = v; } } while(0)
+static void repo_cfg_bool(struct repository *r, const char *key, int *dest,
+ int def)
+{
+ if (repo_config_get_bool(r, key, dest))
+ *dest = def;
+}
void prepare_repo_settings(struct repository *r)
{
+ int experimental;
int value;
char *strval;
+ int manyfiles;
- if (r->settings.initialized)
+ if (r->settings.initialized++)
return;
/* Defaults */
- memset(&r->settings, -1, sizeof(r->settings));
+ r->settings.index_version = -1;
+ r->settings.core_untracked_cache = UNTRACKED_CACHE_KEEP;
+ r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_DEFAULT;
+
+ /* Booleans config or default, cascades to other settings */
+ repo_cfg_bool(r, "feature.manyfiles", &manyfiles, 0);
+ repo_cfg_bool(r, "feature.experimental", &experimental, 0);
+
+ /* Defaults modified by feature.* */
+ if (experimental) {
+ r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING;
+ }
+ if (manyfiles) {
+ r->settings.index_version = 4;
+ r->settings.core_untracked_cache = UNTRACKED_CACHE_WRITE;
+ }
- if (!repo_config_get_bool(r, "core.commitgraph", &value))
- r->settings.core_commit_graph = value;
- if (!repo_config_get_bool(r, "commitgraph.readchangedpaths", &value))
- r->settings.commit_graph_read_changed_paths = value;
- if (!repo_config_get_bool(r, "gc.writecommitgraph", &value))
- r->settings.gc_write_commit_graph = value;
- UPDATE_DEFAULT_BOOL(r->settings.core_commit_graph, 1);
- UPDATE_DEFAULT_BOOL(r->settings.commit_graph_read_changed_paths, 1);
- UPDATE_DEFAULT_BOOL(r->settings.gc_write_commit_graph, 1);
+ /* Boolean config or default, does not cascade (simple) */
+ repo_cfg_bool(r, "core.commitgraph", &r->settings.core_commit_graph, 1);
+ repo_cfg_bool(r, "commitgraph.readchangedpaths", &r->settings.commit_graph_read_changed_paths, 1);
+ repo_cfg_bool(r, "gc.writecommitgraph", &r->settings.gc_write_commit_graph, 1);
+ repo_cfg_bool(r, "fetch.writecommitgraph", &r->settings.fetch_write_commit_graph, 0);
+ repo_cfg_bool(r, "pack.usesparse", &r->settings.pack_use_sparse, 1);
+ repo_cfg_bool(r, "core.multipackindex", &r->settings.core_multi_pack_index, 1);
+ repo_cfg_bool(r, "index.sparse", &r->settings.sparse_index, 0);
+ /*
+ * The GIT_TEST_MULTI_PACK_INDEX variable is special in that
+ * either it *or* the config can set
+ * r->settings.core_multi_pack_index to true. Unlike most other
+ * settings, the environment variable does not override the
+ * config when it is false; it is only honored when true.
+ */
+ if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0))
+ r->settings.core_multi_pack_index = 1;
+
+ /*
+ * Non-boolean config
+ */
if (!repo_config_get_int(r, "index.version", &value))
r->settings.index_version = value;
- if (!repo_config_get_maybe_bool(r, "core.untrackedcache", &value)) {
- if (value == 0)
- r->settings.core_untracked_cache = UNTRACKED_CACHE_REMOVE;
- else
- r->settings.core_untracked_cache = UNTRACKED_CACHE_WRITE;
- } else if (!repo_config_get_string(r, "core.untrackedcache", &strval)) {
- if (!strcasecmp(strval, "keep"))
- r->settings.core_untracked_cache = UNTRACKED_CACHE_KEEP;
+ if (!repo_config_get_string(r, "core.untrackedcache", &strval)) {
+ int v = git_parse_maybe_bool(strval);
+
+ /*
+ * If it's set to "keep" or some other non-boolean value,
+ * git_parse_maybe_bool() returns a negative value; we then do
+ * nothing and keep the default of UNTRACKED_CACHE_KEEP.
+ */
+ if (v >= 0)
+ r->settings.core_untracked_cache = v ?
+ UNTRACKED_CACHE_WRITE : UNTRACKED_CACHE_REMOVE;
free(strval);
}
@@ -45,39 +82,8 @@ void prepare_repo_settings(struct repository *r)
r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING;
else if (!strcasecmp(strval, "noop"))
r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_NOOP;
- else
- r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_DEFAULT;
}
- if (!repo_config_get_bool(r, "pack.usesparse", &value))
- r->settings.pack_use_sparse = value;
- UPDATE_DEFAULT_BOOL(r->settings.pack_use_sparse, 1);
-
- value = git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0);
- if (value || !repo_config_get_bool(r, "core.multipackindex", &value))
- r->settings.core_multi_pack_index = value;
- UPDATE_DEFAULT_BOOL(r->settings.core_multi_pack_index, 1);
-
- if (!repo_config_get_bool(r, "feature.manyfiles", &value) && value) {
- UPDATE_DEFAULT_BOOL(r->settings.index_version, 4);
- UPDATE_DEFAULT_BOOL(r->settings.core_untracked_cache, UNTRACKED_CACHE_WRITE);
- }
-
- if (!repo_config_get_bool(r, "fetch.writecommitgraph", &value))
- r->settings.fetch_write_commit_graph = value;
- UPDATE_DEFAULT_BOOL(r->settings.fetch_write_commit_graph, 0);
-
- if (!repo_config_get_bool(r, "feature.experimental", &value) && value)
- UPDATE_DEFAULT_BOOL(r->settings.fetch_negotiation_algorithm, FETCH_NEGOTIATION_SKIPPING);
-
- /* Hack for test programs like test-dump-untracked-cache */
- if (ignore_untracked_cache_config)
- r->settings.core_untracked_cache = UNTRACKED_CACHE_KEEP;
- else
- UPDATE_DEFAULT_BOOL(r->settings.core_untracked_cache, UNTRACKED_CACHE_KEEP);
-
- UPDATE_DEFAULT_BOOL(r->settings.fetch_negotiation_algorithm, FETCH_NEGOTIATION_DEFAULT);
-
/*
* This setting guards all index reads to require a full index
* over a sparse index. After suitable guards are placed in the
@@ -85,11 +91,4 @@ void prepare_repo_settings(struct repository *r)
* removed.
*/
r->settings.command_requires_full_index = 1;
-
- /*
- * Initialize this as off.
- */
- r->settings.sparse_index = 0;
- if (!repo_config_get_bool(r, "index.sparse", &value) && value)
- r->settings.sparse_index = 1;
}
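
/*
 * Editor's illustrative sketch (not part of this commit): the
 * core.untrackedcache parsing above, restated as a standalone helper.
 * git_parse_maybe_bool() returns 1 or 0 for boolean-ish strings and a
 * negative value for anything else ("keep", "whatever", ...), which
 * leaves the UNTRACKED_CACHE_KEEP default in place.
 */
static enum untracked_cache_setting example_parse_untracked_cache(const char *strval)
{
	int v = git_parse_maybe_bool(strval);

	if (v < 0)
		return UNTRACKED_CACHE_KEEP;
	return v ? UNTRACKED_CACHE_WRITE : UNTRACKED_CACHE_REMOVE;
}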
diff --git a/repository.c b/repository.c
index b2bf44c6fa..c5b90ba93e 100644
--- a/repository.c
+++ b/repository.c
@@ -190,19 +190,15 @@ error:
int repo_submodule_init(struct repository *subrepo,
struct repository *superproject,
- const struct submodule *sub)
+ const char *path,
+ const struct object_id *treeish_name)
{
struct strbuf gitdir = STRBUF_INIT;
struct strbuf worktree = STRBUF_INIT;
int ret = 0;
- if (!sub) {
- ret = -1;
- goto out;
- }
-
- strbuf_repo_worktree_path(&gitdir, superproject, "%s/.git", sub->path);
- strbuf_repo_worktree_path(&worktree, superproject, "%s", sub->path);
+ strbuf_repo_worktree_path(&gitdir, superproject, "%s/.git", path);
+ strbuf_repo_worktree_path(&worktree, superproject, "%s", path);
if (repo_init(subrepo, gitdir.buf, worktree.buf)) {
/*
@@ -212,9 +208,15 @@ int repo_submodule_init(struct repository *subrepo,
* in the superproject's 'modules' directory. In this case the
* submodule would not have a worktree.
*/
+ const struct submodule *sub =
+ submodule_from_path(superproject, treeish_name, path);
+ if (!sub) {
+ ret = -1;
+ goto out;
+ }
+
strbuf_reset(&gitdir);
- strbuf_repo_git_path(&gitdir, superproject,
- "modules/%s", sub->name);
+ submodule_name_to_gitdir(&gitdir, superproject, sub->name);
if (repo_init(subrepo, gitdir.buf, NULL)) {
ret = -1;
@@ -225,7 +227,7 @@ int repo_submodule_init(struct repository *subrepo,
subrepo->submodule_prefix = xstrfmt("%s%s/",
superproject->submodule_prefix ?
superproject->submodule_prefix :
- "", sub->path);
+ "", path);
out:
strbuf_release(&gitdir);
diff --git a/repository.h b/repository.h
index 3740c93bc0..a057653981 100644
--- a/repository.h
+++ b/repository.h
@@ -13,18 +13,15 @@ struct submodule_cache;
struct promisor_remote_config;
enum untracked_cache_setting {
- UNTRACKED_CACHE_UNSET = -1,
- UNTRACKED_CACHE_REMOVE = 0,
- UNTRACKED_CACHE_KEEP = 1,
- UNTRACKED_CACHE_WRITE = 2
+ UNTRACKED_CACHE_KEEP,
+ UNTRACKED_CACHE_REMOVE,
+ UNTRACKED_CACHE_WRITE,
};
enum fetch_negotiation_setting {
- FETCH_NEGOTIATION_UNSET = -1,
- FETCH_NEGOTIATION_NONE = 0,
- FETCH_NEGOTIATION_DEFAULT = 1,
- FETCH_NEGOTIATION_SKIPPING = 2,
- FETCH_NEGOTIATION_NOOP = 3,
+ FETCH_NEGOTIATION_DEFAULT,
+ FETCH_NEGOTIATION_SKIPPING,
+ FETCH_NEGOTIATION_NOOP,
};
struct repo_settings {
@@ -34,6 +31,8 @@ struct repo_settings {
int commit_graph_read_changed_paths;
int gc_write_commit_graph;
int fetch_write_commit_graph;
+ int command_requires_full_index;
+ int sparse_index;
int index_version;
enum untracked_cache_setting core_untracked_cache;
@@ -42,9 +41,6 @@ struct repo_settings {
enum fetch_negotiation_setting fetch_negotiation_algorithm;
int core_multi_pack_index;
-
- unsigned command_requires_full_index:1,
- sparse_index:1;
};
struct repository {
@@ -172,15 +168,18 @@ void initialize_the_repository(void);
int repo_init(struct repository *r, const char *gitdir, const char *worktree);
/*
- * Initialize the repository 'subrepo' as the submodule given by the
- * struct submodule 'sub' in parent repository 'superproject'.
- * Return 0 upon success and a non-zero value upon failure, which may happen
- * if the submodule is not found, or 'sub' is NULL.
+ * Initialize the repository 'subrepo' as the submodule at the given path. If
+ * the submodule's gitdir cannot be found at <path>/.git, this function calls
+ * submodule_from_path() to try to find it. treeish_name is only used if
+ * submodule_from_path() needs to be called; see its documentation for more
+ * information.
+ * Return 0 upon success and a non-zero value upon failure.
*/
-struct submodule;
+struct object_id;
int repo_submodule_init(struct repository *subrepo,
struct repository *superproject,
- const struct submodule *sub);
+ const char *path,
+ const struct object_id *treeish_name);
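
/*
 * Editor's illustrative sketch (not part of this commit): opening a
 * submodule repository by path with the new signature. The function
 * name is a placeholder; passing null_oid() as treeish_name follows
 * the submodule_from_path() convention for "the current checkout".
 */
static inline int example_open_submodule(struct repository *superproject,
					 const char *path)
{
	struct repository subrepo;

	if (repo_submodule_init(&subrepo, superproject, path, null_oid()))
		return error("could not open submodule at '%s'", path);
	/* ... use subrepo for lookups ... */
	repo_clear(&subrepo);
	return 0;
}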
void repo_clear(struct repository *repo);
/*
diff --git a/reset.c b/reset.c
index 79310ae071..f214df3d96 100644
--- a/reset.c
+++ b/reset.c
@@ -56,9 +56,10 @@ int reset_head(struct repository *r, struct object_id *oid, const char *action,
unpack_tree_opts.fn = reset_hard ? oneway_merge : twoway_merge;
unpack_tree_opts.update = 1;
unpack_tree_opts.merge = 1;
+ unpack_tree_opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
init_checkout_metadata(&unpack_tree_opts.meta, switch_to_branch, oid, NULL);
if (!detach_head)
- unpack_tree_opts.reset = 1;
+ unpack_tree_opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
if (repo_read_index_unmerged(r) < 0) {
ret = error(_("could not read index"));
diff --git a/revision.c b/revision.c
index 0dabb5a0bc..ab7c135804 100644
--- a/revision.c
+++ b/revision.c
@@ -249,7 +249,7 @@ struct commit_stack {
struct commit **items;
size_t nr, alloc;
};
-#define COMMIT_STACK_INIT { NULL, 0, 0 }
+#define COMMIT_STACK_INIT { 0 }
static void commit_stack_push(struct commit_stack *stack, struct commit *commit)
{
@@ -2548,7 +2548,7 @@ static int for_each_bisect_ref(struct ref_store *refs, each_ref_fn fn,
struct strbuf bisect_refs = STRBUF_INIT;
int status;
strbuf_addf(&bisect_refs, "refs/bisect/%s", term);
- status = refs_for_each_fullref_in(refs, bisect_refs.buf, fn, cb_data, 0);
+ status = refs_for_each_fullref_in(refs, bisect_refs.buf, fn, cb_data);
strbuf_release(&bisect_refs);
return status;
}
@@ -2563,8 +2563,7 @@ static int for_each_good_bisect_ref(struct ref_store *refs, each_ref_fn fn, void
return for_each_bisect_ref(refs, fn, cb_data, term_good);
}
-static int handle_revision_pseudo_opt(const char *submodule,
- struct rev_info *revs,
+static int handle_revision_pseudo_opt(struct rev_info *revs,
const char **argv, int *flags)
{
const char *arg = argv[0];
@@ -2572,7 +2571,7 @@ static int handle_revision_pseudo_opt(const char *submodule,
struct ref_store *refs;
int argcount;
- if (submodule) {
+ if (revs->repo != the_repository) {
/*
* We need something like get_submodule_worktrees()
* before we can go through all worktrees of a submodule,
@@ -2581,9 +2580,8 @@ static int handle_revision_pseudo_opt(const char *submodule,
*/
if (!revs->single_worktree)
BUG("--single-worktree cannot be used together with submodule");
- refs = get_submodule_ref_store(submodule);
- } else
- refs = get_main_ref_store(revs->repo);
+ }
+ refs = get_main_ref_store(revs->repo);
/*
* NOTE!
@@ -2707,12 +2705,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
{
int i, flags, left, seen_dashdash, revarg_opt;
struct strvec prune_data = STRVEC_INIT;
- const char *submodule = NULL;
int seen_end_of_options = 0;
- if (opt)
- submodule = opt->submodule;
-
/* First, search for "--" */
if (opt && opt->assume_dashdash) {
seen_dashdash = 1;
@@ -2741,7 +2735,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
if (!seen_end_of_options && *arg == '-') {
int opts;
- opts = handle_revision_pseudo_opt(submodule,
+ opts = handle_revision_pseudo_opt(
revs, argv + i,
&flags);
if (opts > 0) {
diff --git a/revision.h b/revision.h
index 0c65a760ee..5578bb4720 100644
--- a/revision.h
+++ b/revision.h
@@ -336,7 +336,6 @@ extern volatile show_early_output_fn_t show_early_output;
struct setup_revision_opt {
const char *def;
void (*tweak)(struct rev_info *, struct setup_revision_opt *);
- const char *submodule; /* TODO: drop this and use rev_info->repo */
unsigned int assume_dashdash:1,
allow_exclude_promisor_objects:1;
unsigned revarg_opt;
diff --git a/run-command.c b/run-command.c
index 3e4e082e94..7ef5cc712a 100644
--- a/run-command.c
+++ b/run-command.c
@@ -8,6 +8,8 @@
#include "string-list.h"
#include "quote.h"
#include "config.h"
+#include "packfile.h"
+#include "hook.h"
void child_process_init(struct child_process *child)
{
@@ -210,9 +212,9 @@ static char *locate_in_PATH(const char *file)
return NULL;
}
-static int exists_in_PATH(const char *file)
+int exists_in_PATH(const char *command)
{
- char *r = locate_in_PATH(file);
+ char *r = locate_in_PATH(command);
int found = r != NULL;
free(r);
return found;
@@ -740,6 +742,9 @@ fail_pipe:
fflush(NULL);
+ if (cmd->close_object_store)
+ close_object_store(the_repository->objects);
+
#ifndef GIT_WINDOWS_NATIVE
{
int notify_pipe[2];
@@ -1042,6 +1047,7 @@ int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir,
cmd.use_shell = opt & RUN_USING_SHELL ? 1 : 0;
cmd.clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0;
cmd.wait_after_clean = opt & RUN_WAIT_AFTER_CLEAN ? 1 : 0;
+ cmd.close_object_store = opt & RUN_CLOSE_OBJECT_STORE ? 1 : 0;
cmd.dir = dir;
cmd.env = env;
cmd.trace2_child_class = tr2_class;
@@ -1317,40 +1323,6 @@ int async_with_fork(void)
#endif
}
-const char *find_hook(const char *name)
-{
- static struct strbuf path = STRBUF_INIT;
-
- strbuf_reset(&path);
- strbuf_git_path(&path, "hooks/%s", name);
- if (access(path.buf, X_OK) < 0) {
- int err = errno;
-
-#ifdef STRIP_EXTENSION
- strbuf_addstr(&path, STRIP_EXTENSION);
- if (access(path.buf, X_OK) >= 0)
- return path.buf;
- if (errno == EACCES)
- err = errno;
-#endif
-
- if (err == EACCES && advice_enabled(ADVICE_IGNORED_HOOK)) {
- static struct string_list advise_given = STRING_LIST_INIT_DUP;
-
- if (!string_list_lookup(&advise_given, name)) {
- string_list_insert(&advise_given, name);
- advise(_("The '%s' hook was ignored because "
- "it's not set as executable.\n"
- "You can disable this warning with "
- "`git config advice.ignoredHook false`."),
- path.buf);
- }
- }
- return NULL;
- }
- return path.buf;
-}
-
int run_hook_ve(const char *const *env, const char *name, va_list args)
{
struct child_process hook = CHILD_PROCESS_INIT;
@@ -1884,6 +1856,7 @@ int run_auto_maintenance(int quiet)
return 0;
maint.git_cmd = 1;
+ maint.close_object_store = 1;
strvec_pushl(&maint.args, "maintenance", "run", "--auto", NULL);
strvec_push(&maint.args, quiet ? "--quiet" : "--no-quiet");
@@ -1901,3 +1874,132 @@ void prepare_other_repo_env(struct strvec *env_array, const char *new_git_dir)
}
strvec_pushf(env_array, "%s=%s", GIT_DIR_ENVIRONMENT, new_git_dir);
}
+
+enum start_bg_result start_bg_command(struct child_process *cmd,
+ start_bg_wait_cb *wait_cb,
+ void *cb_data,
+ unsigned int timeout_sec)
+{
+ enum start_bg_result sbgr = SBGR_ERROR;
+ int ret;
+ int wait_status;
+ pid_t pid_seen;
+ time_t time_limit;
+
+ /*
+ * We do not allow clean-on-exit because the child process
+ * should persist in the background and possibly/probably
+ * after this process exits. So we don't want to kill the
+ * child during our atexit routine.
+ */
+ if (cmd->clean_on_exit)
+ BUG("start_bg_command() does not allow non-zero clean_on_exit");
+
+ if (!cmd->trace2_child_class)
+ cmd->trace2_child_class = "background";
+
+ ret = start_command(cmd);
+ if (ret) {
+ /*
+ * We assume that if `start_command()` fails, we
+ * either get a complete `trace2_child_start() /
+ * trace2_child_exit()` pair or it fails before the
+ * `trace2_child_start()` is emitted, so we do not
+ * need to worry about it here.
+ *
+ * We also assume that `start_command()` does not add
+ * us to the cleanup list. And that it calls
+ * `child_process_clear()`.
+ */
+ sbgr = SBGR_ERROR;
+ goto done;
+ }
+
+ time(&time_limit);
+ time_limit += timeout_sec;
+
+wait:
+ pid_seen = waitpid(cmd->pid, &wait_status, WNOHANG);
+
+ if (!pid_seen) {
+ /*
+ * The child is currently running. Ask the callback
+ * if the child is ready to do work or whether we
+ * should keep waiting for it to boot up.
+ */
+ ret = (*wait_cb)(cmd, cb_data);
+ if (!ret) {
+ /*
+ * The child is running and "ready".
+ */
+ trace2_child_ready(cmd, "ready");
+ sbgr = SBGR_READY;
+ goto done;
+ } else if (ret > 0) {
+ /*
+ * The callback said to give it more time to boot up
+ * (subject to our timeout limit).
+ */
+ time_t now;
+
+ time(&now);
+ if (now < time_limit)
+ goto wait;
+
+ /*
+ * Our timeout has expired. We don't try to
+ * kill the child, but rather let it continue
+ * (hopefully) trying to startup.
+ */
+ trace2_child_ready(cmd, "timeout");
+ sbgr = SBGR_TIMEOUT;
+ goto done;
+ } else {
+ /*
+ * The cb gave up on this child. It is still running,
+ * but our cb got an error trying to probe it.
+ */
+ trace2_child_ready(cmd, "error");
+ sbgr = SBGR_CB_ERROR;
+ goto done;
+ }
+ }
+
+ else if (pid_seen == cmd->pid) {
+ int child_code = -1;
+
+ /*
+ * The child started, but exited or was terminated
+ * before becoming "ready".
+ *
+ * We try to match the behavior of `wait_or_whine()`
+ * WRT the handling of WIFSIGNALED() and WIFEXITED()
+ * and convert the child's status to a return code for
+ * tracing purposes and emit the `trace2_child_exit()`
+ * event.
+ *
+ * We do not want the wait_or_whine() error message
+ * because we will be called by client-side library
+ * routines.
+ */
+ if (WIFEXITED(wait_status))
+ child_code = WEXITSTATUS(wait_status);
+ else if (WIFSIGNALED(wait_status))
+ child_code = WTERMSIG(wait_status) + 128;
+ trace2_child_exit(cmd, child_code);
+
+ sbgr = SBGR_DIED;
+ goto done;
+ }
+
+ else if (pid_seen < 0 && errno == EINTR)
+ goto wait;
+
+ trace2_child_exit(cmd, -1);
+ sbgr = SBGR_ERROR;
+
+done:
+ child_process_clear(cmd);
+ invalidate_lstat_cache();
+ return sbgr;
+}
diff --git a/run-command.h b/run-command.h
index af1296769f..4987826258 100644
--- a/run-command.h
+++ b/run-command.h
@@ -134,6 +134,14 @@ struct child_process {
*/
unsigned use_shell:1;
+ /**
+ * Release any open file handles to the object store before running
+ * the command; this is necessary, e.g., when the spawned process may
+ * want to repack, because repacking would delete `.pack` files (and on
+ * Windows, you cannot delete files that are still in use).
+ */
+ unsigned close_object_store:1;
+
unsigned stdout_to_stderr:1;
unsigned clean_on_exit:1;
unsigned wait_after_clean:1;
@@ -183,6 +191,18 @@ void child_process_clear(struct child_process *);
int is_executable(const char *name);
/**
+ * Check if the command exists on $PATH. This emulates the path search that
+ * execvp would perform, without actually executing the command, so it
+ * can be used before fork() to prepare to run a command using
+ * execve() or after execvp() to diagnose why it failed.
+ *
+ * The caller should ensure that command contains no directory separators.
+ *
+ * Returns 1 if it is found in $PATH or 0 if the command could not be found.
+ */
+int exists_in_PATH(const char *command);
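
/*
 * Editor's illustrative sketch (not part of this commit): diagnosing a
 * failed execvp() with exists_in_PATH(). "frotz" is a placeholder
 * command name, not a real git helper.
 */
static inline void example_diagnose_exec_failure(void)
{
	if (!exists_in_PATH("frotz"))
		error("'frotz' was not found in $PATH");
}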
+
+/**
* Start a sub-process. Takes a pointer to a `struct child_process`
* that specifies the details and returns pipe FDs (if requested).
* See below for details.
@@ -204,13 +224,6 @@ int finish_command_in_signal(struct child_process *);
*/
int run_command(struct child_process *);
-/*
- * Returns the path to the hook file, or NULL if the hook is missing
- * or disabled. Note that this points to static storage that will be
- * overwritten by further calls to find_hook and run_hook_*.
- */
-const char *find_hook(const char *name);
-
/**
* Run a hook.
* The first argument is a pathname to an index file, or NULL
@@ -233,13 +246,14 @@ int run_hook_ve(const char *const *env, const char *name, va_list args);
*/
int run_auto_maintenance(int quiet);
-#define RUN_COMMAND_NO_STDIN 1
-#define RUN_GIT_CMD 2 /*If this is to be git sub-command */
-#define RUN_COMMAND_STDOUT_TO_STDERR 4
-#define RUN_SILENT_EXEC_FAILURE 8
-#define RUN_USING_SHELL 16
-#define RUN_CLEAN_ON_EXIT 32
-#define RUN_WAIT_AFTER_CLEAN 64
+#define RUN_COMMAND_NO_STDIN (1<<0)
+#define RUN_GIT_CMD (1<<1)
+#define RUN_COMMAND_STDOUT_TO_STDERR (1<<2)
+#define RUN_SILENT_EXEC_FAILURE (1<<3)
+#define RUN_USING_SHELL (1<<4)
+#define RUN_CLEAN_ON_EXIT (1<<5)
+#define RUN_WAIT_AFTER_CLEAN (1<<6)
+#define RUN_CLOSE_OBJECT_STORE (1<<7)
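
/*
 * Editor's illustrative sketch (not part of this commit): passing the
 * new flag through the run_command_v_opt() convenience wrapper. The
 * "gc --auto" child is only an example of a command that may repack
 * and therefore needs the parent's pack files closed first (see the
 * close_object_store bit in struct child_process above).
 */
static inline int example_spawn_gc(void)
{
	const char *argv[] = { "gc", "--auto", NULL };

	return run_command_v_opt(argv, RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE);
}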
/**
* Convenience functions that encapsulate a sequence of
@@ -496,4 +510,61 @@ int run_processes_parallel_tr2(int n, get_next_task_fn, start_failure_fn,
*/
void prepare_other_repo_env(struct strvec *env_array, const char *new_git_dir);
+/**
+ * Possible return values for start_bg_command().
+ */
+enum start_bg_result {
+ /* child process is "ready" */
+ SBGR_READY = 0,
+
+ /* child process could not be started */
+ SBGR_ERROR,
+
+ /* callback error when testing for "ready" */
+ SBGR_CB_ERROR,
+
+ /* timeout expired waiting for child to become "ready" */
+ SBGR_TIMEOUT,
+
+ /* child process exited or was signalled before becoming "ready" */
+ SBGR_DIED,
+};
+
+/**
+ * Callback used by start_bg_command() to ask whether the
+ * child process is ready or needs more time to become "ready".
+ *
+ * The callback will receive the cmd and cb_data arguments given to
+ * start_bg_command().
+ *
+ * Returns 1 if the child needs more time (subject to the requested timeout).
+ * Returns 0 if the child is "ready".
+ * Returns -1 on any error and causes start_bg_command() to also error out.
+ */
+typedef int(start_bg_wait_cb)(const struct child_process *cmd, void *cb_data);
+
+/**
+ * Start a command in the background. Wait long enough for the child
+ * to become "ready" (as defined by the provided callback). Capture
+ * immediate errors (like failure to start) and any immediate exit
+ * status (such as a shutdown/signal before the child became "ready")
+ * and return this like start_command().
+ *
+ * We run a custom wait loop using the provided callback to wait for
+ * the child to start and become "ready". This is limited by the given
+ * timeout value.
+ *
+ * If the child does successfully start and become "ready", we orphan
+ * it into the background.
+ *
+ * The caller must not call finish_command().
+ *
+ * The opaque cb_data argument will be forwarded to the callback for
+ * any instance data that it might require. This may be NULL.
+ */
+enum start_bg_result start_bg_command(struct child_process *cmd,
+ start_bg_wait_cb *wait_cb,
+ void *cb_data,
+ unsigned int timeout_sec);
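
/*
 * Editor's illustrative sketch (not part of this commit): starting a
 * long-running helper with start_bg_command() and polling for
 * readiness. The "ready file" probe, the "example-daemon" subcommand
 * and the 60-second timeout are all made up for illustration; a real
 * caller would probe its own communication channel in the callback.
 */
static int example_daemon_ready(const struct child_process *cmd, void *cb_data)
{
	const char *ready_path = cb_data;

	if (!access(ready_path, R_OK))
		return 0;	/* child is "ready" */
	return 1;		/* keep waiting, subject to the timeout */
}

static int example_spawn_daemon(void)
{
	struct child_process cmd = CHILD_PROCESS_INIT;

	cmd.git_cmd = 1;
	strvec_pushl(&cmd.args, "example-daemon", "--run", NULL);

	switch (start_bg_command(&cmd, example_daemon_ready,
				 (void *)"/tmp/example-daemon.ready", 60)) {
	case SBGR_READY:
		return 0;
	case SBGR_TIMEOUT:
		return error("daemon did not become ready in time");
	default:
		return error("failed to start daemon");
	}
}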
+
#endif
diff --git a/sequencer.c b/sequencer.c
index 6831663692..fac0b5162f 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -8,6 +8,7 @@
#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
+#include "hook.h"
#include "exec-cmd.h"
#include "utf8.h"
#include "cache-tree.h"
@@ -664,6 +665,7 @@ static int do_recursive_merge(struct repository *r,
merge_switch_to_result(&o, head_tree, &result, 1, show_output);
clean = result.clean;
} else {
+ ensure_full_index(r->index);
clean = merge_trees(&o, head_tree, next_tree, base_tree);
if (is_rebase_i(opts) && clean <= 0)
fputs(o.obuf.buf, stdout);
@@ -1458,7 +1460,7 @@ static int try_to_commit(struct repository *r,
}
}
- if (find_hook("prepare-commit-msg")) {
+ if (hook_exists("prepare-commit-msg")) {
res = run_prepare_commit_msg_hook(r, msg, hook_commit);
if (res)
goto out;
@@ -2359,6 +2361,7 @@ static int read_and_refresh_cache(struct repository *r,
_(action_name(opts)));
}
refresh_index(r->index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
+
if (index_fd >= 0) {
if (write_locked_index(r->index, &index_lock,
COMMIT_LOCK | SKIP_IF_UNCHANGED)) {
@@ -2366,6 +2369,13 @@ static int read_and_refresh_cache(struct repository *r,
_(action_name(opts)));
}
}
+
+ /*
+ * If we are resolving merges in any way other than "ort", then
+ * expand the sparse index.
+ */
+ if (opts->strategy && strcmp(opts->strategy, "ort"))
+ ensure_full_index(r->index);
return 0;
}
@@ -2684,7 +2694,6 @@ static int read_populate_todo(struct repository *r,
struct todo_list *todo_list,
struct replay_opts *opts)
{
- struct stat st;
const char *todo_file = get_todo_path(opts);
int res;
@@ -2692,11 +2701,6 @@ static int read_populate_todo(struct repository *r,
if (strbuf_read_file_or_whine(&todo_list->buf, todo_file) < 0)
return -1;
- res = stat(todo_file, &st);
- if (res)
- return error(_("could not stat '%s'"), todo_file);
- fill_stat_data(&todo_list->stat, &st);
-
res = todo_list_parse_insn_buffer(r, todo_list->buf.buf, todo_list);
if (res) {
if (is_rebase_i(opts))
@@ -3690,6 +3694,7 @@ static int do_reset(struct repository *r,
unpack_tree_opts.fn = oneway_merge;
unpack_tree_opts.merge = 1;
unpack_tree_opts.update = 1;
+ unpack_tree_opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
init_checkout_metadata(&unpack_tree_opts.meta, name, &oid, NULL);
if (repo_read_index_unmerged(r)) {
@@ -4275,6 +4280,30 @@ static int stopped_at_head(struct repository *r)
}
+static int reread_todo_if_changed(struct repository *r,
+ struct todo_list *todo_list,
+ struct replay_opts *opts)
+{
+ int offset;
+ struct strbuf buf = STRBUF_INIT;
+
+ if (strbuf_read_file_or_whine(&buf, get_todo_path(opts)) < 0)
+ return -1;
+ offset = get_item_line_offset(todo_list, todo_list->current + 1);
+ if (buf.len != todo_list->buf.len - offset ||
+ memcmp(buf.buf, todo_list->buf.buf + offset, buf.len)) {
+ /* Reread the todo file if it has changed. */
+ todo_list_release(todo_list);
+ if (read_populate_todo(r, todo_list, opts))
+ return -1; /* message was printed */
+ /* `current` will be incremented on return */
+ todo_list->current = -1;
+ }
+ strbuf_release(&buf);
+
+ return 0;
+}
+
static const char rescheduled_advice[] =
N_("Could not execute the todo command\n"
"\n"
@@ -4453,20 +4482,9 @@ static int pick_commits(struct repository *r,
item->commit,
arg, item->arg_len,
opts, res, 0);
- } else if (is_rebase_i(opts) && check_todo && !res) {
- struct stat st;
-
- if (stat(get_todo_path(opts), &st)) {
- res = error_errno(_("could not stat '%s'"),
- get_todo_path(opts));
- } else if (match_stat_data(&todo_list->stat, &st)) {
- /* Reread the todo file if it has changed. */
- todo_list_release(todo_list);
- if (read_populate_todo(r, todo_list, opts))
- res = -1; /* message was printed */
- /* `current` will be incremented below */
- todo_list->current = -1;
- }
+ } else if (is_rebase_i(opts) && check_todo && !res &&
+ reread_todo_if_changed(r, todo_list, opts)) {
+ return -1;
}
todo_list->current++;
diff --git a/sequencer.h b/sequencer.h
index 2088344cb3..d8a1853b29 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -116,10 +116,11 @@ struct todo_list {
struct todo_item *items;
int nr, alloc, current;
int done_nr, total_nr;
- struct stat_data stat;
};
-#define TODO_LIST_INIT { STRBUF_INIT }
+#define TODO_LIST_INIT { \
+ .buf = STRBUF_INIT, \
+}
int todo_list_parse_insn_buffer(struct repository *r, char *buf,
struct todo_list *todo_list);
diff --git a/serve.c b/serve.c
index f11c0e07c4..b3fe9b5126 100644
--- a/serve.c
+++ b/serve.c
@@ -9,7 +9,8 @@
#include "serve.h"
#include "upload-pack.h"
-static int advertise_sid;
+static int advertise_sid = -1;
+static int client_hash_algo = GIT_HASH_SHA1;
static int always_advertise(struct repository *r,
struct strbuf *value)
@@ -33,8 +34,22 @@ static int object_format_advertise(struct repository *r,
return 1;
}
+static void object_format_receive(struct repository *r,
+ const char *algo_name)
+{
+ if (!algo_name)
+ die("object-format capability requires an argument");
+
+ client_hash_algo = hash_algo_by_name(algo_name);
+ if (client_hash_algo == GIT_HASH_UNKNOWN)
+ die("unknown object format '%s'", algo_name);
+}
+
static int session_id_advertise(struct repository *r, struct strbuf *value)
{
+ if (advertise_sid == -1 &&
+ git_config_get_bool("transfer.advertisesid", &advertise_sid))
+ advertise_sid = 0;
if (!advertise_sid)
return 0;
if (value)
@@ -42,6 +57,14 @@ static int session_id_advertise(struct repository *r, struct strbuf *value)
return 1;
}
+static void session_id_receive(struct repository *r,
+ const char *client_sid)
+{
+ if (!client_sid)
+ client_sid = "";
+ trace2_data_string("transfer", NULL, "client-sid", client_sid);
+}
+
struct protocol_capability {
/*
* The name of the capability. The server uses this name when
@@ -60,34 +83,70 @@ struct protocol_capability {
/*
* Function called when a client requests the capability as a command.
- * The function will be provided the capabilities requested via 'keys'
- * as well as a struct packet_reader 'request' which the command should
+ * Will be provided a struct packet_reader 'request' which it should
* use to read the command specific part of the request. Every command
* MUST read until a flush packet is seen before sending a response.
*
* This field should be NULL for capabilities which are not commands.
*/
- int (*command)(struct repository *r,
- struct strvec *keys,
- struct packet_reader *request);
+ int (*command)(struct repository *r, struct packet_reader *request);
+
+ /*
+ * Function called when a client requests the capability as a
+ * non-command. This may be NULL if the capability does nothing.
+ *
+ * For a capability of the form "foo=bar", the value string points to
+ * the content after the "=" (i.e., "bar"). For simple capabilities
+ * (just "foo"), it is NULL.
+ */
+ void (*receive)(struct repository *r, const char *value);
};
static struct protocol_capability capabilities[] = {
- { "agent", agent_advertise, NULL },
- { "ls-refs", ls_refs_advertise, ls_refs },
- { "fetch", upload_pack_advertise, upload_pack_v2 },
- { "server-option", always_advertise, NULL },
- { "object-format", object_format_advertise, NULL },
- { "session-id", session_id_advertise, NULL },
- { "object-info", always_advertise, cap_object_info },
+ {
+ .name = "agent",
+ .advertise = agent_advertise,
+ },
+ {
+ .name = "ls-refs",
+ .advertise = ls_refs_advertise,
+ .command = ls_refs,
+ },
+ {
+ .name = "fetch",
+ .advertise = upload_pack_advertise,
+ .command = upload_pack_v2,
+ },
+ {
+ .name = "server-option",
+ .advertise = always_advertise,
+ },
+ {
+ .name = "object-format",
+ .advertise = object_format_advertise,
+ .receive = object_format_receive,
+ },
+ {
+ .name = "session-id",
+ .advertise = session_id_advertise,
+ .receive = session_id_receive,
+ },
+ {
+ .name = "object-info",
+ .advertise = always_advertise,
+ .command = cap_object_info,
+ },
};
-static void advertise_capabilities(void)
+void protocol_v2_advertise_capabilities(void)
{
struct strbuf capability = STRBUF_INIT;
struct strbuf value = STRBUF_INIT;
int i;
+ /* serve by default supports v2 */
+ packet_write_fmt(1, "version 2\n");
+
for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
struct protocol_capability *c = &capabilities[i];
@@ -112,7 +171,7 @@ static void advertise_capabilities(void)
strbuf_release(&value);
}
-static struct protocol_capability *get_capability(const char *key)
+static struct protocol_capability *get_capability(const char *key, const char **value)
{
int i;
@@ -122,31 +181,46 @@ static struct protocol_capability *get_capability(const char *key)
for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
struct protocol_capability *c = &capabilities[i];
const char *out;
- if (skip_prefix(key, c->name, &out) && (!*out || *out == '='))
+ if (!skip_prefix(key, c->name, &out))
+ continue;
+ if (!*out) {
+ *value = NULL;
return c;
+ }
+ if (*out++ == '=') {
+ *value = out;
+ return c;
+ }
}
return NULL;
}
-static int is_valid_capability(const char *key)
+static int receive_client_capability(const char *key)
{
- const struct protocol_capability *c = get_capability(key);
+ const char *value;
+ const struct protocol_capability *c = get_capability(key, &value);
- return c && c->advertise(the_repository, NULL);
+ if (!c || c->command || !c->advertise(the_repository, NULL))
+ return 0;
+
+ if (c->receive)
+ c->receive(the_repository, value);
+ return 1;
}
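
/*
 * Editor's illustrative sketch (not part of this commit): the request
 * lines below are made-up examples of what receive_client_capability()
 * accepts, assuming each capability is advertised for this repository.
 */
static void example_receive_lines(void)
{
	/* "name=value" form: value is "sha256", passed to the receiver */
	receive_client_capability("object-format=sha256");
	/* simple form: value is NULL; server-option has no receiver */
	receive_client_capability("server-option");
}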
-static int is_command(const char *key, struct protocol_capability **command)
+static int parse_command(const char *key, struct protocol_capability **command)
{
const char *out;
if (skip_prefix(key, "command=", &out)) {
- struct protocol_capability *cmd = get_capability(out);
+ const char *value;
+ struct protocol_capability *cmd = get_capability(out, &value);
if (*command)
die("command '%s' requested after already requesting command '%s'",
out, (*command)->name);
- if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command)
+ if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command || value)
die("invalid command '%s'", out);
*command = cmd;
@@ -156,42 +230,6 @@ static int is_command(const char *key, struct protocol_capability **command)
return 0;
}
-int has_capability(const struct strvec *keys, const char *capability,
- const char **value)
-{
- int i;
- for (i = 0; i < keys->nr; i++) {
- const char *out;
- if (skip_prefix(keys->v[i], capability, &out) &&
- (!*out || *out == '=')) {
- if (value) {
- if (*out == '=')
- out++;
- *value = out;
- }
- return 1;
- }
- }
-
- return 0;
-}
-
-static void check_algorithm(struct repository *r, struct strvec *keys)
-{
- int client = GIT_HASH_SHA1, server = hash_algo_by_ptr(r->hash_algo);
- const char *algo_name;
-
- if (has_capability(keys, "object-format", &algo_name)) {
- client = hash_algo_by_name(algo_name);
- if (client == GIT_HASH_UNKNOWN)
- die("unknown object format '%s'", algo_name);
- }
-
- if (client != server)
- die("mismatched object format: server %s; client %s\n",
- r->hash_algo->name, hash_algos[client].name);
-}
-
enum request_state {
PROCESS_REQUEST_KEYS,
PROCESS_REQUEST_DONE,
@@ -201,9 +239,8 @@ static int process_request(void)
{
enum request_state state = PROCESS_REQUEST_KEYS;
struct packet_reader reader;
- struct strvec keys = STRVEC_INIT;
+ int seen_capability_or_command = 0;
struct protocol_capability *command = NULL;
- const char *client_sid;
packet_reader_init(&reader, 0, NULL, 0,
PACKET_READ_CHOMP_NEWLINE |
@@ -223,10 +260,9 @@ static int process_request(void)
case PACKET_READ_EOF:
BUG("Should have already died when seeing EOF");
case PACKET_READ_NORMAL:
- /* collect request; a sequence of keys and values */
- if (is_command(reader.line, &command) ||
- is_valid_capability(reader.line))
- strvec_push(&keys, reader.line);
+ if (parse_command(reader.line, &command) ||
+ receive_client_capability(reader.line))
+ seen_capability_or_command = 1;
else
die("unknown capability '%s'", reader.line);
@@ -238,7 +274,7 @@ static int process_request(void)
* If no command and no keys were given then the client
* wanted to terminate the connection.
*/
- if (!keys.nr)
+ if (!seen_capability_or_command)
return 1;
/*
@@ -265,40 +301,26 @@ static int process_request(void)
if (!command)
die("no command requested");
- check_algorithm(the_repository, &keys);
-
- if (has_capability(&keys, "session-id", &client_sid))
- trace2_data_string("transfer", NULL, "client-sid", client_sid);
+ if (client_hash_algo != hash_algo_by_ptr(the_repository->hash_algo))
+ die("mismatched object format: server %s; client %s\n",
+ the_repository->hash_algo->name,
+ hash_algos[client_hash_algo].name);
- command->command(the_repository, &keys, &reader);
+ command->command(the_repository, &reader);
- strvec_clear(&keys);
return 0;
}
-/* Main serve loop for protocol version 2 */
-void serve(struct serve_options *options)
+void protocol_v2_serve_loop(int stateless_rpc)
{
- git_config_get_bool("transfer.advertisesid", &advertise_sid);
-
- if (options->advertise_capabilities || !options->stateless_rpc) {
- /* serve by default supports v2 */
- packet_write_fmt(1, "version 2\n");
-
- advertise_capabilities();
- /*
- * If only the list of capabilities was requested exit
- * immediately after advertising capabilities
- */
- if (options->advertise_capabilities)
- return;
- }
+ if (!stateless_rpc)
+ protocol_v2_advertise_capabilities();
/*
* If stateless-rpc was requested then exit after
* a single request/response exchange
*/
- if (options->stateless_rpc) {
+ if (stateless_rpc) {
process_request();
} else {
for (;;)
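
A minimal sketch (not part of the patch; "demo" and demo_receive are invented names) of how the reworked capability table is meant to be extended: a client capability line is either a bare "foo", in which case the receive callback is handed NULL, or "foo=bar", in which case it is handed the text after the "=":

	static void demo_receive(struct repository *r, const char *value)
	{
		/* value is NULL for "demo", or the text after "=" for "demo=..." */
		trace2_data_string("serve", r, "demo", value ? value : "(unset)");
	}

	{
		.name = "demo",
		.advertise = always_advertise,
		.receive = demo_receive,
	},
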
diff --git a/serve.h b/serve.h
index fc2683e24d..f946cf904a 100644
--- a/serve.h
+++ b/serve.h
@@ -1,15 +1,7 @@
#ifndef SERVE_H
#define SERVE_H
-struct strvec;
-int has_capability(const struct strvec *keys, const char *capability,
- const char **value);
-
-struct serve_options {
- unsigned advertise_capabilities;
- unsigned stateless_rpc;
-};
-#define SERVE_OPTIONS_INIT { 0 }
-void serve(struct serve_options *options);
+void protocol_v2_advertise_capabilities(void);
+void protocol_v2_serve_loop(int stateless_rpc);
#endif /* SERVE_H */
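
With the serve_options struct gone, a caller now picks one of the two entry points directly; a minimal sketch of a hypothetical wrapper (mirroring the test helper further below):

	static void serve_v2(int advertise_only, int stateless_rpc)
	{
		if (advertise_only)
			protocol_v2_advertise_capabilities();
		else
			protocol_v2_serve_loop(stateless_rpc);
	}
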
diff --git a/setup.c b/setup.c
index eb9367ca5c..347d7181ae 100644
--- a/setup.c
+++ b/setup.c
@@ -1423,11 +1423,9 @@ const char *resolve_gitdir_gently(const char *suspect, int *return_error_code)
/* if any standard file descriptor is missing open it to /dev/null */
void sanitize_stdfds(void)
{
- int fd = open("/dev/null", O_RDWR, 0);
- while (fd != -1 && fd < 2)
- fd = dup(fd);
- if (fd == -1)
- die_errno(_("open /dev/null or dup failed"));
+ int fd = xopen("/dev/null", O_RDWR);
+ while (fd < 2)
+ fd = xdup(fd);
if (fd > 2)
close(fd);
}
diff --git a/shallow.h b/shallow.h
index 5b4a96dcd6..aba6ff5829 100644
--- a/shallow.h
+++ b/shallow.h
@@ -23,7 +23,9 @@ int is_repository_shallow(struct repository *r);
struct shallow_lock {
struct lock_file lock;
};
-#define SHALLOW_LOCK_INIT { LOCK_INIT }
+#define SHALLOW_LOCK_INIT { \
+ .lock = LOCK_INIT, \
+}
/* commit $GIT_DIR/shallow and reset stat-validity checks */
int commit_shallow_file(struct repository *r, struct shallow_lock *lk);
diff --git a/simple-ipc.h b/simple-ipc.h
index 2c48a5ee00..a849d9f841 100644
--- a/simple-ipc.h
+++ b/simple-ipc.h
@@ -5,13 +5,6 @@
* See Documentation/technical/api-simple-ipc.txt
*/
-#ifdef SUPPORTS_SIMPLE_IPC
-#include "pkt-line.h"
-
-/*
- * Simple IPC Client Side API.
- */
-
enum ipc_active_state {
/*
* The pipe/socket exists and the daemon is waiting for connections.
@@ -43,6 +36,13 @@ enum ipc_active_state {
IPC_STATE__OTHER_ERROR,
};
+#ifdef SUPPORTS_SIMPLE_IPC
+#include "pkt-line.h"
+
+/*
+ * Simple IPC Client Side API.
+ */
+
struct ipc_client_connect_options {
/*
* Spin under timeout if the server is running but can't
@@ -65,11 +65,7 @@ struct ipc_client_connect_options {
unsigned int uds_disallow_chdir:1;
};
-#define IPC_CLIENT_CONNECT_OPTIONS_INIT { \
- .wait_if_busy = 0, \
- .wait_if_not_found = 0, \
- .uds_disallow_chdir = 0, \
-}
+#define IPC_CLIENT_CONNECT_OPTIONS_INIT { 0 }
/*
* Determine if a server is listening on this named pipe or socket using
@@ -107,7 +103,8 @@ void ipc_client_close_connection(struct ipc_client_connection *connection);
*/
int ipc_client_send_command_to_connection(
struct ipc_client_connection *connection,
- const char *message, struct strbuf *answer);
+ const char *message, size_t message_len,
+ struct strbuf *answer);
/*
* Used by the client to synchronously connect and send and receive a
@@ -119,7 +116,8 @@ int ipc_client_send_command_to_connection(
*/
int ipc_client_send_command(const char *path,
const struct ipc_client_connect_options *options,
- const char *message, struct strbuf *answer);
+ const char *message, size_t message_len,
+ struct strbuf *answer);
/*
* Simple IPC Server Side API.
@@ -144,6 +142,7 @@ typedef int (ipc_server_reply_cb)(struct ipc_server_reply_data *,
*/
typedef int (ipc_server_application_cb)(void *application_data,
const char *request,
+ size_t request_len,
ipc_server_reply_cb *reply_cb,
struct ipc_server_reply_data *reply_data);
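
Since requests now carry an explicit length, a server application callback should treat the message as length-delimited rather than NUL-terminated. A minimal sketch of a callback with the updated signature (hypothetical; the real test callback is in t/helper/test-simple-ipc.c below):

	static int my_app_cb(void *application_data,
			     const char *request, size_t request_len,
			     ipc_server_reply_cb *reply_cb,
			     struct ipc_server_reply_data *reply_data)
	{
		/* compare by length; the buffer need not be NUL-terminated */
		if (request_len == 4 && !memcmp(request, "ping", 4)) {
			const char *answer = "pong";
			return reply_cb(reply_data, answer, strlen(answer));
		}
		return 0;
	}
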
diff --git a/sparse-index.c b/sparse-index.c
index 56eb65dc34..7b7ff79e04 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -33,19 +33,14 @@ static int convert_to_sparse_rec(struct index_state *istate,
{
int i, can_convert = 1;
int start_converted = num_converted;
- enum pattern_match_result match;
- int dtype = DT_UNKNOWN;
struct strbuf child_path = STRBUF_INIT;
- struct pattern_list *pl = istate->sparse_checkout_patterns;
/*
* Is the current path outside of the sparse cone?
* Then check if the region can be replaced by a sparse
* directory entry (everything is sparse and merged).
*/
- match = path_matches_pattern_list(ct_path, ct_pathlen,
- NULL, &dtype, pl, istate);
- if (match != NOT_MATCHED)
+ if (path_in_sparse_checkout(ct_path, istate))
can_convert = 0;
for (i = start; can_convert && i < end; i++) {
@@ -127,41 +122,51 @@ static int index_has_unmerged_entries(struct index_state *istate)
return 0;
}
-int convert_to_sparse(struct index_state *istate)
+int convert_to_sparse(struct index_state *istate, int flags)
{
int test_env;
- if (istate->split_index || istate->sparse_index ||
+ if (istate->sparse_index || !istate->cache_nr ||
!core_apply_sparse_checkout || !core_sparse_checkout_cone)
return 0;
if (!istate->repo)
istate->repo = the_repository;
- /*
- * The GIT_TEST_SPARSE_INDEX environment variable triggers the
- * index.sparse config variable to be on.
- */
- test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1);
- if (test_env >= 0)
- set_sparse_index_config(istate->repo, test_env);
-
- /*
- * Only convert to sparse if index.sparse is set.
- */
- prepare_repo_settings(istate->repo);
- if (!istate->repo->settings.sparse_index)
- return 0;
+ if (!(flags & SPARSE_INDEX_MEMORY_ONLY)) {
+ /*
+ * The sparse index is not (yet) integrated with a split index.
+ */
+ if (istate->split_index)
+ return 0;
+ /*
+ * The GIT_TEST_SPARSE_INDEX environment variable triggers the
+ * index.sparse config variable to be on.
+ */
+ test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1);
+ if (test_env >= 0)
+ set_sparse_index_config(istate->repo, test_env);
- if (!istate->sparse_checkout_patterns) {
- istate->sparse_checkout_patterns = xcalloc(1, sizeof(struct pattern_list));
- if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0)
+ /*
+ * Only convert to sparse if index.sparse is set.
+ */
+ prepare_repo_settings(istate->repo);
+ if (!istate->repo->settings.sparse_index)
return 0;
}
- if (!istate->sparse_checkout_patterns->use_cone_patterns) {
- warning(_("attempting to use sparse-index without cone mode"));
- return -1;
- }
+ if (init_sparse_checkout_patterns(istate))
+ return 0;
+
+ /*
+ * We need cone-mode patterns to use sparse-index. If a user edits
+ * their sparse-checkout file manually, then we can detect during
+ * parsing that they are not actually using cone-mode patterns and
+ * hence we need to abort this conversion _without error_. Warnings
+ * already exist in the pattern parsing to inform the user of their
+ * bad patterns.
+ */
+ if (!istate->sparse_checkout_patterns->use_cone_patterns)
+ return 0;
/*
* NEEDSWORK: If we have unmerged entries, then stay full.
@@ -172,10 +177,15 @@ int convert_to_sparse(struct index_state *istate)
/* Clear and recompute the cache-tree */
cache_tree_free(&istate->cache_tree);
- if (cache_tree_update(istate, 0)) {
- warning(_("unable to update cache-tree, staying full"));
- return -1;
- }
+ /*
+ * Silently return if there is a problem with the cache tree update,
+ * which might just be due to a conflict state in some entry.
+ *
+ * This might create new tree objects, so be sure to use
+ * WRITE_TREE_MISSING_OK.
+ */
+ if (cache_tree_update(istate, WRITE_TREE_MISSING_OK))
+ return 0;
remove_fsmonitor(istate);
diff --git a/sparse-index.h b/sparse-index.h
index 1115a0d7dd..9f3d7bc7fa 100644
--- a/sparse-index.h
+++ b/sparse-index.h
@@ -2,7 +2,8 @@
#define SPARSE_INDEX_H__
struct index_state;
-int convert_to_sparse(struct index_state *istate);
+#define SPARSE_INDEX_MEMORY_ONLY (1 << 0)
+int convert_to_sparse(struct index_state *istate, int flags);
/*
* Some places in the codebase expect to search for a specific path.
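
The new flag lets a caller collapse an index to its sparse form purely in memory, skipping the split-index and index.sparse checks that guard an on-disk conversion. A minimal sketch of such a caller (hypothetical function name):

	static void shrink_index_in_memory(struct index_state *istate)
	{
		/*
		 * Best effort: convert_to_sparse() returns 0 and leaves the
		 * index full whenever the conversion is not applicable.
		 */
		convert_to_sparse(istate, SPARSE_INDEX_MEMORY_ONLY);
	}
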
diff --git a/strbuf.h b/strbuf.h
index 5b1113abf8..3b36bbc49f 100644
--- a/strbuf.h
+++ b/strbuf.h
@@ -70,7 +70,7 @@ struct strbuf {
};
extern char strbuf_slopbuf[];
-#define STRBUF_INIT { .alloc = 0, .len = 0, .buf = strbuf_slopbuf }
+#define STRBUF_INIT { .buf = strbuf_slopbuf }
/*
* Predeclare this here, since cache.h includes this file before it defines the
diff --git a/string-list.c b/string-list.c
index 43576ad126..549fc416d6 100644
--- a/string-list.c
+++ b/string-list.c
@@ -13,14 +13,6 @@ void string_list_init_dup(struct string_list *list)
memcpy(list, &blank, sizeof(*list));
}
-void string_list_init(struct string_list *list, int strdup_strings)
-{
- if (strdup_strings)
- string_list_init_dup(list);
- else
- string_list_init_nodup(list);
-}
-
/* if there is no exact match, point to the index where the entry could be
* inserted */
static int get_entry_index(const struct string_list *list, const char *string,
diff --git a/string-list.h b/string-list.h
index 0d6b469239..267d6e5769 100644
--- a/string-list.h
+++ b/string-list.h
@@ -104,11 +104,6 @@ struct string_list {
void string_list_init_nodup(struct string_list *list);
void string_list_init_dup(struct string_list *list);
-/**
- * TODO remove: For compatibility with any in-flight older API users
- */
-void string_list_init(struct string_list *list, int strdup_strings);
-
/** Callback function type for for_each_string_list */
typedef int (*string_list_each_func_t)(struct string_list_item *, void *);
diff --git a/strvec.h b/strvec.h
index fdcad75b45..9f55c8766b 100644
--- a/strvec.h
+++ b/strvec.h
@@ -29,11 +29,13 @@ extern const char *empty_strvec[];
*/
struct strvec {
const char **v;
- int nr;
- int alloc;
+ size_t nr;
+ size_t alloc;
};
-#define STRVEC_INIT { empty_strvec, 0, 0 }
+#define STRVEC_INIT { \
+ .v = empty_strvec, \
+}
/**
* Initialize an array. This is no different than assigning from
diff --git a/submodule-config.c b/submodule-config.c
index 2026120fb3..f95344028b 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -649,9 +649,10 @@ static void config_from_gitmodules(config_fn_t fn, struct repository *repo, void
config_source.file = file;
} else if (repo_get_oid(repo, GITMODULES_INDEX, &oid) >= 0 ||
repo_get_oid(repo, GITMODULES_HEAD, &oid) >= 0) {
+ config_source.repo = repo;
config_source.blob = oidstr = xstrdup(oid_to_hex(&oid));
if (repo != the_repository)
- add_to_alternates_memory(repo->objects->odb->path);
+ add_submodule_odb_by_path(repo->objects->odb->path);
} else {
goto out;
}
@@ -702,7 +703,7 @@ void gitmodules_config_oid(const struct object_id *commit_oid)
if (gitmodule_oid_from_commit(commit_oid, &oid, &rev)) {
git_config_from_blob_oid(gitmodules_cb, rev.buf,
- &oid, the_repository);
+ the_repository, &oid, the_repository);
}
strbuf_release(&rev);
diff --git a/submodule-config.h b/submodule-config.h
index c11e22cf50..65875b94ea 100644
--- a/submodule-config.h
+++ b/submodule-config.h
@@ -45,10 +45,6 @@ struct submodule {
struct object_id gitmodules_oid;
int recommend_shallow;
};
-
-#define SUBMODULE_INIT { NULL, NULL, NULL, RECURSE_SUBMODULES_NONE, \
- NULL, NULL, SUBMODULE_UPDATE_STRATEGY_INIT, { { 0 } }, -1 };
-
struct submodule_cache;
struct repository;
diff --git a/submodule.c b/submodule.c
index 8e611fe1db..f3c99634a9 100644
--- a/submodule.c
+++ b/submodule.c
@@ -165,6 +165,8 @@ void stage_updated_gitmodules(struct index_state *istate)
die(_("staging updated .gitmodules failed"));
}
+static struct string_list added_submodule_odb_paths = STRING_LIST_INIT_NODUP;
+
/* TODO: remove this function, use repo_submodule_init instead. */
int add_submodule_odb(const char *path)
{
@@ -178,12 +180,33 @@ int add_submodule_odb(const char *path)
ret = -1;
goto done;
}
- add_to_alternates_memory(objects_directory.buf);
+ string_list_insert(&added_submodule_odb_paths,
+ strbuf_detach(&objects_directory, NULL));
done:
strbuf_release(&objects_directory);
return ret;
}
+void add_submodule_odb_by_path(const char *path)
+{
+ string_list_insert(&added_submodule_odb_paths, xstrdup(path));
+}
+
+int register_all_submodule_odb_as_alternates(void)
+{
+ int i;
+ int ret = added_submodule_odb_paths.nr;
+
+ for (i = 0; i < added_submodule_odb_paths.nr; i++)
+ add_to_alternates_memory(added_submodule_odb_paths.items[i].string);
+ if (ret) {
+ string_list_clear(&added_submodule_odb_paths, 0);
+ if (git_env_bool("GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB", 0))
+ BUG("register_all_submodule_odb_as_alternates() called");
+ }
+ return ret;
+}
+
void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,
const char *path)
{
@@ -237,6 +260,11 @@ int option_parse_recurse_submodules_worktree_updater(const struct option *opt,
/*
* Determine if a submodule has been initialized at a given 'path'
*/
+/*
+ * NEEDSWORK: Emit a warning if submodule.active exists, but is valueless,
+ * i.e., the config looks like: "[submodule] active\n".
+ * Since that is an invalid pathspec, we should inform the user.
+ */
int is_submodule_active(struct repository *repo, const char *path)
{
int ret = 0;
@@ -497,9 +525,6 @@ static void prepare_submodule_repo_env_in_gitdir(struct strvec *out)
/*
* Initialize a repository struct for a submodule based on the provided 'path'.
*
- * Unlike repo_submodule_init, this tolerates submodules not present
- * in .gitmodules. This function exists only to preserve historical behavior,
- *
* Returns the repository struct on success,
* NULL when the submodule is not present.
*/
@@ -697,8 +722,20 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
strvec_push(&cp.args, oid_to_hex(new_oid));
prepare_submodule_repo_env(&cp.env_array);
- if (start_command(&cp))
+
+ if (!is_directory(path)) {
+ /* fall back to absorbed git dir, if any */
+ if (!sub)
+ goto done;
+ cp.dir = sub->gitdir;
+ strvec_push(&cp.env_array, GIT_DIR_ENVIRONMENT "=.");
+ strvec_push(&cp.env_array, GIT_WORK_TREE_ENVIRONMENT "=.");
+ }
+
+ if (start_command(&cp)) {
diff_emit_submodule_error(o, "(diff failed)\n");
+ goto done;
+ }
while (strbuf_getwholeline_fd(&sb, cp.out, '\n') != EOF)
diff_emit_submodule_pipethrough(o, sb.buf, sb.len);
@@ -1281,9 +1318,11 @@ struct submodule_parallel_fetch {
struct strbuf submodules_with_errors;
};
-#define SPF_INIT {0, STRVEC_INIT, NULL, NULL, 0, 0, 0, 0, \
- STRING_LIST_INIT_DUP, \
- NULL, 0, 0, STRBUF_INIT}
+#define SPF_INIT { \
+ .args = STRVEC_INIT, \
+ .changed_submodule_names = STRING_LIST_INIT_DUP, \
+ .submodules_with_errors = STRBUF_INIT, \
+}
static int get_fetch_recurse_config(const struct submodule *submodule,
struct submodule_parallel_fetch *spf)
@@ -1381,24 +1420,13 @@ static void fetch_task_release(struct fetch_task *p)
}
static struct repository *get_submodule_repo_for(struct repository *r,
- const struct submodule *sub)
+ const char *path)
{
struct repository *ret = xmalloc(sizeof(*ret));
- if (repo_submodule_init(ret, r, sub)) {
- /*
- * No entry in .gitmodules? Technically not a submodule,
- * but historically we supported repositories that happen to be
- * in-place where a gitlink is. Keep supporting them.
- */
- struct strbuf gitdir = STRBUF_INIT;
- strbuf_repo_worktree_path(&gitdir, r, "%s/.git", sub->path);
- if (repo_init(ret, gitdir.buf, NULL)) {
- strbuf_release(&gitdir);
- free(ret);
- return NULL;
- }
- strbuf_release(&gitdir);
+ if (repo_submodule_init(ret, r, path, null_oid())) {
+ free(ret);
+ return NULL;
}
return ret;
@@ -1440,7 +1468,7 @@ static int get_next_submodule(struct child_process *cp,
continue;
}
- task->repo = get_submodule_repo_for(spf->r, task->sub);
+ task->repo = get_submodule_repo_for(spf->r, task->sub->path);
if (task->repo) {
struct strbuf submodule_prefix = STRBUF_INIT;
child_process_init(cp);
@@ -1819,14 +1847,16 @@ out:
void submodule_unset_core_worktree(const struct submodule *sub)
{
- char *config_path = xstrfmt("%s/modules/%s/config",
- get_git_dir(), sub->name);
+ struct strbuf config_path = STRBUF_INIT;
+
+ submodule_name_to_gitdir(&config_path, the_repository, sub->name);
+ strbuf_addstr(&config_path, "/config");
- if (git_config_set_in_file_gently(config_path, "core.worktree", NULL))
+ if (git_config_set_in_file_gently(config_path.buf, "core.worktree", NULL))
warning(_("Could not unset core.worktree setting in submodule '%s'"),
sub->path);
- free(config_path);
+ strbuf_release(&config_path);
}
static const char *get_super_prefix_or_empty(void)
@@ -1866,6 +1896,7 @@ static void submodule_reset_index(const char *path)
strvec_pushf(&cp.args, "--super-prefix=%s%s/",
get_super_prefix_or_empty(), path);
+ /* TODO: determine if this might overwrite untracked files */
strvec_pushl(&cp.args, "read-tree", "-u", "--reset", NULL);
strvec_push(&cp.args, empty_tree_oid_hex());
@@ -1922,20 +1953,22 @@ int submodule_move_head(const char *path,
absorb_git_dir_into_superproject(path,
ABSORB_GITDIR_RECURSE_SUBMODULES);
} else {
- char *gitdir = xstrfmt("%s/modules/%s",
- get_git_dir(), sub->name);
- connect_work_tree_and_git_dir(path, gitdir, 0);
- free(gitdir);
+ struct strbuf gitdir = STRBUF_INIT;
+ submodule_name_to_gitdir(&gitdir, the_repository,
+ sub->name);
+ connect_work_tree_and_git_dir(path, gitdir.buf, 0);
+ strbuf_release(&gitdir);
/* make sure the index is clean as well */
submodule_reset_index(path);
}
if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
- char *gitdir = xstrfmt("%s/modules/%s",
- get_git_dir(), sub->name);
- connect_work_tree_and_git_dir(path, gitdir, 1);
- free(gitdir);
+ struct strbuf gitdir = STRBUF_INIT;
+ submodule_name_to_gitdir(&gitdir, the_repository,
+ sub->name);
+ connect_work_tree_and_git_dir(path, gitdir.buf, 1);
+ strbuf_release(&gitdir);
}
}
@@ -2050,7 +2083,7 @@ int validate_submodule_git_dir(char *git_dir, const char *submodule_name)
static void relocate_single_git_dir_into_superproject(const char *path)
{
char *old_git_dir = NULL, *real_old_git_dir = NULL, *real_new_git_dir = NULL;
- char *new_git_dir;
+ struct strbuf new_gitdir = STRBUF_INIT;
const struct submodule *sub;
if (submodule_uses_worktrees(path))
@@ -2068,14 +2101,13 @@ static void relocate_single_git_dir_into_superproject(const char *path)
if (!sub)
die(_("could not lookup name for submodule '%s'"), path);
- new_git_dir = git_pathdup("modules/%s", sub->name);
- if (validate_submodule_git_dir(new_git_dir, sub->name) < 0)
+ submodule_name_to_gitdir(&new_gitdir, the_repository, sub->name);
+ if (validate_submodule_git_dir(new_gitdir.buf, sub->name) < 0)
die(_("refusing to move '%s' into an existing git dir"),
real_old_git_dir);
- if (safe_create_leading_directories_const(new_git_dir) < 0)
- die(_("could not create directory '%s'"), new_git_dir);
- real_new_git_dir = real_pathdup(new_git_dir, 1);
- free(new_git_dir);
+ if (safe_create_leading_directories_const(new_gitdir.buf) < 0)
+ die(_("could not create directory '%s'"), new_gitdir.buf);
+ real_new_git_dir = real_pathdup(new_gitdir.buf, 1);
fprintf(stderr, _("Migrating git directory of '%s%s' from\n'%s' to\n'%s'\n"),
get_super_prefix_or_empty(), path,
@@ -2086,6 +2118,7 @@ static void relocate_single_git_dir_into_superproject(const char *path)
free(old_git_dir);
free(real_old_git_dir);
free(real_new_git_dir);
+ strbuf_release(&new_gitdir);
}
/*
@@ -2105,6 +2138,7 @@ void absorb_git_dir_into_superproject(const char *path,
/* Not populated? */
if (!sub_git_dir) {
const struct submodule *sub;
+ struct strbuf sub_gitdir = STRBUF_INIT;
if (err_code == READ_GITFILE_ERR_STAT_FAILED) {
/* unpopulated as expected */
@@ -2126,8 +2160,9 @@ void absorb_git_dir_into_superproject(const char *path,
sub = submodule_from_path(the_repository, null_oid(), path);
if (!sub)
die(_("could not lookup name for submodule '%s'"), path);
- connect_work_tree_and_git_dir(path,
- git_path("modules/%s", sub->name), 0);
+ submodule_name_to_gitdir(&sub_gitdir, the_repository, sub->name);
+ connect_work_tree_and_git_dir(path, sub_gitdir.buf, 0);
+ strbuf_release(&sub_gitdir);
} else {
/* Is it already absorbed into the superprojects git dir? */
char *real_sub_git_dir = real_pathdup(sub_git_dir, 1);
@@ -2278,9 +2313,36 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule)
goto cleanup;
}
strbuf_reset(buf);
- strbuf_git_path(buf, "%s/%s", "modules", sub->name);
+ submodule_name_to_gitdir(buf, the_repository, sub->name);
}
cleanup:
return ret;
}
+
+void submodule_name_to_gitdir(struct strbuf *buf, struct repository *r,
+ const char *submodule_name)
+{
+ /*
+ * NEEDSWORK: The current way of mapping a submodule's name to
+ * its location in .git/modules/ has problems with some naming
+ * schemes. For example, this happens if a submodule is named "foo"
+ * and another is named "foo/bar" (whether present in the same
+ * superproject commit or not - the problem will arise if both
+ * superproject commits have been checked out at any point in
+ * time), or if two submodule names only have different cases in
+ * a case-insensitive filesystem.
+ *
+ * There are several solutions, including encoding the path in
+ * some way, introducing a submodule.<name>.gitdir config in
+ * .git/config (not .gitmodules) that allows overriding what the
+ * gitdir of a submodule would be (and teach Git, upon noticing
+ * a clash, to automatically determine a non-clashing name and
+ * to write such a config), or introducing a
+ * submodule.<name>.gitdir config in .gitmodules that repo
+ * administrators can explicitly set. Nothing has been decided,
+ * so for now, just append the name at the end of the path.
+ */
+ strbuf_repo_git_path(buf, r, "modules/");
+ strbuf_addstr(buf, submodule_name);
+}
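
Concretely, the helper just appends the submodule name to the repository's "modules/" directory, so a submodule named "lib/foo" in a superproject whose git directory is ".git" currently resolves to the gitdir ".git/modules/lib/foo". A small sketch of a caller (hypothetical, for illustration only):

	static void show_submodule_gitdir(struct repository *r, const char *name)
	{
		struct strbuf gitdir = STRBUF_INIT;

		submodule_name_to_gitdir(&gitdir, r, name);
		printf("%s -> %s\n", name, gitdir.buf);
		strbuf_release(&gitdir);
	}
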
diff --git a/submodule.h b/submodule.h
index 84640c49c1..6bd2c99fd9 100644
--- a/submodule.h
+++ b/submodule.h
@@ -37,7 +37,9 @@ struct submodule_update_strategy {
enum submodule_update_type type;
const char *command;
};
-#define SUBMODULE_UPDATE_STRATEGY_INIT {SM_UPDATE_UNSPECIFIED, NULL}
+#define SUBMODULE_UPDATE_STRATEGY_INIT { \
+ .type = SM_UPDATE_UNSPECIFIED, \
+}
int is_gitmodules_unmerged(struct index_state *istate);
int is_writing_gitmodules_ok(void);
@@ -97,7 +99,15 @@ int submodule_uses_gitfile(const char *path);
#define SUBMODULE_REMOVAL_IGNORE_IGNORED_UNTRACKED (1<<2)
int bad_to_remove_submodule(const char *path, unsigned flags);
+/*
+ * Call add_submodule_odb() to add the submodule at the given path to a list.
+ * When register_all_submodule_odb_as_alternates() is called, the object stores
+ * of all submodules in that list will be added as alternates in
+ * the_repository.
+ */
int add_submodule_odb(const char *path);
+void add_submodule_odb_by_path(const char *path);
+int register_all_submodule_odb_as_alternates(void);
/*
* Checks if there are submodule changes in a..b. If a is the null OID,
@@ -125,6 +135,13 @@ int push_unpushed_submodules(struct repository *r,
int submodule_to_gitdir(struct strbuf *buf, const char *submodule);
/*
+ * Given a submodule name, create a path to where the submodule's gitdir lives
+ * inside of the provided repository's 'modules' directory.
+ */
+void submodule_name_to_gitdir(struct strbuf *buf, struct repository *r,
+ const char *submodule_name);
+
+/*
* Make sure that no submodule's git dir is nested in a sibling submodule's.
*/
int validate_submodule_git_dir(char *git_dir, const char *submodule_name);
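
The intended calling pattern for the new pair of functions is lazy: add_submodule_odb() (or add_submodule_odb_by_path()) only records candidate object directories, and none of them become reachable until register_all_submodule_odb_as_alternates() is called; setting GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB turns that registration into a BUG() so migrated code paths can prove they never need it. A minimal sketch (hypothetical caller):

	static void peek_into_submodule(const char *path)
	{
		if (add_submodule_odb(path))
			return; /* no object store found at that path */

		/* defer until submodule objects are actually needed */
		register_all_submodule_odb_as_alternates();
	}
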
diff --git a/t/README b/t/README
index 9e70122302..b92155a822 100644
--- a/t/README
+++ b/t/README
@@ -366,6 +366,13 @@ excluded as so much relies on it, but this might change in the future.
GIT_TEST_SPLIT_INDEX=<boolean> forces split-index mode on the whole
test suite. Accept any boolean values that are accepted by git-config.
+GIT_TEST_PASSING_SANITIZE_LEAK=<boolean>, when compiled with
+SANITIZE=leak, will run only those tests that have whitelisted
+themselves as passing with no memory leaks. Tests can be whitelisted
+by setting "TEST_PASSES_SANITIZE_LEAK=true" before sourcing
+"test-lib.sh" itself at the top of the test script. This test mode is
+used by the "linux-leaks" CI target.
+
GIT_TEST_PROTOCOL_VERSION=<n>, when set, makes 'protocol.version'
default to n.
@@ -425,6 +432,10 @@ GIT_TEST_MULTI_PACK_INDEX=<boolean>, when true, forces the multi-pack-
index to be written after every 'git repack' command, and overrides the
'core.multiPackIndex' setting to true.
+GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=<boolean>, when true, sets the
+'--bitmap' option on all invocations of 'git multi-pack-index write',
+and ignores pack-objects' '--write-bitmap-index'.
+
GIT_TEST_SIDEBAND_ALL=<boolean>, when true, overrides the
'uploadpack.allowSidebandAll' setting to true, and when false, forces
fetch-pack to not request sideband-all (even if the server advertises
@@ -448,6 +459,16 @@ GIT_TEST_CHECKOUT_WORKERS=<n> overrides the 'checkout.workers' setting
to <n> and 'checkout.thresholdForParallelism' to 0, forcing the
execution of the parallel-checkout code.
+GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=<boolean>, when true, makes
+registering submodule ODBs as alternates a fatal action. Support for
+this environment variable can be removed once the migration to
+explicitly providing repositories when accessing submodule objects is
+complete (in which case we might want to replace this with a trace2
+call so that users can make it visible if accessing submodule objects
+without an explicit repository still happens) or needs to be abandoned
+for whatever reason (in which case the migrated codepaths still retain
+their performance benefits).
+
Naming Tests
------------
@@ -753,7 +774,8 @@ Test harness library
--------------------
There are a handful of helper functions defined in the test harness
-library for your script to use.
+library for your script to use. Some of them are listed below;
+see test-lib-functions.sh for the full list and their options.
- test_expect_success [<prereq>] <message> <script>
@@ -799,10 +821,12 @@ library for your script to use.
argument. This is primarily meant for use during the
development of a new test script.
- - debug <git-command>
+ - debug [options] <git-command>
Run a git command inside a debugger. This is primarily meant for
- use when debugging a failing test script.
+ use when debugging a failing test script. With '-t', use your
+ original TERM instead of test-lib.sh's "dumb", so that your
+ debugger interface has colors.
- test_done
@@ -989,7 +1013,7 @@ library for your script to use.
EOF
- - test_pause
+ - test_pause [options]
This command is useful for writing and debugging tests and must be
removed before submitting. It halts the execution of the test and
diff --git a/t/helper/test-bitmap.c b/t/helper/test-bitmap.c
index 134a1e9d76..ff35f5999b 100644
--- a/t/helper/test-bitmap.c
+++ b/t/helper/test-bitmap.c
@@ -7,6 +7,11 @@ static int bitmap_list_commits(void)
return test_bitmap_commits(the_repository);
}
+static int bitmap_dump_hashes(void)
+{
+ return test_bitmap_hashes(the_repository);
+}
+
int cmd__bitmap(int argc, const char **argv)
{
setup_git_directory();
@@ -16,9 +21,12 @@ int cmd__bitmap(int argc, const char **argv)
if (!strcmp(argv[1], "list-commits"))
return bitmap_list_commits();
+ if (!strcmp(argv[1], "dump-hashes"))
+ return bitmap_dump_hashes();
usage:
- usage("\ttest-tool bitmap list-commits");
+ usage("\ttest-tool bitmap list-commits\n"
+ "\ttest-tool bitmap dump-hashes");
return -1;
}
diff --git a/t/helper/test-dump-untracked-cache.c b/t/helper/test-dump-untracked-cache.c
index cf0f2c7228..99010614f6 100644
--- a/t/helper/test-dump-untracked-cache.c
+++ b/t/helper/test-dump-untracked-cache.c
@@ -45,8 +45,10 @@ int cmd__dump_untracked_cache(int ac, const char **av)
struct untracked_cache *uc;
struct strbuf base = STRBUF_INIT;
- /* Hack to avoid modifying the untracked cache when we read it */
- ignore_untracked_cache_config = 1;
+ /* Set core.untrackedCache=keep before setup_git_directory() */
+ xsetenv("GIT_CONFIG_COUNT", "1", 1);
+ xsetenv("GIT_CONFIG_KEY_0", "core.untrackedCache", 1);
+ xsetenv("GIT_CONFIG_VALUE_0", "keep", 1);
setup_git_directory();
if (read_cache() < 0)
diff --git a/t/helper/test-parse-options.c b/t/helper/test-parse-options.c
index 2051ce57db..a282b6ff13 100644
--- a/t/helper/test-parse-options.c
+++ b/t/helper/test-parse-options.c
@@ -134,7 +134,6 @@ int cmd__parse_options(int argc, const char **argv)
OPT_NOOP_NOARG(0, "obsolete"),
OPT_STRING_LIST(0, "list", &list, "str", "add str to list"),
OPT_GROUP("Magic arguments"),
- OPT_ARGUMENT("quux", NULL, "means --quux"),
OPT_NUMBER_CALLBACK(&integer, "set integer to NUM",
number_callback),
{ OPTION_COUNTUP, '+', NULL, &boolean, NULL, "same as -b",
diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c
index 7c2eb11a8e..cb0d27049a 100644
--- a/t/helper/test-read-midx.c
+++ b/t/helper/test-read-midx.c
@@ -60,12 +60,26 @@ static int read_midx_file(const char *object_dir, int show_objects)
return 0;
}
+static int read_midx_checksum(const char *object_dir)
+{
+ struct multi_pack_index *m;
+
+ setup_git_directory();
+ m = load_multi_pack_index(object_dir, 1);
+ if (!m)
+ return 1;
+ printf("%s\n", hash_to_hex(get_midx_checksum(m)));
+ return 0;
+}
+
int cmd__read_midx(int argc, const char **argv)
{
if (!(argc == 2 || argc == 3))
- usage("read-midx [--show-objects] <object-dir>");
+ usage("read-midx [--show-objects|--checksum] <object-dir>");
if (!strcmp(argv[1], "--show-objects"))
return read_midx_file(argv[2], 1);
+ else if (!strcmp(argv[1], "--checksum"))
+ return read_midx_checksum(argv[2]);
return read_midx_file(argv[1], 0);
}
diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c
index 7ae03dc712..3c4fb86223 100644
--- a/t/helper/test-run-command.c
+++ b/t/helper/test-run-command.c
@@ -60,8 +60,10 @@ struct testsuite {
int next;
int quiet, immediate, verbose, verbose_log, trace, write_junit_xml;
};
-#define TESTSUITE_INIT \
- { STRING_LIST_INIT_DUP, STRING_LIST_INIT_DUP, -1, 0, 0, 0, 0, 0, 0 }
+#define TESTSUITE_INIT { \
+ .tests = STRING_LIST_INIT_DUP, \
+ .failed = STRING_LIST_INIT_DUP, \
+}
static int next_test(struct child_process *cp, struct strbuf *err, void *cb,
void **task_cb)
@@ -142,9 +144,6 @@ static int testsuite(int argc, const char **argv)
OPT_END()
};
- memset(&suite, 0, sizeof(suite));
- suite.tests.strdup_strings = suite.failed.strdup_strings = 1;
-
argc = parse_options(argc, argv, NULL, options,
testsuite_usage, PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/t/helper/test-serve-v2.c b/t/helper/test-serve-v2.c
index aee35e5aef..28e905afc3 100644
--- a/t/helper/test-serve-v2.c
+++ b/t/helper/test-serve-v2.c
@@ -10,12 +10,12 @@ static char const * const serve_usage[] = {
int cmd__serve_v2(int argc, const char **argv)
{
- struct serve_options opts = SERVE_OPTIONS_INIT;
-
+ int stateless_rpc = 0;
+ int advertise_capabilities = 0;
struct option options[] = {
- OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+ OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
N_("quit after a single request/response exchange")),
- OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities,
+ OPT_BOOL(0, "advertise-capabilities", &advertise_capabilities,
N_("exit immediately after advertising capabilities")),
OPT_END()
};
@@ -25,7 +25,11 @@ int cmd__serve_v2(int argc, const char **argv)
argc = parse_options(argc, argv, prefix, options, serve_usage,
PARSE_OPT_KEEP_DASHDASH |
PARSE_OPT_KEEP_UNKNOWN);
- serve(&opts);
+
+ if (advertise_capabilities)
+ protocol_v2_advertise_capabilities();
+ else
+ protocol_v2_serve_loop(stateless_rpc);
return 0;
}
diff --git a/t/helper/test-simple-ipc.c b/t/helper/test-simple-ipc.c
index 42040ef81b..28365ff85b 100644
--- a/t/helper/test-simple-ipc.c
+++ b/t/helper/test-simple-ipc.c
@@ -9,6 +9,7 @@
#include "parse-options.h"
#include "thread-utils.h"
#include "strvec.h"
+#include "run-command.h"
#ifndef SUPPORTS_SIMPLE_IPC
int cmd__simple_ipc(int argc, const char **argv)
@@ -112,7 +113,7 @@ static int app__slow_command(ipc_server_reply_cb *reply_cb,
/*
* The client sent a command followed by a (possibly very) large buffer.
*/
-static int app__sendbytes_command(const char *received,
+static int app__sendbytes_command(const char *received, size_t received_len,
ipc_server_reply_cb *reply_cb,
struct ipc_server_reply_data *reply_data)
{
@@ -123,6 +124,13 @@ static int app__sendbytes_command(const char *received,
int errs = 0;
int ret;
+ /*
+ * The test is set up to send:
+ * "sendbytes" SP <n * char>
+ */
+ if (received_len < strlen("sendbytes "))
+ BUG("received_len is short in app__sendbytes_command");
+
if (skip_prefix(received, "sendbytes ", &p))
len_ballast = strlen(p);
@@ -160,7 +168,7 @@ static ipc_server_application_cb test_app_cb;
* by this application.
*/
static int test_app_cb(void *application_data,
- const char *command,
+ const char *command, size_t command_len,
ipc_server_reply_cb *reply_cb,
struct ipc_server_reply_data *reply_data)
{
@@ -173,7 +181,7 @@ static int test_app_cb(void *application_data,
if (application_data != (void*)&my_app_data)
BUG("application_cb: application_data pointer wrong");
- if (!strcmp(command, "quit")) {
+ if (command_len == 4 && !strncmp(command, "quit", 4)) {
/*
* The client sent a "quit" command. This is an async
* request for the server to shutdown.
@@ -193,22 +201,23 @@ static int test_app_cb(void *application_data,
return SIMPLE_IPC_QUIT;
}
- if (!strcmp(command, "ping")) {
+ if (command_len == 4 && !strncmp(command, "ping", 4)) {
const char *answer = "pong";
return reply_cb(reply_data, answer, strlen(answer));
}
- if (!strcmp(command, "big"))
+ if (command_len == 3 && !strncmp(command, "big", 3))
return app__big_command(reply_cb, reply_data);
- if (!strcmp(command, "chunk"))
+ if (command_len == 5 && !strncmp(command, "chunk", 5))
return app__chunk_command(reply_cb, reply_data);
- if (!strcmp(command, "slow"))
+ if (command_len == 4 && !strncmp(command, "slow", 4))
return app__slow_command(reply_cb, reply_data);
- if (starts_with(command, "sendbytes "))
- return app__sendbytes_command(command, reply_cb, reply_data);
+ if (command_len >= 10 && starts_with(command, "sendbytes "))
+ return app__sendbytes_command(command, command_len,
+ reply_cb, reply_data);
return app__unhandled_command(command, reply_cb, reply_data);
}
@@ -259,186 +268,72 @@ static int daemon__run_server(void)
*/
ret = ipc_server_run(cl_args.path, &opts, test_app_cb, (void*)&my_app_data);
if (ret == -2)
- error(_("socket/pipe already in use: '%s'"), cl_args.path);
+ error("socket/pipe already in use: '%s'", cl_args.path);
else if (ret == -1)
- error_errno(_("could not start server on: '%s'"), cl_args.path);
+ error_errno("could not start server on: '%s'", cl_args.path);
return ret;
}
-#ifndef GIT_WINDOWS_NATIVE
-/*
- * This is adapted from `daemonize()`. Use `fork()` to directly create and
- * run the daemon in a child process.
- */
-static int spawn_server(pid_t *pid)
-{
- struct ipc_server_opts opts = {
- .nr_threads = cl_args.nr_threads,
- };
-
- *pid = fork();
+static start_bg_wait_cb bg_wait_cb;
- switch (*pid) {
- case 0:
- if (setsid() == -1)
- error_errno(_("setsid failed"));
- close(0);
- close(1);
- close(2);
- sanitize_stdfds();
+static int bg_wait_cb(const struct child_process *cp, void *cb_data)
+{
+ int s = ipc_get_active_state(cl_args.path);
- return ipc_server_run(cl_args.path, &opts, test_app_cb,
- (void*)&my_app_data);
+ switch (s) {
+ case IPC_STATE__LISTENING:
+ /* child is "ready" */
+ return 0;
- case -1:
- return error_errno(_("could not spawn daemon in the background"));
+ case IPC_STATE__NOT_LISTENING:
+ case IPC_STATE__PATH_NOT_FOUND:
+ /* give child more time */
+ return 1;
default:
- return 0;
+ case IPC_STATE__INVALID_PATH:
+ case IPC_STATE__OTHER_ERROR:
+ /* all the time in the world won't help */
+ return -1;
}
}
-#else
-/*
- * Conceptually like `daemonize()` but different because Windows does not
- * have `fork(2)`. Spawn a normal Windows child process but without the
- * limitations of `start_command()` and `finish_command()`.
- */
-static int spawn_server(pid_t *pid)
-{
- char test_tool_exe[MAX_PATH];
- struct strvec args = STRVEC_INIT;
- int in, out;
-
- GetModuleFileNameA(NULL, test_tool_exe, MAX_PATH);
-
- in = open("/dev/null", O_RDONLY);
- out = open("/dev/null", O_WRONLY);
-
- strvec_push(&args, test_tool_exe);
- strvec_push(&args, "simple-ipc");
- strvec_push(&args, "run-daemon");
- strvec_pushf(&args, "--name=%s", cl_args.path);
- strvec_pushf(&args, "--threads=%d", cl_args.nr_threads);
- *pid = mingw_spawnvpe(args.v[0], args.v, NULL, NULL, in, out, out);
- close(in);
- close(out);
-
- strvec_clear(&args);
-
- if (*pid < 0)
- return error(_("could not spawn daemon in the background"));
-
- return 0;
-}
-#endif
-
-/*
- * This is adapted from `wait_or_whine()`. Watch the child process and
- * let it get started and begin listening for requests on the socket
- * before reporting our success.
- */
-static int wait_for_server_startup(pid_t pid_child)
+static int daemon__start_server(void)
{
- int status;
- pid_t pid_seen;
- enum ipc_active_state s;
- time_t time_limit, now;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ enum start_bg_result sbgr;
- time(&time_limit);
- time_limit += cl_args.max_wait_sec;
+ strvec_push(&cp.args, "test-tool");
+ strvec_push(&cp.args, "simple-ipc");
+ strvec_push(&cp.args, "run-daemon");
+ strvec_pushf(&cp.args, "--name=%s", cl_args.path);
+ strvec_pushf(&cp.args, "--threads=%d", cl_args.nr_threads);
- for (;;) {
- pid_seen = waitpid(pid_child, &status, WNOHANG);
-
- if (pid_seen == -1)
- return error_errno(_("waitpid failed"));
-
- else if (pid_seen == 0) {
- /*
- * The child is still running (this should be
- * the normal case). Try to connect to it on
- * the socket and see if it is ready for
- * business.
- *
- * If there is another daemon already running,
- * our child will fail to start (possibly
- * after a timeout on the lock), but we don't
- * care (who responds) if the socket is live.
- */
- s = ipc_get_active_state(cl_args.path);
- if (s == IPC_STATE__LISTENING)
- return 0;
+ cp.no_stdin = 1;
+ cp.no_stdout = 1;
+ cp.no_stderr = 1;
- time(&now);
- if (now > time_limit)
- return error(_("daemon not online yet"));
+ sbgr = start_bg_command(&cp, bg_wait_cb, NULL, cl_args.max_wait_sec);
- continue;
- }
+ switch (sbgr) {
+ case SBGR_READY:
+ return 0;
- else if (pid_seen == pid_child) {
- /*
- * The new child daemon process shutdown while
- * it was starting up, so it is not listening
- * on the socket.
- *
- * Try to ping the socket in the odd chance
- * that another daemon started (or was already
- * running) while our child was starting.
- *
- * Again, we don't care who services the socket.
- */
- s = ipc_get_active_state(cl_args.path);
- if (s == IPC_STATE__LISTENING)
- return 0;
+ default:
+ case SBGR_ERROR:
+ case SBGR_CB_ERROR:
+ return error("daemon failed to start");
- /*
- * We don't care about the WEXITSTATUS() nor
- * any of the WIF*(status) values because
- * `cmd__simple_ipc()` does the `!!result`
- * trick on all function return values.
- *
- * So it is sufficient to just report the
- * early shutdown as an error.
- */
- return error(_("daemon failed to start"));
- }
+ case SBGR_TIMEOUT:
+ return error("daemon not online yet");
- else
- return error(_("waitpid is confused"));
+ case SBGR_DIED:
+ return error("daemon terminated");
}
}
/*
- * This process will start a simple-ipc server in a background process and
- * wait for it to become ready. This is like `daemonize()` but gives us
- * more control and better error reporting (and makes it easier to write
- * unit tests).
- */
-static int daemon__start_server(void)
-{
- pid_t pid_child;
- int ret;
-
- /*
- * Run the actual daemon in a background process.
- */
- ret = spawn_server(&pid_child);
- if (pid_child <= 0)
- return ret;
-
- /*
- * Let the parent wait for the child process to get started
- * and begin listening for requests on the socket.
- */
- ret = wait_for_server_startup(pid_child);
-
- return ret;
-}
-
-/*
* This process will run a quick probe to see if a simple-ipc server
* is active on this path.
*
@@ -488,7 +383,9 @@ static int client__send_ipc(void)
options.wait_if_busy = 1;
options.wait_if_not_found = 0;
- if (!ipc_client_send_command(cl_args.path, &options, command, &buf)) {
+ if (!ipc_client_send_command(cl_args.path, &options,
+ command, strlen(command),
+ &buf)) {
if (buf.len) {
printf("%s\n", buf.buf);
fflush(stdout);
@@ -538,7 +435,7 @@ static int client__stop_server(void)
time(&now);
if (now > time_limit)
- return error(_("daemon has not shutdown yet"));
+ return error("daemon has not shutdown yet");
}
}
@@ -556,7 +453,9 @@ static int do_sendbytes(int bytecount, char byte, const char *path,
strbuf_addstr(&buf_send, "sendbytes ");
strbuf_addchars(&buf_send, byte, bytecount);
- if (!ipc_client_send_command(path, options, buf_send.buf, &buf_resp)) {
+ if (!ipc_client_send_command(path, options,
+ buf_send.buf, buf_send.len,
+ &buf_resp)) {
strbuf_rtrim(&buf_resp);
printf("sent:%c%08d %s\n", byte, bytecount, buf_resp.buf);
fflush(stdout);
diff --git a/t/helper/test-submodule-nested-repo-config.c b/t/helper/test-submodule-nested-repo-config.c
index e3f11ff5a7..dc1c14bde3 100644
--- a/t/helper/test-submodule-nested-repo-config.c
+++ b/t/helper/test-submodule-nested-repo-config.c
@@ -11,15 +11,13 @@ static void die_usage(const char **argv, const char *msg)
int cmd__submodule_nested_repo_config(int argc, const char **argv)
{
struct repository subrepo;
- const struct submodule *sub;
if (argc < 3)
die_usage(argv, "Wrong number of arguments.");
setup_git_directory();
- sub = submodule_from_path(the_repository, null_oid(), argv[1]);
- if (repo_submodule_init(&subrepo, the_repository, sub)) {
+ if (repo_submodule_init(&subrepo, the_repository, argv[1], null_oid())) {
die_usage(argv, "Submodule not found.");
}
diff --git a/t/lib-bitmap.sh b/t/lib-bitmap.sh
index fe3f98be24..21d0392dda 100644
--- a/t/lib-bitmap.sh
+++ b/t/lib-bitmap.sh
@@ -1,3 +1,6 @@
+# Helpers for scripts testing bitmap functionality; see t5310 for
+# example usage.
+
# Compare a file containing rev-list bitmap traversal output to its non-bitmap
# counterpart. You can't just use test_cmp for this, because the two produce
# subtly different output:
@@ -24,3 +27,240 @@ test_bitmap_traversal () {
test_cmp "$1.normalized" "$2.normalized" &&
rm -f "$1.normalized" "$2.normalized"
}
+
+# To ensure the logic for "maximal commits" is exercised, make
+# the repository a bit more complicated.
+#
+# other second
+# * *
+# (99 commits) (99 commits)
+# * *
+# |\ /|
+# | * octo-other octo-second * |
+# |/|\_________ ____________/|\|
+# | \ \/ __________/ |
+# | | ________/\ / |
+# * |/ * merge-right *
+# | _|__________/ \____________ |
+# |/ | \|
+# (l1) * * merge-left * (r1)
+# | / \________________________ |
+# |/ \|
+# (l2) * * (r2)
+# \___________________________ |
+# \|
+# * (base)
+#
+# We only push bits down the first-parent history, which
+# makes some of these commits unimportant!
+#
+# The important part for the maximal commit algorithm is how
+# the bitmasks are extended. Assuming starting bit positions
+# for second (bit 0) and other (bit 1), the bitmasks at the
+# end should be:
+#
+# second: 1 (maximal, selected)
+# other: 01 (maximal, selected)
+# (base): 11 (maximal)
+#
+# This complicated history was important for a previous
+# version of the walk that guarantees never walking a
+# commit multiple times. That goal might be important
+# again, so preserve this complicated case. For now, this
+# test will guarantee that the bitmaps are computed
+# correctly, even with the repeat calculations.
+setup_bitmap_history() {
+ test_expect_success 'setup repo with moderate-sized history' '
+ test_commit_bulk --id=file 10 &&
+ git branch -M second &&
+ git checkout -b other HEAD~5 &&
+ test_commit_bulk --id=side 10 &&
+
+ # add complicated history setup, including merges and
+ # ambiguous merge-bases
+
+ git checkout -b merge-left other~2 &&
+ git merge second~2 -m "merge-left" &&
+
+ git checkout -b merge-right second~1 &&
+ git merge other~1 -m "merge-right" &&
+
+ git checkout -b octo-second second &&
+ git merge merge-left merge-right -m "octopus-second" &&
+
+ git checkout -b octo-other other &&
+ git merge merge-left merge-right -m "octopus-other" &&
+
+ git checkout other &&
+ git merge octo-other -m "pull octopus" &&
+
+ git checkout second &&
+ git merge octo-second -m "pull octopus" &&
+
+ # Remove these branches so they are not selected
+ # as bitmap tips
+ git branch -D merge-left &&
+ git branch -D merge-right &&
+ git branch -D octo-other &&
+ git branch -D octo-second &&
+
+ # add padding to make these merges less interesting
+ # and avoid having them selected for bitmaps
+ test_commit_bulk --id=file 100 &&
+ git checkout other &&
+ test_commit_bulk --id=side 100 &&
+ git checkout second &&
+
+ bitmaptip=$(git rev-parse second) &&
+ blob=$(echo tagged-blob | git hash-object -w --stdin) &&
+ git tag tagged-blob $blob
+ '
+}
+
+rev_list_tests_head () {
+ test_expect_success "counting commits via bitmap ($state, $branch)" '
+ git rev-list --count $branch >expect &&
+ git rev-list --use-bitmap-index --count $branch >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "counting partial commits via bitmap ($state, $branch)" '
+ git rev-list --count $branch~5..$branch >expect &&
+ git rev-list --use-bitmap-index --count $branch~5..$branch >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "counting commits with limit ($state, $branch)" '
+ git rev-list --count -n 1 $branch >expect &&
+ git rev-list --use-bitmap-index --count -n 1 $branch >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "counting non-linear history ($state, $branch)" '
+ git rev-list --count other...second >expect &&
+ git rev-list --use-bitmap-index --count other...second >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "counting commits with limiting ($state, $branch)" '
+ git rev-list --count $branch -- 1.t >expect &&
+ git rev-list --use-bitmap-index --count $branch -- 1.t >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "counting objects via bitmap ($state, $branch)" '
+ git rev-list --count --objects $branch >expect &&
+ git rev-list --use-bitmap-index --count --objects $branch >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "enumerate commits ($state, $branch)" '
+ git rev-list --use-bitmap-index $branch >actual &&
+ git rev-list $branch >expect &&
+ test_bitmap_traversal --no-confirm-bitmaps expect actual
+ '
+
+ test_expect_success "enumerate --objects ($state, $branch)" '
+ git rev-list --objects --use-bitmap-index $branch >actual &&
+ git rev-list --objects $branch >expect &&
+ test_bitmap_traversal expect actual
+ '
+
+ test_expect_success "bitmap --objects handles non-commit objects ($state, $branch)" '
+ git rev-list --objects --use-bitmap-index $branch tagged-blob >actual &&
+ grep $blob actual
+ '
+}
+
+rev_list_tests () {
+ state=$1
+
+ for branch in "second" "other"
+ do
+ rev_list_tests_head
+ done
+}
+
+basic_bitmap_tests () {
+ tip="$1"
+ test_expect_success 'rev-list --test-bitmap verifies bitmaps' "
+ git rev-list --test-bitmap "${tip:-HEAD}"
+ "
+
+ rev_list_tests 'full bitmap'
+
+ test_expect_success 'clone from bitmapped repository' '
+ rm -fr clone.git &&
+ git clone --no-local --bare . clone.git &&
+ git rev-parse HEAD >expect &&
+ git --git-dir=clone.git rev-parse HEAD >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success 'partial clone from bitmapped repository' '
+ test_config uploadpack.allowfilter true &&
+ rm -fr partial-clone.git &&
+ git clone --no-local --bare --filter=blob:none . partial-clone.git &&
+ (
+ cd partial-clone.git &&
+ pack=$(echo objects/pack/*.pack) &&
+ git verify-pack -v "$pack" >have &&
+ awk "/blob/ { print \$1 }" <have >blobs &&
+ # we expect this single blob because of the direct ref
+ git rev-parse refs/tags/tagged-blob >expect &&
+ test_cmp expect blobs
+ )
+ '
+
+ test_expect_success 'setup further non-bitmapped commits' '
+ test_commit_bulk --id=further 10
+ '
+
+ rev_list_tests 'partial bitmap'
+
+ test_expect_success 'fetch (partial bitmap)' '
+ git --git-dir=clone.git fetch origin second:second &&
+ git rev-parse HEAD >expect &&
+ git --git-dir=clone.git rev-parse HEAD >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success 'enumerating progress counts pack-reused objects' '
+ count=$(git rev-list --objects --all --count) &&
+ git repack -adb &&
+
+ # check first with only reused objects; confirm that our
+ # progress showed the right number, and also that we did
+ # pack-reuse as expected. Check only the final "done"
+ # line of the meter (there may be an arbitrary number of
+ # intermediate lines ending with CR).
+ GIT_PROGRESS_DELAY=0 \
+ git pack-objects --all --stdout --progress \
+ </dev/null >/dev/null 2>stderr &&
+ grep "Enumerating objects: $count, done" stderr &&
+ grep "pack-reused $count" stderr &&
+
+ # now the same but with one non-reused object
+ git commit --allow-empty -m "an extra commit object" &&
+ GIT_PROGRESS_DELAY=0 \
+ git pack-objects --all --stdout --progress \
+ </dev/null >/dev/null 2>stderr &&
+ grep "Enumerating objects: $((count+1)), done" stderr &&
+ grep "pack-reused $count" stderr
+ '
+}
+
+# have_delta <obj> <expected_base>
+#
+# Note that because this relies on cat-file, it might find _any_ copy of an
+# object in the repository. The caller is responsible for making sure
+# there's only one (e.g., via "repack -ad", or having just fetched a copy).
+have_delta () {
+ echo $2 >expect &&
+ echo $1 | git cat-file --batch-check="%(deltabase)" >actual &&
+ test_cmp expect actual
+}
+
+midx_checksum () {
+ test-tool read-midx --checksum "$1"
+}
diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf
index afa91e38b0..180a41fe96 100644
--- a/t/lib-httpd/apache.conf
+++ b/t/lib-httpd/apache.conf
@@ -81,8 +81,6 @@ PassEnv GIT_TRACE
PassEnv GIT_CONFIG_NOSYSTEM
PassEnv GIT_TEST_SIDEBAND_ALL
-SetEnvIf Git-Protocol ".*" GIT_PROTOCOL=$0
-
Alias /dumb/ www/
Alias /auth/dumb/ www/auth/dumb/
@@ -117,6 +115,11 @@ Alias /auth/dumb/ www/auth/dumb/
SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
SetEnv GIT_HTTP_EXPORT_ALL
</LocationMatch>
+<LocationMatch /smart_v0/>
+ SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
+ SetEnv GIT_HTTP_EXPORT_ALL
+ SetEnv GIT_PROTOCOL
+</LocationMatch>
ScriptAlias /smart/incomplete_length/git-upload-pack incomplete-length-upload-pack-v2-http.sh/
ScriptAlias /smart/incomplete_body/git-upload-pack incomplete-body-upload-pack-v2-http.sh/
ScriptAliasMatch /error_git_upload_pack/(.*)/git-upload-pack error.sh/
diff --git a/t/lib-subtest.sh b/t/lib-subtest.sh
new file mode 100644
index 0000000000..56ee927f0c
--- /dev/null
+++ b/t/lib-subtest.sh
@@ -0,0 +1,95 @@
+write_sub_test_lib_test () {
+ name="$1" # stdin is the body of the test code
+ mkdir "$name" &&
+ write_script "$name/$name.sh" "$TEST_SHELL_PATH" <<-EOF &&
+ test_description='A test of test-lib.sh itself'
+
+ # Point to the t/test-lib.sh, which isn't in ../ as usual
+ . "\$TEST_DIRECTORY"/test-lib.sh
+ EOF
+ cat >>"$name/$name.sh"
+}
+
+_run_sub_test_lib_test_common () {
+ cmp_op="$1" want_code="$2" name="$3" # stdin is the body of the test code
+ shift 3
+
+ # intercept pseudo-options at the front of the argument list that we
+ # will not pass to the child script
+ skip=
+ while test $# -gt 0
+ do
+ case "$1" in
+ --skip=*)
+ skip=${1#--*=}
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+
+ (
+ cd "$name" &&
+
+ # Pretend we're not running under a test harness, whether we
+ # are or not. The test-lib output depends on the setting of
+ # this variable, so we need a stable setting under which to run
+ # the sub-test.
+ sane_unset HARNESS_ACTIVE &&
+
+ export TEST_DIRECTORY &&
+ # The child test re-sources GIT-BUILD-OPTIONS and may thus
+ # override the test output directory. We thus pass it as an
+ # explicit override to the child.
+ TEST_OUTPUT_DIRECTORY_OVERRIDE=$(pwd) &&
+ export TEST_OUTPUT_DIRECTORY_OVERRIDE &&
+ GIT_SKIP_TESTS=$skip &&
+ export GIT_SKIP_TESTS &&
+ sane_unset GIT_TEST_FAIL_PREREQS &&
+ ./"$name.sh" "$@" >out 2>err;
+ ret=$? &&
+ test "$ret" "$cmp_op" "$want_code"
+ )
+}
+
+write_and_run_sub_test_lib_test () {
+ name="$1" descr="$2" # stdin is the body of the test code
+ write_sub_test_lib_test "$@" || return 1
+ _run_sub_test_lib_test_common -eq 0 "$@"
+}
+
+write_and_run_sub_test_lib_test_err () {
+ name="$1" descr="$2" # stdin is the body of the test code
+ write_sub_test_lib_test "$@" || return 1
+ _run_sub_test_lib_test_common -eq 1 "$@"
+}
+
+run_sub_test_lib_test () {
+ _run_sub_test_lib_test_common -eq 0 "$@"
+}
+
+run_sub_test_lib_test_err () {
+ _run_sub_test_lib_test_common -eq 1 "$@"
+}
+
+_check_sub_test_lib_test_common () {
+ name="$1" &&
+ sed -e 's/^> //' -e 's/Z$//' >"$name"/expect.out &&
+ test_cmp "$name"/expect.out "$name"/out
+}
+
+check_sub_test_lib_test () {
+ name="$1" # stdin is the expected output from the test
+ _check_sub_test_lib_test_common "$name" &&
+ test_must_be_empty "$name"/err
+}
+
+check_sub_test_lib_test_err () {
+ name="$1" # stdin is the expected output from the test
+ _check_sub_test_lib_test_common "$name" &&
+ # expected error output is in descriptor 3
+ sed -e 's/^> //' -e 's/Z$//' <&3 >"$name"/expect.err &&
+ test_cmp "$name"/expect.err "$name"/err
+}
diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl
index 82c0df4553..575d2000cc 100755
--- a/t/perf/aggregate.perl
+++ b/t/perf/aggregate.perl
@@ -17,8 +17,8 @@ sub get_times {
my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
return ($rt, $4, $5);
# size
- } elsif ($line =~ /^\d+$/) {
- return $&;
+ } elsif ($line =~ /^\s*(\d+)$/) {
+ return $1;
} else {
die "bad input line: $line";
}
diff --git a/t/perf/lib-bitmap.sh b/t/perf/lib-bitmap.sh
new file mode 100644
index 0000000000..63d3bc7cec
--- /dev/null
+++ b/t/perf/lib-bitmap.sh
@@ -0,0 +1,69 @@
+# Helper functions for testing bitmap performance; see p5310.
+
+test_full_bitmap () {
+ test_perf 'simulated clone' '
+ git pack-objects --stdout --all </dev/null >/dev/null
+ '
+
+ test_perf 'simulated fetch' '
+ have=$(git rev-list HEAD~100 -1) &&
+ {
+ echo HEAD &&
+ echo ^$have
+ } | git pack-objects --revs --stdout >/dev/null
+ '
+
+ test_perf 'pack to file (bitmap)' '
+ git pack-objects --use-bitmap-index --all pack1b </dev/null >/dev/null
+ '
+
+ test_perf 'rev-list (commits)' '
+ git rev-list --all --use-bitmap-index >/dev/null
+ '
+
+ test_perf 'rev-list (objects)' '
+ git rev-list --all --use-bitmap-index --objects >/dev/null
+ '
+
+ test_perf 'rev-list with tag negated via --not --all (objects)' '
+ git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null
+ '
+
+ test_perf 'rev-list with negative tag (objects)' '
+ git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null
+ '
+
+ test_perf 'rev-list count with blob:none' '
+ git rev-list --use-bitmap-index --count --objects --all \
+ --filter=blob:none >/dev/null
+ '
+
+ test_perf 'rev-list count with blob:limit=1k' '
+ git rev-list --use-bitmap-index --count --objects --all \
+ --filter=blob:limit=1k >/dev/null
+ '
+
+ test_perf 'rev-list count with tree:0' '
+ git rev-list --use-bitmap-index --count --objects --all \
+ --filter=tree:0 >/dev/null
+ '
+
+ test_perf 'simulated partial clone' '
+ git pack-objects --stdout --all --filter=blob:none </dev/null >/dev/null
+ '
+}
+
+test_partial_bitmap () {
+ test_perf 'clone (partial bitmap)' '
+ git pack-objects --stdout --all </dev/null >/dev/null
+ '
+
+ test_perf 'pack to file (partial bitmap)' '
+ git pack-objects --use-bitmap-index --all pack2b </dev/null >/dev/null
+ '
+
+ test_perf 'rev-list with tree filter (partial bitmap)' '
+ git rev-list --use-bitmap-index --count --objects --all \
+ --filter=tree:0 >/dev/null
+ '
+}
diff --git a/t/perf/p3400-rebase.sh b/t/perf/p3400-rebase.sh
index 7a0bb29448..43d5a34e8c 100755
--- a/t/perf/p3400-rebase.sh
+++ b/t/perf/p3400-rebase.sh
@@ -18,7 +18,7 @@ test_expect_success 'setup rebasing on top of a lot of changes' '
test_tick &&
git commit -m commit$i unrelated-file$i &&
echo change$i >unrelated-file$i &&
- test_seq 1000 | tac >>unrelated-file$i &&
+ test_seq 1000 | sort -nr >>unrelated-file$i &&
git add unrelated-file$i &&
test_tick &&
git commit -m commit$i-reverse unrelated-file$i ||
diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh
index 452be01056..7ad4f237bc 100755
--- a/t/perf/p5310-pack-bitmaps.sh
+++ b/t/perf/p5310-pack-bitmaps.sh
@@ -2,6 +2,7 @@
test_description='Tests pack performance using bitmaps'
. ./perf-lib.sh
+. "${TEST_DIRECTORY}/perf/lib-bitmap.sh"
test_perf_large_repo
@@ -25,56 +26,7 @@ test_perf 'repack to disk' '
git repack -ad
'
-test_perf 'simulated clone' '
- git pack-objects --stdout --all </dev/null >/dev/null
-'
-
-test_perf 'simulated fetch' '
- have=$(git rev-list HEAD~100 -1) &&
- {
- echo HEAD &&
- echo ^$have
- } | git pack-objects --revs --stdout >/dev/null
-'
-
-test_perf 'pack to file (bitmap)' '
- git pack-objects --use-bitmap-index --all pack1b </dev/null >/dev/null
-'
-
-test_perf 'rev-list (commits)' '
- git rev-list --all --use-bitmap-index >/dev/null
-'
-
-test_perf 'rev-list (objects)' '
- git rev-list --all --use-bitmap-index --objects >/dev/null
-'
-
-test_perf 'rev-list with tag negated via --not --all (objects)' '
- git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null
-'
-
-test_perf 'rev-list with negative tag (objects)' '
- git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null
-'
-
-test_perf 'rev-list count with blob:none' '
- git rev-list --use-bitmap-index --count --objects --all \
- --filter=blob:none >/dev/null
-'
-
-test_perf 'rev-list count with blob:limit=1k' '
- git rev-list --use-bitmap-index --count --objects --all \
- --filter=blob:limit=1k >/dev/null
-'
-
-test_perf 'rev-list count with tree:0' '
- git rev-list --use-bitmap-index --count --objects --all \
- --filter=tree:0 >/dev/null
-'
-
-test_perf 'simulated partial clone' '
- git pack-objects --stdout --all --filter=blob:none </dev/null >/dev/null
-'
+test_full_bitmap
test_expect_success 'create partial bitmap state' '
# pick a commit to represent the repo tip in the past
@@ -97,17 +49,6 @@ test_expect_success 'create partial bitmap state' '
git update-ref HEAD $orig_tip
'
-test_perf 'clone (partial bitmap)' '
- git pack-objects --stdout --all </dev/null >/dev/null
-'
-
-test_perf 'pack to file (partial bitmap)' '
- git pack-objects --use-bitmap-index --all pack2b </dev/null >/dev/null
-'
-
-test_perf 'rev-list with tree filter (partial bitmap)' '
- git rev-list --use-bitmap-index --count --objects --all \
- --filter=tree:0 >/dev/null
-'
+test_partial_bitmap
test_done
diff --git a/t/perf/p5326-multi-pack-bitmaps.sh b/t/perf/p5326-multi-pack-bitmaps.sh
new file mode 100755
index 0000000000..f2fa228f16
--- /dev/null
+++ b/t/perf/p5326-multi-pack-bitmaps.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+test_description='Tests performance using midx bitmaps'
+. ./perf-lib.sh
+. "${TEST_DIRECTORY}/perf/lib-bitmap.sh"
+
+test_perf_large_repo
+
+# we need to create the tag up front such that it is covered by the repack and
+# thus by generated bitmaps.
+test_expect_success 'create tags' '
+ git tag --message="tag pointing to HEAD" perf-tag HEAD
+'
+
+test_expect_success 'start with bitmapped pack' '
+ git repack -adb
+'
+
+test_perf 'setup multi-pack index' '
+ git multi-pack-index write --bitmap
+'
+
+test_expect_success 'drop pack bitmap' '
+ rm -f .git/objects/pack/pack-*.bitmap
+'
+
+test_full_bitmap
+
+test_expect_success 'create partial bitmap state' '
+ # pick a commit to represent the repo tip in the past
+ cutoff=$(git rev-list HEAD~100 -1) &&
+ orig_tip=$(git rev-parse HEAD) &&
+
+ # now pretend we have just one tip
+ rm -rf .git/logs .git/refs/* .git/packed-refs &&
+ git update-ref HEAD $cutoff &&
+
+ # and then repack, which will leave us with a nice
+ # big bitmap pack of the "old" history, and all of
+ # the new history will be loose, as if it had been pushed
+ # up incrementally and exploded via unpack-objects
+ git repack -Ad &&
+ git multi-pack-index write --bitmap &&
+
+ # and now restore our original tip, as if the pushes
+ # had happened
+ git update-ref HEAD $orig_tip
+'
+
+test_partial_bitmap
+
+test_done
diff --git a/t/perf/run b/t/perf/run
index d19dec258a..55219aa405 100755
--- a/t/perf/run
+++ b/t/perf/run
@@ -74,7 +74,7 @@ set_git_test_installed () {
mydir=$1
mydir_abs=$(cd $mydir && pwd)
- mydir_abs_wrappers="$mydir_abs_wrappers/bin-wrappers"
+ mydir_abs_wrappers="$mydir_abs/bin-wrappers"
if test -d "$mydir_abs_wrappers"
then
GIT_TEST_INSTALLED=$mydir_abs_wrappers
diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh
index cb87768513..b007f0efef 100755
--- a/t/t0000-basic.sh
+++ b/t/t0000-basic.sh
@@ -19,6 +19,7 @@ modification *should* take notice and update the test vectors here.
'
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-subtest.sh
try_local_xy () {
local x="local" y="alsolocal" &&
@@ -66,95 +67,8 @@ test_expect_success 'success is reported like this' '
:
'
-_run_sub_test_lib_test_common () {
- neg="$1" name="$2" descr="$3" # stdin is the body of the test code
- shift 3
-
- # intercept pseudo-options at the front of the argument list that we
- # will not pass to child script
- skip=
- while test $# -gt 0
- do
- case "$1" in
- --skip=*)
- skip=${1#--*=}
- shift
- ;;
- *)
- break
- ;;
- esac
- done
-
- mkdir "$name" &&
- (
- # Pretend we're not running under a test harness, whether we
- # are or not. The test-lib output depends on the setting of
- # this variable, so we need a stable setting under which to run
- # the sub-test.
- sane_unset HARNESS_ACTIVE &&
- cd "$name" &&
- write_script "$name.sh" "$TEST_SHELL_PATH" <<-EOF &&
- test_description='$descr (run in sub test-lib)
-
- This is run in a sub test-lib so that we do not get incorrect
- passing metrics
- '
-
- # Point to the t/test-lib.sh, which isn't in ../ as usual
- . "\$TEST_DIRECTORY"/test-lib.sh
- EOF
- cat >>"$name.sh" &&
- export TEST_DIRECTORY &&
- # The child test re-sources GIT-BUILD-OPTIONS and may thus
- # override the test output directory. We thus pass it as an
- # explicit override to the child.
- TEST_OUTPUT_DIRECTORY_OVERRIDE=$(pwd) &&
- export TEST_OUTPUT_DIRECTORY_OVERRIDE &&
- GIT_SKIP_TESTS=$skip &&
- export GIT_SKIP_TESTS &&
- sane_unset GIT_TEST_FAIL_PREREQS &&
- if test -z "$neg"
- then
- ./"$name.sh" "$@" >out 2>err
- else
- ! ./"$name.sh" "$@" >out 2>err
- fi
- )
-}
-
-run_sub_test_lib_test () {
- _run_sub_test_lib_test_common '' "$@"
-}
-
-run_sub_test_lib_test_err () {
- _run_sub_test_lib_test_common '!' "$@"
-}
-
-check_sub_test_lib_test () {
- name="$1" # stdin is the expected output from the test
- (
- cd "$name" &&
- test_must_be_empty err &&
- sed -e 's/^> //' -e 's/Z$//' >expect &&
- test_cmp expect out
- )
-}
-
-check_sub_test_lib_test_err () {
- name="$1" # stdin is the expected output from the test
- # expected error output is in descriptor 3
- (
- cd "$name" &&
- sed -e 's/^> //' -e 's/Z$//' >expect.out &&
- test_cmp expect.out out &&
- sed -e 's/^> //' -e 's/Z$//' <&3 >expect.err &&
- test_cmp expect.err err
- )
-}
-
-test_expect_success 'pretend we have a fully passing test suite' '
- run_sub_test_lib_test full-pass "3 passing tests" <<-\EOF &&
+test_expect_success 'subtest: 3 passing tests' '
+ write_and_run_sub_test_lib_test full-pass <<-\EOF &&
for i in 1 2 3
do
test_expect_success "passing test #$i" "true"
@@ -170,9 +84,8 @@ test_expect_success 'pretend we have a fully passing test suite' '
EOF
'
-test_expect_success 'pretend we have a partially passing test suite' '
- run_sub_test_lib_test_err \
- partial-pass "2/3 tests passing" <<-\EOF &&
+test_expect_success 'subtest: 2/3 tests passing' '
+ write_and_run_sub_test_lib_test_err partial-pass <<-\EOF &&
test_expect_success "passing test #1" "true"
test_expect_success "failing test #2" "false"
test_expect_success "passing test #3" "true"
@@ -188,8 +101,8 @@ test_expect_success 'pretend we have a partially passing test suite' '
EOF
'
-test_expect_success 'pretend we have a known breakage' '
- run_sub_test_lib_test failing-todo "A failing TODO test" <<-\EOF &&
+test_expect_success 'subtest: a failing TODO test' '
+ write_and_run_sub_test_lib_test failing-todo <<-\EOF &&
test_expect_success "passing test" "true"
test_expect_failure "pretend we have a known breakage" "false"
test_done
@@ -203,8 +116,8 @@ test_expect_success 'pretend we have a known breakage' '
EOF
'
-test_expect_success 'pretend we have fixed a known breakage' '
- run_sub_test_lib_test passing-todo "A passing TODO test" <<-\EOF &&
+test_expect_success 'subtest: a passing TODO test' '
+ write_and_run_sub_test_lib_test passing-todo <<-\EOF &&
test_expect_failure "pretend we have fixed a known breakage" "true"
test_done
EOF
@@ -215,9 +128,8 @@ test_expect_success 'pretend we have fixed a known breakage' '
EOF
'
-test_expect_success 'pretend we have fixed one of two known breakages (run in sub test-lib)' '
- run_sub_test_lib_test partially-passing-todos \
- "2 TODO tests, one passing" <<-\EOF &&
+test_expect_success 'subtest: 2 TODO tests, one passing' '
+ write_and_run_sub_test_lib_test partially-passing-todos <<-\EOF &&
test_expect_failure "pretend we have a known breakage" "false"
test_expect_success "pretend we have a passing test" "true"
test_expect_failure "pretend we have fixed another known breakage" "true"
@@ -234,9 +146,8 @@ test_expect_success 'pretend we have fixed one of two known breakages (run in su
EOF
'
-test_expect_success 'pretend we have a pass, fail, and known breakage' '
- run_sub_test_lib_test_err \
- mixed-results1 "mixed results #1" <<-\EOF &&
+test_expect_success 'subtest: mixed results: pass, failure and a TODO test' '
+ write_and_run_sub_test_lib_test_err mixed-results1 <<-\EOF &&
test_expect_success "passing test" "true"
test_expect_success "failing test" "false"
test_expect_failure "pretend we have a known breakage" "false"
@@ -253,9 +164,8 @@ test_expect_success 'pretend we have a pass, fail, and known breakage' '
EOF
'
-test_expect_success 'pretend we have a mix of all possible results' '
- run_sub_test_lib_test_err \
- mixed-results2 "mixed results #2" <<-\EOF &&
+test_expect_success 'subtest: mixed results: a mixture of all possible results' '
+ write_and_run_sub_test_lib_test_err mixed-results2 <<-\EOF &&
test_expect_success "passing test" "true"
test_expect_success "passing test" "true"
test_expect_success "passing test" "true"
@@ -289,9 +199,8 @@ test_expect_success 'pretend we have a mix of all possible results' '
EOF
'
-test_expect_success 'test --verbose' '
- run_sub_test_lib_test_err \
- t1234-verbose "test verbose" --verbose <<-\EOF &&
+test_expect_success 'subtest: --verbose option' '
+ write_and_run_sub_test_lib_test_err t1234-verbose --verbose <<-\EOF &&
test_expect_success "passing test" true
test_expect_success "test with output" "echo foo"
test_expect_success "failing test" false
@@ -316,19 +225,14 @@ test_expect_success 'test --verbose' '
EOF
'
-test_expect_success 'test --verbose-only' '
+test_expect_success 'subtest: --verbose-only option' '
run_sub_test_lib_test_err \
- t2345-verbose-only-2 "test verbose-only=2" \
- --verbose-only=2 <<-\EOF &&
- test_expect_success "passing test" true
- test_expect_success "test with output" "echo foo"
- test_expect_success "failing test" false
- test_done
- EOF
- check_sub_test_lib_test t2345-verbose-only-2 <<-\EOF
+ t1234-verbose \
+ --verbose-only=2 &&
+ check_sub_test_lib_test t1234-verbose <<-\EOF
> ok 1 - passing test
> Z
- > expecting success of 2345.2 '\''test with output'\'': echo foo
+ > expecting success of 1234.2 '\''test with output'\'': echo foo
> foo
> ok 2 - test with output
> Z
@@ -339,18 +243,11 @@ test_expect_success 'test --verbose-only' '
EOF
'
-test_expect_success 'GIT_SKIP_TESTS' '
+test_expect_success 'subtest: skip one with GIT_SKIP_TESTS' '
(
- run_sub_test_lib_test git-skip-tests-basic \
- "GIT_SKIP_TESTS" \
- --skip="git.2" <<-\EOF &&
- for i in 1 2 3
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test git-skip-tests-basic <<-\EOF
+ run_sub_test_lib_test full-pass \
+ --skip="full.2" &&
+ check_sub_test_lib_test full-pass <<-\EOF
> ok 1 - passing test #1
> ok 2 # skip passing test #2 (GIT_SKIP_TESTS)
> ok 3 - passing test #3
@@ -360,10 +257,9 @@ test_expect_success 'GIT_SKIP_TESTS' '
)
'
-test_expect_success 'GIT_SKIP_TESTS several tests' '
+test_expect_success 'subtest: skip several with GIT_SKIP_TESTS' '
(
- run_sub_test_lib_test git-skip-tests-several \
- "GIT_SKIP_TESTS several tests" \
+ write_and_run_sub_test_lib_test git-skip-tests-several \
--skip="git.2 git.5" <<-\EOF &&
for i in 1 2 3 4 5 6
do
@@ -384,18 +280,11 @@ test_expect_success 'GIT_SKIP_TESTS several tests' '
)
'
-test_expect_success 'GIT_SKIP_TESTS sh pattern' '
+test_expect_success 'subtest: sh pattern skipping with GIT_SKIP_TESTS' '
(
- run_sub_test_lib_test git-skip-tests-sh-pattern \
- "GIT_SKIP_TESTS sh pattern" \
- --skip="git.[2-5]" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test git-skip-tests-sh-pattern <<-\EOF
+ run_sub_test_lib_test git-skip-tests-several \
+ --skip="git.[2-5]" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 # skip passing test #2 (GIT_SKIP_TESTS)
> ok 3 # skip passing test #3 (GIT_SKIP_TESTS)
@@ -408,35 +297,23 @@ test_expect_success 'GIT_SKIP_TESTS sh pattern' '
)
'
-test_expect_success 'GIT_SKIP_TESTS entire suite' '
+test_expect_success 'subtest: skip entire test suite with GIT_SKIP_TESTS' '
(
- run_sub_test_lib_test git-skip-tests-entire-suite \
- "GIT_SKIP_TESTS entire suite" \
- --skip="git" <<-\EOF &&
- for i in 1 2 3
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test git-skip-tests-entire-suite <<-\EOF
+ GIT_SKIP_TESTS="git" && export GIT_SKIP_TESTS &&
+ run_sub_test_lib_test git-skip-tests-several \
+ --skip="git" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> 1..0 # SKIP skip all tests in git
EOF
)
'
-test_expect_success 'GIT_SKIP_TESTS does not skip unmatched suite' '
+test_expect_success 'subtest: GIT_SKIP_TESTS does not skip unmatched suite' '
(
- run_sub_test_lib_test git-skip-tests-unmatched-suite \
- "GIT_SKIP_TESTS does not skip unmatched suite" \
- --skip="notgit" <<-\EOF &&
- for i in 1 2 3
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test git-skip-tests-unmatched-suite <<-\EOF
+ GIT_SKIP_TESTS="notgit" && export GIT_SKIP_TESTS &&
+ run_sub_test_lib_test full-pass \
+ --skip="notfull" &&
+ check_sub_test_lib_test full-pass <<-\EOF
> ok 1 - passing test #1
> ok 2 - passing test #2
> ok 3 - passing test #3
@@ -446,16 +323,9 @@ test_expect_success 'GIT_SKIP_TESTS does not skip unmatched suite' '
)
'
-test_expect_success '--run basic' '
- run_sub_test_lib_test run-basic \
- "--run basic" --run="1,3,5" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-basic <<-\EOF
+test_expect_success 'subtest: --run basic' '
+ run_sub_test_lib_test git-skip-tests-several --run="1,3,5" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 # skip passing test #2 (--run)
> ok 3 - passing test #3
@@ -467,16 +337,10 @@ test_expect_success '--run basic' '
EOF
'
-test_expect_success '--run with a range' '
- run_sub_test_lib_test run-range \
- "--run with a range" --run="1-3" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-range <<-\EOF
+test_expect_success 'subtest: --run with a range' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="1-3" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 - passing test #2
> ok 3 - passing test #3
@@ -488,16 +352,10 @@ test_expect_success '--run with a range' '
EOF
'
-test_expect_success '--run with two ranges' '
- run_sub_test_lib_test run-two-ranges \
- "--run with two ranges" --run="1-2,5-6" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-two-ranges <<-\EOF
+test_expect_success 'subtest: --run with two ranges' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="1-2,5-6" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 - passing test #2
> ok 3 # skip passing test #3 (--run)
@@ -509,16 +367,10 @@ test_expect_success '--run with two ranges' '
EOF
'
-test_expect_success '--run with a left open range' '
- run_sub_test_lib_test run-left-open-range \
- "--run with a left open range" --run="-3" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-left-open-range <<-\EOF
+test_expect_success 'subtest: --run with a left open range' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="-3" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 - passing test #2
> ok 3 - passing test #3
@@ -530,16 +382,10 @@ test_expect_success '--run with a left open range' '
EOF
'
-test_expect_success '--run with a right open range' '
- run_sub_test_lib_test run-right-open-range \
- "--run with a right open range" --run="4-" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-right-open-range <<-\EOF
+test_expect_success 'subtest: --run with a right open range' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="4-" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 # skip passing test #1 (--run)
> ok 2 # skip passing test #2 (--run)
> ok 3 # skip passing test #3 (--run)
@@ -551,16 +397,10 @@ test_expect_success '--run with a right open range' '
EOF
'
-test_expect_success '--run with basic negation' '
- run_sub_test_lib_test run-basic-neg \
- "--run with basic negation" --run="!3" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-basic-neg <<-\EOF
+test_expect_success 'subtest: --run with basic negation' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="!3" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 - passing test #2
> ok 3 # skip passing test #3 (--run)
@@ -572,16 +412,10 @@ test_expect_success '--run with basic negation' '
EOF
'
-test_expect_success '--run with two negations' '
- run_sub_test_lib_test run-two-neg \
- "--run with two negations" --run="!3,!6" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-two-neg <<-\EOF
+test_expect_success 'subtest: --run with two negations' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="!3,!6" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 - passing test #2
> ok 3 # skip passing test #3 (--run)
@@ -593,16 +427,10 @@ test_expect_success '--run with two negations' '
EOF
'
-test_expect_success '--run a range and negation' '
- run_sub_test_lib_test run-range-and-neg \
- "--run a range and negation" --run="-4,!2" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-range-and-neg <<-\EOF
+test_expect_success 'subtest: --run a range and negation' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="-4,!2" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 # skip passing test #2 (--run)
> ok 3 - passing test #3
@@ -614,16 +442,10 @@ test_expect_success '--run a range and negation' '
EOF
'
-test_expect_success '--run range negation' '
- run_sub_test_lib_test run-range-neg \
- "--run range negation" --run="!1-3" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-range-neg <<-\EOF
+test_expect_success 'subtest: --run range negation' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="!1-3" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 # skip passing test #1 (--run)
> ok 2 # skip passing test #2 (--run)
> ok 3 # skip passing test #3 (--run)
@@ -635,17 +457,10 @@ test_expect_success '--run range negation' '
EOF
'
-test_expect_success '--run include, exclude and include' '
- run_sub_test_lib_test run-inc-neg-inc \
- "--run include, exclude and include" \
- --run="1-5,!1-3,2" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-inc-neg-inc <<-\EOF
+test_expect_success 'subtest: --run include, exclude and include' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="1-5,!1-3,2" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 # skip passing test #1 (--run)
> ok 2 - passing test #2
> ok 3 # skip passing test #3 (--run)
@@ -657,17 +472,10 @@ test_expect_success '--run include, exclude and include' '
EOF
'
-test_expect_success '--run include, exclude and include, comma separated' '
- run_sub_test_lib_test run-inc-neg-inc-comma \
- "--run include, exclude and include, comma separated" \
- --run=1-5,!1-3,2 <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-inc-neg-inc-comma <<-\EOF
+test_expect_success 'subtest: --run include, exclude and include, comma separated' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run=1-5,!1-3,2 &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 # skip passing test #1 (--run)
> ok 2 - passing test #2
> ok 3 # skip passing test #3 (--run)
@@ -679,17 +487,10 @@ test_expect_success '--run include, exclude and include, comma separated' '
EOF
'
-test_expect_success '--run exclude and include' '
- run_sub_test_lib_test run-neg-inc \
- "--run exclude and include" \
- --run="!3-,5" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-neg-inc <<-\EOF
+test_expect_success 'subtest: --run exclude and include' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="!3-,5" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 - passing test #2
> ok 3 # skip passing test #3 (--run)
@@ -701,17 +502,10 @@ test_expect_success '--run exclude and include' '
EOF
'
-test_expect_success '--run empty selectors' '
- run_sub_test_lib_test run-empty-sel \
- "--run empty selectors" \
- --run="1,,3,,,5" <<-\EOF &&
- for i in 1 2 3 4 5 6
- do
- test_expect_success "passing test #$i" "true"
- done
- test_done
- EOF
- check_sub_test_lib_test run-empty-sel <<-\EOF
+test_expect_success 'subtest: --run empty selectors' '
+ run_sub_test_lib_test git-skip-tests-several \
+ --run="1,,3,,,5" &&
+ check_sub_test_lib_test git-skip-tests-several <<-\EOF
> ok 1 - passing test #1
> ok 2 # skip passing test #2 (--run)
> ok 3 - passing test #3
@@ -723,9 +517,8 @@ test_expect_success '--run empty selectors' '
EOF
'
-test_expect_success '--run substring selector' '
- run_sub_test_lib_test run-substring-selector \
- "--run empty selectors" \
+test_expect_success 'subtest: --run substring selector' '
+ write_and_run_sub_test_lib_test run-substring-selector \
--run="relevant" <<-\EOF &&
test_expect_success "relevant test" "true"
for i in 1 2 3 4 5 6
@@ -747,9 +540,8 @@ test_expect_success '--run substring selector' '
EOF
'
-test_expect_success '--run keyword selection' '
- run_sub_test_lib_test_err run-inv-range-start \
- "--run invalid range start" \
+test_expect_success 'subtest: --run keyword selection' '
+ write_and_run_sub_test_lib_test_err run-inv-range-start \
--run="a-5" <<-\EOF &&
test_expect_success "passing test #1" "true"
test_done
@@ -762,14 +554,10 @@ test_expect_success '--run keyword selection' '
EOF_ERR
'
-test_expect_success '--run invalid range end' '
- run_sub_test_lib_test_err run-inv-range-end \
- "--run invalid range end" \
- --run="1-z" <<-\EOF &&
- test_expect_success "passing test #1" "true"
- test_done
- EOF
- check_sub_test_lib_test_err run-inv-range-end \
+test_expect_success 'subtest: --run invalid range end' '
+ run_sub_test_lib_test_err run-inv-range-start \
+ --run="1-z" &&
+ check_sub_test_lib_test_err run-inv-range-start \
<<-\EOF_OUT 3<<-EOF_ERR
> FATAL: Unexpected exit with code 1
EOF_OUT
@@ -777,8 +565,8 @@ test_expect_success '--run invalid range end' '
EOF_ERR
'
-test_expect_success 'tests respect prerequisites' '
- run_sub_test_lib_test prereqs "tests respect prereqs" <<-\EOF &&
+test_expect_success 'subtest: tests respect prerequisites' '
+ write_and_run_sub_test_lib_test prereqs <<-\EOF &&
test_set_prereq HAVEIT
test_expect_success HAVEIT "prereq is satisfied" "true"
@@ -807,8 +595,8 @@ test_expect_success 'tests respect prerequisites' '
EOF
'
-test_expect_success 'tests respect lazy prerequisites' '
- run_sub_test_lib_test lazy-prereqs "respect lazy prereqs" <<-\EOF &&
+test_expect_success 'subtest: tests respect lazy prerequisites' '
+ write_and_run_sub_test_lib_test lazy-prereqs <<-\EOF &&
test_lazy_prereq LAZY_TRUE true
test_expect_success LAZY_TRUE "lazy prereq is satisifed" "true"
@@ -831,8 +619,8 @@ test_expect_success 'tests respect lazy prerequisites' '
EOF
'
-test_expect_success 'nested lazy prerequisites' '
- run_sub_test_lib_test nested-lazy "nested lazy prereqs" <<-\EOF &&
+test_expect_success 'subtest: nested lazy prerequisites' '
+ write_and_run_sub_test_lib_test nested-lazy <<-\EOF &&
test_lazy_prereq NESTED_INNER "
>inner &&
@@ -857,9 +645,9 @@ test_expect_success 'nested lazy prerequisites' '
EOF
'
-test_expect_success 'lazy prereqs do not turn off tracing' '
- run_sub_test_lib_test lazy-prereq-and-tracing \
- "lazy prereqs and -x" -v -x <<-\EOF &&
+test_expect_success 'subtest: lazy prereqs do not turn off tracing' '
+ write_and_run_sub_test_lib_test lazy-prereq-and-tracing \
+ -v -x <<-\EOF &&
test_lazy_prereq LAZY true
test_expect_success lazy "test_have_prereq LAZY && echo trace"
@@ -870,8 +658,8 @@ test_expect_success 'lazy prereqs do not turn off tracing' '
grep "echo trace" lazy-prereq-and-tracing/err
'
-test_expect_success 'tests clean up after themselves' '
- run_sub_test_lib_test cleanup "test with cleanup" <<-\EOF &&
+test_expect_success 'subtest: tests clean up after themselves' '
+ write_and_run_sub_test_lib_test cleanup <<-\EOF &&
clean=no
test_expect_success "do cleanup" "
test_when_finished clean=yes
@@ -890,9 +678,9 @@ test_expect_success 'tests clean up after themselves' '
EOF
'
-test_expect_success 'tests clean up even on failures' '
- run_sub_test_lib_test_err \
- failing-cleanup "Failing tests with cleanup commands" <<-\EOF &&
+test_expect_success 'subtest: tests clean up even on failures' '
+ write_and_run_sub_test_lib_test_err \
+ failing-cleanup <<-\EOF &&
test_expect_success "tests clean up even after a failure" "
touch clean-after-failure &&
test_when_finished rm clean-after-failure &&
@@ -919,9 +707,9 @@ test_expect_success 'tests clean up even on failures' '
EOF
'
-test_expect_success 'test_atexit is run' '
- run_sub_test_lib_test_err \
- atexit-cleanup "Run atexit commands" -i <<-\EOF &&
+test_expect_success 'subtest: test_atexit is run' '
+ write_and_run_sub_test_lib_test_err \
+ atexit-cleanup -i <<-\EOF &&
test_expect_success "tests clean up even after a failure" "
> ../../clean-atexit &&
test_atexit rm ../../clean-atexit &&
@@ -1271,28 +1059,29 @@ P=$(test_oid root)
test_expect_success 'git commit-tree records the correct tree in a commit' '
commit0=$(echo NO | git commit-tree $P) &&
- tree=$(git show --pretty=raw $commit0 |
- sed -n -e "s/^tree //p" -e "/^author /q") &&
+ git show --pretty=raw $commit0 >out &&
+ tree=$(sed -n -e "s/^tree //p" -e "/^author /q" out) &&
test "z$tree" = "z$P"
'
test_expect_success 'git commit-tree records the correct parent in a commit' '
commit1=$(echo NO | git commit-tree $P -p $commit0) &&
- parent=$(git show --pretty=raw $commit1 |
- sed -n -e "s/^parent //p" -e "/^author /q") &&
+ git show --pretty=raw $commit1 >out &&
+ parent=$(sed -n -e "s/^parent //p" -e "/^author /q" out) &&
test "z$commit0" = "z$parent"
'
test_expect_success 'git commit-tree omits duplicated parent in a commit' '
commit2=$(echo NO | git commit-tree $P -p $commit0 -p $commit0) &&
- parent=$(git show --pretty=raw $commit2 |
- sed -n -e "s/^parent //p" -e "/^author /q" |
- sort -u) &&
+ git show --pretty=raw $commit2 >out &&
+ cat >match.sed <<-\EOF &&
+ s/^parent //p
+ /^author /q
+ EOF
+ parent=$(sed -n -f match.sed out | sort -u) &&
test "z$commit0" = "z$parent" &&
- numparent=$(git show --pretty=raw $commit2 |
- sed -n -e "s/^parent //p" -e "/^author /q" |
- wc -l) &&
- test $numparent = 1
+ git show --pretty=raw $commit2 >out &&
+ test_stdout_line_count = 1 sed -n -f match.sed out
'
test_expect_success 'update-index D/F conflict' '
diff --git a/t/t0004-unwritable.sh b/t/t0004-unwritable.sh
index e3137d638e..37d68ef03b 100755
--- a/t/t0004-unwritable.sh
+++ b/t/t0004-unwritable.sh
@@ -2,6 +2,7 @@
test_description='detect unwritable repository and fail correctly'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
@@ -21,7 +22,7 @@ test_expect_success POSIXPERM,SANITY 'write-tree should notice unwritable reposi
test_must_fail git write-tree
'
-test_expect_success POSIXPERM,SANITY 'commit should notice unwritable repository' '
+test_expect_success POSIXPERM,SANITY,!SANITIZE_LEAK 'commit should notice unwritable repository' '
test_when_finished "chmod 775 .git/objects .git/objects/??" &&
chmod a-w .git/objects .git/objects/?? &&
test_must_fail git commit -m second
diff --git a/t/t0011-hashmap.sh b/t/t0011-hashmap.sh
index 5343ffd3f9..e094975b13 100755
--- a/t/t0011-hashmap.sh
+++ b/t/t0011-hashmap.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test hashmap and string hash functions'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_hashmap() {
diff --git a/t/t0012-help.sh b/t/t0012-help.sh
index 60d713021f..91b68c74a1 100755
--- a/t/t0012-help.sh
+++ b/t/t0012-help.sh
@@ -85,6 +85,22 @@ test_expect_success 'git help -g' '
test_i18ngrep "^ tutorial " help.output
'
+test_expect_success 'git help fails for non-existing html pages' '
+ configure_help &&
+ mkdir html-empty &&
+ test_must_fail git -c help.htmlpath=html-empty help status &&
+ test_must_be_empty test-browser.log
+'
+
+test_expect_success 'git help succeeds without git.html' '
+ configure_help &&
+ mkdir html-with-docs &&
+ touch html-with-docs/git-status.html &&
+ git -c help.htmlpath=html-with-docs help status &&
+ echo "html-with-docs/git-status.html" >expect &&
+ test_cmp expect test-browser.log
+'
+
test_expect_success 'git help -c' '
git help -c >help.output &&
cat >expect <<-\EOF &&
diff --git a/t/t0016-oidmap.sh b/t/t0016-oidmap.sh
index 31f8276ba8..0faef1f4f1 100755
--- a/t/t0016-oidmap.sh
+++ b/t/t0016-oidmap.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test oidmap'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# This purposefully is very similar to t0011-hashmap.sh
diff --git a/t/t0017-env-helper.sh b/t/t0017-env-helper.sh
index 4a159f99e4..2e42fba956 100755
--- a/t/t0017-env-helper.sh
+++ b/t/t0017-env-helper.sh
@@ -2,6 +2,7 @@
test_description='test env--helper'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
diff --git a/t/t0018-advice.sh b/t/t0018-advice.sh
index 39e5e4b34f..c13057a4ca 100755
--- a/t/t0018-advice.sh
+++ b/t/t0018-advice.sh
@@ -2,6 +2,7 @@
test_description='Test advise_if_enabled functionality'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'advice should be printed when config variable is unset' '
diff --git a/t/t0030-stripspace.sh b/t/t0030-stripspace.sh
index 0c24a0f9a3..ae1ca380c1 100755
--- a/t/t0030-stripspace.sh
+++ b/t/t0030-stripspace.sh
@@ -5,6 +5,7 @@
test_description='git stripspace'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
t40='A quick brown fox jumps over the lazy do'
diff --git a/t/t0040-parse-options.sh b/t/t0040-parse-options.sh
index ad4746d899..da310ed29b 100755
--- a/t/t0040-parse-options.sh
+++ b/t/t0040-parse-options.sh
@@ -37,7 +37,6 @@ String options
--list <str> add str to list
Magic arguments
- --quux means --quux
-NUM set integer to NUM
+ same as -b
--ambiguous positive ambiguity
@@ -263,10 +262,6 @@ test_expect_success 'detect possible typos' '
test_cmp typo.err output.err
'
-test_expect_success 'keep some options as arguments' '
- test-tool parse-options --expect="arg 00: --quux" --quux
-'
-
cat >expect <<\EOF
Callback: "four", 0
boolean: 5
diff --git a/t/t0063-string-list.sh b/t/t0063-string-list.sh
index c6ee9f66b1..46d4839194 100755
--- a/t/t0063-string-list.sh
+++ b/t/t0063-string-list.sh
@@ -5,6 +5,7 @@
test_description='Test string list functionality'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_split () {
diff --git a/t/t0090-cache-tree.sh b/t/t0090-cache-tree.sh
index 9bf66c9e68..9067572648 100755
--- a/t/t0090-cache-tree.sh
+++ b/t/t0090-cache-tree.sh
@@ -195,6 +195,7 @@ test_expect_success 'reset --hard gives cache-tree' '
test_expect_success 'reset --hard without index gives cache-tree' '
rm -f .git/index &&
+ git clean -fd &&
git reset --hard &&
test_cache_tree
'
diff --git a/t/t0091-bugreport.sh b/t/t0091-bugreport.sh
index 526304ff95..eeedbfa919 100755
--- a/t/t0091-bugreport.sh
+++ b/t/t0091-bugreport.sh
@@ -2,6 +2,7 @@
test_description='git bugreport'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Headers "[System Info]" will be followed by a non-empty line if we put some
diff --git a/t/t0301-credential-cache.sh b/t/t0301-credential-cache.sh
index ebd5fa5249..698b7159f0 100755
--- a/t/t0301-credential-cache.sh
+++ b/t/t0301-credential-cache.sh
@@ -9,6 +9,21 @@ test -z "$NO_UNIX_SOCKETS" || {
test_done
}
+uname_s=$(uname -s)
+case $uname_s in
+*MINGW*)
+ test_path_is_socket () {
+ # `test -S` cannot detect Win10's Unix sockets
+ test_path_exists "$1"
+ }
+ ;;
+*)
+ test_path_is_socket () {
+ test -S "$1"
+ }
+ ;;
+esac
+
# don't leave a stale daemon running
test_atexit 'git credential-cache exit'
@@ -21,7 +36,7 @@ test_expect_success 'socket defaults to ~/.cache/git/credential/socket' '
rmdir -p .cache/git/credential/
" &&
test_path_is_missing "$HOME/.git-credential-cache" &&
- test -S "$HOME/.cache/git/credential/socket"
+ test_path_is_socket "$HOME/.cache/git/credential/socket"
'
XDG_CACHE_HOME="$HOME/xdg"
@@ -31,7 +46,7 @@ helper_test cache
test_expect_success "use custom XDG_CACHE_HOME if set and default sockets are not created" '
test_when_finished "git credential-cache exit" &&
- test -S "$XDG_CACHE_HOME/git/credential/socket" &&
+ test_path_is_socket "$XDG_CACHE_HOME/git/credential/socket" &&
test_path_is_missing "$HOME/.git-credential-cache/socket" &&
test_path_is_missing "$HOME/.cache/git/credential/socket"
'
@@ -48,7 +63,7 @@ test_expect_success 'credential-cache --socket option overrides default location
username=store-user
password=store-pass
EOF
- test -S "$HOME/dir/socket"
+ test_path_is_socket "$HOME/dir/socket"
'
test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" '
@@ -62,7 +77,7 @@ test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" '
username=store-user
password=store-pass
EOF
- test -S "$HOME/.cache/git/credential/socket" &&
+ test_path_is_socket "$HOME/.cache/git/credential/socket" &&
XDG_CACHE_HOME="$HOME/xdg" &&
export XDG_CACHE_HOME &&
check approve cache <<-\EOF &&
@@ -71,7 +86,7 @@ test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" '
username=store-user
password=store-pass
EOF
- test -S "$XDG_CACHE_HOME/git/credential/socket"
+ test_path_is_socket "$XDG_CACHE_HOME/git/credential/socket"
'
test_expect_success 'use user socket if user directory exists' '
@@ -79,14 +94,15 @@ test_expect_success 'use user socket if user directory exists' '
git credential-cache exit &&
rmdir \"\$HOME/.git-credential-cache/\"
" &&
- mkdir -p -m 700 "$HOME/.git-credential-cache/" &&
+ mkdir -p "$HOME/.git-credential-cache/" &&
+ chmod 700 "$HOME/.git-credential-cache/" &&
check approve cache <<-\EOF &&
protocol=https
host=example.com
username=store-user
password=store-pass
EOF
- test -S "$HOME/.git-credential-cache/socket"
+ test_path_is_socket "$HOME/.git-credential-cache/socket"
'
test_expect_success SYMLINKS 'use user socket if user directory is a symlink to a directory' '
@@ -103,7 +119,7 @@ test_expect_success SYMLINKS 'use user socket if user directory is a symlink to
username=store-user
password=store-pass
EOF
- test -S "$HOME/.git-credential-cache/socket"
+ test_path_is_socket "$HOME/.git-credential-cache/socket"
'
helper_test_timeout cache --timeout=1
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
index a211a66c67..bba679685f 100755
--- a/t/t0410-partial-clone.sh
+++ b/t/t0410-partial-clone.sh
@@ -4,6 +4,9 @@ test_description='partial clone'
. ./test-lib.sh
+# missing promisor objects cause repacks which write bitmaps to fail
+GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0
+
delete_object () {
rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|')
}
@@ -536,7 +539,13 @@ test_expect_success 'gc does not repack promisor objects if there are none' '
repack_and_check () {
rm -rf repo2 &&
cp -r repo repo2 &&
- git -C repo2 repack $1 -d &&
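+ # An optional leading "--must-fail" makes us expect this repack
+ # invocation itself to fail; the fsck/cat-file checks below still run.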
+ if test x"$1" = "x--must-fail"
+ then
+ shift
+ test_must_fail git -C repo2 repack $1 -d
+ else
+ git -C repo2 repack $1 -d
+ fi &&
git -C repo2 fsck &&
git -C repo2 cat-file -e $2 &&
@@ -561,6 +570,7 @@ test_expect_success 'repack -d does not irreversibly delete promisor objects' '
printf "$THREE\n" | pack_as_from_promisor &&
delete_object repo "$ONE" &&
+ repack_and_check --must-fail -ab "$TWO" "$THREE" &&
repack_and_check -a "$TWO" "$THREE" &&
repack_and_check -A "$TWO" "$THREE" &&
repack_and_check -l "$TWO" "$THREE"
diff --git a/t/t1013-read-tree-submodule.sh b/t/t1013-read-tree-submodule.sh
index b6df7444c0..bfc90d4cf2 100755
--- a/t/t1013-read-tree-submodule.sh
+++ b/t/t1013-read-tree-submodule.sh
@@ -6,7 +6,6 @@ test_description='read-tree can handle submodules'
. "$TEST_DIRECTORY"/lib-submodule-update.sh
KNOWN_FAILURE_DIRECTORY_SUBMODULE_CONFLICTS=1
-KNOWN_FAILURE_SUBMODULE_OVERWRITE_IGNORED_UNTRACKED=1
test_submodule_switch_recursing_with_args "read-tree -u -m"
diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh
index 38fc8340f5..272ba1b566 100755
--- a/t/t1091-sparse-checkout-builtin.sh
+++ b/t/t1091-sparse-checkout-builtin.sh
@@ -206,16 +206,21 @@ test_expect_success 'sparse-checkout disable' '
'
test_expect_success 'sparse-index enabled and disabled' '
- git -C repo sparse-checkout init --cone --sparse-index &&
- test_cmp_config -C repo true index.sparse &&
- test-tool -C repo read-cache --table >cache &&
- grep " tree " cache &&
-
- git -C repo sparse-checkout disable &&
- test-tool -C repo read-cache --table >cache &&
- ! grep " tree " cache &&
- git -C repo config --list >config &&
- ! grep index.sparse config
+ (
+ sane_unset GIT_TEST_SPLIT_INDEX &&
+ git -C repo update-index --no-split-index &&
+
+ git -C repo sparse-checkout init --cone --sparse-index &&
+ test_cmp_config -C repo true index.sparse &&
+ test-tool -C repo read-cache --table >cache &&
+ grep " tree " cache &&
+
+ git -C repo sparse-checkout disable &&
+ test-tool -C repo read-cache --table >cache &&
+ ! grep " tree " cache &&
+ git -C repo config --list >config &&
+ ! grep index.sparse config
+ )
'
test_expect_success 'cone mode: init and set' '
@@ -406,7 +411,7 @@ test_expect_success 'sparse-checkout (init|set|disable) warns with unmerged stat
git -C unmerged sparse-checkout disable
'
-test_expect_success 'sparse-checkout reapply' '
+test_expect_failure 'sparse-checkout reapply' '
git clone repo tweak &&
echo dirty >tweak/deep/deeper2/a &&
@@ -438,6 +443,8 @@ test_expect_success 'sparse-checkout reapply' '
test_i18ngrep "warning.*The following paths are unmerged" err &&
test_path_is_file tweak/folder1/a &&
+ # NEEDSWORK: We are asking to update a file outside of the
+ # sparse-checkout cone, but this is no longer allowed.
git -C tweak add folder1/a &&
git -C tweak sparse-checkout reapply 2>err &&
test_must_be_empty err &&
@@ -642,4 +649,63 @@ test_expect_success MINGW 'cone mode replaces backslashes with slashes' '
check_files repo/deep a deeper1
'
+test_expect_success 'cone mode clears ignored subdirectories' '
+ rm repo/.git/info/sparse-checkout &&
+
+ git -C repo sparse-checkout init --cone &&
+ git -C repo sparse-checkout set deep/deeper1 &&
+
+ cat >repo/.gitignore <<-\EOF &&
+ obj/
+ *.o
+ EOF
+
+ git -C repo add .gitignore &&
+ git -C repo commit -m ".gitignore" &&
+
+ mkdir -p repo/obj repo/folder1/obj repo/deep/deeper2/obj &&
+ for file in folder1/obj/a obj/a folder1/file.o folder1.o \
+ deep/deeper2/obj/a deep/deeper2/file.o file.o
+ do
+ echo ignored >repo/$file || return 1
+ done &&
+
+ git -C repo status --porcelain=v2 >out &&
+ test_must_be_empty out &&
+
+ git -C repo sparse-checkout reapply &&
+ test_path_is_missing repo/folder1 &&
+ test_path_is_missing repo/deep/deeper2 &&
+ test_path_is_dir repo/obj &&
+ test_path_is_file repo/file.o &&
+
+ git -C repo status --porcelain=v2 >out &&
+ test_must_be_empty out &&
+
+ git -C repo sparse-checkout set deep/deeper2 &&
+ test_path_is_missing repo/deep/deeper1 &&
+ test_path_is_dir repo/deep/deeper2 &&
+ test_path_is_dir repo/obj &&
+ test_path_is_file repo/file.o &&
+
+ >repo/deep/deeper2/ignored.o &&
+ >repo/deep/deeper2/untracked &&
+
+ # When an untracked file is in the way, all untracked files
+ # (even ignored files) are preserved.
+ git -C repo sparse-checkout set folder1 2>err &&
+ grep "contains untracked files" err &&
+ test_path_is_file repo/deep/deeper2/ignored.o &&
+ test_path_is_file repo/deep/deeper2/untracked &&
+
+ # The rest of the cone matches expectation
+ test_path_is_missing repo/deep/deeper1 &&
+ test_path_is_dir repo/obj &&
+ test_path_is_file repo/file.o &&
+
+ git -C repo status --porcelain=v2 >out &&
+ echo "? deep/deeper2/untracked" >expect &&
+ test_cmp expect out
+'
+
test_done
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index ddc86bb415..ca91c6a67f 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -47,7 +47,7 @@ test_expect_success 'setup' '
git checkout -b base &&
for dir in folder1 folder2 deep
do
- git checkout -b update-$dir &&
+ git checkout -b update-$dir base &&
echo "updated $dir" >$dir/a &&
git commit -a -m "update $dir" || return 1
done &&
@@ -187,6 +187,16 @@ test_sparse_match () {
test_cmp sparse-checkout-err sparse-index-err
}
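+# Verify that "$1" ended up with no staged change (aside from a possible
+# unmerged entry) in either the sparse-checkout or sparse-index repository.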
+test_sparse_unstaged () {
+ file=$1 &&
+ for repo in sparse-checkout sparse-index
+ do
+ # Skip "unmerged" paths
+ git -C $repo diff --staged --diff-filter=u -- "$file" >diff &&
+ test_must_be_empty diff || return 1
+ done
+}
+
test_expect_success 'sparse-index contents' '
init_repos &&
@@ -291,6 +301,20 @@ test_expect_success 'add, commit, checkout' '
test_all_match git checkout -
'
+test_expect_success 'add outside sparse cone' '
+ init_repos &&
+
+ run_on_sparse mkdir folder1 &&
+ run_on_sparse ../edit-contents folder1/a &&
+ run_on_sparse ../edit-contents folder1/newfile &&
+ test_sparse_match test_must_fail git add folder1/a &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder1/a &&
+ test_sparse_match test_must_fail git add folder1/newfile &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder1/newfile
+'
+
test_expect_success 'commit including unstaged changes' '
init_repos &&
@@ -339,18 +363,24 @@ test_expect_success 'status/add: outside sparse cone' '
# Adding the path outside of the sparse-checkout cone should fail.
test_sparse_match test_must_fail git add folder1/a &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder1/a &&
test_sparse_match test_must_fail git add --refresh folder1/a &&
-
- # NEEDSWORK: Adding a newly-tracked file outside the cone succeeds
- test_sparse_match git add folder1/new &&
-
- test_all_match git add . &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder1/a &&
+ test_sparse_match test_must_fail git add folder1/new &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder1/new &&
+ test_sparse_match git add --sparse folder1/a &&
+ test_sparse_match git add --sparse folder1/new &&
+
+ test_all_match git add --sparse . &&
test_all_match git status --porcelain=v2 &&
test_all_match git commit -m folder1/new &&
test_all_match git rev-parse HEAD^{tree} &&
run_on_all ../edit-contents folder1/newer &&
- test_all_match git add folder1/ &&
+ test_all_match git add --sparse folder1/ &&
test_all_match git status --porcelain=v2 &&
test_all_match git commit -m folder1/newer &&
test_all_match git rev-parse HEAD^{tree}
@@ -481,21 +511,19 @@ test_expect_success 'checkout and reset (mixed) [sparse]' '
test_sparse_match git reset update-folder2
'
-test_expect_success 'merge' '
+test_expect_success 'merge, cherry-pick, and rebase' '
init_repos &&
- test_all_match git checkout -b merge update-deep &&
- test_all_match git merge -m "folder1" update-folder1 &&
- test_all_match git rev-parse HEAD^{tree} &&
- test_all_match git merge -m "folder2" update-folder2 &&
- test_all_match git rev-parse HEAD^{tree}
+ for OPERATION in "merge -m merge" cherry-pick rebase
+ do
+ test_all_match git checkout -B temp update-deep &&
+ test_all_match git $OPERATION update-folder1 &&
+ test_all_match git rev-parse HEAD^{tree} &&
+ test_all_match git $OPERATION update-folder2 &&
+ test_all_match git rev-parse HEAD^{tree} || return 1
+ done
'
-# NEEDSWORK: This test is documenting current behavior, but that
-# behavior can be confusing to users so there is desire to change it.
-# Right now, users might be using this flow to work through conflicts,
-# so any solution should present advice to users who try this sequence
-# of commands to follow whatever new method we create.
test_expect_success 'merge with conflict outside cone' '
init_repos &&
@@ -510,13 +538,19 @@ test_expect_success 'merge with conflict outside cone' '
test_all_match git status --porcelain=v2 &&
# 2. Add the file with conflict markers
- test_all_match git add folder1/a &&
+ test_sparse_match test_must_fail git add folder1/a &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder1/a &&
+ test_all_match git add --sparse folder1/a &&
test_all_match git status --porcelain=v2 &&
# 3. Rename the file to another sparse filename and
# accept conflict markers as resolved content.
run_on_all mv folder2/a folder2/z &&
- test_all_match git add folder2 &&
+ test_sparse_match test_must_fail git add folder2 &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder2/z &&
+ test_all_match git add --sparse folder2 &&
test_all_match git status --porcelain=v2 &&
test_all_match git merge --continue &&
@@ -524,6 +558,50 @@ test_expect_success 'merge with conflict outside cone' '
test_all_match git rev-parse HEAD^{tree}
'
+test_expect_success 'cherry-pick/rebase with conflict outside cone' '
+ init_repos &&
+
+ for OPERATION in cherry-pick rebase
+ do
+ test_all_match git checkout -B tip &&
+ test_all_match git reset --hard merge-left &&
+ test_all_match git status --porcelain=v2 &&
+ test_all_match test_must_fail git $OPERATION merge-right &&
+ test_all_match git status --porcelain=v2 &&
+
+ # Resolve the conflict in different ways:
+ # 1. Revert to the base
+ test_all_match git checkout base -- deep/deeper2/a &&
+ test_all_match git status --porcelain=v2 &&
+
+ # 2. Add the file with conflict markers
+ # NEEDSWORK: Even though the merge conflict removed the
+ # SKIP_WORKTREE bit from the index entry for folder1/a, we should
+ # warn that this is a problematic add.
+ test_sparse_match test_must_fail git add folder1/a &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder1/a &&
+ test_all_match git add --sparse folder1/a &&
+ test_all_match git status --porcelain=v2 &&
+
+ # 3. Rename the file to another sparse filename and
+ # accept conflict markers as resolved content.
+ # NEEDSWORK: This mode now fails, because folder2/z is
+ # outside of the sparse-checkout cone and does not match an
+ # existing index entry with the SKIP_WORKTREE bit cleared.
+ run_on_all mv folder2/a folder2/z &&
+ test_sparse_match test_must_fail git add folder2 &&
+ grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_sparse_unstaged folder2/z &&
+ test_all_match git add --sparse folder2 &&
+ test_all_match git status --porcelain=v2 &&
+
+ test_all_match git $OPERATION --continue &&
+ test_all_match git status --porcelain=v2 &&
+ test_all_match git rev-parse HEAD^{tree} || return 1
+ done
+'
+
test_expect_success 'merge with outside renames' '
init_repos &&
@@ -591,6 +669,7 @@ test_expect_success 'clean' '
test_expect_success 'submodule handling' '
init_repos &&
+ test_sparse_match git sparse-checkout add modules &&
test_all_match mkdir modules &&
test_all_match touch modules/a &&
test_all_match git add modules &&
@@ -600,6 +679,7 @@ test_expect_success 'submodule handling' '
test_all_match git commit -m "add submodule" &&
# having a submodule prevents "modules" from collapse
+ test_sparse_match git sparse-checkout set deep/deeper1 &&
test-tool -C sparse-index read-cache --table >cache &&
grep "100644 blob .* modules/a" cache &&
grep "160000 commit $(git -C initial-repo rev-parse HEAD) modules/sub" cache
@@ -617,8 +697,17 @@ test_expect_success 'sparse-index is expanded and converted back' '
ensure_not_expanded () {
rm -f trace2.txt &&
echo >>sparse-index/untracked.txt &&
- GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \
- git -C sparse-index "$@" &&
+
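+ # A leading "!" means the git invocation itself is expected to fail;
+ # either way, the index must not be expanded (checked via the trace).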
+ if test "$1" = "!"
+ then
+ shift &&
+ test_must_fail env \
+ GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \
+ git -C sparse-index "$@" || return 1
+ else
+ GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \
+ git -C sparse-index "$@" || return 1
+ fi &&
test_region ! index ensure_full_index trace2.txt
}
@@ -647,7 +736,35 @@ test_expect_success 'sparse-index is not expanded' '
echo >>sparse-index/extra.txt &&
ensure_not_expanded add extra.txt &&
echo >>sparse-index/untracked.txt &&
- ensure_not_expanded add .
+ ensure_not_expanded add . &&
+
+ ensure_not_expanded checkout -f update-deep &&
+ test_config -C sparse-index pull.twohead ort &&
+ (
+ sane_unset GIT_TEST_MERGE_ALGORITHM &&
+ for OPERATION in "merge -m merge" cherry-pick rebase
+ do
+ ensure_not_expanded merge -m merge update-folder1 &&
+ ensure_not_expanded merge -m merge update-folder2 || return 1
+ done
+ )
+'
+
+test_expect_success 'sparse-index is not expanded: merge conflict in cone' '
+ init_repos &&
+
+ for side in right left
+ do
+ git -C sparse-index checkout -b expand-$side base &&
+ echo $side >sparse-index/deep/a &&
+ git -C sparse-index commit -a -m "$side" || return 1
+ done &&
+
+ (
+ sane_unset GIT_TEST_MERGE_ALGORITHM &&
+ git -C sparse-index config pull.twohead ort &&
+ ensure_not_expanded ! merge -m merged expand-right
+ )
'
# NEEDSWORK: a sparse-checkout behaves differently from a full checkout
diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh
index 4506cd435b..0d4f73acaa 100755
--- a/t/t1400-update-ref.sh
+++ b/t/t1400-update-ref.sh
@@ -1598,6 +1598,40 @@ test_expect_success 'transaction cannot restart ongoing transaction' '
test_must_fail git show-ref --verify refs/heads/restart
'
+test_expect_success PIPE 'transaction flushes status updates' '
+ mkfifo in out &&
+ (git update-ref --stdin <in >out &) &&
+
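+ # Talk to the backgrounded update-ref through the fifos: fd 9 writes
+ # commands into "in", fd 8 reads the status replies from "out".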
+ exec 9>in &&
+ exec 8<out &&
+ test_when_finished "exec 9>&-" &&
+ test_when_finished "exec 8<&-" &&
+
+ echo "start" >&9 &&
+ echo "start: ok" >expected &&
+ read line <&8 &&
+ echo "$line" >actual &&
+ test_cmp expected actual &&
+
+ echo "create refs/heads/flush $A" >&9 &&
+
+ echo prepare >&9 &&
+ echo "prepare: ok" >expected &&
+ read line <&8 &&
+ echo "$line" >actual &&
+ test_cmp expected actual &&
+
+ # This must now fail given that we have locked the ref.
+ test_must_fail git update-ref refs/heads/flush $B 2>stderr &&
+ grep "fatal: update_ref failed for ref ${SQ}refs/heads/flush${SQ}: cannot lock ref" stderr &&
+
+ echo commit >&9 &&
+ echo "commit: ok" >expected &&
+ read line <&8 &&
+ echo "$line" >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'directory not created deleting packed ref' '
git branch d1/d2/r1 HEAD &&
git pack-refs --all &&
diff --git a/t/t1430-bad-ref-name.sh b/t/t1430-bad-ref-name.sh
index b1839e0877..fa3aeb80f2 100755
--- a/t/t1430-bad-ref-name.sh
+++ b/t/t1430-bad-ref-name.sh
@@ -170,7 +170,7 @@ test_expect_success 'for-each-ref emits warnings for broken names' '
! grep -e "badname" output &&
! grep -e "broken\.\.\.symref" output &&
test_i18ngrep "ignoring ref with broken name refs/heads/broken\.\.\.ref" error &&
- test_i18ngrep "ignoring broken ref refs/heads/badname" error &&
+ test_i18ngrep ! "ignoring broken ref refs/heads/badname" error &&
test_i18ngrep "ignoring ref with broken name refs/heads/broken\.\.\.symref" error
'
diff --git a/t/t1600-index.sh b/t/t1600-index.sh
index c9b9e718b8..46329c488b 100755
--- a/t/t1600-index.sh
+++ b/t/t1600-index.sh
@@ -4,6 +4,8 @@ test_description='index file specific tests'
. ./test-lib.sh
+sane_unset GIT_TEST_SPLIT_INDEX
+
test_expect_success 'setup' '
echo 1 >a
'
@@ -13,7 +15,8 @@ test_expect_success 'bogus GIT_INDEX_VERSION issues warning' '
rm -f .git/index &&
GIT_INDEX_VERSION=2bogus &&
export GIT_INDEX_VERSION &&
- git add a 2>&1 | sed "s/[0-9]//" >actual.err &&
+ git add a 2>err &&
+ sed "s/[0-9]//" err >actual.err &&
sed -e "s/ Z$/ /" <<-\EOF >expect.err &&
warning: GIT_INDEX_VERSION set, but the value is invalid.
Using version Z
@@ -27,7 +30,8 @@ test_expect_success 'out of bounds GIT_INDEX_VERSION issues warning' '
rm -f .git/index &&
GIT_INDEX_VERSION=1 &&
export GIT_INDEX_VERSION &&
- git add a 2>&1 | sed "s/[0-9]//" >actual.err &&
+ git add a 2>err &&
+ sed "s/[0-9]//" err >actual.err &&
sed -e "s/ Z$/ /" <<-\EOF >expect.err &&
warning: GIT_INDEX_VERSION set, but the value is invalid.
Using version Z
@@ -50,7 +54,8 @@ test_expect_success 'out of bounds index.version issues warning' '
sane_unset GIT_INDEX_VERSION &&
rm -f .git/index &&
git config --add index.version 1 &&
- git add a 2>&1 | sed "s/[0-9]//" >actual.err &&
+ git add a 2>err &&
+ sed "s/[0-9]//" err >actual.err &&
sed -e "s/ Z$/ /" <<-\EOF >expect.err &&
warning: index.version set, but the value is invalid.
Using version Z
@@ -79,7 +84,7 @@ test_index_version () {
else
unset GIT_INDEX_VERSION
fi &&
- git add a 2>&1 &&
+ git add a &&
echo $EXPECTED_OUTPUT_VERSION >expect &&
test-tool index-version <.git/index >actual &&
test_cmp expect actual
diff --git a/t/t1700-split-index.sh b/t/t1700-split-index.sh
index 986baa612e..decd2527ed 100755
--- a/t/t1700-split-index.sh
+++ b/t/t1700-split-index.sh
@@ -510,4 +510,38 @@ test_expect_success 'do not refresh null base index' '
)
'
+test_expect_success 'reading split index at alternate location' '
+ git init reading-alternate-location &&
+ (
+ cd reading-alternate-location &&
+ >file-in-alternate &&
+ git update-index --split-index --add file-in-alternate
+ ) &&
+ echo file-in-alternate >expect &&
+
+ # Should be able to find the shared index both right next to
+ # the specified split index file ...
+ GIT_INDEX_FILE=./reading-alternate-location/.git/index \
+ git ls-files --cached >actual &&
+ test_cmp expect actual &&
+
+ # ... and, for backwards compatibility, in the current GIT_DIR
+ # as well.
+ mv -v ./reading-alternate-location/.git/sharedindex.* .git &&
+ GIT_INDEX_FILE=./reading-alternate-location/.git/index \
+ git ls-files --cached >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'GIT_TEST_SPLIT_INDEX works' '
+ git init git-test-split-index &&
+ (
+ cd git-test-split-index &&
+ >file &&
+ GIT_TEST_SPLIT_INDEX=1 git update-index --add file &&
+ ls -l .git/sharedindex.* >actual &&
+ test_line_count = 1 actual
+ )
+'
+
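Editor's note: GIT_TEST_SPLIT_INDEX=1 is the suite-wide knob that forces the split-index code; outside the tests the same layout can be produced directly. A rough sketch (repository name is made up):

	git init split-demo &&
	cd split-demo &&
	echo content >file &&
	git update-index --split-index --add file &&
	ls .git/sharedindex.*    # the shared part sits next to .git/index

Setting core.splitIndex=true should have the same effect for ordinary commands such as git add.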
test_done
diff --git a/t/t2021-checkout-overwrite.sh b/t/t2021-checkout-overwrite.sh
index 70d69263e6..660132ff8d 100755
--- a/t/t2021-checkout-overwrite.sh
+++ b/t/t2021-checkout-overwrite.sh
@@ -48,6 +48,7 @@ test_expect_success 'checkout commit with dir must not remove untracked a/b' '
test_expect_success SYMLINKS 'the symlink remained' '
+ test_when_finished "rm a/b" &&
test -h a/b
'
diff --git a/t/t2500-untracked-overwriting.sh b/t/t2500-untracked-overwriting.sh
new file mode 100755
index 0000000000..5c0bf4d21f
--- /dev/null
+++ b/t/t2500-untracked-overwriting.sh
@@ -0,0 +1,244 @@
+#!/bin/sh
+
+test_description='Test handling of overwriting untracked files'
+
+. ./test-lib.sh
+
+test_setup_reset () {
+ git init reset_$1 &&
+ (
+ cd reset_$1 &&
+ test_commit init &&
+
+ git branch stable &&
+ git branch work &&
+
+ git checkout work &&
+ test_commit foo &&
+
+ git checkout stable
+ )
+}
+
+test_expect_success 'reset --hard will nuke untracked files/dirs' '
+ test_setup_reset hard &&
+ (
+ cd reset_hard &&
+ git ls-tree -r stable &&
+ git log --all --name-status --oneline &&
+ git ls-tree -r work &&
+
+ mkdir foo.t &&
+ echo precious >foo.t/file &&
+ echo foo >expect &&
+
+ git reset --hard work &&
+
+		# Check that the untracked directory foo.t/ was nuked and
+		# replaced by the tracked file foo.t from the "work" branch
+ test_path_is_file foo.t &&
+ test_cmp expect foo.t
+ )
+'
+
+test_expect_success 'reset --merge will preserve untracked files/dirs' '
+ test_setup_reset merge &&
+ (
+ cd reset_merge &&
+
+ mkdir foo.t &&
+ echo precious >foo.t/file &&
+ cp foo.t/file expect &&
+
+ test_must_fail git reset --merge work 2>error &&
+ test_cmp expect foo.t/file &&
+ grep "Updating .foo.t. would lose untracked files" error
+ )
+'
+
+test_expect_success 'reset --keep will preserve untracked files/dirs' '
+ test_setup_reset keep &&
+ (
+ cd reset_keep &&
+
+ mkdir foo.t &&
+ echo precious >foo.t/file &&
+ cp foo.t/file expect &&
+
+		test_must_fail git reset --keep work 2>error &&
+ test_cmp expect foo.t/file &&
+ grep "Updating.*foo.t.*would lose untracked files" error
+ )
+'
+
+test_setup_checkout_m () {
+ git init checkout &&
+ (
+ cd checkout &&
+ test_commit init &&
+
+ test_write_lines file has some >filler &&
+ git add filler &&
+ git commit -m filler &&
+
+ git branch stable &&
+
+ git switch -c work &&
+ echo stuff >notes.txt &&
+ test_write_lines file has some words >filler &&
+ git add notes.txt filler &&
+ git commit -m filler &&
+
+ git checkout stable
+ )
+}
+
+test_expect_success 'checkout -m does not nuke untracked file' '
+ test_setup_checkout_m &&
+ (
+ cd checkout &&
+
+ # Tweak filler
+ test_write_lines this file has some >filler &&
+ # Make an untracked file, save its contents in "expect"
+ echo precious >notes.txt &&
+ cp notes.txt expect &&
+
+ test_must_fail git checkout -m work &&
+ test_cmp expect notes.txt
+ )
+'
+
+test_setup_sequencing () {
+ git init sequencing_$1 &&
+ (
+ cd sequencing_$1 &&
+ test_commit init &&
+
+ test_write_lines this file has some words >filler &&
+ git add filler &&
+ git commit -m filler &&
+
+ mkdir -p foo/bar &&
+ test_commit foo/bar/baz &&
+
+ git branch simple &&
+ git branch fooey &&
+
+ git checkout fooey &&
+ git rm foo/bar/baz.t &&
+ echo stuff >>filler &&
+ git add -u &&
+ git commit -m "changes" &&
+
+ git checkout simple &&
+ echo items >>filler &&
+ echo newstuff >>newfile &&
+ git add filler newfile &&
+ git commit -m another
+ )
+}
+
+test_expect_success 'git rebase --abort and untracked files' '
+ test_setup_sequencing rebase_abort_and_untracked &&
+ (
+ cd sequencing_rebase_abort_and_untracked &&
+ git checkout fooey &&
+ test_must_fail git rebase simple &&
+
+ cat init.t &&
+ git rm init.t &&
+ echo precious >init.t &&
+ cp init.t expect &&
+ git status --porcelain &&
+ test_must_fail git rebase --abort &&
+ test_cmp expect init.t
+ )
+'
+
+test_expect_success 'git rebase fast forwarding and untracked files' '
+ test_setup_sequencing rebase_fast_forward_and_untracked &&
+ (
+ cd sequencing_rebase_fast_forward_and_untracked &&
+ git checkout init &&
+ echo precious >filler &&
+ cp filler expect &&
+ test_must_fail git rebase init simple &&
+ test_cmp expect filler
+ )
+'
+
+test_expect_failure 'git rebase --autostash and untracked files' '
+ test_setup_sequencing rebase_autostash_and_untracked &&
+ (
+ cd sequencing_rebase_autostash_and_untracked &&
+ git checkout simple &&
+ git rm filler &&
+ mkdir filler &&
+ echo precious >filler/file &&
+ cp filler/file expect &&
+ git rebase --autostash init &&
+ test_path_is_file filler/file
+ )
+'
+
+test_expect_failure 'git stash and untracked files' '
+ test_setup_sequencing stash_and_untracked_files &&
+ (
+ cd sequencing_stash_and_untracked_files &&
+ git checkout simple &&
+ git rm filler &&
+ mkdir filler &&
+ echo precious >filler/file &&
+ cp filler/file expect &&
+ git status --porcelain &&
+ git stash push &&
+ git status --porcelain &&
+ test_path_is_file filler/file
+ )
+'
+
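Editor's note: the two preceding tests are deliberately test_expect_failure: they document that "git stash" and "git rebase --autostash" still clobber the untracked directory, without failing the suite. A sketch of the convention:

	test_expect_failure 'documents a known breakage' '
		test_path_is_file filler/file
	'

Once the underlying bug is fixed, such a test is flipped back to test_expect_success so regressions are caught.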
+test_expect_success 'git am --abort and untracked dir vs. unmerged file' '
+ test_setup_sequencing am_abort_and_untracked &&
+ (
+ cd sequencing_am_abort_and_untracked &&
+ git format-patch -1 --stdout fooey >changes.mbox &&
+ test_must_fail git am --3way changes.mbox &&
+
+ # Delete the conflicted file; we will stage and commit it later
+ rm filler &&
+
+ # Put an unrelated untracked directory there
+ mkdir filler &&
+ echo foo >filler/file1 &&
+ echo bar >filler/file2 &&
+
+ test_must_fail git am --abort 2>errors &&
+ test_path_is_dir filler &&
+ grep "Updating .filler. would lose untracked files in it" errors
+ )
+'
+
+test_expect_success 'git am --skip and untracked dir vs deleted file' '
+ test_setup_sequencing am_skip_and_untracked &&
+ (
+ cd sequencing_am_skip_and_untracked &&
+ git checkout fooey &&
+ git format-patch -1 --stdout simple >changes.mbox &&
+ test_must_fail git am --3way changes.mbox &&
+
+ # Delete newfile
+ rm newfile &&
+
+ # Put an unrelated untracked directory there
+ mkdir newfile &&
+ echo foo >newfile/file1 &&
+ echo bar >newfile/file2 &&
+
+ # Change our mind about resolutions, just skip this patch
+ test_must_fail git am --skip 2>errors &&
+ test_path_is_dir newfile &&
+ grep "Updating .newfile. would lose untracked files in it" errors
+ )
+'
+
+test_done
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index d877872e8f..972ce026bb 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -297,6 +297,7 @@ test_expect_success 'abort with error when new base cannot be checked out' '
output &&
test_i18ngrep "file1" output &&
test_path_is_missing .git/rebase-merge &&
+ rm file1 &&
git reset --hard HEAD^
'
diff --git a/t/t3407-rebase-abort.sh b/t/t3407-rebase-abort.sh
index 7c381fbc89..ebbaed147a 100755
--- a/t/t3407-rebase-abort.sh
+++ b/t/t3407-rebase-abort.sh
@@ -7,77 +7,77 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
-### Test that we handle space characters properly
-work_dir="$(pwd)/test dir"
-
test_expect_success setup '
- mkdir -p "$work_dir" &&
- cd "$work_dir" &&
- git init &&
- echo a > a &&
- git add a &&
- git commit -m a &&
+ test_commit a a a &&
git branch to-rebase &&
- echo b > a &&
- git commit -a -m b &&
- echo c > a &&
- git commit -a -m c &&
+ test_commit --annotate b a b &&
+ test_commit --annotate c a c &&
git checkout to-rebase &&
- echo d > a &&
- git commit -a -m "merge should fail on this" &&
- echo e > a &&
- git commit -a -m "merge should fail on this, too" &&
- git branch pre-rebase
+ test_commit "merge should fail on this" a d d &&
+ test_commit --annotate "merge should fail on this, too" a e pre-rebase
'
+# Check that HEAD is equal to "pre-rebase" and the current branch is
+# "to-rebase"
+check_head() {
+ test_cmp_rev HEAD pre-rebase^{commit} &&
+ test "$(git symbolic-ref HEAD)" = refs/heads/to-rebase
+}
+
testrebase() {
type=$1
- dotest=$2
+ state_dir=$2
test_expect_success "rebase$type --abort" '
- cd "$work_dir" &&
# Clean up the state from the previous one
git reset --hard pre-rebase &&
test_must_fail git rebase$type main &&
- test_path_is_dir "$dotest" &&
+ test_path_is_dir "$state_dir" &&
git rebase --abort &&
- test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) &&
- test ! -d "$dotest"
+ check_head &&
+ test_path_is_missing "$state_dir"
'
test_expect_success "rebase$type --abort after --skip" '
- cd "$work_dir" &&
# Clean up the state from the previous one
git reset --hard pre-rebase &&
test_must_fail git rebase$type main &&
- test_path_is_dir "$dotest" &&
+ test_path_is_dir "$state_dir" &&
test_must_fail git rebase --skip &&
- test $(git rev-parse HEAD) = $(git rev-parse main) &&
+ test_cmp_rev HEAD main &&
git rebase --abort &&
- test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) &&
- test ! -d "$dotest"
+ check_head &&
+ test_path_is_missing "$state_dir"
'
test_expect_success "rebase$type --abort after --continue" '
- cd "$work_dir" &&
# Clean up the state from the previous one
git reset --hard pre-rebase &&
test_must_fail git rebase$type main &&
- test_path_is_dir "$dotest" &&
+ test_path_is_dir "$state_dir" &&
echo c > a &&
echo d >> a &&
git add a &&
test_must_fail git rebase --continue &&
- test $(git rev-parse HEAD) != $(git rev-parse main) &&
+ test_cmp_rev ! HEAD main &&
+ git rebase --abort &&
+ check_head &&
+ test_path_is_missing "$state_dir"
+ '
+
+ test_expect_success "rebase$type --abort when checking out a tag" '
+ test_when_finished "git symbolic-ref HEAD refs/heads/to-rebase" &&
+ git reset --hard a -- &&
+ test_must_fail git rebase$type --onto b c pre-rebase &&
+ test_cmp_rev HEAD b^{commit} &&
git rebase --abort &&
- test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) &&
- test ! -d "$dotest"
+ test_cmp_rev HEAD pre-rebase^{commit} &&
+ ! git symbolic-ref HEAD
'
test_expect_success "rebase$type --abort does not update reflog" '
- cd "$work_dir" &&
# Clean up the state from the previous one
git reset --hard pre-rebase &&
git reflog show to-rebase > reflog_before &&
@@ -89,7 +89,6 @@ testrebase() {
'
test_expect_success 'rebase --abort can not be used with other options' '
- cd "$work_dir" &&
# Clean up the state from the previous one
git reset --hard pre-rebase &&
test_must_fail git rebase$type main &&
@@ -97,33 +96,21 @@ testrebase() {
test_must_fail git rebase --abort -v &&
git rebase --abort
'
+
+ test_expect_success "rebase$type --quit" '
+ test_when_finished "git symbolic-ref HEAD refs/heads/to-rebase" &&
+ # Clean up the state from the previous one
+ git reset --hard pre-rebase &&
+ test_must_fail git rebase$type main &&
+ test_path_is_dir $state_dir &&
+ head_before=$(git rev-parse HEAD) &&
+ git rebase --quit &&
+ test_cmp_rev HEAD $head_before &&
+ test_path_is_missing .git/rebase-apply
+ '
}
testrebase " --apply" .git/rebase-apply
testrebase " --merge" .git/rebase-merge
-test_expect_success 'rebase --apply --quit' '
- cd "$work_dir" &&
- # Clean up the state from the previous one
- git reset --hard pre-rebase &&
- test_must_fail git rebase --apply main &&
- test_path_is_dir .git/rebase-apply &&
- head_before=$(git rev-parse HEAD) &&
- git rebase --quit &&
- test $(git rev-parse HEAD) = $head_before &&
- test ! -d .git/rebase-apply
-'
-
-test_expect_success 'rebase --merge --quit' '
- cd "$work_dir" &&
- # Clean up the state from the previous one
- git reset --hard pre-rebase &&
- test_must_fail git rebase --merge main &&
- test_path_is_dir .git/rebase-merge &&
- head_before=$(git rev-parse HEAD) &&
- git rebase --quit &&
- test $(git rev-parse HEAD) = $head_before &&
- test ! -d .git/rebase-merge
-'
-
test_done
diff --git a/t/t3435-rebase-gpg-sign.sh b/t/t3435-rebase-gpg-sign.sh
index ec10766858..5f8ba2c739 100755
--- a/t/t3435-rebase-gpg-sign.sh
+++ b/t/t3435-rebase-gpg-sign.sh
@@ -65,6 +65,7 @@ test_rebase_gpg_sign ! true -i --gpg-sign --no-gpg-sign
test_rebase_gpg_sign false -i --no-gpg-sign --gpg-sign
test_expect_failure 'rebase -p --no-gpg-sign override commit.gpgsign' '
+ test_when_finished "git clean -f" &&
git reset --hard merged &&
git config commit.gpgsign true &&
git rebase -p --no-gpg-sign --onto=one fork-point main &&
diff --git a/t/t3510-cherry-pick-sequence.sh b/t/t3510-cherry-pick-sequence.sh
index 49010aa946..3b0fa66c33 100755
--- a/t/t3510-cherry-pick-sequence.sh
+++ b/t/t3510-cherry-pick-sequence.sh
@@ -238,6 +238,7 @@ test_expect_success 'allow skipping commit but not abort for a new history' '
'
test_expect_success 'allow skipping stopped cherry-pick because of untracked file modifications' '
+ test_when_finished "rm unrelated" &&
pristine_detach initial &&
git rm --cached unrelated &&
git commit -m "untrack unrelated" &&
diff --git a/t/t3602-rm-sparse-checkout.sh b/t/t3602-rm-sparse-checkout.sh
index e9e9a15c74..ecce497a9c 100755
--- a/t/t3602-rm-sparse-checkout.sh
+++ b/t/t3602-rm-sparse-checkout.sh
@@ -11,12 +11,15 @@ test_expect_success 'setup' "
git commit -m files &&
cat >sparse_error_header <<-EOF &&
- The following pathspecs didn't match any eligible path, but they do match index
- entries outside the current sparse checkout:
+ The following paths and/or pathspecs matched paths that exist
+ outside of your sparse-checkout definition, so will not be
+ updated in the index:
EOF
cat >sparse_hint <<-EOF &&
- hint: Disable or modify the sparsity rules if you intend to update such entries.
+ hint: If you intend to update such entries, try one of the following:
+ hint: * Use the --sparse option.
+ hint: * Disable or modify the sparsity rules.
hint: Disable this message with \"git config advice.updateSparsePath false\"
EOF
@@ -37,9 +40,25 @@ done
test_expect_success 'recursive rm does not remove sparse entries' '
git reset --hard &&
git sparse-checkout set sub/dir &&
- git rm -r sub &&
+ test_must_fail git rm -r sub &&
+ git rm --sparse -r sub &&
git status --porcelain -uno >actual &&
- echo "D sub/dir/e" >expected &&
+ cat >expected <<-\EOF &&
+ D sub/d
+ D sub/dir/e
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'recursive rm --sparse removes sparse entries' '
+ git reset --hard &&
+ git sparse-checkout set "sub/dir" &&
+ git rm --sparse -r sub &&
+ git status --porcelain -uno >actual &&
+ cat >expected <<-\EOF &&
+ D sub/d
+ D sub/dir/e
+ EOF
test_cmp expected actual
'
@@ -75,4 +94,15 @@ test_expect_success 'do not warn about sparse entries with --ignore-unmatch' '
git ls-files --error-unmatch b
'
+test_expect_success 'refuse to rm a non-skip-worktree path outside sparse cone' '
+ git reset --hard &&
+ git sparse-checkout set a &&
+ git update-index --no-skip-worktree b &&
+ test_must_fail git rm b 2>stderr &&
+ test_cmp b_error_and_hint stderr &&
+ git rm --sparse b 2>stderr &&
+ test_must_be_empty stderr &&
+ test_path_is_missing b
+'
+
test_done
diff --git a/t/t3705-add-sparse-checkout.sh b/t/t3705-add-sparse-checkout.sh
index 2b1fd0d0ee..5b904988d4 100755
--- a/t/t3705-add-sparse-checkout.sh
+++ b/t/t3705-add-sparse-checkout.sh
@@ -19,6 +19,7 @@ setup_sparse_entry () {
fi &&
git add sparse_entry &&
git update-index --skip-worktree sparse_entry &&
+ git commit --allow-empty -m "ensure sparse_entry exists at HEAD" &&
SPARSE_ENTRY_BLOB=$(git rev-parse :sparse_entry)
}
@@ -36,14 +37,22 @@ setup_gitignore () {
EOF
}
+test_sparse_entry_unstaged () {
+ git diff --staged -- sparse_entry >diff &&
+ test_must_be_empty diff
+}
+
test_expect_success 'setup' "
cat >sparse_error_header <<-EOF &&
- The following pathspecs didn't match any eligible path, but they do match index
- entries outside the current sparse checkout:
+ The following paths and/or pathspecs matched paths that exist
+ outside of your sparse-checkout definition, so will not be
+ updated in the index:
EOF
cat >sparse_hint <<-EOF &&
- hint: Disable or modify the sparsity rules if you intend to update such entries.
+ hint: If you intend to update such entries, try one of the following:
+ hint: * Use the --sparse option.
+ hint: * Disable or modify the sparsity rules.
hint: Disable this message with \"git config advice.updateSparsePath false\"
EOF
@@ -55,6 +64,7 @@ test_expect_success 'git add does not remove sparse entries' '
setup_sparse_entry &&
rm sparse_entry &&
test_must_fail git add sparse_entry 2>stderr &&
+ test_sparse_entry_unstaged &&
test_cmp error_and_hint stderr &&
test_sparse_entry_unchanged
'
@@ -73,6 +83,7 @@ test_expect_success 'git add . does not remove sparse entries' '
rm sparse_entry &&
setup_gitignore &&
test_must_fail git add . 2>stderr &&
+ test_sparse_entry_unstaged &&
cat sparse_error_header >expect &&
echo . >>expect &&
@@ -88,6 +99,7 @@ do
setup_sparse_entry &&
echo modified >sparse_entry &&
test_must_fail git add $opt sparse_entry 2>stderr &&
+ test_sparse_entry_unstaged &&
test_cmp error_and_hint stderr &&
test_sparse_entry_unchanged
'
@@ -98,6 +110,7 @@ test_expect_success 'git add --refresh does not update sparse entries' '
git ls-files --debug sparse_entry | grep mtime >before &&
test-tool chmtime -60 sparse_entry &&
test_must_fail git add --refresh sparse_entry 2>stderr &&
+ test_sparse_entry_unstaged &&
test_cmp error_and_hint stderr &&
git ls-files --debug sparse_entry | grep mtime >after &&
test_cmp before after
@@ -106,6 +119,7 @@ test_expect_success 'git add --refresh does not update sparse entries' '
test_expect_success 'git add --chmod does not update sparse entries' '
setup_sparse_entry &&
test_must_fail git add --chmod=+x sparse_entry 2>stderr &&
+ test_sparse_entry_unstaged &&
test_cmp error_and_hint stderr &&
test_sparse_entry_unchanged &&
! test -x sparse_entry
@@ -116,6 +130,7 @@ test_expect_success 'git add --renormalize does not update sparse entries' '
setup_sparse_entry "LINEONE\r\nLINETWO\r\n" &&
echo "sparse_entry text=auto" >.gitattributes &&
test_must_fail git add --renormalize sparse_entry 2>stderr &&
+ test_sparse_entry_unstaged &&
test_cmp error_and_hint stderr &&
test_sparse_entry_unchanged
'
@@ -124,6 +139,7 @@ test_expect_success 'git add --dry-run --ignore-missing warn on sparse path' '
setup_sparse_entry &&
rm sparse_entry &&
test_must_fail git add --dry-run --ignore-missing sparse_entry 2>stderr &&
+ test_sparse_entry_unstaged &&
test_cmp error_and_hint stderr &&
test_sparse_entry_unchanged
'
@@ -145,11 +161,57 @@ test_expect_success 'do not warn when pathspec matches dense entries' '
git ls-files --error-unmatch dense_entry
'
+test_expect_success 'git add fails outside of sparse-checkout definition' '
+ test_when_finished git sparse-checkout disable &&
+ test_commit a &&
+ git sparse-checkout init &&
+ git sparse-checkout set a &&
+ echo >>sparse_entry &&
+
+ git update-index --no-skip-worktree sparse_entry &&
+ test_must_fail git add sparse_entry &&
+ test_sparse_entry_unstaged &&
+
+ test_must_fail git add --chmod=+x sparse_entry &&
+ test_sparse_entry_unstaged &&
+
+ test_must_fail git add --renormalize sparse_entry &&
+ test_sparse_entry_unstaged &&
+
+	# Use core.autocrlf=input so the CRLFs are not munged, which would
+	# otherwise produce a warning
+ git -c core.autocrlf=input add --sparse sparse_entry 2>stderr &&
+ test_must_be_empty stderr &&
+ test-tool read-cache --table >actual &&
+ grep "^100644 blob.*sparse_entry\$" actual &&
+
+ git add --sparse --chmod=+x sparse_entry 2>stderr &&
+ test_must_be_empty stderr &&
+ test-tool read-cache --table >actual &&
+ grep "^100755 blob.*sparse_entry\$" actual &&
+
+ git reset &&
+
+ # This will print a message over stderr on Windows.
+ git add --sparse --renormalize sparse_entry &&
+ git status --porcelain >actual &&
+ grep "^M sparse_entry\$" actual
+'
+
test_expect_success 'add obeys advice.updateSparsePath' '
setup_sparse_entry &&
test_must_fail git -c advice.updateSparsePath=false add sparse_entry 2>stderr &&
+ test_sparse_entry_unstaged &&
test_cmp sparse_entry_error stderr
'
+test_expect_success 'add allows sparse entries with --sparse' '
+ git sparse-checkout set a &&
+ echo modified >sparse_entry &&
+ test_must_fail git add sparse_entry &&
+ test_sparse_entry_unchanged &&
+ git add --sparse sparse_entry 2>stderr &&
+ test_must_be_empty stderr
+'
+
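Editor's note: taken together with the t3602 changes above, the new --sparse option is the escape hatch for touching index entries outside the sparse-checkout cone. A rough usage sketch (paths are made up):

	git sparse-checkout set in-cone &&
	echo update >outside/file &&                # outside the cone, still in the index
	git add outside/file ||                     # refused with the hint shown above
	git add --sparse outside/file &&            # explicitly update the sparse entry
	git config advice.updateSparsePath false    # or silence the hint entirely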
test_done
diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh
index 873aa56e35..f0a82be9de 100755
--- a/t/t3903-stash.sh
+++ b/t/t3903-stash.sh
@@ -1307,4 +1307,62 @@ test_expect_success 'stash -c stash.useBuiltin=false warning ' '
test_must_be_empty err
'
+test_expect_success 'git stash succeeds despite directory/file change' '
+ test_create_repo directory_file_switch_v1 &&
+ (
+ cd directory_file_switch_v1 &&
+ test_commit init &&
+
+ test_write_lines this file has some words >filler &&
+ git add filler &&
+ git commit -m filler &&
+
+ git rm filler &&
+ mkdir filler &&
+ echo contents >filler/file &&
+ git stash push
+ )
+'
+
+test_expect_success 'git stash can pop file -> directory saved changes' '
+ test_create_repo directory_file_switch_v2 &&
+ (
+ cd directory_file_switch_v2 &&
+ test_commit init &&
+
+ test_write_lines this file has some words >filler &&
+ git add filler &&
+ git commit -m filler &&
+
+ git rm filler &&
+ mkdir filler &&
+ echo contents >filler/file &&
+ cp filler/file expect &&
+ git stash push --include-untracked &&
+ git stash apply --index &&
+ test_cmp expect filler/file
+ )
+'
+
+test_expect_success 'git stash can pop directory -> file saved changes' '
+ test_create_repo directory_file_switch_v3 &&
+ (
+ cd directory_file_switch_v3 &&
+ test_commit init &&
+
+ mkdir filler &&
+ test_write_lines some words >filler/file1 &&
+ test_write_lines and stuff >filler/file2 &&
+ git add filler &&
+ git commit -m filler &&
+
+ git rm -rf filler &&
+ echo contents >filler &&
+ cp filler expect &&
+ git stash push --include-untracked &&
+ git stash apply --index &&
+ test_cmp expect filler
+ )
+'
+
test_done
diff --git a/t/t4060-diff-submodule-option-diff-format.sh b/t/t4060-diff-submodule-option-diff-format.sh
index dc7b242697..d86e38abd8 100755
--- a/t/t4060-diff-submodule-option-diff-format.sh
+++ b/t/t4060-diff-submodule-option-diff-format.sh
@@ -361,7 +361,6 @@ test_expect_success 'typechanged submodule(submodule->blob)' '
rm -f sm1 &&
test_create_repo sm1 &&
head6=$(add_file sm1 foo6 foo7)
-fullhead6=$(cd sm1; git rev-parse --verify HEAD)
test_expect_success 'nonexistent commit' '
git diff-index -p --submodule=diff HEAD >actual &&
cat >expected <<-EOF &&
@@ -704,10 +703,26 @@ test_expect_success 'path filter' '
diff_cmp expected actual
'
-commit_file sm2
+cat >.gitmodules <<-EOF
+[submodule "sm2"]
+ path = sm2
+ url = bogus_url
+EOF
+git add .gitmodules
+commit_file sm2 .gitmodules
+
test_expect_success 'given commit' '
git diff-index -p --submodule=diff HEAD^ >actual &&
cat >expected <<-EOF &&
+ diff --git a/.gitmodules b/.gitmodules
+ new file mode 100644
+ index 1234567..89abcde
+ --- /dev/null
+ +++ b/.gitmodules
+ @@ -0,0 +1,3 @@
+ +[submodule "sm2"]
+ +path = sm2
+ +url = bogus_url
Submodule sm1 $head7...0000000 (submodule deleted)
Submodule sm2 0000000...$head9 (new submodule)
diff --git a/sm2/foo8 b/sm2/foo8
@@ -729,15 +744,21 @@ test_expect_success 'given commit' '
'
test_expect_success 'setup .git file for sm2' '
- (cd sm2 &&
- REAL="$(pwd)/../.real" &&
- mv .git "$REAL" &&
- echo "gitdir: $REAL" >.git)
+ git submodule absorbgitdirs sm2
'
test_expect_success 'diff --submodule=diff with .git file' '
git diff --submodule=diff HEAD^ >actual &&
cat >expected <<-EOF &&
+ diff --git a/.gitmodules b/.gitmodules
+ new file mode 100644
+ index 1234567..89abcde
+ --- /dev/null
+ +++ b/.gitmodules
+ @@ -0,0 +1,3 @@
+ +[submodule "sm2"]
+ +path = sm2
+ +url = bogus_url
Submodule sm1 $head7...0000000 (submodule deleted)
Submodule sm2 0000000...$head9 (new submodule)
diff --git a/sm2/foo8 b/sm2/foo8
@@ -758,9 +779,67 @@ test_expect_success 'diff --submodule=diff with .git file' '
diff_cmp expected actual
'
+mv sm2 sm2-bak
+
+test_expect_success 'deleted submodule with .git file' '
+ git diff-index -p --submodule=diff HEAD >actual &&
+ cat >expected <<-EOF &&
+ Submodule sm1 $head7...0000000 (submodule deleted)
+ Submodule sm2 $head9...0000000 (submodule deleted)
+ diff --git a/sm2/foo8 b/sm2/foo8
+ deleted file mode 100644
+ index 1234567..89abcde
+ --- a/sm2/foo8
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo8
+ diff --git a/sm2/foo9 b/sm2/foo9
+ deleted file mode 100644
+ index 1234567..89abcde
+ --- a/sm2/foo9
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo9
+ EOF
+ diff_cmp expected actual
+'
+
+echo submodule-to-blob >sm2
+
+test_expect_success 'typechanged(submodule->blob) submodule with .git file' '
+ git diff-index -p --submodule=diff HEAD >actual &&
+ cat >expected <<-EOF &&
+ Submodule sm1 $head7...0000000 (submodule deleted)
+ Submodule sm2 $head9...0000000 (submodule deleted)
+ diff --git a/sm2/foo8 b/sm2/foo8
+ deleted file mode 100644
+ index 1234567..89abcde
+ --- a/sm2/foo8
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo8
+ diff --git a/sm2/foo9 b/sm2/foo9
+ deleted file mode 100644
+ index 1234567..89abcde
+ --- a/sm2/foo9
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo9
+ diff --git a/sm2 b/sm2
+ new file mode 100644
+ index 1234567..89abcde
+ --- /dev/null
+ +++ b/sm2
+ @@ -0,0 +1 @@
+ +submodule-to-blob
+ EOF
+ diff_cmp expected actual
+'
+
+rm sm2
+mv sm2-bak sm2
+
test_expect_success 'setup nested submodule' '
- git submodule add -f ./sm2 &&
- git commit -a -m "add sm2" &&
git -C sm2 submodule add ../sm2 nested &&
git -C sm2 commit -a -m "nested sub" &&
head10=$(git -C sm2 rev-parse --short --verify HEAD)
@@ -791,6 +870,7 @@ test_expect_success 'diff --submodule=diff with moved nested submodule HEAD' '
test_expect_success 'diff --submodule=diff recurses into nested submodules' '
cat >expected <<-EOF &&
+ Submodule sm1 $head7...0000000 (submodule deleted)
Submodule sm2 contains modified content
Submodule sm2 $head9..$head10:
diff --git a/sm2/.gitmodules b/sm2/.gitmodules
@@ -830,4 +910,67 @@ test_expect_success 'diff --submodule=diff recurses into nested submodules' '
diff_cmp expected actual
'
+(cd sm2; commit_file nested)
+commit_file sm2
+head12=$(cd sm2; git rev-parse --short --verify HEAD)
+
+mv sm2 sm2-bak
+
+test_expect_success 'diff --submodule=diff recurses into deleted nested submodules' '
+ cat >expected <<-EOF &&
+ Submodule sm1 $head7...0000000 (submodule deleted)
+ Submodule sm2 $head12...0000000 (submodule deleted)
+ diff --git a/sm2/.gitmodules b/sm2/.gitmodules
+ deleted file mode 100644
+ index 3a816b8..0000000
+ --- a/sm2/.gitmodules
+ +++ /dev/null
+ @@ -1,3 +0,0 @@
+ -[submodule "nested"]
+ - path = nested
+ - url = ../sm2
+ diff --git a/sm2/foo8 b/sm2/foo8
+ deleted file mode 100644
+ index db9916b..0000000
+ --- a/sm2/foo8
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo8
+ diff --git a/sm2/foo9 b/sm2/foo9
+ deleted file mode 100644
+ index 9c3b4f6..0000000
+ --- a/sm2/foo9
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo9
+ Submodule nested $head11...0000000 (submodule deleted)
+ diff --git a/sm2/nested/file b/sm2/nested/file
+ deleted file mode 100644
+ index ca281f5..0000000
+ --- a/sm2/nested/file
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -nested content
+ diff --git a/sm2/nested/foo8 b/sm2/nested/foo8
+ deleted file mode 100644
+ index db9916b..0000000
+ --- a/sm2/nested/foo8
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo8
+ diff --git a/sm2/nested/foo9 b/sm2/nested/foo9
+ deleted file mode 100644
+ index 9c3b4f6..0000000
+ --- a/sm2/nested/foo9
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -foo9
+ EOF
+ git diff --submodule=diff >actual 2>err &&
+ test_must_be_empty err &&
+ diff_cmp expected actual
+'
+
+mv sm2-bak sm2
+
test_done
diff --git a/t/t4108-apply-threeway.sh b/t/t4108-apply-threeway.sh
index 65147efdea..cc3aa3314a 100755
--- a/t/t4108-apply-threeway.sh
+++ b/t/t4108-apply-threeway.sh
@@ -230,4 +230,49 @@ test_expect_success 'apply with --3way --cached and conflicts' '
test_cmp expect.diff actual.diff
'
+test_expect_success 'apply binary file patch' '
+ git reset --hard main &&
+ cp "$TEST_DIRECTORY/test-binary-1.png" bin.png &&
+ git add bin.png &&
+ git commit -m "add binary file" &&
+
+ cp "$TEST_DIRECTORY/test-binary-2.png" bin.png &&
+
+ git diff --binary >bin.diff &&
+ git reset --hard &&
+
+ # Apply must succeed.
+ git apply bin.diff
+'
+
+test_expect_success 'apply binary file patch with 3way' '
+ git reset --hard main &&
+ cp "$TEST_DIRECTORY/test-binary-1.png" bin.png &&
+ git add bin.png &&
+ git commit -m "add binary file" &&
+
+ cp "$TEST_DIRECTORY/test-binary-2.png" bin.png &&
+
+ git diff --binary >bin.diff &&
+ git reset --hard &&
+
+ # Apply must succeed.
+ git apply --3way --index bin.diff
+'
+
+test_expect_success 'apply full-index patch with 3way' '
+ git reset --hard main &&
+ cp "$TEST_DIRECTORY/test-binary-1.png" bin.png &&
+ git add bin.png &&
+ git commit -m "add binary file" &&
+
+ cp "$TEST_DIRECTORY/test-binary-2.png" bin.png &&
+
+ git diff --full-index >bin.diff &&
+ git reset --hard &&
+
+ # Apply must succeed.
+ git apply --3way --index bin.diff
+'
+
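Editor's note: what these three tests rely on, spelled out (a sketch, not part of the patch): both --binary and --full-index record unabbreviated blob names in the patch header, and "git apply --3way" uses those names to look the pre- and post-image blobs up in the object database.

	git diff --binary     >bin.diff    # binary hunks; implies --full-index
	git diff --full-index >full.diff   # textual hunks, unabbreviated index line
	git apply --3way --index bin.diff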
test_done
diff --git a/t/t4151-am-abort.sh b/t/t4151-am-abort.sh
index 9d8d3c72e7..2374151662 100755
--- a/t/t4151-am-abort.sh
+++ b/t/t4151-am-abort.sh
@@ -23,7 +23,13 @@ test_expect_success setup '
test_tick &&
git commit -a -m $i || return 1
done &&
+ git branch changes &&
git format-patch --no-numbered initial &&
+ git checkout -b conflicting initial &&
+ echo different >>file-1 &&
+ echo whatever >new-file &&
+ git add file-1 new-file &&
+ git commit -m different &&
git checkout -b side initial &&
echo local change >file-2-expect
'
@@ -191,4 +197,37 @@ test_expect_success 'am --abort leaves index stat info alone' '
git diff-files --exit-code --quiet
'
+test_expect_success 'git am --abort return failed exit status when it fails' '
+ test_when_finished "rm -rf file-2/ && git reset --hard && git am --abort" &&
+ git checkout changes &&
+ git format-patch -1 --stdout conflicting >changes.mbox &&
+ test_must_fail git am --3way changes.mbox &&
+
+ git rm file-2 &&
+ mkdir file-2 &&
+ echo precious >file-2/somefile &&
+ test_must_fail git am --abort &&
+ test_path_is_dir file-2/
+'
+
+test_expect_success 'git am --abort cleans relevant files' '
+ git checkout changes &&
+ git format-patch -1 --stdout conflicting >changes.mbox &&
+ test_must_fail git am --3way changes.mbox &&
+
+ test_path_is_file new-file &&
+ echo further changes >>file-1 &&
+ echo change other file >>file-2 &&
+
+ # Abort, and expect the files touched by am to be reverted
+ git am --abort &&
+
+ test_path_is_missing new-file &&
+
+ # Files not involved in am operation are left modified
+ git diff --name-only changes >actual &&
+ test_write_lines file-2 >expect &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t5304-prune.sh b/t/t5304-prune.sh
index 7cabb85ca6..8ae314af58 100755
--- a/t/t5304-prune.sh
+++ b/t/t5304-prune.sh
@@ -291,6 +291,7 @@ test_expect_success 'prune: handle HEAD reflog in multiple worktrees' '
cat ../expected >blob &&
git add blob &&
git commit -m "second commit in third" &&
+ git clean -f && # Remove untracked left behind by deleting index
git reset --hard HEAD^
) &&
git prune --expire=now &&
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index b02838750e..673baa5c3c 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -8,6 +8,10 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. "$TEST_DIRECTORY"/lib-bundle.sh
. "$TEST_DIRECTORY"/lib-bitmap.sh
+# t5310 deals only with single-pack bitmaps, so don't write MIDX bitmaps in
+# their place.
+GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0
+
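Editor's note: GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP is the new suite-wide knob introduced by this series; like the existing GIT_TEST_MULTI_PACK_INDEX it is presumably meant to be exported when running the tests so that MIDX bitmaps are exercised everywhere, e.g.:

	GIT_TEST_MULTI_PACK_INDEX=1 \
	GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=1 \
		make -C t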
objpath () {
echo ".git/objects/$(echo "$1" | sed -e 's|\(..\)|\1/|')"
}
@@ -25,93 +29,10 @@ has_any () {
grep -Ff "$1" "$2"
}
-# To ensure the logic for "maximal commits" is exercised, make
-# the repository a bit more complicated.
-#
-# other second
-# * *
-# (99 commits) (99 commits)
-# * *
-# |\ /|
-# | * octo-other octo-second * |
-# |/|\_________ ____________/|\|
-# | \ \/ __________/ |
-# | | ________/\ / |
-# * |/ * merge-right *
-# | _|__________/ \____________ |
-# |/ | \|
-# (l1) * * merge-left * (r1)
-# | / \________________________ |
-# |/ \|
-# (l2) * * (r2)
-# \___________________________ |
-# \|
-# * (base)
-#
-# We only push bits down the first-parent history, which
-# makes some of these commits unimportant!
-#
-# The important part for the maximal commit algorithm is how
-# the bitmasks are extended. Assuming starting bit positions
-# for second (bit 0) and other (bit 1), the bitmasks at the
-# end should be:
-#
-# second: 1 (maximal, selected)
-# other: 01 (maximal, selected)
-# (base): 11 (maximal)
-#
-# This complicated history was important for a previous
-# version of the walk that guarantees never walking a
-# commit multiple times. That goal might be important
-# again, so preserve this complicated case. For now, this
-# test will guarantee that the bitmaps are computed
-# correctly, even with the repeat calculations.
-
-test_expect_success 'setup repo with moderate-sized history' '
- test_commit_bulk --id=file 10 &&
- git branch -M second &&
- git checkout -b other HEAD~5 &&
- test_commit_bulk --id=side 10 &&
-
- # add complicated history setup, including merges and
- # ambiguous merge-bases
-
- git checkout -b merge-left other~2 &&
- git merge second~2 -m "merge-left" &&
-
- git checkout -b merge-right second~1 &&
- git merge other~1 -m "merge-right" &&
-
- git checkout -b octo-second second &&
- git merge merge-left merge-right -m "octopus-second" &&
-
- git checkout -b octo-other other &&
- git merge merge-left merge-right -m "octopus-other" &&
-
- git checkout other &&
- git merge octo-other -m "pull octopus" &&
-
- git checkout second &&
- git merge octo-second -m "pull octopus" &&
-
- # Remove these branches so they are not selected
- # as bitmap tips
- git branch -D merge-left &&
- git branch -D merge-right &&
- git branch -D octo-other &&
- git branch -D octo-second &&
-
- # add padding to make these merges less interesting
- # and avoid having them selected for bitmaps
- test_commit_bulk --id=file 100 &&
- git checkout other &&
- test_commit_bulk --id=side 100 &&
- git checkout second &&
-
- bitmaptip=$(git rev-parse second) &&
- blob=$(echo tagged-blob | git hash-object -w --stdin) &&
- git tag tagged-blob $blob &&
- git config repack.writebitmaps true
+setup_bitmap_history
+
+test_expect_success 'setup writing bitmaps during repack' '
+ git config repack.writeBitmaps true
'
test_expect_success 'full repack creates bitmaps' '
@@ -123,109 +44,7 @@ test_expect_success 'full repack creates bitmaps' '
grep "\"key\":\"num_maximal_commits\",\"value\":\"107\"" trace
'
-test_expect_success 'rev-list --test-bitmap verifies bitmaps' '
- git rev-list --test-bitmap HEAD
-'
-
-rev_list_tests_head () {
- test_expect_success "counting commits via bitmap ($state, $branch)" '
- git rev-list --count $branch >expect &&
- git rev-list --use-bitmap-index --count $branch >actual &&
- test_cmp expect actual
- '
-
- test_expect_success "counting partial commits via bitmap ($state, $branch)" '
- git rev-list --count $branch~5..$branch >expect &&
- git rev-list --use-bitmap-index --count $branch~5..$branch >actual &&
- test_cmp expect actual
- '
-
- test_expect_success "counting commits with limit ($state, $branch)" '
- git rev-list --count -n 1 $branch >expect &&
- git rev-list --use-bitmap-index --count -n 1 $branch >actual &&
- test_cmp expect actual
- '
-
- test_expect_success "counting non-linear history ($state, $branch)" '
- git rev-list --count other...second >expect &&
- git rev-list --use-bitmap-index --count other...second >actual &&
- test_cmp expect actual
- '
-
- test_expect_success "counting commits with limiting ($state, $branch)" '
- git rev-list --count $branch -- 1.t >expect &&
- git rev-list --use-bitmap-index --count $branch -- 1.t >actual &&
- test_cmp expect actual
- '
-
- test_expect_success "counting objects via bitmap ($state, $branch)" '
- git rev-list --count --objects $branch >expect &&
- git rev-list --use-bitmap-index --count --objects $branch >actual &&
- test_cmp expect actual
- '
-
- test_expect_success "enumerate commits ($state, $branch)" '
- git rev-list --use-bitmap-index $branch >actual &&
- git rev-list $branch >expect &&
- test_bitmap_traversal --no-confirm-bitmaps expect actual
- '
-
- test_expect_success "enumerate --objects ($state, $branch)" '
- git rev-list --objects --use-bitmap-index $branch >actual &&
- git rev-list --objects $branch >expect &&
- test_bitmap_traversal expect actual
- '
-
- test_expect_success "bitmap --objects handles non-commit objects ($state, $branch)" '
- git rev-list --objects --use-bitmap-index $branch tagged-blob >actual &&
- grep $blob actual
- '
-}
-
-rev_list_tests () {
- state=$1
-
- for branch in "second" "other"
- do
- rev_list_tests_head
- done
-}
-
-rev_list_tests 'full bitmap'
-
-test_expect_success 'clone from bitmapped repository' '
- git clone --no-local --bare . clone.git &&
- git rev-parse HEAD >expect &&
- git --git-dir=clone.git rev-parse HEAD >actual &&
- test_cmp expect actual
-'
-
-test_expect_success 'partial clone from bitmapped repository' '
- test_config uploadpack.allowfilter true &&
- git clone --no-local --bare --filter=blob:none . partial-clone.git &&
- (
- cd partial-clone.git &&
- pack=$(echo objects/pack/*.pack) &&
- git verify-pack -v "$pack" >have &&
- awk "/blob/ { print \$1 }" <have >blobs &&
- # we expect this single blob because of the direct ref
- git rev-parse refs/tags/tagged-blob >expect &&
- test_cmp expect blobs
- )
-'
-
-test_expect_success 'setup further non-bitmapped commits' '
- test_commit_bulk --id=further 10
-'
-
-rev_list_tests 'partial bitmap'
-
-test_expect_success 'fetch (partial bitmap)' '
- git --git-dir=clone.git fetch origin second:second &&
- git rev-parse HEAD >expect &&
- git --git-dir=clone.git rev-parse HEAD >actual &&
- test_cmp expect actual
-'
+basic_bitmap_tests
test_expect_success 'incremental repack fails when bitmaps are requested' '
test_commit more-1 &&
@@ -461,40 +280,6 @@ test_expect_success 'truncated bitmap fails gracefully (cache)' '
test_i18ngrep corrupted.bitmap.index stderr
'
-test_expect_success 'enumerating progress counts pack-reused objects' '
- count=$(git rev-list --objects --all --count) &&
- git repack -adb &&
-
- # check first with only reused objects; confirm that our progress
- # showed the right number, and also that we did pack-reuse as expected.
- # Check only the final "done" line of the meter (there may be an
- # arbitrary number of intermediate lines ending with CR).
- GIT_PROGRESS_DELAY=0 \
- git pack-objects --all --stdout --progress \
- </dev/null >/dev/null 2>stderr &&
- grep "Enumerating objects: $count, done" stderr &&
- grep "pack-reused $count" stderr &&
-
- # now the same but with one non-reused object
- git commit --allow-empty -m "an extra commit object" &&
- GIT_PROGRESS_DELAY=0 \
- git pack-objects --all --stdout --progress \
- </dev/null >/dev/null 2>stderr &&
- grep "Enumerating objects: $((count+1)), done" stderr &&
- grep "pack-reused $count" stderr
-'
-
-# have_delta <obj> <expected_base>
-#
-# Note that because this relies on cat-file, it might find _any_ copy of an
-# object in the repository. The caller is responsible for making sure
-# there's only one (e.g., via "repack -ad", or having just fetched a copy).
-have_delta () {
- echo $2 >expect &&
- echo $1 | git cat-file --batch-check="%(deltabase)" >actual &&
- test_cmp expect actual
-}
-
# Create a state of history with these properties:
#
# - refs that allow a client to fetch some new history, while sharing some old
diff --git a/t/t5312-prune-corruption.sh b/t/t5312-prune-corruption.sh
index 11423b3cb2..ea889c088a 100755
--- a/t/t5312-prune-corruption.sh
+++ b/t/t5312-prune-corruption.sh
@@ -7,6 +7,9 @@ if we see, for example, a ref with a bogus name, it is OK either to
bail out or to proceed using it as a reachable tip, but it is _not_
OK to proceed as if it did not exist. Otherwise we might silently
delete objects that cannot be recovered.
+
+Note that we do assert command failure in these cases, because that is
+what currently happens. If that changes, these tests should be revisited.
'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
@@ -18,39 +21,58 @@ test_expect_success 'disable reflogs' '
git reflog expire --expire=all --all
'
+create_bogus_ref () {
+ test_when_finished 'rm -f .git/refs/heads/bogus..name' &&
+ echo $bogus >.git/refs/heads/bogus..name
+}
+
test_expect_success 'create history reachable only from a bogus-named ref' '
test_tick && git commit --allow-empty -m main &&
base=$(git rev-parse HEAD) &&
test_tick && git commit --allow-empty -m bogus &&
bogus=$(git rev-parse HEAD) &&
git cat-file commit $bogus >saved &&
- echo $bogus >.git/refs/heads/bogus..name &&
git reset --hard HEAD^
'
test_expect_success 'pruning does not drop bogus object' '
test_when_finished "git hash-object -w -t commit saved" &&
- test_might_fail git prune --expire=now &&
- verbose git cat-file -e $bogus
+ create_bogus_ref &&
+ test_must_fail git prune --expire=now &&
+ git cat-file -e $bogus
'
test_expect_success 'put bogus object into pack' '
git tag reachable $bogus &&
git repack -ad &&
git tag -d reachable &&
- verbose git cat-file -e $bogus
+ git cat-file -e $bogus
+'
+
+test_expect_success 'non-destructive repack bails on bogus ref' '
+ create_bogus_ref &&
+ test_must_fail git repack -adk
'
+test_expect_success 'GIT_REF_PARANOIA=0 overrides safety' '
+ create_bogus_ref &&
+ GIT_REF_PARANOIA=0 git repack -adk
+'
+
+
test_expect_success 'destructive repack keeps packed object' '
- test_might_fail git repack -Ad --unpack-unreachable=now &&
- verbose git cat-file -e $bogus &&
- test_might_fail git repack -ad &&
- verbose git cat-file -e $bogus
+ create_bogus_ref &&
+ test_must_fail git repack -Ad --unpack-unreachable=now &&
+ git cat-file -e $bogus &&
+ test_must_fail git repack -ad &&
+ git cat-file -e $bogus
'
-# subsequent tests will have different corruptions
-test_expect_success 'clean up bogus ref' '
- rm .git/refs/heads/bogus..name
+test_expect_success 'destructive repack not confused by dangling symref' '
+ test_when_finished "git symbolic-ref -d refs/heads/dangling" &&
+ git symbolic-ref refs/heads/dangling refs/heads/does-not-exist &&
+ git repack -ad &&
+ test_must_fail git cat-file -e $bogus
'
# We create two new objects here, "one" and "two". Our
@@ -77,8 +99,8 @@ test_expect_success 'create history with missing tip commit' '
test_expect_success 'pruning with a corrupted tip does not drop history' '
test_when_finished "git hash-object -w -t commit saved" &&
- test_might_fail git prune --expire=now &&
- verbose git cat-file -e $recoverable
+ test_must_fail git prune --expire=now &&
+ git cat-file -e $recoverable
'
test_expect_success 'pack-refs does not silently delete broken loose ref' '
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
index 3d4d9f10c3..bd17f308b3 100755
--- a/t/t5319-multi-pack-index.sh
+++ b/t/t5319-multi-pack-index.sh
@@ -174,12 +174,12 @@ test_expect_success 'write progress off for redirected stderr' '
'
test_expect_success 'write force progress on for stderr' '
- GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --progress write 2>err &&
+ GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir write --progress 2>err &&
test_file_not_empty err
'
test_expect_success 'write with the --no-progress option' '
- GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --no-progress write 2>err &&
+ GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir write --no-progress 2>err &&
test_line_count = 0 err
'
@@ -201,6 +201,34 @@ test_expect_success 'write midx with twelve packs' '
compare_results_with_midx "twelve packs"
+test_expect_success 'multi-pack-index *.rev cleanup with --object-dir' '
+ git init repo &&
+ git clone -s repo alternate &&
+
+ test_when_finished "rm -rf repo alternate" &&
+
+ (
+ cd repo &&
+ test_commit base &&
+ git repack -d
+ ) &&
+
+ ours="alternate/.git/objects/pack/multi-pack-index-123.rev" &&
+ theirs="repo/.git/objects/pack/multi-pack-index-abc.rev" &&
+ touch "$ours" "$theirs" &&
+
+ (
+ cd alternate &&
+ git multi-pack-index --object-dir ../repo/.git/objects write
+ ) &&
+
+ # writing a midx in "repo" should not remove the .rev file in the
+ # alternate
+ test_path_is_file repo/.git/objects/pack/multi-pack-index &&
+ test_path_is_file $ours &&
+ test_path_is_missing $theirs
+'
+
test_expect_success 'warn on improper hash version' '
git init --object-format=sha1 sha1 &&
(
@@ -277,6 +305,23 @@ test_expect_success 'midx picks objects from preferred pack' '
)
'
+test_expect_success 'preferred packs must be non-empty' '
+ test_when_finished rm -rf preferred.git &&
+ git init preferred.git &&
+ (
+ cd preferred.git &&
+
+ test_commit base &&
+ git repack -ad &&
+
+ empty="$(git pack-objects $objdir/pack/pack </dev/null)" &&
+
+ test_must_fail git multi-pack-index write \
+ --preferred-pack=pack-$empty.pack 2>err &&
+ grep "with no objects" err
+ )
+'
+
test_expect_success 'verify multi-pack-index success' '
git multi-pack-index verify --object-dir=$objdir
'
@@ -429,12 +474,12 @@ test_expect_success 'repack progress off for redirected stderr' '
'
test_expect_success 'repack force progress on for stderr' '
- GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --progress repack 2>err &&
+ GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir repack --progress 2>err &&
test_file_not_empty err
'
test_expect_success 'repack with the --no-progress option' '
- GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --no-progress repack 2>err &&
+ GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir repack --no-progress 2>err &&
test_line_count = 0 err
'
@@ -487,7 +532,8 @@ test_expect_success 'repack preserves multi-pack-index when creating packs' '
compare_results_with_midx "after repack"
test_expect_success 'multi-pack-index and pack-bitmap' '
- git -c repack.writeBitmaps=true repack -ad &&
+ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \
+ git -c repack.writeBitmaps=true repack -ad &&
git multi-pack-index write &&
git rev-list --test-bitmap HEAD
'
@@ -537,7 +583,15 @@ test_expect_success 'force some 64-bit offsets with pack-objects' '
idx64=objects64/pack/test-64-$pack64.idx &&
chmod u+w $idx64 &&
corrupt_data $idx64 $(test_oid idxoff) "\02" &&
- midx64=$(git multi-pack-index --object-dir=objects64 write) &&
+	# objects64 is not a real repository, but it can serve as an
+	# alternate, which still lets us write a MIDX into it
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+ ( cd ../objects64 && pwd ) >.git/objects/info/alternates &&
+ midx64=$(git multi-pack-index --object-dir=../objects64 write)
+ ) &&
midx_read_expect 1 63 5 objects64 " large-offsets"
'
@@ -618,7 +672,7 @@ test_expect_success 'expire progress off for redirected stderr' '
test_expect_success 'expire force progress on for stderr' '
(
cd dup &&
- GIT_PROGRESS_DELAY=0 git multi-pack-index --progress expire 2>err &&
+ GIT_PROGRESS_DELAY=0 git multi-pack-index expire --progress 2>err &&
test_file_not_empty err
)
'
@@ -626,7 +680,7 @@ test_expect_success 'expire force progress on for stderr' '
test_expect_success 'expire with the --no-progress option' '
(
cd dup &&
- GIT_PROGRESS_DELAY=0 git multi-pack-index --no-progress expire 2>err &&
+ GIT_PROGRESS_DELAY=0 git multi-pack-index expire --no-progress 2>err &&
test_line_count = 0 err
)
'
@@ -842,4 +896,9 @@ test_expect_success 'usage shown without sub-command' '
! test_i18ngrep "unrecognized subcommand" err
'
+test_expect_success 'complains when run outside of a repository' '
+ nongit test_must_fail git multi-pack-index write 2>err &&
+ grep "not a git repository" err
+'
+
test_done
diff --git a/t/t5326-multi-pack-bitmaps.sh b/t/t5326-multi-pack-bitmaps.sh
new file mode 100755
index 0000000000..ec4aa89f63
--- /dev/null
+++ b/t/t5326-multi-pack-bitmaps.sh
@@ -0,0 +1,316 @@
+#!/bin/sh
+
+test_description='exercise basic multi-pack bitmap functionality'
+. ./test-lib.sh
+. "${TEST_DIRECTORY}/lib-bitmap.sh"
+
+# We'll be writing our own midx and bitmaps, so avoid getting confused by the
+# automatic ones.
+GIT_TEST_MULTI_PACK_INDEX=0
+GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0
+
+objdir=.git/objects
+midx=$objdir/pack/multi-pack-index
+
+# midx_pack_source <obj>
+midx_pack_source () {
+ test-tool read-midx --show-objects .git/objects | grep "^$1 " | cut -f2
+}
+
+setup_bitmap_history
+
+test_expect_success 'enable core.multiPackIndex' '
+ git config core.multiPackIndex true
+'
+
+test_expect_success 'create single-pack midx with bitmaps' '
+ git repack -ad &&
+ git multi-pack-index write --bitmap &&
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_file $midx-$(midx_checksum $objdir).rev
+'
+
+basic_bitmap_tests
+
+test_expect_success 'create new additional packs' '
+ for i in $(test_seq 1 16)
+ do
+ test_commit "$i" &&
+ git repack -d || return 1
+ done &&
+
+ git checkout -b other2 HEAD~8 &&
+ for i in $(test_seq 1 8)
+ do
+ test_commit "side-$i" &&
+ git repack -d || return 1
+ done &&
+ git checkout second
+'
+
+test_expect_success 'create multi-pack midx with bitmaps' '
+ git multi-pack-index write --bitmap &&
+
+ ls $objdir/pack/pack-*.pack >packs &&
+ test_line_count = 25 packs &&
+
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_file $midx-$(midx_checksum $objdir).rev
+'
+
+basic_bitmap_tests
+
+test_expect_success '--no-bitmap is respected when bitmaps exist' '
+ git multi-pack-index write --bitmap &&
+
+ test_commit respect--no-bitmap &&
+ git repack -d &&
+
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_file $midx-$(midx_checksum $objdir).rev &&
+
+ git multi-pack-index write --no-bitmap &&
+
+ test_path_is_file $midx &&
+ test_path_is_missing $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_missing $midx-$(midx_checksum $objdir).rev
+'
+
+test_expect_success 'setup midx with base from later pack' '
+	# Write a and b so that "a" is stored as a delta on top of base "b":
+	# Git prefers deltas that delete content from a longer base over
+	# deltas that add content to a shorter one.
+ test_seq 1 128 >a &&
+ test_seq 1 130 >b &&
+
+ git add a b &&
+ git commit -m "initial commit" &&
+
+ a=$(git rev-parse HEAD:a) &&
+ b=$(git rev-parse HEAD:b) &&
+
+ # In the first pack, "a" is stored as a delta to "b".
+ p1=$(git pack-objects .git/objects/pack/pack <<-EOF
+ $a
+ $b
+ EOF
+ ) &&
+
+ # In the second pack, "a" is missing, and "b" is not a delta nor base to
+ # any other object.
+ p2=$(git pack-objects .git/objects/pack/pack <<-EOF
+ $b
+ $(git rev-parse HEAD)
+ $(git rev-parse HEAD^{tree})
+ EOF
+ ) &&
+
+ git prune-packed &&
+ # Use the second pack as the preferred source, so that "b" occurs
+ # earlier in the MIDX object order, rendering "a" unusable for pack
+ # reuse.
+ git multi-pack-index write --bitmap --preferred-pack=pack-$p2.idx &&
+
+ have_delta $a $b &&
+ test $(midx_pack_source $a) != $(midx_pack_source $b)
+'
+
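Editor's note: have_delta, which this test uses and which t5310 no longer defines locally (it presumably moved into lib-bitmap.sh alongside the other shared helpers), boils down to a single cat-file query; expanded, the assertion above reads:

	echo "$a" | git cat-file --batch-check="%(deltabase)" >actual &&
	echo "$b" >expect &&
	test_cmp expect actual    # "a" is stored as a delta against base "b"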
+rev_list_tests 'full bitmap with backwards delta'
+
+test_expect_success 'clone with bitmaps enabled' '
+ git clone --no-local --bare . clone-reverse-delta.git &&
+ test_when_finished "rm -fr clone-reverse-delta.git" &&
+
+ git rev-parse HEAD >expect &&
+ git --git-dir=clone-reverse-delta.git rev-parse HEAD >actual &&
+ test_cmp expect actual
+'
+
+bitmap_reuse_tests() {
+ from=$1
+ to=$2
+
+ test_expect_success "setup pack reuse tests ($from -> $to)" '
+ rm -fr repo &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit_bulk 16 &&
+ git tag old-tip &&
+
+ git config core.multiPackIndex true &&
+ if test "MIDX" = "$from"
+ then
+ git repack -Ad &&
+ git multi-pack-index write --bitmap
+ else
+ git repack -Adb
+ fi
+ )
+ '
+
+ test_expect_success "build bitmap from existing ($from -> $to)" '
+ (
+ cd repo &&
+ test_commit_bulk --id=further 16 &&
+ git tag new-tip &&
+
+ if test "MIDX" = "$to"
+ then
+ git repack -d &&
+ git multi-pack-index write --bitmap
+ else
+ git repack -Adb
+ fi
+ )
+ '
+
+ test_expect_success "verify resulting bitmaps ($from -> $to)" '
+ (
+ cd repo &&
+ git for-each-ref &&
+ git rev-list --test-bitmap refs/tags/old-tip &&
+ git rev-list --test-bitmap refs/tags/new-tip
+ )
+ '
+}
+
+bitmap_reuse_tests 'pack' 'MIDX'
+bitmap_reuse_tests 'MIDX' 'pack'
+bitmap_reuse_tests 'MIDX' 'MIDX'
+
+test_expect_success 'missing object closure fails gracefully' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit loose &&
+ test_commit packed &&
+
+ # Do not pass "--revs"; we want a pack without the "loose"
+ # commit.
+ git pack-objects $objdir/pack/pack <<-EOF &&
+ $(git rev-parse packed)
+ EOF
+
+ test_must_fail git multi-pack-index write --bitmap 2>err &&
+ grep "doesn.t have full closure" err &&
+ test_path_is_missing $midx
+ )
+'
+
+test_expect_success 'setup partial bitmaps' '
+ test_commit packed &&
+ git repack &&
+ test_commit loose &&
+ git multi-pack-index write --bitmap 2>err &&
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_file $midx-$(midx_checksum $objdir).rev
+'
+
+basic_bitmap_tests HEAD~
+
+test_expect_success 'removing a MIDX clears stale bitmaps' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+ test_commit base &&
+ git repack &&
+ git multi-pack-index write --bitmap &&
+
+ # Write a MIDX and bitmap; remove the MIDX but leave the bitmap.
+ stale_bitmap=$midx-$(midx_checksum $objdir).bitmap &&
+ stale_rev=$midx-$(midx_checksum $objdir).rev &&
+ rm $midx &&
+
+ # Then write a new MIDX.
+ test_commit new &&
+ git repack &&
+ git multi-pack-index write --bitmap &&
+
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_file $midx-$(midx_checksum $objdir).rev &&
+ test_path_is_missing $stale_bitmap &&
+ test_path_is_missing $stale_rev
+ )
+'
+
+test_expect_success 'pack.preferBitmapTips' '
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit_bulk --message="%s" 103 &&
+
+ git log --format="%H" >commits.raw &&
+ sort <commits.raw >commits &&
+
+ git log --format="create refs/tags/%s %H" HEAD >refs &&
+ git update-ref --stdin <refs &&
+
+ git multi-pack-index write --bitmap &&
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_file $midx-$(midx_checksum $objdir).rev &&
+
+ test-tool bitmap list-commits | sort >bitmaps &&
+ comm -13 bitmaps commits >before &&
+ test_line_count = 1 before &&
+
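+		# "before" holds the one commit that did not get a bitmap; turn
+		# it into refs under refs/tags/include/ so pack.preferBitmapTips
+		# can prefer it on the next write.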
+ perl -ne "printf(\"create refs/tags/include/%d \", $.); print" \
+ <before | git update-ref --stdin &&
+
+ rm -fr $midx-$(midx_checksum $objdir).bitmap &&
+ rm -fr $midx-$(midx_checksum $objdir).rev &&
+ rm -fr $midx &&
+
+ git -c pack.preferBitmapTips=refs/tags/include \
+ multi-pack-index write --bitmap &&
+ test-tool bitmap list-commits | sort >bitmaps &&
+ comm -13 bitmaps commits >after &&
+
+ ! test_cmp before after
+ )
+'
+
+test_expect_success 'hash-cache values are propagated from pack bitmaps' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit base &&
+ test_commit base2 &&
+ git repack -adb &&
+
+ test-tool bitmap dump-hashes >pack.raw &&
+ test_file_not_empty pack.raw &&
+ sort pack.raw >pack.hashes &&
+
+ test_commit new &&
+ git repack &&
+ git multi-pack-index write --bitmap &&
+
+ test-tool bitmap dump-hashes >midx.raw &&
+ sort midx.raw >midx.hashes &&
+
+ # ensure that every namehash in the pack bitmap can be found in
+ # the midx bitmap (i.e., that there are no oid-namehash pairs
+ # unique to the pack bitmap).
+ comm -23 pack.hashes midx.hashes >dropped.hashes &&
+ test_must_be_empty dropped.hashes
+ )
+'
+
+test_done
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index 4db8edd9c8..8212ca56dc 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -662,10 +662,10 @@ test_expect_success 'push does not update local refs on failure' '
test_expect_success 'allow deleting an invalid remote ref' '
- mk_test testrepo heads/main &&
+ mk_test testrepo heads/branch &&
rm -f testrepo/.git/objects/??/* &&
- git push testrepo :refs/heads/main &&
- (cd testrepo && test_must_fail git rev-parse --verify refs/heads/main)
+ git push testrepo :refs/heads/branch &&
+ (cd testrepo && test_must_fail git rev-parse --verify refs/heads/branch)
'
@@ -706,25 +706,26 @@ test_expect_success 'pushing valid refs triggers post-receive and post-update ho
'
test_expect_success 'deleting dangling ref triggers hooks with correct args' '
- mk_test_with_hooks testrepo heads/main &&
+ mk_test_with_hooks testrepo heads/branch &&
+ orig=$(git -C testrepo rev-parse refs/heads/branch) &&
rm -f testrepo/.git/objects/??/* &&
- git push testrepo :refs/heads/main &&
+ git push testrepo :refs/heads/branch &&
(
cd testrepo/.git &&
cat >pre-receive.expect <<-EOF &&
- $ZERO_OID $ZERO_OID refs/heads/main
+ $orig $ZERO_OID refs/heads/branch
EOF
cat >update.expect <<-EOF &&
- refs/heads/main $ZERO_OID $ZERO_OID
+ refs/heads/branch $orig $ZERO_OID
EOF
cat >post-receive.expect <<-EOF &&
- $ZERO_OID $ZERO_OID refs/heads/main
+ $orig $ZERO_OID refs/heads/branch
EOF
cat >post-update.expect <<-EOF &&
- refs/heads/main
+ refs/heads/branch
EOF
test_cmp pre-receive.expect pre-receive.actual &&
diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
index 4f87d90c5b..f92c79c132 100755
--- a/t/t5551-http-fetch-smart.sh
+++ b/t/t5551-http-fetch-smart.sh
@@ -196,8 +196,8 @@ test_expect_success 'GIT_TRACE_CURL redacts auth details' '
# Ensure that there is no "Basic" followed by a base64 string, but that
# the auth details are redacted
- ! grep "Authorization: Basic [0-9a-zA-Z+/]" trace &&
- grep "Authorization: Basic <redacted>" trace
+ ! grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace &&
+ grep -i "Authorization: Basic <redacted>" trace
'
test_expect_success 'GIT_CURL_VERBOSE redacts auth details' '
@@ -208,8 +208,8 @@ test_expect_success 'GIT_CURL_VERBOSE redacts auth details' '
# Ensure that there is no "Basic" followed by a base64 string, but that
# the auth details are redacted
- ! grep "Authorization: Basic [0-9a-zA-Z+/]" trace &&
- grep "Authorization: Basic <redacted>" trace
+ ! grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace &&
+ grep -i "Authorization: Basic <redacted>" trace
'
test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_REDACT=0' '
@@ -219,7 +219,7 @@ test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_RE
git clone --bare "$HTTPD_URL/auth/smart/repo.git" redact-auth &&
expect_askpass both user@host &&
- grep "Authorization: Basic [0-9a-zA-Z+/]" trace
+ grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace
'
test_expect_success 'disable dumb http on server' '
@@ -474,10 +474,10 @@ test_expect_success 'cookies are redacted by default' '
GIT_TRACE_CURL=true \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
- grep "Cookie:.*Foo=<redacted>" err &&
- grep "Cookie:.*Bar=<redacted>" err &&
- ! grep "Cookie:.*Foo=1" err &&
- ! grep "Cookie:.*Bar=2" err
+ grep -i "Cookie:.*Foo=<redacted>" err &&
+ grep -i "Cookie:.*Bar=<redacted>" err &&
+ ! grep -i "Cookie:.*Foo=1" err &&
+ ! grep -i "Cookie:.*Bar=2" err
'
test_expect_success 'empty values of cookies are also redacted' '
@@ -486,7 +486,7 @@ test_expect_success 'empty values of cookies are also redacted' '
GIT_TRACE_CURL=true \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
- grep "Cookie:.*Foo=<redacted>" err
+ grep -i "Cookie:.*Foo=<redacted>" err
'
test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' '
@@ -496,8 +496,8 @@ test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' '
GIT_TRACE_REDACT=0 GIT_TRACE_CURL=true \
git -c "http.cookieFile=$(pwd)/cookies" clone \
$HTTPD_URL/smart/repo.git clone 2>err &&
- grep "Cookie:.*Foo=1" err &&
- grep "Cookie:.*Bar=2" err
+ grep -i "Cookie:.*Foo=1" err &&
+ grep -i "Cookie:.*Bar=2" err
'
test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' '
@@ -558,4 +558,13 @@ test_expect_success 'http auth forgets bogus credentials' '
expect_askpass both user@host
'
+test_expect_success 'client falls back from v2 to v0 to match server' '
+ GIT_TRACE_PACKET=$PWD/trace \
+ GIT_TEST_PROTOCOL_VERSION=2 \
+ git clone $HTTPD_URL/smart_v0/repo.git repo-v0 &&
+ # check for v0; there the HEAD symref is communicated in the capability
+ # line; v2 uses a different syntax on each ref advertisement line
+ grep symref=HEAD:refs/heads/ trace
+'
+
test_done
diff --git a/t/t5555-http-smart-common.sh b/t/t5555-http-smart-common.sh
new file mode 100755
index 0000000000..49faf5e283
--- /dev/null
+++ b/t/t5555-http-smart-common.sh
@@ -0,0 +1,161 @@
+#!/bin/sh
+
+test_description='test functionality common to smart fetch & push'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit --no-tag initial
+'
+
+test_expect_success 'git upload-pack --http-backend-info-refs and --advertise-refs are aliased' '
+ git upload-pack --http-backend-info-refs . >expected 2>err.expected &&
+ git upload-pack --advertise-refs . >actual 2>err.actual &&
+ test_cmp err.expected err.actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'git receive-pack --http-backend-info-refs and --advertise-refs are aliased' '
+ git receive-pack --http-backend-info-refs . >expected 2>err.expected &&
+ git receive-pack --advertise-refs . >actual 2>err.actual &&
+ test_cmp err.expected err.actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'git upload-pack --advertise-refs' '
+ cat >expect <<-EOF &&
+ $(git rev-parse HEAD) HEAD
+ $(git rev-parse HEAD) $(git symbolic-ref HEAD)
+ 0000
+ EOF
+
+ # We only care about GIT_PROTOCOL, not GIT_TEST_PROTOCOL_VERSION
+ sane_unset GIT_PROTOCOL &&
+ GIT_TEST_PROTOCOL_VERSION=2 \
+ git upload-pack --advertise-refs . >out 2>err &&
+
+ test-tool pkt-line unpack <out >actual &&
+ test_must_be_empty err &&
+ test_cmp actual expect &&
+
+ # The --advertise-refs alias works
+ git upload-pack --advertise-refs . >out 2>err &&
+
+ test-tool pkt-line unpack <out >actual &&
+ test_must_be_empty err &&
+ test_cmp actual expect
+'
+
+test_expect_success 'git upload-pack --advertise-refs: v0' '
+ # With no specified protocol
+ cat >expect <<-EOF &&
+ $(git rev-parse HEAD) HEAD
+ $(git rev-parse HEAD) $(git symbolic-ref HEAD)
+ 0000
+ EOF
+
+ git upload-pack --advertise-refs . >out 2>err &&
+ test-tool pkt-line unpack <out >actual &&
+ test_must_be_empty err &&
+ test_cmp actual expect &&
+
+ # With explicit v0
+ GIT_PROTOCOL=version=0 \
+ git upload-pack --advertise-refs . >out 2>err &&
+ test-tool pkt-line unpack <out >actual 2>err &&
+ test_must_be_empty err &&
+ test_cmp actual expect
+
+'
+
+test_expect_success 'git receive-pack --advertise-refs: v0' '
+ # With no specified protocol
+ cat >expect <<-EOF &&
+ $(git rev-parse HEAD) $(git symbolic-ref HEAD)
+ 0000
+ EOF
+
+ git receive-pack --advertise-refs . >out 2>err &&
+ test-tool pkt-line unpack <out >actual &&
+ test_must_be_empty err &&
+ test_cmp actual expect &&
+
+ # With explicit v0
+ GIT_PROTOCOL=version=0 \
+ git receive-pack --advertise-refs . >out 2>err &&
+ test-tool pkt-line unpack <out >actual 2>err &&
+ test_must_be_empty err &&
+ test_cmp actual expect
+
+'
+
+test_expect_success 'git upload-pack --advertise-refs: v1' '
+ # With no specified protocol
+ cat >expect <<-EOF &&
+ version 1
+ $(git rev-parse HEAD) HEAD
+ $(git rev-parse HEAD) $(git symbolic-ref HEAD)
+ 0000
+ EOF
+
+ GIT_PROTOCOL=version=1 \
+ git upload-pack --advertise-refs . >out &&
+
+ test-tool pkt-line unpack <out >actual 2>err &&
+ test_must_be_empty err &&
+ test_cmp actual expect
+'
+
+test_expect_success 'git receive-pack --advertise-refs: v1' '
+ # With no specified protocol
+ cat >expect <<-EOF &&
+ version 1
+ $(git rev-parse HEAD) $(git symbolic-ref HEAD)
+ 0000
+ EOF
+
+ GIT_PROTOCOL=version=1 \
+ git receive-pack --advertise-refs . >out &&
+
+ test-tool pkt-line unpack <out >actual 2>err &&
+ test_must_be_empty err &&
+ test_cmp actual expect
+'
+
+test_expect_success 'git upload-pack --advertise-refs: v2' '
+ cat >expect <<-EOF &&
+ version 2
+ agent=FAKE
+ ls-refs=unborn
+ fetch=shallow wait-for-done
+ server-option
+ object-format=$(test_oid algo)
+ object-info
+ 0000
+ EOF
+
+ GIT_PROTOCOL=version=2 \
+ GIT_USER_AGENT=FAKE \
+ git upload-pack --advertise-refs . >out 2>err &&
+
+ test-tool pkt-line unpack <out >actual &&
+ test_must_be_empty err &&
+ test_cmp actual expect
+'
+
+test_expect_success 'git receive-pack --advertise-refs: v2' '
+ # There is no v2 yet for receive-pack, implicit v0
+ cat >expect <<-EOF &&
+ $(git rev-parse HEAD) $(git symbolic-ref HEAD)
+ 0000
+ EOF
+
+ GIT_PROTOCOL=version=2 \
+ git receive-pack --advertise-refs . >out 2>err &&
+
+ test-tool pkt-line unpack <out >actual &&
+ test_must_be_empty err &&
+ test_cmp actual expect
+'
+
+test_done
diff --git a/t/t5562/invoke-with-content-length.pl b/t/t5562/invoke-with-content-length.pl
index 0943474af2..718dd9b49d 100644
--- a/t/t5562/invoke-with-content-length.pl
+++ b/t/t5562/invoke-with-content-length.pl
@@ -13,11 +13,6 @@ my $body_data;
defined read($body_fh, $body_data, $body_size) or die "Cannot read $body_filename: $!";
close($body_fh);
-my $exited = 0;
-$SIG{"CHLD"} = sub {
- $exited = 1;
-};
-
# write data
my $pid = open(my $out, "|-", @command);
{
@@ -29,8 +24,13 @@ my $pid = open(my $out, "|-", @command);
}
print $out $body_data or die "Cannot write data: $!";
-sleep 60; # is interrupted by SIGCHLD
-if (!$exited) {
- close($out);
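+# Arrange for the command to be killed if it is still running after 60
+# seconds, then wait for it to exit on its own.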
+$SIG{ALRM} = sub {
+ kill 'KILL', $pid;
die "Command did not exit after reading whole body";
+};
+alarm 60;
+
+my $ret = waitpid($pid, 0);
+if ($ret != $pid) {
+ die "confusing return from waitpid: $ret";
}
diff --git a/t/t5600-clone-fail-cleanup.sh b/t/t5600-clone-fail-cleanup.sh
index 5bf10261d3..34b3df4027 100755
--- a/t/t5600-clone-fail-cleanup.sh
+++ b/t/t5600-clone-fail-cleanup.sh
@@ -35,7 +35,9 @@ test_expect_success 'create a repo to clone' '
'
test_expect_success 'create objects in repo for later corruption' '
- test_commit -C foo file
+ test_commit -C foo file &&
+ git -C foo checkout --detach &&
+ test_commit -C foo detached
'
# source repository given to git clone should be relative to the
diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh
index 930721f053..aa1827d841 100755
--- a/t/t5701-git-serve.sh
+++ b/t/t5701-git-serve.sh
@@ -72,6 +72,37 @@ test_expect_success 'request invalid command' '
test_i18ngrep "invalid command" err
'
+test_expect_success 'request capability as command' '
+ test-tool pkt-line pack >in <<-EOF &&
+ command=agent
+ object-format=$(test_oid algo)
+ 0000
+ EOF
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
+ grep invalid.command.*agent err
+'
+
+test_expect_success 'request command as capability' '
+ test-tool pkt-line pack >in <<-EOF &&
+ command=ls-refs
+ object-format=$(test_oid algo)
+ fetch
+ 0000
+ EOF
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
+ grep unknown.capability err
+'
+
+test_expect_success 'requested command is command=value' '
+ test-tool pkt-line pack >in <<-EOF &&
+ command=ls-refs=whatever
+ object-format=$(test_oid algo)
+ 0000
+ EOF
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
+ grep invalid.command.*ls-refs=whatever err
+'
+
test_expect_success 'wrong object-format' '
test-tool pkt-line pack >in <<-EOF &&
command=fetch
@@ -116,6 +147,19 @@ test_expect_success 'basics of ls-refs' '
test_cmp expect actual
'
+test_expect_success 'ls-refs complains about unknown options' '
+ test-tool pkt-line pack >in <<-EOF &&
+ command=ls-refs
+ object-format=$(test_oid algo)
+ 0001
+ no-such-arg
+ 0000
+ EOF
+
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
+ grep unexpected.line.*no-such-arg err
+'
+
test_expect_success 'basic ref-prefixes' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
@@ -158,6 +202,37 @@ test_expect_success 'refs/heads prefix' '
test_cmp expect actual
'
+test_expect_success 'ignore very large set of prefixes' '
+ # generate a large number of ref-prefixes that we expect
+ # to match nothing; the value here exceeds TOO_MANY_PREFIXES
+ # from ls-refs.c.
+ {
+ echo command=ls-refs &&
+ echo object-format=$(test_oid algo) &&
+ echo 0001 &&
+ perl -le "print \"ref-prefix refs/heads/\$_\" for (1..65536)" &&
+ echo 0000
+ } |
+ test-tool pkt-line pack >in &&
+
+ # and then confirm that we see unmatched prefixes anyway (i.e.,
+ # that the prefix was not applied).
+ cat >expect <<-EOF &&
+ $(git rev-parse HEAD) HEAD
+ $(git rev-parse refs/heads/dev) refs/heads/dev
+ $(git rev-parse refs/heads/main) refs/heads/main
+ $(git rev-parse refs/heads/release) refs/heads/release
+ $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag
+ $(git rev-parse refs/tags/one) refs/tags/one
+ $(git rev-parse refs/tags/two) refs/tags/two
+ 0000
+ EOF
+
+ test-tool serve-v2 --stateless-rpc <in >out &&
+ test-tool pkt-line unpack <out >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'peel parameter' '
test-tool pkt-line pack >in <<-EOF &&
command=ls-refs
diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh
index d3687b1a2e..d527cf6c49 100755
--- a/t/t5702-protocol-v2.sh
+++ b/t/t5702-protocol-v2.sh
@@ -237,6 +237,19 @@ test_expect_success '...but not if explicitly forbidden by config' '
! grep "refs/heads/mydefaultbranch" file_empty_child/.git/HEAD
'
+test_expect_success 'bare clone propagates empty default branch' '
+ test_when_finished "rm -rf file_empty_parent file_empty_child.git" &&
+
+ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME= \
+ git -c init.defaultBranch=mydefaultbranch init file_empty_parent &&
+
+ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME= \
+ git -c init.defaultBranch=main -c protocol.version=2 \
+ clone --bare \
+ "file://$(pwd)/file_empty_parent" file_empty_child.git &&
+ grep "refs/heads/mydefaultbranch" file_empty_child.git/HEAD
+'
+
test_expect_success 'fetch with file:// using protocol v2' '
test_when_finished "rm -f log" &&
diff --git a/t/t5704-protocol-violations.sh b/t/t5704-protocol-violations.sh
index 5c941949b9..bc393d7c31 100755
--- a/t/t5704-protocol-violations.sh
+++ b/t/t5704-protocol-violations.sh
@@ -32,4 +32,19 @@ test_expect_success 'extra delim packet in v2 fetch args' '
test_i18ngrep "expected flush after fetch arguments" err
'
+test_expect_success 'bogus symref in v0 capabilities' '
+ test_commit foo &&
+ oid=$(git rev-parse HEAD) &&
+ dst=refs/heads/foo &&
+ {
+ printf "%s HEAD\0symref object-format=%s symref=HEAD:%s\n" \
+ "$oid" "$GIT_DEFAULT_HASH" "$dst" |
+ test-tool pkt-line pack-raw-stdin &&
+ printf "0000"
+ } >input &&
+ git ls-remote --symref --upload-pack="cat input; read junk;:" . >actual &&
+ printf "ref: %s\tHEAD\n%s\tHEAD\n" "$dst" "$oid" >expect &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh
index a1baf4e451..1be85d064e 100755
--- a/t/t6030-bisect-porcelain.sh
+++ b/t/t6030-bisect-porcelain.sh
@@ -962,4 +962,22 @@ test_expect_success 'bisect handles annotated tags' '
grep "$bad is the first bad commit" output
'
+test_expect_success 'bisect run fails with exit code equal to or greater than 128' '
+ write_script test_script.sh <<-\EOF &&
+ exit 128
+ EOF
+ test_must_fail git bisect run ./test_script.sh &&
+ write_script test_script.sh <<-\EOF &&
+ exit 255
+ EOF
+ test_must_fail git bisect run ./test_script.sh
+'
+
+test_expect_success 'bisect visualize with a filename with dash and space' '
+ echo "My test line" >>"./-hello 2" &&
+ git add -- "./-hello 2" &&
+ git commit --quiet -m "Add test line" -- "./-hello 2" &&
+ git bisect visualize -p -- "-hello 2"
+'
+
test_done
diff --git a/t/t6415-merge-dir-to-symlink.sh b/t/t6415-merge-dir-to-symlink.sh
index 2ce104aca7..2655e295f5 100755
--- a/t/t6415-merge-dir-to-symlink.sh
+++ b/t/t6415-merge-dir-to-symlink.sh
@@ -25,7 +25,8 @@ test_expect_success 'checkout does not clobber untracked symlink' '
git reset --hard main &&
git rm --cached a/b &&
git commit -m "untracked symlink remains" &&
- test_must_fail git checkout start^0
+ test_must_fail git checkout start^0 &&
+ git clean -fd # Do not leave the untracked symlink in the way
'
test_expect_success 'a/b-2/c/d is kept when clobbering symlink b' '
@@ -34,7 +35,8 @@ test_expect_success 'a/b-2/c/d is kept when clobbering symlink b' '
git rm --cached a/b &&
git commit -m "untracked symlink remains" &&
git checkout -f start^0 &&
- test_path_is_file a/b-2/c/d
+ test_path_is_file a/b-2/c/d &&
+ git clean -fd # Do not leave the untracked symlink in the way
'
test_expect_success 'checkout should not have deleted a/b-2/c/d' '
diff --git a/t/t6424-merge-unrelated-index-changes.sh b/t/t6424-merge-unrelated-index-changes.sh
index 5e3779ebc9..89dd544f38 100755
--- a/t/t6424-merge-unrelated-index-changes.sh
+++ b/t/t6424-merge-unrelated-index-changes.sh
@@ -132,6 +132,7 @@ test_expect_success 'merge-recursive, when index==head but head!=HEAD' '
# Make index match B
git diff C B -- | git apply --cached &&
+ test_when_finished "git clean -fd" && # Do not leave untracked around
# Merge B & F, with B as "head"
git merge-recursive A -- B F > out &&
test_i18ngrep "Already up to date" out
diff --git a/t/t6430-merge-recursive.sh b/t/t6430-merge-recursive.sh
index ffcc01fe65..a0efe7cb6d 100755
--- a/t/t6430-merge-recursive.sh
+++ b/t/t6430-merge-recursive.sh
@@ -718,7 +718,9 @@ test_expect_success 'merge-recursive remembers the names of all base trees' '
# merge-recursive prints in reverse order, but we do not care
sort <trees >expect &&
sed -n "s/^virtual //p" out | sort >actual &&
- test_cmp expect actual
+ test_cmp expect actual &&
+
+ git clean -fd
'
test_expect_success 'merge-recursive internal merge resolves to the sameness' '
diff --git a/t/t6436-merge-overwrite.sh b/t/t6436-merge-overwrite.sh
index 84b4aacf49..c0b7bd7c3f 100755
--- a/t/t6436-merge-overwrite.sh
+++ b/t/t6436-merge-overwrite.sh
@@ -68,7 +68,8 @@ test_expect_success 'will not overwrite removed file' '
git commit -m "rm c1.c" &&
cp important c1.c &&
test_must_fail git merge c1a &&
- test_cmp important c1.c
+ test_cmp important c1.c &&
+ rm c1.c # Do not leave untracked file in way of future tests
'
test_expect_success 'will not overwrite re-added file' '
diff --git a/t/t7002-mv-sparse-checkout.sh b/t/t7002-mv-sparse-checkout.sh
new file mode 100755
index 0000000000..545748949a
--- /dev/null
+++ b/t/t7002-mv-sparse-checkout.sh
@@ -0,0 +1,189 @@
+#!/bin/sh
+
+test_description='git mv in sparse working trees'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' "
+ mkdir -p sub/dir sub/dir2 &&
+ touch a b c sub/d sub/dir/e sub/dir2/e &&
+ git add -A &&
+ git commit -m files &&
+
+ cat >sparse_error_header <<-EOF &&
+ The following paths and/or pathspecs matched paths that exist
+ outside of your sparse-checkout definition, so will not be
+ updated in the index:
+ EOF
+
+ cat >sparse_hint <<-EOF
+ hint: If you intend to update such entries, try one of the following:
+ hint: * Use the --sparse option.
+ hint: * Disable or modify the sparsity rules.
+ hint: Disable this message with \"git config advice.updateSparsePath false\"
+ EOF
+"
+
+test_expect_success 'mv refuses to move sparse-to-sparse' '
+ test_when_finished rm -f e &&
+ git reset --hard &&
+ git sparse-checkout set a &&
+ touch b &&
+ test_must_fail git mv b e 2>stderr &&
+ cat sparse_error_header >expect &&
+ echo b >>expect &&
+ echo e >>expect &&
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+ git mv --sparse b e 2>stderr &&
+ test_must_be_empty stderr
+'
+
+test_expect_success 'mv refuses to move sparse-to-sparse, ignores failure' '
+ test_when_finished rm -f b c e &&
+ git reset --hard &&
+ git sparse-checkout set a &&
+
+ # tracked-to-untracked
+ touch b &&
+ git mv -k b e 2>stderr &&
+ test_path_exists b &&
+ test_path_is_missing e &&
+ cat sparse_error_header >expect &&
+ echo b >>expect &&
+ echo e >>expect &&
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+
+ git mv --sparse b e 2>stderr &&
+ test_must_be_empty stderr &&
+ test_path_is_missing b &&
+ test_path_exists e &&
+
+ # tracked-to-tracked
+ git reset --hard &&
+ touch b &&
+ git mv -k b c 2>stderr &&
+ test_path_exists b &&
+ test_path_is_missing c &&
+ cat sparse_error_header >expect &&
+ echo b >>expect &&
+ echo c >>expect &&
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+
+ git mv --sparse b c 2>stderr &&
+ test_must_be_empty stderr &&
+ test_path_is_missing b &&
+ test_path_exists c
+'
+
+test_expect_success 'mv refuses to move non-sparse-to-sparse' '
+ test_when_finished rm -f b c e &&
+ git reset --hard &&
+ git sparse-checkout set a &&
+
+ # tracked-to-untracked
+ test_must_fail git mv a e 2>stderr &&
+ test_path_exists a &&
+ test_path_is_missing e &&
+ cat sparse_error_header >expect &&
+ echo e >>expect &&
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+ git mv --sparse a e 2>stderr &&
+ test_must_be_empty stderr &&
+ test_path_is_missing a &&
+ test_path_exists e &&
+
+ # tracked-to-tracked
+ rm e &&
+ git reset --hard &&
+ test_must_fail git mv a c 2>stderr &&
+ test_path_exists a &&
+ test_path_is_missing c &&
+ cat sparse_error_header >expect &&
+ echo c >>expect &&
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+ git mv --sparse a c 2>stderr &&
+ test_must_be_empty stderr &&
+ test_path_is_missing a &&
+ test_path_exists c
+'
+
+test_expect_success 'mv refuses to move sparse-to-non-sparse' '
+ test_when_finished rm -f b c e &&
+ git reset --hard &&
+ git sparse-checkout set a e &&
+
+ # tracked-to-untracked
+ touch b &&
+ test_must_fail git mv b e 2>stderr &&
+ cat sparse_error_header >expect &&
+ echo b >>expect &&
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+ git mv --sparse b e 2>stderr &&
+ test_must_be_empty stderr
+'
+
+test_expect_success 'recursive mv refuses to move (possible) sparse' '
+ test_when_finished rm -rf b c e sub2 &&
+ git reset --hard &&
+ # Without cone mode, "sub" and "sub2" do not match
+ git sparse-checkout set sub/dir sub2/dir &&
+
+ # Add contained contents to ensure we avoid non-existence errors
+ mkdir sub/dir2 &&
+ touch sub/d sub/dir2/e &&
+
+ test_must_fail git mv sub sub2 2>stderr &&
+ cat sparse_error_header >expect &&
+ cat >>expect <<-\EOF &&
+ sub/d
+ sub2/d
+ sub/dir/e
+ sub2/dir/e
+ sub/dir2/e
+ sub2/dir2/e
+ EOF
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+ git mv --sparse sub sub2 2>stderr &&
+ test_must_be_empty stderr &&
+ git commit -m "moved sub to sub2" &&
+ git rev-parse HEAD~1:sub >expect &&
+ git rev-parse HEAD:sub2 >actual &&
+ test_cmp expect actual &&
+ git reset --hard HEAD~1
+'
+
+test_expect_success 'recursive mv refuses to move sparse' '
+ git reset --hard &&
+ # Use cone mode so "sub/" matches the sparse-checkout patterns
+ git sparse-checkout init --cone &&
+ git sparse-checkout set sub/dir sub2/dir &&
+
+ # Add contained contents to ensure we avoid non-existence errors
+ mkdir sub/dir2 &&
+ touch sub/dir2/e &&
+
+ test_must_fail git mv sub sub2 2>stderr &&
+ cat sparse_error_header >expect &&
+ cat >>expect <<-\EOF &&
+ sub/dir2/e
+ sub2/dir2/e
+ EOF
+ cat sparse_hint >>expect &&
+ test_cmp expect stderr &&
+ git mv --sparse sub sub2 2>stderr &&
+ test_must_be_empty stderr &&
+ git commit -m "moved sub to sub2" &&
+ git rev-parse HEAD~1:sub >expect &&
+ git rev-parse HEAD:sub2 >actual &&
+ test_cmp expect actual &&
+ git reset --hard HEAD~1
+'
+
+test_done
diff --git a/t/t7112-reset-submodule.sh b/t/t7112-reset-submodule.sh
index 19830d9036..a3e2413bc3 100755
--- a/t/t7112-reset-submodule.sh
+++ b/t/t7112-reset-submodule.sh
@@ -6,7 +6,6 @@ test_description='reset can handle submodules'
. "$TEST_DIRECTORY"/lib-submodule-update.sh
KNOWN_FAILURE_DIRECTORY_SUBMODULE_CONFLICTS=1
-KNOWN_FAILURE_SUBMODULE_OVERWRITE_IGNORED_UNTRACKED=1
test_submodule_switch_recursing_with_args "reset --keep"
diff --git a/t/t7201-co.sh b/t/t7201-co.sh
index 7f6e23a4bb..b7ba1c3268 100755
--- a/t/t7201-co.sh
+++ b/t/t7201-co.sh
@@ -585,6 +585,7 @@ test_expect_success 'checkout --conflict=diff3' '
'
test_expect_success 'failing checkout -b should not break working tree' '
+ git clean -fd && # Remove untracked files in the way
git reset --hard main &&
git symbolic-ref HEAD refs/heads/main &&
test_must_fail git checkout -b renamer side^ &&
diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh
index deea88d443..f488d930df 100755
--- a/t/t7519-status-fsmonitor.sh
+++ b/t/t7519-status-fsmonitor.sh
@@ -389,47 +389,55 @@ test_expect_success 'status succeeds after staging/unstaging' '
# If "!" is supplied, then we verify that we do not call ensure_full_index
# during a call to 'git status'. Otherwise, we verify that we _do_ call it.
check_sparse_index_behavior () {
- git status --porcelain=v2 >expect &&
- git sparse-checkout init --cone --sparse-index &&
- git sparse-checkout set dir1 dir2 &&
+ git -C full status --porcelain=v2 >expect &&
GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \
- git status --porcelain=v2 >actual &&
+ git -C sparse status --porcelain=v2 >actual &&
test_region $1 index ensure_full_index trace2.txt &&
test_region fsm_hook query trace2.txt &&
test_cmp expect actual &&
- rm trace2.txt &&
- git sparse-checkout disable
+ rm trace2.txt
}
test_expect_success 'status succeeds with sparse index' '
- git reset --hard &&
-
- test_config core.fsmonitor "$TEST_DIRECTORY/t7519/fsmonitor-all" &&
- check_sparse_index_behavior ! &&
-
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
- printf "last_update_token\0"
- EOF
- git config core.fsmonitor .git/hooks/fsmonitor-test &&
- check_sparse_index_behavior ! &&
+ (
+ sane_unset GIT_TEST_SPLIT_INDEX &&
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
- printf "last_update_token\0"
- printf "dir1/modified\0"
- EOF
- check_sparse_index_behavior ! &&
+ git clone . full &&
+ git clone --sparse . sparse &&
+ git -C sparse sparse-checkout init --cone --sparse-index &&
+ git -C sparse sparse-checkout set dir1 dir2 &&
- cp -r dir1 dir1a &&
- git add dir1a &&
- git commit -m "add dir1a" &&
+ write_script .git/hooks/fsmonitor-test <<-\EOF &&
+ printf "last_update_token\0"
+ EOF
+ git -C full config core.fsmonitor ../.git/hooks/fsmonitor-test &&
+ git -C sparse config core.fsmonitor ../.git/hooks/fsmonitor-test &&
+ check_sparse_index_behavior ! &&
- # This one modifies outside the sparse-checkout definition
- # and hence we expect to expand the sparse-index.
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
- printf "last_update_token\0"
- printf "dir1a/modified\0"
- EOF
- check_sparse_index_behavior
+ write_script .git/hooks/fsmonitor-test <<-\EOF &&
+ printf "last_update_token\0"
+ printf "dir1/modified\0"
+ EOF
+ check_sparse_index_behavior ! &&
+
+ git -C sparse sparse-checkout add dir1a &&
+
+ for repo in full sparse
+ do
+ cp -r $repo/dir1 $repo/dir1a &&
+ git -C $repo add dir1a &&
+ git -C $repo commit -m "add dir1a" || return 1
+ done &&
+ git -C sparse sparse-checkout set dir1 dir2 &&
+
+ # This one modifies outside the sparse-checkout definition
+ # and hence we expect to expand the sparse-index.
+ write_script .git/hooks/fsmonitor-test <<-\EOF &&
+ printf "last_update_token\0"
+ printf "dir1a/modified\0"
+ EOF
+ check_sparse_index_behavior
+ )
'
test_done
diff --git a/t/t7600-merge.sh b/t/t7600-merge.sh
index 2ef39d3088..c773e30b3f 100755
--- a/t/t7600-merge.sh
+++ b/t/t7600-merge.sh
@@ -717,6 +717,7 @@ test_expect_success 'failed fast-forward merge with --autostash' '
git reset --hard c0 &&
git merge-file file file.orig file.5 &&
cp file.5 other &&
+ test_when_finished "rm other" &&
test_must_fail git merge --autostash c1 2>err &&
test_i18ngrep "Applied autostash." err &&
test_cmp file.5 file
diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh
index 25b235c063..98eda3bfeb 100755
--- a/t/t7700-repack.sh
+++ b/t/t7700-repack.sh
@@ -63,13 +63,14 @@ test_expect_success 'objects in packs marked .keep are not repacked' '
test_expect_success 'writing bitmaps via command-line can duplicate .keep objects' '
# build on $oid, $packid, and .keep state from previous
- git repack -Adbl &&
+ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 git repack -Adbl &&
test_has_duplicate_object true
'
test_expect_success 'writing bitmaps via config can duplicate .keep objects' '
# build on $oid, $packid, and .keep state from previous
- git -c repack.writebitmaps=true repack -Adl &&
+ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \
+ git -c repack.writebitmaps=true repack -Adl &&
test_has_duplicate_object true
'
@@ -189,7 +190,9 @@ test_expect_success 'repack --keep-pack' '
test_expect_success 'bitmaps are created by default in bare repos' '
git clone --bare .git bare.git &&
- git -C bare.git repack -ad &&
+ rm -f bare.git/objects/pack/*.bitmap &&
+ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \
+ git -C bare.git repack -ad &&
bitmap=$(ls bare.git/objects/pack/*.bitmap) &&
test_path_is_file "$bitmap"
'
@@ -200,7 +203,8 @@ test_expect_success 'incremental repack does not complain' '
'
test_expect_success 'bitmaps can be disabled on bare repos' '
- git -c repack.writeBitmaps=false -C bare.git repack -ad &&
+ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \
+ git -c repack.writeBitmaps=false -C bare.git repack -ad &&
bitmap=$(ls bare.git/objects/pack/*.bitmap || :) &&
test -z "$bitmap"
'
@@ -211,7 +215,8 @@ test_expect_success 'no bitmaps created if .keep files present' '
keep=${pack%.pack}.keep &&
test_when_finished "rm -f \"\$keep\"" &&
>"$keep" &&
- git -C bare.git repack -ad 2>stderr &&
+ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \
+ git -C bare.git repack -ad 2>stderr &&
test_must_be_empty stderr &&
find bare.git/objects/pack/ -type f -name "*.bitmap" >actual &&
test_must_be_empty actual
@@ -222,7 +227,8 @@ test_expect_success 'auto-bitmaps do not complain if unavailable' '
blob=$(test-tool genrandom big $((1024*1024)) |
git -C bare.git hash-object -w --stdin) &&
git -C bare.git update-ref refs/tags/big $blob &&
- git -C bare.git repack -ad 2>stderr &&
+ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \
+ git -C bare.git repack -ad 2>stderr &&
test_must_be_empty stderr &&
find bare.git/objects/pack -type f -name "*.bitmap" >actual &&
test_must_be_empty actual
diff --git a/t/t7800-difftool.sh b/t/t7800-difftool.sh
index a173f564bc..096456292c 100755
--- a/t/t7800-difftool.sh
+++ b/t/t7800-difftool.sh
@@ -453,6 +453,13 @@ run_dir_diff_test 'difftool --dir-diff' '
grep "^file$" output
'
+run_dir_diff_test 'difftool --dir-diff avoids repeated slashes in TMPDIR' '
+ TMPDIR="${TMPDIR:-/tmp}////" \
+ git difftool --dir-diff $symlinks --extcmd echo branch >output &&
+ grep -v // output >actual &&
+ test_line_count = 1 actual
+'
+
run_dir_diff_test 'difftool --dir-diff ignores --prompt' '
git difftool --dir-diff $symlinks --prompt --extcmd ls branch >output &&
grep "^sub$" output &&
@@ -674,7 +681,6 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' '
rm c &&
ln -s d c &&
cat >expect <<-EOF &&
- b
c
c
@@ -710,7 +716,6 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' '
# Deleted symlinks
rm -f c &&
cat >expect <<-EOF &&
- b
c
EOF
@@ -723,6 +728,71 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' '
test_cmp expect actual
'
+test_expect_success SYMLINKS 'difftool --dir-diff writes symlinks as raw text' '
+ # Start out on a branch called "branch-init".
+ git init -b branch-init symlink-files &&
+ (
+ cd symlink-files &&
+ # This test ensures that symlinks are written as raw text.
+ # The "cat" tools output link and file contents.
+ git config difftool.cat-left-link.cmd "cat \"\$LOCAL/link\"" &&
+ git config difftool.cat-left-a.cmd "cat \"\$LOCAL/file-a\"" &&
+ git config difftool.cat-right-link.cmd "cat \"\$REMOTE/link\"" &&
+ git config difftool.cat-right-b.cmd "cat \"\$REMOTE/file-b\"" &&
+
+ # Record the empty initial state so that we can come back here
+ # later and not have to consider any cases where difftool
+ # will create symlinks back into the worktree.
+ test_tick &&
+ git commit --allow-empty -m init &&
+
+ # Create a file called "file-a" with a symlink pointing to it.
+ git switch -c branch-a &&
+ echo a >file-a &&
+ ln -s file-a link &&
+ git add file-a link &&
+ test_tick &&
+ git commit -m link-to-file-a &&
+
+ # Create a file called "file-b" and point the symlink to it.
+ git switch -c branch-b &&
+ echo b >file-b &&
+ rm link &&
+ ln -s file-b link &&
+ git add file-b link &&
+ git rm file-a &&
+ test_tick &&
+ git commit -m link-to-file-b &&
+
+ # Checkout the initial branch so that the --symlinks behavior is
+ # not activated. The two directories should be completely
+ # independent with no symlinks pointing back here.
+ git switch branch-init &&
+
+ # The left link must be "file-a" and "file-a" must contain "a".
+ echo file-a >expect &&
+ git difftool --symlinks --dir-diff --tool cat-left-link \
+ branch-a branch-b >actual &&
+ test_cmp expect actual &&
+
+ echo a >expect &&
+ git difftool --symlinks --dir-diff --tool cat-left-a \
+ branch-a branch-b >actual &&
+ test_cmp expect actual &&
+
+ # The right link must be "file-b" and "file-b" must contain "b".
+ echo file-b >expect &&
+ git difftool --symlinks --dir-diff --tool cat-right-link \
+ branch-a branch-b >actual &&
+ test_cmp expect actual &&
+
+ echo b >expect &&
+ git difftool --symlinks --dir-diff --tool cat-right-b \
+ branch-a branch-b >actual &&
+ test_cmp expect actual
+ )
+'
+
test_expect_success 'add -N and difftool -d' '
test_when_finished git reset --hard &&
diff --git a/t/t7814-grep-recurse-submodules.sh b/t/t7814-grep-recurse-submodules.sh
index 828cb3ba58..058e5d0c96 100755
--- a/t/t7814-grep-recurse-submodules.sh
+++ b/t/t7814-grep-recurse-submodules.sh
@@ -8,6 +8,9 @@ submodules.
. ./test-lib.sh
+GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=1
+export GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB
+
test_expect_success 'setup directory structure and submodule' '
echo "(1|2)d(3|4)" >a &&
mkdir b &&
@@ -438,4 +441,107 @@ test_expect_success 'grep --recurse-submodules with --cached ignores worktree mo
test_must_fail git grep --recurse-submodules --cached "A modified line in submodule" >actual 2>&1 &&
test_must_be_empty actual
'
+
+test_expect_failure 'grep --textconv: superproject .gitattributes does not affect submodules' '
+ reset_and_clean &&
+ test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
+ echo "a diff=d2x" >.gitattributes &&
+
+ cat >expect <<-\EOF &&
+ a:(1|2)x(3|4)
+ EOF
+ git grep --textconv --recurse-submodules x >actual &&
+ test_cmp expect actual
+'
+
+test_expect_failure 'grep --textconv: superproject .gitattributes (from index) does not affect submodules' '
+ reset_and_clean &&
+ test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
+ echo "a diff=d2x" >.gitattributes &&
+ git add .gitattributes &&
+ rm .gitattributes &&
+
+ cat >expect <<-\EOF &&
+ a:(1|2)x(3|4)
+ EOF
+ git grep --textconv --recurse-submodules x >actual &&
+ test_cmp expect actual
+'
+
+test_expect_failure 'grep --textconv: superproject .git/info/attributes does not affect submodules' '
+ reset_and_clean &&
+ test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
+ super_attr="$(git rev-parse --git-path info/attributes)" &&
+ test_when_finished "rm -f \"$super_attr\"" &&
+ echo "a diff=d2x" >"$super_attr" &&
+
+ cat >expect <<-\EOF &&
+ a:(1|2)x(3|4)
+ EOF
+ git grep --textconv --recurse-submodules x >actual &&
+ test_cmp expect actual
+'
+
+# Note: what currently prevents this test from passing is not that the
+# .gitattributes file from "./submodule" is being ignored, but that it is being
+# propagated to the nested "./submodule/sub" files.
+#
+test_expect_failure 'grep --textconv correctly reads submodule .gitattributes' '
+ reset_and_clean &&
+ test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
+ echo "a diff=d2x" >submodule/.gitattributes &&
+
+ cat >expect <<-\EOF &&
+ submodule/a:(1|2)x(3|4)
+ EOF
+ git grep --textconv --recurse-submodules x >actual &&
+ test_cmp expect actual
+'
+
+test_expect_failure 'grep --textconv correctly reads submodule .gitattributes (from index)' '
+ reset_and_clean &&
+ test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
+ echo "a diff=d2x" >submodule/.gitattributes &&
+ git -C submodule add .gitattributes &&
+ rm submodule/.gitattributes &&
+
+ cat >expect <<-\EOF &&
+ submodule/a:(1|2)x(3|4)
+ EOF
+ git grep --textconv --recurse-submodules x >actual &&
+ test_cmp expect actual
+'
+
+test_expect_failure 'grep --textconv correctly reads submodule .git/info/attributes' '
+ reset_and_clean &&
+ test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" &&
+
+ submodule_attr="$(git -C submodule rev-parse --path-format=absolute --git-path info/attributes)" &&
+ test_when_finished "rm -f \"$submodule_attr\"" &&
+ echo "a diff=d2x" >"$submodule_attr" &&
+
+ cat >expect <<-\EOF &&
+ submodule/a:(1|2)x(3|4)
+ EOF
+ git grep --textconv --recurse-submodules x >actual &&
+ test_cmp expect actual
+'
+
+test_expect_failure 'grep saves textconv cache in the appropriate repository' '
+ reset_and_clean &&
+ test_config_global diff.d2x_cached.textconv "sed -e \"s/d/x/\"" &&
+ test_config_global diff.d2x_cached.cachetextconv true &&
+ echo "a diff=d2x_cached" >submodule/.gitattributes &&
+
+ # We only read/write to the textconv cache when grepping from an OID,
+ # as the working tree file might have modifications.
+ git grep --textconv --cached --recurse-submodules x &&
+
+ super_textconv_cache="$(git rev-parse --git-path refs/notes/textconv/d2x_cached)" &&
+ sub_textconv_cache="$(git -C submodule rev-parse \
+ --path-format=absolute --git-path refs/notes/textconv/d2x_cached)" &&
+ test_path_is_missing "$super_textconv_cache" &&
+ test_path_is_file "$sub_textconv_cache"
+'
+
test_done
diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh
index fc16ac2258..9b9f11a8e7 100755
--- a/t/t7900-maintenance.sh
+++ b/t/t7900-maintenance.sh
@@ -20,6 +20,17 @@ test_xmllint () {
fi
}
+test_lazy_prereq SYSTEMD_ANALYZE '
+ systemd-analyze verify /lib/systemd/system/basic.target
+'
+
+test_systemd_analyze_verify () {
+ if test_have_prereq SYSTEMD_ANALYZE
+ then
+ systemd-analyze verify "$@"
+ fi
+}
+
test_expect_success 'help text' '
test_expect_code 129 git maintenance -h 2>err &&
test_i18ngrep "usage: git maintenance <subcommand>" err &&
@@ -265,7 +276,7 @@ test_expect_success 'incremental-repack task' '
# Delete refs that have not been repacked in these packs.
git for-each-ref --format="delete %(refname)" \
- refs/prefetch refs/tags >refs &&
+ refs/prefetch refs/tags refs/remotes >refs &&
git update-ref --stdin <refs &&
# Replace the object directory with this pack layout.
@@ -274,6 +285,10 @@ test_expect_success 'incremental-repack task' '
ls $packDir/*.pack >packs-before &&
test_line_count = 3 packs-before &&
+ # make sure we do not have any broken refs that were
+ # missed in the deletion above
+ git for-each-ref &&
+
# the job repacks the two into a new pack, but does not
# delete the old ones.
git maintenance run --task=incremental-repack &&
@@ -492,8 +507,21 @@ test_expect_success !MINGW 'register and unregister with regex metacharacters' '
maintenance.repo "$(pwd)/$META"
'
+test_expect_success 'start --scheduler=<scheduler>' '
+ test_expect_code 129 git maintenance start --scheduler=foo 2>err &&
+ test_i18ngrep "unrecognized --scheduler argument" err &&
+
+ test_expect_code 129 git maintenance start --no-scheduler 2>err &&
+ test_i18ngrep "unknown option" err &&
+
+ test_expect_code 128 \
+ env GIT_TEST_MAINT_SCHEDULER="launchctl:true,schtasks:true" \
+ git maintenance start --scheduler=crontab 2>err &&
+ test_i18ngrep "fatal: crontab scheduler is not available" err
+'
+
test_expect_success 'start from empty cron table' '
- GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start &&
+ GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start --scheduler=crontab &&
# start registers the repo
git config --get --global --fixed-value maintenance.repo "$(pwd)" &&
@@ -516,7 +544,7 @@ test_expect_success 'stop from existing schedule' '
test_expect_success 'start preserves existing schedule' '
echo "Important information!" >cron.txt &&
- GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start &&
+ GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start --scheduler=crontab &&
grep "Important information!" cron.txt
'
@@ -545,7 +573,7 @@ test_expect_success 'start and stop macOS maintenance' '
EOF
rm -f args &&
- GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start &&
+ GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl &&
# start registers the repo
git config --get --global --fixed-value maintenance.repo "$(pwd)" &&
@@ -584,11 +612,11 @@ test_expect_success 'start and stop macOS maintenance' '
test_expect_success 'use launchctl list to prevent extra work' '
# ensure we are registered
- GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start &&
+ GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl &&
# do it again on a fresh args file
rm -f args &&
- GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start &&
+ GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl &&
ls "$HOME/Library/LaunchAgents" >actual &&
cat >expect <<-\EOF &&
@@ -613,7 +641,7 @@ test_expect_success 'start and stop Windows maintenance' '
EOF
rm -f args &&
- GIT_TEST_MAINT_SCHEDULER="schtasks:./print-args" git maintenance start &&
+ GIT_TEST_MAINT_SCHEDULER="schtasks:./print-args" git maintenance start --scheduler=schtasks &&
# start registers the repo
git config --get --global --fixed-value maintenance.repo "$(pwd)" &&
@@ -636,6 +664,83 @@ test_expect_success 'start and stop Windows maintenance' '
test_cmp expect args
'
+test_expect_success 'start and stop Linux/systemd maintenance' '
+ write_script print-args <<-\EOF &&
+ printf "%s\n" "$*" >>args
+ EOF
+
+ XDG_CONFIG_HOME="$PWD" &&
+ export XDG_CONFIG_HOME &&
+ rm -f args &&
+ GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args" git maintenance start --scheduler=systemd-timer &&
+
+ # start registers the repo
+ git config --get --global --fixed-value maintenance.repo "$(pwd)" &&
+
+ test_systemd_analyze_verify "systemd/user/git-maintenance@.service" &&
+
+ printf -- "--user enable --now git-maintenance@%s.timer\n" hourly daily weekly >expect &&
+ test_cmp expect args &&
+
+ rm -f args &&
+ GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args" git maintenance stop &&
+
+ # stop does not unregister the repo
+ git config --get --global --fixed-value maintenance.repo "$(pwd)" &&
+
+ test_path_is_missing "systemd/user/git-maintenance@.timer" &&
+ test_path_is_missing "systemd/user/git-maintenance@.service" &&
+
+ printf -- "--user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect &&
+ test_cmp expect args
+'
+
+test_expect_success 'start and stop when several schedulers are available' '
+ write_script print-args <<-\EOF &&
+ printf "%s\n" "$*" | sed "s:gui/[0-9][0-9]*:gui/[UID]:; s:\(schtasks /create .* /xml\).*:\1:;" >>args
+ EOF
+
+ rm -f args &&
+ GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=systemd-timer &&
+ printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \
+ hourly daily weekly >expect &&
+ printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \
+ hourly daily weekly >>expect &&
+ printf -- "systemctl --user enable --now git-maintenance@%s.timer\n" hourly daily weekly >>expect &&
+ test_cmp expect args &&
+
+ rm -f args &&
+ GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=launchctl &&
+ printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect &&
+ printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \
+ hourly daily weekly >>expect &&
+ for frequency in hourly daily weekly
+ do
+ PLIST="$pfx/Library/LaunchAgents/org.git-scm.git.$frequency.plist" &&
+ echo "launchctl bootout gui/[UID] $PLIST" >>expect &&
+ echo "launchctl bootstrap gui/[UID] $PLIST" >>expect || return 1
+ done &&
+ test_cmp expect args &&
+
+ rm -f args &&
+ GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=schtasks &&
+ printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect &&
+ printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \
+ hourly daily weekly >>expect &&
+ printf "schtasks /create /tn Git Maintenance (%s) /f /xml\n" \
+ hourly daily weekly >>expect &&
+ test_cmp expect args &&
+
+ rm -f args &&
+ GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance stop &&
+ printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect &&
+ printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \
+ hourly daily weekly >>expect &&
+ printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \
+ hourly daily weekly >>expect &&
+ test_cmp expect args
+'
+
test_expect_success 'register preserves existing strategy' '
git config maintenance.strategy none &&
git maintenance register &&
diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh
index 9022074474..aa0c20499b 100755
--- a/t/t9001-send-email.sh
+++ b/t/t9001-send-email.sh
@@ -1533,6 +1533,21 @@ test_expect_success $PREREQ 'sendemail.8bitEncoding works' '
test_cmp content-type-decl actual
'
+test_expect_success $PREREQ 'sendemail.8bitEncoding in .git/config overrides --global .gitconfig' '
+ clean_fake_sendmail &&
+ git config sendemail.assume8bitEncoding UTF-8 &&
+ test_when_finished "rm -rf home" &&
+ mkdir home &&
+ git config -f home/.gitconfig sendemail.assume8bitEncoding "bogus too" &&
+ echo bogus |
+ env HOME="$(pwd)/home" DEBUG=1 \
+ git send-email --from=author@example.com --to=nobody@example.com \
+ --smtp-server="$(pwd)/fake.sendmail" \
+ email-using-8bit >stdout &&
+ egrep "Content|MIME" msgtxt1 >actual &&
+ test_cmp content-type-decl actual
+'
+
test_expect_success $PREREQ '--8bit-encoding overrides sendemail.8bitEncoding' '
clean_fake_sendmail &&
git config sendemail.assume8bitEncoding "bogus too" &&
diff --git a/t/t9400-git-cvsserver-server.sh b/t/t9400-git-cvsserver-server.sh
index 2d29d486ee..17f988edd2 100755
--- a/t/t9400-git-cvsserver-server.sh
+++ b/t/t9400-git-cvsserver-server.sh
@@ -36,6 +36,13 @@ CVSWORK="$PWD/cvswork"
CVS_SERVER=git-cvsserver
export CVSROOT CVS_SERVER
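+# Pick a password hash in a form the platform crypt() supports: a
+# traditional DES-crypt hash when crypt() accepts the two-character
+# salt, and a bcrypt hash otherwise.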
+if perl -e 'exit(1) if not defined crypt("", "cv")'
+then
+ PWDHASH='lac2ItudM3.KM'
+else
+ PWDHASH='$2b$10$t8fGvE/a9eLmfOLzsZme2uOa2QtoMYwIxq9wZA6aBKtF1Yb7FJIzi'
+fi
+
rm -rf "$CVSWORK" "$SERVERDIR"
test_expect_success 'setup' '
git config push.default matching &&
@@ -54,7 +61,7 @@ test_expect_success 'setup' '
GIT_DIR="$SERVERDIR" git config --bool gitcvs.enabled true &&
GIT_DIR="$SERVERDIR" git config gitcvs.logfile "$SERVERDIR/gitcvs.log" &&
GIT_DIR="$SERVERDIR" git config gitcvs.authdb "$SERVERDIR/auth.db" &&
- echo cvsuser:cvGVEarMLnhlA > "$SERVERDIR/auth.db"
+ echo "cvsuser:$PWDHASH" >"$SERVERDIR/auth.db"
'
# note that cvs doesn't accept absolute pathnames
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index e28411bb75..eef2262a36 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -137,33 +137,110 @@ test_tick () {
# Stop execution and start a shell. This is useful for debugging tests.
#
# Be sure to remove all invocations of this command before submitting.
+# WARNING: the shell invoked by this helper does not have the same environment
+# as the one running the tests (shell variables and functions are not
+# available, and the options below further modify the environment). As such,
+# commands copied from a test script might behave differently than when
+# running the test.
+#
+# Usage: test_pause [options]
+# -t
+# Use your original TERM instead of test-lib.sh's "dumb".
+# This usually restores color output in the invoked shell.
+# -s
+# Invoke $SHELL instead of $TEST_SHELL_PATH.
+# -h
+# Use your original HOME instead of test-lib.sh's "$TRASH_DIRECTORY".
+# This allows you to use your regular shell environment and Git aliases.
+# CAUTION: running commands copied from a test script into the paused shell
+# might result in files in your HOME being overwritten.
+# -a
+# Shortcut for -t -s -h
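+#
+# Example (a hypothetical test being debugged; remember to remove the
+# invocation before submitting):
+#
+#	test_expect_success 'demonstrate the failure' '
+#		do_the_thing >actual &&
+#		test_pause -a &&
+#		test_cmp expect actual
+#	'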
test_pause () {
- "$SHELL_PATH" <&6 >&5 2>&7
+ PAUSE_TERM=$TERM &&
+ PAUSE_SHELL=$TEST_SHELL_PATH &&
+ PAUSE_HOME=$HOME &&
+ while test $# != 0
+ do
+ case "$1" in
+ -t)
+ PAUSE_TERM="$USER_TERM"
+ ;;
+ -s)
+ PAUSE_SHELL="$SHELL"
+ ;;
+ -h)
+ PAUSE_HOME="$USER_HOME"
+ ;;
+ -a)
+ PAUSE_TERM="$USER_TERM"
+ PAUSE_SHELL="$SHELL"
+ PAUSE_HOME="$USER_HOME"
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done &&
+ TERM="$PAUSE_TERM" HOME="$PAUSE_HOME" "$PAUSE_SHELL" <&6 >&5 2>&7
}
# Wrap git with a debugger. Adding this to a command can make it easier
# to understand what is going on in a failing test.
#
+# Usage: debug [options] <git command>
+# -d <debugger>
+# --debugger=<debugger>
+# Use <debugger> instead of GDB
+# -t
+# Use your original TERM instead of test-lib.sh's "dumb".
+# This usually restores color output in the debugger.
+# WARNING: the command being debugged might behave differently than when
+# running the test.
+#
# Examples:
# debug git checkout master
# debug --debugger=nemiver git $ARGS
# debug -d "valgrind --tool=memcheck --track-origins=yes" git $ARGS
debug () {
- case "$1" in
- -d)
- GIT_DEBUGGER="$2" &&
- shift 2
- ;;
- --debugger=*)
- GIT_DEBUGGER="${1#*=}" &&
- shift 1
- ;;
- *)
- GIT_DEBUGGER=1
- ;;
- esac &&
- GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7
+ GIT_DEBUGGER=1 &&
+ DEBUG_TERM=$TERM &&
+ while test $# != 0
+ do
+ case "$1" in
+ -t)
+ DEBUG_TERM="$USER_TERM"
+ ;;
+ -d)
+ GIT_DEBUGGER="$2" &&
+ shift
+ ;;
+ --debugger=*)
+ GIT_DEBUGGER="${1#*=}"
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done &&
+
+ dotfiles=".gdbinit .lldbinit"
+
+ for dotfile in $dotfiles
+ do
+ dotfile="$USER_HOME/$dotfile" &&
+ test -f "$dotfile" && cp "$dotfile" "$HOME" || :
+ done &&
+
+ TERM="$DEBUG_TERM" GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7 &&
+
+ for dotfile in $dotfiles
+ do
+ rm -f "$HOME/$dotfile"
+ done
}
# Usage: test_commit [options] <message> [<file> [<contents> [<tag>]]]
diff --git a/t/test-lib.sh b/t/test-lib.sh
index fc1e521519..8361b5c1c5 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -534,7 +534,7 @@ SQ=\'
# when case-folding filenames
u200c=$(printf '\342\200\214')
-export _x05 _x35 _x40 _z40 LF u200c EMPTY_TREE EMPTY_BLOB ZERO_OID OID_REGEX
+export _x05 _x35 LF u200c EMPTY_TREE EMPTY_BLOB ZERO_OID OID_REGEX
# Each test should start with something like this, after copyright notices:
#
@@ -585,8 +585,9 @@ else
}
fi
+USER_TERM="$TERM"
TERM=dumb
-export TERM
+export TERM USER_TERM
error () {
say_color error "error: $*"
@@ -1380,10 +1381,31 @@ then
test_done
fi
+# skip non-whitelisted tests when compiled with SANITIZE=leak
+if test -n "$SANITIZE_LEAK"
+then
+ if test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false
+ then
+ # We need to see it in "git env--helper" (via
+ # test_bool_env)
+ export TEST_PASSES_SANITIZE_LEAK
+
+ if ! test_bool_env TEST_PASSES_SANITIZE_LEAK false
+ then
+ skip_all="skipping $this_test under GIT_TEST_PASSING_SANITIZE_LEAK=true"
+ test_done
+ fi
+ fi
+elif test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false
+then
+ error "GIT_TEST_PASSING_SANITIZE_LEAK=true has no effect except when compiled with SANITIZE=leak"
+fi
+
# Last-minute variable setup
+USER_HOME="$HOME"
HOME="$TRASH_DIRECTORY"
GNUPGHOME="$HOME/gnupg-home-not-used"
-export HOME GNUPGHOME
+export HOME GNUPGHOME USER_HOME
# Test repository
rm -fr "$TRASH_DIRECTORY" || {
@@ -1423,10 +1445,9 @@ then
fi
# Convenience
-# A regexp to match 5, 35 and 40 hexdigits
+# A regexp to match 5 and 35 hexdigits
_x05='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
_x35="$_x05$_x05$_x05$_x05$_x05$_x05$_x05"
-_x40="$_x35$_x05"
test_oid_init
@@ -1435,7 +1456,6 @@ OID_REGEX=$(echo $ZERO_OID | sed -e 's/0/[0-9a-f]/g')
OIDPATH_REGEX=$(test_oid_to_path $ZERO_OID | sed -e 's/0/[0-9a-f]/g')
EMPTY_TREE=$(test_oid empty_tree)
EMPTY_BLOB=$(test_oid empty_blob)
-_z40=$ZERO_OID
# Provide an implementation of the 'yes' utility; the upper bound
# limit is there to help Windows that cannot stop this loop from
@@ -1534,6 +1554,7 @@ test -z "$NO_PYTHON" && test_set_prereq PYTHON
test -n "$USE_LIBPCRE2" && test_set_prereq PCRE
test -n "$USE_LIBPCRE2" && test_set_prereq LIBPCRE2
test -z "$NO_GETTEXT" && test_set_prereq GETTEXT
+test -n "$SANITIZE_LEAK" && test_set_prereq SANITIZE_LEAK
if test -z "$GIT_TEST_CHECK_CACHE_TREE"
then
diff --git a/trace.h b/trace.h
index 0dbbad0e41..e25984051a 100644
--- a/trace.h
+++ b/trace.h
@@ -89,7 +89,7 @@ struct trace_key {
extern struct trace_key trace_default_key;
-#define TRACE_KEY_INIT(name) { "GIT_TRACE_" #name, 0, 0, 0 }
+#define TRACE_KEY_INIT(name) { .key = "GIT_TRACE_" #name }
extern struct trace_key trace_perf_key;
extern struct trace_key trace_setup_key;
diff --git a/trace2.c b/trace2.c
index b9b154ac44..b2d471526f 100644
--- a/trace2.c
+++ b/trace2.c
@@ -394,6 +394,37 @@ void trace2_child_exit_fl(const char *file, int line, struct child_process *cmd,
us_elapsed_child);
}
+void trace2_child_ready_fl(const char *file, int line,
+ struct child_process *cmd,
+ const char *ready)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_child;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ if (cmd->trace2_child_us_start)
+ us_elapsed_child = us_now - cmd->trace2_child_us_start;
+ else
+ us_elapsed_child = 0;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_child_ready_fl)
+ tgt_j->pfn_child_ready_fl(file, line,
+ us_elapsed_absolute,
+ cmd->trace2_child_id,
+ cmd->pid,
+ ready,
+ us_elapsed_child);
+}
+
int trace2_exec_fl(const char *file, int line, const char *exe,
const char **argv)
{
diff --git a/trace2.h b/trace2.h
index 9b7286c572..0cc7b5f531 100644
--- a/trace2.h
+++ b/trace2.h
@@ -254,6 +254,31 @@ void trace2_child_exit_fl(const char *file, int line, struct child_process *cmd,
trace2_child_exit_fl(__FILE__, __LINE__, (cmd), (code))
/**
+ * Emits a "child_ready" message containing the "child-id" and a flag
+ * indicating whether the child was considered "ready" when we
+ * released it.
+ *
+ * This function should be called after starting a daemon process in
+ * the background (and after giving it sufficient time to boot
+ * up) to indicate that we no longer control or own it.
+ *
+ * The "ready" argument should contain one of { "ready", "timeout",
+ * "error" } to indicate the state of the running daemon when we
+ * released it.
+ *
+ * If the daemon process fails to start or it exits or is terminated
+ * while we are still waiting for it, the caller should emit a
+ * regular "child_exit" to report the normal process exit information.
+ *
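+ * A minimal sketch (the "my-daemon" command and the boot-wait helper
+ * are hypothetical; start_command() emits the matching "child_start"
+ * event):
+ *
+ *	struct child_process daemon = CHILD_PROCESS_INIT;
+ *
+ *	strvec_push(&daemon.args, "my-daemon");
+ *	daemon.no_stdin = 1;
+ *	if (start_command(&daemon))
+ *		die("could not start my-daemon");
+ *	wait_until_daemon_listens(&daemon);	(a hypothetical helper)
+ *	trace2_child_ready(&daemon, "ready");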
+ */
+void trace2_child_ready_fl(const char *file, int line,
+ struct child_process *cmd,
+ const char *ready);
+
+#define trace2_child_ready(cmd, ready) \
+ trace2_child_ready_fl(__FILE__, __LINE__, (cmd), (ready))
+
+/**
* Emit an 'exec' event prior to calling one of exec(), execv(),
* execvp(), and etc. On Unix-derived systems, this will be the
* last event emitted for the current process, unless the exec
@@ -350,7 +375,7 @@ void trace2_def_repo_fl(const char *file, int line, struct repository *repo);
* being started, such as "read_recursive" or "do_read_index".
*
* The `repo` field, if set, will be used to get the "repo-id", so that
- * recursive oerations can be attributed to the correct repository.
+ * recursive operations can be attributed to the correct repository.
*/
void trace2_region_enter_fl(const char *file, int line, const char *category,
const char *label, const struct repository *repo, ...);
diff --git a/trace2/tr2_tgt.h b/trace2/tr2_tgt.h
index 1f66fd6573..65f94e1574 100644
--- a/trace2/tr2_tgt.h
+++ b/trace2/tr2_tgt.h
@@ -45,6 +45,10 @@ typedef void(tr2_tgt_evt_child_exit_fl_t)(const char *file, int line,
uint64_t us_elapsed_absolute, int cid,
int pid, int code,
uint64_t us_elapsed_child);
+typedef void(tr2_tgt_evt_child_ready_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ int cid, int pid, const char *ready,
+ uint64_t us_elapsed_child);
typedef void(tr2_tgt_evt_thread_start_fl_t)(const char *file, int line,
uint64_t us_elapsed_absolute);
@@ -116,6 +120,7 @@ struct tr2_tgt {
tr2_tgt_evt_alias_fl_t *pfn_alias_fl;
tr2_tgt_evt_child_start_fl_t *pfn_child_start_fl;
tr2_tgt_evt_child_exit_fl_t *pfn_child_exit_fl;
+ tr2_tgt_evt_child_ready_fl_t *pfn_child_ready_fl;
tr2_tgt_evt_thread_start_fl_t *pfn_thread_start_fl;
tr2_tgt_evt_thread_exit_fl_t *pfn_thread_exit_fl;
tr2_tgt_evt_exec_fl_t *pfn_exec_fl;
diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c
index 578a9a5287..70cfc2f77c 100644
--- a/trace2/tr2_tgt_event.c
+++ b/trace2/tr2_tgt_event.c
@@ -383,6 +383,27 @@ static void fn_child_exit_fl(const char *file, int line,
jw_release(&jw);
}
+static void fn_child_ready_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ const char *ready, uint64_t us_elapsed_child)
+{
+ const char *event_name = "child_ready";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_rel = (double)us_elapsed_child / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "child_id", cid);
+ jw_object_intmax(&jw, "pid", pid);
+ jw_object_string(&jw, "ready", ready);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+
+ jw_release(&jw);
+}
+
static void fn_thread_start_fl(const char *file, int line,
uint64_t us_elapsed_absolute)
{
@@ -610,6 +631,7 @@ struct tr2_tgt tr2_tgt_event = {
fn_alias_fl,
fn_child_start_fl,
fn_child_exit_fl,
+ fn_child_ready_fl,
fn_thread_start_fl,
fn_thread_exit_fl,
fn_exec_fl,
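
For reference, the event target serializes this as one JSON line per event; with the fields added above it looks roughly like the following (values illustrative, common header fields written by event_fmt_prepare() elided as "..."):

	{"event":"child_ready", ..., "child_id":0, "pid":14708, "ready":"ready", "t_rel":0.110605}
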
diff --git a/trace2/tr2_tgt_normal.c b/trace2/tr2_tgt_normal.c
index a5751c8864..58d9e430f0 100644
--- a/trace2/tr2_tgt_normal.c
+++ b/trace2/tr2_tgt_normal.c
@@ -251,6 +251,19 @@ static void fn_child_exit_fl(const char *file, int line,
strbuf_release(&buf_payload);
}
+static void fn_child_ready_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ const char *ready, uint64_t us_elapsed_child)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_child / 1000000.0;
+
+ strbuf_addf(&buf_payload, "child_ready[%d] pid:%d ready:%s elapsed:%.6f",
+ cid, pid, ready, elapsed);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
static void fn_exec_fl(const char *file, int line, uint64_t us_elapsed_absolute,
int exec_id, const char *exe, const char **argv)
{
@@ -330,6 +343,7 @@ struct tr2_tgt tr2_tgt_normal = {
fn_alias_fl,
fn_child_start_fl,
fn_child_exit_fl,
+ fn_child_ready_fl,
NULL, /* thread_start */
NULL, /* thread_exit */
fn_exec_fl,
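
In the GIT_TRACE2 ("normal") target the same event becomes a single human-readable line: normal_io_write_fl() prepends its usual timestamp and file:line prefix, followed by the payload built from the format string above, e.g. (values illustrative):

	child_ready[0] pid:14708 ready:ready elapsed:0.110605
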
diff --git a/trace2/tr2_tgt_perf.c b/trace2/tr2_tgt_perf.c
index af4d65a0a5..e4acca13d6 100644
--- a/trace2/tr2_tgt_perf.c
+++ b/trace2/tr2_tgt_perf.c
@@ -360,6 +360,20 @@ static void fn_child_exit_fl(const char *file, int line,
strbuf_release(&buf_payload);
}
+static void fn_child_ready_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ const char *ready, uint64_t us_elapsed_child)
+{
+ const char *event_name = "child_ready";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "[ch%d] pid:%d ready:%s", cid, pid, ready);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ &us_elapsed_child, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
static void fn_thread_start_fl(const char *file, int line,
uint64_t us_elapsed_absolute)
{
@@ -553,6 +567,7 @@ struct tr2_tgt tr2_tgt_perf = {
fn_alias_fl,
fn_child_start_fl,
fn_child_exit_fl,
+ fn_child_ready_fl,
fn_thread_start_fl,
fn_thread_exit_fl,
fn_exec_fl,
diff --git a/trace2/tr2_tls.c b/trace2/tr2_tls.c
index 067c23755f..7da94aba52 100644
--- a/trace2/tr2_tls.c
+++ b/trace2/tr2_tls.c
@@ -95,6 +95,7 @@ void tr2tls_unset_self(void)
pthread_setspecific(tr2tls_key, NULL);
+ strbuf_release(&ctx->thread_name);
free(ctx->array_us_start);
free(ctx);
}
diff --git a/transport-helper.c b/transport-helper.c
index 4be035edb8..e8dbdd1153 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -671,8 +671,8 @@ static int connect_helper(struct transport *transport, const char *name,
static struct ref *get_refs_list_using_list(struct transport *transport,
int for_push);
-static int fetch(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+static int fetch_refs(struct transport *transport,
+ int nr_heads, struct ref **to_fetch)
{
struct helper_data *data = transport->data;
int i, count;
@@ -681,7 +681,7 @@ static int fetch(struct transport *transport,
if (process_connect(transport, 0)) {
do_take_over(transport);
- return transport->vtable->fetch(transport, nr_heads, to_fetch);
+ return transport->vtable->fetch_refs(transport, nr_heads, to_fetch);
}
/*
@@ -1261,12 +1261,12 @@ static struct ref *get_refs_list_using_list(struct transport *transport,
}
static struct transport_vtable vtable = {
- set_helper_option,
- get_refs_list,
- fetch,
- push_refs,
- connect_helper,
- release_helper
+ .set_option = set_helper_option,
+ .get_refs_list = get_refs_list,
+ .fetch_refs = fetch_refs,
+ .push_refs = push_refs,
+ .connect = connect_helper,
+ .disconnect = release_helper
};
int transport_helper_init(struct transport *transport, const char *name)
diff --git a/transport-internal.h b/transport-internal.h
index b60f1ba907..c4ca0b733a 100644
--- a/transport-internal.h
+++ b/transport-internal.h
@@ -34,7 +34,7 @@ struct transport_vtable {
* get_refs_list(), it should set the old_sha1 fields in the
* provided refs now.
**/
- int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs);
+ int (*fetch_refs)(struct transport *transport, int refs_nr, struct ref **refs);
/**
* Push the objects and refs. Send the necessary objects, and
diff --git a/transport.c b/transport.c
index 17e9629710..e4f1decae2 100644
--- a/transport.c
+++ b/transport.c
@@ -1,7 +1,7 @@
#include "cache.h"
#include "config.h"
#include "transport.h"
-#include "run-command.h"
+#include "hook.h"
#include "pkt-line.h"
#include "fetch-pack.h"
#include "remote.h"
@@ -162,12 +162,16 @@ static int fetch_refs_from_bundle(struct transport *transport,
int nr_heads, struct ref **to_fetch)
{
struct bundle_transport_data *data = transport->data;
+ struct strvec extra_index_pack_args = STRVEC_INIT;
int ret;
+ if (transport->progress)
+ strvec_push(&extra_index_pack_args, "-v");
+
if (!data->get_refs_from_bundle_called)
get_refs_from_bundle(transport, 0, NULL);
ret = unbundle(the_repository, &data->header, data->fd,
- transport->progress ? BUNDLE_VERBOSE : 0);
+ &extra_index_pack_args);
transport->hash_algo = data->header.hash_algo;
return ret;
}
@@ -883,12 +887,10 @@ static int disconnect_git(struct transport *transport)
}
static struct transport_vtable taken_over_vtable = {
- NULL,
- get_refs_via_connect,
- fetch_refs_via_pack,
- git_transport_push,
- NULL,
- disconnect_git
+ .get_refs_list = get_refs_via_connect,
+ .fetch_refs = fetch_refs_via_pack,
+ .push_refs = git_transport_push,
+ .disconnect = disconnect_git
};
void transport_take_over(struct transport *transport,
@@ -1032,21 +1034,17 @@ void transport_check_allowed(const char *type)
}
static struct transport_vtable bundle_vtable = {
- NULL,
- get_refs_from_bundle,
- fetch_refs_from_bundle,
- NULL,
- NULL,
- close_bundle
+ .get_refs_list = get_refs_from_bundle,
+ .fetch_refs = fetch_refs_from_bundle,
+ .disconnect = close_bundle
};
static struct transport_vtable builtin_smart_vtable = {
- NULL,
- get_refs_via_connect,
- fetch_refs_via_pack,
- git_transport_push,
- connect_git,
- disconnect_git
+ .get_refs_list = get_refs_via_connect,
+ .fetch_refs = fetch_refs_via_pack,
+ .push_refs = git_transport_push,
+ .connect = connect_git,
+ .disconnect = disconnect_git
};
struct transport *transport_get(struct remote *remote, const char *url)
@@ -1453,7 +1451,7 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs)
heads[nr_heads++] = rm;
}
- rc = transport->vtable->fetch(transport, nr_heads, heads);
+ rc = transport->vtable->fetch_refs(transport, nr_heads, heads);
free(heads);
return rc;
diff --git a/transport.h b/transport.h
index 1cbab11373..8bb4c8bbc8 100644
--- a/transport.h
+++ b/transport.h
@@ -262,7 +262,9 @@ struct transport_ls_refs_options {
*/
char *unborn_head_target;
};
-#define TRANSPORT_LS_REFS_OPTIONS_INIT { STRVEC_INIT }
+#define TRANSPORT_LS_REFS_OPTIONS_INIT { \
+ .ref_prefixes = STRVEC_INIT, \
+}
/*
* Retrieve refs from a remote.
diff --git a/tree-diff.c b/tree-diff.c
index 1572615bd9..437c98a70e 100644
--- a/tree-diff.c
+++ b/tree-diff.c
@@ -21,7 +21,9 @@
ALLOC_ARRAY((x), nr); \
} while(0)
#define FAST_ARRAY_FREE(x, nr) do { \
- if ((nr) > 2) \
+ if ((nr) <= 2) \
+ xalloca_free((x)); \
+ else \
free((x)); \
} while(0)
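
The FAST_ARRAY_FREE fix restores symmetry with its allocating counterpart: arrays of one or two entries come from xalloca() and must be released with xalloca_free(), larger ones come from the heap via ALLOC_ARRAY() and free(). For context, the allocation macro is shaped roughly like this (paraphrased from the surrounding tree-diff.c, shown only to make the pairing visible):

	#define FAST_ARRAY_ALLOC(x, nr) do { \
		if ((nr) <= 2) \
			(x) = xalloca((nr) * sizeof(*(x))); \
		else \
			ALLOC_ARRAY((x), nr); \
	} while(0)

On platforms without alloca(), git-compat-util.h maps xalloca() to xmalloc() and xalloca_free() to free(), so the old FREE macro, which skipped the nr <= 2 case entirely, could leak those small allocations there.
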
diff --git a/unicode-width.h b/unicode-width.h
index b50e686bae..97c851b27d 100644
--- a/unicode-width.h
+++ b/unicode-width.h
@@ -26,7 +26,9 @@ static const struct interval zero_width[] = {
{ 0x0825, 0x0827 },
{ 0x0829, 0x082D },
{ 0x0859, 0x085B },
-{ 0x08D3, 0x0902 },
+{ 0x0890, 0x0891 },
+{ 0x0898, 0x089F },
+{ 0x08CA, 0x0902 },
{ 0x093A, 0x093A },
{ 0x093C, 0x093C },
{ 0x0941, 0x0948 },
@@ -66,6 +68,7 @@ static const struct interval zero_width[] = {
{ 0x0BCD, 0x0BCD },
{ 0x0C00, 0x0C00 },
{ 0x0C04, 0x0C04 },
+{ 0x0C3C, 0x0C3C },
{ 0x0C3E, 0x0C40 },
{ 0x0C46, 0x0C48 },
{ 0x0C4A, 0x0C4D },
@@ -116,7 +119,7 @@ static const struct interval zero_width[] = {
{ 0x1160, 0x11FF },
{ 0x135D, 0x135F },
{ 0x1712, 0x1714 },
-{ 0x1732, 0x1734 },
+{ 0x1732, 0x1733 },
{ 0x1752, 0x1753 },
{ 0x1772, 0x1773 },
{ 0x17B4, 0x17B5 },
@@ -124,7 +127,7 @@ static const struct interval zero_width[] = {
{ 0x17C6, 0x17C6 },
{ 0x17C9, 0x17D3 },
{ 0x17DD, 0x17DD },
-{ 0x180B, 0x180E },
+{ 0x180B, 0x180F },
{ 0x1885, 0x1886 },
{ 0x18A9, 0x18A9 },
{ 0x1920, 0x1922 },
@@ -140,7 +143,7 @@ static const struct interval zero_width[] = {
{ 0x1A65, 0x1A6C },
{ 0x1A73, 0x1A7C },
{ 0x1A7F, 0x1A7F },
-{ 0x1AB0, 0x1AC0 },
+{ 0x1AB0, 0x1ACE },
{ 0x1B00, 0x1B03 },
{ 0x1B34, 0x1B34 },
{ 0x1B36, 0x1B3A },
@@ -163,8 +166,7 @@ static const struct interval zero_width[] = {
{ 0x1CED, 0x1CED },
{ 0x1CF4, 0x1CF4 },
{ 0x1CF8, 0x1CF9 },
-{ 0x1DC0, 0x1DF9 },
-{ 0x1DFB, 0x1DFF },
+{ 0x1DC0, 0x1DFF },
{ 0x200B, 0x200F },
{ 0x202A, 0x202E },
{ 0x2060, 0x2064 },
@@ -227,12 +229,16 @@ static const struct interval zero_width[] = {
{ 0x10D24, 0x10D27 },
{ 0x10EAB, 0x10EAC },
{ 0x10F46, 0x10F50 },
+{ 0x10F82, 0x10F85 },
{ 0x11001, 0x11001 },
{ 0x11038, 0x11046 },
+{ 0x11070, 0x11070 },
+{ 0x11073, 0x11074 },
{ 0x1107F, 0x11081 },
{ 0x110B3, 0x110B6 },
{ 0x110B9, 0x110BA },
{ 0x110BD, 0x110BD },
+{ 0x110C2, 0x110C2 },
{ 0x110CD, 0x110CD },
{ 0x11100, 0x11102 },
{ 0x11127, 0x1112B },
@@ -315,6 +321,8 @@ static const struct interval zero_width[] = {
{ 0x16FE4, 0x16FE4 },
{ 0x1BC9D, 0x1BC9E },
{ 0x1BCA0, 0x1BCA3 },
+{ 0x1CF00, 0x1CF2D },
+{ 0x1CF30, 0x1CF46 },
{ 0x1D167, 0x1D169 },
{ 0x1D173, 0x1D182 },
{ 0x1D185, 0x1D18B },
@@ -332,6 +340,7 @@ static const struct interval zero_width[] = {
{ 0x1E023, 0x1E024 },
{ 0x1E026, 0x1E02A },
{ 0x1E130, 0x1E136 },
+{ 0x1E2AE, 0x1E2AE },
{ 0x1E2EC, 0x1E2EF },
{ 0x1E8D0, 0x1E8D6 },
{ 0x1E944, 0x1E94A },
@@ -404,7 +413,10 @@ static const struct interval double_width[] = {
{ 0x17000, 0x187F7 },
{ 0x18800, 0x18CD5 },
{ 0x18D00, 0x18D08 },
-{ 0x1B000, 0x1B11E },
+{ 0x1AFF0, 0x1AFF3 },
+{ 0x1AFF5, 0x1AFFB },
+{ 0x1AFFD, 0x1AFFE },
+{ 0x1B000, 0x1B122 },
{ 0x1B150, 0x1B152 },
{ 0x1B164, 0x1B167 },
{ 0x1B170, 0x1B2FB },
@@ -439,21 +451,23 @@ static const struct interval double_width[] = {
{ 0x1F6CC, 0x1F6CC },
{ 0x1F6D0, 0x1F6D2 },
{ 0x1F6D5, 0x1F6D7 },
+{ 0x1F6DD, 0x1F6DF },
{ 0x1F6EB, 0x1F6EC },
{ 0x1F6F4, 0x1F6FC },
{ 0x1F7E0, 0x1F7EB },
+{ 0x1F7F0, 0x1F7F0 },
{ 0x1F90C, 0x1F93A },
{ 0x1F93C, 0x1F945 },
-{ 0x1F947, 0x1F978 },
-{ 0x1F97A, 0x1F9CB },
-{ 0x1F9CD, 0x1F9FF },
+{ 0x1F947, 0x1F9FF },
{ 0x1FA70, 0x1FA74 },
-{ 0x1FA78, 0x1FA7A },
+{ 0x1FA78, 0x1FA7C },
{ 0x1FA80, 0x1FA86 },
-{ 0x1FA90, 0x1FAA8 },
-{ 0x1FAB0, 0x1FAB6 },
-{ 0x1FAC0, 0x1FAC2 },
-{ 0x1FAD0, 0x1FAD6 },
+{ 0x1FA90, 0x1FAAC },
+{ 0x1FAB0, 0x1FABA },
+{ 0x1FAC0, 0x1FAC5 },
+{ 0x1FAD0, 0x1FAD9 },
+{ 0x1FAE0, 0x1FAE7 },
+{ 0x1FAF0, 0x1FAF6 },
{ 0x20000, 0x2FFFD },
{ 0x30000, 0x3FFFD }
};
diff --git a/unpack-trees.c b/unpack-trees.c
index a903e71386..a7e1712d23 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -1255,7 +1255,7 @@ static int sparse_dir_matches_path(const struct cache_entry *ce,
static struct cache_entry *find_cache_entry(struct traverse_info *info,
const struct name_entry *p)
{
- struct cache_entry *ce;
+ const char *path;
int pos = find_cache_pos(info, p->path, p->pathlen);
struct unpack_trees_options *o = info->data;
@@ -1281,9 +1281,11 @@ static struct cache_entry *find_cache_entry(struct traverse_info *info,
* paths (e.g. "subdir-").
*/
while (pos >= 0) {
- ce = o->src_index->cache[pos];
+ struct cache_entry *ce = o->src_index->cache[pos];
- if (strncmp(ce->name, p->path, p->pathlen))
+ if (!skip_prefix(ce->name, info->traverse_path, &path) ||
+ strncmp(path, p->path, p->pathlen) ||
+ path[p->pathlen] != '/')
return NULL;
if (S_ISSPARSEDIR(ce->ce_mode) &&
@@ -1692,9 +1694,15 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
static struct cache_entry *dfc;
struct pattern_list pl;
int free_pattern_list = 0;
+ struct dir_struct dir = DIR_INIT;
+
+ if (o->reset == UNPACK_RESET_INVALID)
+ BUG("o->reset had a value of 1; should be UNPACK_TREES_*_UNTRACKED");
if (len > MAX_UNPACK_TREES)
die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
+ if (o->dir)
+ BUG("o->dir is for internal use only");
trace_performance_enter();
trace2_region_enter("unpack_trees", "unpack_trees", the_repository);
@@ -1705,6 +1713,16 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
ensure_full_index(o->dst_index);
}
+ if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED &&
+ o->preserve_ignored)
+ BUG("UNPACK_RESET_OVERWRITE_UNTRACKED incompatible with preserved ignored files");
+
+ if (!o->preserve_ignored) {
+ o->dir = &dir;
+ o->dir->flags |= DIR_SHOW_IGNORED;
+ setup_standard_excludes(o->dir);
+ }
+
if (!core_apply_sparse_checkout || !o->update)
o->skip_sparse_checkout = 1;
if (!o->skip_sparse_checkout && !o->pl) {
@@ -1866,6 +1884,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
done:
if (free_pattern_list)
clear_pattern_list(&pl);
+ if (o->dir) {
+ dir_clear(o->dir);
+ o->dir = NULL;
+ }
trace2_region_leave("unpack_trees", "unpack_trees", the_repository);
trace_performance_leave("unpack_trees");
return ret;
@@ -2156,9 +2178,15 @@ static int icase_exists(struct unpack_trees_options *o, const char *name, int le
return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}
+enum absent_checking_type {
+ COMPLETELY_ABSENT,
+ ABSENT_ANY_DIRECTORY
+};
+
static int check_ok_to_remove(const char *name, int len, int dtype,
const struct cache_entry *ce, struct stat *st,
enum unpack_trees_error_types error_type,
+ enum absent_checking_type absent_type,
struct unpack_trees_options *o)
{
const struct cache_entry *result;
@@ -2193,6 +2221,10 @@ static int check_ok_to_remove(const char *name, int len, int dtype,
return 0;
}
+ /* If we only care about directories, then we can remove */
+ if (absent_type == ABSENT_ANY_DIRECTORY)
+ return 0;
+
/*
* The previous round may already have decided to
* delete this path, which is in a subdirectory that
@@ -2213,12 +2245,14 @@ static int check_ok_to_remove(const char *name, int len, int dtype,
*/
static int verify_absent_1(const struct cache_entry *ce,
enum unpack_trees_error_types error_type,
+ enum absent_checking_type absent_type,
struct unpack_trees_options *o)
{
int len;
struct stat st;
- if (o->index_only || o->reset || !o->update)
+ if (o->index_only || !o->update ||
+ o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED)
return 0;
len = check_leading_path(ce->name, ce_namelen(ce), 0);
@@ -2238,7 +2272,8 @@ static int verify_absent_1(const struct cache_entry *ce,
NULL, o);
else
ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
- &st, error_type, o);
+ &st, error_type,
+ absent_type, o);
}
free(path);
return ret;
@@ -2253,7 +2288,7 @@ static int verify_absent_1(const struct cache_entry *ce,
return check_ok_to_remove(ce->name, ce_namelen(ce),
ce_to_dtype(ce), ce, &st,
- error_type, o);
+ error_type, absent_type, o);
}
}
@@ -2263,14 +2298,23 @@ static int verify_absent(const struct cache_entry *ce,
{
if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
return 0;
- return verify_absent_1(ce, error_type, o);
+ return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o);
+}
+
+static int verify_absent_if_directory(const struct cache_entry *ce,
+ enum unpack_trees_error_types error_type,
+ struct unpack_trees_options *o)
+{
+ if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
+ return 0;
+ return verify_absent_1(ce, error_type, ABSENT_ANY_DIRECTORY, o);
}
static int verify_absent_sparse(const struct cache_entry *ce,
enum unpack_trees_error_types error_type,
struct unpack_trees_options *o)
{
- return verify_absent_1(ce, error_type, o);
+ return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o);
}
static int merged_entry(const struct cache_entry *ce,
@@ -2344,6 +2388,12 @@ static int merged_entry(const struct cache_entry *ce,
* Previously unmerged entry left as an existence
* marker by read_index_unmerged();
*/
+ if (verify_absent_if_directory(merge,
+ ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
+ discard_cache_entry(merge);
+ return -1;
+ }
+
invalidate_ce_path(old, o);
}
@@ -2361,7 +2411,10 @@ static int deleted_entry(const struct cache_entry *ce,
if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
return -1;
return 0;
+ } else if (verify_absent_if_directory(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o)) {
+ return -1;
}
+
if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
return -1;
add_entry(o, ce, CE_REMOVE, 0);
diff --git a/unpack-trees.h b/unpack-trees.h
index 2d88b19dca..71ffb7eeb0 100644
--- a/unpack-trees.h
+++ b/unpack-trees.h
@@ -45,10 +45,17 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
*/
void clear_unpack_trees_porcelain(struct unpack_trees_options *opts);
+enum unpack_trees_reset_type {
+ UNPACK_RESET_NONE = 0, /* traditional "false" value; still valid */
+ UNPACK_RESET_INVALID = 1, /* "true" no longer valid; use below values */
+ UNPACK_RESET_PROTECT_UNTRACKED,
+ UNPACK_RESET_OVERWRITE_UNTRACKED
+};
+
struct unpack_trees_options {
- unsigned int reset,
- merge,
+ unsigned int merge,
update,
+ preserve_ignored,
clone,
index_only,
nontrivial_merge,
@@ -64,9 +71,9 @@ struct unpack_trees_options {
exiting_early,
show_all_errors,
dry_run;
+ enum unpack_trees_reset_type reset;
const char *prefix;
int cache_bottom;
- struct dir_struct *dir;
struct pathspec *pathspec;
merge_fn_t fn;
const char *msgs[NB_UNPACK_TREES_WARNING_TYPES];
@@ -88,6 +95,7 @@ struct unpack_trees_options {
struct index_state result;
struct pattern_list *pl; /* for internal use */
+ struct dir_struct *dir; /* for internal use only */
struct checkout_metadata meta;
};
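
For orientation, a caller-side sketch of the new fields (hedged; the particular combination below is illustrative, not lifted from any specific command): a tree-switching operation that wants to refuse clobbering untracked files, while still allowing ignored files to be overwritten, might set up its options along these lines:

	struct unpack_trees_options opts;

	memset(&opts, 0, sizeof(opts));
	opts.head_idx = -1;
	opts.update = 1;
	opts.merge = 1;
	opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
	opts.preserve_ignored = 0;   /* unpack_trees() will set up its own dir_struct internally */
	opts.src_index = the_repository->index;
	opts.dst_index = the_repository->index;

UNPACK_RESET_OVERWRITE_UNTRACKED, by contrast, must not be combined with preserve_ignored, as the BUG() check added above enforces.
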
diff --git a/upload-pack.c b/upload-pack.c
index 6ce07231d3..c78d55bc67 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -1207,14 +1207,14 @@ static int send_ref(const char *refname, const struct object_id *oid,
format_symref_info(&symref_info, &data->symref);
format_session_id(&session_id, data);
- packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s%s object-format=%s agent=%s\n",
+ packet_fwrite_fmt(stdout, "%s %s%c%s%s%s%s%s%s%s object-format=%s agent=%s\n",
oid_to_hex(oid), refname_nons,
0, capabilities,
(data->allow_uor & ALLOW_TIP_SHA1) ?
" allow-tip-sha1-in-want" : "",
(data->allow_uor & ALLOW_REACHABLE_SHA1) ?
" allow-reachable-sha1-in-want" : "",
- data->stateless_rpc ? " no-done" : "",
+ data->no_done ? " no-done" : "",
symref_info.buf,
data->allow_filter ? " filter" : "",
session_id.buf,
@@ -1223,11 +1223,11 @@ static int send_ref(const char *refname, const struct object_id *oid,
strbuf_release(&symref_info);
strbuf_release(&session_id);
} else {
- packet_write_fmt(1, "%s %s\n", oid_to_hex(oid), refname_nons);
+ packet_fwrite_fmt(stdout, "%s %s\n", oid_to_hex(oid), refname_nons);
}
capabilities = NULL;
if (!peel_iterated_oid(oid, &peeled))
- packet_write_fmt(1, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons);
+ packet_fwrite_fmt(stdout, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons);
return 0;
}
@@ -1329,7 +1329,8 @@ static int upload_pack_config(const char *var, const char *value, void *cb_data)
return parse_hide_refs_config(var, value, "uploadpack");
}
-void upload_pack(struct upload_pack_options *options)
+void upload_pack(const int advertise_refs, const int stateless_rpc,
+ const int timeout)
{
struct packet_reader reader;
struct upload_pack_data data;
@@ -1338,16 +1339,24 @@ void upload_pack(struct upload_pack_options *options)
git_config(upload_pack_config, &data);
- data.stateless_rpc = options->stateless_rpc;
- data.daemon_mode = options->daemon_mode;
- data.timeout = options->timeout;
+ data.stateless_rpc = stateless_rpc;
+ data.timeout = timeout;
+ if (data.timeout)
+ data.daemon_mode = 1;
head_ref_namespaced(find_symref, &data.symref);
- if (options->advertise_refs || !data.stateless_rpc) {
+ if (advertise_refs || !data.stateless_rpc) {
reset_timeout(data.timeout);
+ if (advertise_refs)
+ data.no_done = 1;
head_ref_namespaced(send_ref, &data);
for_each_namespaced_ref(send_ref, &data);
+ /*
+ * fflush stdout before calling advertise_shallow_grafts because send_ref
+ * uses stdio.
+ */
+ fflush_or_die(stdout);
advertise_shallow_grafts(1);
packet_flush(1);
} else {
@@ -1355,7 +1364,7 @@ void upload_pack(struct upload_pack_options *options)
for_each_namespaced_ref(check_ref, NULL);
}
- if (!options->advertise_refs) {
+ if (!advertise_refs) {
packet_reader_init(&reader, 0, NULL, 0,
PACKET_READ_CHOMP_NEWLINE |
PACKET_READ_DIE_ON_ERR_PACKET);
@@ -1659,8 +1668,7 @@ enum fetch_state {
FETCH_DONE,
};
-int upload_pack_v2(struct repository *r, struct strvec *keys,
- struct packet_reader *request)
+int upload_pack_v2(struct repository *r, struct packet_reader *request)
{
enum fetch_state state = FETCH_PROCESS_ARGS;
struct upload_pack_data data;
diff --git a/upload-pack.h b/upload-pack.h
index 27ddcdc6cb..d6ee25ea98 100644
--- a/upload-pack.h
+++ b/upload-pack.h
@@ -1,20 +1,12 @@
#ifndef UPLOAD_PACK_H
#define UPLOAD_PACK_H
-struct upload_pack_options {
- int stateless_rpc;
- int advertise_refs;
- unsigned int timeout;
- int daemon_mode;
-};
-
-void upload_pack(struct upload_pack_options *options);
+void upload_pack(const int advertise_refs, const int stateless_rpc,
+ const int timeout);
struct repository;
-struct strvec;
struct packet_reader;
-int upload_pack_v2(struct repository *r, struct strvec *keys,
- struct packet_reader *request);
+int upload_pack_v2(struct repository *r, struct packet_reader *request);
struct strbuf;
int upload_pack_advertise(struct repository *r,
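
Callers now pass the three remaining knobs directly; daemon_mode is no longer an input at all, since upload_pack() derives it from a nonzero timeout (see the hunk above). A sketch of the calling side (option parsing elided; variable names illustrative):

	int advertise_refs = 0, stateless_rpc = 0, timeout = 0;

	/* ... parse --advertise-refs, --stateless-rpc, --timeout=<n> ... */

	upload_pack(advertise_refs, stateless_rpc, timeout);
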
diff --git a/wrapper.c b/wrapper.c
index 7c6586af32..1460d4e27b 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -145,6 +145,18 @@ void *xcalloc(size_t nmemb, size_t size)
return ret;
}
+void xsetenv(const char *name, const char *value, int overwrite)
+{
+ if (setenv(name, value, overwrite))
+ die_errno(_("could not setenv '%s'"), name ? name : "(null)");
+}
+
+void xunsetenv(const char *name)
+{
+	if (unsetenv(name))
+		die_errno(_("could not unsetenv '%s'"), name ? name : "(null)");
+}
+
/*
* Limit size of IO chunks, because huge chunks only cause pain. OS X
* 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
diff --git a/write-or-die.c b/write-or-die.c
index d33e68f6ab..0b1ec8190b 100644
--- a/write-or-die.c
+++ b/write-or-die.c
@@ -70,3 +70,15 @@ void write_or_die(int fd, const void *buf, size_t count)
die_errno("write error");
}
}
+
+void fwrite_or_die(FILE *f, const void *buf, size_t count)
+{
+ if (fwrite(buf, 1, count, f) != count)
+ die_errno("fwrite error");
+}
+
+void fflush_or_die(FILE *f)
+{
+ if (fflush(f))
+ die_errno("fflush error");
+}
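
These two helpers mirror write_or_die() for stdio streams. The intended pattern, visible in the upload-pack.c hunk above, is to buffer writes through stdout and then drain the stdio buffer before anything else writes to the underlying descriptor; a minimal sketch (buf is a stand-in strbuf, not from this patch):

	fwrite_or_die(stdout, buf.buf, buf.len);   /* buffered stdio write, dies on short write */
	fflush_or_die(stdout);                     /* flush before mixing in fd-level writes */
	write_or_die(1, "0000", 4);                /* e.g. a raw flush-pkt straight to fd 1 */
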