365 files changed, 9842 insertions, 3627 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index c2f5fe385a..e114ffee1a 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -2,8 +2,15 @@ env: CIRRUS_CLONE_DEPTH: 1 freebsd_12_task: + env: + GIT_PROVE_OPTS: "--timer --jobs 10" + GIT_TEST_OPTS: "--no-chain-lint --no-bin-wrappers" + MAKEFLAGS: "-j4" + DEFAULT_TEST_TARGET: prove + DEVELOPER: 1 freebsd_instance: - image: freebsd-12-1-release-amd64 + image_family: freebsd-12-2 + memory: 2G install_script: pkg install -y gettext gmake perl5 create_user_script: diff --git a/.github/workflows/l10n.yml b/.github/workflows/l10n.yml new file mode 100644 index 0000000000..27f72f0ff3 --- /dev/null +++ b/.github/workflows/l10n.yml @@ -0,0 +1,105 @@ +name: git-l10n + +on: [push, pull_request_target] + +jobs: + git-po-helper: + if: >- + endsWith(github.repository, '/git-po') || + contains(github.head_ref, 'l10n') || + contains(github.ref, 'l10n') + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Setup base and head objects + id: setup-tips + run: | + if test "${{ github.event_name }}" = "pull_request_target" + then + base=${{ github.event.pull_request.base.sha }} + head=${{ github.event.pull_request.head.sha }} + else + base=${{ github.event.before }} + head=${{ github.event.after }} + fi + echo "::set-output name=base::$base" + echo "::set-output name=head::$head" + - name: Run partial clone + run: | + git -c init.defaultBranch=master init --bare . + git remote add \ + --mirror=fetch \ + origin \ + https://github.com/${{ github.repository }} + # Fetch tips that may be unreachable from github.ref: + # - For a forced push, "$base" may be unreachable. + # - For a "pull_request_target" event, "$head" may be unreachable. + args= + for commit in \ + ${{ steps.setup-tips.outputs.base }} \ + ${{ steps.setup-tips.outputs.head }} + do + case $commit in + *[^0]*) + args="$args $commit" + ;; + *) + # Should not fetch ZERO-OID. + ;; + esac + done + git -c protocol.version=2 fetch \ + --progress \ + --no-tags \ + --no-write-fetch-head \ + --filter=blob:none \ + origin \ + ${{ github.ref }} \ + $args + - uses: actions/setup-go@v2 + with: + go-version: '>=1.16' + - name: Install git-po-helper + run: go install github.com/git-l10n/git-po-helper@main + - name: Install other dependencies + run: | + sudo apt-get update -q && + sudo apt-get install -q -y gettext + - name: Run git-po-helper + id: check-commits + run: | + exit_code=0 + git-po-helper check-commits \ + --github-action-event="${{ github.event_name }}" -- \ + ${{ steps.setup-tips.outputs.base }}..${{ steps.setup-tips.outputs.head }} \ + >git-po-helper.out 2>&1 || exit_code=$? + if test $exit_code -ne 0 || grep -q WARNING git-po-helper.out + then + # Remove ANSI colors which are proper for console logs but not + # proper for PR comment. 
+ echo "COMMENT_BODY<<EOF" >>$GITHUB_ENV + perl -pe 's/\e\[[0-9;]*m//g; s/\bEOF$//g' git-po-helper.out >>$GITHUB_ENV + echo "EOF" >>$GITHUB_ENV + fi + cat git-po-helper.out + exit $exit_code + - name: Create comment in pull request for report + uses: mshick/add-pr-comment@v1 + if: >- + always() && + github.event_name == 'pull_request_target' && + env.COMMENT_BODY != '' + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + repo-token-user-login: 'github-actions[bot]' + message: > + ${{ steps.check-commits.outcome == 'failure' && 'Errors and warnings' || 'Warnings' }} + found by [git-po-helper](https://github.com/git-l10n/git-po-helper#readme) in workflow + [#${{ github.run_number }}](${{ env.GITHUB_SERVER_URL }}/${{ github.repository }}/actions/runs/${{ github.run_id }}): + + ``` + + ${{ env.COMMENT_BODY }} + + ``` diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 47876a4f02..b053b01c66 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -259,6 +259,8 @@ jobs: image: alpine - jobname: Linux32 image: daald/ubuntu32:xenial + - jobname: pedantic + image: fedora env: jobname: ${{matrix.vector.jobname}} runs-on: ubuntu-latest @@ -271,7 +273,7 @@ jobs: if: failure() - name: Upload failed tests' directories if: failure() && env.FAILED_TEST_ARTIFACTS != '' - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v1 with: name: failed-tests-${{matrix.vector.jobname}} path: ${{env.FAILED_TEST_ARTIFACTS}} diff --git a/Documentation/MyFirstObjectWalk.txt b/Documentation/MyFirstObjectWalk.txt index 2d10eea7a9..45eb84d8b4 100644 --- a/Documentation/MyFirstObjectWalk.txt +++ b/Documentation/MyFirstObjectWalk.txt @@ -691,7 +691,7 @@ help understand. In our case, that means we omit trees and blobs not directly referenced by `HEAD` or `HEAD`'s history, because we begin the walk with only `HEAD` in the `pending` list.) -First, we'll need to `#include "list-objects-filter-options.h`" and set up the +First, we'll need to `#include "list-objects-filter-options.h"` and set up the `struct list_objects_filter_options` at the top of the function. ---- @@ -779,7 +779,7 @@ Count all the objects within and modify the print statement: while ((oid = oidset_iter_next(&oit))) omitted_count++; - printf("commits %d\nblobs %d\ntags %d\ntrees%d\nomitted %d\n", + printf("commits %d\nblobs %d\ntags %d\ntrees %d\nomitted %d\n", commit_count, blob_count, tag_count, tree_count, omitted_count); ---- diff --git a/Documentation/RelNotes/2.34.0.txt b/Documentation/RelNotes/2.34.0.txt new file mode 100644 index 0000000000..a7793987f2 --- /dev/null +++ b/Documentation/RelNotes/2.34.0.txt @@ -0,0 +1,310 @@ +Git 2.34 Release Notes +====================== + +Updates since Git 2.33 +---------------------- + +UI, Workflows & Features + + * Pathname expansion (like "~username/") learned a way to specify a + location relative to Git installation (e.g. its $sharedir which is + $(prefix)/share), with "%(prefix)". + + * Use `ort` instead of `recursive` as the default merge strategy. + + * The userdiff pattern for "java" language has been updated. + + * "git rebase" by default skips changes that are equivalent to + commits that are already in the history the branch is rebased onto; + give messages when this happens to let the users be aware of + skipped commits, and also teach them how to tell "rebase" to keep + duplicated changes. + + * The advice message that "git cherry-pick" gives when it asks + conflicted replay of a commit to be resolved by the end user has + been updated. 
+ + * After "git clone --recurse-submodules", all submodules are cloned + but they are not by default recursed into by other commands. With + submodule.stickyRecursiveClone configuration set, submodule.recurse + configuration is set to true in a repository created by "clone" + with "--recurse-submodules" option. + + * The logic for auto-correction of misspelt subcommands learned to go + interactive when the help.autocorrect configuration variable is set + to 'prompt'. + + * "git maintenance" scheduler learned to use systemd timers as a + possible backend. + + * "git diff --submodule=diff" showed failure from run_command() when + trying to run diff inside a submodule, when the user manually + removes the submodule directory. + + * "git bundle unbundle" learned to show progress display. + + * In cone mode, the sparse-index code path learned to remove ignored + files (like build artifacts) outside the sparse cone, allowing the + entire directory outside the sparse cone to be removed, which is + especially useful when the sparse patterns change. + + * Taking advantage of the CGI interface, http-backend has been + updated to enable protocol v2 automatically when the other side + asks for it. + + * The credential-cache helper has been adjusted to Windows. + + * The error in "git help no-such-git-command" is handled better. + + * The unicode character width table (used for output alignment) has + been updated. + + +Performance, Internal Implementation, Development Support etc. + + * "git bisect" spawned "git show-branch" only to pretty-print the + title of the commit after checking out the next version to be + tested; this has been rewritten in C. + + * "git add" can work better with the sparse index. + + * Support for ancient versions of cURL library (pre 7.19.4) has been + dropped. + + * A handful of tests that assumed implementation details of files + backend for refs have been cleaned up. + + * trace2 logs learned to show parent process name to see in what + context Git was invoked. + + * Loading of ref tips to prepare for common ancestry negotiation in + "git fetch-pack" has been optimized by taking advantage of the + commit graph when available. + + * Remind developers that the userdiff patterns should be kept simple + and permissive, assuming that the contents they apply are always + syntactically correct. + + * The current implementation of GIT_TEST_FAIL_PREREQS is broken in + that checking for the lack of a prerequisite would not work. Avoid + the use of "if ! test_have_prereq X" in a test script. + + * The revision traversal API has been optimized by taking advantage + of the commit-graph, when available, to determine if a commit is + reachable from any of the existing refs. + + * "git fetch --quiet" optimization to avoid useless computation of + info that will never be displayed. + + * Callers from older advice_config[] based API has been updated to + use the newer advice_if_enabled() and advice_enabled() API. + + * Teach "test_pause" and "debug" helpers to allow using the HOME and + TERM environment variables the user usually uses. + + * "make INSTALL_STRIP=-s install" allows the installation step to use + "install -s" to strip the binaries as they get installed. + + * Code that handles large number of refs in the "git fetch" code + path has been optimized. + + * The reachability bitmap file used to be generated only for a single + pack, but now we've learned to generate bitmaps for history that + span across multiple packfiles. 
+ + * The code to make "git grep" recurse into submodules has been + updated to migrate away from the "add submodule's object store as + an alternate object store" mechanism (which is suboptimal). + + * The tracing of process ancestry information has been enhanced. + + * Reduce number of write(2) system calls while sending the + ref advertisement. + + * Update the build procedure to use the "-pedantic" build when + DEVELOPER makefile macro is in effect. + + * Large part of "git submodule add" gets rewritten in C. + + * The run-command API has been updated so that the callers can easily + ask the file descriptors open for packfiles to be closed immediately + before spawning commands that may trigger auto-gc. + + * An oddball OPTION_ARGUMENT feature has been removed from the + parse-options API. + + +Fixes since v2.33 +----------------- + + * Input validation of "git pack-objects --stdin-packs" has been + corrected. + (merge 561fa03529 ab/pack-stdin-packs-fix later to maint). + + * Bugfix for common ancestor negotiation recently introduced in "git + push" code path. + (merge 82823118b9 jt/push-negotiation-fixes later to maint). + + * "git pull" had various corner cases that were not well thought out + around its --rebase backend, e.g. "git pull --ff-only" did not stop + but went ahead and rebased when the history on other side is not a + descendant of our history. The series tries to fix them up. + (merge 6f843a3355 en/pull-conflicting-options later to maint). + + * "git apply" miscounted the bytes and failed to read to the end of + binary hunks. + (merge 46d723ce57 jk/apply-binary-hunk-parsing-fix later to maint). + + * "git range-diff" code clean-up. + (merge c4d5907324 jk/range-diff-fixes later to maint). + + * "git commit --fixup" now works with "--edit" again, after it was + broken in v2.32. + (merge 8ef6aad664 jk/commit-edit-fixup-fix later to maint). + + * Use upload-artifacts v1 (instead of v2) for 32-bit linux, as the + new version has a blocker bug for that architecture. + (merge 3cf9bb36bf cb/ci-use-upload-artifacts-v1 later to maint). + + * Checking out all the paths from HEAD during the last conflicted + step in "git rebase" and continuing would cause the step to be + skipped (which is expected), but leaves MERGE_MSG file behind in + $GIT_DIR and confuses the next "git commit", which has been + corrected. + (merge e5ee33e855 pw/rebase-skip-final-fix later to maint). + + * Various bugs in "git rebase -r" have been fixed. + (merge f2563c9ef3 pw/rebase-r-fixes later to maint). + + * mmap() imitation used to call xmalloc() that dies upon malloc() + failure, which has been corrected to just return an error to the + caller to be handled. + (merge 95b4ff3931 rs/git-mmap-uses-malloc later to maint). + + * "git diff --relative" segfaulted and/or produced incorrect result + when there are unmerged paths. + (merge 8174627b3d dd/diff-files-unmerged-fix later to maint). + + * The delayed checkout code path in "git checkout" etc. were chatty + even when --quiet and/or --no-progress options were given. + (merge 7a132c628e mt/quiet-with-delayed-checkout later to maint). + + * "git branch -D <branch>" used to refuse to remove a broken branch + ref that points at a missing commit, which has been corrected. + (merge 597a977489 rs/branch-allow-deleting-dangling later to maint). + + * Build update for Apple clang. + (merge f32c5d3716 cb/makefile-apple-clang later to maint). + + * The parser for the "--nl" option of "git column" has been + corrected. + (merge c93ca46cf5 sg/column-nl later to maint). 
+ + * "git upload-pack" which runs on the other side of "git fetch" + forgot to take the ref namespaces into account when handling + want-ref requests. + (merge 53a66ec37c ka/want-ref-in-namespace later to maint). + + * The sparse-index support can corrupt the index structure by storing + a stale and/or uninitialized data, which has been corrected. + (merge d9e9b44d7a jh/sparse-index-resize-fix later to maint). + + * Buggy tests could damage repositories outside the throw-away test + area we created. We now by default export GIT_CEILING_DIRECTORIES + to limit the damage from such a stray test. + (merge 614c3d8f2e sg/set-ceiling-during-tests later to maint). + + * Even when running "git send-email" without its own threaded + discussion support, a threading related header in one message is + carried over to the subsequent message to result in an unwanted + threading, which has been corrected. + (merge e082113484 mh/send-email-reset-in-reply-to later to maint). + + * The output from "git fast-export", when its anonymization feature + is in use, showed an annotated tag incorrectly. + (merge 2f040a9671 tk/fast-export-anonymized-tag-fix later to maint). + + * Doc update plus improved error reporting. + (merge 1e93770888 jk/log-warn-on-bogus-encoding later to maint). + + * Recent "diff -m" changes broke "gitk", which has been corrected. + (merge 5acffd3473 so/diff-index-regression-fix later to maint). + + * Regression fix. + (merge b996f84989 ab/send-email-config-fix later to maint). + + * The "git apply -3" code path learned not to bother the lower level + merge machinery when the three-way merge can be trivially resolved + without the content level merge. This fixes a regression caused by + recent "-3way first and fall back to direct application" change. + (merge 57f183b698 jc/trivial-threeway-binary-merge later to maint). + + * The code that optionally creates the *.rev reverse index file has + been optimized to avoid needless computation when it is not writing + the file out. + (merge 8fe8bae9d2 ab/reverse-midx-optim later to maint). + + * "git range-diff -I... <range> <range>" segfaulted, which has been + corrected. + (merge 709b3f32d3 rs/range-diff-avoid-segfault-with-I later to maint). + + * The order in which various files that make up a single (conceptual) + packfile has been reevaluated and straightened up. This matters in + correctness, as an incomplete set of files must not be shown to a + running Git. + (merge 4bc1fd6e39 tb/pack-finalize-ordering later to maint). + + * The "mode" word is useless in a call to open(2) that does not + create a new file. Such a call in the files backend of the ref + subsystem has been cleaned up. + (merge 35cf94eaf6 rs/no-mode-to-open-when-appending later to maint). + + * "git update-ref --stdin" failed to flush its output as needed, + which potentially led the conversation to a deadlock. + (merge 7c1200745b ps/update-ref-batch-flush later to maint). + + * When "git am --abort" fails to abort correctly, it still exited + with exit status of 0, which has been corrected. + (merge c5ead19ea2 en/am-abort-fix later to maint). + + * Correct nr and alloc members of strvec struct to be of type size_t. + (merge 8d133a4653 jk/strvec-typefix later to maint). + + * Other code cleanup, docfix, build fix, etc. + (merge 1d9c8daef8 ab/bundle-doc later to maint). + (merge 81483fe613 en/merge-strategy-docs later to maint). + (merge 626beebdf8 js/log-protocol-version later to maint). + (merge 00e302da76 cb/builtin-merge-format-string-fix later to maint). 
+ (merge ad51ae4dc0 cb/ci-freebsd-update later to maint). + (merge be6444d1ca fc/completion-updates later to maint). + (merge ff7b83f562 ti/tcsh-completion-regression-fix later to maint). + (merge 325b06deda sg/make-fix-ar-invocation later to maint). + (merge bd72824c60 me/t5582-cleanup later to maint). + (merge f6a5af0f62 ga/send-email-sendmail-cmd later to maint). + (merge f58c7468cd ab/ls-remote-packet-trace later to maint). + (merge 0160f7e725 ab/rebase-fatal-fatal-fix later to maint). + (merge a16eb6b1ff js/maintenance-launchctl-fix later to maint). + (merge c21b2511c2 jk/t5323-no-pack-test-fix later to maint). + (merge 5146c2f148 mh/credential-leakfix later to maint). + (merge 1549577338 dd/t6300-wo-gpg-fix later to maint). + (merge 66e905b7dd rs/xopen-reports-open-failures later to maint). + (merge 469888e6a5 es/walken-tutorial-fix later to maint). + (merge 88682b016d ba/object-info later to maint). + (merge b45c172e51 ab/gc-log-rephrase later to maint). + (merge ccdd5d1eb1 ab/mailmap-leakfix later to maint). + (merge 6540b71614 cb/remote-ndebug-fix later to maint). + (merge e4f8d27585 rs/show-branch-simplify later to maint). + (merge e124ecf7f7 rs/archive-use-object-id later to maint). + (merge cebead1ebf cb/ci-build-pedantic later to maint). + (merge ca0cc98e03 bs/doc-bugreport-outdir later to maint). + (merge 72b113e562 ab/no-more-check-bindir later to maint). + (merge 92a5d1c9b4 jc/prefix-filename-allocates later to maint). + (merge d9a65b6c0a rs/setup-use-xopen-and-xdup later to maint). + (merge e8f55568de jk/t5562-racefix later to maint). + (merge 8f0f110156 rs/drop-core-compression-vars later to maint). + (merge b6d8887d3d ma/doc-git-version later to maint). + (merge 66c0c44df6 cb/plug-leaks-in-alloca-emu-users later to maint). + (merge afb32e8101 kz/revindex-comment-fix later to maint). + (merge ae578de926 po/git-config-doc-mentions-help-c later to maint). + (merge 187fc8b8b6 cb/unicode-14 later to maint). + (merge 3584cff71c en/typofixes later to maint). diff --git a/Documentation/config.txt b/Documentation/config.txt index bf82766a6a..0c0e6b859f 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -298,6 +298,15 @@ pathname:: tilde expansion happens to such a string: `~/` is expanded to the value of `$HOME`, and `~user/` to the specified user's home directory. ++ +If a path starts with `%(prefix)/`, the remainder is interpreted as a +path relative to Git's "runtime prefix", i.e. relative to the location +where Git itself was installed. For example, `%(prefix)/bin/` refers to +the directory in which the Git executable itself lives. If Git was +compiled without runtime prefix support, the compiled-in prefix will be +subsituted instead. In the unlikely event that a literal path needs to +be specified that should _not_ be expanded, it needs to be prefixed by +`./`, like so: `./%(prefix)/bin`. Variables diff --git a/Documentation/config/advice.txt b/Documentation/config/advice.txt index 8b2849ff7b..063eec2511 100644 --- a/Documentation/config/advice.txt +++ b/Documentation/config/advice.txt @@ -44,6 +44,9 @@ advice.*:: Shown when linkgit:git-push[1] rejects a forced update of a branch when its remote-tracking ref has updates that we do not have locally. + skippedCherryPicks:: + Shown when linkgit:git-rebase[1] skips a commit that has already + been cherry-picked onto the upstream branch. 
statusAheadBehind:: Shown when linkgit:git-status[1] computes the ahead/behind counts for a local ref compared to its remote tracking ref, diff --git a/Documentation/config/gui.txt b/Documentation/config/gui.txt index d30831a130..0c087fd8c9 100644 --- a/Documentation/config/gui.txt +++ b/Documentation/config/gui.txt @@ -11,7 +11,7 @@ gui.displayUntracked:: in the file list. The default is "true". gui.encoding:: - Specifies the default encoding to use for displaying of + Specifies the default character encoding to use for displaying of file contents in linkgit:git-gui[1] and linkgit:gitk[1]. It can be overridden by setting the 'encoding' attribute for relevant files (see linkgit:gitattributes[5]). diff --git a/Documentation/config/help.txt b/Documentation/config/help.txt index 783a90a0f9..610701f9a3 100644 --- a/Documentation/config/help.txt +++ b/Documentation/config/help.txt @@ -9,13 +9,15 @@ help.format:: help.autoCorrect:: If git detects typos and can identify exactly one valid command similar - to the error, git will automatically run the intended command after - waiting a duration of time defined by this configuration value in - deciseconds (0.1 sec). If this value is 0, the suggested corrections - will be shown, but not executed. If it is a negative integer, or - "immediate", the suggested command - is run immediately. If "never", suggestions are not shown at all. The - default value is zero. + to the error, git will try to suggest the correct command or even + run the suggestion automatically. Possible config values are: + - 0 (default): show the suggested command. + - positive number: run the suggested command after specified +deciseconds (0.1 sec). + - "immediate": run the suggested command immediately. + - "prompt": show the suggestion and prompt for confirmation to run +the command. + - "never": don't run or show any suggested command. help.htmlPath:: Specify the path where the HTML documentation resides. File system paths diff --git a/Documentation/config/transfer.txt b/Documentation/config/transfer.txt index 505126a780..b49429eb4d 100644 --- a/Documentation/config/transfer.txt +++ b/Documentation/config/transfer.txt @@ -52,13 +52,17 @@ If you have multiple hideRefs values, later entries override earlier ones (and entries in more-specific config files override less-specific ones). + If a namespace is in use, the namespace prefix is stripped from each -reference before it is matched against `transfer.hiderefs` patterns. +reference before it is matched against `transfer.hiderefs` patterns. In +order to match refs before stripping, add a `^` in front of the ref name. If +you combine `!` and `^`, `!` must be specified first. ++ For example, if `refs/heads/master` is specified in `transfer.hideRefs` and the current namespace is `foo`, then `refs/namespaces/foo/refs/heads/master` -is omitted from the advertisements but `refs/heads/master` and -`refs/namespaces/bar/refs/heads/master` are still advertised as so-called -"have" lines. In order to match refs before stripping, add a `^` in front of -the ref name. If you combine `!` and `^`, `!` must be specified first. +is omitted from the advertisements. If `uploadpack.allowRefInWant` is set, +`upload-pack` will treat `want-ref refs/heads/master` in a protocol v2 +`fetch` command as if `refs/namespaces/foo/refs/heads/master` did not exist. +`receive-pack`, on the other hand, will still advertise the object id the +ref is pointing to without mentioning its name (a so-called ".have" line). 
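To make the interaction of `!` and `^` described above concrete, here is a small illustrative configuration; it is not part of the patch, and the ref and namespace names are made up. The first entry hides `refs/heads/secret` everywhere (matching after namespace stripping); the second, later entry overrides it for the `foo` namespace only by matching the full, un-stripped name, with `!` placed before `^` as required.

----------------
$ git config --add transfer.hideRefs refs/heads/secret
$ git config --add transfer.hideRefs '!^refs/namespaces/foo/refs/heads/secret'
----------------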
+ Even if you hide refs, a client may still be able to steal the target objects via the techniques described in the "SECURITY" section of the diff --git a/Documentation/git-am.txt b/Documentation/git-am.txt index 8714dfcb76..0a4a984dfd 100644 --- a/Documentation/git-am.txt +++ b/Documentation/git-am.txt @@ -178,6 +178,8 @@ default. You can use `--no-utf8` to override this. --abort:: Restore the original branch and abort the patching operation. + Revert contents of files involved in the am operation to their + pre-am state. --quit:: Abort the patching operation but keep HEAD and the index diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt index 94dc9a54f2..5449767121 100644 --- a/Documentation/git-branch.txt +++ b/Documentation/git-branch.txt @@ -118,7 +118,8 @@ OPTIONS Reset <branchname> to <startpoint>, even if <branchname> exists already. Without `-f`, 'git branch' refuses to change an existing branch. In combination with `-d` (or `--delete`), allow deleting the - branch irrespective of its merged status. In combination with + branch irrespective of its merged status, or whether it even + points to a valid commit. In combination with `-m` (or `--move`), allow renaming the branch even if the new branch name already exists, the same applies for `-c` (or `--copy`). diff --git a/Documentation/git-bugreport.txt b/Documentation/git-bugreport.txt index 66e88c2e31..d8817bf3ce 100644 --- a/Documentation/git-bugreport.txt +++ b/Documentation/git-bugreport.txt @@ -40,8 +40,8 @@ OPTIONS ------- -o <path>:: --output-directory <path>:: - Place the resulting bug report file in `<path>` instead of the root of - the Git repository. + Place the resulting bug report file in `<path>` instead of the current + directory. -s <format>:: --suffix <format>:: diff --git a/Documentation/git-bundle.txt b/Documentation/git-bundle.txt index 53804cad4b..71b5ecabd1 100644 --- a/Documentation/git-bundle.txt +++ b/Documentation/git-bundle.txt @@ -13,26 +13,53 @@ SYNOPSIS [--version=<version>] <file> <git-rev-list-args> 'git bundle' verify [-q | --quiet] <file> 'git bundle' list-heads <file> [<refname>...] -'git bundle' unbundle <file> [<refname>...] +'git bundle' unbundle [--progress] <file> [<refname>...] DESCRIPTION ----------- -Some workflows require that one or more branches of development on one -machine be replicated on another machine, but the two machines cannot -be directly connected, and therefore the interactive Git protocols (git, -ssh, http) cannot be used. +Create, unpack, and manipulate "bundle" files. Bundles are used for +the "offline" transfer of Git objects without an active "server" +sitting on the other side of the network connection. -The 'git bundle' command packages objects and references in an archive -at the originating machine, which can then be imported into another -repository using 'git fetch', 'git pull', or 'git clone', -after moving the archive by some means (e.g., by sneakernet). +They can be used to create both incremental and full backups of a +repository, and to relay the state of the references in one repository +to another. -As no -direct connection between the repositories exists, the user must specify a -basis for the bundle that is held by the destination repository: the -bundle assumes that all objects in the basis are already in the -destination repository. +Git commands that fetch or otherwise "read" via protocols such as +`ssh://` and `https://` can also operate on bundle files. 
It is +possible linkgit:git-clone[1] a new repository from a bundle, to use +linkgit:git-fetch[1] to fetch from one, and to list the references +contained within it with linkgit:git-ls-remote[1]. There's no +corresponding "write" support, i.e.a 'git push' into a bundle is not +supported. + +See the "EXAMPLES" section below for examples of how to use bundles. + +BUNDLE FORMAT +------------- + +Bundles are `.pack` files (see linkgit:git-pack-objects[1]) with a +header indicating what references are contained within the bundle. + +Like the the packed archive format itself bundles can either be +self-contained, or be created using exclusions. +See the "OBJECT PREREQUISITES" section below. + +Bundles created using revision exclusions are "thin packs" created +using the `--thin` option to linkgit:git-pack-objects[1], and +unbundled using the `--fix-thin` option to linkgit:git-index-pack[1]. + +There is no option to create a "thick pack" when using revision +exclusions, users should not be concerned about the difference. By +using "thin packs" bundles created using exclusions are smaller in +size. That they're "thin" under the hood is merely noted here as a +curiosity, and as a reference to other documentation + +See link:technical/bundle-format.html[the `bundle-format` +documentation] for more details and the discussion of "thin pack" in +link:technical/pack-format.html[the pack format documentation] for +further details. OPTIONS ------- @@ -117,28 +144,88 @@ unbundle <file>:: SPECIFYING REFERENCES --------------------- -'git bundle' will only package references that are shown by -'git show-ref': this includes heads, tags, and remote heads. References -such as `master~1` cannot be packaged, but are perfectly suitable for -defining the basis. More than one reference may be packaged, and more -than one basis can be specified. The objects packaged are those not -contained in the union of the given bases. Each basis can be -specified explicitly (e.g. `^master~10`), or implicitly (e.g. -`master~10..master`, `--since=10.days.ago master`). +Revisions must accompanied by reference names to be packaged in a +bundle. + +More than one reference may be packaged, and more than one set of prerequisite objects can +be specified. The objects packaged are those not contained in the +union of the prerequisites. + +The 'git bundle create' command resolves the reference names for you +using the same rules as `git rev-parse --abbrev-ref=loose`. Each +prerequisite can be specified explicitly (e.g. `^master~10`), or implicitly +(e.g. `master~10..master`, `--since=10.days.ago master`). + +All of these simple cases are OK (assuming we have a "master" and +"next" branch): + +---------------- +$ git bundle create master.bundle master +$ echo master | git bundle create master.bundle --stdin +$ git bundle create master-and-next.bundle master next +$ (echo master; echo next) | git bundle create master-and-next.bundle --stdin +---------------- + +And so are these (and the same but omitted `--stdin` examples): + +---------------- +$ git bundle create recent-master.bundle master~10..master +$ git bundle create recent-updates.bundle master~10..master next~5..next +---------------- + +A revision name or a range whose right-hand-side cannot be resolved to +a reference is not accepted: + +---------------- +$ git bundle create HEAD.bundle $(git rev-parse HEAD) +fatal: Refusing to create empty bundle. +$ git bundle create master-yesterday.bundle master~10..master~5 +fatal: Refusing to create empty bundle. 
+---------------- + +OBJECT PREREQUISITES +-------------------- + +When creating bundles it is possible to create a self-contained bundle +that can be unbundled in a repository with no common history, as well +as providing negative revisions to exclude objects needed in the +earlier parts of the history. + +Feeding a revision such as `new` to `git bundle create` will create a +bundle file that contains all the objects reachable from the revision +`new`. That bundle can be unbundled in any repository to obtain a full +history that leads to the revision `new`: + +---------------- +$ git bundle create full.bundle new +---------------- + +A revision range such as `old..new` will produce a bundle file that +will require the revision `old` (and any objects reachable from it) +to exist for the bundle to be "unbundle"-able: + +---------------- +$ git bundle create full.bundle old..new +---------------- + +A self-contained bundle without any prerequisites can be extracted +into anywhere, even into an empty repository, or be cloned from +(i.e., `new`, but not `old..new`). -It is very important that the basis used be held by the destination. It is okay to err on the side of caution, causing the bundle file to contain objects already in the destination, as these are ignored when unpacking at the destination. -`git clone` can use any bundle created without negative refspecs -(e.g., `new`, but not `old..new`). If you want to match `git clone --mirror`, which would include your refs such as `refs/remotes/*`, use `--all`. If you want to provide the same set of refs that a clone directly from the source repository would get, use `--branches --tags` for the `<git-rev-list-args>`. +The 'git bundle verify' command can be used to check whether your +recipient repository has the required prerequisite commits for a +bundle. + EXAMPLES -------- @@ -149,7 +236,7 @@ but we can move data from A to B via some mechanism (CD, email, etc.). We want to update R2 with development made on the branch master in R1. To bootstrap the process, you can first create a bundle that does not have -any basis. You can use a tag to remember up to what commit you last +any prerequisites. You can use a tag to remember up to what commit you last processed, in order to make it easy to later update the other repository with an incremental bundle: @@ -200,7 +287,7 @@ machineB$ git pull If you know up to what commit the intended recipient repository should have the necessary objects, you can use that knowledge to specify the -basis, giving a cut-off point to limit the revisions and objects that go +prerequisites, giving a cut-off point to limit the revisions and objects that go in the resulting bundle. The previous example used the lastR2bundle tag for this purpose, but you can use any other options that you would give to the linkgit:git-log[1] command. 
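As a brief recap of the incremental workflow sketched above (the bundle file name is an arbitrary choice for this illustration, not something the patch introduces), machine A bundles everything since the last transfer, moves the marker tag forward, and machine B verifies the prerequisites before pulling:

----------------
machineA$ git bundle create increment.bundle lastR2bundle..master
machineA$ git tag -f lastR2bundle master
machineB$ git bundle verify increment.bundle
machineB$ git pull increment.bundle master
----------------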
Here are more examples: @@ -211,7 +298,7 @@ You can use a tag that is present in both: $ git bundle create mybundle v1.0.0..master ---------------- -You can use a basis based on time: +You can use a prerequisite based on time: ---------------- $ git bundle create mybundle --since=10.days master @@ -224,7 +311,7 @@ $ git bundle create mybundle -10 master ---------------- You can run `git-bundle verify` to see if you can extract from a bundle -that was created with a basis: +that was created with a prerequisite: ---------------- $ git bundle verify mybundle diff --git a/Documentation/git-column.txt b/Documentation/git-column.txt index f58e9c43e6..6cea9ab463 100644 --- a/Documentation/git-column.txt +++ b/Documentation/git-column.txt @@ -39,7 +39,7 @@ OPTIONS --indent=<string>:: String to be printed at the beginning of each line. ---nl=<N>:: +--nl=<string>:: String to be printed at the end of each line, including newline character. diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt index 2dc4bae6da..992225f612 100644 --- a/Documentation/git-config.txt +++ b/Documentation/git-config.txt @@ -71,6 +71,9 @@ codes are: On success, the command returns the exit code 0. +A list of all available configuration variables can be obtained using the +`git help --config` command. + [[OPTIONS]] OPTIONS ------- diff --git a/Documentation/git-cvsserver.txt b/Documentation/git-cvsserver.txt index f2e4a47ebe..4dc57ed254 100644 --- a/Documentation/git-cvsserver.txt +++ b/Documentation/git-cvsserver.txt @@ -99,7 +99,7 @@ looks like ------ -Only anonymous access is provided by pserve by default. To commit you +Only anonymous access is provided by pserver by default. To commit you will have to create pserver accounts, simply add a gitcvs.authdb setting in the config file of the repositories you want the cvsserver to allow writes to, for example: @@ -114,21 +114,20 @@ The format of these files is username followed by the encrypted password, for example: ------ - myuser:$1Oyx5r9mdGZ2 - myuser:$1$BA)@$vbnMJMDym7tA32AamXrm./ + myuser:sqkNi8zPf01HI + myuser:$1$9K7FzU28$VfF6EoPYCJEYcVQwATgOP/ + myuser:$5$.NqmNH1vwfzGpV8B$znZIcumu1tNLATgV2l6e1/mY8RzhUDHMOaVOeL1cxV3 ------ You can use the 'htpasswd' facility that comes with Apache to make these -files, but Apache's MD5 crypt method differs from the one used by most C -library's crypt() function, so don't use the -m option. +files, but only with the -d option (or -B if your system suports it). -Alternatively you can produce the password with perl's crypt() operator: ------ - perl -e 'my ($user, $pass) = @ARGV; printf "%s:%s\n", $user, crypt($user, $pass)' $USER password ------ +Preferably use the system specific utility that manages password hash +creation in your platform (e.g. mkpasswd in Linux, encrypt in OpenBSD or +pwhash in NetBSD) and paste it in the right location. Then provide your password via the pserver method, for example: ------ - cvs -d:pserver:someuser:somepassword <at> server/path/repo.git co <HEAD_name> + cvs -d:pserver:someuser:somepassword@server:/path/repo.git co <HEAD_name> ------ No special setup is needed for SSH access, other than having Git tools in the PATH. 
If you have clients that do not accept the CVS_SERVER @@ -138,7 +137,7 @@ Note: Newer CVS versions (>= 1.12.11) also support specifying CVS_SERVER directly in CVSROOT like ------ -cvs -d ":ext;CVS_SERVER=git cvsserver:user@server/path/repo.git" co <HEAD_name> + cvs -d ":ext;CVS_SERVER=git cvsserver:user@server/path/repo.git" co <HEAD_name> ------ This has the advantage that it will be saved in your 'CVS/Root' files and you don't need to worry about always setting the correct environment @@ -186,8 +185,8 @@ allowing access over SSH. + -- ------ - export CVSROOT=:ext:user@server:/var/git/project.git - export CVS_SERVER="git cvsserver" + export CVSROOT=:ext:user@server:/var/git/project.git + export CVS_SERVER="git cvsserver" ------ -- 4. For SSH clients that will make commits, make sure their server-side @@ -203,7 +202,7 @@ allowing access over SSH. `project-master` directory: + ------ - cvs co -d project-master master + cvs co -d project-master master ------ [[dbbackend]] diff --git a/Documentation/git-for-each-ref.txt b/Documentation/git-for-each-ref.txt index 2ae2478de7..6da899c629 100644 --- a/Documentation/git-for-each-ref.txt +++ b/Documentation/git-for-each-ref.txt @@ -235,6 +235,15 @@ and `date` to extract the named component. For email fields (`authoremail`, without angle brackets, and `:localpart` to get the part before the `@` symbol out of the trimmed email. +The raw data in an object is `raw`. + +raw:size:: + The raw data size of the object. + +Note that `--format=%(raw)` can not be used with `--python`, `--shell`, `--tcl`, +because such language may not support arbitrary binary data in their string +variable type. + The message in a commit or a tag object is `contents`, from which `contents:<part>` can be used to extract various parts out of: diff --git a/Documentation/git-http-backend.txt b/Documentation/git-http-backend.txt index 558966aa83..0c5c0dde19 100644 --- a/Documentation/git-http-backend.txt +++ b/Documentation/git-http-backend.txt @@ -16,7 +16,9 @@ A simple CGI program to serve the contents of a Git repository to Git clients accessing the repository over http:// and https:// protocols. The program supports clients fetching using both the smart HTTP protocol and the backwards-compatible dumb HTTP protocol, as well as clients -pushing using the smart HTTP protocol. +pushing using the smart HTTP protocol. It also supports Git's +more-efficient "v2" protocol if properly configured; see the +discussion of `GIT_PROTOCOL` in the ENVIRONMENT section below. It verifies that the directory has the magic file "git-daemon-export-ok", and it will refuse to export any Git directory @@ -77,6 +79,18 @@ Apache 2.x:: SetEnv GIT_PROJECT_ROOT /var/www/git SetEnv GIT_HTTP_EXPORT_ALL ScriptAlias /git/ /usr/libexec/git-core/git-http-backend/ + +# This is not strictly necessary using Apache and a modern version of +# git-http-backend, as the webserver will pass along the header in the +# environment as HTTP_GIT_PROTOCOL, and http-backend will copy that into +# GIT_PROTOCOL. But you may need this line (or something similar if you +# are using a different webserver), or if you want to support older Git +# versions that did not do that copying. +# +# Having the webserver set up GIT_PROTOCOL is perfectly fine even with +# modern versions (and will take precedence over HTTP_GIT_PROTOCOL, +# which means it can be used to override the client's request). 
+SetEnvIf Git-Protocol ".*" GIT_PROTOCOL=$0 ---------------------------------------------------------------- + To enable anonymous read access but authenticated write access, @@ -264,6 +278,16 @@ a repository with an extremely large number of refs. The value can be specified with a unit (e.g., `100M` for 100 megabytes). The default is 10 megabytes. +Clients may probe for optional protocol capabilities (like the v2 +protocol) using the `Git-Protocol` HTTP header. In order to support +these, the contents of that header must appear in the `GIT_PROTOCOL` +environment variable. Most webservers will pass this header to the CGI +via the `HTTP_GIT_PROTOCOL` variable, and `git-http-backend` will +automatically copy that to `GIT_PROTOCOL`. However, some webservers may +be more selective about which headers they'll pass, in which case they +need to be configured explicitly (see the mention of `Git-Protocol` in +the Apache config from the earlier EXAMPLES section). + The backend process sets GIT_COMMITTER_NAME to '$REMOTE_USER' and GIT_COMMITTER_EMAIL to '$\{REMOTE_USER}@http.$\{REMOTE_ADDR\}', ensuring that any reflogs created by 'git-receive-pack' contain some diff --git a/Documentation/git-index-pack.txt b/Documentation/git-index-pack.txt index 7fa74b9e79..1f1e359225 100644 --- a/Documentation/git-index-pack.txt +++ b/Documentation/git-index-pack.txt @@ -82,6 +82,12 @@ OPTIONS --strict:: Die, if the pack contains broken objects or links. +--progress-title:: + For internal use only. ++ +Set the title of the progress bar. The title is "Receiving objects" by +default and "Indexing objects" when `--stdin` is specified. + --check-self-contained-and-connected:: Die if the pack contains broken links. For internal use only. diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt index 1e738ad398..e2cfb68ab5 100644 --- a/Documentation/git-maintenance.txt +++ b/Documentation/git-maintenance.txt @@ -179,6 +179,17 @@ OPTIONS `maintenance.<task>.enabled` configured as `true` are considered. See the 'TASKS' section for the list of accepted `<task>` values. +--scheduler=auto|crontab|systemd-timer|launchctl|schtasks:: + When combined with the `start` subcommand, specify the scheduler + for running the hourly, daily and weekly executions of + `git maintenance run`. + Possible values for `<scheduler>` are `auto`, `crontab` + (POSIX), `systemd-timer` (Linux), `launchctl` (macOS), and + `schtasks` (Windows). When `auto` is specified, the + appropriate platform-specific scheduler is used; on Linux, + `systemd-timer` is used if available, otherwise + `crontab`. Default is `auto`. + TROUBLESHOOTING --------------- @@ -277,6 +288,52 @@ schedule to ensure you are executing the correct binaries in your schedule. +BACKGROUND MAINTENANCE ON LINUX SYSTEMD SYSTEMS +----------------------------------------------- + +While Linux supports `cron`, depending on the distribution, `cron` may +be an optional package not necessarily installed. On modern Linux +distributions, systemd timers are superseding it. + +If user systemd timers are available, they will be used as a replacement +of `cron`. + +In this case, `git maintenance start` will create user systemd timer units +and start the timers. The current list of user-scheduled tasks can be found +by running `systemctl --user list-timers`. 
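For illustration only (this command is not part of the diff hunk above): on a Linux host with user systemd available, the scheduler can be selected explicitly when starting background maintenance.

----------------
$ git maintenance start --scheduler=systemd-timer
----------------

With `--scheduler=auto` (the default), Git picks the platform scheduler as described above, preferring `systemd-timer` over `crontab` on Linux when it is available.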
The timers written by `git +maintenance start` are similar to this: + +----------------------------------------------------------------------- +$ systemctl --user list-timers +NEXT LEFT LAST PASSED UNIT ACTIVATES +Thu 2021-04-29 19:00:00 CEST 42min left Thu 2021-04-29 18:00:11 CEST 17min ago git-maintenance@hourly.timer git-maintenance@hourly.service +Fri 2021-04-30 00:00:00 CEST 5h 42min left Thu 2021-04-29 00:00:11 CEST 18h ago git-maintenance@daily.timer git-maintenance@daily.service +Mon 2021-05-03 00:00:00 CEST 3 days left Mon 2021-04-26 00:00:11 CEST 3 days ago git-maintenance@weekly.timer git-maintenance@weekly.service +----------------------------------------------------------------------- + +One timer is registered for each `--schedule=<frequency>` option. + +The definition of the systemd units can be inspected in the following files: + +----------------------------------------------------------------------- +~/.config/systemd/user/git-maintenance@.timer +~/.config/systemd/user/git-maintenance@.service +~/.config/systemd/user/timers.target.wants/git-maintenance@hourly.timer +~/.config/systemd/user/timers.target.wants/git-maintenance@daily.timer +~/.config/systemd/user/timers.target.wants/git-maintenance@weekly.timer +----------------------------------------------------------------------- + +`git maintenance start` will overwrite these files and start the timer +again with `systemctl --user`, so any customization should be done by +creating a drop-in file, i.e. a `.conf` suffixed file in the +`~/.config/systemd/user/git-maintenance@.service.d` directory. + +`git maintenance stop` will stop the user systemd timers and delete +the above mentioned files. + +For more details, see `systemd.timer(5)`. + + BACKGROUND MAINTENANCE ON MACOS SYSTEMS --------------------------------------- diff --git a/Documentation/git-merge.txt b/Documentation/git-merge.txt index 3819fadac1..e4f3352eb5 100644 --- a/Documentation/git-merge.txt +++ b/Documentation/git-merge.txt @@ -61,6 +61,8 @@ merge has resulted in conflicts. OPTIONS ------- +:git-merge: 1 + include::merge-options.txt[] -m <msg>:: diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt index ffd601bc17..a9df3dbd32 100644 --- a/Documentation/git-multi-pack-index.txt +++ b/Documentation/git-multi-pack-index.txt @@ -10,7 +10,7 @@ SYNOPSIS -------- [verse] 'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress] - [--preferred-pack=<pack>] <subcommand> + [--preferred-pack=<pack>] [--[no-]bitmap] <subcommand> DESCRIPTION ----------- @@ -23,6 +23,8 @@ OPTIONS Use given directory for the location of Git objects. We check `<dir>/packs/multi-pack-index` for the current MIDX file, and `<dir>/packs` for the pack-files to index. ++ +`<dir>` must be an alternate of the current repository. --[no-]progress:: Turn progress on/off explicitly. If neither is specified, progress is @@ -37,9 +39,12 @@ write:: -- --preferred-pack=<pack>:: Optionally specify the tie-breaking pack used when - multiple packs contain the same object. If not given, - ties are broken in favor of the pack with the lowest - mtime. + multiple packs contain the same object. `<pack>` must + contain at least one object. If not given, ties are + broken in favor of the pack with the lowest mtime. + + --[no-]bitmap:: + Control whether or not a multi-pack bitmap is written. 
-- verify:: @@ -81,6 +86,13 @@ EXAMPLES $ git multi-pack-index write ----------------------------------------------- +* Write a MIDX file for the packfiles in the current .git folder with a +corresponding bitmap. ++ +------------------------------------------------------------- +$ git multi-pack-index write --preferred-pack=<pack> --bitmap +------------------------------------------------------------- + * Write a MIDX file for the packfiles in an alternate object store. + ----------------------------------------------- diff --git a/Documentation/git-pull.txt b/Documentation/git-pull.txt index 7f4b2d1982..aef757ec89 100644 --- a/Documentation/git-pull.txt +++ b/Documentation/git-pull.txt @@ -15,14 +15,17 @@ SYNOPSIS DESCRIPTION ----------- -Incorporates changes from a remote repository into the current -branch. In its default mode, `git pull` is shorthand for -`git fetch` followed by `git merge FETCH_HEAD`. - -More precisely, 'git pull' runs 'git fetch' with the given -parameters and calls 'git merge' to merge the retrieved branch -heads into the current branch. -With `--rebase`, it runs 'git rebase' instead of 'git merge'. +Incorporates changes from a remote repository into the current branch. +If the current branch is behind the remote, then by default it will +fast-forward the current branch to match the remote. If the current +branch and the remote have diverged, the user needs to specify how to +reconcile the divergent branches with `--rebase` or `--no-rebase` (or +the corresponding configuration option in `pull.rebase`). + +More precisely, `git pull` runs `git fetch` with the given parameters +and then depending on configuration options or command line flags, +will call either `git rebase` or `git merge` to reconcile diverging +branches. <repository> should be the name of a remote repository as passed to linkgit:git-fetch[1]. <refspec> can name an @@ -132,7 +135,7 @@ published that history already. Do *not* use this option unless you have read linkgit:git-rebase[1] carefully. --no-rebase:: - Override earlier --rebase. + This is shorthand for --rebase=false. Options related to fetching ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index 55af6fd24e..506345cb0e 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -79,9 +79,10 @@ remain the checked-out branch. If the upstream branch already contains a change you have made (e.g., because you mailed a patch which was applied upstream), then that commit -will be skipped. For example, running `git rebase master` on the -following history (in which `A'` and `A` introduce the same set of changes, -but have different committer information): +will be skipped and warnings will be issued (if the `merge` backend is +used). For example, running `git rebase master` on the following +history (in which `A'` and `A` introduce the same set of changes, but +have different committer information): ------------ A---B---C topic @@ -312,7 +313,10 @@ See also INCOMPATIBLE OPTIONS below. By default (or if `--no-reapply-cherry-picks` is given), these commits will be automatically dropped. Because this necessitates reading all upstream commits, this can be expensive in repos with a large number -of upstream commits that need to be read. +of upstream commits that need to be read. When using the `merge` +backend, warnings will be issued for each dropped commit (unless +`--quiet` is given). 
Advice will also be issued unless +`advice.skippedCherryPicks` is set to false (see linkgit:git-config[1]). + `--reapply-cherry-picks` allows rebase to forgo reading all upstream commits, potentially improving performance. @@ -340,9 +344,7 @@ See also INCOMPATIBLE OPTIONS below. -m:: --merge:: - Use merging strategies to rebase. When the recursive (default) merge - strategy is used, this allows rebase to be aware of renames on the - upstream side. This is the default. + Using merging strategies to rebase (default). + Note that a rebase merge works by replaying each commit from the working branch on top of the <upstream> branch. Because of this, when a merge @@ -354,9 +356,8 @@ See also INCOMPATIBLE OPTIONS below. -s <strategy>:: --strategy=<strategy>:: - Use the given merge strategy. - If there is no `-s` option 'git merge-recursive' is used - instead. This implies --merge. + Use the given merge strategy, instead of the default `ort`. + This implies `--merge`. + Because 'git rebase' replays each commit from the working branch on top of the <upstream> branch using the given strategy, using @@ -369,7 +370,7 @@ See also INCOMPATIBLE OPTIONS below. --strategy-option=<strategy-option>:: Pass the <strategy-option> through to the merge strategy. This implies `--merge` and, if no strategy has been - specified, `-s recursive`. Note the reversal of 'ours' and + specified, `-s ort`. Note the reversal of 'ours' and 'theirs' as noted above for the `-m` option. + See also INCOMPATIBLE OPTIONS below. @@ -530,7 +531,7 @@ The `--rebase-merges` mode is similar in spirit to the deprecated where commits can be reordered, inserted and dropped at will. + It is currently only possible to recreate the merge commits using the -`recursive` merge strategy; Different merge strategies can be used only via +`ort` merge strategy; different merge strategies can be used only via explicit `exec git merge -s <strategy> [...]` commands. + See also REBASING MERGES and INCOMPATIBLE OPTIONS below. @@ -1219,12 +1220,16 @@ successful merge so that the user can edit the message. If a `merge` command fails for any reason other than merge conflicts (i.e. when the merge operation did not even start), it is rescheduled immediately. -At this time, the `merge` command will *always* use the `recursive` -merge strategy for regular merges, and `octopus` for octopus merges, -with no way to choose a different one. To work around -this, an `exec` command can be used to call `git merge` explicitly, -using the fact that the labels are worktree-local refs (the ref -`refs/rewritten/onto` would correspond to the label `onto`, for example). +By default, the `merge` command will use the `ort` merge strategy for +regular merges, and `octopus` for octopus merges. One can specify a +default strategy for all merges using the `--strategy` argument when +invoking rebase, or can override specific merges in the interactive +list of commands by using an `exec` command to call `git merge` +explicitly with a `--strategy` argument. Note that when calling `git +merge` explicitly like this, you can make use of the fact that the +labels are worktree-local refs (the ref `refs/rewritten/onto` would +correspond to the label `onto`, for example) in order to refer to the +branches you want to merge. 
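A minimal sketch of the two ways described above to choose a non-default strategy; the branch name is a placeholder, not taken from the patch. Passing `--strategy` when invoking the rebase applies it to every recreated merge:

----------------
$ git rebase --rebase-merges --strategy=resolve origin/master
----------------

Alternatively, a single merge can be overridden from the todo list with an `exec git merge -s <strategy> ...` line that refers to the worktree-local `refs/rewritten/*` label refs, as the paragraph above notes.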
Note: the first command (`label onto`) labels the revision onto which the commits are rebased; The name `onto` is just a convention, as a nod diff --git a/Documentation/git-receive-pack.txt b/Documentation/git-receive-pack.txt index 25702ed730..014a78409b 100644 --- a/Documentation/git-receive-pack.txt +++ b/Documentation/git-receive-pack.txt @@ -41,6 +41,11 @@ OPTIONS <directory>:: The repository to sync into. +--http-backend-info-refs:: + Used by linkgit:git-http-backend[1] to serve up + `$GIT_URL/info/refs?service=git-receive-pack` requests. See + `--http-backend-info-refs` in linkgit:git-upload-pack[1]. + PRE-RECEIVE HOOK ---------------- Before any ref is updated, if $GIT_DIR/hooks/pre-receive file exists diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt index fdcf43f87c..42056ee9ff 100644 --- a/Documentation/git-sparse-checkout.txt +++ b/Documentation/git-sparse-checkout.txt @@ -210,6 +210,16 @@ case-insensitive check. This corrects for case mismatched filenames in the 'git sparse-checkout set' command to reflect the expected cone in the working directory. +When changing the sparse-checkout patterns in cone mode, Git will inspect each +tracked directory that is not within the sparse-checkout cone to see if it +contains any untracked files. If all of those files are ignored due to the +`.gitignore` patterns, then the directory will be deleted. If any of the +untracked files within that directory is not ignored, then no deletions will +occur within that directory and a warning message will appear. If these files +are important, then reset your sparse-checkout definition so they are included, +use `git add` and `git commit` to store them, then remove any remaining files +manually to ensure Git can behave optimally. + SUBMODULES ---------- diff --git a/Documentation/git-upload-pack.txt b/Documentation/git-upload-pack.txt index 9822c1eb1a..8f87b23ea8 100644 --- a/Documentation/git-upload-pack.txt +++ b/Documentation/git-upload-pack.txt @@ -36,14 +36,26 @@ OPTIONS This fits with the HTTP POST request processing model where a program may read the request, write a response, and must exit. ---advertise-refs:: - Only the initial ref advertisement is output, and the program exits - immediately. This fits with the HTTP GET request model, where - no request content is received but a response must be produced. +--http-backend-info-refs:: + Used by linkgit:git-http-backend[1] to serve up + `$GIT_URL/info/refs?service=git-upload-pack` requests. See + "Smart Clients" in link:technical/http-protocol.html[the HTTP + transfer protocols] documentation and "HTTP Transport" in + link:technical/protocol-v2.html[the Git Wire Protocol, Version + 2] documentation. Also understood by + linkgit:git-receive-pack[1]. <directory>:: The repository to sync from. +ENVIRONMENT +----------- + +`GIT_PROTOCOL`:: + Internal variable used for handshaking the wire protocol. Server + admins may need to configure some transports to allow this + variable to be passed. See the discussion in linkgit:git[1]. 
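A sketch of what configuring a transport for `GIT_PROTOCOL` can look like in practice; the sshd_config location and the repository URL are assumptions for illustration, not taken from the patch. On the server, OpenSSH must be told to accept the variable; on the client, the negotiated protocol can be inspected with packet tracing:

----------------
# Server side, e.g. in /etc/ssh/sshd_config (then reload sshd):
#   AcceptEnv GIT_PROTOCOL

# Client side, check which protocol version is actually used:
$ GIT_TRACE_PACKET=1 git -c protocol.version=2 ls-remote ssh://user@host/path/repo.git 2>&1 | head
----------------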
+ SEE ALSO -------- linkgit:gitnamespaces[7] diff --git a/Documentation/git-version.txt b/Documentation/git-version.txt new file mode 100644 index 0000000000..80fa7754a6 --- /dev/null +++ b/Documentation/git-version.txt @@ -0,0 +1,28 @@ +git-version(1) +============== + +NAME +---- +git-version - Display version information about Git + +SYNOPSIS +-------- +[verse] +'git version' [--build-options] + +DESCRIPTION +----------- +With no options given, the version of 'git' is printed on the standard output. + +Note that `git --version` is identical to `git version` because the +former is internally converted into the latter. + +OPTIONS +------- +--build-options:: + Include additional information about how git was built for diagnostic + purposes. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/git.txt b/Documentation/git.txt index 6dd241ef83..abace9eac2 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -41,6 +41,10 @@ OPTIONS ------- --version:: Prints the Git suite version that the 'git' program came from. ++ +This option is internaly converted to `git version ...` and accepts +the same options as the linkgit:git-version[1] command. If `--help` is +also given, it takes precedence over `--version`. --help:: Prints the synopsis and a list of the most commonly used @@ -894,6 +898,21 @@ for full details. Contains a colon ':' separated list of keys with optional values 'key[=value]'. Presence of unknown keys and values must be ignored. ++ +Note that servers may need to be configured to allow this variable to +pass over some transports. It will be propagated automatically when +accessing local repositories (i.e., `file://` or a filesystem path), as +well as over the `git://` protocol. For git-over-http, it should work +automatically in most configurations, but see the discussion in +linkgit:git-http-backend[1]. For git-over-ssh, the ssh server may need +to be configured to allow clients to pass this variable (e.g., by using +`AcceptEnv GIT_PROTOCOL` with OpenSSH). ++ +This configuration is optional. If the variable is not propagated, then +clients will fall back to the original "v0" protocol (but may miss out +on some performance improvements or features). This variable currently +only affects clones and fetches; it is not yet used for pushes (but may +be in the future). `GIT_OPTIONAL_LOCKS`:: If set to `0`, Git will complete any requested operation without diff --git a/Documentation/gitfaq.txt b/Documentation/gitfaq.txt index afdaeab850..8c1f2d5675 100644 --- a/Documentation/gitfaq.txt +++ b/Documentation/gitfaq.txt @@ -275,7 +275,7 @@ best to always use a regular merge commit. [[merge-two-revert-one]] If I make a change on two branches but revert it on one, why does the merge of those branches include the change?:: - By default, when Git does a merge, it uses a strategy called the recursive + By default, when Git does a merge, it uses a strategy called the `ort` strategy, which does a fancy three-way merge. In such a case, when Git performs the merge, it considers exactly three points: the two heads and a third point, called the _merge base_, which is usually the common ancestor of diff --git a/Documentation/merge-options.txt b/Documentation/merge-options.txt index 52565014c1..61ec157c2f 100644 --- a/Documentation/merge-options.txt +++ b/Documentation/merge-options.txt @@ -2,6 +2,9 @@ --no-commit:: Perform the merge and commit the result. This option can be used to override --no-commit. +ifdef::git-pull[] + Only useful when merging. 
+endif::git-pull[] + With --no-commit perform the merge and stop just before creating a merge commit, to give the user a chance to inspect and further @@ -39,6 +42,7 @@ set to `no` at the beginning of them. to `MERGE_MSG` before being passed on to the commit machinery in the case of a merge conflict. +ifdef::git-merge[] --ff:: --no-ff:: --ff-only:: @@ -47,6 +51,22 @@ set to `no` at the beginning of them. default unless merging an annotated (and possibly signed) tag that is not stored in its natural place in the `refs/tags/` hierarchy, in which case `--no-ff` is assumed. +endif::git-merge[] +ifdef::git-pull[] +--ff-only:: + Only update to the new history if there is no divergent local + history. This is the default when no method for reconciling + divergent histories is provided (via the --rebase=* flags). + +--ff:: +--no-ff:: + When merging rather than rebasing, specifies how a merge is + handled when the merged-in history is already a descendant of + the current history. If merging is requested, `--ff` is the + default unless merging an annotated (and possibly signed) tag + that is not stored in its natural place in the `refs/tags/` + hierarchy, in which case `--no-ff` is assumed. +endif::git-pull[] + With `--ff`, when possible resolve the merge as a fast-forward (only update the branch pointer to match the merged branch; do not create a @@ -55,9 +75,11 @@ descendant of the current history), create a merge commit. + With `--no-ff`, create a merge commit in all cases, even when the merge could instead be resolved as a fast-forward. +ifdef::git-merge[] + With `--ff-only`, resolve the merge as a fast-forward when possible. When not possible, refuse to merge and exit with a non-zero status. +endif::git-merge[] -S[<keyid>]:: --gpg-sign[=<keyid>]:: @@ -73,6 +95,9 @@ When not possible, refuse to merge and exit with a non-zero status. In addition to branch names, populate the log message with one-line descriptions from at most <n> actual commits that are being merged. See also linkgit:git-fmt-merge-msg[1]. +ifdef::git-pull[] + Only useful when merging. +endif::git-pull[] + With --no-log do not list one-line descriptions from the actual commits being merged. @@ -102,18 +127,25 @@ With --no-squash perform the merge and commit the result. This option can be used to override --squash. + With --squash, --commit is not allowed, and will fail. +ifdef::git-pull[] ++ +Only useful when merging. +endif::git-pull[] --no-verify:: This option bypasses the pre-merge and commit-msg hooks. See also linkgit:githooks[5]. +ifdef::git-pull[] + Only useful when merging. +endif::git-pull[] -s <strategy>:: --strategy=<strategy>:: Use the given merge strategy; can be supplied more than once to specify them in the order they should be tried. If there is no `-s` option, a built-in list of strategies - is used instead ('git merge-recursive' when merging a single - head, 'git merge-octopus' otherwise). + is used instead (`ort` when merging a single head, + `octopus` otherwise). -X <option>:: --strategy-option=<option>:: @@ -127,6 +159,10 @@ With --squash, --commit is not allowed, and will fail. default trust model, this means the signing key has been signed by a trusted key. If the tip commit of the side branch is not signed with a valid key, the merge is aborted. +ifdef::git-pull[] ++ +Only useful when merging. +endif::git-pull[] --summary:: --no-summary:: @@ -167,3 +203,7 @@ endif::git-pull[] projects that started their lives independently. 
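A usage sketch for the `-s <strategy>`/`-X <option>` and pull fast-forward options covered in this hunk, with `ort` now the built-in default; the branch and remote names are examples:

----
# Explicitly select the default strategy and pass a strategy option:
git merge -s ort -X ours topic

# Pull with the fast-forward-only behaviour documented for git-pull:
git pull --ff-only origin main
----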
As that is a very rare occasion, no configuration variable to enable this by default exists and will not be added. +ifdef::git-pull[] ++ +Only useful when merging. +endif::git-pull[] diff --git a/Documentation/merge-strategies.txt b/Documentation/merge-strategies.txt index 2912de706b..5fc54ec060 100644 --- a/Documentation/merge-strategies.txt +++ b/Documentation/merge-strategies.txt @@ -6,28 +6,23 @@ backend 'merge strategies' to be chosen with `-s` option. Some strategies can also take their own options, which can be passed by giving `-X<option>` arguments to `git merge` and/or `git pull`. -resolve:: - This can only resolve two heads (i.e. the current branch - and another branch you pulled from) using a 3-way merge - algorithm. It tries to carefully detect criss-cross - merge ambiguities and is considered generally safe and - fast. - -recursive:: - This can only resolve two heads using a 3-way merge - algorithm. When there is more than one common - ancestor that can be used for 3-way merge, it creates a - merged tree of the common ancestors and uses that as - the reference tree for the 3-way merge. This has been - reported to result in fewer merge conflicts without - causing mismerges by tests done on actual merge commits - taken from Linux 2.6 kernel development history. - Additionally this can detect and handle merges involving - renames, but currently cannot make use of detected - copies. This is the default merge strategy when pulling - or merging one branch. +ort:: + This is the default merge strategy when pulling or merging one + branch. This strategy can only resolve two heads using a + 3-way merge algorithm. When there is more than one common + ancestor that can be used for 3-way merge, it creates a merged + tree of the common ancestors and uses that as the reference + tree for the 3-way merge. This has been reported to result in + fewer merge conflicts without causing mismerges by tests done + on actual merge commits taken from Linux 2.6 kernel + development history. Additionally this strategy can detect + and handle merges involving renames. It does not make use of + detected copies. The name for this algorithm is an acronym + ("Ostensibly Recursive's Twin") and came from the fact that it + was written as a replacement for the previous default + algorithm, `recursive`. + -The 'recursive' strategy can take the following options: +The 'ort' strategy can take the following options: ours;; This option forces conflicting hunks to be auto-resolved cleanly by @@ -43,19 +38,6 @@ theirs;; This is the opposite of 'ours'; note that, unlike 'ours', there is no 'theirs' merge strategy to confuse this merge option with. -patience;; - With this option, 'merge-recursive' spends a little extra time - to avoid mismerges that sometimes occur due to unimportant - matching lines (e.g., braces from distinct functions). Use - this when the branches to be merged have diverged wildly. - See also linkgit:git-diff[1] `--patience`. - -diff-algorithm=[patience|minimal|histogram|myers];; - Tells 'merge-recursive' to use a different diff algorithm, which - can help avoid mismerges that occur due to unimportant matching - lines (such as braces from distinct functions). See also - linkgit:git-diff[1] `--diff-algorithm`. - ignore-space-change;; ignore-all-space;; ignore-space-at-eol;; @@ -84,11 +66,6 @@ no-renormalize;; Disables the `renormalize` option. This overrides the `merge.renormalize` configuration variable. -no-renames;; - Turn off rename detection. 
This overrides the `merge.renames` - configuration variable. - See also linkgit:git-diff[1] `--no-renames`. - find-renames[=<n>];; Turn on rename detection, optionally setting the similarity threshold. This is the default. This overrides the @@ -105,6 +82,46 @@ subtree[=<path>];; is prefixed (or stripped from the beginning) to make the shape of two trees to match. +recursive:: + This can only resolve two heads using a 3-way merge + algorithm. When there is more than one common + ancestor that can be used for 3-way merge, it creates a + merged tree of the common ancestors and uses that as + the reference tree for the 3-way merge. This has been + reported to result in fewer merge conflicts without + causing mismerges by tests done on actual merge commits + taken from Linux 2.6 kernel development history. + Additionally this can detect and handle merges involving + renames. It does not make use of detected copies. This was + the default strategy for resolving two heads from Git v0.99.9k + until v2.33.0. ++ +The 'recursive' strategy takes the same options as 'ort'. However, +there are three additional options that 'ort' ignores (not documented +above) that are potentially useful with the 'recursive' strategy: + +patience;; + Deprecated synonym for `diff-algorithm=patience`. + +diff-algorithm=[patience|minimal|histogram|myers];; + Use a different diff algorithm while merging, which can help + avoid mismerges that occur due to unimportant matching lines + (such as braces from distinct functions). See also + linkgit:git-diff[1] `--diff-algorithm`. Note that `ort` + specifically uses `diff-algorithm=histogram`, while `recursive` + defaults to the `diff.algorithm` config setting. + +no-renames;; + Turn off rename detection. This overrides the `merge.renames` + configuration variable. + See also linkgit:git-diff[1] `--no-renames`. + +resolve:: + This can only resolve two heads (i.e. the current branch + and another branch you pulled from) using a 3-way merge + algorithm. It tries to carefully detect criss-cross + merge ambiguities. It does not handle renames. + octopus:: This resolves cases with more than two heads, but refuses to do a complex merge that needs manual resolution. It is @@ -121,13 +138,13 @@ ours:: the 'recursive' merge strategy. subtree:: - This is a modified recursive strategy. When merging trees A and + This is a modified `ort` strategy. When merging trees A and B, if B corresponds to a subtree of A, B is first adjusted to match the tree structure of A, instead of reading the trees at the same level. This adjustment is also done to the common ancestor tree. -With the strategies that use 3-way merge (including the default, 'recursive'), +With the strategies that use 3-way merge (including the default, 'ort'), if a change is made on both branches, but later reverted on one of the branches, that change will be present in the merged result; some people find this behavior confusing. It occurs because only the heads and the merge base diff --git a/Documentation/pretty-options.txt b/Documentation/pretty-options.txt index 27ddaf84a1..b3af850608 100644 --- a/Documentation/pretty-options.txt +++ b/Documentation/pretty-options.txt @@ -33,14 +33,16 @@ people using 80-column terminals. used together. 
--encoding=<encoding>:: - The commit objects record the encoding used for the log message + Commit objects record the character encoding used for the log message in their encoding header; this option can be used to tell the command to re-code the commit log message in the encoding preferred by the user. For non plumbing commands this defaults to UTF-8. Note that if an object claims to be encoded in `X` and we are outputting in `X`, we will output the object verbatim; this means that invalid sequences in the original - commit may be copied to the output. + commit may be copied to the output. Likewise, if iconv(3) fails + to convert the commit, we will output the original object + verbatim, along with a warning. --expand-tabs=<n>:: --expand-tabs:: diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt index 24569b06d1..b7bd27e171 100644 --- a/Documentation/rev-list-options.txt +++ b/Documentation/rev-list-options.txt @@ -968,6 +968,11 @@ list of the missing objects. Object IDs are prefixed with a ``?'' character. objects. endif::git-rev-list[] +--unsorted-input:: + Show commits in the order they were given on the command line instead + of sorting them in reverse chronological order by commit time. Cannot + be combined with `--no-walk` or `--no-walk=sorted`. + --no-walk[=(sorted|unsorted)]:: Only show the given commits, but do not traverse their ancestors. This has no effect if a range is specified. If the argument @@ -975,7 +980,8 @@ endif::git-rev-list[] given on the command line. Otherwise (if `sorted` or no argument was given), the commits are shown in reverse chronological order by commit time. - Cannot be combined with `--graph`. + Cannot be combined with `--graph`. Cannot be combined with + `--unsorted-input` if `sorted` or no argument was given. --do-walk:: Overrides a previous `--no-walk`. diff --git a/Documentation/technical/api-parse-options.txt b/Documentation/technical/api-parse-options.txt index 5a60bbfa7f..acfd5dc1d8 100644 --- a/Documentation/technical/api-parse-options.txt +++ b/Documentation/technical/api-parse-options.txt @@ -198,11 +198,6 @@ There are some macros to easily define options: The filename will be prefixed by passing the filename along with the prefix argument of `parse_options()` to `prefix_filename()`. -`OPT_ARGUMENT(long, &int_var, description)`:: - Introduce a long-option argument that will be kept in `argv[]`. - If this option was seen, `int_var` will be set to one (except - if a `NULL` pointer was passed). - `OPT_NUMBER_CALLBACK(&var, description, func_ptr)`:: Recognize numerical options like -123 and feed the integer as if it was an argument to the function given by `func_ptr`. diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt index 037a91cbca..b9f3198fbe 100644 --- a/Documentation/technical/api-trace2.txt +++ b/Documentation/technical/api-trace2.txt @@ -493,6 +493,20 @@ about specific error arguments. } ------------ +`"cmd_ancestry"`:: + This event contains the text command name for the parent (and earlier + generations of parents) of the current process, in an array ordered from + nearest parent to furthest great-grandparent. It may not be implemented + on all platforms. ++ +------------ +{ + "event":"cmd_ancestry", + ... + "ancestry":["bash","tmux: server","systemd"] +} +------------ + `"cmd_name"`:: This event contains the command name for this git process and the hierarchy of commands from parent git processes. 
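To watch the `cmd_name` and `cmd_ancestry` events described above (the latter only on platforms that implement it), the trace2 event target can be pointed at a file; the path below is only an example:

----
# Write trace2 events as JSON lines:
GIT_TRACE2_EVENT=/tmp/trace2-events.json git status

# Look for the ancestry event among the output:
grep '"event":"cmd_ancestry"' /tmp/trace2-events.json
----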
diff --git a/Documentation/technical/bitmap-format.txt b/Documentation/technical/bitmap-format.txt index f8c18a0f7a..04b3ec2178 100644 --- a/Documentation/technical/bitmap-format.txt +++ b/Documentation/technical/bitmap-format.txt @@ -1,6 +1,44 @@ GIT bitmap v1 format ==================== +== Pack and multi-pack bitmaps + +Bitmaps store reachability information about the set of objects in a packfile, +or a multi-pack index (MIDX). The former is defined obviously, and the latter is +defined as the union of objects in packs contained in the MIDX. + +A bitmap may belong to either one pack, or the repository's multi-pack index (if +it exists). A repository may have at most one bitmap. + +An object is uniquely described by its bit position within a bitmap: + + - If the bitmap belongs to a packfile, the __n__th bit corresponds to + the __n__th object in pack order. For a function `offset` which maps + objects to their byte offset within a pack, pack order is defined as + follows: + + o1 <= o2 <==> offset(o1) <= offset(o2) + + - If the bitmap belongs to a MIDX, the __n__th bit corresponds to the + __n__th object in MIDX order. With an additional function `pack` which + maps objects to the pack they were selected from by the MIDX, MIDX order + is defined as follows: + + o1 <= o2 <==> pack(o1) <= pack(o2) /\ offset(o1) <= offset(o2) + + The ordering between packs is done according to the MIDX's .rev file. + Notably, the preferred pack sorts ahead of all other packs. + +The on-disk representation (described below) of a bitmap is the same regardless +of whether or not that bitmap belongs to a packfile or a MIDX. The only +difference is the interpretation of the bits, which is described above. + +Certain bitmap extensions are supported (see: Appendix B). No extensions are +required for bitmaps corresponding to packfiles. For bitmaps that correspond to +MIDXs, both the bit-cache and rev-cache extensions are required. + +== On-disk format + - A header appears at the beginning: 4-byte signature: {'B', 'I', 'T', 'M'} @@ -14,17 +52,19 @@ GIT bitmap v1 format The following flags are supported: - BITMAP_OPT_FULL_DAG (0x1) REQUIRED - This flag must always be present. It implies that the bitmap - index has been generated for a packfile with full closure - (i.e. where every single object in the packfile can find - its parent links inside the same packfile). This is a - requirement for the bitmap index format, also present in JGit, - that greatly reduces the complexity of the implementation. + This flag must always be present. It implies that the + bitmap index has been generated for a packfile or + multi-pack index (MIDX) with full closure (i.e. where + every single object in the packfile/MIDX can find its + parent links inside the same packfile/MIDX). This is a + requirement for the bitmap index format, also present in + JGit, that greatly reduces the complexity of the + implementation. - BITMAP_OPT_HASH_CACHE (0x4) If present, the end of the bitmap file contains `N` 32-bit name-hash values, one per object in the - pack. The format and meaning of the name-hash is + pack/MIDX. The format and meaning of the name-hash is described below. 4-byte entry count (network byte order) @@ -33,7 +73,8 @@ GIT bitmap v1 format 20-byte checksum - The SHA1 checksum of the pack this bitmap index belongs to. + The SHA1 checksum of the pack/MIDX this bitmap index + belongs to. 
- 4 EWAH bitmaps that act as type indexes @@ -50,7 +91,7 @@ GIT bitmap v1 format - Tags In each bitmap, the `n`th bit is set to true if the `n`th object - in the packfile is of that type. + in the packfile or multi-pack index is of that type. The obvious consequence is that the OR of all 4 bitmaps will result in a full set (all bits set), and the AND of all 4 bitmaps will @@ -62,8 +103,9 @@ GIT bitmap v1 format Each entry contains the following: - 4-byte object position (network byte order) - The position **in the index for the packfile** where the - bitmap for this commit is found. + The position **in the index for the packfile or + multi-pack index** where the bitmap for this commit is + found. - 1-byte XOR-offset The xor offset used to compress this bitmap. For an entry @@ -146,10 +188,11 @@ Name-hash cache --------------- If the BITMAP_OPT_HASH_CACHE flag is set, the end of the bitmap contains -a cache of 32-bit values, one per object in the pack. The value at +a cache of 32-bit values, one per object in the pack/MIDX. The value at position `i` is the hash of the pathname at which the `i`th object -(counting in index order) in the pack can be found. This can be fed -into the delta heuristics to compare objects with similar pathnames. +(counting in index or multi-pack index order) in the pack/MIDX can be found. +This can be fed into the delta heuristics to compare objects with similar +pathnames. The hash algorithm used is: diff --git a/Documentation/technical/directory-rename-detection.txt b/Documentation/technical/directory-rename-detection.txt index 49b83ef3cc..029ee2cedc 100644 --- a/Documentation/technical/directory-rename-detection.txt +++ b/Documentation/technical/directory-rename-detection.txt @@ -2,9 +2,9 @@ Directory rename detection ========================== Rename detection logic in diffcore-rename that checks for renames of -individual files is aggregated and analyzed in merge-recursive for cases -where combinations of renames indicate that a full directory has been -renamed. +individual files is also aggregated there and then analyzed in either +merge-ort or merge-recursive for cases where combinations of renames +indicate that a full directory has been renamed. Scope of abilities ------------------ @@ -88,9 +88,11 @@ directory rename detection support in: Folks have requested in the past that `git diff` detect directory renames and somehow simplify its output. It is not clear whether this would be desirable or how the output should be simplified, so this was - simply not implemented. Further, to implement this, directory rename - detection logic would need to move from merge-recursive to - diffcore-rename. + simply not implemented. Also, while diffcore-rename has most of the + logic for detecting directory renames, some of the logic is still found + within merge-ort and merge-recursive. Fully supporting directory + rename detection in diffs would require copying or moving the remaining + bits of logic to the diff machinery. * am diff --git a/Documentation/technical/http-protocol.txt b/Documentation/technical/http-protocol.txt index 96d89ea9b2..cc5126cfed 100644 --- a/Documentation/technical/http-protocol.txt +++ b/Documentation/technical/http-protocol.txt @@ -225,6 +225,9 @@ The client may send Extra Parameters (see Documentation/technical/pack-protocol.txt) as a colon-separated string in the Git-Protocol HTTP header. +Uses the `--http-backend-info-refs` option to +linkgit:git-upload-pack[1]. 
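A sketch of the smart-HTTP exchange this paragraph refers to, requesting the info/refs advertisement with the Git-Protocol header; the URL is a placeholder, and the server is assumed to translate the header into `GIT_PROTOCOL` for `git upload-pack --http-backend-info-refs`:

----
curl -s \
  -H "Git-Protocol: version=2" \
  "https://git.example.com/repo.git/info/refs?service=git-upload-pack"
----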
+ Dumb Server Response ^^^^^^^^^^^^^^^^^^^^ Dumb servers MUST respond with the dumb server reply format. diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt index fb688976c4..1a73c3ee20 100644 --- a/Documentation/technical/multi-pack-index.txt +++ b/Documentation/technical/multi-pack-index.txt @@ -71,14 +71,10 @@ Future Work still reducing the number of binary searches required for object lookups. -- The reachability bitmap is currently paired directly with a single - packfile, using the pack-order as the object order to hopefully - compress the bitmaps well using run-length encoding. This could be - extended to pair a reachability bitmap with a multi-pack-index. If - the multi-pack-index is extended to store a "stable object order" +- If the multi-pack-index is extended to store a "stable object order" (a function Order(hash) = integer that is constant for a given hash, - even as the multi-pack-index is updated) then a reachability bitmap - could point to a multi-pack-index and be updated independently. + even as the multi-pack-index is updated) then MIDX bitmaps could be + updated independently of the MIDX. - Packfiles can be marked as "special" using empty files that share the initial name but replace ".pack" with ".keep" or ".promisor". diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/technical/protocol-v2.txt index 1040d85319..21e8258ccf 100644 --- a/Documentation/technical/protocol-v2.txt +++ b/Documentation/technical/protocol-v2.txt @@ -42,7 +42,8 @@ Initial Client Request In general a client can request to speak protocol v2 by sending `version=2` through the respective side-channel for the transport being used which inevitably sets `GIT_PROTOCOL`. More information can be -found in `pack-protocol.txt` and `http-protocol.txt`. In all cases the +found in `pack-protocol.txt` and `http-protocol.txt`, as well as the +`GIT_PROTOCOL` definition in `git.txt`. In all cases the response from the server is the capability advertisement. Git Transport @@ -58,6 +59,8 @@ SSH and File Transport When using either the ssh:// or file:// transport, the GIT_PROTOCOL environment variable must be set explicitly to include "version=2". +The server may need to be configured to allow this environment variable +to pass. HTTP Transport ~~~~~~~~~~~~~~ @@ -81,6 +84,12 @@ A v2 server would reply: Subsequent requests are then made directly to the service `$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack). +Uses the `--http-backend-info-refs` option to +linkgit:git-upload-pack[1]. + +The server may need to be configured to pass this header's contents via +the `GIT_PROTOCOL` variable. See the discussion in `git-http-backend.txt`. + Capability Advertisement ------------------------ @@ -190,7 +199,11 @@ ls-refs takes in the following arguments: Show peeled tags. ref-prefix <prefix> When specified, only references having a prefix matching one of - the provided prefixes are displayed. + the provided prefixes are displayed. Multiple instances may be + given, in which case references matching any prefix will be + shown. Note that this is purely for optimization; a server MAY + show refs not matching the prefix if it chooses, and clients + should filter the result themselves. If the 'unborn' feature is advertised the following argument can be included in the client's request. 
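To observe the `ref-prefix` arguments a protocol v2 client sends for `ls-refs`, packet tracing can be enabled; the remote name and pattern are examples, and per the text above the server MAY still return refs outside the requested prefixes:

----
GIT_TRACE_PACKET=1 git -c protocol.version=2 ls-remote origin 'refs/heads/*'
----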
diff --git a/Documentation/user-manual.txt b/Documentation/user-manual.txt index 96240598e3..865074bed4 100644 --- a/Documentation/user-manual.txt +++ b/Documentation/user-manual.txt @@ -3190,7 +3190,7 @@ that *updated* thing--the old state that you added originally ends up not being pointed to by any commit or tree, so it's now a dangling blob object. -Similarly, when the "recursive" merge strategy runs, and finds that +Similarly, when the "ort" merge strategy runs, and finds that there are criss-cross merges and thus more than one merge base (which is fairly unusual, but it does happen), it will generate one temporary midway tree (or possibly even more, if you had lots of criss-crossing diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index b1c0d4eb2e..b48559d654 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=GIT-VERSION-FILE -DEF_VER=v2.33.0 +DEF_VER=v2.33.GIT LF=' ' @@ -138,12 +138,15 @@ Issues of note: BLK_SHA1. Also included is a version optimized for PowerPC (PPC_SHA1). - - "libcurl" library is used by git-http-fetch, git-fetch, and, if - the curl version >= 7.34.0, for git-imap-send. You might also - want the "curl" executable for debugging purposes. If you do not - use http:// or https:// repositories, and do not want to put - patches into an IMAP mailbox, you do not have to have them - (use NO_CURL). + - "libcurl" library is used for fetching and pushing + repositories over http:// or https://, as well as by + git-imap-send if the curl version is >= 7.34.0. If you do + not need that functionality, use NO_CURL to build without + it. + + Git requires version "7.19.4" or later of "libcurl" to build + without NO_CURL. This version requirement may be bumped in + the future. - "expat" library; git-http-push uses it for remote lock management over DAV. Similar to "curl" above, this is optional @@ -409,15 +409,6 @@ all:: # Define NEEDS_LIBRT if your platform requires linking with librt (glibc version # before 2.17) for clock_gettime and CLOCK_MONOTONIC. # -# Define USE_PARENS_AROUND_GETTEXT_N to "yes" if your compiler happily -# compiles the following initialization: -# -# static const char s[] = ("FOO"); -# -# and define it to "no" if you need to remove the parentheses () around the -# constant. The default is "auto", which means to use parentheses if your -# compiler is detected to support it. -# # Define HAVE_BSD_SYSCTL if your platform has a BSD-compatible sysctl function. # # Define HAVE_GETDELIM if your system has the getdelim() function. @@ -465,6 +456,9 @@ all:: # the global variable _wpgmptr containing the absolute path of the current # executable (this is the case on Windows). # +# INSTALL_STRIP can be set to "-s" to strip binaries during installation, +# if your $(INSTALL) command supports the option. +# # Define GENERATE_COMPILATION_DATABASE to "yes" to generate JSON compilation # database entries during compilation if your compiler supports it, using the # `-MJ` flag. The JSON entries will be placed in the `compile_commands/` @@ -495,10 +489,9 @@ all:: # setting this flag the exceptions are removed, and all of # -Wextra is used. # -# pedantic: +# no-pedantic: # -# Enable -pedantic compilation. This also disables -# USE_PARENS_AROUND_GETTEXT_N to produce only relevant warnings. +# Disable -pedantic compilation. 
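A build sketch exercising the knobs touched above; the install prefix is an example, and `DEVOPTS=no-pedantic` is assumed to be honoured by config.mak.dev as the comment describes:

----
# Developer build with -pedantic disabled (now opt-out rather than opt-in):
make DEVELOPER=1 DEVOPTS=no-pedantic

# Strip installed binaries, if the local install(1) supports -s:
make prefix=/opt/git INSTALL_STRIP=-s install
----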
GIT-VERSION-FILE: FORCE @$(SHELL_PATH) ./GIT-VERSION-GEN @@ -1285,6 +1278,7 @@ endif ifeq ($(COMPUTE_HEADER_DEPENDENCIES),auto) dep_check = $(shell $(CC) $(ALL_CFLAGS) \ + -Wno-pedantic \ -c -MF /dev/null -MQ /dev/null -MMD -MP \ -x c /dev/null -o /dev/null 2>&1; \ echo $$?) @@ -1348,14 +1342,6 @@ ifneq (,$(SOCKLEN_T)) BASIC_CFLAGS += -Dsocklen_t=$(SOCKLEN_T) endif -ifeq (yes,$(USE_PARENS_AROUND_GETTEXT_N)) - BASIC_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=1 -else -ifeq (no,$(USE_PARENS_AROUND_GETTEXT_N)) - BASIC_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0 -endif -endif - ifeq ($(uname_S),Darwin) ifndef NO_FINK ifeq ($(shell test -d /sw/lib && echo y),y) @@ -1437,15 +1423,8 @@ else REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES) PROGRAM_OBJS += http-fetch.o PROGRAMS += $(REMOTE_CURL_NAMES) - curl_check := $(shell (echo 070908; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p) - ifeq "$(curl_check)" "070908" - ifndef NO_EXPAT - PROGRAM_OBJS += http-push.o - else - EXCLUDED_PROGRAMS += git-http-push - endif - else - EXCLUDED_PROGRAMS += git-http-push + ifndef NO_EXPAT + PROGRAM_OBJS += http-push.o endif curl_check := $(shell (echo 072200; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p) ifeq "$(curl_check)" "072200" @@ -1918,6 +1897,10 @@ ifneq ($(PROCFS_EXECUTABLE_PATH),) BASIC_CFLAGS += '-DPROCFS_EXECUTABLE_PATH="$(procfs_executable_path_SQ)"' endif +ifndef HAVE_PLATFORM_PROCINFO + COMPAT_OBJS += compat/stub/procinfo.o +endif + ifdef HAVE_NS_GET_EXECUTABLE_PATH BASIC_CFLAGS += -DHAVE_NS_GET_EXECUTABLE_PATH endif @@ -2478,7 +2461,6 @@ dep_args = -MF $(dep_file) -MQ $@ -MMD -MP endif ifneq ($(COMPUTE_HEADER_DEPENDENCIES),yes) -dep_dirs = missing_dep_dirs = dep_args = endif @@ -2604,10 +2586,10 @@ $(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o GIT-LDFLAGS $(GITLIBS $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS) $(LIB_FILE): $(LIB_OBJS) - $(QUIET_AR)$(AR) $(ARFLAGS) $@ $^ + $(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^ $(XDIFF_LIB): $(XDIFF_OBJS) - $(QUIET_AR)$(AR) $(ARFLAGS) $@ $^ + $(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^ export DEFAULT_EDITOR DEFAULT_PAGER @@ -2747,19 +2729,25 @@ FIND_SOURCE_FILES = ( \ | sed -e 's|^\./||' \ ) -$(ETAGS_TARGET): FORCE - $(QUIET_GEN)$(RM) "$(ETAGS_TARGET)+" && \ - $(FIND_SOURCE_FILES) | xargs etags -a -o "$(ETAGS_TARGET)+" && \ - mv "$(ETAGS_TARGET)+" "$(ETAGS_TARGET)" +FOUND_SOURCE_FILES = $(shell $(FIND_SOURCE_FILES)) -tags: FORCE - $(QUIET_GEN)$(RM) tags+ && \ - $(FIND_SOURCE_FILES) | xargs ctags -a -o tags+ && \ - mv tags+ tags +$(ETAGS_TARGET): $(FOUND_SOURCE_FILES) + $(QUIET_GEN)$(RM) $@+ && \ + echo $(FOUND_SOURCE_FILES) | xargs etags -a -o $@+ && \ + mv $@+ $@ -cscope: - $(RM) cscope* - $(FIND_SOURCE_FILES) | xargs cscope -b +tags: $(FOUND_SOURCE_FILES) + $(QUIET_GEN)$(RM) $@+ && \ + echo $(FOUND_SOURCE_FILES) | xargs ctags -a -o $@+ && \ + mv $@+ $@ + +cscope.out: $(FOUND_SOURCE_FILES) + $(QUIET_GEN)$(RM) $@+ && \ + echo $(FOUND_SOURCE_FILES) | xargs cscope -f$@+ -b && \ + mv $@+ $@ + +.PHONY: cscope +cscope: cscope.out ### Detect prefix changes TRACK_PREFIX = $(bindir_SQ):$(gitexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):\ @@ -2850,6 +2838,11 @@ endif ifdef GIT_TEST_PERL_FATAL_WARNINGS @echo GIT_TEST_PERL_FATAL_WARNINGS=\''$(subst ','\'',$(subst ','\'',$(GIT_TEST_PERL_FATAL_WARNINGS)))'\' >>$@+ endif +ifdef RUNTIME_PREFIX + @echo RUNTIME_PREFIX=\'true\' >>$@+ +else + @echo RUNTIME_PREFIX=\'false\' >>$@+ +endif @if cmp $@+ $@ >/dev/null 2>&1; then 
$(RM) $@+; else mv $@+ $@; fi ### Detect Python interpreter path changes @@ -2943,7 +2936,7 @@ check: config-list.h command-list.h exit 1; \ fi -FOUND_C_SOURCES = $(filter %.c,$(shell $(FIND_SOURCE_FILES))) +FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES)) COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES)) %.cocci.patch: %.cocci $(COCCI_SOURCES) @@ -2996,7 +2989,8 @@ mergetools_instdir = $(prefix)/$(mergetoolsdir) endif mergetools_instdir_SQ = $(subst ','\'',$(mergetools_instdir)) -install_bindir_programs := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)) $(BINDIR_PROGRAMS_NO_X) +install_bindir_xprograms := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)) +install_bindir_programs := $(install_bindir_xprograms) $(BINDIR_PROGRAMS_NO_X) .PHONY: profile-install profile-fast-install profile-install: profile @@ -3005,12 +2999,17 @@ profile-install: profile profile-fast-install: profile-fast $(MAKE) install +INSTALL_STRIP = + install: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' - $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' + $(INSTALL) $(INSTALL_STRIP) $(PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' + $(INSTALL) $(SCRIPTS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' $(INSTALL) -m 644 $(SCRIPT_LIB) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' - $(INSTALL) $(install_bindir_programs) '$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) $(INSTALL_STRIP) $(install_bindir_xprograms) '$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) $(BINDIR_PROGRAMS_NO_X) '$(DESTDIR_SQ)$(bindir_SQ)' + ifdef MSVC # We DO NOT install the individual foo.o.pdb files because they # have already been rolled up into the exe's pdb file. @@ -3091,8 +3090,7 @@ endif ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \ ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \ cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \ - done && \ - ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X" + done .PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf .PHONY: quick-install-doc quick-install-man quick-install-html @@ -3268,7 +3266,7 @@ endif .PHONY: all install profile-clean cocciclean clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell -.PHONY: FORCE cscope +.PHONY: FORCE ### Check documentation # @@ -1 +1 @@ -Documentation/RelNotes/2.33.0.txt
\ No newline at end of file +Documentation/RelNotes/2.34.0.txt
\ No newline at end of file @@ -4,37 +4,6 @@ #include "help.h" #include "string-list.h" -int advice_fetch_show_forced_updates = 1; -int advice_push_update_rejected = 1; -int advice_push_non_ff_current = 1; -int advice_push_non_ff_matching = 1; -int advice_push_already_exists = 1; -int advice_push_fetch_first = 1; -int advice_push_needs_force = 1; -int advice_push_unqualified_ref_name = 1; -int advice_push_ref_needs_update = 1; -int advice_status_hints = 1; -int advice_status_u_option = 1; -int advice_status_ahead_behind_warning = 1; -int advice_commit_before_merge = 1; -int advice_reset_quiet_warning = 1; -int advice_resolve_conflict = 1; -int advice_sequencer_in_use = 1; -int advice_implicit_identity = 1; -int advice_detached_head = 1; -int advice_set_upstream_failure = 1; -int advice_object_name_warning = 1; -int advice_amworkdir = 1; -int advice_rm_hints = 1; -int advice_add_embedded_repo = 1; -int advice_ignored_hook = 1; -int advice_waiting_for_editor = 1; -int advice_graft_file_deprecated = 1; -int advice_checkout_ambiguous_remote_branch_name = 1; -int advice_submodule_alternate_error_strategy_die = 1; -int advice_add_ignored_file = 1; -int advice_add_empty_pathspec = 1; - static int advice_use_color = -1; static char advice_colors[][COLOR_MAXLEN] = { GIT_COLOR_RESET, @@ -63,49 +32,12 @@ static const char *advise_get_color(enum color_advice ix) } static struct { - const char *name; - int *preference; -} advice_config[] = { - { "fetchShowForcedUpdates", &advice_fetch_show_forced_updates }, - { "pushUpdateRejected", &advice_push_update_rejected }, - { "pushNonFFCurrent", &advice_push_non_ff_current }, - { "pushNonFFMatching", &advice_push_non_ff_matching }, - { "pushAlreadyExists", &advice_push_already_exists }, - { "pushFetchFirst", &advice_push_fetch_first }, - { "pushNeedsForce", &advice_push_needs_force }, - { "pushUnqualifiedRefName", &advice_push_unqualified_ref_name }, - { "pushRefNeedsUpdate", &advice_push_ref_needs_update }, - { "statusHints", &advice_status_hints }, - { "statusUoption", &advice_status_u_option }, - { "statusAheadBehindWarning", &advice_status_ahead_behind_warning }, - { "commitBeforeMerge", &advice_commit_before_merge }, - { "resetQuiet", &advice_reset_quiet_warning }, - { "resolveConflict", &advice_resolve_conflict }, - { "sequencerInUse", &advice_sequencer_in_use }, - { "implicitIdentity", &advice_implicit_identity }, - { "detachedHead", &advice_detached_head }, - { "setUpstreamFailure", &advice_set_upstream_failure }, - { "objectNameWarning", &advice_object_name_warning }, - { "amWorkDir", &advice_amworkdir }, - { "rmHints", &advice_rm_hints }, - { "addEmbeddedRepo", &advice_add_embedded_repo }, - { "ignoredHook", &advice_ignored_hook }, - { "waitingForEditor", &advice_waiting_for_editor }, - { "graftFileDeprecated", &advice_graft_file_deprecated }, - { "checkoutAmbiguousRemoteBranchName", &advice_checkout_ambiguous_remote_branch_name }, - { "submoduleAlternateErrorStrategyDie", &advice_submodule_alternate_error_strategy_die }, - { "addIgnoredFile", &advice_add_ignored_file }, - { "addEmptyPathspec", &advice_add_empty_pathspec }, - - /* make this an alias for backward compatibility */ - { "pushNonFastForward", &advice_push_update_rejected } -}; - -static struct { const char *key; int enabled; } advice_setting[] = { [ADVICE_ADD_EMBEDDED_REPO] = { "addEmbeddedRepo", 1 }, + [ADVICE_ADD_EMPTY_PATHSPEC] = { "addEmptyPathspec", 1 }, + [ADVICE_ADD_IGNORED_FILE] = { "addIgnoredFile", 1 }, [ADVICE_AM_WORK_DIR] = { "amWorkDir", 1 }, 
[ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME] = { "checkoutAmbiguousRemoteBranchName", 1 }, [ADVICE_COMMIT_BEFORE_MERGE] = { "commitBeforeMerge", 1 }, @@ -133,6 +65,7 @@ static struct { [ADVICE_RM_HINTS] = { "rmHints", 1 }, [ADVICE_SEQUENCER_IN_USE] = { "sequencerInUse", 1 }, [ADVICE_SET_UPSTREAM_FAILURE] = { "setUpstreamFailure", 1 }, + [ADVICE_SKIPPED_CHERRY_PICKS] = { "skippedCherryPicks", 1 }, [ADVICE_STATUS_AHEAD_BEHIND_WARNING] = { "statusAheadBehindWarning", 1 }, [ADVICE_STATUS_HINTS] = { "statusHints", 1 }, [ADVICE_STATUS_U_OPTION] = { "statusUoption", 1 }, @@ -221,13 +154,6 @@ int git_default_advice_config(const char *var, const char *value) if (!skip_prefix(var, "advice.", &k)) return 0; - for (i = 0; i < ARRAY_SIZE(advice_config); i++) { - if (strcasecmp(k, advice_config[i].name)) - continue; - *advice_config[i].preference = git_config_bool(var, value); - break; - } - for (i = 0; i < ARRAY_SIZE(advice_setting); i++) { if (strcasecmp(k, advice_setting[i].key)) continue; @@ -262,7 +188,7 @@ int error_resolve_conflict(const char *me) error(_("It is not possible to %s because you have unmerged files."), me); - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) /* * Message used both when 'git commit' fails and when * other commands doing a merge do. @@ -281,11 +207,16 @@ void NORETURN die_resolve_conflict(const char *me) void NORETURN die_conclude_merge(void) { error(_("You have not concluded your merge (MERGE_HEAD exists).")); - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) advise(_("Please, commit your changes before merging.")); die(_("Exiting because of unfinished merge.")); } +void NORETURN die_ff_impossible(void) +{ + die(_("Not possible to fast-forward, aborting.")); +} + void advise_on_updating_sparse_paths(struct string_list *pathspec_list) { struct string_list_item *item; @@ -5,37 +5,6 @@ struct string_list; -extern int advice_fetch_show_forced_updates; -extern int advice_push_update_rejected; -extern int advice_push_non_ff_current; -extern int advice_push_non_ff_matching; -extern int advice_push_already_exists; -extern int advice_push_fetch_first; -extern int advice_push_needs_force; -extern int advice_push_unqualified_ref_name; -extern int advice_push_ref_needs_update; -extern int advice_status_hints; -extern int advice_status_u_option; -extern int advice_status_ahead_behind_warning; -extern int advice_commit_before_merge; -extern int advice_reset_quiet_warning; -extern int advice_resolve_conflict; -extern int advice_sequencer_in_use; -extern int advice_implicit_identity; -extern int advice_detached_head; -extern int advice_set_upstream_failure; -extern int advice_object_name_warning; -extern int advice_amworkdir; -extern int advice_rm_hints; -extern int advice_add_embedded_repo; -extern int advice_ignored_hook; -extern int advice_waiting_for_editor; -extern int advice_graft_file_deprecated; -extern int advice_checkout_ambiguous_remote_branch_name; -extern int advice_submodule_alternate_error_strategy_die; -extern int advice_add_ignored_file; -extern int advice_add_empty_pathspec; - /* * To add a new advice, you need to: * Define a new advice_type. 
@@ -45,6 +14,8 @@ extern int advice_add_empty_pathspec; */ enum advice_type { ADVICE_ADD_EMBEDDED_REPO, + ADVICE_ADD_EMPTY_PATHSPEC, + ADVICE_ADD_IGNORED_FILE, ADVICE_AM_WORK_DIR, ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME, ADVICE_COMMIT_BEFORE_MERGE, @@ -75,6 +46,7 @@ extern int advice_add_empty_pathspec; ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE, ADVICE_UPDATE_SPARSE_PATH, ADVICE_WAITING_FOR_EDITOR, + ADVICE_SKIPPED_CHERRY_PICKS, }; int git_default_advice_config(const char *var, const char *value); @@ -96,6 +68,7 @@ void advise_if_enabled(enum advice_type type, const char *advice, ...); int error_resolve_conflict(const char *me); void NORETURN die_resolve_conflict(const char *me); void NORETURN die_conclude_merge(void); +void NORETURN die_ff_impossible(void); void advise_on_updating_sparse_paths(struct string_list *pathspec_list); void detach_advice(const char *new_name); @@ -1917,6 +1917,7 @@ static struct fragment *parse_binary_hunk(struct apply_state *state, state->linenr++; buffer += llen; + size -= llen; while (1) { int byte_length, max_byte_length, newsize; llen = linelen(buffer, size); @@ -3467,6 +3468,21 @@ static int load_preimage(struct apply_state *state, return 0; } +static int resolve_to(struct image *image, const struct object_id *result_id) +{ + unsigned long size; + enum object_type type; + + clear_image(image); + + image->buf = read_object_file(result_id, &type, &size); + if (!image->buf || type != OBJ_BLOB) + die("unable to read blob object %s", oid_to_hex(result_id)); + image->len = size; + + return 0; +} + static int three_way_merge(struct apply_state *state, struct image *image, char *path, @@ -3478,6 +3494,12 @@ static int three_way_merge(struct apply_state *state, mmbuffer_t result = { NULL }; int status; + /* resolve trivial cases first */ + if (oideq(base, ours)) + return resolve_to(image, theirs); + else if (oideq(base, theirs) || oideq(ours, theirs)) + return resolve_to(image, ours); + read_mmblob(&base_file, base); read_mmblob(&our_file, ours); read_mmblob(&their_file, theirs); @@ -191,7 +191,7 @@ static int write_archive_entry(const struct object_id *oid, const char *base, return err; } -static void queue_directory(const unsigned char *sha1, +static void queue_directory(const struct object_id *oid, struct strbuf *base, const char *filename, unsigned mode, struct archiver_context *c) { @@ -203,7 +203,7 @@ static void queue_directory(const unsigned char *sha1, d->mode = mode; c->bottom = d; d->len = xsnprintf(d->path, len, "%.*s%s/", (int)base->len, base->buf, filename); - oidread(&d->oid, sha1); + oidcpy(&d->oid, oid); } static int write_directory(struct archiver_context *c) @@ -250,8 +250,7 @@ static int queue_or_write_archive_entry(const struct object_id *oid, if (check_attr_export_ignore(check)) return 0; - queue_directory(oid->hash, base, filename, - mode, c); + queue_directory(oid, base, filename, mode, c); return READ_TREE_RECURSIVE; } @@ -14,6 +14,7 @@ #include "utf8.h" #include "quote.h" #include "thread-utils.h" +#include "dir.h" const char git_attr__true[] = "(builtin)true"; const char git_attr__false[] = "\0(builtin)false"; @@ -744,6 +745,20 @@ static struct attr_stack *read_attr_from_index(struct index_state *istate, if (!istate) return NULL; + /* + * The .gitattributes file only applies to files within its + * parent directory. In the case of cone-mode sparse-checkout, + * the .gitattributes file is sparse if and only if all paths + * within that directory are also sparse. 
Thus, don't load the + * .gitattributes file since it will not matter. + * + * In the case of a sparse index, it is critical that we don't go + * looking for a .gitattributes file, as doing so would cause the + * index to expand. + */ + if (!path_in_cone_mode_sparse_checkout(path, istate)) + return NULL; + buf = read_blob_data_from_index(istate, path, NULL); if (!buf) return NULL; @@ -23,7 +23,6 @@ static struct oid_array skipped_revs; static struct object_id *current_bad_oid; static const char *argv_checkout[] = {"checkout", "-q", NULL, "--", NULL}; -static const char *argv_show_branch[] = {"show-branch", NULL, NULL}; static const char *term_bad; static const char *term_good; @@ -728,7 +727,9 @@ static int is_expected_rev(const struct object_id *oid) static enum bisect_error bisect_checkout(const struct object_id *bisect_rev, int no_checkout) { char bisect_rev_hex[GIT_MAX_HEXSZ + 1]; - enum bisect_error res = BISECT_OK; + struct commit *commit; + struct pretty_print_context pp = {0}; + struct strbuf commit_msg = STRBUF_INIT; oid_to_hex_r(bisect_rev_hex, bisect_rev); update_ref(NULL, "BISECT_EXPECTED_REV", bisect_rev, NULL, 0, UPDATE_REFS_DIE_ON_ERR); @@ -738,24 +739,21 @@ static enum bisect_error bisect_checkout(const struct object_id *bisect_rev, int update_ref(NULL, "BISECT_HEAD", bisect_rev, NULL, 0, UPDATE_REFS_DIE_ON_ERR); } else { - res = run_command_v_opt(argv_checkout, RUN_GIT_CMD); - if (res) + if (run_command_v_opt(argv_checkout, RUN_GIT_CMD)) /* * Errors in `run_command()` itself, signaled by res < 0, * and errors in the child process, signaled by res > 0 - * can both be treated as regular BISECT_FAILURE (-1). + * can both be treated as regular BISECT_FAILED (-1). */ - return -abs(res); + return BISECT_FAILED; } - argv_show_branch[1] = bisect_rev_hex; - res = run_command_v_opt(argv_show_branch, RUN_GIT_CMD); - /* - * Errors in `run_command()` itself, signaled by res < 0, - * and errors in the child process, signaled by res > 0 - * can both be treated as regular BISECT_FAILURE (-1). - */ - return -abs(res); + commit = lookup_commit_reference(the_repository, bisect_rev); + format_commit_message(commit, "[%H] %s%n", &commit_msg, &pp); + fputs(commit_msg.buf, stdout); + strbuf_release(&commit_msg); + + return BISECT_OK; } static struct commit *get_commit_reference(struct repository *r, @@ -271,7 +271,7 @@ void create_branch(struct repository *r, real_ref = NULL; if (get_oid_mb(start_name, &oid)) { if (explicit_tracking) { - if (advice_set_upstream_failure) { + if (advice_enabled(ADVICE_SET_UPSTREAM_FAILURE)) { error(_(upstream_missing), start_name); advise(_(upstream_advice)); exit(1); diff --git a/builtin/add.c b/builtin/add.c index 09e684585d..24da07578f 100644 --- a/builtin/add.c +++ b/builtin/add.c @@ -144,8 +144,6 @@ static int renormalize_tracked_files(const struct pathspec *pathspec, int flags) { int i, retval = 0; - /* TODO: audit for interaction with sparse-index. 
*/ - ensure_full_index(&the_index); for (i = 0; i < active_nr; i++) { struct cache_entry *ce = active_cache[i]; @@ -198,7 +196,10 @@ static int refresh(int verbose, const struct pathspec *pathspec) _("Unstaged changes after refreshing the index:")); for (i = 0; i < pathspec->nr; i++) { if (!seen[i]) { - if (matches_skip_worktree(pathspec, i, &skip_worktree_seen)) { + const char *path = pathspec->items[i].original; + + if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) || + !path_in_sparse_checkout(path, &the_index)) { string_list_append(&only_match_skip_worktree, pathspec->items[i].original); } else { @@ -313,9 +314,7 @@ static int edit_patch(int argc, const char **argv, const char *prefix) rev.diffopt.output_format = DIFF_FORMAT_PATCH; rev.diffopt.use_color = 0; rev.diffopt.flags.ignore_dirty_submodules = 1; - out = open(file, O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (out < 0) - die(_("Could not open '%s' for writing."), file); + out = xopen(file, O_CREAT | O_WRONLY | O_TRUNC, 0666); rev.diffopt.file = xfdopen(out, "w"); rev.diffopt.close_file = 1; if (run_diff_files(&rev, 0)) @@ -419,6 +418,7 @@ static const char embedded_advice[] = N_( static void check_embedded_repo(const char *path) { struct strbuf name = STRBUF_INIT; + static int adviced_on_embedded_repo = 0; if (!warn_on_embedded_repo) return; @@ -430,10 +430,10 @@ static void check_embedded_repo(const char *path) strbuf_strip_suffix(&name, "/"); warning(_("adding embedded git repository: %s"), name.buf); - if (advice_add_embedded_repo) { + if (!adviced_on_embedded_repo && + advice_enabled(ADVICE_ADD_EMBEDDED_REPO)) { advise(embedded_advice, name.buf, name.buf); - /* there may be multiple entries; advise only once */ - advice_add_embedded_repo = 0; + adviced_on_embedded_repo = 1; } strbuf_release(&name); @@ -447,7 +447,7 @@ static int add_files(struct dir_struct *dir, int flags) fprintf(stderr, _(ignore_error)); for (i = 0; i < dir->ignored_nr; i++) fprintf(stderr, "%s\n", dir->ignored[i]->name); - if (advice_add_ignored_file) + if (advice_enabled(ADVICE_ADD_IGNORED_FILE)) advise(_("Use -f if you really want to add them.\n" "Turn this message off by running\n" "\"git config advice.addIgnoredFile false\"")); @@ -528,6 +528,9 @@ int cmd_add(int argc, const char **argv, const char *prefix) add_new_files = !take_worktree_changes && !refresh_only && !add_renormalize; require_pathspec = !(take_worktree_changes || (0 < addremove_explicit)); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR); /* @@ -553,7 +556,7 @@ int cmd_add(int argc, const char **argv, const char *prefix) if (require_pathspec && pathspec.nr == 0) { fprintf(stderr, _("Nothing specified, nothing added.\n")); - if (advice_add_empty_pathspec) + if (advice_enabled(ADVICE_ADD_EMPTY_PATHSPEC)) advise( _("Maybe you wanted to say 'git add .'?\n" "Turn this message off by running\n" "\"git config advice.addEmptyPathspec false\"")); diff --git a/builtin/am.c b/builtin/am.c index 0c2ad96b70..e4a0ff9cd7 100644 --- a/builtin/am.c +++ b/builtin/am.c @@ -1820,7 +1820,7 @@ static void am_run(struct am_state *state, int resume) printf_ln(_("Patch failed at %s %.*s"), msgnum(state), linelen(state->msg), state->msg); - if (advice_amworkdir) + if (advice_enabled(ADVICE_AM_WORK_DIR)) advise(_("Use 'git am --show-current-patch=diff' to see the failed patch")); die_user_resolve(state); @@ -1848,7 +1848,6 @@ next: */ if (!state->rebasing) { am_destroy(state); - 
close_object_store(the_repository->objects); run_auto_maintenance(state->quiet); } } @@ -2106,7 +2105,8 @@ static void am_abort(struct am_state *state) if (!has_orig_head) oidcpy(&orig_head, the_hash_algo->empty_tree); - clean_index(&curr_head, &orig_head); + if (clean_index(&curr_head, &orig_head)) + die(_("failed to clean index")); if (has_orig_head) update_ref("am --abort", "HEAD", &orig_head, diff --git a/builtin/archive.c b/builtin/archive.c index 45d11669aa..7176b041b6 100644 --- a/builtin/archive.c +++ b/builtin/archive.c @@ -12,9 +12,7 @@ static void create_output_file(const char *output_file) { - int output_fd = open(output_file, O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (output_fd < 0) - die_errno(_("could not create archive file '%s'"), output_file); + int output_fd = xopen(output_file, O_CREAT | O_WRONLY | O_TRUNC, 0666); if (output_fd != 1) { if (dup2(output_fd, 1) < 0) die_errno(_("could not redirect output")); diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c index f184eaeac6..bc210b23c8 100644 --- a/builtin/bisect--helper.c +++ b/builtin/bisect--helper.c @@ -18,10 +18,10 @@ static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG") static GIT_PATH_FUNC(git_path_head_name, "head-name") static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES") static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT") +static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN") static const char * const git_bisect_helper_usage[] = { N_("git bisect--helper --bisect-reset [<commit>]"), - N_("git bisect--helper --bisect-next-check <good_term> <bad_term> [<term>]"), N_("git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]"), N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]" " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"), @@ -30,6 +30,8 @@ static const char * const git_bisect_helper_usage[] = { N_("git bisect--helper --bisect-state (good|old) [<rev>...]"), N_("git bisect--helper --bisect-replay <filename>"), N_("git bisect--helper --bisect-skip [(<rev>|<range>)...]"), + N_("git bisect--helper --bisect-visualize"), + N_("git bisect--helper --bisect-run <cmd>..."), NULL }; @@ -143,6 +145,19 @@ static int append_to_file(const char *path, const char *format, ...) 
return res; } +static int print_file_to_stdout(const char *path) +{ + int fd = open(path, O_RDONLY); + int ret = 0; + + if (fd < 0) + return error_errno(_("cannot open file '%s' for reading"), path); + if (copy_fd(fd, 1) < 0) + ret = error_errno(_("failed to read '%s'"), path); + close(fd); + return ret; +} + static int check_term_format(const char *term, const char *orig_term) { int res; @@ -1036,6 +1051,125 @@ static enum bisect_error bisect_skip(struct bisect_terms *terms, const char **ar return res; } +static int bisect_visualize(struct bisect_terms *terms, const char **argv, int argc) +{ + struct strvec args = STRVEC_INIT; + int flags = RUN_COMMAND_NO_STDIN, res = 0; + struct strbuf sb = STRBUF_INIT; + + if (bisect_next_check(terms, NULL) != 0) + return BISECT_FAILED; + + if (!argc) { + if ((getenv("DISPLAY") || getenv("SESSIONNAME") || getenv("MSYSTEM") || + getenv("SECURITYSESSIONID")) && exists_in_PATH("gitk")) { + strvec_push(&args, "gitk"); + } else { + strvec_push(&args, "log"); + flags |= RUN_GIT_CMD; + } + } else { + if (argv[0][0] == '-') { + strvec_push(&args, "log"); + flags |= RUN_GIT_CMD; + } else if (strcmp(argv[0], "tig") && !starts_with(argv[0], "git")) + flags |= RUN_GIT_CMD; + + strvec_pushv(&args, argv); + } + + strvec_pushl(&args, "--bisect", "--", NULL); + + strbuf_read_file(&sb, git_path_bisect_names(), 0); + sq_dequote_to_strvec(sb.buf, &args); + strbuf_release(&sb); + + res = run_command_v_opt(args.v, flags); + strvec_clear(&args); + return res; +} + +static int bisect_run(struct bisect_terms *terms, const char **argv, int argc) +{ + int res = BISECT_OK; + struct strbuf command = STRBUF_INIT; + struct strvec args = STRVEC_INIT; + struct strvec run_args = STRVEC_INIT; + const char *new_state; + int temporary_stdout_fd, saved_stdout; + + if (bisect_next_check(terms, NULL)) + return BISECT_FAILED; + + if (argc) + sq_quote_argv(&command, argv); + else { + error(_("bisect run failed: no command provided.")); + return BISECT_FAILED; + } + + strvec_push(&run_args, command.buf); + + while (1) { + strvec_clear(&args); + + printf(_("running %s\n"), command.buf); + res = run_command_v_opt(run_args.v, RUN_USING_SHELL); + + if (res < 0 || 128 <= res) { + error(_("bisect run failed: exit code %d from" + " '%s' is < 0 or >= 128"), res, command.buf); + strbuf_release(&command); + return res; + } + + if (res == 125) + new_state = "skip"; + else if (!res) + new_state = terms->term_good; + else + new_state = terms->term_bad; + + temporary_stdout_fd = open(git_path_bisect_run(), O_CREAT | O_WRONLY | O_TRUNC, 0666); + + if (temporary_stdout_fd < 0) + return error_errno(_("cannot open file '%s' for writing"), git_path_bisect_run()); + + fflush(stdout); + saved_stdout = dup(1); + dup2(temporary_stdout_fd, 1); + + res = bisect_state(terms, &new_state, 1); + + fflush(stdout); + dup2(saved_stdout, 1); + close(saved_stdout); + close(temporary_stdout_fd); + + print_file_to_stdout(git_path_bisect_run()); + + if (res == BISECT_ONLY_SKIPPED_LEFT) + error(_("bisect run cannot continue any more")); + else if (res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) { + printf(_("bisect run success")); + res = BISECT_OK; + } else if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) { + printf(_("bisect found first bad commit")); + res = BISECT_OK; + } else if (res) { + error(_("bisect run failed:'git bisect--helper --bisect-state" + " %s' exited with error code %d"), args.v[0], res); + } else { + continue; + } + + strbuf_release(&command); + strvec_clear(&args); + strvec_clear(&run_args); + return res; + } +} + 
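The exit-code handling in `bisect_run()` above follows the documented `git bisect run` contract; a small driver sketch (the good revision and test script are examples):

----
git bisect start HEAD v2.33.0   # mark HEAD bad, v2.33.0 good
git bisect run ./test.sh        # exit 0 = good, 1-124/126-127 = bad,
                                # 125 = skip, <0 or >=128 aborts the run
git bisect visualize            # gitk in a GUI session with gitk installed,
                                # otherwise "git log"
git bisect reset
----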
int cmd_bisect__helper(int argc, const char **argv, const char *prefix) { enum { @@ -1048,7 +1182,9 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) BISECT_STATE, BISECT_LOG, BISECT_REPLAY, - BISECT_SKIP + BISECT_SKIP, + BISECT_VISUALIZE, + BISECT_RUN, } cmdmode = 0; int res = 0, nolog = 0; struct option options[] = { @@ -1070,6 +1206,10 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) N_("replay the bisection process from the given file"), BISECT_REPLAY), OPT_CMDMODE(0, "bisect-skip", &cmdmode, N_("skip some commits for checkout"), BISECT_SKIP), + OPT_CMDMODE(0, "bisect-visualize", &cmdmode, + N_("visualize the bisection"), BISECT_VISUALIZE), + OPT_CMDMODE(0, "bisect-run", &cmdmode, + N_("use <cmd>... to automatically bisect."), BISECT_RUN), OPT_BOOL(0, "no-log", &nolog, N_("no log for BISECT_WRITE")), OPT_END() @@ -1089,12 +1229,6 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) return error(_("--bisect-reset requires either no argument or a commit")); res = bisect_reset(argc ? argv[0] : NULL); break; - case BISECT_NEXT_CHECK: - if (argc != 2 && argc != 3) - return error(_("--bisect-next-check requires 2 or 3 arguments")); - set_terms(&terms, argv[1], argv[0]); - res = bisect_next_check(&terms, argc == 3 ? argv[2] : NULL); - break; case BISECT_TERMS: if (argc > 1) return error(_("--bisect-terms requires 0 or 1 argument")); @@ -1131,6 +1265,16 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) get_terms(&terms); res = bisect_skip(&terms, argv, argc); break; + case BISECT_VISUALIZE: + get_terms(&terms); + res = bisect_visualize(&terms, argv, argc); + break; + case BISECT_RUN: + if (!argc) + return error(_("bisect run failed: no command provided.")); + get_terms(&terms); + res = bisect_run(&terms, argv, argc); + break; default: BUG("unknown subcommand %d", cmdmode); } diff --git a/builtin/branch.c b/builtin/branch.c index b23b1d1752..03c7b7253a 100644 --- a/builtin/branch.c +++ b/builtin/branch.c @@ -168,7 +168,7 @@ static int check_branch_commit(const char *branchname, const char *refname, int kinds, int force) { struct commit *rev = lookup_commit_reference(the_repository, oid); - if (!rev) { + if (!force && !rev) { error(_("Couldn't look up commit object for '%s'"), refname); return -1; } diff --git a/builtin/bugreport.c b/builtin/bugreport.c index 9915a5841d..06ed10dc92 100644 --- a/builtin/bugreport.c +++ b/builtin/bugreport.c @@ -171,10 +171,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) get_populated_hooks(&buffer, !startup_info->have_repository); /* fopen doesn't offer us an O_EXCL alternative, except with glibc. 
*/ - report = open(report_path.buf, O_CREAT | O_EXCL | O_WRONLY, 0666); - - if (report < 0) - die(_("couldn't create a new file at '%s'"), report_path.buf); + report = xopen(report_path.buf, O_CREAT | O_EXCL | O_WRONLY, 0666); if (write_in_full(report, buffer.buf, buffer.len) < 0) die_errno(_("unable to write to %s"), report_path.buf); diff --git a/builtin/bundle.c b/builtin/bundle.c index 053a51bea1..5a85d7cd0f 100644 --- a/builtin/bundle.c +++ b/builtin/bundle.c @@ -39,8 +39,6 @@ static const char * const builtin_bundle_unbundle_usage[] = { NULL }; -static int verbose; - static int parse_options_cmd_bundle(int argc, const char **argv, const char* prefix, @@ -162,10 +160,15 @@ static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix) struct bundle_header header = BUNDLE_HEADER_INIT; int bundle_fd = -1; int ret; + int progress = isatty(2); + struct option options[] = { + OPT_BOOL(0, "progress", &progress, + N_("show progress meter")), OPT_END() }; char *bundle_file; + struct strvec extra_index_pack_args = STRVEC_INIT; argc = parse_options_cmd_bundle(argc, argv, prefix, builtin_bundle_unbundle_usage, options, &bundle_file); @@ -177,7 +180,11 @@ static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix) } if (!startup_info->have_repository) die(_("Need a repository to unbundle.")); - ret = !!unbundle(the_repository, &header, bundle_fd, 0) || + if (progress) + strvec_pushl(&extra_index_pack_args, "-v", "--progress-title", + _("Unbundling objects"), NULL); + ret = !!unbundle(the_repository, &header, bundle_fd, + &extra_index_pack_args) || list_bundle_refs(&header, argc, argv); bundle_header_release(&header); cleanup: @@ -188,7 +195,6 @@ cleanup: int cmd_bundle(int argc, const char **argv, const char *prefix) { struct option options[] = { - OPT__VERBOSE(&verbose, N_("be verbose; must be placed before a subcommand")), OPT_END() }; int result; diff --git a/builtin/checkout.c b/builtin/checkout.c index b5d477919a..8c69dcdf72 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -404,7 +404,7 @@ static int checkout_worktree(const struct checkout_opts *opts, mem_pool_discard(&ce_mem_pool, should_validate_cache_entries()); remove_marked_cache_entries(&the_index, 1); remove_scheduled_dirs(); - errs |= finish_delayed_checkout(&state, &nr_checkouts); + errs |= finish_delayed_checkout(&state, &nr_checkouts, opts->show_progress); if (opts->count_checkout_paths) { if (nr_unmerged) @@ -918,7 +918,7 @@ static void update_refs_for_switch(const struct checkout_opts *opts, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR); if (!opts->quiet) { if (old_branch_info->path && - advice_detached_head && !opts->force_detach) + advice_enabled(ADVICE_DETACHED_HEAD) && !opts->force_detach) detach_advice(new_branch_info->name); describe_detached_head(_("HEAD is now at"), new_branch_info->commit); } @@ -1011,7 +1011,7 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs) sb.buf); strbuf_release(&sb); - if (advice_detached_head) + if (advice_enabled(ADVICE_DETACHED_HEAD)) fprintf(stderr, Q_( /* The singular version */ @@ -1182,7 +1182,7 @@ static const char *parse_remote_branch(const char *arg, } if (!remote && num_matches > 1) { - if (advice_checkout_ambiguous_remote_branch_name) { + if (advice_enabled(ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME)) { advise(_("If you meant to check out a remote tracking branch on, e.g. 
'origin',\n" "you can do so by fully qualifying the name with the --track option:\n" "\n" diff --git a/builtin/clone.c b/builtin/clone.c index 66fe66679c..c9d4ca2664 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -217,120 +217,6 @@ static char *get_repo_path(const char *repo, int *is_bundle) return canon; } -static char *guess_dir_name(const char *repo, int is_bundle, int is_bare) -{ - const char *end = repo + strlen(repo), *start, *ptr; - size_t len; - char *dir; - - /* - * Skip scheme. - */ - start = strstr(repo, "://"); - if (start == NULL) - start = repo; - else - start += 3; - - /* - * Skip authentication data. The stripping does happen - * greedily, such that we strip up to the last '@' inside - * the host part. - */ - for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) { - if (*ptr == '@') - start = ptr + 1; - } - - /* - * Strip trailing spaces, slashes and /.git - */ - while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1]))) - end--; - if (end - start > 5 && is_dir_sep(end[-5]) && - !strncmp(end - 4, ".git", 4)) { - end -= 5; - while (start < end && is_dir_sep(end[-1])) - end--; - } - - /* - * Strip trailing port number if we've got only a - * hostname (that is, there is no dir separator but a - * colon). This check is required such that we do not - * strip URI's like '/foo/bar:2222.git', which should - * result in a dir '2222' being guessed due to backwards - * compatibility. - */ - if (memchr(start, '/', end - start) == NULL - && memchr(start, ':', end - start) != NULL) { - ptr = end; - while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':') - ptr--; - if (start < ptr && ptr[-1] == ':') - end = ptr - 1; - } - - /* - * Find last component. To remain backwards compatible we - * also regard colons as path separators, such that - * cloning a repository 'foo:bar.git' would result in a - * directory 'bar' being guessed. - */ - ptr = end; - while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':') - ptr--; - start = ptr; - - /* - * Strip .{bundle,git}. - */ - len = end - start; - strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git"); - - if (!len || (len == 1 && *start == '/')) - die(_("No directory name could be guessed.\n" - "Please specify a directory on the command line")); - - if (is_bare) - dir = xstrfmt("%.*s.git", (int)len, start); - else - dir = xstrndup(start, len); - /* - * Replace sequences of 'control' characters and whitespace - * with one ascii space, remove leading and trailing spaces. 
- */ - if (*dir) { - char *out = dir; - int prev_space = 1 /* strip leading whitespace */; - for (end = dir; *end; ++end) { - char ch = *end; - if ((unsigned char)ch < '\x20') - ch = '\x20'; - if (isspace(ch)) { - if (prev_space) - continue; - prev_space = 1; - } else - prev_space = 0; - *out++ = ch; - } - *out = '\0'; - if (out > dir && prev_space) - out[-1] = '\0'; - } - return dir; -} - -static void strip_trailing_slashes(char *dir) -{ - char *end = dir + strlen(dir); - - while (dir < end - 1 && is_dir_sep(end[-1])) - end--; - *end = '\0'; -} - static int add_one_reference(struct string_list_item *item, void *cb_data) { struct strbuf err = STRBUF_INIT; @@ -657,7 +543,7 @@ static void write_followtags(const struct ref *refs, const char *msg) } } -static int iterate_ref_map(void *cb_data, struct object_id *oid) +static const struct object_id *iterate_ref_map(void *cb_data) { struct ref **rm = cb_data; struct ref *ref = *rm; @@ -668,13 +554,11 @@ static int iterate_ref_map(void *cb_data, struct object_id *oid) */ while (ref && !ref->peer_ref) ref = ref->next; - /* Returning -1 notes "end of list" to the caller. */ if (!ref) - return -1; + return NULL; - oidcpy(oid, &ref->old_oid); *rm = ref->next; - return 0; + return &ref->old_oid; } static void update_remote_refs(const struct ref *refs, @@ -786,7 +670,7 @@ static int checkout(int submodule_progress) return 0; } if (!strcmp(head, "HEAD")) { - if (advice_detached_head) + if (advice_enabled(ADVICE_DETACHED_HEAD)) detach_advice(oid_to_hex(&oid)); FREE_AND_NULL(head); } else { @@ -1041,8 +925,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix) if (argc == 2) dir = xstrdup(argv[1]); else - dir = guess_dir_name(repo_name, is_bundle, option_bare); - strip_trailing_slashes(dir); + dir = git_url_basename(repo_name, is_bundle, option_bare); + strip_dir_trailing_slashes(dir); dest_exists = path_exists(dir); if (dest_exists && !is_empty_dir(dir)) @@ -1114,6 +998,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix) if (option_recurse_submodules.nr > 0) { struct string_list_item *item; struct strbuf sb = STRBUF_INIT; + int val; /* remove duplicates */ string_list_sort(&option_recurse_submodules); @@ -1130,6 +1015,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix) strbuf_detach(&sb, NULL)); } + if (!git_config_get_bool("submodule.stickyRecursiveClone", &val) && + val) + string_list_append(&option_config, "submodule.recurse=true"); + if (option_required_reference.nr && option_optional_reference.nr) die(_("clone --recursive is not compatible with " @@ -1340,6 +1229,9 @@ int cmd_clone(int argc, const char **argv, const char *prefix) our_head_points_at = remote_head_points_at; } else { + const char *branch; + char *ref; + if (option_branch) die(_("Remote branch %s not found in upstream %s"), option_branch, remote_name); @@ -1350,24 +1242,22 @@ int cmd_clone(int argc, const char **argv, const char *prefix) remote_head_points_at = NULL; remote_head = NULL; option_no_checkout = 1; - if (!option_bare) { - const char *branch; - char *ref; - - if (transport_ls_refs_options.unborn_head_target && - skip_prefix(transport_ls_refs_options.unborn_head_target, - "refs/heads/", &branch)) { - ref = transport_ls_refs_options.unborn_head_target; - transport_ls_refs_options.unborn_head_target = NULL; - create_symref("HEAD", ref, reflog_msg.buf); - } else { - branch = git_default_branch_name(0); - ref = xstrfmt("refs/heads/%s", branch); - } - install_branch_config(0, branch, remote_name, ref); - free(ref); + if 
(transport_ls_refs_options.unborn_head_target && + skip_prefix(transport_ls_refs_options.unborn_head_target, + "refs/heads/", &branch)) { + ref = transport_ls_refs_options.unborn_head_target; + transport_ls_refs_options.unborn_head_target = NULL; + create_symref("HEAD", ref, reflog_msg.buf); + } else { + branch = git_default_branch_name(0); + ref = xstrfmt("refs/heads/%s", branch); } + + if (!option_bare) + install_branch_config(0, branch, remote_name, ref); + + free(ref); } write_refspec_config(src_ref_prefix, our_head_points_at, diff --git a/builtin/column.c b/builtin/column.c index 40d4b3bee2..158fdf53d9 100644 --- a/builtin/column.c +++ b/builtin/column.c @@ -29,7 +29,7 @@ int cmd_column(int argc, const char **argv, const char *prefix) OPT_INTEGER(0, "raw-mode", &colopts, N_("layout to use")), OPT_INTEGER(0, "width", &copts.width, N_("maximum width")), OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("padding space on left border")), - OPT_INTEGER(0, "nl", &copts.nl, N_("padding space on right border")), + OPT_STRING(0, "nl", &copts.nl, N_("string"), N_("padding space on right border")), OPT_INTEGER(0, "padding", &copts.padding, N_("padding space between columns")), OPT_END() }; diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c index cd86315221..0386f5c775 100644 --- a/builtin/commit-graph.c +++ b/builtin/commit-graph.c @@ -9,26 +9,29 @@ #include "progress.h" #include "tag.h" -static char const * const builtin_commit_graph_usage[] = { - N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]"), - N_("git commit-graph write [--object-dir <objdir>] [--append] " - "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] " - "[--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress] " - "<split options>"), +#define BUILTIN_COMMIT_GRAPH_VERIFY_USAGE \ + N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]") + +#define BUILTIN_COMMIT_GRAPH_WRITE_USAGE \ + N_("git commit-graph write [--object-dir <objdir>] [--append] " \ + "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] " \ + "[--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress] " \ + "<split options>") + +static const char * builtin_commit_graph_verify_usage[] = { + BUILTIN_COMMIT_GRAPH_VERIFY_USAGE, NULL }; -static const char * const builtin_commit_graph_verify_usage[] = { - N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]"), +static const char * builtin_commit_graph_write_usage[] = { + BUILTIN_COMMIT_GRAPH_WRITE_USAGE, NULL }; -static const char * const builtin_commit_graph_write_usage[] = { - N_("git commit-graph write [--object-dir <objdir>] [--append] " - "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] " - "[--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress] " - "<split options>"), - NULL +static char const * const builtin_commit_graph_usage[] = { + BUILTIN_COMMIT_GRAPH_VERIFY_USAGE, + BUILTIN_COMMIT_GRAPH_WRITE_USAGE, + NULL, }; static struct opts_commit_graph { @@ -43,26 +46,18 @@ static struct opts_commit_graph { int enable_changed_paths; } opts; -static struct object_directory *find_odb(struct repository *r, - const char *obj_dir) -{ - struct object_directory *odb; - char *obj_dir_real = real_pathdup(obj_dir, 1); - struct strbuf odb_path_real = STRBUF_INIT; - - prepare_alt_odb(r); - for (odb = r->objects->odb; odb; odb = odb->next) { - strbuf_realpath(&odb_path_real, odb->path, 1); - if (!strcmp(obj_dir_real, odb_path_real.buf)) - break; - } - - 
free(obj_dir_real); - strbuf_release(&odb_path_real); +static struct option common_opts[] = { + OPT_STRING(0, "object-dir", &opts.obj_dir, + N_("dir"), + N_("the object directory to store the graph")), + OPT_BOOL(0, "progress", &opts.progress, + N_("force progress reporting")), + OPT_END() +}; - if (!odb) - die(_("could not find object directory matching %s"), obj_dir); - return odb; +static struct option *add_common_options(struct option *to) +{ + return parse_options_concat(common_opts, to); } static int graph_verify(int argc, const char **argv) @@ -76,21 +71,20 @@ static int graph_verify(int argc, const char **argv) int flags = 0; static struct option builtin_commit_graph_verify_options[] = { - OPT_STRING(0, "object-dir", &opts.obj_dir, - N_("dir"), - N_("the object directory to store the graph")), OPT_BOOL(0, "shallow", &opts.shallow, N_("if the commit-graph is split, only verify the tip file")), - OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")), OPT_END(), }; + struct option *options = add_common_options(builtin_commit_graph_verify_options); trace2_cmd_mode("verify"); opts.progress = isatty(2); argc = parse_options(argc, argv, NULL, - builtin_commit_graph_verify_options, + options, builtin_commit_graph_verify_usage, 0); + if (argc) + usage_with_options(builtin_commit_graph_verify_usage, options); if (!opts.obj_dir) opts.obj_dir = get_object_directory(); @@ -106,6 +100,7 @@ static int graph_verify(int argc, const char **argv) die_errno(_("Could not open commit-graph '%s'"), graph_name); FREE_AND_NULL(graph_name); + FREE_AND_NULL(options); if (open_ok) graph = load_commit_graph_one_fd_st(the_repository, fd, &st, odb); @@ -206,9 +201,6 @@ static int graph_write(int argc, const char **argv) struct progress *progress = NULL; static struct option builtin_commit_graph_write_options[] = { - OPT_STRING(0, "object-dir", &opts.obj_dir, - N_("dir"), - N_("the object directory to store the graph")), OPT_BOOL(0, "reachable", &opts.reachable, N_("start walk at all refs")), OPT_BOOL(0, "stdin-packs", &opts.stdin_packs, @@ -219,7 +211,6 @@ static int graph_write(int argc, const char **argv) N_("include all commits already in the commit-graph file")), OPT_BOOL(0, "changed-paths", &opts.enable_changed_paths, N_("enable computation for changed paths")), - OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")), OPT_CALLBACK_F(0, "split", &write_opts.split_flags, NULL, N_("allow writing an incremental commit-graph file"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, @@ -235,6 +226,7 @@ static int graph_write(int argc, const char **argv) 0, write_option_max_new_filters), OPT_END(), }; + struct option *options = add_common_options(builtin_commit_graph_write_options); opts.progress = isatty(2); opts.enable_changed_paths = -1; @@ -248,8 +240,10 @@ static int graph_write(int argc, const char **argv) git_config(git_commit_graph_write_config, &opts); argc = parse_options(argc, argv, NULL, - builtin_commit_graph_write_options, + options, builtin_commit_graph_write_usage, 0); + if (argc) + usage_with_options(builtin_commit_graph_write_usage, options); if (opts.reachable + opts.stdin_packs + opts.stdin_commits > 1) die(_("use at most one of --reachable, --stdin-commits, or --stdin-packs")); @@ -304,6 +298,7 @@ static int graph_write(int argc, const char **argv) result = 1; cleanup: + FREE_AND_NULL(options); string_list_clear(&pack_indexes, 0); strbuf_release(&buf); return result; @@ -311,32 +306,25 @@ cleanup: int cmd_commit_graph(int argc, const char **argv, const char *prefix) { - 
static struct option builtin_commit_graph_options[] = { - OPT_STRING(0, "object-dir", &opts.obj_dir, - N_("dir"), - N_("the object directory to store the graph")), - OPT_END(), - }; - - if (argc == 2 && !strcmp(argv[1], "-h")) - usage_with_options(builtin_commit_graph_usage, - builtin_commit_graph_options); + struct option *builtin_commit_graph_options = common_opts; git_config(git_default_config, NULL); argc = parse_options(argc, argv, prefix, builtin_commit_graph_options, builtin_commit_graph_usage, PARSE_OPT_STOP_AT_NON_OPTION); + if (!argc) + goto usage; save_commit_buffer = 0; - if (argc > 0) { - if (!strcmp(argv[0], "verify")) - return graph_verify(argc, argv); - if (!strcmp(argv[0], "write")) - return graph_write(argc, argv); - } + if (!strcmp(argv[0], "verify")) + return graph_verify(argc, argv); + else if (argc && !strcmp(argv[0], "write")) + return graph_write(argc, argv); + error(_("unrecognized subcommand: %s"), argv[0]); +usage: usage_with_options(builtin_commit_graph_usage, builtin_commit_graph_options); } diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c index 1031b9a491..63ea322933 100644 --- a/builtin/commit-tree.c +++ b/builtin/commit-tree.c @@ -88,9 +88,7 @@ static int parse_file_arg_callback(const struct option *opt, if (!strcmp(arg, "-")) fd = 0; else { - fd = open(arg, O_RDONLY); - if (fd < 0) - die_errno(_("git commit-tree: failed to open '%s'"), arg); + fd = xopen(arg, O_RDONLY); } if (strbuf_read(buf, fd, 0) < 0) die_errno(_("git commit-tree: failed to read '%s'"), arg); diff --git a/builtin/commit.c b/builtin/commit.c index 243c626307..e7320f66f9 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -203,7 +203,7 @@ static void status_init_config(struct wt_status *s, config_fn_t fn) init_diff_ui_defaults(); git_config(fn, s); determine_whence(s); - s->hints = advice_status_hints; /* must come after git_config() */ + s->hints = advice_enabled(ADVICE_STATUS_HINTS); /* must come after git_config() */ } static void rollback_index_files(void) @@ -1033,7 +1033,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, */ if (!committable && whence != FROM_MERGE && !allow_empty && !(amend && is_a_merge(current_head))) { - s->hints = advice_status_hints; + s->hints = advice_enabled(ADVICE_STATUS_HINTS); s->display_comment_prefix = old_display_comment_prefix; run_status(stdout, index_file, prefix, 0, s); if (amend) @@ -1253,8 +1253,6 @@ static int parse_and_validate_options(int argc, const char *argv[], if (logfile || have_option_m || use_message) use_editor = 0; - if (0 <= edit_flag) - use_editor = edit_flag; /* Sanity check options */ if (amend && !current_head) @@ -1344,6 +1342,9 @@ static int parse_and_validate_options(int argc, const char *argv[], } } + if (0 <= edit_flag) + use_editor = edit_flag; + cleanup_mode = get_cleanup_mode(cleanup_arg, use_editor); handle_untracked_files_arg(s); diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c index 76a6ba3722..78c02ad531 100644 --- a/builtin/credential-cache.c +++ b/builtin/credential-cache.c @@ -11,6 +11,32 @@ #define FLAG_SPAWN 0x1 #define FLAG_RELAY 0x2 +#ifdef GIT_WINDOWS_NATIVE + +static int connection_closed(int error) +{ + return (error == EINVAL); +} + +static int connection_fatally_broken(int error) +{ + return (error != ENOENT) && (error != ENETDOWN); +} + +#else + +static int connection_closed(int error) +{ + return (error == ECONNRESET); +} + +static int connection_fatally_broken(int error) +{ + return (error != ENOENT) && (error != ECONNREFUSED); +} + +#endif + 
static int send_request(const char *socket, const struct strbuf *out) { int got_data = 0; @@ -28,7 +54,7 @@ static int send_request(const char *socket, const struct strbuf *out) int r; r = read_in_full(fd, in, sizeof(in)); - if (r == 0 || (r < 0 && errno == ECONNRESET)) + if (r == 0 || (r < 0 && connection_closed(errno))) break; if (r < 0) die_errno("read error from cache daemon"); @@ -75,7 +101,7 @@ static void do_cache(const char *socket, const char *action, int timeout, } if (send_request(socket, &buf) < 0) { - if (errno != ENOENT && errno != ECONNREFUSED) + if (connection_fatally_broken(errno)) die_errno("unable to connect to cache daemon"); if (flags & FLAG_SPAWN) { spawn_daemon(socket); @@ -90,7 +116,7 @@ static char *get_socket_path(void) { struct stat sb; char *old_dir, *socket; - old_dir = expand_user_path("~/.git-credential-cache", 0); + old_dir = interpolate_path("~/.git-credential-cache", 0); if (old_dir && !stat(old_dir, &sb) && S_ISDIR(sb.st_mode)) socket = xstrfmt("%s/socket", old_dir); else diff --git a/builtin/credential-store.c b/builtin/credential-store.c index ae3c1ba75f..62a4f3c265 100644 --- a/builtin/credential-store.c +++ b/builtin/credential-store.c @@ -173,7 +173,7 @@ int cmd_credential_store(int argc, const char **argv, const char *prefix) if (file) { string_list_append(&fns, file); } else { - if ((file = expand_user_path("~/.git-credentials", 0))) + if ((file = interpolate_path("~/.git-credentials", 0))) string_list_append_nodup(&fns, file); file = xdg_config_home("credentials"); if (file) diff --git a/builtin/diff-index.c b/builtin/diff-index.c index cf09559e42..5fd23ab5b6 100644 --- a/builtin/diff-index.c +++ b/builtin/diff-index.c @@ -29,10 +29,10 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix) prefix = precompose_argv_prefix(argc, argv, prefix); /* - * We need no diff for merges options, and we need to avoid conflict - * with our own meaning of "-m". + * We need (some of) diff for merges options (e.g., --cc), and we need + * to avoid conflict with our own meaning of "-m". 
*/ - diff_merges_suppress_options_parsing(); + diff_merges_suppress_m_parsing(); argc = setup_revisions(argc, argv, &rev, NULL); for (i = 1; i < argc; i++) { diff --git a/builtin/difftool.c b/builtin/difftool.c index 6a9242a803..210da03908 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -331,7 +331,7 @@ static int checkout_path(unsigned mode, struct object_id *oid, } static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, - int argc, const char **argv) + struct child_process *child) { char tmpdir[PATH_MAX]; struct strbuf info = STRBUF_INIT, lpath = STRBUF_INIT; @@ -352,7 +352,6 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, struct index_state wtindex; struct checkout lstate, rstate; int rc, flags = RUN_GIT_CMD, err = 0; - struct child_process child = CHILD_PROCESS_INIT; const char *helper_argv[] = { "difftool--helper", NULL, NULL, NULL }; struct hashmap wt_modified, tmp_modified; int indices_loaded = 0; @@ -387,19 +386,15 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, rdir_len = rdir.len; wtdir_len = wtdir.len; - child.no_stdin = 1; - child.git_cmd = 1; - child.use_shell = 0; - child.clean_on_exit = 1; - child.dir = prefix; - child.out = -1; - strvec_pushl(&child.args, "diff", "--raw", "--no-abbrev", "-z", - NULL); - for (i = 0; i < argc; i++) - strvec_push(&child.args, argv[i]); - if (start_command(&child)) + child->no_stdin = 1; + child->git_cmd = 1; + child->use_shell = 0; + child->clean_on_exit = 1; + child->dir = prefix; + child->out = -1; + if (start_command(child)) die("could not obtain raw diff"); - fp = xfdopen(child.out, "r"); + fp = xfdopen(child->out, "r"); /* Build index info for left and right sides of the diff */ i = 0; @@ -410,9 +405,9 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, const char *src_path, *dst_path; if (starts_with(info.buf, "::")) - die(N_("combined diff formats('-c' and '--cc') are " + die(N_("combined diff formats ('-c' and '--cc') are " "not supported in\n" - "directory diff mode('-d' and '--dir-diff').")); + "directory diff mode ('-d' and '--dir-diff').")); if (parse_index_info(info.buf, &lmode, &rmode, &loid, &roid, &status)) @@ -525,7 +520,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, fclose(fp); fp = NULL; - if (finish_command(&child)) { + if (finish_command(child)) { ret = error("error occurred running diff --raw"); goto finish; } @@ -562,11 +557,13 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, if (*entry->left) { add_path(&ldir, ldir_len, entry->path); ensure_leading_directories(ldir.buf); + unlink(ldir.buf); write_file(ldir.buf, "%s", entry->left); } if (*entry->right) { add_path(&rdir, rdir_len, entry->path); ensure_leading_directories(rdir.buf); + unlink(rdir.buf); write_file(rdir.buf, "%s", entry->right); } } @@ -668,25 +665,23 @@ finish: } static int run_file_diff(int prompt, const char *prefix, - int argc, const char **argv) + struct child_process *child) { - struct strvec args = STRVEC_INIT; const char *env[] = { "GIT_PAGER=", "GIT_EXTERNAL_DIFF=git-difftool--helper", NULL, NULL }; - int i; if (prompt > 0) env[2] = "GIT_DIFFTOOL_PROMPT=true"; else if (!prompt) env[2] = "GIT_DIFFTOOL_NO_PROMPT=true"; + child->git_cmd = 1; + child->dir = prefix; + strvec_pushv(&child->env_array, env); - strvec_push(&args, "diff"); - for (i = 0; i < argc; i++) - strvec_push(&args, argv[i]); - return run_command_v_opt_cd_env(args.v, RUN_GIT_CMD, prefix, env); 
+ return run_command(child); } int cmd_difftool(int argc, const char **argv, const char *prefix) @@ -713,12 +708,13 @@ int cmd_difftool(int argc, const char **argv, const char *prefix) "`--tool`")), OPT_BOOL(0, "trust-exit-code", &trust_exit_code, N_("make 'git-difftool' exit when an invoked diff " - "tool returns a non - zero exit code")), + "tool returns a non-zero exit code")), OPT_STRING('x', "extcmd", &extcmd, N_("command"), N_("specify a custom command for viewing diffs")), - OPT_ARGUMENT("no-index", &no_index, N_("passed to `diff`")), + OPT_BOOL(0, "no-index", &no_index, N_("passed to `diff`")), OPT_END() }; + struct child_process child = CHILD_PROCESS_INIT; git_config(difftool_config, NULL); symlinks = has_symlinks; @@ -768,7 +764,14 @@ int cmd_difftool(int argc, const char **argv, const char *prefix) * will invoke a separate instance of 'git-difftool--helper' for * each file that changed. */ + strvec_push(&child.args, "diff"); + if (no_index) + strvec_push(&child.args, "--no-index"); + if (dir_diff) + strvec_pushl(&child.args, "--raw", "--no-abbrev", "-z", NULL); + strvec_pushv(&child.args, argv); + if (dir_diff) - return run_dir_diff(extcmd, symlinks, prefix, argc, argv); - return run_file_diff(prompt, prefix, argc, argv); + return run_dir_diff(extcmd, symlinks, prefix, &child); + return run_file_diff(prompt, prefix, &child); } diff --git a/builtin/fast-export.c b/builtin/fast-export.c index 3c20f164f0..95e8e89e81 100644 --- a/builtin/fast-export.c +++ b/builtin/fast-export.c @@ -821,6 +821,7 @@ static void handle_tag(const char *name, struct tag *tag) static struct hashmap tags; message = anonymize_str(&tags, anonymize_tag, message, message_size, NULL); + message_size = strlen(message); } } diff --git a/builtin/fetch.c b/builtin/fetch.c index 25740c13df..f7abbc31ff 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -712,7 +712,7 @@ static void adjust_refcol_width(const struct ref *ref) int max, rlen, llen, len; /* uptodate lines are only shown on high verbosity level */ - if (!verbosity && oideq(&ref->peer_ref->old_oid, &ref->old_oid)) + if (verbosity <= 0 && oideq(&ref->peer_ref->old_oid, &ref->old_oid)) return; max = term_columns(); @@ -748,6 +748,9 @@ static void prepare_format_display(struct ref *ref_map) struct ref *rm; const char *format = "full"; + if (verbosity < 0) + return; + git_config_get_string_tmp("fetch.output", &format); if (!strcasecmp(format, "full")) compact_format = 0; @@ -827,7 +830,12 @@ static void format_display(struct strbuf *display, char code, const char *remote, const char *local, int summary_width) { - int width = (summary_width + strlen(summary) - gettext_width(summary)); + int width; + + if (verbosity < 0) + return; + + width = (summary_width + strlen(summary) - gettext_width(summary)); strbuf_addf(display, "%c %-*s ", code, width, summary); if (!compact_format) @@ -846,13 +854,11 @@ static int update_local_ref(struct ref *ref, int summary_width) { struct commit *current = NULL, *updated; - enum object_type type; struct branch *current_branch = branch_get(NULL); const char *pretty_ref = prettify_refname(ref->name); int fast_forward = 0; - type = oid_object_info(the_repository, &ref->new_oid, NULL); - if (type < 0) + if (!repo_has_object_file(the_repository, &ref->new_oid)) die(_("object %s not found"), oid_to_hex(&ref->new_oid)); if (oideq(&ref->old_oid, &ref->new_oid)) { @@ -964,7 +970,7 @@ static int update_local_ref(struct ref *ref, } } -static int iterate_ref_map(void *cb_data, struct object_id *oid) +static const struct object_id 
*iterate_ref_map(void *cb_data) { struct ref **rm = cb_data; struct ref *ref = *rm; @@ -972,10 +978,9 @@ static int iterate_ref_map(void *cb_data, struct object_id *oid) while (ref && ref->status == REF_STATUS_REJECT_SHALLOW) ref = ref->next; if (!ref) - return -1; /* end of the list */ + return NULL; *rm = ref->next; - oidcpy(oid, &ref->old_oid); - return 0; + return &ref->old_oid; } struct fetch_head { @@ -1074,7 +1079,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, int connectivity_checked, struct ref *ref_map) { struct fetch_head fetch_head; - struct commit *commit; int url_len, i, rc = 0; struct strbuf note = STRBUF_INIT, err = STRBUF_INIT; struct ref_transaction *transaction = NULL; @@ -1122,6 +1126,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, want_status <= FETCH_HEAD_IGNORE; want_status++) { for (rm = ref_map; rm; rm = rm->next) { + struct commit *commit = NULL; struct ref *ref = NULL; if (rm->status == REF_STATUS_REJECT_SHALLOW) { @@ -1131,11 +1136,23 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, continue; } - commit = lookup_commit_reference_gently(the_repository, - &rm->old_oid, - 1); - if (!commit) - rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE; + /* + * References in "refs/tags/" are often going to point + * to annotated tags, which are not part of the + * commit-graph. We thus only try to look up refs in + * the graph which are not in that namespace to not + * regress performance in repositories with many + * annotated tags. + */ + if (!starts_with(rm->name, "refs/tags/")) + commit = lookup_commit_in_graph(the_repository, &rm->old_oid); + if (!commit) { + commit = lookup_commit_reference_gently(the_repository, + &rm->old_oid, + 1); + if (!commit) + rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE; + } if (rm->fetch_head_status != want_status) continue; @@ -1202,13 +1219,12 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, "FETCH_HEAD", summary_width); } if (note.len) { - if (verbosity >= 0 && !shown_url) { + if (!shown_url) { fprintf(stderr, _("From %.*s\n"), url_len, url); shown_url = 1; } - if (verbosity >= 0) - fprintf(stderr, " %s\n", note.buf); + fprintf(stderr, " %s\n", note.buf); } } } @@ -1229,7 +1245,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, " 'git remote prune %s' to remove any old, conflicting " "branches"), remote_name); - if (advice_fetch_show_forced_updates) { + if (advice_enabled(ADVICE_FETCH_SHOW_FORCED_UPDATES)) { if (!fetch_show_forced_updates) { warning(_(warn_show_forced_updates)); } else if (forced_updates_ms > FORCED_UPDATES_DELAY_WARNING_IN_MS) { @@ -1282,37 +1298,35 @@ static int check_exist_and_connected(struct ref *ref_map) return check_connected(iterate_ref_map, &rm, &opt); } -static int fetch_refs(struct transport *transport, struct ref *ref_map) +static int fetch_and_consume_refs(struct transport *transport, struct ref *ref_map) { - int ret = check_exist_and_connected(ref_map); + int connectivity_checked = 1; + int ret; + + /* + * We don't need to perform a fetch in case we can already satisfy all + * refs. + */ + ret = check_exist_and_connected(ref_map); if (ret) { trace2_region_enter("fetch", "fetch_refs", the_repository); ret = transport_fetch_refs(transport, ref_map); trace2_region_leave("fetch", "fetch_refs", the_repository); + if (ret) + goto out; + connectivity_checked = transport->smart_options ? 
+ transport->smart_options->connectivity_checked : 0; } - if (!ret) - /* - * Keep the new pack's ".keep" file around to allow the caller - * time to update refs to reference the new objects. - */ - return 0; - transport_unlock_pack(transport); - return ret; -} -/* Update local refs based on the ref values fetched from a remote */ -static int consume_refs(struct transport *transport, struct ref *ref_map) -{ - int connectivity_checked = transport->smart_options - ? transport->smart_options->connectivity_checked : 0; - int ret; trace2_region_enter("fetch", "consume_refs", the_repository); ret = store_updated_refs(transport->url, transport->remote->name, connectivity_checked, ref_map); - transport_unlock_pack(transport); trace2_region_leave("fetch", "consume_refs", the_repository); + +out: + transport_unlock_pack(transport); return ret; } @@ -1428,7 +1442,9 @@ static void add_negotiation_tips(struct git_transport_options *smart_options) if (!has_glob_specials(s)) { struct object_id oid; if (get_oid(s, &oid)) - die("%s is not a valid object", s); + die(_("%s is not a valid object"), s); + if (!has_object(the_repository, &oid, 0)) + die(_("the object %s does not exist"), s); oid_array_append(oids, &oid); continue; } @@ -1499,8 +1515,7 @@ static void backfill_tags(struct transport *transport, struct ref *ref_map) transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL); transport_set_option(transport, TRANS_OPT_DEPTH, "0"); transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL); - if (!fetch_refs(transport, ref_map)) - consume_refs(transport, ref_map); + fetch_and_consume_refs(transport, ref_map); if (gsecondary) { transport_disconnect(gsecondary); @@ -1591,7 +1606,7 @@ static int do_fetch(struct transport *transport, transport->url); } } - if (fetch_refs(transport, ref_map) || consume_refs(transport, ref_map)) { + if (fetch_and_consume_refs(transport, ref_map)) { free_refs(ref_map); retcode = 1; goto cleanup; @@ -2133,8 +2148,6 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) NULL); } - close_object_store(the_repository->objects); - if (enable_auto_gc) run_auto_maintenance(verbosity < 0); diff --git a/builtin/gc.c b/builtin/gc.c index f05d2f0a1a..6b3de3dd51 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -502,7 +502,7 @@ static int report_last_gc_error(void) */ warning(_("The last gc run reported the following. 
" "Please correct the root cause\n" - "and remove %s.\n" + "and remove %s\n" "Automatic cleanup will not be performed " "until the file is removed.\n\n" "%s"), @@ -663,8 +663,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix) gc_before_repack(); if (!repository_format_precious_objects) { - close_object_store(the_repository->objects); - if (run_command_v_opt(repack.v, RUN_GIT_CMD)) + if (run_command_v_opt(repack.v, + RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE)) die(FAILED_RUN, repack.v[0]); if (prune_expire) { @@ -848,7 +848,7 @@ static int run_write_commit_graph(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_pushl(&child.args, "commit-graph", "write", "--split", "--reachable", NULL); @@ -864,7 +864,6 @@ static int maintenance_task_commit_graph(struct maintenance_run_opts *opts) if (!the_repository->settings.core_commit_graph) return 0; - close_object_store(the_repository->objects); if (run_write_commit_graph(opts)) { error(_("failed to write commit-graph")); return 1; @@ -913,7 +912,7 @@ static int maintenance_task_gc(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_push(&child.args, "gc"); if (opts->auto_flag) @@ -923,7 +922,6 @@ static int maintenance_task_gc(struct maintenance_run_opts *opts) else strvec_push(&child.args, "--no-quiet"); - close_object_store(the_repository->objects); return run_command(&child); } @@ -1097,14 +1095,12 @@ static int multi_pack_index_expire(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_pushl(&child.args, "multi-pack-index", "expire", NULL); if (opts->quiet) strvec_push(&child.args, "--no-progress"); - close_object_store(the_repository->objects); - if (run_command(&child)) return error(_("'git multi-pack-index expire' failed")); @@ -1155,7 +1151,7 @@ static int multi_pack_index_repack(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_pushl(&child.args, "multi-pack-index", "repack", NULL); if (opts->quiet) @@ -1164,8 +1160,6 @@ static int multi_pack_index_repack(struct maintenance_run_opts *opts) strvec_pushf(&child.args, "--batch-size=%"PRIuMAX, (uintmax_t)get_auto_pack_size()); - close_object_store(the_repository->objects); - if (run_command(&child)) return error(_("'git multi-pack-index repack' failed")); @@ -1529,6 +1523,93 @@ static const char *get_frequency(enum schedule_priority schedule) } } +/* + * get_schedule_cmd` reads the GIT_TEST_MAINT_SCHEDULER environment variable + * to mock the schedulers that `git maintenance start` rely on. + * + * For test purpose, GIT_TEST_MAINT_SCHEDULER can be set to a comma-separated + * list of colon-separated key/value pairs where each pair contains a scheduler + * and its corresponding mock. + * + * * If $GIT_TEST_MAINT_SCHEDULER is not set, return false and leave the + * arguments unmodified. + * + * * If $GIT_TEST_MAINT_SCHEDULER is set, return true. + * In this case, the *cmd value is read as input. + * + * * if the input value *cmd is the key of one of the comma-separated list + * item, then *is_available is set to true and *cmd is modified and becomes + * the mock command. 
+ * + * * if the input value *cmd isn’t the key of any of the comma-separated list + * item, then *is_available is set to false. + * + * Ex.: + * GIT_TEST_MAINT_SCHEDULER not set + * +-------+-------------------------------------------------+ + * | Input | Output | + * | *cmd | return code | *cmd | *is_available | + * +-------+-------------+-------------------+---------------+ + * | "foo" | false | "foo" (unchanged) | (unchanged) | + * +-------+-------------+-------------------+---------------+ + * + * GIT_TEST_MAINT_SCHEDULER set to “foo:./mock_foo.sh,bar:./mock_bar.sh” + * +-------+-------------------------------------------------+ + * | Input | Output | + * | *cmd | return code | *cmd | *is_available | + * +-------+-------------+-------------------+---------------+ + * | "foo" | true | "./mock.foo.sh" | true | + * | "qux" | true | "qux" (unchanged) | false | + * +-------+-------------+-------------------+---------------+ + */ +static int get_schedule_cmd(const char **cmd, int *is_available) +{ + char *testing = xstrdup_or_null(getenv("GIT_TEST_MAINT_SCHEDULER")); + struct string_list_item *item; + struct string_list list = STRING_LIST_INIT_NODUP; + + if (!testing) + return 0; + + if (is_available) + *is_available = 0; + + string_list_split_in_place(&list, testing, ',', -1); + for_each_string_list_item(item, &list) { + struct string_list pair = STRING_LIST_INIT_NODUP; + + if (string_list_split_in_place(&pair, item->string, ':', 2) != 2) + continue; + + if (!strcmp(*cmd, pair.items[0].string)) { + *cmd = pair.items[1].string; + if (is_available) + *is_available = 1; + string_list_clear(&list, 0); + UNLEAK(testing); + return 1; + } + } + + string_list_clear(&list, 0); + free(testing); + return 1; +} + +static int is_launchctl_available(void) +{ + const char *cmd = "launchctl"; + int is_available; + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + +#ifdef __APPLE__ + return 1; +#else + return 0; +#endif +} + static char *launchctl_service_name(const char *frequency) { struct strbuf label = STRBUF_INIT; @@ -1542,7 +1623,7 @@ static char *launchctl_service_filename(const char *name) struct strbuf filename = STRBUF_INIT; strbuf_addf(&filename, "~/Library/LaunchAgents/%s.plist", name); - expanded = expand_user_path(filename.buf, 1); + expanded = interpolate_path(filename.buf, 1); if (!expanded) die(_("failed to expand path '%s'"), filename.buf); @@ -1555,19 +1636,17 @@ static char *launchctl_get_uid(void) return xstrfmt("gui/%d", getuid()); } -static int launchctl_boot_plist(int enable, const char *filename, const char *cmd) +static int launchctl_boot_plist(int enable, const char *filename) { + const char *cmd = "launchctl"; int result; struct child_process child = CHILD_PROCESS_INIT; char *uid = launchctl_get_uid(); + get_schedule_cmd(&cmd, NULL); strvec_split(&child.args, cmd); - if (enable) - strvec_push(&child.args, "bootstrap"); - else - strvec_push(&child.args, "bootout"); - strvec_push(&child.args, uid); - strvec_push(&child.args, filename); + strvec_pushl(&child.args, enable ? 
"bootstrap" : "bootout", uid, + filename, NULL); child.no_stderr = 1; child.no_stdout = 1; @@ -1581,38 +1660,56 @@ static int launchctl_boot_plist(int enable, const char *filename, const char *cm return result; } -static int launchctl_remove_plist(enum schedule_priority schedule, const char *cmd) +static int launchctl_remove_plist(enum schedule_priority schedule) { const char *frequency = get_frequency(schedule); char *name = launchctl_service_name(frequency); char *filename = launchctl_service_filename(name); - int result = launchctl_boot_plist(0, filename, cmd); + int result = launchctl_boot_plist(0, filename); unlink(filename); free(filename); free(name); return result; } -static int launchctl_remove_plists(const char *cmd) +static int launchctl_remove_plists(void) { - return launchctl_remove_plist(SCHEDULE_HOURLY, cmd) || - launchctl_remove_plist(SCHEDULE_DAILY, cmd) || - launchctl_remove_plist(SCHEDULE_WEEKLY, cmd); + return launchctl_remove_plist(SCHEDULE_HOURLY) || + launchctl_remove_plist(SCHEDULE_DAILY) || + launchctl_remove_plist(SCHEDULE_WEEKLY); } -static int launchctl_schedule_plist(const char *exec_path, enum schedule_priority schedule, const char *cmd) +static int launchctl_list_contains_plist(const char *name, const char *cmd) { - FILE *plist; - int i; + struct child_process child = CHILD_PROCESS_INIT; + + strvec_split(&child.args, cmd); + strvec_pushl(&child.args, "list", name, NULL); + + child.no_stderr = 1; + child.no_stdout = 1; + + if (start_command(&child)) + die(_("failed to start launchctl")); + + /* Returns failure if 'name' doesn't exist. */ + return !finish_command(&child); +} + +static int launchctl_schedule_plist(const char *exec_path, enum schedule_priority schedule) +{ + int i, fd; const char *preamble, *repeat; const char *frequency = get_frequency(schedule); char *name = launchctl_service_name(frequency); char *filename = launchctl_service_filename(name); + struct lock_file lk = LOCK_INIT; + static unsigned long lock_file_timeout_ms = ULONG_MAX; + struct strbuf plist = STRBUF_INIT, plist2 = STRBUF_INIT; + struct stat st; + const char *cmd = "launchctl"; - if (safe_create_leading_directories(filename)) - die(_("failed to create directories for '%s'"), filename); - plist = xfopen(filename, "w"); - + get_schedule_cmd(&cmd, NULL); preamble = "<?xml version=\"1.0\"?>\n" "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n" "<plist version=\"1.0\">" @@ -1630,7 +1727,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit "</array>\n" "<key>StartCalendarInterval</key>\n" "<array>\n"; - fprintf(plist, preamble, name, exec_path, exec_path, frequency); + strbuf_addf(&plist, preamble, name, exec_path, exec_path, frequency); switch (schedule) { case SCHEDULE_HOURLY: @@ -1639,7 +1736,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit "<key>Minute</key><integer>0</integer>\n" "</dict>\n"; for (i = 1; i <= 23; i++) - fprintf(plist, repeat, i); + strbuf_addf(&plist, repeat, i); break; case SCHEDULE_DAILY: @@ -1649,50 +1746,91 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit "<key>Minute</key><integer>0</integer>\n" "</dict>\n"; for (i = 1; i <= 6; i++) - fprintf(plist, repeat, i); + strbuf_addf(&plist, repeat, i); break; case SCHEDULE_WEEKLY: - fprintf(plist, - "<dict>\n" - "<key>Day</key><integer>0</integer>\n" - "<key>Hour</key><integer>0</integer>\n" - "<key>Minute</key><integer>0</integer>\n" - "</dict>\n"); + 
strbuf_addstr(&plist, + "<dict>\n" + "<key>Day</key><integer>0</integer>\n" + "<key>Hour</key><integer>0</integer>\n" + "<key>Minute</key><integer>0</integer>\n" + "</dict>\n"); break; default: /* unreachable */ break; } - fprintf(plist, "</array>\n</dict>\n</plist>\n"); - fclose(plist); + strbuf_addstr(&plist, "</array>\n</dict>\n</plist>\n"); + + if (safe_create_leading_directories(filename)) + die(_("failed to create directories for '%s'"), filename); - /* bootout might fail if not already running, so ignore */ - launchctl_boot_plist(0, filename, cmd); - if (launchctl_boot_plist(1, filename, cmd)) - die(_("failed to bootstrap service %s"), filename); + if ((long)lock_file_timeout_ms < 0 && + git_config_get_ulong("gc.launchctlplistlocktimeoutms", + &lock_file_timeout_ms)) + lock_file_timeout_ms = 150; + + fd = hold_lock_file_for_update_timeout(&lk, filename, LOCK_DIE_ON_ERROR, + lock_file_timeout_ms); + + /* + * Does this file already exist? With the intended contents? Is it + * registered already? Then it does not need to be re-registered. + */ + if (!stat(filename, &st) && st.st_size == plist.len && + strbuf_read_file(&plist2, filename, plist.len) == plist.len && + !strbuf_cmp(&plist, &plist2) && + launchctl_list_contains_plist(name, cmd)) + rollback_lock_file(&lk); + else { + if (write_in_full(fd, plist.buf, plist.len) < 0 || + commit_lock_file(&lk)) + die_errno(_("could not write '%s'"), filename); + + /* bootout might fail if not already running, so ignore */ + launchctl_boot_plist(0, filename); + if (launchctl_boot_plist(1, filename)) + die(_("failed to bootstrap service %s"), filename); + } free(filename); free(name); + strbuf_release(&plist); + strbuf_release(&plist2); return 0; } -static int launchctl_add_plists(const char *cmd) +static int launchctl_add_plists(void) { const char *exec_path = git_exec_path(); - return launchctl_schedule_plist(exec_path, SCHEDULE_HOURLY, cmd) || - launchctl_schedule_plist(exec_path, SCHEDULE_DAILY, cmd) || - launchctl_schedule_plist(exec_path, SCHEDULE_WEEKLY, cmd); + return launchctl_schedule_plist(exec_path, SCHEDULE_HOURLY) || + launchctl_schedule_plist(exec_path, SCHEDULE_DAILY) || + launchctl_schedule_plist(exec_path, SCHEDULE_WEEKLY); } -static int launchctl_update_schedule(int run_maintenance, int fd, const char *cmd) +static int launchctl_update_schedule(int run_maintenance, int fd) { if (run_maintenance) - return launchctl_add_plists(cmd); + return launchctl_add_plists(); else - return launchctl_remove_plists(cmd); + return launchctl_remove_plists(); +} + +static int is_schtasks_available(void) +{ + const char *cmd = "schtasks"; + int is_available; + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + +#ifdef GIT_WINDOWS_NATIVE + return 1; +#else + return 0; +#endif } static char *schtasks_task_name(const char *frequency) @@ -1702,13 +1840,15 @@ static char *schtasks_task_name(const char *frequency) return strbuf_detach(&label, NULL); } -static int schtasks_remove_task(enum schedule_priority schedule, const char *cmd) +static int schtasks_remove_task(enum schedule_priority schedule) { + const char *cmd = "schtasks"; int result; struct strvec args = STRVEC_INIT; const char *frequency = get_frequency(schedule); char *name = schtasks_task_name(frequency); + get_schedule_cmd(&cmd, NULL); strvec_split(&args, cmd); strvec_pushl(&args, "/delete", "/tn", name, "/f", NULL); @@ -1719,15 +1859,16 @@ static int schtasks_remove_task(enum schedule_priority schedule, const char *cmd return result; } -static int 
schtasks_remove_tasks(const char *cmd) +static int schtasks_remove_tasks(void) { - return schtasks_remove_task(SCHEDULE_HOURLY, cmd) || - schtasks_remove_task(SCHEDULE_DAILY, cmd) || - schtasks_remove_task(SCHEDULE_WEEKLY, cmd); + return schtasks_remove_task(SCHEDULE_HOURLY) || + schtasks_remove_task(SCHEDULE_DAILY) || + schtasks_remove_task(SCHEDULE_WEEKLY); } -static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule, const char *cmd) +static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule) { + const char *cmd = "schtasks"; int result; struct child_process child = CHILD_PROCESS_INIT; const char *xml; @@ -1736,6 +1877,8 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority char *name = schtasks_task_name(frequency); struct strbuf tfilename = STRBUF_INIT; + get_schedule_cmd(&cmd, NULL); + strbuf_addf(&tfilename, "%s/schedule_%s_XXXXXX", get_git_common_dir(), frequency); tfile = xmks_tempfile(tfilename.buf); @@ -1840,28 +1983,52 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority return result; } -static int schtasks_schedule_tasks(const char *cmd) +static int schtasks_schedule_tasks(void) { const char *exec_path = git_exec_path(); - return schtasks_schedule_task(exec_path, SCHEDULE_HOURLY, cmd) || - schtasks_schedule_task(exec_path, SCHEDULE_DAILY, cmd) || - schtasks_schedule_task(exec_path, SCHEDULE_WEEKLY, cmd); + return schtasks_schedule_task(exec_path, SCHEDULE_HOURLY) || + schtasks_schedule_task(exec_path, SCHEDULE_DAILY) || + schtasks_schedule_task(exec_path, SCHEDULE_WEEKLY); } -static int schtasks_update_schedule(int run_maintenance, int fd, const char *cmd) +static int schtasks_update_schedule(int run_maintenance, int fd) { if (run_maintenance) - return schtasks_schedule_tasks(cmd); + return schtasks_schedule_tasks(); else - return schtasks_remove_tasks(cmd); + return schtasks_remove_tasks(); +} + +static int is_crontab_available(void) +{ + const char *cmd = "crontab"; + int is_available; + struct child_process child = CHILD_PROCESS_INIT; + + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + + strvec_split(&child.args, cmd); + strvec_push(&child.args, "-l"); + child.no_stdin = 1; + child.no_stdout = 1; + child.no_stderr = 1; + child.silent_exec_failure = 1; + + if (start_command(&child)) + return 0; + /* Ignore exit code, as an empty crontab will return error. 
*/ + finish_command(&child); + return 1; } #define BEGIN_LINE "# BEGIN GIT MAINTENANCE SCHEDULE" #define END_LINE "# END GIT MAINTENANCE SCHEDULE" -static int crontab_update_schedule(int run_maintenance, int fd, const char *cmd) +static int crontab_update_schedule(int run_maintenance, int fd) { + const char *cmd = "crontab"; int result = 0; int in_old_region = 0; struct child_process crontab_list = CHILD_PROCESS_INIT; @@ -1869,6 +2036,7 @@ static int crontab_update_schedule(int run_maintenance, int fd, const char *cmd) FILE *cron_list, *cron_in; struct strbuf line = STRBUF_INIT; + get_schedule_cmd(&cmd, NULL); strvec_split(&crontab_list.args, cmd); strvec_push(&crontab_list.args, "-l"); crontab_list.in = -1; @@ -1945,66 +2113,376 @@ done_editing: return result; } +static int real_is_systemd_timer_available(void) +{ + struct child_process child = CHILD_PROCESS_INIT; + + strvec_pushl(&child.args, "systemctl", "--user", "list-timers", NULL); + child.no_stdin = 1; + child.no_stdout = 1; + child.no_stderr = 1; + child.silent_exec_failure = 1; + + if (start_command(&child)) + return 0; + if (finish_command(&child)) + return 0; + return 1; +} + +static int is_systemd_timer_available(void) +{ + const char *cmd = "systemctl"; + int is_available; + + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + + return real_is_systemd_timer_available(); +} + +static char *xdg_config_home_systemd(const char *filename) +{ + return xdg_config_home_for("systemd/user", filename); +} + +static int systemd_timer_enable_unit(int enable, + enum schedule_priority schedule) +{ + const char *cmd = "systemctl"; + struct child_process child = CHILD_PROCESS_INIT; + const char *frequency = get_frequency(schedule); + + /* + * Disabling the systemd unit while it is already disabled makes + * systemctl print an error. + * Let's ignore it since it means we already are in the expected state: + * the unit is disabled. + * + * On the other hand, enabling a systemd unit which is already enabled + * produces no error. + */ + if (!enable) + child.no_stderr = 1; + + get_schedule_cmd(&cmd, NULL); + strvec_split(&child.args, cmd); + strvec_pushl(&child.args, "--user", enable ? "enable" : "disable", + "--now", NULL); + strvec_pushf(&child.args, "git-maintenance@%s.timer", frequency); + + if (start_command(&child)) + return error(_("failed to start systemctl")); + if (finish_command(&child)) + /* + * Disabling an already disabled systemd unit makes + * systemctl fail. + * Let's ignore this failure. + * + * Enabling an enabled systemd unit doesn't fail. 
+ */ + if (enable) + return error(_("failed to run systemctl")); + return 0; +} + +static int systemd_timer_delete_unit_templates(void) +{ + int ret = 0; + char *filename = xdg_config_home_systemd("git-maintenance@.timer"); + if (unlink(filename) && !is_missing_file_error(errno)) + ret = error_errno(_("failed to delete '%s'"), filename); + FREE_AND_NULL(filename); + + filename = xdg_config_home_systemd("git-maintenance@.service"); + if (unlink(filename) && !is_missing_file_error(errno)) + ret = error_errno(_("failed to delete '%s'"), filename); + + free(filename); + return ret; +} + +static int systemd_timer_delete_units(void) +{ + return systemd_timer_enable_unit(0, SCHEDULE_HOURLY) || + systemd_timer_enable_unit(0, SCHEDULE_DAILY) || + systemd_timer_enable_unit(0, SCHEDULE_WEEKLY) || + systemd_timer_delete_unit_templates(); +} + +static int systemd_timer_write_unit_templates(const char *exec_path) +{ + char *filename; + FILE *file; + const char *unit; + + filename = xdg_config_home_systemd("git-maintenance@.timer"); + if (safe_create_leading_directories(filename)) { + error(_("failed to create directories for '%s'"), filename); + goto error; + } + file = fopen_or_warn(filename, "w"); + if (file == NULL) + goto error; + + unit = "# This file was created and is maintained by Git.\n" + "# Any edits made in this file might be replaced in the future\n" + "# by a Git command.\n" + "\n" + "[Unit]\n" + "Description=Optimize Git repositories data\n" + "\n" + "[Timer]\n" + "OnCalendar=%i\n" + "Persistent=true\n" + "\n" + "[Install]\n" + "WantedBy=timers.target\n"; + if (fputs(unit, file) == EOF) { + error(_("failed to write to '%s'"), filename); + fclose(file); + goto error; + } + if (fclose(file) == EOF) { + error_errno(_("failed to flush '%s'"), filename); + goto error; + } + free(filename); + + filename = xdg_config_home_systemd("git-maintenance@.service"); + file = fopen_or_warn(filename, "w"); + if (file == NULL) + goto error; + + unit = "# This file was created and is maintained by Git.\n" + "# Any edits made in this file might be replaced in the future\n" + "# by a Git command.\n" + "\n" + "[Unit]\n" + "Description=Optimize Git repositories data\n" + "\n" + "[Service]\n" + "Type=oneshot\n" + "ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%i\n" + "LockPersonality=yes\n" + "MemoryDenyWriteExecute=yes\n" + "NoNewPrivileges=yes\n" + "RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6\n" + "RestrictNamespaces=yes\n" + "RestrictRealtime=yes\n" + "RestrictSUIDSGID=yes\n" + "SystemCallArchitectures=native\n" + "SystemCallFilter=@system-service\n"; + if (fprintf(file, unit, exec_path, exec_path) < 0) { + error(_("failed to write to '%s'"), filename); + fclose(file); + goto error; + } + if (fclose(file) == EOF) { + error_errno(_("failed to flush '%s'"), filename); + goto error; + } + free(filename); + return 0; + +error: + free(filename); + systemd_timer_delete_unit_templates(); + return -1; +} + +static int systemd_timer_setup_units(void) +{ + const char *exec_path = git_exec_path(); + + int ret = systemd_timer_write_unit_templates(exec_path) || + systemd_timer_enable_unit(1, SCHEDULE_HOURLY) || + systemd_timer_enable_unit(1, SCHEDULE_DAILY) || + systemd_timer_enable_unit(1, SCHEDULE_WEEKLY); + if (ret) + systemd_timer_delete_units(); + return ret; +} + +static int systemd_timer_update_schedule(int run_maintenance, int fd) +{ + if (run_maintenance) + return systemd_timer_setup_units(); + else + return systemd_timer_delete_units(); +} + 
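The two files written above are systemd template units (note the "@" in git-maintenance@.timer and git-maintenance@.service): a single template serves all three frequencies, and systemd replaces the "%i" specifier with the instance name when a unit such as git-maintenance@hourly.timer is enabled. The "%%i" in the ExecStart line is doubled only to survive fprintf(); the file on disk contains a literal "%i". For the hourly instance, systemd therefore effectively sees something like the following (an illustrative rendering, with <exec-path> standing in for the value of git_exec_path(); Git never writes such an expanded file):

# git-maintenance@hourly.timer, effective values after %i substitution
[Timer]
OnCalendar=hourly
Persistent=true

# git-maintenance@hourly.service, effective ExecStart after %i substitution
[Service]
Type=oneshot
ExecStart="<exec-path>/git" --exec-path="<exec-path>" for-each-repo --config=maintenance.repo maintenance run --schedule=hourly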
+enum scheduler { + SCHEDULER_INVALID = -1, + SCHEDULER_AUTO, + SCHEDULER_CRON, + SCHEDULER_SYSTEMD, + SCHEDULER_LAUNCHCTL, + SCHEDULER_SCHTASKS, +}; + +static const struct { + const char *name; + int (*is_available)(void); + int (*update_schedule)(int run_maintenance, int fd); +} scheduler_fn[] = { + [SCHEDULER_CRON] = { + .name = "crontab", + .is_available = is_crontab_available, + .update_schedule = crontab_update_schedule, + }, + [SCHEDULER_SYSTEMD] = { + .name = "systemctl", + .is_available = is_systemd_timer_available, + .update_schedule = systemd_timer_update_schedule, + }, + [SCHEDULER_LAUNCHCTL] = { + .name = "launchctl", + .is_available = is_launchctl_available, + .update_schedule = launchctl_update_schedule, + }, + [SCHEDULER_SCHTASKS] = { + .name = "schtasks", + .is_available = is_schtasks_available, + .update_schedule = schtasks_update_schedule, + }, +}; + +static enum scheduler parse_scheduler(const char *value) +{ + if (!value) + return SCHEDULER_INVALID; + else if (!strcasecmp(value, "auto")) + return SCHEDULER_AUTO; + else if (!strcasecmp(value, "cron") || !strcasecmp(value, "crontab")) + return SCHEDULER_CRON; + else if (!strcasecmp(value, "systemd") || + !strcasecmp(value, "systemd-timer")) + return SCHEDULER_SYSTEMD; + else if (!strcasecmp(value, "launchctl")) + return SCHEDULER_LAUNCHCTL; + else if (!strcasecmp(value, "schtasks")) + return SCHEDULER_SCHTASKS; + else + return SCHEDULER_INVALID; +} + +static int maintenance_opt_scheduler(const struct option *opt, const char *arg, + int unset) +{ + enum scheduler *scheduler = opt->value; + + BUG_ON_OPT_NEG(unset); + + *scheduler = parse_scheduler(arg); + if (*scheduler == SCHEDULER_INVALID) + return error(_("unrecognized --scheduler argument '%s'"), arg); + return 0; +} + +struct maintenance_start_opts { + enum scheduler scheduler; +}; + +static enum scheduler resolve_scheduler(enum scheduler scheduler) +{ + if (scheduler != SCHEDULER_AUTO) + return scheduler; + #if defined(__APPLE__) -static const char platform_scheduler[] = "launchctl"; + return SCHEDULER_LAUNCHCTL; + #elif defined(GIT_WINDOWS_NATIVE) -static const char platform_scheduler[] = "schtasks"; + return SCHEDULER_SCHTASKS; + +#elif defined(__linux__) + if (is_systemd_timer_available()) + return SCHEDULER_SYSTEMD; + else if (is_crontab_available()) + return SCHEDULER_CRON; + else + die(_("neither systemd timers nor crontab are available")); + #else -static const char platform_scheduler[] = "crontab"; + return SCHEDULER_CRON; #endif +} -static int update_background_schedule(int enable) +static void validate_scheduler(enum scheduler scheduler) { - int result; - const char *scheduler = platform_scheduler; - const char *cmd = scheduler; - char *testing; + if (scheduler == SCHEDULER_INVALID) + BUG("invalid scheduler"); + if (scheduler == SCHEDULER_AUTO) + BUG("resolve_scheduler should have been called before"); + + if (!scheduler_fn[scheduler].is_available()) + die(_("%s scheduler is not available"), + scheduler_fn[scheduler].name); +} + +static int update_background_schedule(const struct maintenance_start_opts *opts, + int enable) +{ + unsigned int i; + int result = 0; struct lock_file lk; char *lock_path = xstrfmt("%s/schedule", the_repository->objects->odb->path); - testing = xstrdup_or_null(getenv("GIT_TEST_MAINT_SCHEDULER")); - if (testing) { - char *sep = strchr(testing, ':'); - if (!sep) - die("GIT_TEST_MAINT_SCHEDULER unparseable: %s", testing); - *sep = '\0'; - scheduler = testing; - cmd = sep + 1; + if (hold_lock_file_for_update(&lk, lock_path, 
LOCK_NO_DEREF) < 0) { + free(lock_path); + return error(_("another process is scheduling background maintenance")); } - if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) { - result = error(_("another process is scheduling background maintenance")); - goto cleanup; + for (i = 1; i < ARRAY_SIZE(scheduler_fn); i++) { + if (enable && opts->scheduler == i) + continue; + if (!scheduler_fn[i].is_available()) + continue; + scheduler_fn[i].update_schedule(0, get_lock_file_fd(&lk)); } - if (!strcmp(scheduler, "launchctl")) - result = launchctl_update_schedule(enable, get_lock_file_fd(&lk), cmd); - else if (!strcmp(scheduler, "schtasks")) - result = schtasks_update_schedule(enable, get_lock_file_fd(&lk), cmd); - else if (!strcmp(scheduler, "crontab")) - result = crontab_update_schedule(enable, get_lock_file_fd(&lk), cmd); - else - die("unknown background scheduler: %s", scheduler); + if (enable) + result = scheduler_fn[opts->scheduler].update_schedule( + 1, get_lock_file_fd(&lk)); rollback_lock_file(&lk); -cleanup: free(lock_path); - free(testing); return result; } -static int maintenance_start(void) +static const char *const builtin_maintenance_start_usage[] = { + N_("git maintenance start [--scheduler=<scheduler>]"), + NULL +}; + +static int maintenance_start(int argc, const char **argv, const char *prefix) { + struct maintenance_start_opts opts = { 0 }; + struct option options[] = { + OPT_CALLBACK_F( + 0, "scheduler", &opts.scheduler, N_("scheduler"), + N_("scheduler to trigger git maintenance run"), + PARSE_OPT_NONEG, maintenance_opt_scheduler), + OPT_END() + }; + + argc = parse_options(argc, argv, prefix, options, + builtin_maintenance_start_usage, 0); + if (argc) + usage_with_options(builtin_maintenance_start_usage, options); + + opts.scheduler = resolve_scheduler(opts.scheduler); + validate_scheduler(opts.scheduler); + if (maintenance_register()) warning(_("failed to add repo to global config")); - - return update_background_schedule(1); + return update_background_schedule(&opts, 1); } static int maintenance_stop(void) { - return update_background_schedule(0); + return update_background_schedule(NULL, 0); } static const char builtin_maintenance_usage[] = N_("git maintenance <subcommand> [<options>]"); @@ -2018,7 +2496,7 @@ int cmd_maintenance(int argc, const char **argv, const char *prefix) if (!strcmp(argv[1], "run")) return maintenance_run(argc - 1, argv + 1, prefix); if (!strcmp(argv[1], "start")) - return maintenance_start(); + return maintenance_start(argc - 1, argv + 1, prefix); if (!strcmp(argv[1], "stop")) return maintenance_stop(); if (!strcmp(argv[1], "register")) diff --git a/builtin/grep.c b/builtin/grep.c index 7d2f8e5adb..51278b01fa 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -65,6 +65,9 @@ static int todo_done; /* Has all work items been added? */ static int all_work_added; +static struct repository **repos_to_free; +static size_t repos_to_free_nr, repos_to_free_alloc; + /* This lock protects all the variables above. 
*/ static pthread_mutex_t grep_mutex; @@ -168,6 +171,19 @@ static void work_done(struct work_item *w) grep_unlock(); } +static void free_repos(void) +{ + int i; + + for (i = 0; i < repos_to_free_nr; i++) { + repo_clear(repos_to_free[i]); + free(repos_to_free[i]); + } + FREE_AND_NULL(repos_to_free); + repos_to_free_nr = 0; + repos_to_free_alloc = 0; +} + static void *run(void *arg) { int hit = 0; @@ -333,7 +349,7 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid, struct grep_source gs; grep_source_name(opt, filename, tree_name_len, &pathbuf); - grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid); + grep_source_init_oid(&gs, pathbuf.buf, path, oid, opt->repo); strbuf_release(&pathbuf); if (num_threads > 1) { @@ -359,7 +375,7 @@ static int grep_file(struct grep_opt *opt, const char *filename) struct grep_source gs; grep_source_name(opt, filename, 0, &buf); - grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename); + grep_source_init_file(&gs, buf.buf, filename); strbuf_release(&buf); if (num_threads > 1) { @@ -415,19 +431,24 @@ static int grep_submodule(struct grep_opt *opt, const struct object_id *oid, const char *filename, const char *path, int cached) { - struct repository subrepo; + struct repository *subrepo; struct repository *superproject = opt->repo; const struct submodule *sub; struct grep_opt subopt; - int hit; + int hit = 0; sub = submodule_from_path(superproject, null_oid(), path); if (!is_submodule_active(superproject, path)) return 0; - if (repo_submodule_init(&subrepo, superproject, sub)) + subrepo = xmalloc(sizeof(*subrepo)); + if (repo_submodule_init(subrepo, superproject, sub)) { + free(subrepo); return 0; + } + ALLOC_GROW(repos_to_free, repos_to_free_nr + 1, repos_to_free_alloc); + repos_to_free[repos_to_free_nr++] = subrepo; /* * NEEDSWORK: repo_read_gitmodules() might call @@ -438,53 +459,49 @@ static int grep_submodule(struct grep_opt *opt, * subrepo's odbs to the in-memory alternates list. */ obj_read_lock(); - repo_read_gitmodules(&subrepo, 0); + repo_read_gitmodules(subrepo, 0); /* - * NEEDSWORK: This adds the submodule's object directory to the list of - * alternates for the single in-memory object store. This has some bad - * consequences for memory (processed objects will never be freed) and - * performance (this increases the number of pack files git has to pay - * attention to, to the sum of the number of pack files in all the - * repositories processed so far). This can be removed once the object - * store is no longer global and instead is a member of the repository - * object. + * All code paths tested by test code no longer need submodule ODBs to + * be added as alternates, but add it to the list just in case. + * Submodule ODBs added through add_submodule_odb_by_path() will be + * lazily registered as alternates when needed (and except in an + * unexpected code interaction, it won't be needed). 
*/ - add_to_alternates_memory(subrepo.objects->odb->path); + add_submodule_odb_by_path(subrepo->objects->odb->path); obj_read_unlock(); memcpy(&subopt, opt, sizeof(subopt)); - subopt.repo = &subrepo; + subopt.repo = subrepo; if (oid) { - struct object *object; + enum object_type object_type; struct tree_desc tree; void *data; unsigned long size; struct strbuf base = STRBUF_INIT; obj_read_lock(); - object = parse_object_or_die(oid, NULL); + object_type = oid_object_info(subrepo, oid, NULL); obj_read_unlock(); - data = read_object_with_reference(&subrepo, - &object->oid, tree_type, + data = read_object_with_reference(subrepo, + oid, tree_type, &size, NULL); if (!data) - die(_("unable to read tree (%s)"), oid_to_hex(&object->oid)); + die(_("unable to read tree (%s)"), oid_to_hex(oid)); strbuf_addstr(&base, filename); strbuf_addch(&base, '/'); init_tree_desc(&tree, data, size); hit = grep_tree(&subopt, pathspec, &tree, &base, base.len, - object->type == OBJ_COMMIT); + object_type == OBJ_COMMIT); strbuf_release(&base); free(data); } else { hit = grep_cache(&subopt, pathspec, cached); } - repo_clear(&subrepo); return hit; } @@ -1182,5 +1199,6 @@ int cmd_grep(int argc, const char **argv, const char *prefix) run_pager(&opt, prefix); clear_pathspec(&pathspec); free_grep_patterns(&opt); + free_repos(); return !hit; } diff --git a/builtin/hash-object.c b/builtin/hash-object.c index 640ef4ded5..c7b3ad74c6 100644 --- a/builtin/hash-object.c +++ b/builtin/hash-object.c @@ -53,9 +53,7 @@ static void hash_object(const char *path, const char *type, const char *vpath, unsigned flags, int literally) { int fd; - fd = open(path, O_RDONLY); - if (fd < 0) - die_errno("Cannot open '%s'", path); + fd = xopen(path, O_RDONLY); hash_fd(fd, type, vpath, flags, literally); } @@ -117,7 +115,7 @@ int cmd_hash_object(int argc, const char **argv, const char *prefix) prefix = setup_git_directory_gently(&nongit); if (vpath && prefix) - vpath = xstrdup(prefix_filename(prefix, vpath)); + vpath = prefix_filename(prefix, vpath); git_config(git_default_config, NULL); diff --git a/builtin/help.c b/builtin/help.c index b7eec06c3d..7731659765 100644 --- a/builtin/help.c +++ b/builtin/help.c @@ -467,11 +467,14 @@ static void get_html_page_path(struct strbuf *page_path, const char *page) if (!html_path) html_path = to_free = system_path(GIT_HTML_PATH); - /* Check that we have a git documentation directory. */ + /* + * Check that the page we're looking for exists. 
+ */ if (!strstr(html_path, "://")) { - if (stat(mkpath("%s/git.html", html_path), &st) + if (stat(mkpath("%s/%s.html", html_path, page), &st) || !S_ISREG(st.st_mode)) - die("'%s': not a documentation directory.", html_path); + die("'%s/%s.html': documentation file not found.", + html_path, page); } strbuf_init(page_path, 0); diff --git a/builtin/index-pack.c b/builtin/index-pack.c index 8336466865..7ce69c087e 100644 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -122,6 +122,7 @@ static int strict; static int do_fsck_object; static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES; static int verbose; +static const char *progress_title; static int show_resolving_progress; static int show_stat; static int check_self_contained_and_connected; @@ -187,9 +188,7 @@ static void init_thread(void) pthread_key_create(&key, NULL); CALLOC_ARRAY(thread_data, nr_threads); for (i = 0; i < nr_threads; i++) { - thread_data[i].pack_fd = open(curr_pack, O_RDONLY); - if (thread_data[i].pack_fd == -1) - die_errno(_("unable to open %s"), curr_pack); + thread_data[i].pack_fd = xopen(curr_pack, O_RDONLY); } threads_active = 1; @@ -338,15 +337,11 @@ static const char *open_pack_file(const char *pack_name) "pack/tmp_pack_XXXXXX"); pack_name = strbuf_detach(&tmp_file, NULL); } else { - output_fd = open(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600); - if (output_fd < 0) - die_errno(_("unable to create '%s'"), pack_name); + output_fd = xopen(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600); } nothread_data.pack_fd = output_fd; } else { - input_fd = open(pack_name, O_RDONLY); - if (input_fd < 0) - die_errno(_("cannot open packfile '%s'"), pack_name); + input_fd = xopen(pack_name, O_RDONLY); output_fd = -1; nothread_data.pack_fd = input_fd; } @@ -1157,6 +1152,7 @@ static void parse_pack_objects(unsigned char *hash) if (verbose) progress = start_progress( + progress_title ? progress_title : from_stdin ? 
_("Receiving objects") : _("Indexing objects"), nr_objects); for (i = 0; i < nr_objects; i++) { @@ -1481,6 +1477,22 @@ static void write_special_file(const char *suffix, const char *msg, strbuf_release(&name_buf); } +static void rename_tmp_packfile(const char **final_name, + const char *curr_name, + struct strbuf *name, unsigned char *hash, + const char *ext, int make_read_only_if_same) +{ + if (*final_name != curr_name) { + if (!*final_name) + *final_name = odb_pack_name(name, hash, ext); + if (finalize_object_file(curr_name, *final_name)) + die(_("unable to rename temporary '*.%s' file to '%s"), + ext, *final_name); + } else if (make_read_only_if_same) { + chmod(*final_name, 0444); + } +} + static void final(const char *final_pack_name, const char *curr_pack_name, const char *final_index_name, const char *curr_index_name, const char *final_rev_index_name, const char *curr_rev_index_name, @@ -1509,31 +1521,13 @@ static void final(const char *final_pack_name, const char *curr_pack_name, write_special_file("promisor", promisor_msg, final_pack_name, hash, NULL); - if (final_pack_name != curr_pack_name) { - if (!final_pack_name) - final_pack_name = odb_pack_name(&pack_name, hash, "pack"); - if (finalize_object_file(curr_pack_name, final_pack_name)) - die(_("cannot store pack file")); - } else if (from_stdin) - chmod(final_pack_name, 0444); - - if (final_index_name != curr_index_name) { - if (!final_index_name) - final_index_name = odb_pack_name(&index_name, hash, "idx"); - if (finalize_object_file(curr_index_name, final_index_name)) - die(_("cannot store index file")); - } else - chmod(final_index_name, 0444); - - if (curr_rev_index_name) { - if (final_rev_index_name != curr_rev_index_name) { - if (!final_rev_index_name) - final_rev_index_name = odb_pack_name(&rev_index_name, hash, "rev"); - if (finalize_object_file(curr_rev_index_name, final_rev_index_name)) - die(_("cannot store reverse index file")); - } else - chmod(final_rev_index_name, 0444); - } + rename_tmp_packfile(&final_pack_name, curr_pack_name, &pack_name, + hash, "pack", from_stdin); + if (curr_rev_index_name) + rename_tmp_packfile(&final_rev_index_name, curr_rev_index_name, + &rev_index_name, hash, "rev", 1); + rename_tmp_packfile(&final_index_name, curr_index_name, &index_name, + hash, "idx", 1); if (do_fsck_object) { struct packed_git *p; @@ -1806,6 +1800,10 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) input_len = sizeof(*hdr); } else if (!strcmp(arg, "-v")) { verbose = 1; + } else if (!strcmp(arg, "--progress-title")) { + if (progress_title || (i+1) >= argc) + usage(index_pack_usage); + progress_title = argv[++i]; } else if (!strcmp(arg, "--show-resolving-progress")) { show_resolving_progress = 1; } else if (!strcmp(arg, "--report-end-of-input")) { diff --git a/builtin/log.c b/builtin/log.c index 3d7717ba5c..f75d87e8d7 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -637,7 +637,7 @@ int cmd_show(int argc, const char **argv, const char *prefix) repo_init_revisions(the_repository, &rev, prefix); rev.diff = 1; rev.always_show_header = 1; - rev.no_walk = REVISION_WALK_NO_WALK_SORTED; + rev.no_walk = 1; rev.diffopt.stat_width = -1; /* Scale to real terminal size */ memset(&opt, 0, sizeof(opt)); diff --git a/builtin/ls-files.c b/builtin/ls-files.c index 29a26ad8ae..e6d415e077 100644 --- a/builtin/ls-files.c +++ b/builtin/ls-files.c @@ -614,7 +614,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix) struct option builtin_ls_files_options[] = { /* Think twice before adding "--nul" 
synonym to this */ OPT_SET_INT('z', NULL, &line_terminator, - N_("paths are separated with NUL character"), '\0'), + N_("separate paths with the NUL character"), '\0'), OPT_BOOL('t', NULL, &show_tag, N_("identify the file status with tags")), OPT_BOOL('v', NULL, &show_valid_bit, @@ -651,7 +651,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix) N_("skip files matching pattern"), PARSE_OPT_NONEG, option_parse_exclude), OPT_CALLBACK_F('X', "exclude-from", &dir, N_("file"), - N_("exclude patterns are read from <file>"), + N_("read exclude patterns from <file>"), PARSE_OPT_NONEG, option_parse_exclude_from), OPT_STRING(0, "exclude-per-directory", &dir.exclude_per_dir, N_("file"), N_("read additional per-directory exclude patterns in <file>")), diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c index 1794548c71..f4fd823af8 100644 --- a/builtin/ls-remote.c +++ b/builtin/ls-remote.c @@ -84,6 +84,8 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix) PARSE_OPT_STOP_AT_NON_OPTION); dest = argv[0]; + packet_trace_identity("ls-remote"); + UNLEAK(sorting); if (argc > 1) { diff --git a/builtin/mailsplit.c b/builtin/mailsplit.c index 664400b816..7baef30569 100644 --- a/builtin/mailsplit.c +++ b/builtin/mailsplit.c @@ -75,9 +75,7 @@ static int split_one(FILE *mbox, const char *name, int allow_bare) fprintf(stderr, "corrupt mailbox\n"); exit(1); } - fd = open(name, O_WRONLY | O_CREAT | O_EXCL, 0666); - if (fd < 0) - die_errno("cannot open output file '%s'", name); + fd = xopen(name, O_WRONLY | O_CREAT | O_EXCL, 0666); output = xfdopen(fd, "w"); /* Copy it out, while searching for a line that begins with diff --git a/builtin/merge.c b/builtin/merge.c index 22f23990b3..3fbdacc7db 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -88,9 +88,9 @@ static int autostash; static int no_verify; static struct strategy all_strategy[] = { - { "recursive", DEFAULT_TWOHEAD | NO_TRIVIAL }, + { "recursive", NO_TRIVIAL }, { "octopus", DEFAULT_OCTOPUS }, - { "ort", NO_TRIVIAL }, + { "ort", DEFAULT_TWOHEAD | NO_TRIVIAL }, { "resolve", 0 }, { "ours", NO_FAST_FORWARD | NO_TRIVIAL }, { "subtree", NO_FAST_FORWARD | NO_TRIVIAL }, @@ -469,7 +469,6 @@ static void finish(struct commit *head_commit, * We ignore errors in 'gc --auto', since the * user should see them. */ - close_object_store(the_repository->objects); run_auto_maintenance(verbosity < 0); } } @@ -739,7 +738,7 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common, for (x = 0; x < xopts_nr; x++) if (parse_merge_opt(&o, xopts[x])) - die(_("Unknown option for merge-recursive: -X%s"), xopts[x]); + die(_("unknown strategy option: -X%s"), xopts[x]); o.branch1 = head_arg; o.branch2 = merge_remote_util(remoteheads->item)->name; @@ -862,9 +861,11 @@ static void prepare_to_commit(struct commit_list *remoteheads) strbuf_commented_addf(&msg, "\n"); } strbuf_commented_addf(&msg, _(merge_editor_comment)); - strbuf_commented_addf(&msg, _(cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS ? 
- scissors_editor_comment : - no_scissors_editor_comment), comment_line_char); + if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) + strbuf_commented_addf(&msg, _(scissors_editor_comment)); + else + strbuf_commented_addf(&msg, + _(no_scissors_editor_comment), comment_line_char); } if (signoff) append_signoff(&msg, ignore_non_trailer(msg.buf, msg.len), 0); @@ -1136,9 +1137,7 @@ static void handle_fetch_head(struct commit_list **remotes, struct strbuf *merge merge_names = &fetch_head_file; filename = git_path_fetch_head(the_repository); - fd = open(filename, O_RDONLY); - if (fd < 0) - die_errno(_("could not open '%s' for reading"), filename); + fd = xopen(filename, O_RDONLY); if (strbuf_read(merge_names, fd, 0) < 0) die_errno(_("could not read '%s'"), filename); @@ -1276,6 +1275,9 @@ int cmd_merge(int argc, const char **argv, const char *prefix) if (argc == 2 && !strcmp(argv[1], "-h")) usage_with_options(builtin_merge_usage, builtin_merge_options); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + /* * Check if we are _not_ on a detached HEAD, i.e. if there is a * current branch. @@ -1368,14 +1370,14 @@ int cmd_merge(int argc, const char **argv, const char *prefix) * There is no unmerged entry, don't advise 'git * add/rm <file>', just 'git commit'. */ - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) die(_("You have not concluded your merge (MERGE_HEAD exists).\n" "Please, commit your changes before you merge.")); else die(_("You have not concluded your merge (MERGE_HEAD exists).")); } if (ref_exists("CHERRY_PICK_HEAD")) { - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) die(_("You have not concluded your cherry-pick (CHERRY_PICK_HEAD exists).\n" "Please, commit your changes before you merge.")); else @@ -1485,6 +1487,12 @@ int cmd_merge(int argc, const char **argv, const char *prefix) fast_forward = FF_NO; } + if (!use_strategies && !pull_twohead && + remoteheads && !remoteheads->next) { + char *default_strategy = getenv("GIT_TEST_MERGE_ALGORITHM"); + if (default_strategy) + append_strategy(get_strategy(default_strategy)); + } if (!use_strategies) { if (!remoteheads) ; /* already up-to-date */ @@ -1622,7 +1630,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix) } if (fast_forward == FF_ONLY) - die(_("Not possible to fast-forward, aborting.")); + die_ff_impossible(); if (autostash) create_autostash(the_repository, diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c index 8ff0dee2ec..66de6efd41 100644 --- a/builtin/multi-pack-index.c +++ b/builtin/multi-pack-index.c @@ -68,6 +68,8 @@ static int cmd_multi_pack_index_write(int argc, const char **argv) OPT_STRING(0, "preferred-pack", &opts.preferred_pack, N_("preferred-pack"), N_("pack for reuse when computing a multi-pack bitmap")), + OPT_BIT(0, "bitmap", &opts.flags, N_("write multi-pack bitmap"), + MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX), OPT_END(), }; @@ -164,7 +166,7 @@ int cmd_multi_pack_index(int argc, const char **argv, if (!opts.object_dir) opts.object_dir = get_object_directory(); - if (argc == 0) + if (!argc) goto usage; if (!strcmp(argv[0], "repack")) @@ -175,10 +177,9 @@ int cmd_multi_pack_index(int argc, const char **argv, return cmd_multi_pack_index_verify(argc, argv); else if (!strcmp(argv[0], "expire")) return cmd_multi_pack_index_expire(argc, argv); - else { - error(_("unrecognized subcommand: %s"), argv[0]); + + error(_("unrecognized subcommand: %s"), argv[0]); usage: - 
usage_with_options(builtin_multi_pack_index_usage, - builtin_multi_pack_index_options); - } + usage_with_options(builtin_multi_pack_index_usage, + builtin_multi_pack_index_options); } diff --git a/builtin/notes.c b/builtin/notes.c index 74bba39ca8..71c59583a1 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -172,9 +172,7 @@ static void prepare_note_data(const struct object_id *object, struct note_data * /* write the template message before editing: */ d->edit_path = git_pathdup("NOTES_EDITMSG"); - fd = open(d->edit_path, O_CREAT | O_TRUNC | O_WRONLY, 0600); - if (fd < 0) - die_errno(_("could not create file '%s'"), d->edit_path); + fd = xopen(d->edit_path, O_CREAT | O_TRUNC | O_WRONLY, 0600); if (d->given) write_or_die(fd, d->buf.buf, d->buf.len); diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index de00adbb9e..1a3dd445f8 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -1124,6 +1124,11 @@ static void write_reused_pack(struct hashfile *f) break; offset += ewah_bit_ctz64(word >> offset); + /* + * Can use bit positions directly, even for MIDX + * bitmaps. See comment in try_partial_reuse() + * for why. + */ write_reused_pack_one(pos + offset, f, &w_curs); display_progress(progress_state, ++written); } @@ -1217,6 +1222,7 @@ static void write_pack_file(void) if (!pack_to_stdout) { struct stat st; struct strbuf tmpname = STRBUF_INIT; + char *idx_tmp_name = NULL; /* * Packs are runtime accessed in their mtime @@ -1237,7 +1243,8 @@ static void write_pack_file(void) warning_errno(_("failed utime() on %s"), pack_tmp_name); } - strbuf_addf(&tmpname, "%s-", base_name); + strbuf_addf(&tmpname, "%s-%s.", base_name, + hash_to_hex(hash)); if (write_bitmap_index) { bitmap_writer_set_checksum(hash); @@ -1245,23 +1252,29 @@ static void write_pack_file(void) &to_pack, written_list, nr_written); } - finish_tmp_packfile(&tmpname, pack_tmp_name, + stage_tmp_packfiles(&tmpname, pack_tmp_name, written_list, nr_written, - &pack_idx_opts, hash); + &pack_idx_opts, hash, &idx_tmp_name); if (write_bitmap_index) { - strbuf_addf(&tmpname, "%s.bitmap", hash_to_hex(hash)); + size_t tmpname_len = tmpname.len; + strbuf_addstr(&tmpname, "bitmap"); stop_progress(&progress_state); bitmap_writer_show_progress(progress); bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); - bitmap_writer_build(&to_pack); + if (bitmap_writer_build(&to_pack) < 0) + die(_("failed to write bitmap index")); bitmap_writer_finish(written_list, nr_written, tmpname.buf, write_bitmap_options); write_bitmap_index = 0; + strbuf_setlen(&tmpname, tmpname_len); } + rename_tmp_packfile_idx(&tmpname, &idx_tmp_name); + + free(idx_tmp_name); strbuf_release(&tmpname); free(pack_tmp_name); puts(hash_to_hex(hash)); @@ -3311,9 +3324,26 @@ static void read_packs_list_from_stdin(void) } /* - * First handle all of the excluded packs, marking them as kept in-core - * so that later calls to add_object_entry() discards any objects that - * are also found in excluded packs. + * Arguments we got on stdin may not even be packs. First + * check that to avoid segfaulting later on in + * e.g. pack_mtime_cmp(), excluded packs are handled below. + * + * Since we first parsed our STDIN and then sorted the input + * lines the pack we error on will be whatever line happens to + * sort first. This is lazy, it's enough that we report one + * bad case here, we don't need to report the first/last one, + * or all of them. 
+ */ + for_each_string_list_item(item, &include_packs) { + struct packed_git *p = item->util; + if (!p) + die(_("could not find pack '%s'"), item->string); + } + + /* + * Then, handle all of the excluded packs, marking them as + * kept in-core so that later calls to add_object_entry() + * discards any objects that are also found in excluded packs. */ for_each_string_list_item(item, &exclude_packs) { struct packed_git *p = item->util; @@ -3388,13 +3418,9 @@ static void read_object_list_from_stdin(void) } } -/* Remember to update object flag allocation in object.h */ -#define OBJECT_ADDED (1u<<20) - static void show_commit(struct commit *commit, void *data) { add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0); - commit->object.flags |= OBJECT_ADDED; if (write_bitmap_index) index_commit_for_bitmap(commit); @@ -3407,7 +3433,6 @@ static void show_object(struct object *obj, const char *name, void *data) { add_preferred_base_object(name); add_object_entry(&obj->oid, obj->type, name, 0); - obj->flags |= OBJECT_ADDED; if (use_delta_islands) { const char *p; @@ -3488,79 +3513,23 @@ static void show_edge(struct commit *commit) add_preferred_base(&commit->object.oid); } -struct in_pack_object { - off_t offset; - struct object *object; -}; - -struct in_pack { - unsigned int alloc; - unsigned int nr; - struct in_pack_object *array; -}; - -static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack) -{ - in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p); - in_pack->array[in_pack->nr].object = object; - in_pack->nr++; -} - -/* - * Compare the objects in the offset order, in order to emulate the - * "git rev-list --objects" output that produced the pack originally. - */ -static int ofscmp(const void *a_, const void *b_) +static int add_object_in_unpacked_pack(const struct object_id *oid, + struct packed_git *pack, + uint32_t pos, + void *_data) { - struct in_pack_object *a = (struct in_pack_object *)a_; - struct in_pack_object *b = (struct in_pack_object *)b_; - - if (a->offset < b->offset) - return -1; - else if (a->offset > b->offset) - return 1; - else - return oidcmp(&a->object->oid, &b->object->oid); + add_object_entry(oid, OBJ_NONE, "", 0); + return 0; } static void add_objects_in_unpacked_packs(void) { - struct packed_git *p; - struct in_pack in_pack; - uint32_t i; - - memset(&in_pack, 0, sizeof(in_pack)); - - for (p = get_all_packs(the_repository); p; p = p->next) { - struct object_id oid; - struct object *o; - - if (!p->pack_local || p->pack_keep || p->pack_keep_in_core) - continue; - if (open_pack_index(p)) - die(_("cannot open pack index")); - - ALLOC_GROW(in_pack.array, - in_pack.nr + p->num_objects, - in_pack.alloc); - - for (i = 0; i < p->num_objects; i++) { - nth_packed_object_id(&oid, p, i); - o = lookup_unknown_object(the_repository, &oid); - if (!(o->flags & OBJECT_ADDED)) - mark_in_pack_object(o, p, &in_pack); - o->flags |= OBJECT_ADDED; - } - } - - if (in_pack.nr) { - QSORT(in_pack.array, in_pack.nr, ofscmp); - for (i = 0; i < in_pack.nr; i++) { - struct object *o = in_pack.array[i].object; - add_object_entry(&o->oid, o->type, "", 0); - } - } - free(in_pack.array); + if (for_each_packed_object(add_object_in_unpacked_pack, NULL, + FOR_EACH_OBJECT_PACK_ORDER | + FOR_EACH_OBJECT_LOCAL_ONLY | + FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS | + FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS)) + die(_("cannot open pack index")); } static int add_loose_object(const struct object_id *oid, const char *path, diff --git 
a/builtin/pull.c b/builtin/pull.c index 3e13f81084..cf6c56e2d8 100644 --- a/builtin/pull.c +++ b/builtin/pull.c @@ -26,6 +26,7 @@ #include "wt-status.h" #include "commit-reach.h" #include "sequencer.h" +#include "packfile.h" /** * Parses the value of --rebase. If value is a false value, returns @@ -577,7 +578,7 @@ static int run_fetch(const char *repo, const char **refspecs) strvec_pushv(&args, refspecs); } else if (*refspecs) BUG("refspecs without repo?"); - ret = run_command_v_opt(args.v, RUN_GIT_CMD); + ret = run_command_v_opt(args.v, RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE); strvec_clear(&args); return ret; } @@ -893,6 +894,8 @@ static int run_rebase(const struct object_id *newbase, strvec_pushv(&args, opt_strategy_opts.v); if (opt_gpg_sign) strvec_push(&args, opt_gpg_sign); + if (opt_signoff) + strvec_push(&args, opt_signoff); if (opt_autostash == 0) strvec_push(&args, "--no-autostash"); else if (opt_autostash == 1) @@ -911,12 +914,18 @@ static int run_rebase(const struct object_id *newbase, return ret; } -static int get_can_ff(struct object_id *orig_head, struct object_id *orig_merge_head) +static int get_can_ff(struct object_id *orig_head, + struct oid_array *merge_heads) { int ret; struct commit_list *list = NULL; struct commit *merge_head, *head; + struct object_id *orig_merge_head; + if (merge_heads->nr > 1) + return 0; + + orig_merge_head = &merge_heads->oid[0]; head = lookup_commit_reference(the_repository, orig_head); commit_list_insert(head, &list); merge_head = lookup_commit_reference(the_repository, orig_merge_head); @@ -927,9 +936,9 @@ static int get_can_ff(struct object_id *orig_head, struct object_id *orig_merge_ static void show_advice_pull_non_ff(void) { - advise(_("Pulling without specifying how to reconcile divergent branches is\n" - "discouraged. You can squelch this message by running one of the following\n" - "commands sometime before your next pull:\n" + advise(_("You have divergent branches and need to specify how to reconcile them.\n" + "You can do so by running one of the following commands sometime before\n" + "your next pull:\n" "\n" " git config pull.rebase false # merge (the default strategy)\n" " git config pull.rebase true # rebase\n" @@ -966,8 +975,22 @@ int cmd_pull(int argc, const char **argv, const char *prefix) parse_repo_refspecs(argc, argv, &repo, &refspecs); - if (!opt_ff) + if (!opt_ff) { opt_ff = xstrdup_or_null(config_get_ff()); + /* + * A subtle point: opt_ff was set on the line above via + * reading from config. opt_rebase, in contrast, is set + * before this point via command line options. The setting + * of opt_rebase via reading from config (using + * config_get_rebase()) does not happen until later. We + * are relying on the next if-condition happening before + * the config_get_rebase() call so that an explicit + * "--rebase" can override a config setting of + * pull.ff=only. 
+ */ + if (opt_rebase >= 0 && opt_ff && !strcmp(opt_ff, "--ff-only")) + opt_ff = "--ff"; + } if (opt_rebase < 0) opt_rebase = config_get_rebase(&rebase_unspecified); @@ -1041,14 +1064,25 @@ int cmd_pull(int argc, const char **argv, const char *prefix) die(_("Cannot merge multiple branches into empty head.")); return pull_into_void(merge_heads.oid, &curr_head); } - if (opt_rebase && merge_heads.nr > 1) - die(_("Cannot rebase onto multiple branches.")); + if (merge_heads.nr > 1) { + if (opt_rebase) + die(_("Cannot rebase onto multiple branches.")); + if (opt_ff && !strcmp(opt_ff, "--ff-only")) + die(_("Cannot fast-forward to multiple branches.")); + } - can_ff = get_can_ff(&orig_head, &merge_heads.oid[0]); + can_ff = get_can_ff(&orig_head, &merge_heads); - if (rebase_unspecified && !opt_ff && !can_ff) { - if (opt_verbosity >= 0) - show_advice_pull_non_ff(); + /* ff-only takes precedence over rebase */ + if (opt_ff && !strcmp(opt_ff, "--ff-only")) { + if (!can_ff) + die_ff_impossible(); + opt_rebase = REBASE_FALSE; + } + /* If no action specified and we can't fast forward, then warn. */ + if (!opt_ff && rebase_unspecified && !can_ff) { + show_advice_pull_non_ff(); + die(_("Need to specify how to reconcile divergent branches.")); } if (opt_rebase) { diff --git a/builtin/push.c b/builtin/push.c index e8b10a9b7e..4b026ce6c6 100644 --- a/builtin/push.c +++ b/builtin/push.c @@ -289,42 +289,42 @@ static const char message_advice_ref_needs_update[] = static void advise_pull_before_push(void) { - if (!advice_push_non_ff_current || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_NON_FF_CURRENT) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_pull_before_push)); } static void advise_checkout_pull_push(void) { - if (!advice_push_non_ff_matching || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_NON_FF_MATCHING) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_checkout_pull_push)); } static void advise_ref_already_exists(void) { - if (!advice_push_already_exists || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_ALREADY_EXISTS) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_already_exists)); } static void advise_ref_fetch_first(void) { - if (!advice_push_fetch_first || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_FETCH_FIRST) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_fetch_first)); } static void advise_ref_needs_force(void) { - if (!advice_push_needs_force || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_NEEDS_FORCE) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_needs_force)); } static void advise_ref_needs_update(void) { - if (!advice_push_ref_needs_update || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_REF_NEEDS_UPDATE) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_needs_update)); } diff --git a/builtin/rebase.c b/builtin/rebase.c index 33e0961900..6c2463037f 100644 --- a/builtin/rebase.c +++ b/builtin/rebase.c @@ -405,6 +405,7 @@ static int run_sequencer_rebase(struct rebase_options *opts, flags |= opts->root_with_onto ? TODO_LIST_ROOT_WITH_ONTO : 0; flags |= command == ACTION_SHORTEN_OIDS ? TODO_LIST_SHORTEN_IDS : 0; flags |= opts->reapply_cherry_picks ? TODO_LIST_REAPPLY_CHERRY_PICKS : 0; + flags |= opts->flags & REBASE_NO_QUIET ? 
TODO_LIST_WARN_SKIPPED_CHERRY_PICKS : 0; switch (command) { case ACTION_NONE: { @@ -559,6 +560,9 @@ int cmd_rebase__interactive(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, options, builtin_rebase_interactive_usage, PARSE_OPT_KEEP_ARGV0); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + if (!is_null_oid(&squash_onto)) opts.squash_onto = &squash_onto; @@ -740,7 +744,6 @@ static int finish_rebase(struct rebase_options *opts) delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); unlink(git_path_auto_merge(the_repository)); apply_autostash(state_dir_path("autostash", opts)); - close_object_store(the_repository->objects); /* * We ignore errors in 'git maintenance run --auto', since the * user should see them. @@ -1430,6 +1433,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) usage_with_options(builtin_rebase_usage, builtin_rebase_options); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + options.allow_empty_message = 1; git_config(rebase_config, &options); /* options.gpg_sign_opt will be either "-S" or NULL */ @@ -1713,7 +1719,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) int i; if (!options.strategy) - options.strategy = "recursive"; + options.strategy = "ort"; strbuf_reset(&buf); for (i = 0; i < strategy_options.nr; i++) @@ -1918,7 +1924,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) &options.orig_head)) options.head_name = NULL; else - die(_("fatal: no such branch/commit '%s'"), + die(_("no such branch/commit '%s'"), branch_name); } else if (argc == 0) { /* Do not need to switch branches, we are already on it. */ diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c index 2d1f97e1ca..48960a9575 100644 --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@ -1306,7 +1306,7 @@ static void refuse_unconfigured_deny_delete_current(void) rp_error("%s", _(refuse_unconfigured_deny_delete_current_msg)); } -static int command_singleton_iterator(void *cb_data, struct object_id *oid); +static const struct object_id *command_singleton_iterator(void *cb_data); static int update_shallow_ref(struct command *cmd, struct shallow_info *si) { struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT; @@ -1731,16 +1731,15 @@ static void check_aliased_updates(struct command *commands) string_list_clear(&ref_list, 0); } -static int command_singleton_iterator(void *cb_data, struct object_id *oid) +static const struct object_id *command_singleton_iterator(void *cb_data) { struct command **cmd_list = cb_data; struct command *cmd = *cmd_list; if (!cmd || is_null_oid(&cmd->new_oid)) - return -1; /* end of list */ + return NULL; *cmd_list = NULL; /* this returns only one */ - oidcpy(oid, &cmd->new_oid); - return 0; + return &cmd->new_oid; } static void set_connectivity_errors(struct command *commands, @@ -1770,7 +1769,7 @@ struct iterate_data { struct shallow_info *si; }; -static int iterate_receive_command_list(void *cb_data, struct object_id *oid) +static const struct object_id *iterate_receive_command_list(void *cb_data) { struct iterate_data *data = cb_data; struct command **cmd_list = &data->cmds; @@ -1781,13 +1780,11 @@ static int iterate_receive_command_list(void *cb_data, struct object_id *oid) /* to be checked in update_shallow_ref() */ continue; if (!is_null_oid(&cmd->new_oid) && !cmd->skip_update) { - oidcpy(oid, &cmd->new_oid); *cmd_list = cmd->next; - return 0; + return &cmd->new_oid; } } 
- *cmd_list = NULL; - return -1; /* end of list */ + return NULL; } static void reject_updates_to_hidden(struct command *commands) @@ -2477,7 +2474,8 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix) struct option options[] = { OPT__QUIET(&quiet, N_("quiet")), OPT_HIDDEN_BOOL(0, "stateless-rpc", &stateless_rpc, NULL), - OPT_HIDDEN_BOOL(0, "advertise-refs", &advertise_refs, NULL), + OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs, NULL), + OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"), OPT_HIDDEN_BOOL(0, "reject-thin-pack-for-testing", &reject_thin, NULL), OPT_END() }; @@ -2580,10 +2578,9 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix) proc.no_stdin = 1; proc.stdout_to_stderr = 1; proc.err = use_sideband ? -1 : 0; - proc.git_cmd = 1; + proc.git_cmd = proc.close_object_store = 1; proc.argv = argv_gc_auto; - close_object_store(the_repository->objects); if (!start_command(&proc)) { if (use_sideband) copy_to_sideband(proc.err, -1, NULL); diff --git a/builtin/reflog.c b/builtin/reflog.c index 09541d1c80..bd4c669918 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -629,8 +629,9 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) free_worktrees(worktrees); for (i = 0; i < collected.nr; i++) { struct collected_reflog *e = collected.e[i]; + set_reflog_expiry_param(&cb.cmd, explicit_expiry, e->reflog); - status |= reflog_expire(e->reflog, &e->oid, flags, + status |= reflog_expire(e->reflog, flags, reflog_expiry_prepare, should_expire_reflog_ent, reflog_expiry_cleanup, @@ -642,13 +643,12 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) for (; i < argc; i++) { char *ref; - struct object_id oid; - if (!dwim_log(argv[i], strlen(argv[i]), &oid, &ref)) { + if (!dwim_log(argv[i], strlen(argv[i]), NULL, &ref)) { status |= error(_("%s points nowhere!"), argv[i]); continue; } set_reflog_expiry_param(&cb.cmd, explicit_expiry, ref); - status |= reflog_expire(ref, &oid, flags, + status |= reflog_expire(ref, flags, reflog_expiry_prepare, should_expire_reflog_ent, reflog_expiry_cleanup, @@ -700,7 +700,6 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) for ( ; i < argc; i++) { const char *spec = strstr(argv[i], "@{"); - struct object_id oid; char *ep, *ref; int recno; @@ -709,7 +708,7 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) continue; } - if (!dwim_log(argv[i], spec - argv[i], &oid, &ref)) { + if (!dwim_log(argv[i], spec - argv[i], NULL, &ref)) { status |= error(_("no reflog for '%s'"), argv[i]); continue; } @@ -724,7 +723,7 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) cb.cmd.expire_total = 0; } - status |= reflog_expire(ref, &oid, flags, + status |= reflog_expire(ref, flags, reflog_expiry_prepare, should_expire_reflog_ent, reflog_expiry_cleanup, diff --git a/builtin/repack.c b/builtin/repack.c index 5f9bc74adc..c1a209013b 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -208,10 +208,10 @@ static struct { unsigned optional:1; } exts[] = { {".pack"}, - {".idx"}, {".rev", 1}, {".bitmap", 1}, {".promisor", 1}, + {".idx"}, }; static unsigned populate_pack_exts(char *name) @@ -515,6 +515,10 @@ int cmd_repack(int argc, const char **argv, const char *prefix) if (!(pack_everything & ALL_INTO_ONE) || !is_bare_repository()) write_bitmaps = 0; + } else if (write_bitmaps && + git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0) && + git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 
0)) { + write_bitmaps = 0; } if (pack_kept_objects < 0) pack_kept_objects = write_bitmaps > 0; @@ -725,8 +729,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix) update_server_info(0); remove_temporary_files(); - if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) - write_midx_file(get_object_directory(), NULL, 0); + if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) { + unsigned flags = 0; + if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 0)) + flags |= MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX; + write_midx_file(get_object_directory(), NULL, flags); + } string_list_clear(&names, 0); string_list_clear(&rollback, 0); diff --git a/builtin/replace.c b/builtin/replace.c index cd48765911..946938d011 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -507,7 +507,7 @@ static int convert_graft_file(int force) if (!fp) return -1; - advice_graft_file_deprecated = 0; + no_graft_file_deprecated_advice = 1; while (strbuf_getline(&buf, fp) != EOF) { if (*buf.buf == '#') continue; diff --git a/builtin/reset.c b/builtin/reset.c index 43e855cb88..51c9e2f43f 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -412,7 +412,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix) refresh_index(&the_index, flags, NULL, NULL, _("Unstaged changes after reset:")); t_delta_in_ms = (getnanotime() - t_begin) / 1000000; - if (advice_reset_quiet_warning && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) { + if (advice_enabled(ADVICE_RESET_QUIET_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) { printf(_("\nIt took %.2f seconds to enumerate unstaged changes after reset. You can\n" "use '--quiet' to avoid this. Set the config setting reset.quiet to true\n" "to make this the default.\n"), t_delta_in_ms / 1000.0); diff --git a/builtin/revert.c b/builtin/revert.c index 237f2f18d4..51776abea6 100644 --- a/builtin/revert.c +++ b/builtin/revert.c @@ -136,6 +136,9 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts) PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + /* implies allow_empty */ if (opts->keep_redundant_commits) opts->allow_empty = 1; @@ -191,7 +194,8 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts) struct setup_revision_opt s_r_opt; opts->revs = xmalloc(sizeof(*opts->revs)); repo_init_revisions(the_repository, opts->revs, NULL); - opts->revs->no_walk = REVISION_WALK_NO_WALK_UNSORTED; + opts->revs->no_walk = 1; + opts->revs->unsorted_input = 1; if (argc < 2) usage_with_options(usage_str, options); if (!strcmp(argv[1], "-")) diff --git a/builtin/rm.c b/builtin/rm.c index 8a24c715e0..3b44b807e5 100644 --- a/builtin/rm.c +++ b/builtin/rm.c @@ -55,7 +55,7 @@ static void print_error_files(struct string_list *files_list, strbuf_addf(&err_msg, "\n %s", files_list->items[i].string); - if (advice_rm_hints) + if (advice_enabled(ADVICE_RM_HINTS)) strbuf_addstr(&err_msg, hints_msg); *errs = error("%s", err_msg.buf); strbuf_release(&err_msg); diff --git a/builtin/send-pack.c b/builtin/send-pack.c index a7e01667b0..729dea1d25 100644 --- a/builtin/send-pack.c +++ b/builtin/send-pack.c @@ -230,6 +230,7 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix) args.atomic = atomic; args.stateless_rpc = stateless_rpc; args.push_options = push_options.nr ? 
&push_options : NULL; + args.url = dest; if (from_stdin) { if (args.stateless_rpc) { diff --git a/builtin/show-branch.c b/builtin/show-branch.c index d77ce7aeb3..bea4bbf468 100644 --- a/builtin/show-branch.c +++ b/builtin/show-branch.c @@ -482,10 +482,9 @@ static void snarf_refs(int head, int remotes) } } -static int rev_is_head(const char *head, const char *name, - unsigned char *head_sha1, unsigned char *sha1) +static int rev_is_head(const char *head, const char *name) { - if (!head || (head_sha1 && sha1 && !hasheq(head_sha1, sha1))) + if (!head) return 0; skip_prefix(head, "refs/heads/", &head); if (!skip_prefix(name, "refs/heads/", &name)) @@ -806,9 +805,7 @@ int cmd_show_branch(int ac, const char **av, const char *prefix) /* We are only interested in adding the branch * HEAD points at. */ - if (rev_is_head(head, - ref_name[i], - head_oid.hash, NULL)) + if (rev_is_head(head, ref_name[i])) has_head++; } if (!has_head) { @@ -867,10 +864,8 @@ int cmd_show_branch(int ac, const char **av, const char *prefix) if (1 < num_rev || extra < 0) { for (i = 0; i < num_rev; i++) { int j; - int is_head = rev_is_head(head, - ref_name[i], - head_oid.hash, - rev[i]->object.oid.hash); + int is_head = rev_is_head(head, ref_name[i]) && + oideq(&head_oid, &rev[i]->object.oid); if (extra < 0) printf("%c [%s] ", is_head ? '*' : ' ', ref_name[i]); diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c index 8ba9f13787..d0f5c4702b 100644 --- a/builtin/sparse-checkout.c +++ b/builtin/sparse-checkout.c @@ -100,6 +100,98 @@ static int sparse_checkout_list(int argc, const char **argv) return 0; } +static void clean_tracked_sparse_directories(struct repository *r) +{ + int i, was_full = 0; + struct strbuf path = STRBUF_INIT; + size_t pathlen; + struct string_list_item *item; + struct string_list sparse_dirs = STRING_LIST_INIT_DUP; + + /* + * If we are not using cone mode patterns, then we cannot + * delete directories outside of the sparse cone. + */ + if (!r || !r->index || !r->worktree) + return; + if (init_sparse_checkout_patterns(r->index) || + !r->index->sparse_checkout_patterns->use_cone_patterns) + return; + + /* + * Use the sparse index as a data structure to assist finding + * directories that are safe to delete. This conversion to a + * sparse index will not delete directories that contain + * conflicted entries or submodules. + */ + if (!r->index->sparse_index) { + /* + * If something, such as a merge conflict or other concern, + * prevents us from converting to a sparse index, then do + * not try deleting files. + */ + if (convert_to_sparse(r->index, SPARSE_INDEX_MEMORY_ONLY)) + return; + was_full = 1; + } + + strbuf_addstr(&path, r->worktree); + strbuf_complete(&path, '/'); + pathlen = path.len; + + /* + * Collect directories that have gone out of scope but also + * exist on disk, so there is some work to be done. We need to + * store the entries in a list before exploring, since that might + * expand the sparse-index again. 
+ */ + for (i = 0; i < r->index->cache_nr; i++) { + struct cache_entry *ce = r->index->cache[i]; + + if (S_ISSPARSEDIR(ce->ce_mode) && + repo_file_exists(r, ce->name)) + string_list_append(&sparse_dirs, ce->name); + } + + for_each_string_list_item(item, &sparse_dirs) { + struct dir_struct dir = DIR_INIT; + struct pathspec p = { 0 }; + struct strvec s = STRVEC_INIT; + + strbuf_setlen(&path, pathlen); + strbuf_addstr(&path, item->string); + + dir.flags |= DIR_SHOW_IGNORED_TOO; + + setup_standard_excludes(&dir); + strvec_push(&s, path.buf); + + parse_pathspec(&p, PATHSPEC_GLOB, 0, NULL, s.v); + fill_directory(&dir, r->index, &p); + + if (dir.nr) { + warning(_("directory '%s' contains untracked files," + " but is not in the sparse-checkout cone"), + item->string); + } else if (remove_dir_recursively(&path, 0)) { + /* + * Removal is "best effort". If something blocks + * the deletion, then continue with a warning. + */ + warning(_("failed to remove directory '%s'"), + item->string); + } + + dir_clear(&dir); + } + + string_list_clear(&sparse_dirs, 0); + strbuf_release(&path); + + if (was_full) + ensure_full_index(r->index); +} + static int update_working_directory(struct pattern_list *pl) { enum update_sparsity_result result; @@ -141,6 +233,8 @@ static int update_working_directory(struct pattern_list *pl) else rollback_lock_file(&lock_file); + clean_tracked_sparse_directories(r); + r->index->sparse_checkout_patterns = NULL; return result; } diff --git a/builtin/stash.c b/builtin/stash.c index 8f42360ca9..5512f4942c 100644 --- a/builtin/stash.c +++ b/builtin/stash.c @@ -313,6 +313,17 @@ static int reset_head(void) return run_command(&cp); } +static int is_path_a_directory(const char *path) +{ + /* + * This function differs from abspath.c:is_directory() in that + * here we use lstat() instead of stat(); we do not want to + * follow symbolic links here. 
+ */ + struct stat st; + return (!lstat(path, &st) && S_ISDIR(st.st_mode)); +} + static void add_diff_to_buf(struct diff_queue_struct *q, struct diff_options *options, void *data) @@ -320,6 +331,9 @@ static void add_diff_to_buf(struct diff_queue_struct *q, int i; for (i = 0; i < q->nr; i++) { + if (is_path_a_directory(q->queue[i]->one->path)) + continue; + strbuf_addstr(data, q->queue[i]->one->path); /* NUL-terminate: will be fed to update-index -z */ @@ -521,9 +535,6 @@ static int do_apply_stash(const char *prefix, struct stash_info *info, } } - if (info->has_u && restore_untracked(&info->u_tree)) - return error(_("could not restore untracked files from stash")); - init_merge_options(&o, the_repository); o.branch1 = "Updated upstream"; @@ -558,6 +569,9 @@ static int do_apply_stash(const char *prefix, struct stash_info *info, unstage_changes_unless_new(&c_tree); } + if (info->has_u && restore_untracked(&info->u_tree)) + return error(_("could not restore untracked files from stash")); + if (!quiet) { struct child_process cp = CHILD_PROCESS_INIT; diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index ef2776a9e4..5336daf186 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -199,34 +199,28 @@ static char *relative_url(const char *remote_url, return strbuf_detach(&sb, NULL); } -static int resolve_relative_url(int argc, const char **argv, const char *prefix) +static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet) { - char *remoteurl = NULL; + char *remoteurl, *resolved_url; char *remote = get_default_remote(); - const char *up_path = NULL; - char *res; - const char *url; - struct strbuf sb = STRBUF_INIT; - - if (argc != 2 && argc != 3) - die("resolve-relative-url only accepts one or two arguments"); - - url = argv[1]; - strbuf_addf(&sb, "remote.%s.url", remote); - free(remote); + struct strbuf remotesb = STRBUF_INIT; - if (git_config_get_string(sb.buf, &remoteurl)) - /* the repository is its own authoritative upstream */ + strbuf_addf(&remotesb, "remote.%s.url", remote); + if (git_config_get_string(remotesb.buf, &remoteurl)) { + if (!quiet) + warning(_("could not look up configuration '%s'. " + "Assuming this repository is its own " + "authoritative upstream."), + remotesb.buf); remoteurl = xgetcwd(); + } + resolved_url = relative_url(remoteurl, rel_url, up_path); - if (argc == 3) - up_path = argv[2]; - - res = relative_url(remoteurl, url, up_path); - puts(res); - free(res); + free(remote); free(remoteurl); - return 0; + strbuf_release(&remotesb); + + return resolved_url; } static int resolve_relative_url_test(int argc, const char **argv, const char *prefix) @@ -590,26 +584,6 @@ static int module_foreach(int argc, const char **argv, const char *prefix) return 0; } -static char *compute_submodule_clone_url(const char *rel_url) -{ - char *remoteurl, *relurl; - char *remote = get_default_remote(); - struct strbuf remotesb = STRBUF_INIT; - - strbuf_addf(&remotesb, "remote.%s.url", remote); - if (git_config_get_string(remotesb.buf, &remoteurl)) { - warning(_("could not look up configuration '%s'. 
Assuming this repository is its own authoritative upstream."), remotesb.buf); - remoteurl = xgetcwd(); - } - relurl = relative_url(remoteurl, rel_url, NULL); - - free(remote); - free(remoteurl); - strbuf_release(&remotesb); - - return relurl; -} - struct init_cb { const char *prefix; unsigned int flags; @@ -660,7 +634,7 @@ static void init_submodule(const char *path, const char *prefix, if (starts_with_dot_dot_slash(url) || starts_with_dot_slash(url)) { char *oldurl = url; - url = compute_submodule_clone_url(oldurl); + url = resolve_relative_url(oldurl, NULL, 0); free(oldurl); } @@ -1380,20 +1354,10 @@ static void sync_submodule(const char *path, const char *prefix, if (sub && sub->url) { if (starts_with_dot_dot_slash(sub->url) || starts_with_dot_slash(sub->url)) { - char *remote_url, *up_path; - char *remote = get_default_remote(); - strbuf_addf(&sb, "remote.%s.url", remote); - - if (git_config_get_string(sb.buf, &remote_url)) - remote_url = xgetcwd(); - - up_path = get_up_path(path); - sub_origin_url = relative_url(remote_url, sub->url, up_path); - super_config_url = relative_url(remote_url, sub->url, NULL); - - free(remote); + char *up_path = get_up_path(path); + sub_origin_url = resolve_relative_url(sub->url, up_path, 1); + super_config_url = resolve_relative_url(sub->url, NULL, 1); free(up_path); - free(remote_url); } else { sub_origin_url = xstrdup(sub->url); super_config_url = xstrdup(sub->url); @@ -1704,18 +1668,24 @@ static int add_possible_reference_from_superproject( * standard layout with .git/(modules/<name>)+/objects */ if (strip_suffix(odb->path, "/objects", &len)) { + struct repository alternate; char *sm_alternate; struct strbuf sb = STRBUF_INIT; struct strbuf err = STRBUF_INIT; strbuf_add(&sb, odb->path, len); + repo_init(&alternate, sb.buf, NULL); + /* * We need to end the new path with '/' to mark it as a dir, * otherwise a submodule name containing '/' will be broken * as the last part of a missing submodule reference would * be taken as a file name. 
*/ - strbuf_addf(&sb, "/modules/%s/", sas->submodule_name); + strbuf_reset(&sb); + submodule_name_to_gitdir(&sb, &alternate, sas->submodule_name); + strbuf_addch(&sb, '/'); + repo_clear(&alternate); sm_alternate = compute_alternate_path(sb.buf, &err); if (sm_alternate) { @@ -1724,7 +1694,7 @@ static int add_possible_reference_from_superproject( } else { switch (sas->error_mode) { case SUBMODULE_ALTERNATE_ERROR_DIE: - if (advice_submodule_alternate_error_strategy_die) + if (advice_enabled(ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE)) advise(_(alternate_error_advice)); die(_("submodule '%s' cannot add alternate: %s"), sas->submodule_name, err.buf); @@ -1785,7 +1755,7 @@ static int clone_submodule(struct module_clone_data *clone_data) struct strbuf sb = STRBUF_INIT; struct child_process cp = CHILD_PROCESS_INIT; - strbuf_addf(&sb, "%s/modules/%s", get_git_dir(), clone_data->name); + submodule_name_to_gitdir(&sb, the_repository, clone_data->name); sm_gitdir = absolute_pathdup(sb.buf); strbuf_reset(&sb); @@ -2045,6 +2015,20 @@ struct submodule_update_clone { .max_jobs = 1, \ } +struct update_data { + const char *recursive_prefix; + const char *sm_path; + const char *displaypath; + struct object_id oid; + struct object_id suboid; + struct submodule_update_strategy update_strategy; + int depth; + unsigned int force: 1; + unsigned int quiet: 1; + unsigned int nofetch: 1; + unsigned int just_cloned: 1; +}; +#define UPDATE_DATA_INIT { .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT } static void next_submodule_warn_missing(struct submodule_update_clone *suc, struct strbuf *out, const char *displaypath) @@ -2134,7 +2118,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, if (repo_config_get_string_tmp(the_repository, sb.buf, &url)) { if (starts_with_dot_slash(sub->url) || starts_with_dot_dot_slash(sub->url)) { - url = compute_submodule_clone_url(sub->url); + url = resolve_relative_url(sub->url, NULL, 0); need_free_url = 1; } else url = sub->url; @@ -2298,6 +2282,181 @@ static int git_update_clone_config(const char *var, const char *value, return 0; } +static int is_tip_reachable(const char *path, struct object_id *oid) +{ + struct child_process cp = CHILD_PROCESS_INIT; + struct strbuf rev = STRBUF_INIT; + char *hex = oid_to_hex(oid); + + cp.git_cmd = 1; + cp.dir = xstrdup(path); + cp.no_stderr = 1; + strvec_pushl(&cp.args, "rev-list", "-n", "1", hex, "--not", "--all", NULL); + + prepare_submodule_repo_env(&cp.env_array); + + if (capture_command(&cp, &rev, GIT_MAX_HEXSZ + 1) || rev.len) + return 0; + + return 1; +} + +static int fetch_in_submodule(const char *module_path, int depth, int quiet, struct object_id *oid) +{ + struct child_process cp = CHILD_PROCESS_INIT; + + prepare_submodule_repo_env(&cp.env_array); + cp.git_cmd = 1; + cp.dir = xstrdup(module_path); + + strvec_push(&cp.args, "fetch"); + if (quiet) + strvec_push(&cp.args, "--quiet"); + if (depth) + strvec_pushf(&cp.args, "--depth=%d", depth); + if (oid) { + char *hex = oid_to_hex(oid); + char *remote = get_default_remote(); + strvec_pushl(&cp.args, remote, hex, NULL); + } + + return run_command(&cp); +} + +static int run_update_command(struct update_data *ud, int subforce) +{ + struct strvec args = STRVEC_INIT; + struct strvec child_env = STRVEC_INIT; + char *oid = oid_to_hex(&ud->oid); + int must_die_on_failure = 0; + int git_cmd; + + switch (ud->update_strategy.type) { + case SM_UPDATE_CHECKOUT: + git_cmd = 1; + strvec_pushl(&args, "checkout", "-q", NULL); + if (subforce) + strvec_push(&args, "-f"); + break; 
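The reachability test in is_tip_reachable() above relies on `git rev-list -n 1 <oid> --not --all` printing nothing (and exiting successfully) exactly when <oid> is already reachable from an existing ref, which is what lets the update procedure below skip the fetch entirely. A rough standalone analogue using popen() instead of capture_command() — assuming a git binary on PATH, and a repository path plus hex object ID on the command line; no shell quoting is attempted, this is illustration only — might be:

	#include <stdio.h>

	/*
	 * Sketch only: return 1 if oid_hex is reachable from an existing ref
	 * in the repository at dir, 0 otherwise (including when rev-list fails).
	 */
	static int tip_is_reachable(const char *dir, const char *oid_hex)
	{
		char cmd[1024];
		char buf[64];
		size_t n;
		int status;
		FILE *fp;

		snprintf(cmd, sizeof(cmd),
			 "git -C %s rev-list -n 1 %s --not --all 2>/dev/null",
			 dir, oid_hex);
		fp = popen(cmd, "r");
		if (!fp)
			return 0;
		n = fread(buf, 1, sizeof(buf), fp);
		status = pclose(fp);
		/* empty output plus a clean exit means "already reachable" */
		return status == 0 && n == 0;
	}

	int main(int argc, char **argv)
	{
		if (argc != 3)
			return 1;
		printf("%s\n", tip_is_reachable(argv[1], argv[2]) ?
		       "reachable" : "needs fetch");
		return 0;
	}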
+ case SM_UPDATE_REBASE: + git_cmd = 1; + strvec_push(&args, "rebase"); + if (ud->quiet) + strvec_push(&args, "--quiet"); + must_die_on_failure = 1; + break; + case SM_UPDATE_MERGE: + git_cmd = 1; + strvec_push(&args, "merge"); + if (ud->quiet) + strvec_push(&args, "--quiet"); + must_die_on_failure = 1; + break; + case SM_UPDATE_COMMAND: + git_cmd = 0; + strvec_push(&args, ud->update_strategy.command); + must_die_on_failure = 1; + break; + default: + BUG("unexpected update strategy type: %s", + submodule_strategy_to_string(&ud->update_strategy)); + } + strvec_push(&args, oid); + + prepare_submodule_repo_env(&child_env); + if (run_command_v_opt_cd_env(args.v, git_cmd ? RUN_GIT_CMD : RUN_USING_SHELL, + ud->sm_path, child_env.v)) { + switch (ud->update_strategy.type) { + case SM_UPDATE_CHECKOUT: + printf(_("Unable to checkout '%s' in submodule path '%s'"), + oid, ud->displaypath); + break; + case SM_UPDATE_REBASE: + printf(_("Unable to rebase '%s' in submodule path '%s'"), + oid, ud->displaypath); + break; + case SM_UPDATE_MERGE: + printf(_("Unable to merge '%s' in submodule path '%s'"), + oid, ud->displaypath); + break; + case SM_UPDATE_COMMAND: + printf(_("Execution of '%s %s' failed in submodule path '%s'"), + ud->update_strategy.command, oid, ud->displaypath); + break; + default: + BUG("unexpected update strategy type: %s", + submodule_strategy_to_string(&ud->update_strategy)); + } + /* + * NEEDSWORK: We are currently printing to stdout with error + * return so that the shell caller handles the error output + * properly. Once we start handling the error messages within + * C, we should use die() instead. + */ + if (must_die_on_failure) + return 2; + /* + * This signifies to the caller in shell that the command + * failed without dying + */ + return 1; + } + + switch (ud->update_strategy.type) { + case SM_UPDATE_CHECKOUT: + printf(_("Submodule path '%s': checked out '%s'\n"), + ud->displaypath, oid); + break; + case SM_UPDATE_REBASE: + printf(_("Submodule path '%s': rebased into '%s'\n"), + ud->displaypath, oid); + break; + case SM_UPDATE_MERGE: + printf(_("Submodule path '%s': merged in '%s'\n"), + ud->displaypath, oid); + break; + case SM_UPDATE_COMMAND: + printf(_("Submodule path '%s': '%s %s'\n"), + ud->displaypath, ud->update_strategy.command, oid); + break; + default: + BUG("unexpected update strategy type: %s", + submodule_strategy_to_string(&ud->update_strategy)); + } + + return 0; +} + +static int do_run_update_procedure(struct update_data *ud) +{ + int subforce = is_null_oid(&ud->suboid) || ud->force; + + if (!ud->nofetch) { + /* + * Run fetch only if `oid` isn't present or it + * is not reachable from a ref. + */ + if (!is_tip_reachable(ud->sm_path, &ud->oid) && + fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, NULL) && + !ud->quiet) + fprintf_ln(stderr, + _("Unable to fetch in submodule path '%s'; " + "trying to directly fetch %s:"), + ud->displaypath, oid_to_hex(&ud->oid)); + /* + * Now we tried the usual fetch, but `oid` may + * not be reachable from any of the refs. + */ + if (!is_tip_reachable(ud->sm_path, &ud->oid) && + fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, &ud->oid)) + die(_("Fetched in submodule path '%s', but it did not " + "contain %s. 
Direct fetching of that commit failed."), + ud->displaypath, oid_to_hex(&ud->oid)); + } + + return run_update_command(ud, subforce); +} + static void update_submodule(struct update_clone_data *ucd) { fprintf(stdout, "dummy %s %d\t%s\n", @@ -2395,6 +2554,73 @@ static int update_clone(int argc, const char **argv, const char *prefix) return update_submodules(&suc); } +static int run_update_procedure(int argc, const char **argv, const char *prefix) +{ + int force = 0, quiet = 0, nofetch = 0, just_cloned = 0; + char *prefixed_path, *update = NULL; + struct update_data update_data = UPDATE_DATA_INIT; + + struct option options[] = { + OPT__QUIET(&quiet, N_("suppress output for update by rebase or merge")), + OPT__FORCE(&force, N_("force checkout updates"), 0), + OPT_BOOL('N', "no-fetch", &nofetch, + N_("don't fetch new objects from the remote site")), + OPT_BOOL(0, "just-cloned", &just_cloned, + N_("overrides update mode in case the repository is a fresh clone")), + OPT_INTEGER(0, "depth", &update_data.depth, N_("depth for shallow fetch")), + OPT_STRING(0, "prefix", &prefix, + N_("path"), + N_("path into the working tree")), + OPT_STRING(0, "update", &update, + N_("string"), + N_("rebase, merge, checkout or none")), + OPT_STRING(0, "recursive-prefix", &update_data.recursive_prefix, N_("path"), + N_("path into the working tree, across nested " + "submodule boundaries")), + OPT_CALLBACK_F(0, "oid", &update_data.oid, N_("sha1"), + N_("SHA1 expected by superproject"), PARSE_OPT_NONEG, + parse_opt_object_id), + OPT_CALLBACK_F(0, "suboid", &update_data.suboid, N_("subsha1"), + N_("SHA1 of submodule's HEAD"), PARSE_OPT_NONEG, + parse_opt_object_id), + OPT_END() + }; + + const char *const usage[] = { + N_("git submodule--helper run-update-procedure [<options>] <path>"), + NULL + }; + + argc = parse_options(argc, argv, prefix, options, usage, 0); + + if (argc != 1) + usage_with_options(usage, options); + + update_data.force = !!force; + update_data.quiet = !!quiet; + update_data.nofetch = !!nofetch; + update_data.just_cloned = !!just_cloned; + update_data.sm_path = argv[0]; + + if (update_data.recursive_prefix) + prefixed_path = xstrfmt("%s%s", update_data.recursive_prefix, update_data.sm_path); + else + prefixed_path = xstrdup(update_data.sm_path); + + update_data.displaypath = get_submodule_displaypath(prefixed_path, prefix); + + determine_submodule_update_strategy(the_repository, update_data.just_cloned, + update_data.sm_path, update, + &update_data.update_strategy); + + free(prefixed_path); + + if (!oideq(&update_data.oid, &update_data.suboid) || update_data.force) + return do_run_update_procedure(&update_data); + + return 3; +} + static int resolve_relative_path(int argc, const char **argv, const char *prefix) { struct strbuf sb = STRBUF_INIT; @@ -2765,7 +2991,7 @@ struct add_data { const char *prefix; const char *branch; const char *reference_path; - const char *sm_path; + char *sm_path; const char *sm_name; const char *repo; const char *realrepo; @@ -2877,61 +3103,244 @@ static int add_submodule(const struct add_data *add_data) return 0; } -static int add_clone(int argc, const char **argv, const char *prefix) +static int config_submodule_in_gitmodules(const char *name, const char *var, const char *value) +{ + char *key; + int ret; + + if (!is_writing_gitmodules_ok()) + die(_("please make sure that the .gitmodules file is in the working tree")); + + key = xstrfmt("submodule.%s.%s", name, var); + ret = config_set_in_gitmodules_file_gently(key, value); + free(key); + + return ret; +} + +static void 
configure_added_submodule(struct add_data *add_data) +{ + char *key; + char *val = NULL; + struct child_process add_submod = CHILD_PROCESS_INIT; + struct child_process add_gitmodules = CHILD_PROCESS_INIT; + + key = xstrfmt("submodule.%s.url", add_data->sm_name); + git_config_set_gently(key, add_data->realrepo); + free(key); + + add_submod.git_cmd = 1; + strvec_pushl(&add_submod.args, "add", + "--no-warn-embedded-repo", NULL); + if (add_data->force) + strvec_push(&add_submod.args, "--force"); + strvec_pushl(&add_submod.args, "--", add_data->sm_path, NULL); + + if (run_command(&add_submod)) + die(_("Failed to add submodule '%s'"), add_data->sm_path); + + if (config_submodule_in_gitmodules(add_data->sm_name, "path", add_data->sm_path) || + config_submodule_in_gitmodules(add_data->sm_name, "url", add_data->repo)) + die(_("Failed to register submodule '%s'"), add_data->sm_path); + + if (add_data->branch) { + if (config_submodule_in_gitmodules(add_data->sm_name, + "branch", add_data->branch)) + die(_("Failed to register submodule '%s'"), add_data->sm_path); + } + + add_gitmodules.git_cmd = 1; + strvec_pushl(&add_gitmodules.args, + "add", "--force", "--", ".gitmodules", NULL); + + if (run_command(&add_gitmodules)) + die(_("Failed to register submodule '%s'"), add_data->sm_path); + + /* + * NEEDSWORK: In a multi-working-tree world this needs to be + * set in the per-worktree config. + */ + /* + * NEEDSWORK: In the longer run, we need to get rid of this + * pattern of querying "submodule.active" before calling + * is_submodule_active(), since that function needs to find + * out the value of "submodule.active" again anyway. + */ + if (!git_config_get_string("submodule.active", &val) && val) { + /* + * If the submodule being added isn't already covered by the + * current configured pathspec, set the submodule's active flag + */ + if (!is_submodule_active(the_repository, add_data->sm_path)) { + key = xstrfmt("submodule.%s.active", add_data->sm_name); + git_config_set_gently(key, "true"); + free(key); + } + } else { + key = xstrfmt("submodule.%s.active", add_data->sm_name); + git_config_set_gently(key, "true"); + free(key); + } +} + +static void die_on_index_match(const char *path, int force) { - int force = 0, quiet = 0, dissociate = 0, progress = 0; + struct pathspec ps; + const char *args[] = { path, NULL }; + parse_pathspec(&ps, 0, PATHSPEC_PREFER_CWD, NULL, args); + + if (read_cache_preload(NULL) < 0) + die(_("index file corrupt")); + + if (ps.nr) { + int i; + char *ps_matched = xcalloc(ps.nr, 1); + + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); + + /* + * Since there is only one pathspec, we just need + * need to check ps_matched[0] to know if a cache + * entry matched. 
+ */ + for (i = 0; i < active_nr; i++) { + ce_path_match(&the_index, active_cache[i], &ps, + ps_matched); + + if (ps_matched[0]) { + if (!force) + die(_("'%s' already exists in the index"), + path); + if (!S_ISGITLINK(active_cache[i]->ce_mode)) + die(_("'%s' already exists in the index " + "and is not a submodule"), path); + break; + } + } + free(ps_matched); + } +} + +static void die_on_repo_without_commits(const char *path) +{ + struct strbuf sb = STRBUF_INIT; + strbuf_addstr(&sb, path); + if (is_nonbare_repository_dir(&sb)) { + struct object_id oid; + if (resolve_gitlink_ref(path, "HEAD", &oid) < 0) + die(_("'%s' does not have a commit checked out"), path); + } +} + +static int module_add(int argc, const char **argv, const char *prefix) +{ + int force = 0, quiet = 0, progress = 0, dissociate = 0; struct add_data add_data = ADD_DATA_INIT; struct option options[] = { - OPT_STRING('b', "branch", &add_data.branch, - N_("branch"), - N_("branch of repository to checkout on cloning")), - OPT_STRING(0, "prefix", &prefix, - N_("path"), - N_("alternative anchor for relative paths")), - OPT_STRING(0, "path", &add_data.sm_path, - N_("path"), - N_("where the new submodule will be cloned to")), - OPT_STRING(0, "name", &add_data.sm_name, - N_("string"), - N_("name of the new submodule")), - OPT_STRING(0, "url", &add_data.realrepo, - N_("string"), - N_("url where to clone the submodule from")), - OPT_STRING(0, "reference", &add_data.reference_path, - N_("repo"), - N_("reference repository")), - OPT_BOOL(0, "dissociate", &dissociate, - N_("use --reference only while cloning")), - OPT_INTEGER(0, "depth", &add_data.depth, - N_("depth for shallow clones")), - OPT_BOOL(0, "progress", &progress, - N_("force cloning progress")), + OPT_STRING('b', "branch", &add_data.branch, N_("branch"), + N_("branch of repository to add as submodule")), OPT__FORCE(&force, N_("allow adding an otherwise ignored submodule path"), PARSE_OPT_NOCOMPLETE), - OPT__QUIET(&quiet, "suppress output for cloning a submodule"), + OPT__QUIET(&quiet, N_("print only error messages")), + OPT_BOOL(0, "progress", &progress, N_("force cloning progress")), + OPT_STRING(0, "reference", &add_data.reference_path, N_("repository"), + N_("reference repository")), + OPT_BOOL(0, "dissociate", &dissociate, N_("borrow the objects from reference repositories")), + OPT_STRING(0, "name", &add_data.sm_name, N_("name"), + N_("sets the submodule’s name to the given string " + "instead of defaulting to its path")), + OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")), OPT_END() }; const char *const usage[] = { - N_("git submodule--helper add-clone [<options>...] 
" - "--url <url> --path <path> --name <name>"), + N_("git submodule--helper add [<options>] [--] <repository> [<path>]"), NULL }; argc = parse_options(argc, argv, prefix, options, usage, 0); - if (argc != 0) + if (!is_writing_gitmodules_ok()) + die(_("please make sure that the .gitmodules file is in the working tree")); + + if (prefix && *prefix && + add_data.reference_path && !is_absolute_path(add_data.reference_path)) + add_data.reference_path = xstrfmt("%s%s", prefix, add_data.reference_path); + + if (argc == 0 || argc > 2) usage_with_options(usage, options); + add_data.repo = argv[0]; + if (argc == 1) + add_data.sm_path = git_url_basename(add_data.repo, 0, 0); + else + add_data.sm_path = xstrdup(argv[1]); + + if (prefix && *prefix && !is_absolute_path(add_data.sm_path)) + add_data.sm_path = xstrfmt("%s%s", prefix, add_data.sm_path); + + if (starts_with_dot_dot_slash(add_data.repo) || + starts_with_dot_slash(add_data.repo)) { + if (prefix) + die(_("Relative path can only be used from the toplevel " + "of the working tree")); + + /* dereference source url relative to parent's url */ + add_data.realrepo = resolve_relative_url(add_data.repo, NULL, 1); + } else if (is_dir_sep(add_data.repo[0]) || strchr(add_data.repo, ':')) { + add_data.realrepo = add_data.repo; + } else { + die(_("repo URL: '%s' must be absolute or begin with ./|../"), + add_data.repo); + } + + /* + * normalize path: + * multiple //; leading ./; /./; /../; + */ + normalize_path_copy(add_data.sm_path, add_data.sm_path); + strip_dir_trailing_slashes(add_data.sm_path); + + die_on_index_match(add_data.sm_path, force); + die_on_repo_without_commits(add_data.sm_path); + + if (!force) { + int exit_code = -1; + struct strbuf sb = STRBUF_INIT; + struct child_process cp = CHILD_PROCESS_INIT; + cp.git_cmd = 1; + cp.no_stdout = 1; + strvec_pushl(&cp.args, "add", "--dry-run", "--ignore-missing", + "--no-warn-embedded-repo", add_data.sm_path, NULL); + if ((exit_code = pipe_command(&cp, NULL, 0, NULL, 0, &sb, 0))) { + strbuf_complete_line(&sb); + fputs(sb.buf, stderr); + free(add_data.sm_path); + return exit_code; + } + strbuf_release(&sb); + } + + if(!add_data.sm_name) + add_data.sm_name = add_data.sm_path; + + if (check_submodule_name(add_data.sm_name)) + die(_("'%s' is not a valid submodule name"), add_data.sm_name); + add_data.prefix = prefix; - add_data.progress = !!progress; - add_data.dissociate = !!dissociate; add_data.force = !!force; add_data.quiet = !!quiet; + add_data.progress = !!progress; + add_data.dissociate = !!dissociate; - if (add_submodule(&add_data)) + if (add_submodule(&add_data)) { + free(add_data.sm_path); return 1; + } + configure_added_submodule(&add_data); + free(add_data.sm_path); return 0; } @@ -2948,12 +3357,12 @@ static struct cmd_struct commands[] = { {"list", module_list, 0}, {"name", module_name, 0}, {"clone", module_clone, 0}, - {"add-clone", add_clone, 0}, + {"add", module_add, SUPPORT_SUPER_PREFIX}, {"update-module-mode", module_update_module_mode, 0}, {"update-clone", update_clone, 0}, + {"run-update-procedure", run_update_procedure, 0}, {"ensure-core-worktree", ensure_core_worktree, 0}, {"relative-path", resolve_relative_path, 0}, - {"resolve-relative-url", resolve_relative_url, 0}, {"resolve-relative-url-test", resolve_relative_url_test, 0}, {"foreach", module_foreach, SUPPORT_SUPER_PREFIX}, {"init", module_init, SUPPORT_SUPER_PREFIX}, diff --git a/builtin/tag.c b/builtin/tag.c index 82fcfc0982..065b6bf093 100644 --- a/builtin/tag.c +++ b/builtin/tag.c @@ -146,7 +146,7 @@ static int 
verify_tag(const char *name, const char *ref, const struct object_id *oid, void *cb_data) { int flags; - const struct ref_format *format = cb_data; + struct ref_format *format = cb_data; flags = GPG_VERIFY_VERBOSE; if (format->format) @@ -293,9 +293,7 @@ static void create_tag(const struct object_id *object, const char *object_ref, /* write the template message before editing: */ path = git_pathdup("TAG_EDITMSG"); - fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0600); - if (fd < 0) - die_errno(_("could not create file '%s'"), path); + fd = xopen(path, O_CREAT | O_TRUNC | O_WRONLY, 0600); if (opt->message_given) { write_or_die(fd, buf->buf, buf->len); diff --git a/builtin/update-index.c b/builtin/update-index.c index f1f16f2de5..187203e8bb 100644 --- a/builtin/update-index.c +++ b/builtin/update-index.c @@ -95,9 +95,7 @@ static int create_file(const char *path) { int fd; path = get_mtime_path(path); - fd = open(path, O_CREAT | O_RDWR, 0644); - if (fd < 0) - die_errno(_("failed to create file %s"), path); + fd = xopen(path, O_CREAT | O_RDWR, 0644); return fd; } diff --git a/builtin/update-ref.c b/builtin/update-ref.c index 6029a80544..a84e7b47a2 100644 --- a/builtin/update-ref.c +++ b/builtin/update-ref.c @@ -302,6 +302,12 @@ static void parse_cmd_verify(struct ref_transaction *transaction, strbuf_release(&err); } +static void report_ok(const char *command) +{ + fprintf(stdout, "%s: ok\n", command); + fflush(stdout); +} + static void parse_cmd_option(struct ref_transaction *transaction, const char *next, const char *end) { @@ -317,7 +323,7 @@ static void parse_cmd_start(struct ref_transaction *transaction, { if (*next != line_termination) die("start: extra input: %s", next); - puts("start: ok"); + report_ok("start"); } static void parse_cmd_prepare(struct ref_transaction *transaction, @@ -328,7 +334,7 @@ static void parse_cmd_prepare(struct ref_transaction *transaction, die("prepare: extra input: %s", next); if (ref_transaction_prepare(transaction, &error)) die("prepare: %s", error.buf); - puts("prepare: ok"); + report_ok("prepare"); } static void parse_cmd_abort(struct ref_transaction *transaction, @@ -339,7 +345,7 @@ static void parse_cmd_abort(struct ref_transaction *transaction, die("abort: extra input: %s", next); if (ref_transaction_abort(transaction, &error)) die("abort: %s", error.buf); - puts("abort: ok"); + report_ok("abort"); } static void parse_cmd_commit(struct ref_transaction *transaction, @@ -350,7 +356,7 @@ static void parse_cmd_commit(struct ref_transaction *transaction, die("commit: extra input: %s", next); if (ref_transaction_commit(transaction, &error)) die("commit: %s", error.buf); - puts("commit: ok"); + report_ok("commit"); ref_transaction_free(transaction); } diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c index 6da8fa2607..125af53885 100644 --- a/builtin/upload-pack.c +++ b/builtin/upload-pack.c @@ -16,16 +16,18 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix) { const char *dir; int strict = 0; - struct upload_pack_options opts = { 0 }; - struct serve_options serve_opts = SERVE_OPTIONS_INIT; + int advertise_refs = 0; + int stateless_rpc = 0; + int timeout = 0; struct option options[] = { - OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc, + OPT_BOOL(0, "stateless-rpc", &stateless_rpc, N_("quit after a single request/response exchange")), - OPT_BOOL(0, "advertise-refs", &opts.advertise_refs, - N_("exit immediately after initial ref advertisement")), + OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs, + N_("serve up the 
info/refs for git-http-backend")), + OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"), OPT_BOOL(0, "strict", &strict, N_("do not try <directory>/.git/ if <directory> is no Git directory")), - OPT_INTEGER(0, "timeout", &opts.timeout, + OPT_INTEGER(0, "timeout", &timeout, N_("interrupt transfer after <n> seconds of inactivity")), OPT_END() }; @@ -38,9 +40,6 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix) if (argc != 1) usage_with_options(upload_pack_usage, options); - if (opts.timeout) - opts.daemon_mode = 1; - setup_path(); dir = argv[0]; @@ -50,21 +49,22 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix) switch (determine_protocol_version_server()) { case protocol_v2: - serve_opts.advertise_capabilities = opts.advertise_refs; - serve_opts.stateless_rpc = opts.stateless_rpc; - serve(&serve_opts); + if (advertise_refs) + protocol_v2_advertise_capabilities(); + else + protocol_v2_serve_loop(stateless_rpc); break; case protocol_v1: /* * v1 is just the original protocol with a version string, * so just fall through after writing the version string. */ - if (opts.advertise_refs || !opts.stateless_rpc) + if (advertise_refs || !stateless_rpc) packet_write_fmt(1, "version 1\n"); /* fallthrough */ case protocol_v0: - upload_pack(&opts); + upload_pack(advertise_refs, stateless_rpc, timeout); break; case protocol_unknown_version: BUG("unknown protocol version"); diff --git a/bulk-checkin.c b/bulk-checkin.c index b023d9959a..8785b2ac80 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -23,9 +23,25 @@ static struct bulk_checkin_state { uint32_t nr_written; } state; +static void finish_tmp_packfile(struct strbuf *basename, + const char *pack_tmp_name, + struct pack_idx_entry **written_list, + uint32_t nr_written, + struct pack_idx_option *pack_idx_opts, + unsigned char hash[]) +{ + char *idx_tmp_name = NULL; + + stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written, + pack_idx_opts, hash, &idx_tmp_name); + rename_tmp_packfile_idx(basename, &idx_tmp_name); + + free(idx_tmp_name); +} + static void finish_bulk_checkin(struct bulk_checkin_state *state) { - struct object_id oid; + unsigned char hash[GIT_MAX_RAWSZ]; struct strbuf packname = STRBUF_INIT; int i; @@ -37,19 +53,20 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state) unlink(state->pack_tmp_name); goto clear_exit; } else if (state->nr_written == 1) { - finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); + finalize_hashfile(state->f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); } else { - int fd = finalize_hashfile(state->f, oid.hash, 0); - fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name, - state->nr_written, oid.hash, + int fd = finalize_hashfile(state->f, hash, 0); + fixup_pack_header_footer(fd, hash, state->pack_tmp_name, + state->nr_written, hash, state->offset); close(fd); } - strbuf_addf(&packname, "%s/pack/pack-", get_object_directory()); + strbuf_addf(&packname, "%s/pack/pack-%s.", get_object_directory(), + hash_to_hex(hash)); finish_tmp_packfile(&packname, state->pack_tmp_name, state->written, state->nr_written, - &state->pack_idx_opts, oid.hash); + &state->pack_idx_opts, hash); for (i = 0; i < state->nr_written; i++) free(state->written[i]); @@ -569,18 +569,18 @@ err: } int unbundle(struct repository *r, struct bundle_header *header, - int bundle_fd, int flags) + int bundle_fd, struct strvec *extra_index_pack_args) { - const char *argv_index_pack[] = {"index-pack", - "--fix-thin", "--stdin", NULL, 
NULL}; struct child_process ip = CHILD_PROCESS_INIT; + strvec_pushl(&ip.args, "index-pack", "--fix-thin", "--stdin", NULL); - if (flags & BUNDLE_VERBOSE) - argv_index_pack[3] = "-v"; + if (extra_index_pack_args) { + strvec_pushv(&ip.args, extra_index_pack_args->v); + strvec_clear(extra_index_pack_args); + } if (verify_bundle(r, header, 0)) return -1; - ip.argv = argv_index_pack; ip.in = bundle_fd; ip.no_stdout = 1; ip.git_cmd = 1; @@ -26,9 +26,19 @@ int create_bundle(struct repository *r, const char *path, int argc, const char **argv, struct strvec *pack_options, int version); int verify_bundle(struct repository *r, struct bundle_header *header, int verbose); -#define BUNDLE_VERBOSE 1 + +/** + * Unbundle after reading the header with read_bundle_header(). + * + * We'll invoke "git index-pack --stdin --fix-thin" for you on the + * provided `bundle_fd` from read_bundle_header(). + * + * Provide "extra_index_pack_args" to pass any extra arguments + * (e.g. "-v" for verbose/progress), NULL otherwise. The provided + * "extra_index_pack_args" (if any) will be strvec_clear()'d for you. + */ int unbundle(struct repository *r, struct bundle_header *header, - int bundle_fd, int flags); + int bundle_fd, struct strvec *extra_index_pack_args); int list_bundle_refs(struct bundle_header *header, int argc, const char **argv); @@ -958,7 +958,6 @@ extern char *apply_default_ignorewhitespace; extern const char *git_attributes_file; extern const char *git_hooks_path; extern int zlib_compression_level; -extern int core_compression_level; extern int pack_compression_level; extern size_t packed_git_window_size; extern size_t packed_git_limit; @@ -1211,51 +1210,10 @@ enum scld_error safe_create_leading_directories(char *path); enum scld_error safe_create_leading_directories_const(const char *path); enum scld_error safe_create_leading_directories_no_share(char *path); -/* - * Callback function for raceproof_create_file(). This function is - * expected to do something that makes dirname(path) permanent despite - * the fact that other processes might be cleaning up empty - * directories at the same time. Usually it will create a file named - * path, but alternatively it could create another file in that - * directory, or even chdir() into that directory. The function should - * return 0 if the action was completed successfully. On error, it - * should return a nonzero result and set errno. - * raceproof_create_file() treats two errno values specially: - * - * - ENOENT -- dirname(path) does not exist. In this case, - * raceproof_create_file() tries creating dirname(path) - * (and any parent directories, if necessary) and calls - * the function again. - * - * - EISDIR -- the file already exists and is a directory. In this - * case, raceproof_create_file() removes the directory if - * it is empty (and recursively any empty directories that - * it contains) and calls the function again. - * - * Any other errno causes raceproof_create_file() to fail with the - * callback's return value and errno. - * - * Obviously, this function should be OK with being called again if it - * fails with ENOENT or EISDIR. In other scenarios it will not be - * called again. - */ -typedef int create_file_fn(const char *path, void *cb); - -/* - * Create a file in dirname(path) by calling fn, creating leading - * directories if necessary. Retry a few times in case we are racing - * with another process that is trying to clean up the directory that - * contains path. See the documentation for create_file_fn for more - * details. 
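The new unbundle() contract above is worth spelling out from the caller's side: any extra `git index-pack` options travel in a strvec that unbundle() itself strvec_clear()s. A hedged sketch of a caller inside the Git tree — the variables r, header and bundle_fd are assumed to come from an earlier read_bundle_header() call, and `verbose` is a hypothetical flag:

	struct strvec extra_args = STRVEC_INIT;

	if (verbose)
		strvec_push(&extra_args, "-v");

	if (unbundle(r, &header, bundle_fd,
		     verbose ? &extra_args : NULL))
		die(_("could not unbundle"));
	/* no strvec_clear(&extra_args) here: unbundle() already did it */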
- * - * Return the value and set the errno that resulted from the most - * recent call of fn. fn is always called at least once, and will be - * called more than once if it returns ENOENT or EISDIR. - */ -int raceproof_create_file(const char *path, create_file_fn fn, void *cb); - int mkdir_in_gitdir(const char *path); -char *expand_user_path(const char *path, int real_home); +char *interpolate_path(const char *path, int real_home); +/* NEEDSWORK: remove this synonym once in-flight topics have migrated */ +#define expand_user_path interpolate_path const char *enter_repo(const char *path, int strict); static inline int is_absolute_path(const char *path) { @@ -1297,6 +1255,13 @@ int looks_like_command_line_option(const char *str); /** * Return a newly allocated string with the evaluation of + * "$XDG_CONFIG_HOME/$subdir/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise + * "$HOME/.config/$subdir/$filename". Return NULL upon error. + */ +char *xdg_config_home_for(const char *subdir, const char *filename); + +/** + * Return a newly allocated string with the evaluation of * "$XDG_CONFIG_HOME/git/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise * "$HOME/.config/git/$filename". Return NULL upon error. */ @@ -1736,6 +1701,8 @@ extern const char *git_mailmap_blob; void maybe_flush_or_die(FILE *, const char *); __attribute__((format (printf, 2, 3))) void fprintf_or_die(FILE *, const char *fmt, ...); +void fwrite_or_die(FILE *f, const void *buf, size_t count); +void fflush_or_die(FILE *f); #define COPY_READ_ERROR (-2) #define COPY_WRITE_ERROR (-3) diff --git a/check_bindir b/check_bindir deleted file mode 100755 index 623eadcbb7..0000000000 --- a/check_bindir +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -bindir="$1" -gitexecdir="$2" -gitcmd="$3" -if test "$bindir" != "$gitexecdir" && test -x "$gitcmd" -then - echo - echo "!! You have installed git-* commands to new gitexecdir." - echo "!! Old version git-* commands still remain in bindir." - echo "!! Mixing two versions of Git will lead to problems." - echo "!! Please remove old version commands in bindir now." 
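The xdg_config_home_for() helper declared above generalizes the existing xdg_config_home() from the hard-coded "git" subdirectory to an arbitrary one. A standalone sketch of the documented lookup order — "$XDG_CONFIG_HOME/$subdir/$filename" when $XDG_CONFIG_HOME is non-empty, otherwise "$HOME/.config/$subdir/$filename", NULL on error — could be:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Sketch of the lookup order only; Git's version builds on its own
	 * path helpers rather than raw malloc()/snprintf(). */
	static char *xdg_config_home_for_sketch(const char *subdir, const char *filename)
	{
		const char *xdg = getenv("XDG_CONFIG_HOME");
		const char *home = getenv("HOME");
		char *path;
		size_t len;

		if (xdg && *xdg) {
			len = strlen(xdg) + strlen(subdir) + strlen(filename) + 3;
			path = malloc(len);
			if (path)
				snprintf(path, len, "%s/%s/%s", xdg, subdir, filename);
			return path;
		}
		if (home) {
			len = strlen(home) + strlen("/.config/") +
			      strlen(subdir) + strlen(filename) + 2;
			path = malloc(len);
			if (path)
				snprintf(path, len, "%s/.config/%s/%s",
					 home, subdir, filename);
			return path;
		}
		return NULL;
	}

	int main(void)
	{
		char *p = xdg_config_home_for_sketch("git", "attributes");
		if (p) {
			printf("%s\n", p);
			free(p);
		}
		return 0;
	}

Presumably the pre-existing xdg_config_home(filename) then reduces to the subdir == "git" special case of this helper.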
- echo -fi diff --git a/ci/install-docker-dependencies.sh b/ci/install-docker-dependencies.sh index 26a6689766..07a8c6b199 100755 --- a/ci/install-docker-dependencies.sh +++ b/ci/install-docker-dependencies.sh @@ -15,4 +15,8 @@ linux-musl) apk add --update build-base curl-dev openssl-dev expat-dev gettext \ pcre2-dev python3 musl-libintl perl-utils ncurses >/dev/null ;; +pedantic) + dnf -yq update >/dev/null && + dnf -yq install make gcc findutils diffutils perl python3 gettext zlib-devel expat-devel openssl-devel curl-devel pcre2-devel >/dev/null + ;; esac diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh index 3ce81ffee9..cc62616d80 100755 --- a/ci/run-build-and-tests.sh +++ b/ci/run-build-and-tests.sh @@ -10,6 +10,11 @@ windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";; *) ln -s "$cache_dir/.prove" t/.prove;; esac +if test "$jobname" = "pedantic" +then + export DEVOPTS=pedantic +fi + make case "$jobname" in linux-gcc) @@ -23,6 +28,7 @@ linux-gcc) export GIT_TEST_COMMIT_GRAPH=1 export GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=1 export GIT_TEST_MULTI_PACK_INDEX=1 + export GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=1 export GIT_TEST_ADD_I_USE_BUILTIN=1 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master export GIT_TEST_WRITE_REV_INDEX=1 @@ -35,10 +41,9 @@ linux-clang) export GIT_TEST_DEFAULT_HASH=sha256 make test ;; -linux-gcc-4.8) +linux-gcc-4.8|pedantic) # Don't run the tests; we only care about whether Git can be - # built with GCC 4.8, as it errors out on some undesired (C99) - # constructs that newer compilers seem to quietly accept. + # built with GCC 4.8 or with pedantic ;; *) make test diff --git a/commit-graph.c b/commit-graph.c index 3860a0d847..2706683acf 100644 --- a/commit-graph.c +++ b/commit-graph.c @@ -713,6 +713,7 @@ static void close_commit_graph_one(struct commit_graph *g) if (!g) return; + clear_commit_graph_data_slab(&commit_graph_data_slab); close_commit_graph_one(g->base_graph); free_commit_graph(g); } @@ -723,7 +724,7 @@ void close_commit_graph(struct raw_object_store *o) o->commit_graph = NULL; } -static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t *pos) +static int bsearch_graph(struct commit_graph *g, const struct object_id *oid, uint32_t *pos) { return bsearch_hash(oid->hash, g->chunk_oid_fanout, g->chunk_oid_lookup, g->hash_len, pos); @@ -864,26 +865,55 @@ static int fill_commit_in_graph(struct repository *r, return 1; } -static int find_commit_in_graph(struct commit *item, struct commit_graph *g, uint32_t *pos) +static int search_commit_pos_in_graph(const struct object_id *id, struct commit_graph *g, uint32_t *pos) +{ + struct commit_graph *cur_g = g; + uint32_t lex_index; + + while (cur_g && !bsearch_graph(cur_g, id, &lex_index)) + cur_g = cur_g->base_graph; + + if (cur_g) { + *pos = lex_index + cur_g->num_commits_in_base; + return 1; + } + + return 0; +} + +static int find_commit_pos_in_graph(struct commit *item, struct commit_graph *g, uint32_t *pos) { uint32_t graph_pos = commit_graph_position(item); if (graph_pos != COMMIT_NOT_FROM_GRAPH) { *pos = graph_pos; return 1; } else { - struct commit_graph *cur_g = g; - uint32_t lex_index; + return search_commit_pos_in_graph(&item->object.oid, g, pos); + } +} + +struct commit *lookup_commit_in_graph(struct repository *repo, const struct object_id *id) +{ + struct commit *commit; + uint32_t pos; - while (cur_g && !bsearch_graph(cur_g, &(item->object.oid), &lex_index)) - cur_g = cur_g->base_graph; + if (!repo->objects->commit_graph) + return NULL; + if 
(!search_commit_pos_in_graph(id, repo->objects->commit_graph, &pos)) + return NULL; + if (!repo_has_object_file(repo, id)) + return NULL; - if (cur_g) { - *pos = lex_index + cur_g->num_commits_in_base; - return 1; - } + commit = lookup_commit(repo, id); + if (!commit) + return NULL; + if (commit->object.parsed) + return commit; - return 0; - } + if (!fill_commit_in_graph(repo, commit, repo->objects->commit_graph, pos)) + return NULL; + + return commit; } static int parse_commit_in_graph_one(struct repository *r, @@ -895,7 +925,7 @@ static int parse_commit_in_graph_one(struct repository *r, if (item->object.parsed) return 1; - if (find_commit_in_graph(item, g, &pos)) + if (find_commit_pos_in_graph(item, g, &pos)) return fill_commit_in_graph(r, item, g, pos); return 0; @@ -921,7 +951,7 @@ void load_commit_graph_info(struct repository *r, struct commit *item) uint32_t pos; if (!prepare_commit_graph(r)) return; - if (find_commit_in_graph(item, r->objects->commit_graph, &pos)) + if (find_commit_pos_in_graph(item, r->objects->commit_graph, &pos)) fill_commit_graph_info(item, r->objects->commit_graph, pos); } @@ -1091,9 +1121,9 @@ static int write_graph_chunk_data(struct hashfile *f, edge_value += ctx->new_num_commits_in_base; else if (ctx->new_base_graph) { uint32_t pos; - if (find_commit_in_graph(parent->item, - ctx->new_base_graph, - &pos)) + if (find_commit_pos_in_graph(parent->item, + ctx->new_base_graph, + &pos)) edge_value = pos; } @@ -1122,9 +1152,9 @@ static int write_graph_chunk_data(struct hashfile *f, edge_value += ctx->new_num_commits_in_base; else if (ctx->new_base_graph) { uint32_t pos; - if (find_commit_in_graph(parent->item, - ctx->new_base_graph, - &pos)) + if (find_commit_pos_in_graph(parent->item, + ctx->new_base_graph, + &pos)) edge_value = pos; } @@ -1235,9 +1265,9 @@ static int write_graph_chunk_extra_edges(struct hashfile *f, edge_value += ctx->new_num_commits_in_base; else if (ctx->new_base_graph) { uint32_t pos; - if (find_commit_in_graph(parent->item, - ctx->new_base_graph, - &pos)) + if (find_commit_pos_in_graph(parent->item, + ctx->new_base_graph, + &pos)) edge_value = pos; } @@ -2096,7 +2126,7 @@ static void sort_and_scan_merged_commits(struct write_commit_graph_context *ctx) ctx->num_extra_edges = 0; for (i = 0; i < ctx->commits.nr; i++) { - display_progress(ctx->progress, i); + display_progress(ctx->progress, i + 1); if (i && oideq(&ctx->commits.list[i - 1]->object.oid, &ctx->commits.list[i]->object.oid)) { diff --git a/commit-graph.h b/commit-graph.h index 96c24fb577..04a94e1830 100644 --- a/commit-graph.h +++ b/commit-graph.h @@ -41,6 +41,14 @@ int open_commit_graph(const char *graph_file, int *fd, struct stat *st); int parse_commit_in_graph(struct repository *r, struct commit *item); /* + * Look up the given commit ID in the commit-graph. This will only return a + * commit if the ID exists both in the graph and in the object database such + * that we don't return commits whose object has been pruned. Otherwise, this + * function returns `NULL`. + */ +struct commit *lookup_commit_in_graph(struct repository *repo, const struct object_id *id); + +/* * It is possible that we loaded commit contents from the commit buffer, * but we also want to ensure the commit-graph content is correctly * checked and filled. 
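The contract of lookup_commit_in_graph() above is stricter than a plain graph lookup: the commit must be present in the commit-graph and its object must still exist in the object database, so callers never act on commits whose objects have been pruned. A hedged sketch of how an in-tree caller might use it as a fast path before falling back to a regular lookup-and-parse (lookup_commit_fast is a hypothetical helper name; the usual in-tree headers such as commit.h and commit-graph.h are assumed):

	static struct commit *lookup_commit_fast(struct repository *r,
						 const struct object_id *oid)
	{
		struct commit *commit = lookup_commit_in_graph(r, oid);

		if (commit)
			return commit;	/* graph hit, and the object still exists */

		/* slow path: regular lookup plus parse */
		commit = lookup_commit(r, oid);
		if (commit && repo_parse_commit(r, commit))
			return NULL;
		return commit;
	}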
Fill the graph_pos and generation members of @@ -25,6 +25,7 @@ static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **); int save_commit_buffer = 1; +int no_graft_file_deprecated_advice; const char *commit_type = "commit"; @@ -190,7 +191,8 @@ static int read_graft_file(struct repository *r, const char *graft_file) struct strbuf buf = STRBUF_INIT; if (!fp) return -1; - if (advice_graft_file_deprecated) + if (!no_graft_file_deprecated_advice && + advice_enabled(ADVICE_GRAFT_FILE_DEPRECATED)) advise(_("Support for <GIT_DIR>/info/grafts is deprecated\n" "and will be removed in a future Git version.\n" "\n" @@ -41,6 +41,7 @@ struct commit { }; extern int save_commit_buffer; +extern int no_graft_file_deprecated_advice; extern const char *commit_type; /* While we can decorate any object with a name, it's only used for commits.. */ diff --git a/compat/linux/procinfo.c b/compat/linux/procinfo.c new file mode 100644 index 0000000000..bc2f9382a1 --- /dev/null +++ b/compat/linux/procinfo.c @@ -0,0 +1,176 @@ +#include "cache.h" + +#include "strbuf.h" +#include "strvec.h" +#include "trace2.h" + +/* + * We need more complex parsing in stat_parent_pid() and + * parse_proc_stat() below than a dumb fscanf(). That's because while + * the statcomm field is surrounded by parentheses, the process itself + * is free to insert any arbitrary byte sequence its its name. That + * can include newlines, spaces, closing parentheses etc. + * + * See do_task_stat() in fs/proc/array.c in linux.git, this is in + * contrast with the escaped version of the name found in + * /proc/%d/status. + * + * So instead of using fscanf() we'll read N bytes from it, look for + * the first "(", and then the last ")", anything in-between is our + * process name. + * + * How much N do we need? On Linux /proc/sys/kernel/pid_max is 2^15 by + * default, but it can be raised set to values of up to 2^22. So + * that's 7 digits for a PID. We have 2 PIDs in the first four fields + * we're interested in, so 2 * 7 = 14. + * + * We then have 3 spaces between those four values, and we'd like to + * get to the space between the 4th and the 5th (the "pgrp" field) to + * make sure we read the entire "ppid" field. So that brings us up to + * 14 + 3 + 1 = 18. Add the two parentheses around the "comm" value + * and it's 20. The "state" value itself is then one character (now at + * 21). + * + * Finally the maximum length of the "comm" name itself is 15 + * characters, e.g. a setting of "123456789abcdefg" will be truncated + * to "123456789abcdef". See PR_SET_NAME in prctl(2). So all in all + * we'd need to read 21 + 15 = 36 bytes. + * + * Let's just read 2^6 (64) instead for good measure. If PID_MAX ever + * grows past 2^22 we'll be future-proof. We'll then anchor at the + * last ")" we find to locate the parent PID. + */ +#define STAT_PARENT_PID_READ_N 64 + +static int parse_proc_stat(struct strbuf *sb, struct strbuf *name, + int *statppid) +{ + const char *comm_lhs = strchr(sb->buf, '('); + const char *comm_rhs = strrchr(sb->buf, ')'); + const char *ppid_lhs, *ppid_rhs; + char *p; + pid_t ppid; + + if (!comm_lhs || !comm_rhs) + goto bad_kernel; + + /* + * We're at the ")", that's followed by " X ", where X is a + * single "state" character. So advance by 4 bytes. + */ + ppid_lhs = comm_rhs + 4; + + /* + * Read until the space between the "ppid" and "pgrp" fields + * to make sure we're anchored after the untruncated "ppid" + * field.. 
+ */ + ppid_rhs = strchr(ppid_lhs, ' '); + if (!ppid_rhs) + goto bad_kernel; + + ppid = strtol(ppid_lhs, &p, 10); + if (ppid_rhs == p) { + const char *comm = comm_lhs + 1; + size_t commlen = comm_rhs - comm; + + strbuf_add(name, comm, commlen); + *statppid = ppid; + + return 0; + } + +bad_kernel: + /* + * We were able to read our STAT_PARENT_PID_READ_N bytes from + * /proc/%d/stat, but the content is bad. Broken kernel? + * Should not happen, but handle it gracefully. + */ + return -1; +} + +static int stat_parent_pid(pid_t pid, struct strbuf *name, int *statppid) +{ + struct strbuf procfs_path = STRBUF_INIT; + struct strbuf sb = STRBUF_INIT; + FILE *fp; + int ret = -1; + + /* try to use procfs if it's present. */ + strbuf_addf(&procfs_path, "/proc/%d/stat", pid); + fp = fopen(procfs_path.buf, "r"); + if (!fp) + goto cleanup; + + /* + * We could be more strict here and assert that we read at + * least STAT_PARENT_PID_READ_N. My reading of procfs(5) is + * that on any modern kernel (at least since 2.6.0 released in + * 2003) even if all the mandatory numeric fields were zero'd + * out we'd get at least 100 bytes, but let's just check that + * we got anything at all and trust the parse_proc_stat() + * function to handle its "Bad Kernel?" error checking. + */ + if (!strbuf_fread(&sb, STAT_PARENT_PID_READ_N, fp)) + goto cleanup; + if (parse_proc_stat(&sb, name, statppid) < 0) + goto cleanup; + + ret = 0; +cleanup: + if (fp) + fclose(fp); + strbuf_release(&procfs_path); + strbuf_release(&sb); + + return ret; +} + +static void push_ancestry_name(struct strvec *names, pid_t pid) +{ + struct strbuf name = STRBUF_INIT; + int ppid; + + if (stat_parent_pid(pid, &name, &ppid) < 0) + goto cleanup; + + strvec_push(names, name.buf); + + /* + * Both errors and reaching the end of the process chain are + * reported as fields of 0 by proc(5) + */ + if (ppid) + push_ancestry_name(names, ppid); +cleanup: + strbuf_release(&name); + + return; +} + +void trace2_collect_process_info(enum trace2_process_info_reason reason) +{ + struct strvec names = STRVEC_INIT; + + if (!trace2_is_enabled()) + return; + + switch (reason) { + case TRACE2_PROCESS_INFO_EXIT: + /* + * The Windows version of this calls its + * get_peak_memory_info() here. We may want to insert + * similar process-end statistics here in the future. 
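The long comment in compat/linux/procinfo.c above explains why the parser anchors on the first '(' and the last ')' rather than trusting fscanf(): the comm field may itself contain spaces, parentheses or newlines, while everything after the final ')' follows a fixed layout ("<state> <ppid> <pgrp> ..."). A standalone, Linux-only sketch of the same anchoring idea, applied to the current process:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/*
	 * Standalone sketch of the strategy described above: read a bounded
	 * chunk of /proc/self/stat, take everything between the first '('
	 * and the last ')' as the command name, then parse the parent PID
	 * from the fields after the closing parenthesis.
	 */
	int main(void)
	{
		char buf[64];	/* mirrors the STAT_PARENT_PID_READ_N sizing */
		char name[32];
		FILE *fp = fopen("/proc/self/stat", "r");
		size_t n;
		char *lhs, *rhs;
		long ppid;

		if (!fp)
			return 1;
		n = fread(buf, 1, sizeof(buf) - 1, fp);
		fclose(fp);
		buf[n] = '\0';

		lhs = strchr(buf, '(');
		rhs = strrchr(buf, ')');
		if (!lhs || !rhs || rhs < lhs)
			return 1;

		snprintf(name, sizeof(name), "%.*s", (int)(rhs - lhs - 1), lhs + 1);
		/* skip ") X " (the single-character state field) to reach ppid */
		ppid = strtol(rhs + 4, NULL, 10);

		printf("comm=%s ppid=%ld\n", name, ppid);
		return 0;
	}

Feeding the resulting ppid back into the same routine is what push_ancestry_name() does to walk the whole process ancestry for trace2.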
+ */ + break; + case TRACE2_PROCESS_INFO_STARTUP: + push_ancestry_name(&names, getppid()); + + if (names.nr) + trace2_cmd_ancestry(names.v); + strvec_clear(&names); + break; + } + + return; +} diff --git a/compat/mmap.c b/compat/mmap.c index 14d31010df..8d6c02d4bc 100644 --- a/compat/mmap.c +++ b/compat/mmap.c @@ -7,7 +7,12 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t of if (start != NULL || flags != MAP_PRIVATE || prot != PROT_READ) die("Invalid usage of mmap when built with NO_MMAP"); - start = xmalloc(length); + if (length == 0) { + errno = EINVAL; + return MAP_FAILED; + } + + start = malloc(length); if (start == NULL) { errno = ENOMEM; return MAP_FAILED; diff --git a/compat/nedmalloc/nedmalloc.c b/compat/nedmalloc/nedmalloc.c index 1cc31c3502..edb438a777 100644 --- a/compat/nedmalloc/nedmalloc.c +++ b/compat/nedmalloc/nedmalloc.c @@ -510,7 +510,7 @@ static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *me assert(idx<=THREADCACHEMAXBINS); if(tck==*binsptr) { - fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", tck); + fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", (void *)tck); abort(); } #ifdef FULLSANITYCHECKS diff --git a/compat/stub/procinfo.c b/compat/stub/procinfo.c new file mode 100644 index 0000000000..12c0a23c9e --- /dev/null +++ b/compat/stub/procinfo.c @@ -0,0 +1,11 @@ +#include "git-compat-util.h" + +#include "trace2.h" + +/* + * Stub. See sample implementations in compat/linux/procinfo.c and + * compat/win32/trace2_win32_process_info.c. + */ +void trace2_collect_process_info(enum trace2_process_info_reason reason) +{ +} diff --git a/compat/win32/lazyload.h b/compat/win32/lazyload.h index 9e631c8593..d2056cdadf 100644 --- a/compat/win32/lazyload.h +++ b/compat/win32/lazyload.h @@ -37,7 +37,7 @@ struct proc_addr { #define INIT_PROC_ADDR(function) \ (function = get_proc_addr(&proc_addr_##function)) -static inline void *get_proc_addr(struct proc_addr *proc) +static inline FARPROC get_proc_addr(struct proc_addr *proc) { /* only do this once */ if (!proc->initialized) { @@ -76,7 +76,6 @@ static struct key_value_info *current_config_kvi; */ static enum config_scope current_parsing_scope; -static int core_compression_seen; static int pack_compression_seen; static int zlib_compression_seen; @@ -137,7 +136,7 @@ static int handle_path_include(const char *path, struct config_include_data *inc if (!path) return config_error_nonbool("include.path"); - expanded = expand_user_path(path, 0); + expanded = interpolate_path(path, 0); if (!expanded) return error(_("could not expand include path '%s'"), path); path = expanded; @@ -185,7 +184,7 @@ static int prepare_include_condition_pattern(struct strbuf *pat) char *expanded; int prefix = 0; - expanded = expand_user_path(pat->buf, 1); + expanded = interpolate_path(pat->buf, 1); if (expanded) { strbuf_reset(pat); strbuf_addstr(pat, expanded); @@ -1270,7 +1269,7 @@ int git_config_pathname(const char **dest, const char *var, const char *value) { if (!value) return config_error_nonbool(var); - *dest = expand_user_path(value, 0); + *dest = interpolate_path(value, 0); if (!*dest) die(_("failed to expand user dir in: '%s'"), value); return 0; @@ -1400,8 +1399,6 @@ static int git_default_core_config(const char *var, const char *value, void *cb) level = Z_DEFAULT_COMPRESSION; else if (level < 0 || level > Z_BEST_COMPRESSION) die(_("bad zlib compression level %d"), level); - core_compression_level = level; - core_compression_seen = 1; if 
(!zlib_compression_seen) zlib_compression_level = level; if (!pack_compression_seen) @@ -1796,6 +1793,7 @@ int git_config_from_mem(config_fn_t fn, int git_config_from_blob_oid(config_fn_t fn, const char *name, + struct repository *repo, const struct object_id *oid, void *data) { @@ -1804,7 +1802,7 @@ int git_config_from_blob_oid(config_fn_t fn, unsigned long size; int ret; - buf = read_object_file(oid, &type, &size); + buf = repo_read_object_file(repo, oid, &type, &size); if (!buf) return error(_("unable to load config blob object '%s'"), name); if (type != OBJ_BLOB) { @@ -1820,14 +1818,15 @@ int git_config_from_blob_oid(config_fn_t fn, } static int git_config_from_blob_ref(config_fn_t fn, + struct repository *repo, const char *name, void *data) { struct object_id oid; - if (get_oid(name, &oid) < 0) + if (repo_get_oid(repo, name, &oid) < 0) return error(_("unable to resolve config blob '%s'"), name); - return git_config_from_blob_oid(fn, name, &oid, data); + return git_config_from_blob_oid(fn, name, repo, &oid, data); } char *git_system_config(void) @@ -1845,7 +1844,7 @@ void git_global_config(char **user_out, char **xdg_out) char *xdg_config = NULL; if (!user_config) { - user_config = expand_user_path("~/.gitconfig", 0); + user_config = interpolate_path("~/.gitconfig", 0); xdg_config = xdg_config_home("config"); } @@ -1958,12 +1957,16 @@ int config_with_options(config_fn_t fn, void *data, * If we have a specific filename, use it. Otherwise, follow the * regular lookup sequence. */ - if (config_source && config_source->use_stdin) + if (config_source && config_source->use_stdin) { return git_config_from_stdin(fn, data); - else if (config_source && config_source->file) + } else if (config_source && config_source->file) { return git_config_from_file(fn, config_source->file, data); - else if (config_source && config_source->blob) - return git_config_from_blob_ref(fn, config_source->blob, data); + } else if (config_source && config_source->blob) { + struct repository *repo = config_source->repo ? 
+ config_source->repo : the_repository; + return git_config_from_blob_ref(fn, repo, config_source->blob, + data); + } return do_git_config_sequence(opts, fn, data); } @@ -49,6 +49,8 @@ const char *config_scope_name(enum config_scope scope); struct git_config_source { unsigned int use_stdin:1; const char *file; + /* The repository if blob is not NULL; leave blank for the_repository */ + struct repository *repo; const char *blob; enum config_scope scope; }; @@ -136,6 +138,7 @@ int git_config_from_mem(config_fn_t fn, const char *buf, size_t len, void *data, const struct config_options *opts); int git_config_from_blob_oid(config_fn_t fn, const char *name, + struct repository *repo, const struct object_id *oid, void *data); void git_config_push_parameter(const char *text); void git_config_push_env(const char *spec); diff --git a/config.mak.dev b/config.mak.dev index 022fb58218..c080ac0231 100644 --- a/config.mak.dev +++ b/config.mak.dev @@ -1,13 +1,20 @@ +ifndef COMPILER_FEATURES +COMPILER_FEATURES := $(shell ./detect-compiler $(CC)) +endif + ifeq ($(filter no-error,$(DEVOPTS)),) DEVELOPER_CFLAGS += -Werror SPARSE_FLAGS += -Wsparse-error endif -ifneq ($(filter pedantic,$(DEVOPTS)),) +DEVELOPER_CFLAGS += -Wall +ifeq ($(filter no-pedantic,$(DEVOPTS)),) DEVELOPER_CFLAGS += -pedantic -# don't warn for each N_ use -DEVELOPER_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0 +DEVELOPER_CFLAGS += -Wpedantic +ifneq ($(filter gcc5,$(COMPILER_FEATURES)),) +DEVELOPER_CFLAGS += -Wno-pedantic-ms-format +DEVELOPER_CFLAGS += -Wno-incompatible-pointer-types +endif endif -DEVELOPER_CFLAGS += -Wall DEVELOPER_CFLAGS += -Wdeclaration-after-statement DEVELOPER_CFLAGS += -Wformat-security DEVELOPER_CFLAGS += -Wold-style-definition @@ -18,10 +25,6 @@ DEVELOPER_CFLAGS += -Wunused DEVELOPER_CFLAGS += -Wvla DEVELOPER_CFLAGS += -fno-common -ifndef COMPILER_FEATURES -COMPILER_FEATURES := $(shell ./detect-compiler $(CC)) -endif - ifneq ($(filter clang4,$(COMPILER_FEATURES)),) DEVELOPER_CFLAGS += -Wtautological-constant-out-of-range-compare endif diff --git a/config.mak.uname b/config.mak.uname index 69413fb3dc..76516aaa9a 100644 --- a/config.mak.uname +++ b/config.mak.uname @@ -58,6 +58,8 @@ ifeq ($(uname_S),Linux) FREAD_READS_DIRECTORIES = UnfortunatelyYes BASIC_CFLAGS += -DHAVE_SYSINFO PROCFS_EXECUTABLE_PATH = /proc/self/exe + HAVE_PLATFORM_PROCINFO = YesPlease + COMPAT_OBJS += compat/linux/procinfo.o endif ifeq ($(uname_S),GNU/kFreeBSD) HAVE_ALLOCA_H = YesPlease @@ -617,6 +619,7 @@ ifneq (,$(findstring MINGW,$(uname_S))) ETAGS_TARGET = ETAGS NO_POSIX_GOODIES = UnfortunatelyYes DEFAULT_HELP_FORMAT = html + HAVE_PLATFORM_PROCINFO = YesPlease BASIC_LDFLAGS += -municode COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32 COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\" @@ -164,6 +164,8 @@ enum protocol_version discover_version(struct packet_reader *reader) BUG("unknown protocol version"); } + trace2_data_intmax("transfer", NULL, "negotiated-version", version); + return version; } diff --git a/connected.c b/connected.c index b18299fdf0..cf68e37a97 100644 --- a/connected.c +++ b/connected.c @@ -24,7 +24,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data, struct child_process rev_list = CHILD_PROCESS_INIT; FILE *rev_list_in; struct check_connected_options defaults = CHECK_CONNECTED_INIT; - struct object_id oid; + const struct object_id *oid; int err = 0; struct packed_git *new_pack = NULL; struct transport *transport; @@ -34,7 +34,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data, opt = &defaults; transport = opt->transport; - 
if (fn(cb_data, &oid)) { + oid = fn(cb_data); + if (!oid) { if (opt->err_fd) close(opt->err_fd); return err; @@ -73,7 +74,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data, for (p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_promisor) continue; - if (find_pack_entry_one(oid.hash, p)) + if (find_pack_entry_one(oid->hash, p)) goto promisor_pack_found; } /* @@ -83,7 +84,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data, goto no_promisor_pack_found; promisor_pack_found: ; - } while (!fn(cb_data, &oid)); + } while ((oid = fn(cb_data)) != NULL); return 0; } @@ -106,6 +107,7 @@ no_promisor_pack_found: if (opt->progress) strvec_pushf(&rev_list.args, "--progress=%s", _("Checking connectivity")); + strvec_push(&rev_list.args, "--unsorted-input"); rev_list.git_cmd = 1; rev_list.env = opt->env; @@ -132,12 +134,12 @@ no_promisor_pack_found: * are sure the ref is good and not sending it to * rev-list for verification. */ - if (new_pack && find_pack_entry_one(oid.hash, new_pack)) + if (new_pack && find_pack_entry_one(oid->hash, new_pack)) continue; - if (fprintf(rev_list_in, "%s\n", oid_to_hex(&oid)) < 0) + if (fprintf(rev_list_in, "%s\n", oid_to_hex(oid)) < 0) break; - } while (!fn(cb_data, &oid)); + } while ((oid = fn(cb_data)) != NULL); if (ferror(rev_list_in) || fflush(rev_list_in)) { if (errno != EPIPE && errno != EINVAL) diff --git a/connected.h b/connected.h index 8d5a6b3ad6..6e59c92aa3 100644 --- a/connected.h +++ b/connected.h @@ -9,7 +9,7 @@ struct transport; * When called after returning the name for the last object, return -1 * to signal EOF, otherwise return 0. */ -typedef int (*oid_iterate_fn)(void *, struct object_id *oid); +typedef const struct object_id *(*oid_iterate_fn)(void *); /* * Named-arguments struct for check_connected. All arguments are diff --git a/contrib/coccinelle/xopen.cocci b/contrib/coccinelle/xopen.cocci new file mode 100644 index 0000000000..b71db67019 --- /dev/null +++ b/contrib/coccinelle/xopen.cocci @@ -0,0 +1,19 @@ +@@ +identifier fd; +identifier die_fn =~ "^(die|die_errno)$"; +@@ + int fd = +- open ++ xopen + (...); +- if ( \( fd < 0 \| fd == -1 \) ) { die_fn(...); } + +@@ +expression fd; +identifier die_fn =~ "^(die|die_errno)$"; +@@ + fd = +- open ++ xopen + (...); +- if ( \( fd < 0 \| fd == -1 \) ) { die_fn(...); } diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash index 4bdd27ddc8..8108eda1e8 100644 --- a/contrib/completion/git-completion.bash +++ b/contrib/completion/git-completion.bash @@ -356,7 +356,7 @@ __gitcomp () local cur_="${3-$cur}" case "$cur_" in - --*=) + *=) ;; --no-*) local c i=0 IFS=$' \t\n' @@ -421,7 +421,7 @@ __gitcomp_builtin () local incl="${2-}" local excl="${3-}" - local var=__gitcomp_builtin_"${cmd/-/_}" + local var=__gitcomp_builtin_"${cmd//-/_}" local options eval "options=\${$var-}" @@ -2650,10 +2650,10 @@ __git_complete_config_variable_name () return ;; branch.*) - local pfx="${cur%.*}." - cur_="${cur#*.}" + local pfx="${cur_%.*}." + cur_="${cur_#*.}" __gitcomp_direct "$(__git_heads "$pfx" "$cur_" ".")" - __gitcomp_nl_append $'autoSetupMerge\nautoSetupRebase\n' "$pfx" "$cur_" "$sfx" + __gitcomp_nl_append $'autoSetupMerge\nautoSetupRebase\n' "$pfx" "$cur_" "${sfx- }" return ;; guitool.*.*) @@ -2687,7 +2687,7 @@ __git_complete_config_variable_name () local pfx="${cur_%.*}." 
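The contrib/coccinelle/xopen.cocci rule added above mechanically rewrites the open-then-die pattern into a single xopen() call; the builtin/tag.c and builtin/update-index.c hunks earlier in this series, and the csum-file.c hunk just below, are instances of it. In isolation the transformation looks like this (path and flags are placeholders taken from the tag.c hunk):

	/* before: open() followed by an explicit error check */
	fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0600);
	if (fd < 0)
		die_errno(_("could not create file '%s'"), path);

	/* after: xopen() handles the failure (and EINTR) itself,
	 * dying with an errno-based message */
	fd = xopen(path, O_CREAT | O_TRUNC | O_WRONLY, 0600);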
cur_="${cur_#*.}" __git_compute_all_commands - __gitcomp_nl "$__git_all_commands" "$pfx" "$cur_" "$sfx" + __gitcomp_nl "$__git_all_commands" "$pfx" "$cur_" "${sfx- }" return ;; remote.*.*) @@ -2703,7 +2703,7 @@ __git_complete_config_variable_name () local pfx="${cur_%.*}." cur_="${cur_#*.}" __gitcomp_nl "$(__git_remotes)" "$pfx" "$cur_" "." - __gitcomp_nl_append "pushDefault" "$pfx" "$cur_" "$sfx" + __gitcomp_nl_append "pushDefault" "$pfx" "$cur_" "${sfx- }" return ;; url.*.*) diff --git a/contrib/completion/git-completion.tcsh b/contrib/completion/git-completion.tcsh index 4a790d8f4e..ba797e5b3c 100644 --- a/contrib/completion/git-completion.tcsh +++ b/contrib/completion/git-completion.tcsh @@ -80,8 +80,9 @@ else COMP_CWORD=\$((\${#COMP_WORDS[@]}-1)) fi -# Call _git() or _gitk() of the bash script, based on the first argument -_\${1} +# Call __git_wrap__git_main() or __git_wrap__gitk_main() of the bash script, +# based on the first argument +__git_wrap__\${1}_main IFS=\$'\n' if [ \${#COMPREPLY[*]} -eq 0 ]; then diff --git a/credential.c b/credential.c index 3c05c7c669..000ac7a8d4 100644 --- a/credential.c +++ b/credential.c @@ -128,6 +128,7 @@ static void credential_apply_config(struct credential *c) normalized_url = url_normalize(url.buf, &config.url); git_config(urlmatch_config_entry, &config); + string_list_clear(&config.vars, 1); free(normalized_url); strbuf_release(&url); diff --git a/csum-file.c b/csum-file.c index c951cf8277..26e8a6df44 100644 --- a/csum-file.c +++ b/csum-file.c @@ -131,12 +131,8 @@ struct hashfile *hashfd_check(const char *name) int sink, check; struct hashfile *f; - sink = open("/dev/null", O_WRONLY); - if (sink < 0) - die_errno("unable to open /dev/null"); - check = open(name, O_RDONLY); - if (check < 0) - die_errno("unable to open '%s'", name); + sink = xopen("/dev/null", O_WRONLY); + check = xopen(name, O_RDONLY); f = hashfd(sink, name); f->check_fd = check; f->check_buffer = xmalloc(f->buffer_len); diff --git a/detect-compiler b/detect-compiler index 70b754481c..11d60da5b7 100755 --- a/detect-compiler +++ b/detect-compiler @@ -13,11 +13,11 @@ get_version_line() { } get_family() { - get_version_line | sed 's/^\(.*\) version [0-9][^ ]* .*/\1/' + get_version_line | sed 's/^\(.*\) version [0-9].*/\1/' } get_version() { - get_version_line | sed 's/^.* version \([0-9][^ ]*\) .*/\1/' + get_version_line | sed 's/^.* version \([0-9][^ ]*\).*/\1/' } print_flags() { @@ -38,10 +38,7 @@ case "$(get_family)" in gcc) print_flags gcc ;; -clang) - print_flags clang - ;; -"FreeBSD clang") +clang | *" clang") print_flags clang ;; "Apple LLVM") diff --git a/diff-lib.c b/diff-lib.c index f9eadc4fc1..ca085a03ef 100644 --- a/diff-lib.c +++ b/diff-lib.c @@ -117,6 +117,10 @@ int run_diff_files(struct rev_info *revs, unsigned int option) if (!ce_path_match(istate, ce, &revs->prune_data, NULL)) continue; + if (revs->diffopt.prefix && + strncmp(ce->name, revs->diffopt.prefix, revs->diffopt.prefix_length)) + continue; + if (ce_stage(ce)) { struct combine_diff_path *dpath; struct diff_filepair *pair; diff --git a/diff-merges.c b/diff-merges.c index d897fd8a29..5060ccd890 100644 --- a/diff-merges.c +++ b/diff-merges.c @@ -6,7 +6,7 @@ typedef void (*diff_merges_setup_func_t)(struct rev_info *); static void set_separate(struct rev_info *revs); static diff_merges_setup_func_t set_to_default = set_separate; -static int suppress_parsing; +static int suppress_m_parsing; static void suppress(struct rev_info *revs) { @@ -91,9 +91,9 @@ int diff_merges_config(const char *value) return 0; } -void 
diff_merges_suppress_options_parsing(void) +void diff_merges_suppress_m_parsing(void) { - suppress_parsing = 1; + suppress_m_parsing = 1; } int diff_merges_parse_opts(struct rev_info *revs, const char **argv) @@ -102,10 +102,7 @@ int diff_merges_parse_opts(struct rev_info *revs, const char **argv) const char *optarg; const char *arg = argv[0]; - if (suppress_parsing) - return 0; - - if (!strcmp(arg, "-m")) { + if (!suppress_m_parsing && !strcmp(arg, "-m")) { set_to_default(revs); } else if (!strcmp(arg, "-c")) { set_combined(revs); @@ -153,9 +150,6 @@ void diff_merges_set_dense_combined_if_unset(struct rev_info *revs) void diff_merges_setup_revs(struct rev_info *revs) { - if (suppress_parsing) - return; - if (revs->combine_merges == 0) revs->dense_combined_merges = 0; if (revs->separate_merges == 0) diff --git a/diff-merges.h b/diff-merges.h index b5d57f6563..19639689bb 100644 --- a/diff-merges.h +++ b/diff-merges.h @@ -11,7 +11,7 @@ struct rev_info; int diff_merges_config(const char *value); -void diff_merges_suppress_options_parsing(void); +void diff_merges_suppress_m_parsing(void); int diff_merges_parse_opts(struct rev_info *revs, const char **argv); @@ -26,6 +26,7 @@ #include "parse-options.h" #include "help.h" #include "promisor-remote.h" +#include "dir.h" #ifdef NO_FAST_WORKING_DIRECTORY #define FAST_WORKING_DIRECTORY 0 @@ -3907,6 +3908,13 @@ static int reuse_worktree_file(struct index_state *istate, if (!want_file && would_convert_to_git(istate, name)) return 0; + /* + * If this path does not match our sparse-checkout definition, + * then the file will not be in the working directory. + */ + if (!path_in_sparse_checkout(name, istate)) + return 0; + len = strlen(name); pos = index_name_pos(istate, name, len); if (pos < 0) diff --git a/diffcore-rename.c b/diffcore-rename.c index c95857b51f..bebd4ed6a4 100644 --- a/diffcore-rename.c +++ b/diffcore-rename.c @@ -317,10 +317,11 @@ static int find_identical_files(struct hashmap *srcs, } static void insert_file_table(struct repository *r, + struct mem_pool *pool, struct hashmap *table, int index, struct diff_filespec *filespec) { - struct file_similarity *entry = xmalloc(sizeof(*entry)); + struct file_similarity *entry = mem_pool_alloc(pool, sizeof(*entry)); entry->index = index; entry->filespec = filespec; @@ -336,7 +337,8 @@ static void insert_file_table(struct repository *r, * and then during the second round we try to match * cache-dirty entries as well. 
*/ -static int find_exact_renames(struct diff_options *options) +static int find_exact_renames(struct diff_options *options, + struct mem_pool *pool) { int i, renames = 0; struct hashmap file_table; @@ -346,7 +348,7 @@ static int find_exact_renames(struct diff_options *options) */ hashmap_init(&file_table, NULL, NULL, rename_src_nr); for (i = rename_src_nr-1; i >= 0; i--) - insert_file_table(options->repo, + insert_file_table(options->repo, pool, &file_table, i, rename_src[i].p->one); @@ -354,8 +356,8 @@ static int find_exact_renames(struct diff_options *options) for (i = 0; i < rename_dst_nr; i++) renames += find_identical_files(&file_table, i, options); - /* Free the hash data structure and entries */ - hashmap_clear_and_free(&file_table, struct file_similarity, entry); + /* Free the hash data structure (entries will be freed with the pool) */ + hashmap_clear(&file_table); return renames; } @@ -1330,7 +1332,47 @@ static void handle_early_known_dir_renames(struct dir_rename_info *info, rename_src_nr = new_num_src; } +static void free_filespec_data(struct diff_filespec *spec) +{ + if (!--spec->count) + diff_free_filespec_data(spec); +} + +static void pool_free_filespec(struct mem_pool *pool, + struct diff_filespec *spec) +{ + if (!pool) { + free_filespec(spec); + return; + } + + /* + * Similar to free_filespec(), but only frees the data. The spec + * itself was allocated in the pool and should not be individually + * freed. + */ + free_filespec_data(spec); +} + +void pool_diff_free_filepair(struct mem_pool *pool, + struct diff_filepair *p) +{ + if (!pool) { + diff_free_filepair(p); + return; + } + + /* + * Similar to diff_free_filepair() but only frees the data from the + * filespecs; not the filespecs or the filepair which were + * allocated from the pool. + */ + free_filespec_data(p->one); + free_filespec_data(p->two); +} + void diffcore_rename_extended(struct diff_options *options, + struct mem_pool *pool, struct strintmap *relevant_sources, struct strintmap *dirs_removed, struct strmap *dir_rename_count, @@ -1345,6 +1387,7 @@ void diffcore_rename_extended(struct diff_options *options, int num_destinations, dst_cnt; int num_sources, want_copies; struct progress *progress = NULL; + struct mem_pool local_pool; struct dir_rename_info info; struct diff_populate_filespec_options dpf_options = { .check_binary = 0, @@ -1413,11 +1456,18 @@ void diffcore_rename_extended(struct diff_options *options, goto cleanup; /* nothing to do */ trace2_region_enter("diff", "exact renames", options->repo); + mem_pool_init(&local_pool, 32*1024); /* * We really want to cull the candidates list early * with cheap tests in order to avoid doing deltas. */ - rename_count = find_exact_renames(options); + rename_count = find_exact_renames(options, &local_pool); + /* + * Discard local_pool immediately instead of at "cleanup:" in order + * to reduce maximum memory usage; inexact rename detection uses up + * a fair amount of memory, and mem_pools can too. + */ + mem_pool_discard(&local_pool, 0); trace2_region_leave("diff", "exact renames", options->repo); /* Did we only want exact renames? 
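Note on the hunks above: exact rename detection now carves its file_similarity entries out of a mem_pool that lives only for the duration of the pass, which is why the table is torn down with hashmap_clear() rather than hashmap_clear_and_free(). A minimal sketch of that allocation pattern, assuming the mem_pool_init()/mem_pool_alloc()/mem_pool_discard() API from mem-pool.h (the entry struct, function name and sizes below are illustrative, not taken from the patch):

    #include "git-compat-util.h"
    #include "mem-pool.h"

    struct entry {
        int index;
        void *payload;
    };

    static void exact_rename_pass(size_t nr)
    {
        struct mem_pool pool;
        size_t i;

        /* one up-front block; later allocations are cheap pointer bumps */
        mem_pool_init(&pool, 32 * 1024);

        for (i = 0; i < nr; i++) {
            struct entry *e = mem_pool_alloc(&pool, sizeof(*e));

            e->index = (int)i;
            e->payload = NULL;
            /* ... add to a hashmap keyed by content, match entries ... */
        }

        /*
         * No per-entry free(): dropping the pool releases every entry
         * at once, which is why the table above is cleared with
         * hashmap_clear() instead of hashmap_clear_and_free().
         */
        mem_pool_discard(&pool, 0);
    }

Discarding the pool right after the exact-rename pass, as the hunk above does, keeps its memory from stacking on top of what inexact rename detection allocates later.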
*/ @@ -1636,7 +1686,7 @@ void diffcore_rename_extended(struct diff_options *options, pair_to_free = p; if (pair_to_free) - diff_free_filepair(pair_to_free); + pool_diff_free_filepair(pool, pair_to_free); } diff_debug_queue("done copying original", &outq); @@ -1646,7 +1696,7 @@ void diffcore_rename_extended(struct diff_options *options, for (i = 0; i < rename_dst_nr; i++) if (rename_dst[i].filespec_to_free) - free_filespec(rename_dst[i].filespec_to_free); + pool_free_filespec(pool, rename_dst[i].filespec_to_free); cleanup_dir_rename_info(&info, dirs_removed, dir_rename_count != NULL); FREE_AND_NULL(rename_dst); @@ -1663,5 +1713,5 @@ void diffcore_rename_extended(struct diff_options *options, void diffcore_rename(struct diff_options *options) { - diffcore_rename_extended(options, NULL, NULL, NULL, NULL); + diffcore_rename_extended(options, NULL, NULL, NULL, NULL, NULL); } diff --git a/diffcore.h b/diffcore.h index 533b30e21e..badc2261c2 100644 --- a/diffcore.h +++ b/diffcore.h @@ -127,6 +127,8 @@ struct diff_filepair { #define DIFF_PAIR_MODE_CHANGED(p) ((p)->one->mode != (p)->two->mode) void diff_free_filepair(struct diff_filepair *); +void pool_diff_free_filepair(struct mem_pool *pool, + struct diff_filepair *p); int diff_unmodified_pair(struct diff_filepair *); @@ -179,6 +181,7 @@ void partial_clear_dir_rename_count(struct strmap *dir_rename_count); void diffcore_break(struct repository *, int); void diffcore_rename(struct diff_options *); void diffcore_rename_extended(struct diff_options *options, + struct mem_pool *pool, struct strintmap *relevant_sources, struct strintmap *dirs_removed, struct strmap *dir_rename_count, @@ -1439,6 +1439,58 @@ done: return result; } +int init_sparse_checkout_patterns(struct index_state *istate) +{ + if (!core_apply_sparse_checkout) + return 1; + if (istate->sparse_checkout_patterns) + return 0; + + CALLOC_ARRAY(istate->sparse_checkout_patterns, 1); + + if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0) { + FREE_AND_NULL(istate->sparse_checkout_patterns); + return -1; + } + + return 0; +} + +static int path_in_sparse_checkout_1(const char *path, + struct index_state *istate, + int require_cone_mode) +{ + const char *base; + int dtype = DT_REG; + + /* + * We default to accepting a path if there are no patterns or + * they are of the wrong type. + */ + if (init_sparse_checkout_patterns(istate) || + (require_cone_mode && + !istate->sparse_checkout_patterns->use_cone_patterns)) + return 1; + + base = strrchr(path, '/'); + return path_matches_pattern_list(path, strlen(path), base ? base + 1 : path, + &dtype, + istate->sparse_checkout_patterns, + istate) > 0; +} + +int path_in_sparse_checkout(const char *path, + struct index_state *istate) +{ + return path_in_sparse_checkout_1(path, istate, 0); +} + +int path_in_cone_mode_sparse_checkout(const char *path, + struct index_state *istate) +{ + return path_in_sparse_checkout_1(path, istate, 1); +} + static struct path_pattern *last_matching_pattern_from_lists( struct dir_struct *dir, struct index_state *istate, const char *pathname, int pathlen, @@ -2970,6 +3022,120 @@ int is_empty_dir(const char *path) return ret; } +char *git_url_basename(const char *repo, int is_bundle, int is_bare) +{ + const char *end = repo + strlen(repo), *start, *ptr; + size_t len; + char *dir; + + /* + * Skip scheme. + */ + start = strstr(repo, "://"); + if (start == NULL) + start = repo; + else + start += 3; + + /* + * Skip authentication data. 
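Aside on the sparse-checkout helpers introduced in the dir.c hunk above: path_in_sparse_checkout() and its cone-mode variant deliberately answer "yes" when sparse checkout is disabled or the patterns cannot be loaded, so a caller needs only one check before touching the working tree, as the reuse_worktree_file() hunk earlier in this patch does. A minimal caller sketch under that assumption (the wrapper function is illustrative):

    #include "cache.h"
    #include "dir.h"

    static void visit_materialized_entries(struct index_state *istate)
    {
        unsigned int i;

        for (i = 0; i < istate->cache_nr; i++) {
            const struct cache_entry *ce = istate->cache[i];

            /*
             * Paths outside the sparse-checkout definition are not
             * present on disk, so skip them before any stat()/open().
             */
            if (!path_in_sparse_checkout(ce->name, istate))
                continue;

            /* ... work with the on-disk file at ce->name ... */
        }
    }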
The stripping does happen + * greedily, such that we strip up to the last '@' inside + * the host part. + */ + for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) { + if (*ptr == '@') + start = ptr + 1; + } + + /* + * Strip trailing spaces, slashes and /.git + */ + while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1]))) + end--; + if (end - start > 5 && is_dir_sep(end[-5]) && + !strncmp(end - 4, ".git", 4)) { + end -= 5; + while (start < end && is_dir_sep(end[-1])) + end--; + } + + /* + * Strip trailing port number if we've got only a + * hostname (that is, there is no dir separator but a + * colon). This check is required such that we do not + * strip URI's like '/foo/bar:2222.git', which should + * result in a dir '2222' being guessed due to backwards + * compatibility. + */ + if (memchr(start, '/', end - start) == NULL + && memchr(start, ':', end - start) != NULL) { + ptr = end; + while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':') + ptr--; + if (start < ptr && ptr[-1] == ':') + end = ptr - 1; + } + + /* + * Find last component. To remain backwards compatible we + * also regard colons as path separators, such that + * cloning a repository 'foo:bar.git' would result in a + * directory 'bar' being guessed. + */ + ptr = end; + while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':') + ptr--; + start = ptr; + + /* + * Strip .{bundle,git}. + */ + len = end - start; + strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git"); + + if (!len || (len == 1 && *start == '/')) + die(_("No directory name could be guessed.\n" + "Please specify a directory on the command line")); + + if (is_bare) + dir = xstrfmt("%.*s.git", (int)len, start); + else + dir = xstrndup(start, len); + /* + * Replace sequences of 'control' characters and whitespace + * with one ascii space, remove leading and trailing spaces. 
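The stripping steps spelled out above are what give "git clone" its "humanish" target directory when none is specified. A small usage sketch (the expected results follow the examples documented for this helper later in dir.h; the wrapper function is illustrative):

    #include "git-compat-util.h"
    #include "dir.h"

    static void guess_clone_dirs(void)
    {
        /* scheme, auth data and the trailing ".git" are stripped */
        char *a = git_url_basename("https://user@example.com/path/to/repo.git", 0, 0);
        /* a == "repo" */

        /* a colon also acts as a separator for "host:dir" style sources */
        char *b = git_url_basename("host.xz:foo/.git", 0, 0);
        /* b == "foo" */

        free(a);
        free(b);
    }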
+ */ + if (*dir) { + char *out = dir; + int prev_space = 1 /* strip leading whitespace */; + for (end = dir; *end; ++end) { + char ch = *end; + if ((unsigned char)ch < '\x20') + ch = '\x20'; + if (isspace(ch)) { + if (prev_space) + continue; + prev_space = 1; + } else + prev_space = 0; + *out++ = ch; + } + *out = '\0'; + if (out > dir && prev_space) + out[-1] = '\0'; + } + return dir; +} + +void strip_dir_trailing_slashes(char *dir) +{ + char *end = dir + strlen(dir); + + while (dir < end - 1 && is_dir_sep(end[-1])) + end--; + *end = '\0'; +} + static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up) { DIR *dir; @@ -3633,7 +3799,7 @@ static void connect_wt_gitdir_in_nested(const char *sub_worktree, strbuf_reset(&sub_wt); strbuf_reset(&sub_gd); strbuf_addf(&sub_wt, "%s/%s", sub_worktree, sub->path); - strbuf_addf(&sub_gd, "%s/modules/%s", sub_gitdir, sub->name); + submodule_name_to_gitdir(&sub_gd, &subrepo, sub->name); connect_work_tree_and_git_dir(sub_wt.buf, sub_gd.buf, 1); } @@ -394,6 +394,14 @@ enum pattern_match_result path_matches_pattern_list(const char *pathname, const char *basename, int *dtype, struct pattern_list *pl, struct index_state *istate); + +int init_sparse_checkout_patterns(struct index_state *state); + +int path_in_sparse_checkout(const char *path, + struct index_state *istate); +int path_in_cone_mode_sparse_checkout(const char *path, + struct index_state *istate); + struct dir_entry *dir_add_ignored(struct dir_struct *dir, struct index_state *istate, const char *pathname, int len); @@ -453,6 +461,17 @@ static inline int is_dot_or_dotdot(const char *name) int is_empty_dir(const char *dir); +/* + * Retrieve the "humanish" basename of the given Git URL. + * + * For example: + * /path/to/repo.git => "repo" + * host.xz:foo/.git => "foo" + * http://example.com/user/bar.baz => "bar.baz" + */ +char *git_url_basename(const char *repo, int is_bundle, int is_bare); +void strip_dir_trailing_slashes(char *dir); + void setup_standard_excludes(struct dir_struct *dir); char *get_sparse_checkout_filename(void); @@ -58,7 +58,7 @@ static int launch_specified_editor(const char *editor, const char *path, const char *args[] = { editor, NULL, NULL }; struct child_process p = CHILD_PROCESS_INIT; int ret, sig; - int print_waiting_for_editor = advice_waiting_for_editor && isatty(2); + int print_waiting_for_editor = advice_enabled(ADVICE_WAITING_FOR_EDITOR) && isatty(2); if (print_waiting_for_editor) { /* @@ -159,25 +159,25 @@ static int remove_available_paths(struct string_list_item *item, void *cb_data) return !available; } -int finish_delayed_checkout(struct checkout *state, int *nr_checkouts) +int finish_delayed_checkout(struct checkout *state, int *nr_checkouts, + int show_progress) { int errs = 0; - unsigned delayed_object_count; + unsigned processed_paths = 0; off_t filtered_bytes = 0; struct string_list_item *filter, *path; - struct progress *progress; + struct progress *progress = NULL; struct delayed_checkout *dco = state->delayed_checkout; if (!state->delayed_checkout) return errs; dco->state = CE_RETRY; - delayed_object_count = dco->paths.nr; - progress = start_delayed_progress(_("Filtering content"), delayed_object_count); + if (show_progress) + progress = start_delayed_progress(_("Filtering content"), dco->paths.nr); while (dco->filters.nr > 0) { for_each_string_list_item(filter, &dco->filters) { struct string_list available_paths = STRING_LIST_INIT_NODUP; - display_progress(progress, delayed_object_count - dco->paths.nr); if 
(!async_query_available_blobs(filter->string, &available_paths)) { /* Filter reported an error */ @@ -224,6 +224,7 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts) ce = index_file_exists(state->istate, path->string, strlen(path->string), 0); if (ce) { + display_progress(progress, ++processed_paths); errs |= checkout_entry(ce, state, NULL, nr_checkouts); filtered_bytes += ce->ce_stat_data.sd_size; display_throughput(progress, filtered_bytes); @@ -43,7 +43,8 @@ static inline int checkout_entry(struct cache_entry *ce, } void enable_delayed_checkout(struct checkout *state); -int finish_delayed_checkout(struct checkout *state, int *nr_checkouts); +int finish_delayed_checkout(struct checkout *state, int *nr_checkouts, + int show_progress); /* * Unlink the last component and schedule the leading directories for diff --git a/environment.c b/environment.c index d6b22ede7e..b4ba4fa22d 100644 --- a/environment.c +++ b/environment.c @@ -41,7 +41,6 @@ char *apply_default_ignorewhitespace; const char *git_attributes_file; const char *git_hooks_path; int zlib_compression_level = Z_BEST_SPEED; -int core_compression_level; int pack_compression_level = Z_DEFAULT_COMPRESSION; int fsync_object_files; size_t packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE; diff --git a/fetch-pack.c b/fetch-pack.c index b0c7be717c..a9604f35a3 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -119,6 +119,11 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid, { enum object_type type; struct object_info info = { .typep = &type }; + struct commit *commit; + + commit = lookup_commit_in_graph(the_repository, oid); + if (commit) + return commit; while (1) { if (oid_object_info_extended(the_repository, oid, &info, @@ -137,8 +142,14 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid, break; } } - if (type == OBJ_COMMIT) - return (struct commit *) parse_object(the_repository, oid); + + if (type == OBJ_COMMIT) { + struct commit *commit = lookup_commit(the_repository, oid); + if (!commit || repo_parse_commit(the_repository, commit)) + return NULL; + return commit; + } + return NULL; } @@ -1906,16 +1917,15 @@ static void update_shallow(struct fetch_pack_args *args, oid_array_clear(&ref); } -static int iterate_ref_map(void *cb_data, struct object_id *oid) +static const struct object_id *iterate_ref_map(void *cb_data) { struct ref **rm = cb_data; struct ref *ref = *rm; if (!ref) - return -1; /* end of the list */ + return NULL; *rm = ref->next; - oidcpy(oid, &ref->old_oid); - return 0; + return &ref->old_oid; } struct ref *fetch_pack(struct fetch_pack_args *args, @@ -55,31 +55,7 @@ const char *Q_(const char *msgid, const char *plu, unsigned long n) } /* Mark msgid for translation but do not translate it. */ -#if !USE_PARENS_AROUND_GETTEXT_N #define N_(msgid) msgid -#else -/* - * Strictly speaking, this will lead to invalid C when - * used this way: - * static const char s[] = N_("FOO"); - * which will expand to - * static const char s[] = ("FOO"); - * and in valid C, the initializer on the right hand side must - * be without the parentheses. But many compilers do accept it - * as a language extension and it will allow us to catch mistakes - * like: - * static const char *msgs[] = { - * N_("one") - * N_("two"), - * N_("three"), - * NULL - * }; - * (notice the missing comma on one of the lines) by forcing - * a compilation error, because parenthesised ("one") ("two") - * will not get silently turned into ("onetwo"). 
- */ -#define N_(msgid) (msgid) -#endif const char *get_preferred_languages(void); int is_utf8_locale(void); diff --git a/git-bisect.sh b/git-bisect.sh index 6a7afaea8d..405cf76f2a 100755 --- a/git-bisect.sh +++ b/git-bisect.sh @@ -34,94 +34,9 @@ Please use "git help bisect" to get the full man page.' OPTIONS_SPEC= . git-sh-setup -_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]' -_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40" TERM_BAD=bad TERM_GOOD=good -bisect_visualize() { - git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit - - if test $# = 0 - then - if test -n "${DISPLAY+set}${SESSIONNAME+set}${MSYSTEM+set}${SECURITYSESSIONID+set}" && - type gitk >/dev/null 2>&1 - then - set gitk - else - set git log - fi - else - case "$1" in - git*|tig) ;; - -*) set git log "$@" ;; - *) set git "$@" ;; - esac - fi - - eval '"$@"' --bisect -- $(cat "$GIT_DIR/BISECT_NAMES") -} - -bisect_run () { - git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit - - test -n "$*" || die "$(gettext "bisect run failed: no command provided.")" - - while true - do - command="$@" - eval_gettextln "running \$command" - "$@" - res=$? - - # Check for really bad run error. - if [ $res -lt 0 -o $res -ge 128 ] - then - eval_gettextln "bisect run failed: -exit code \$res from '\$command' is < 0 or >= 128" >&2 - exit $res - fi - - # Find current state depending on run success or failure. - # A special exit code of 125 means cannot test. - if [ $res -eq 125 ] - then - state='skip' - elif [ $res -gt 0 ] - then - state="$TERM_BAD" - else - state="$TERM_GOOD" - fi - - git bisect--helper --bisect-state $state >"$GIT_DIR/BISECT_RUN" - res=$? - - cat "$GIT_DIR/BISECT_RUN" - - if sane_grep "first $TERM_BAD commit could be any of" "$GIT_DIR/BISECT_RUN" \ - >/dev/null - then - gettextln "bisect run cannot continue any more" >&2 - exit $res - fi - - if [ $res -ne 0 ] - then - eval_gettextln "bisect run failed: -'bisect-state \$state' exited with error code \$res" >&2 - exit $res - fi - - if sane_grep "is the first $TERM_BAD commit" "$GIT_DIR/BISECT_RUN" >/dev/null - then - gettextln "bisect run success" - exit 0; - fi - - done -} - get_terms () { if test -s "$GIT_DIR/BISECT_TERMS" then @@ -152,7 +67,7 @@ case "$#" in # Not sure we want "next" at the UI level anymore. 
git bisect--helper --bisect-next "$@" || exit ;; visualize|view) - bisect_visualize "$@" ;; + git bisect--helper --bisect-visualize "$@" || exit;; reset) git bisect--helper --bisect-reset "$@" ;; replay) @@ -160,7 +75,7 @@ case "$#" in log) git bisect--helper --bisect-log || exit ;; run) - bisect_run "$@" ;; + git bisect--helper --bisect-run "$@" || exit;; terms) git bisect--helper --bisect-terms "$@" || exit;; *) diff --git a/git-compat-util.h b/git-compat-util.h index b46605300a..7c99eef661 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -160,6 +160,9 @@ # endif #define WIN32_LEAN_AND_MEAN /* stops windows.h including winsock.h */ #include <winsock2.h> +#ifndef NO_UNIX_SOCKETS +#include <afunix.h> +#endif #include <windows.h> #define GIT_WINDOWS_NATIVE #endif @@ -1253,10 +1256,6 @@ int warn_on_fopen_errors(const char *path); */ int open_nofollow(const char *path, int flags); -#if !defined(USE_PARENS_AROUND_GETTEXT_N) && defined(__GNUC__) -#define USE_PARENS_AROUND_GETTEXT_N 1 -#endif - #ifndef SHELL_PATH # define SHELL_PATH "/bin/sh" #endif diff --git a/git-curl-compat.h b/git-curl-compat.h new file mode 100644 index 0000000000..a308bdb3b9 --- /dev/null +++ b/git-curl-compat.h @@ -0,0 +1,128 @@ +#ifndef GIT_CURL_COMPAT_H +#define GIT_CURL_COMPAT_H +#include <curl/curl.h> + +/** + * This header centralizes the declaration of our libcurl dependencies + * to make it easy to discover the oldest versions we support, and to + * inform decisions about removing support for older libcurl in the + * future. + * + * The oldest supported version of curl is documented in the "INSTALL" + * document. + * + * The source of truth for what versions have which symbols is + * https://github.com/curl/curl/blob/master/docs/libcurl/symbols-in-versions; + * the release dates are taken from curl.git (at + * https://github.com/curl/curl/). + * + * For each X symbol we need from curl we define our own + * GIT_CURL_HAVE_X. If multiple similar symbols with the same prefix + * were defined in the same version we pick one and check for that name. + * + * We may also define a missing CURL_* symbol to its known value, if + * doing so is sufficient to add support for it to older versions that + * don't have it. + * + * Keep any symbols in date order of when their support was + * introduced, oldest first, in the official version of cURL library. + */ + +/** + * CURL_SOCKOPT_OK was added in 7.21.5, released in April 2011. + */ +#if LIBCURL_VERSION_NUM < 0x071505 +#define CURL_SOCKOPT_OK 0 +#endif + +/** + * CURLOPT_TCP_KEEPALIVE was added in 7.25.0, released in March 2012. + */ +#if LIBCURL_VERSION_NUM >= 0x071900 +#define GITCURL_HAVE_CURLOPT_TCP_KEEPALIVE 1 +#endif + + +/** + * CURLOPT_LOGIN_OPTIONS was added in 7.34.0, released in December + * 2013. + * + * If we start requiring 7.34.0 we might also be able to remove the + * code conditional on USE_CURL_FOR_IMAP_SEND in imap-send.c, see + * 1e16b255b95 (git-imap-send: use libcurl for implementation, + * 2014-11-09) and the check it added for "072200" in the Makefile. + + */ +#if LIBCURL_VERSION_NUM >= 0x072200 +#define GIT_CURL_HAVE_CURLOPT_LOGIN_OPTIONS 1 +#endif + +/** + * CURL_SSLVERSION_TLSv1_[012] was added in 7.34.0, released in + * December 2013. + */ +#if LIBCURL_VERSION_NUM >= 0x072200 +#define GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_0 +#endif + +/** + * CURLOPT_PINNEDPUBLICKEY was added in 7.39.0, released in November + * 2014. 
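The macros above let call sites test a single feature symbol from git-curl-compat.h instead of repeating raw LIBCURL_VERSION_NUM comparisons; the http.c and http-push.c hunks later in this patch are converted to exactly that pattern. A minimal consumer sketch under that assumption (the function name and pinned-key path are purely illustrative):

    #include "git-compat-util.h"
    #include "git-curl-compat.h"

    static void tune_curl_handle(CURL *c)
    {
    #ifdef GITCURL_HAVE_CURLOPT_TCP_KEEPALIVE
        /* curl >= 7.25.0 manages TCP keepalive for us */
        curl_easy_setopt(c, CURLOPT_TCP_KEEPALIVE, 1);
    #endif

    #ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY
        /* only compiled when the libcurl we build against knows the
         * option; the path here is illustrative */
        curl_easy_setopt(c, CURLOPT_PINNEDPUBLICKEY, "/etc/git/pinned-key.pub");
    #endif
    }

Where no feature macro is needed, the header instead backfills a missing constant, as it does for CURL_SOCKOPT_OK on curl older than 7.21.5.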
+ */ +#if LIBCURL_VERSION_NUM >= 0x072c00 +#define GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY 1 +#endif + +/** + * CURL_HTTP_VERSION_2 was added in 7.43.0, released in June 2015. + * + * The CURL_HTTP_VERSION_2 alias (but not CURL_HTTP_VERSION_2_0) has + * always been a macro, not an enum field (checked on curl version + * 7.78.0) + */ +#if LIBCURL_VERSION_NUM >= 0x072b00 +#define GIT_CURL_HAVE_CURL_HTTP_VERSION_2 1 +#endif + +/** + * CURLSSLOPT_NO_REVOKE was added in 7.44.0, released in August 2015. + * + * The CURLSSLOPT_NO_REVOKE is, has always been a macro, not an enum + * field (checked on curl version 7.78.0) + */ +#if LIBCURL_VERSION_NUM >= 0x072c00 +#define GIT_CURL_HAVE_CURLSSLOPT_NO_REVOKE 1 +#endif + +/** + * CURLOPT_PROXY_CAINFO was added in 7.52.0, released in August 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073400 +#define GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO 1 +#endif + +/** + * CURLOPT_PROXY_{KEYPASSWD,SSLCERT,SSLKEY} was added in 7.52.0, + * released in August 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073400 +#define GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD 1 +#endif + +/** + * CURL_SSLVERSION_TLSv1_3 was added in 7.53.0, released in February + * 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073400 +#define GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_3 1 +#endif + +/** + * CURLSSLSET_{NO_BACKENDS,OK,TOO_LATE,UNKNOWN_BACKEND} were added in + * 7.56.0, released in September 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073800 +#define GIT_CURL_HAVE_CURLSSLSET_NO_BACKENDS +#endif + +#endif diff --git a/git-cvsserver.perl b/git-cvsserver.perl index ed035f32c2..64319bed43 100755 --- a/git-cvsserver.perl +++ b/git-cvsserver.perl @@ -222,10 +222,11 @@ if ($state->{method} eq 'pserver') { open my $passwd, "<", $authdb or die $!; while (<$passwd>) { if (m{^\Q$user\E:(.*)}) { - if (crypt($user, descramble($password)) eq $1) { + my $hash = crypt(descramble($password), $1); + if (defined $hash and $hash eq $1) { $auth_ok = 1; } - }; + } } close $passwd; diff --git a/git-send-email.perl b/git-send-email.perl index e65d969d0b..5262d88ee3 100755 --- a/git-send-email.perl +++ b/git-send-email.perl @@ -376,7 +376,7 @@ sub read_config { @$target = @values; } else { - my $v = $known_keys->{$key}->[0]; + my $v = $known_keys->{$key}->[-1]; next unless defined $v; next if $configured->{$setting}++; $$target = $v; @@ -1697,7 +1697,6 @@ EOF $in_reply_to = $initial_in_reply_to; $references = $initial_in_reply_to || ''; -$subject = $initial_subject; $message_num = 0; # Prepares the email, prompts the user, sends it out @@ -1720,6 +1719,7 @@ sub process_file { @xh = (); my $input_format = undef; my @header = (); + $subject = $initial_subject; $message = ""; $message_num++; # First unfold multiline header fields @@ -1926,15 +1926,23 @@ sub process_file { } # set up for the next message - if ($thread && $message_was_sent && - ($chain_reply_to || !defined $in_reply_to || length($in_reply_to) == 0 || - $message_num == 1)) { - $in_reply_to = $message_id; - if (length $references > 0) { - $references .= "\n $message_id"; - } else { - $references = "$message_id"; + if ($thread) { + if ($message_was_sent && + ($chain_reply_to || !defined $in_reply_to || length($in_reply_to) == 0 || + $message_num == 1)) { + $in_reply_to = $message_id; + if (length $references > 0) { + $references .= "\n $message_id"; + } else { + $references = "$message_id"; + } } + } elsif (!defined $initial_in_reply_to) { + # --thread and --in-reply-to manage the "In-Reply-To" header and by + # extension the "References" header. 
If these commands are not used, reset + # the header values to their defaults. + $in_reply_to = undef; + $references = ''; } $message_id = undef; $num_sent++; diff --git a/git-sh-setup.sh b/git-sh-setup.sh index 10d9764185..cee053cdc3 100644 --- a/git-sh-setup.sh +++ b/git-sh-setup.sh @@ -223,9 +223,6 @@ require_clean_work_tree () { "rewrite branches") gettextln "Cannot rewrite branches: You have unstaged changes." >&2 ;; - "pull with rebase") - gettextln "Cannot pull with rebase: You have unstaged changes." >&2 - ;; *) eval_gettextln "Cannot \$action: You have unstaged changes." >&2 ;; @@ -242,9 +239,6 @@ require_clean_work_tree () { rebase) gettextln "Cannot rebase: Your index contains uncommitted changes." >&2 ;; - "pull with rebase") - gettextln "Cannot pull with rebase: Your index contains uncommitted changes." >&2 - ;; *) eval_gettextln "Cannot \$action: Your index contains uncommitted changes." >&2 ;; diff --git a/git-submodule.sh b/git-submodule.sh index dbd2ec2050..652861aa66 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -63,11 +63,6 @@ isnumber() n=$(($1 + 0)) 2>/dev/null && test "$n" = "$1" } -# Given a full hex object ID, is this the zero OID? -is_zero_oid () { - echo "$1" | sane_egrep '^0+$' >/dev/null 2>&1 -} - # Sanitize the local git environment for use within a submodule. We # can't simply use clear_local_git_env since we want to preserve some # of the settings from GIT_CONFIG_PARAMETERS. @@ -145,130 +140,12 @@ cmd_add() shift done - if ! git submodule--helper config --check-writeable >/dev/null 2>&1 - then - die "fatal: $(eval_gettext "please make sure that the .gitmodules file is in the working tree")" - fi - - if test -n "$reference_path" + if test -z "$1" then - is_absolute_path "$reference_path" || - reference_path="$wt_prefix$reference_path" - - reference="--reference=$reference_path" - fi - - repo=$1 - sm_path=$2 - - if test -z "$sm_path"; then - sm_path=$(printf '%s\n' "$repo" | - sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g') - fi - - if test -z "$repo" || test -z "$sm_path"; then usage fi - is_absolute_path "$sm_path" || sm_path="$wt_prefix$sm_path" - - # assure repo is absolute or relative to parent - case "$repo" in - ./*|../*) - test -z "$wt_prefix" || - die "fatal: $(gettext "Relative path can only be used from the toplevel of the working tree")" - - # dereference source url relative to parent's url - realrepo=$(git submodule--helper resolve-relative-url "$repo") || exit - ;; - *:*|/*) - # absolute url - realrepo=$repo - ;; - *) - die "fatal: $(eval_gettext "repo URL: '\$repo' must be absolute or begin with ./|../")" - ;; - esac - - # normalize path: - # multiple //; leading ./; /./; /../; trailing / - sm_path=$(printf '%s/\n' "$sm_path" | - sed -e ' - s|//*|/|g - s|^\(\./\)*|| - s|/\(\./\)*|/|g - :start - s|\([^/]*\)/\.\./|| - tstart - s|/*$|| - ') - if test -z "$force" - then - git ls-files --error-unmatch "$sm_path" > /dev/null 2>&1 && - die "fatal: $(eval_gettext "'\$sm_path' already exists in the index")" - else - git ls-files -s "$sm_path" | sane_grep -v "^160000" > /dev/null 2>&1 && - die "fatal: $(eval_gettext "'\$sm_path' already exists in the index and is not a submodule")" - fi - - if test -d "$sm_path" && - test -z $(git -C "$sm_path" rev-parse --show-cdup 2>/dev/null) - then - git -C "$sm_path" rev-parse --verify -q HEAD >/dev/null || - die "fatal: $(eval_gettext "'\$sm_path' does not have a commit checked out")" - fi - - if test -z "$force" - then - dryerr=$(git add --dry-run --ignore-missing --no-warn-embedded-repo "$sm_path" 
2>&1 >/dev/null) - res=$? - if test $res -ne 0 - then - echo >&2 "$dryerr" - exit $res - fi - fi - - if test -n "$custom_name" - then - sm_name="$custom_name" - else - sm_name="$sm_path" - fi - - if ! git submodule--helper check-name "$sm_name" - then - die "fatal: $(eval_gettext "'$sm_name' is not a valid submodule name")" - fi - - git submodule--helper add-clone ${GIT_QUIET:+--quiet} ${force:+"--force"} ${progress:+"--progress"} ${branch:+--branch "$branch"} --prefix "$wt_prefix" --path "$sm_path" --name "$sm_name" --url "$realrepo" ${reference:+"$reference"} ${dissociate:+"--dissociate"} ${depth:+"$depth"} || exit - git config submodule."$sm_name".url "$realrepo" - - git add --no-warn-embedded-repo $force "$sm_path" || - die "fatal: $(eval_gettext "Failed to add submodule '\$sm_path'")" - - git submodule--helper config submodule."$sm_name".path "$sm_path" && - git submodule--helper config submodule."$sm_name".url "$repo" && - if test -n "$branch" - then - git submodule--helper config submodule."$sm_name".branch "$branch" - fi && - git add --force .gitmodules || - die "fatal: $(eval_gettext "Failed to register submodule '\$sm_path'")" - - # NEEDSWORK: In a multi-working-tree world, this needs to be - # set in the per-worktree config. - if git config --get submodule.active >/dev/null - then - # If the submodule being adding isn't already covered by the - # current configured pathspec, set the submodule's active flag - if ! git submodule--helper is-active "$sm_path" - then - git config submodule."$sm_name".active "true" - fi - else - git config submodule."$sm_name".active "true" - fi + git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper add ${GIT_QUIET:+--quiet} ${force:+--force} ${progress:+"--progress"} ${branch:+--branch "$branch"} ${reference_path:+--reference "$reference_path"} ${dissociate:+--dissociate} ${custom_name:+--name "$custom_name"} ${depth:+"$depth"} -- "$@" } # @@ -369,13 +246,6 @@ cmd_deinit() git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@" } -is_tip_reachable () ( - sanitize_submodule_env && - cd "$1" && - rev=$(git rev-list -n 1 "$2" --not --all 2>/dev/null) && - test -z "$rev" -) - # usage: fetch_in_submodule <module_path> [<depth>] [<sha1>] # Because arguments are positional, use an empty string to omit <depth> # but include <sha1>. @@ -519,14 +389,13 @@ cmd_update() git submodule--helper ensure-core-worktree "$sm_path" || exit 1 - update_module=$(git submodule--helper update-module-mode $just_cloned "$sm_path" $update) - displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix") if test $just_cloned -eq 1 then subsha1= else + just_cloned= subsha1=$(sanitize_submodule_env; cd "$sm_path" && git rev-parse --verify HEAD) || die "fatal: $(eval_gettext "Unable to find current revision in submodule path '\$displaypath'")" @@ -547,70 +416,38 @@ cmd_update() die "fatal: $(eval_gettext "Unable to find current \${remote_name}/\${branch} revision in submodule path '\$sm_path'")" fi - if test "$subsha1" != "$sha1" || test -n "$force" - then - subforce=$force - # If we don't already have a -f flag and the submodule has never been checked out - if test -z "$subsha1" && test -z "$force" - then - subforce="-f" - fi - - if test -z "$nofetch" - then - # Run fetch only if $sha1 isn't present or it - # is not reachable from a ref. 
- is_tip_reachable "$sm_path" "$sha1" || - fetch_in_submodule "$sm_path" $depth || - say "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'; trying to directly fetch \$sha1:")" - - # Now we tried the usual fetch, but $sha1 may - # not be reachable from any of the refs - is_tip_reachable "$sm_path" "$sha1" || - fetch_in_submodule "$sm_path" "$depth" "$sha1" || - die "fatal: $(eval_gettext "Fetched in submodule path '\$displaypath', but it did not contain \$sha1. Direct fetching of that commit failed.")" - fi - - must_die_on_failure= - case "$update_module" in - checkout) - command="git checkout $subforce -q" - die_msg="fatal: $(eval_gettext "Unable to checkout '\$sha1' in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': checked out '\$sha1'")" - ;; - rebase) - command="git rebase ${GIT_QUIET:+--quiet}" - die_msg="fatal: $(eval_gettext "Unable to rebase '\$sha1' in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': rebased into '\$sha1'")" - must_die_on_failure=yes - ;; - merge) - command="git merge ${GIT_QUIET:+--quiet}" - die_msg="fatal: $(eval_gettext "Unable to merge '\$sha1' in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': merged in '\$sha1'")" - must_die_on_failure=yes - ;; - !*) - command="${update_module#!}" - die_msg="fatal: $(eval_gettext "Execution of '\$command \$sha1' failed in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': '\$command \$sha1'")" - must_die_on_failure=yes - ;; - *) - die "fatal: $(eval_gettext "Invalid update mode '$update_module' for submodule path '$path'")" - esac - - if (sanitize_submodule_env; cd "$sm_path" && $command "$sha1") - then - say "$say_msg" - elif test -n "$must_die_on_failure" - then - die_with_status 2 "$die_msg" - else - err="${err};$die_msg" - continue - fi - fi + out=$(git submodule--helper run-update-procedure \ + ${wt_prefix:+--prefix "$wt_prefix"} \ + ${GIT_QUIET:+--quiet} \ + ${force:+--force} \ + ${just_cloned:+--just-cloned} \ + ${nofetch:+--no-fetch} \ + ${depth:+"$depth"} \ + ${update:+--update "$update"} \ + ${prefix:+--recursive-prefix "$prefix"} \ + ${sha1:+--oid "$sha1"} \ + ${subsha1:+--suboid "$subsha1"} \ + "--" \ + "$sm_path") + + # exit codes for run-update-procedure: + # 0: update was successful, say command output + # 1: update procedure failed, but should not die + # 2 or 128: subcommand died during execution + # 3: no update procedure was run + res="$?" + case $res in + 0) + say "$out" + ;; + 1) + err="${err};fatal: $out" + continue + ;; + 2|128) + die_with_status $res "fatal: $out" + ;; + esac if test -n "$recursive" then @@ -561,7 +561,7 @@ static struct cmd_struct commands[] = { { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT }, { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT }, { "mktree", cmd_mktree, RUN_SETUP }, - { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP_GENTLY }, + { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP }, { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE }, { "name-rev", cmd_name_rev, RUN_SETUP }, { "notes", cmd_notes, RUN_SETUP }, diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl index e09e024a09..fbd1c20a23 100755 --- a/gitweb/gitweb.perl +++ b/gitweb/gitweb.perl @@ -3796,7 +3796,8 @@ sub git_get_heads_list { my @headslist; open my $fd, '-|', git_cmd(), 'for-each-ref', - ($limit ? '--count='.($limit+1) : ()), '--sort=-committerdate', + ($limit ? 
'--count='.($limit+1) : ()), + '--sort=-HEAD', '--sort=-committerdate', '--format=%(objectname) %(refname) %(subject)%00%(committer)', @patterns or return; @@ -1825,14 +1825,24 @@ int grep_source(struct grep_opt *opt, struct grep_source *gs) return grep_source_1(opt, gs, 0); } +static void grep_source_init_buf(struct grep_source *gs, char *buf, + unsigned long size) +{ + gs->type = GREP_SOURCE_BUF; + gs->name = NULL; + gs->path = NULL; + gs->buf = buf; + gs->size = size; + gs->driver = NULL; + gs->identifier = NULL; +} + int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size) { struct grep_source gs; int r; - grep_source_init(&gs, GREP_SOURCE_BUF, NULL, NULL, NULL); - gs.buf = buf; - gs.size = size; + grep_source_init_buf(&gs, buf, size); r = grep_source(opt, &gs); @@ -1840,28 +1850,30 @@ int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size) return r; } -void grep_source_init(struct grep_source *gs, enum grep_source_type type, - const char *name, const char *path, - const void *identifier) +void grep_source_init_file(struct grep_source *gs, const char *name, + const char *path) { - gs->type = type; + gs->type = GREP_SOURCE_FILE; gs->name = xstrdup_or_null(name); gs->path = xstrdup_or_null(path); gs->buf = NULL; gs->size = 0; gs->driver = NULL; + gs->identifier = xstrdup(path); +} - switch (type) { - case GREP_SOURCE_FILE: - gs->identifier = xstrdup(identifier); - break; - case GREP_SOURCE_OID: - gs->identifier = oiddup(identifier); - break; - case GREP_SOURCE_BUF: - gs->identifier = NULL; - break; - } +void grep_source_init_oid(struct grep_source *gs, const char *name, + const char *path, const struct object_id *oid, + struct repository *repo) +{ + gs->type = GREP_SOURCE_OID; + gs->name = xstrdup_or_null(name); + gs->path = xstrdup_or_null(path); + gs->buf = NULL; + gs->size = 0; + gs->driver = NULL; + gs->identifier = oiddup(oid); + gs->repo = repo; } void grep_source_clear(struct grep_source *gs) @@ -1890,7 +1902,8 @@ static int grep_source_load_oid(struct grep_source *gs) { enum object_type type; - gs->buf = read_object_file(gs->identifier, &type, &gs->size); + gs->buf = repo_read_object_file(gs->repo, gs->identifier, &type, + &gs->size); if (!gs->buf) return error(_("'%s': unable to read %s"), gs->name, @@ -120,7 +120,20 @@ struct grep_opt { struct grep_pat *header_list; struct grep_pat **header_tail; struct grep_expr *pattern_expression; + + /* + * NEEDSWORK: See if we can remove this field, because the repository + * should probably be per-source. That is, grep.c functions using this + * field should probably start using "repo" in "struct grep_source" + * instead. + * + * This is potentially the cause of at least one bug - "git grep" + * ignoring the textconv attributes from submodules. See [1] for more + * information. 
+ * [1] https://lore.kernel.org/git/CAHd-oW5iEQarYVxEXoTG-ua2zdoybTrSjCBKtO0YT292fm0NQQ@mail.gmail.com/ + */ struct repository *repo; + const char *prefix; int prefix_length; regex_t regexp; @@ -187,6 +200,7 @@ struct grep_source { GREP_SOURCE_BUF, } type; void *identifier; + struct repository *repo; /* if GREP_SOURCE_OID */ char *buf; unsigned long size; @@ -195,9 +209,11 @@ struct grep_source { struct userdiff_driver *driver; }; -void grep_source_init(struct grep_source *gs, enum grep_source_type type, - const char *name, const char *path, - const void *identifier); +void grep_source_init_file(struct grep_source *gs, const char *name, + const char *path); +void grep_source_init_oid(struct grep_source *gs, const char *name, + const char *path, const struct object_id *oid, + struct repository *repo); void grep_source_clear_data(struct grep_source *gs); void grep_source_clear(struct grep_source *gs); void grep_source_load_driver(struct grep_source *gs, @@ -11,6 +11,7 @@ #include "version.h" #include "refs.h" #include "parse-options.h" +#include "prompt.h" struct category_description { uint32_t category; @@ -472,6 +473,7 @@ int is_in_cmdlist(struct cmdnames *c, const char *s) static int autocorrect; static struct cmdnames aliases; +#define AUTOCORRECT_PROMPT (-3) #define AUTOCORRECT_NEVER (-2) #define AUTOCORRECT_IMMEDIATELY (-1) @@ -486,6 +488,8 @@ static int git_unknown_cmd_config(const char *var, const char *value, void *cb) autocorrect = AUTOCORRECT_NEVER; } else if (!strcmp(value, "immediate")) { autocorrect = AUTOCORRECT_IMMEDIATELY; + } else if (!strcmp(value, "prompt")) { + autocorrect = AUTOCORRECT_PROMPT; } else { int v = git_config_int(var, value); autocorrect = (v < 0) @@ -539,6 +543,12 @@ const char *help_unknown_cmd(const char *cmd) read_early_config(git_unknown_cmd_config, NULL); + /* + * Disable autocorrection prompt in a non-interactive session + */ + if ((autocorrect == AUTOCORRECT_PROMPT) && (!isatty(0) || !isatty(2))) + autocorrect = AUTOCORRECT_NEVER; + if (autocorrect == AUTOCORRECT_NEVER) { fprintf_ln(stderr, _("git: '%s' is not a git command. See 'git --help'."), cmd); exit(1); @@ -618,7 +628,16 @@ const char *help_unknown_cmd(const char *cmd) _("Continuing under the assumption that " "you meant '%s'."), assumed); - else { + else if (autocorrect == AUTOCORRECT_PROMPT) { + char *answer; + struct strbuf msg = STRBUF_INIT; + strbuf_addf(&msg, _("Run '%s' instead? 
(y/N)"), assumed); + answer = git_prompt(msg.buf, PROMPT_ECHO); + strbuf_release(&msg); + if (!(starts_with(answer, "y") || + starts_with(answer, "Y"))) + exit(1); + } else { fprintf_ln(stderr, _("Continuing in %0.1f seconds, " "assuming that you meant '%s'."), diff --git a/http-backend.c b/http-backend.c index b329bf63f0..e7c0eeab23 100644 --- a/http-backend.c +++ b/http-backend.c @@ -534,7 +534,7 @@ static void get_info_refs(struct strbuf *hdr, char *arg) if (service_name) { const char *argv[] = {NULL /* service name */, - "--stateless-rpc", "--advertise-refs", + "--http-backend-info-refs", ".", NULL}; struct rpc_service *svc = select_service(hdr, service_name); @@ -739,6 +739,7 @@ static int bad_request(struct strbuf *hdr, const struct service_cmd *c) int cmd_main(int argc, const char **argv) { char *method = getenv("REQUEST_METHOD"); + const char *proto_header; char *dir; struct service_cmd *cmd = NULL; char *cmd_arg = NULL; @@ -789,6 +790,9 @@ int cmd_main(int argc, const char **argv) http_config(); max_request_buffer = git_env_ulong("GIT_HTTP_MAX_REQUEST_BUFFER", max_request_buffer); + proto_header = getenv("HTTP_GIT_PROTOCOL"); + if (proto_header) + setenv(GIT_PROTOCOL_ENVIRONMENT, proto_header, 0); cmd->imp(&hdr, cmd_arg); return 0; diff --git a/http-push.c b/http-push.c index d7cb1675a2..3309aaf004 100644 --- a/http-push.c +++ b/http-push.c @@ -203,10 +203,8 @@ static void curl_setup_http(CURL *curl, const char *url, curl_easy_setopt(curl, CURLOPT_INFILE, buffer); curl_easy_setopt(curl, CURLOPT_INFILESIZE, buffer->buf.len); curl_easy_setopt(curl, CURLOPT_READFUNCTION, fread_buffer); -#ifndef NO_CURL_IOCTL curl_easy_setopt(curl, CURLOPT_IOCTLFUNCTION, ioctl_buffer); curl_easy_setopt(curl, CURLOPT_IOCTLDATA, buffer); -#endif curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_fn); curl_easy_setopt(curl, CURLOPT_NOBODY, 0); curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, custom_req); @@ -249,8 +247,6 @@ static void process_response(void *callback_data) finish_request(request); } -#ifdef USE_CURL_MULTI - static void start_fetch_loose(struct transfer_request *request) { struct active_request_slot *slot; @@ -299,7 +295,6 @@ static void start_mkcol(struct transfer_request *request) FREE_AND_NULL(request->url); } } -#endif static void start_fetch_packed(struct transfer_request *request) { @@ -605,7 +600,6 @@ static void finish_request(struct transfer_request *request) } } -#ifdef USE_CURL_MULTI static int is_running_queue; static int fill_active_slot(void *unused) { @@ -629,7 +623,6 @@ static int fill_active_slot(void *unused) } return 0; } -#endif static void get_remote_object_list(unsigned char parent); @@ -658,10 +651,8 @@ static void add_fetch_request(struct object *obj) request->next = request_queue_head; request_queue_head = request; -#ifdef USE_CURL_MULTI fill_active_slots(); step_active_slots(); -#endif } static int add_send_request(struct object *obj, struct remote_lock *lock) @@ -696,10 +687,8 @@ static int add_send_request(struct object *obj, struct remote_lock *lock) request->next = request_queue_head; request_queue_head = request; -#ifdef USE_CURL_MULTI fill_active_slots(); step_active_slots(); -#endif return 1; } @@ -894,7 +883,7 @@ static struct remote_lock *lock_remote(const char *path, long timeout) slot->results = &results; curl_setup_http(slot->curl, url, DAV_LOCK, &out_buffer, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); - curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, 
&in_buffer); CALLOC_ARRAY(lock, 1); lock->timeout = -1; @@ -1153,7 +1142,7 @@ static void remote_ls(const char *path, int flags, curl_setup_http(slot->curl, url, DAV_PROPFIND, &out_buffer, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); - curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, &in_buffer); if (start_active_slot(slot)) { run_active_slot(slot); @@ -1227,7 +1216,7 @@ static int locking_available(void) curl_setup_http(slot->curl, repo->url, DAV_PROPFIND, &out_buffer, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); - curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, &in_buffer); if (start_active_slot(slot)) { run_active_slot(slot); @@ -1682,21 +1671,15 @@ static int delete_remote_branch(const char *pattern, int force) static void run_request_queue(void) { -#ifdef USE_CURL_MULTI is_running_queue = 1; fill_active_slots(); add_fill_function(NULL, fill_active_slot); -#endif do { finish_all_active_slots(); -#ifdef USE_CURL_MULTI fill_active_slots(); -#endif } while (request_queue_head && !aborted); -#ifdef USE_CURL_MULTI is_running_queue = 0; -#endif } int cmd_main(int argc, const char **argv) @@ -1770,10 +1753,6 @@ int cmd_main(int argc, const char **argv) break; } -#ifndef USE_CURL_MULTI - die("git-push is not available for http/https repository when not compiled with USE_CURL_MULTI"); -#endif - if (!repo->url) usage(http_push_usage); @@ -1786,9 +1765,7 @@ int cmd_main(int argc, const char **argv) http_init(NULL, repo->url, 1); -#ifdef USE_CURL_MULTI is_running_queue = 0; -#endif /* Verify DAV compliance/lock support */ if (!locking_available()) { diff --git a/http-walker.c b/http-walker.c index 90d8ecb57e..910fae539b 100644 --- a/http-walker.c +++ b/http-walker.c @@ -127,7 +127,6 @@ static void release_object_request(struct object_request *obj_req) free(obj_req); } -#ifdef USE_CURL_MULTI static int fill_active_slot(struct walker *walker) { struct object_request *obj_req; @@ -146,7 +145,6 @@ static int fill_active_slot(struct walker *walker) } return 0; } -#endif static void prefetch(struct walker *walker, unsigned char *sha1) { @@ -163,10 +161,8 @@ static void prefetch(struct walker *walker, unsigned char *sha1) http_is_verbose = walker->get_verbosely; list_add_tail(&newreq->node, &object_queue_head); -#ifdef USE_CURL_MULTI fill_active_slots(); step_active_slots(); -#endif } static int is_alternate_allowed(const char *url) @@ -357,11 +353,9 @@ static void fetch_alternates(struct walker *walker, const char *base) * wait for them to arrive and return to processing this request's * curl message */ -#ifdef USE_CURL_MULTI while (cdata->got_alternates == 0) { step_active_slots(); } -#endif /* Nothing to do if they've already been fetched */ if (cdata->got_alternates == 1) @@ -384,7 +378,7 @@ static void fetch_alternates(struct walker *walker, const char *base) alt_req.walker = walker; slot->callback_data = &alt_req; - curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, &buffer); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_URL, url.buf); @@ -505,12 +499,8 @@ static int fetch_object(struct walker *walker, unsigned char *hash) return 0; } -#ifdef USE_CURL_MULTI while (obj_req->state == WAITING) step_active_slots(); -#else - start_object_request(walker, obj_req); -#endif /* * obj_req->req might change when fetching 
alternates in the callback @@ -623,9 +613,7 @@ struct walker *get_http_walker(const char *url) walker->cleanup = cleanup; walker->data = data; -#ifdef USE_CURL_MULTI add_fill_function(walker, (int (*)(void *)) fill_active_slot); -#endif return walker; } @@ -1,4 +1,5 @@ #include "git-compat-util.h" +#include "git-curl-compat.h" #include "http.h" #include "config.h" #include "pack.h" @@ -19,28 +20,16 @@ static struct trace_key trace_curl = TRACE_KEY_INIT(CURL); static int trace_curl_data = 1; static int trace_curl_redact = 1; -#if LIBCURL_VERSION_NUM >= 0x070a08 long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER; -#else -long int git_curl_ipresolve; -#endif int active_requests; int http_is_verbose; ssize_t http_post_buffer = 16 * LARGE_PACKET_MAX; -#if LIBCURL_VERSION_NUM >= 0x070a06 -#define LIBCURL_CAN_HANDLE_AUTH_ANY -#endif - static int min_curl_sessions = 1; static int curl_session_count; -#ifdef USE_CURL_MULTI static int max_requests = -1; static CURLM *curlm; -#endif -#ifndef NO_CURL_EASY_DUPHANDLE static CURL *curl_default; -#endif #define PREV_BUF_SIZE 4096 @@ -59,25 +48,19 @@ static struct { { "sslv2", CURL_SSLVERSION_SSLv2 }, { "sslv3", CURL_SSLVERSION_SSLv3 }, { "tlsv1", CURL_SSLVERSION_TLSv1 }, -#if LIBCURL_VERSION_NUM >= 0x072200 +#ifdef GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_0 { "tlsv1.0", CURL_SSLVERSION_TLSv1_0 }, { "tlsv1.1", CURL_SSLVERSION_TLSv1_1 }, { "tlsv1.2", CURL_SSLVERSION_TLSv1_2 }, #endif -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_3 { "tlsv1.3", CURL_SSLVERSION_TLSv1_3 }, #endif }; -#if LIBCURL_VERSION_NUM >= 0x070903 static const char *ssl_key; -#endif -#if LIBCURL_VERSION_NUM >= 0x070908 static const char *ssl_capath; -#endif -#if LIBCURL_VERSION_NUM >= 0x071304 static const char *curl_no_proxy; -#endif -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY static const char *ssl_pinnedkey; #endif static const char *ssl_cainfo; @@ -101,9 +84,7 @@ static struct { { "digest", CURLAUTH_DIGEST }, { "negotiate", CURLAUTH_GSSNEGOTIATE }, { "ntlm", CURLAUTH_NTLM }, -#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY { "anyauth", CURLAUTH_ANY }, -#endif /* * CURLAUTH_DIGEST_IE has no corresponding command-line option in * curl(1) and is not included in CURLAUTH_ANY, so we leave it out @@ -133,27 +114,15 @@ static int curl_empty_auth = -1; enum http_follow_config http_follow_config = HTTP_FOLLOW_INITIAL; -#if LIBCURL_VERSION_NUM >= 0x071700 -/* Use CURLOPT_KEYPASSWD as is */ -#elif LIBCURL_VERSION_NUM >= 0x070903 -#define CURLOPT_KEYPASSWD CURLOPT_SSLKEYPASSWD -#else -#define CURLOPT_KEYPASSWD CURLOPT_SSLCERTPASSWD -#endif - static struct credential cert_auth = CREDENTIAL_INIT; static int ssl_cert_password_required; -#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY static unsigned long http_auth_methods = CURLAUTH_ANY; static int http_auth_methods_restricted; /* Modes for which empty_auth cannot actually help us. 
*/ static unsigned long empty_auth_useless = CURLAUTH_BASIC -#ifdef CURLAUTH_DIGEST_IE | CURLAUTH_DIGEST_IE -#endif | CURLAUTH_DIGEST; -#endif static struct curl_slist *pragma_header; static struct curl_slist *no_pragma_header; @@ -186,7 +155,6 @@ size_t fread_buffer(char *ptr, size_t eltsize, size_t nmemb, void *buffer_) return size / eltsize; } -#ifndef NO_CURL_IOCTL curlioerr ioctl_buffer(CURL *handle, int cmd, void *clientp) { struct buffer *buffer = clientp; @@ -203,7 +171,6 @@ curlioerr ioctl_buffer(CURL *handle, int cmd, void *clientp) return CURLIOE_UNKNOWNCMD; } } -#endif size_t fwrite_buffer(char *ptr, size_t eltsize, size_t nmemb, void *buffer_) { @@ -237,12 +204,8 @@ static void finish_active_slot(struct active_request_slot *slot) if (slot->results != NULL) { slot->results->curl_result = slot->curl_result; slot->results->http_code = slot->http_code; -#if LIBCURL_VERSION_NUM >= 0x070a08 curl_easy_getinfo(slot->curl, CURLINFO_HTTPAUTH_AVAIL, &slot->results->auth_avail); -#else - slot->results->auth_avail = 0; -#endif curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CONNECTCODE, &slot->results->http_connectcode); @@ -255,12 +218,9 @@ static void finish_active_slot(struct active_request_slot *slot) static void xmulti_remove_handle(struct active_request_slot *slot) { -#ifdef USE_CURL_MULTI curl_multi_remove_handle(curlm, slot->curl); -#endif } -#ifdef USE_CURL_MULTI static void process_curl_messages(void) { int num_messages; @@ -288,7 +248,6 @@ static void process_curl_messages(void) curl_message = curl_multi_info_read(curlm, &num_messages); } } -#endif static int http_options(const char *var, const char *value, void *cb) { @@ -305,14 +264,10 @@ static int http_options(const char *var, const char *value, void *cb) return git_config_string(&ssl_version, var, value); if (!strcmp("http.sslcert", var)) return git_config_pathname(&ssl_cert, var, value); -#if LIBCURL_VERSION_NUM >= 0x070903 if (!strcmp("http.sslkey", var)) return git_config_pathname(&ssl_key, var, value); -#endif -#if LIBCURL_VERSION_NUM >= 0x070908 if (!strcmp("http.sslcapath", var)) return git_config_pathname(&ssl_capath, var, value); -#endif if (!strcmp("http.sslcainfo", var)) return git_config_pathname(&ssl_cainfo, var, value); if (!strcmp("http.sslcertpasswordprotected", var)) { @@ -341,18 +296,14 @@ static int http_options(const char *var, const char *value, void *cb) if (!strcmp("http.minsessions", var)) { min_curl_sessions = git_config_int(var, value); -#ifndef USE_CURL_MULTI if (min_curl_sessions > 1) min_curl_sessions = 1; -#endif return 0; } -#ifdef USE_CURL_MULTI if (!strcmp("http.maxrequests", var)) { max_requests = git_config_int(var, value); return 0; } -#endif if (!strcmp("http.lowspeedlimit", var)) { curl_low_speed_limit = (long)git_config_int(var, value); return 0; @@ -423,10 +374,10 @@ static int http_options(const char *var, const char *value, void *cb) } if (!strcmp("http.pinnedpubkey", var)) { -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY return git_config_pathname(&ssl_pinnedkey, var, value); #else - warning(_("Public key pinning not supported with cURL < 7.44.0")); + warning(_("Public key pinning not supported with cURL < 7.39.0")); return 0; #endif } @@ -461,12 +412,6 @@ static int curl_empty_auth_enabled(void) if (curl_empty_auth >= 0) return curl_empty_auth; -#ifndef LIBCURL_CAN_HANDLE_AUTH_ANY - /* - * Our libcurl is too old to do AUTH_ANY in the first place; - * just default to turning the feature off. 
- */ -#else /* * In the automatic case, kick in the empty-auth * hack as long as we would potentially try some @@ -479,7 +424,6 @@ static int curl_empty_auth_enabled(void) if (http_auth_methods_restricted && (http_auth_methods & ~empty_auth_useless)) return 1; -#endif return 0; } @@ -493,24 +437,8 @@ static void init_curl_http_auth(CURL *result) credential_fill(&http_auth); -#if LIBCURL_VERSION_NUM >= 0x071301 curl_easy_setopt(result, CURLOPT_USERNAME, http_auth.username); curl_easy_setopt(result, CURLOPT_PASSWORD, http_auth.password); -#else - { - static struct strbuf up = STRBUF_INIT; - /* - * Note that we assume we only ever have a single set of - * credentials in a given program run, so we do not have - * to worry about updating this buffer, only setting its - * initial value. - */ - if (!up.len) - strbuf_addf(&up, "%s:%s", - http_auth.username, http_auth.password); - curl_easy_setopt(result, CURLOPT_USERPWD, up.buf); - } -#endif } /* *var must be free-able */ @@ -524,22 +452,10 @@ static void var_override(const char **var, char *value) static void set_proxyauth_name_password(CURL *result) { -#if LIBCURL_VERSION_NUM >= 0x071301 curl_easy_setopt(result, CURLOPT_PROXYUSERNAME, proxy_auth.username); curl_easy_setopt(result, CURLOPT_PROXYPASSWORD, proxy_auth.password); -#else - struct strbuf s = STRBUF_INIT; - - strbuf_addstr_urlencode(&s, proxy_auth.username, - is_rfc3986_unreserved); - strbuf_addch(&s, ':'); - strbuf_addstr_urlencode(&s, proxy_auth.password, - is_rfc3986_unreserved); - curl_proxyuserpwd = strbuf_detach(&s, NULL); - curl_easy_setopt(result, CURLOPT_PROXYUSERPWD, curl_proxyuserpwd); -#endif } static void init_curl_proxy_auth(CURL *result) @@ -552,7 +468,6 @@ static void init_curl_proxy_auth(CURL *result) var_override(&http_proxy_authmethod, getenv("GIT_HTTP_PROXY_AUTHMETHOD")); -#if LIBCURL_VERSION_NUM >= 0x070a07 /* CURLOPT_PROXYAUTH and CURLAUTH_ANY */ if (http_proxy_authmethod) { int i; for (i = 0; i < ARRAY_SIZE(proxy_authmethods); i++) { @@ -570,7 +485,6 @@ static void init_curl_proxy_auth(CURL *result) } else curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY); -#endif } static int has_cert_password(void) @@ -587,7 +501,7 @@ static int has_cert_password(void) return 1; } -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD static int has_proxy_cert_password(void) { if (http_proxy_ssl_cert == NULL || proxy_ssl_cert_password_required != 1) @@ -603,13 +517,13 @@ static int has_proxy_cert_password(void) } #endif -#if LIBCURL_VERSION_NUM >= 0x071900 +#ifdef GITCURL_HAVE_CURLOPT_TCP_KEEPALIVE static void set_curl_keepalive(CURL *c) { curl_easy_setopt(c, CURLOPT_TCP_KEEPALIVE, 1); } -#elif LIBCURL_VERSION_NUM >= 0x071000 +#else static int sockopt_callback(void *client, curl_socket_t fd, curlsocktype type) { int ka = 1; @@ -623,19 +537,13 @@ static int sockopt_callback(void *client, curl_socket_t fd, curlsocktype type) if (rc < 0) warning_errno("unable to set SO_KEEPALIVE on socket"); - return 0; /* CURL_SOCKOPT_OK only exists since curl 7.21.5 */ + return CURL_SOCKOPT_OK; } static void set_curl_keepalive(CURL *c) { curl_easy_setopt(c, CURLOPT_SOCKOPTFUNCTION, sockopt_callback); } - -#else -static void set_curl_keepalive(CURL *c) -{ - /* not supported on older curl versions */ -} #endif static void redact_sensitive_header(struct strbuf *header) @@ -643,8 +551,8 @@ static void redact_sensitive_header(struct strbuf *header) const char *sensitive_header; if (trace_curl_redact && - (skip_prefix(header->buf, "Authorization:", &sensitive_header) 
|| - skip_prefix(header->buf, "Proxy-Authorization:", &sensitive_header))) { + (skip_iprefix(header->buf, "Authorization:", &sensitive_header) || + skip_iprefix(header->buf, "Proxy-Authorization:", &sensitive_header))) { /* The first token is the type, which is OK to log */ while (isspace(*sensitive_header)) sensitive_header++; @@ -654,7 +562,7 @@ static void redact_sensitive_header(struct strbuf *header) strbuf_setlen(header, sensitive_header - header->buf); strbuf_addstr(header, " <redacted>"); } else if (trace_curl_redact && - skip_prefix(header->buf, "Cookie:", &sensitive_header)) { + skip_iprefix(header->buf, "Cookie:", &sensitive_header)) { struct strbuf redacted_header = STRBUF_INIT; const char *cookie; @@ -809,7 +717,6 @@ void setup_curl_trace(CURL *handle) curl_easy_setopt(handle, CURLOPT_DEBUGDATA, NULL); } -#ifdef CURLPROTO_HTTP static long get_curl_allowed_protocols(int from_user) { long allowed_protocols = 0; @@ -825,9 +732,8 @@ static long get_curl_allowed_protocols(int from_user) return allowed_protocols; } -#endif -#if LIBCURL_VERSION_NUM >=0x072f00 +#ifdef GIT_CURL_HAVE_CURL_HTTP_VERSION_2 static int get_curl_http_version_opt(const char *version_string, long *opt) { int i; @@ -869,7 +775,7 @@ static CURL *get_curl_handle(void) curl_easy_setopt(result, CURLOPT_SSL_VERIFYHOST, 2); } -#if LIBCURL_VERSION_NUM >= 0x072f00 // 7.47.0 +#ifdef GIT_CURL_HAVE_CURL_HTTP_VERSION_2 if (curl_http_version) { long opt; if (!get_curl_http_version_opt(curl_http_version, &opt)) { @@ -879,12 +785,8 @@ static CURL *get_curl_handle(void) } #endif -#if LIBCURL_VERSION_NUM >= 0x070907 curl_easy_setopt(result, CURLOPT_NETRC, CURL_NETRC_OPTIONAL); -#endif -#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY curl_easy_setopt(result, CURLOPT_HTTPAUTH, CURLAUTH_ANY); -#endif #ifdef CURLGSSAPI_DELEGATION_FLAG if (curl_deleg) { @@ -904,7 +806,7 @@ static CURL *get_curl_handle(void) if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) && !http_schannel_check_revoke) { -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLSSLOPT_NO_REVOKE curl_easy_setopt(result, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NO_REVOKE); #else warning(_("CURLSSLOPT_NO_REVOKE not supported with cURL < 7.44.0")); @@ -940,28 +842,24 @@ static CURL *get_curl_handle(void) curl_easy_setopt(result, CURLOPT_SSLCERT, ssl_cert); if (has_cert_password()) curl_easy_setopt(result, CURLOPT_KEYPASSWD, cert_auth.password); -#if LIBCURL_VERSION_NUM >= 0x070903 if (ssl_key != NULL) curl_easy_setopt(result, CURLOPT_SSLKEY, ssl_key); -#endif -#if LIBCURL_VERSION_NUM >= 0x070908 if (ssl_capath != NULL) curl_easy_setopt(result, CURLOPT_CAPATH, ssl_capath); -#endif -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY if (ssl_pinnedkey != NULL) curl_easy_setopt(result, CURLOPT_PINNEDPUBLICKEY, ssl_pinnedkey); #endif if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) && !http_schannel_use_ssl_cainfo) { curl_easy_setopt(result, CURLOPT_CAINFO, NULL); -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO curl_easy_setopt(result, CURLOPT_PROXY_CAINFO, NULL); #endif } else if (ssl_cainfo != NULL || http_proxy_ssl_ca_info != NULL) { if (ssl_cainfo != NULL) curl_easy_setopt(result, CURLOPT_CAINFO, ssl_cainfo); -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO if (http_proxy_ssl_ca_info != NULL) curl_easy_setopt(result, CURLOPT_PROXY_CAINFO, http_proxy_ssl_ca_info); #endif @@ -975,19 +873,11 @@ static CURL *get_curl_handle(void) } curl_easy_setopt(result, 
CURLOPT_MAXREDIRS, 20); -#if LIBCURL_VERSION_NUM >= 0x071301 curl_easy_setopt(result, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL); -#elif LIBCURL_VERSION_NUM >= 0x071101 - curl_easy_setopt(result, CURLOPT_POST301, 1); -#endif -#ifdef CURLPROTO_HTTP curl_easy_setopt(result, CURLOPT_REDIR_PROTOCOLS, get_curl_allowed_protocols(0)); curl_easy_setopt(result, CURLOPT_PROTOCOLS, get_curl_allowed_protocols(-1)); -#else - warning(_("Protocol restrictions not supported with cURL < 7.19.4")); -#endif if (getenv("GIT_CURL_VERBOSE")) http_trace_curl_no_data(); setup_curl_trace(result); @@ -1002,10 +892,8 @@ static CURL *get_curl_handle(void) if (curl_ftp_no_epsv) curl_easy_setopt(result, CURLOPT_FTP_USE_EPSV, 0); -#ifdef CURLOPT_USE_SSL if (curl_ssl_try) curl_easy_setopt(result, CURLOPT_USE_SSL, CURLUSESSL_TRY); -#endif /* * CURL also examines these variables as a fallback; but we need to query @@ -1040,7 +928,6 @@ static CURL *get_curl_handle(void) */ curl_easy_setopt(result, CURLOPT_PROXY, ""); } else if (curl_http_proxy) { -#if LIBCURL_VERSION_NUM >= 0x071800 if (starts_with(curl_http_proxy, "socks5h")) curl_easy_setopt(result, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME); @@ -1053,8 +940,7 @@ static CURL *get_curl_handle(void) else if (starts_with(curl_http_proxy, "socks")) curl_easy_setopt(result, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); -#endif -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD else if (starts_with(curl_http_proxy, "https")) { curl_easy_setopt(result, CURLOPT_PROXYTYPE, CURLPROXY_HTTPS); @@ -1081,11 +967,9 @@ static CURL *get_curl_handle(void) die("Invalid proxy URL '%s'", curl_http_proxy); curl_easy_setopt(result, CURLOPT_PROXY, proxy_auth.host); -#if LIBCURL_VERSION_NUM >= 0x071304 var_override(&curl_no_proxy, getenv("NO_PROXY")); var_override(&curl_no_proxy, getenv("no_proxy")); curl_easy_setopt(result, CURLOPT_NOPROXY, curl_no_proxy); -#endif } init_curl_proxy_auth(result); @@ -1121,7 +1005,7 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) free(normalized_url); string_list_clear(&config.vars, 1); -#if LIBCURL_VERSION_NUM >= 0x073800 +#ifdef GIT_CURL_HAVE_CURLSSLSET_NO_BACKENDS if (http_ssl_backend) { const curl_ssl_backend **backends; struct strbuf buf = STRBUF_INIT; @@ -1164,7 +1048,6 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) no_pragma_header = curl_slist_append(http_copy_default_headers(), "Pragma:"); -#ifdef USE_CURL_MULTI { char *http_max_requests = getenv("GIT_HTTP_MAX_REQUESTS"); if (http_max_requests != NULL) @@ -1174,18 +1057,13 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) curlm = curl_multi_init(); if (!curlm) die("curl_multi_init failed"); -#endif if (getenv("GIT_SSL_NO_VERIFY")) curl_ssl_verify = 0; set_from_env(&ssl_cert, "GIT_SSL_CERT"); -#if LIBCURL_VERSION_NUM >= 0x070903 set_from_env(&ssl_key, "GIT_SSL_KEY"); -#endif -#if LIBCURL_VERSION_NUM >= 0x070908 set_from_env(&ssl_capath, "GIT_SSL_CAPATH"); -#endif set_from_env(&ssl_cainfo, "GIT_SSL_CAINFO"); set_from_env(&user_agent, "GIT_HTTP_USER_AGENT"); @@ -1201,10 +1079,8 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) curl_ssl_verify = 1; curl_session_count = 0; -#ifdef USE_CURL_MULTI if (max_requests < 1) max_requests = DEFAULT_MAX_REQUESTS; -#endif set_from_env(&http_proxy_ssl_cert, "GIT_PROXY_SSL_CERT"); set_from_env(&http_proxy_ssl_key, "GIT_PROXY_SSL_KEY"); @@ -1224,9 +1100,7 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) 
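The http.c hunks in this patch trade scattered LIBCURL_VERSION_NUM comparisons for named GIT_CURL_HAVE_* macros pulled in from the new git-curl-compat.h. The contents of that header are not part of this section, so the following is only a sketch of the pattern, with the 7.39.0 cutoff taken from the updated warning string for http.pinnedpubkey:

    /*
     * Sketch of the compat-header pattern: one version check defines a
     * named capability, and call sites test the capability instead of a
     * hex version number.  The actual git-curl-compat.h may choose a
     * different cutoff.
     */
    #include <curl/curl.h>

    #if LIBCURL_VERSION_NUM >= 0x072700 /* 7.39.0 */
    #define GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY 1
    #endif

    static void maybe_pin_public_key(CURL *h, const char *pinned)
    {
    #ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY
            if (pinned)
                    curl_easy_setopt(h, CURLOPT_PINNEDPUBLICKEY, pinned);
    #else
            (void)h;
            (void)pinned;   /* pinning is unavailable with this libcurl */
    #endif
    }
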
ssl_cert_password_required = 1; } -#ifndef NO_CURL_EASY_DUPHANDLE curl_default = get_curl_handle(); -#endif } void http_cleanup(void) @@ -1244,13 +1118,9 @@ void http_cleanup(void) } active_queue_head = NULL; -#ifndef NO_CURL_EASY_DUPHANDLE curl_easy_cleanup(curl_default); -#endif -#ifdef USE_CURL_MULTI curl_multi_cleanup(curlm); -#endif curl_global_cleanup(); string_list_clear(&extra_http_headers, 0); @@ -1297,7 +1167,6 @@ struct active_request_slot *get_active_slot(void) struct active_request_slot *slot = active_queue_head; struct active_request_slot *newslot; -#ifdef USE_CURL_MULTI int num_transfers; /* Wait for a slot to open up if the queue is full */ @@ -1306,7 +1175,6 @@ struct active_request_slot *get_active_slot(void) if (num_transfers < active_requests) process_curl_messages(); } -#endif while (slot != NULL && slot->in_use) slot = slot->next; @@ -1329,11 +1197,7 @@ struct active_request_slot *get_active_slot(void) } if (slot->curl == NULL) { -#ifdef NO_CURL_EASY_DUPHANDLE - slot->curl = get_curl_handle(); -#else slot->curl = curl_easy_duphandle(curl_default); -#endif curl_session_count++; } @@ -1367,12 +1231,8 @@ struct active_request_slot *get_active_slot(void) else curl_easy_setopt(slot->curl, CURLOPT_FOLLOWLOCATION, 0); -#if LIBCURL_VERSION_NUM >= 0x070a08 curl_easy_setopt(slot->curl, CURLOPT_IPRESOLVE, git_curl_ipresolve); -#endif -#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, http_auth_methods); -#endif if (http_auth.password || curl_empty_auth_enabled()) init_curl_http_auth(slot->curl); @@ -1381,7 +1241,6 @@ struct active_request_slot *get_active_slot(void) int start_active_slot(struct active_request_slot *slot) { -#ifdef USE_CURL_MULTI CURLMcode curlm_result = curl_multi_add_handle(curlm, slot->curl); int num_transfers; @@ -1399,11 +1258,9 @@ int start_active_slot(struct active_request_slot *slot) * something. 
*/ curl_multi_perform(curlm, &num_transfers); -#endif return 1; } -#ifdef USE_CURL_MULTI struct fill_chain { void *data; int (*fill)(void *); @@ -1462,11 +1319,9 @@ void step_active_slots(void) fill_active_slots(); } } -#endif void run_active_slot(struct active_request_slot *slot) { -#ifdef USE_CURL_MULTI fd_set readfds; fd_set writefds; fd_set excfds; @@ -1479,7 +1334,6 @@ void run_active_slot(struct active_request_slot *slot) step_active_slots(); if (slot->in_use) { -#if LIBCURL_VERSION_NUM >= 0x070f04 long curl_timeout; curl_multi_timeout(curlm, &curl_timeout); if (curl_timeout == 0) { @@ -1491,10 +1345,6 @@ void run_active_slot(struct active_request_slot *slot) select_timeout.tv_sec = curl_timeout / 1000; select_timeout.tv_usec = (curl_timeout % 1000) * 1000; } -#else - select_timeout.tv_sec = 0; - select_timeout.tv_usec = 50000; -#endif max_fd = -1; FD_ZERO(&readfds); @@ -1517,12 +1367,6 @@ void run_active_slot(struct active_request_slot *slot) select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout); } } -#else - while (slot->in_use) { - slot->curl_result = curl_easy_perform(slot->curl); - finish_active_slot(slot); - } -#endif } static void release_active_slot(struct active_request_slot *slot) @@ -1536,9 +1380,7 @@ static void release_active_slot(struct active_request_slot *slot) curl_session_count--; } } -#ifdef USE_CURL_MULTI fill_active_slots(); -#endif } void finish_all_active_slots(void) @@ -1654,24 +1496,20 @@ static int handle_curl_result(struct slot_results *results) credential_reject(&http_auth); return HTTP_NOAUTH; } else { -#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE; if (results->auth_avail) { http_auth_methods &= results->auth_avail; http_auth_methods_restricted = 1; } -#endif return HTTP_REAUTH; } } else { if (results->http_connectcode == 407) credential_reject(&proxy_auth); -#if LIBCURL_VERSION_NUM >= 0x070c00 if (!curl_errorstr[0]) strlcpy(curl_errorstr, curl_easy_strerror(results->curl_result), sizeof(curl_errorstr)); -#endif return HTTP_ERROR; } } @@ -1930,7 +1768,7 @@ static int http_request(const char *url, curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 1); } else { curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0); - curl_easy_setopt(slot->curl, CURLOPT_FILE, result); + curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, result); if (target == HTTP_REQUEST_FILE) { off_t posn = ftello(result); @@ -2347,7 +2185,7 @@ struct http_pack_request *new_direct_http_pack_request( } preq->slot = get_active_slot(); - curl_easy_setopt(preq->slot->curl, CURLOPT_FILE, preq->packfile); + curl_easy_setopt(preq->slot->curl, CURLOPT_WRITEDATA, preq->packfile); curl_easy_setopt(preq->slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(preq->slot->curl, CURLOPT_URL, preq->url); curl_easy_setopt(preq->slot->curl, CURLOPT_HTTPHEADER, @@ -2518,7 +2356,7 @@ struct http_object_request *new_http_object_request(const char *base_url, freq->slot = get_active_slot(); - curl_easy_setopt(freq->slot->curl, CURLOPT_FILE, freq); + curl_easy_setopt(freq->slot->curl, CURLOPT_WRITEDATA, freq); curl_easy_setopt(freq->slot->curl, CURLOPT_FAILONERROR, 0); curl_easy_setopt(freq->slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file); curl_easy_setopt(freq->slot->curl, CURLOPT_ERRORBUFFER, freq->errorstr); @@ -10,49 +10,7 @@ #include "remote.h" #include "url.h" -/* - * We detect based on the cURL version if multi-transfer is - * usable in this implementation and define this symbol accordingly. - * This shouldn't be set by the Makefile or by the user (e.g. 
via CFLAGS). - */ -#undef USE_CURL_MULTI - -#if LIBCURL_VERSION_NUM >= 0x071000 -#define USE_CURL_MULTI #define DEFAULT_MAX_REQUESTS 5 -#endif - -#if LIBCURL_VERSION_NUM < 0x070704 -#define curl_global_cleanup() do { /* nothing */ } while (0) -#endif - -#if LIBCURL_VERSION_NUM < 0x070800 -#define curl_global_init(a) do { /* nothing */ } while (0) -#elif LIBCURL_VERSION_NUM >= 0x070c00 -#define curl_global_init(a) curl_global_init_mem(a, xmalloc, free, \ - xrealloc, xstrdup, xcalloc) -#endif - -#if (LIBCURL_VERSION_NUM < 0x070c04) || (LIBCURL_VERSION_NUM == 0x071000) -#define NO_CURL_EASY_DUPHANDLE -#endif - -#if LIBCURL_VERSION_NUM < 0x070a03 -#define CURLE_HTTP_RETURNED_ERROR CURLE_HTTP_NOT_FOUND -#endif - -#if LIBCURL_VERSION_NUM < 0x070c03 -#define NO_CURL_IOCTL -#endif - -/* - * CURLOPT_USE_SSL was known as CURLOPT_FTP_SSL up to 7.16.4, - * and the constants were known as CURLFTPSSL_* -*/ -#if !defined(CURLOPT_USE_SSL) && defined(CURLOPT_FTP_SSL) -#define CURLOPT_USE_SSL CURLOPT_FTP_SSL -#define CURLUSESSL_TRY CURLFTPSSL_TRY -#endif struct slot_results { CURLcode curl_result; @@ -82,9 +40,7 @@ struct buffer { size_t fread_buffer(char *ptr, size_t eltsize, size_t nmemb, void *strbuf); size_t fwrite_buffer(char *ptr, size_t eltsize, size_t nmemb, void *strbuf); size_t fwrite_null(char *ptr, size_t eltsize, size_t nmemb, void *strbuf); -#ifndef NO_CURL_IOCTL curlioerr ioctl_buffer(CURL *handle, int cmd, void *clientp); -#endif /* Slot lifecycle functions */ struct active_request_slot *get_active_slot(void); @@ -101,11 +57,9 @@ void finish_all_active_slots(void); int run_one_slot(struct active_request_slot *slot, struct slot_results *results); -#ifdef USE_CURL_MULTI void fill_active_slots(void); void add_fill_function(void *data, int (*fill)(void *)); void step_active_slots(void); -#endif void http_init(struct remote *remote, const char *url, int proactive_auth); diff --git a/imap-send.c b/imap-send.c index a0540ba5cf..e6090a0346 100644 --- a/imap-send.c +++ b/imap-send.c @@ -1441,7 +1441,7 @@ static CURL *setup_curl(struct imap_server_conf *srvc, struct credential *cred) curl_easy_setopt(curl, CURLOPT_PORT, server.port); if (server.auth_method) { -#if LIBCURL_VERSION_NUM < 0x072200 +#ifndef GIT_CURL_HAVE_CURLOPT_LOGIN_OPTIONS warning("No LOGIN_OPTIONS support in this cURL version"); #else struct strbuf auth = STRBUF_INIT; @@ -1517,11 +1517,7 @@ static int curl_append_msgs_to_imap(struct imap_server_conf *server, if (cred.username) { if (res == CURLE_OK) credential_approve(&cred); -#if LIBCURL_VERSION_NUM >= 0x070d01 else if (res == CURLE_LOGIN_DENIED) -#else - else -#endif credential_reject(&cred); } diff --git a/list-objects.c b/list-objects.c index 473a332416..2f623f8211 100644 --- a/list-objects.c +++ b/list-objects.c @@ -337,8 +337,8 @@ static void add_pending_tree(struct rev_info *revs, struct tree *tree) add_pending_object(revs, &tree->object, ""); } -static void traverse_trees_and_blobs(struct traversal_context *ctx, - struct strbuf *base) +static void traverse_non_commits(struct traversal_context *ctx, + struct strbuf *base) { int i; @@ -410,9 +410,9 @@ static void do_traverse(struct traversal_context *ctx) * needs a reallocation for each commit. Can we pass the * tree directory without allocation churn? 
*/ - traverse_trees_and_blobs(ctx, &csp); + traverse_non_commits(ctx, &csp); } - traverse_trees_and_blobs(ctx, &csp); + traverse_non_commits(ctx, &csp); strbuf_release(&csp); } @@ -41,6 +41,12 @@ static void ensure_config_read(void) } /* + * If we see this many or more "ref-prefix" lines from the client, we consider + * it "too many" and will avoid using the prefix feature entirely. + */ +#define TOO_MANY_PREFIXES 65536 + +/* * Check if one of the prefixes is a prefix of the ref. * If no prefixes were provided, all refs match. */ @@ -65,6 +71,7 @@ struct ls_refs_data { unsigned peel; unsigned symrefs; struct strvec prefixes; + struct strbuf buf; unsigned unborn : 1; }; @@ -73,7 +80,8 @@ static int send_ref(const char *refname, const struct object_id *oid, { struct ls_refs_data *data = cb_data; const char *refname_nons = strip_namespace(refname); - struct strbuf refline = STRBUF_INIT; + + strbuf_reset(&data->buf); if (ref_is_hidden(refname_nons, refname)) return 0; @@ -82,9 +90,9 @@ static int send_ref(const char *refname, const struct object_id *oid, return 0; if (oid) - strbuf_addf(&refline, "%s %s", oid_to_hex(oid), refname_nons); + strbuf_addf(&data->buf, "%s %s", oid_to_hex(oid), refname_nons); else - strbuf_addf(&refline, "unborn %s", refname_nons); + strbuf_addf(&data->buf, "unborn %s", refname_nons); if (data->symrefs && flag & REF_ISSYMREF) { struct object_id unused; const char *symref_target = resolve_ref_unsafe(refname, 0, @@ -94,20 +102,19 @@ static int send_ref(const char *refname, const struct object_id *oid, if (!symref_target) die("'%s' is a symref but it is not?", refname); - strbuf_addf(&refline, " symref-target:%s", + strbuf_addf(&data->buf, " symref-target:%s", strip_namespace(symref_target)); } if (data->peel && oid) { struct object_id peeled; if (!peel_iterated_oid(oid, &peeled)) - strbuf_addf(&refline, " peeled:%s", oid_to_hex(&peeled)); + strbuf_addf(&data->buf, " peeled:%s", oid_to_hex(&peeled)); } - strbuf_addch(&refline, '\n'); - packet_write(1, refline.buf, refline.len); + strbuf_addch(&data->buf, '\n'); + packet_fwrite(stdout, data->buf.buf, data->buf.len); - strbuf_release(&refline); return 0; } @@ -138,13 +145,13 @@ static int ls_refs_config(const char *var, const char *value, void *data) return parse_hide_refs_config(var, value, "uploadpack"); } -int ls_refs(struct repository *r, struct strvec *keys, - struct packet_reader *request) +int ls_refs(struct repository *r, struct packet_reader *request) { struct ls_refs_data data; memset(&data, 0, sizeof(data)); strvec_init(&data.prefixes); + strbuf_init(&data.buf, 0); ensure_config_read(); git_config(ls_refs_config, NULL); @@ -157,22 +164,35 @@ int ls_refs(struct repository *r, struct strvec *keys, data.peel = 1; else if (!strcmp("symrefs", arg)) data.symrefs = 1; - else if (skip_prefix(arg, "ref-prefix ", &out)) - strvec_push(&data.prefixes, out); + else if (skip_prefix(arg, "ref-prefix ", &out)) { + if (data.prefixes.nr < TOO_MANY_PREFIXES) + strvec_push(&data.prefixes, out); + } else if (!strcmp("unborn", arg)) data.unborn = allow_unborn; + else + die(_("unexpected line: '%s'"), arg); } if (request->status != PACKET_READ_FLUSH) die(_("expected flush after ls-refs arguments")); + /* + * If we saw too many prefixes, we must avoid using them at all; as + * soon as we have any prefix, they are meant to form a comprehensive + * list. 
+ */ + if (data.prefixes.nr >= TOO_MANY_PREFIXES) + strvec_clear(&data.prefixes); + send_possibly_unborn_head(&data); if (!data.prefixes.nr) strvec_push(&data.prefixes, ""); for_each_fullref_in_prefixes(get_git_namespace(), data.prefixes.v, send_ref, &data, 0); - packet_flush(1); + packet_fflush(stdout); strvec_clear(&data.prefixes); + strbuf_release(&data.buf); return 0; } @@ -2,10 +2,8 @@ #define LS_REFS_H struct repository; -struct strvec; struct packet_reader; -int ls_refs(struct repository *r, struct strvec *keys, - struct packet_reader *request); +int ls_refs(struct repository *r, struct packet_reader *request); int ls_refs_advertise(struct repository *r, struct strbuf *value); #endif /* LS_REFS_H */ @@ -37,6 +37,7 @@ static void free_mailmap_info(void *p, const char *s) s, debug_str(mi->name), debug_str(mi->email)); free(mi->name); free(mi->email); + free(mi); } static void free_mailmap_entry(void *p, const char *s) @@ -52,6 +53,7 @@ static void free_mailmap_entry(void *p, const char *s) me->namemap.strdup_strings = 1; string_list_clear_func(&me->namemap, free_mailmap_info); + free(me); } /* diff --git a/merge-ort.c b/merge-ort.c index 6eb910d6f0..0a76952e9b 100644 --- a/merge-ort.c +++ b/merge-ort.c @@ -303,8 +303,6 @@ struct merge_options_internal { * * these keys serve to intern all the path strings, which allows * us to do pointer comparison on directory names instead of * strcmp; we just have to be careful to use the interned strings. - * (Technically paths_to_free may track some strings that were - * removed from froms paths.) * * The values of paths: * * either a pointer to a merged_info, or a conflict_info struct @@ -340,14 +338,14 @@ struct merge_options_internal { struct strmap conflicted; /* - * paths_to_free: additional list of strings to free + * pool: memory pool for fast allocation/deallocation * - * If keys are removed from "paths", they are added to paths_to_free - * to ensure they are later freed. We avoid free'ing immediately since - * other places (e.g. conflict_info.pathnames[]) may still be - * referencing these paths. + * We allocate room for lots of filenames and auxiliary data + * structures in merge_options_internal, and it tends to all be + * freed together too. Using a memory pool for these provides a + * nice speedup. */ - struct string_list paths_to_free; + struct mem_pool pool; /* * output: special messages and conflict notices for various paths @@ -519,64 +517,45 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti, { struct rename_info *renames = &opti->renames; int i; - void (*strmap_func)(struct strmap *, int) = + void (*strmap_clear_func)(struct strmap *, int) = reinitialize ? strmap_partial_clear : strmap_clear; - void (*strintmap_func)(struct strintmap *) = + void (*strintmap_clear_func)(struct strintmap *) = reinitialize ? strintmap_partial_clear : strintmap_clear; - void (*strset_func)(struct strset *) = + void (*strset_clear_func)(struct strset *) = reinitialize ? strset_partial_clear : strset_clear; - /* - * We marked opti->paths with strdup_strings = 0, so that we - * wouldn't have to make another copy of the fullpath created by - * make_traverse_path from setup_path_info(). But, now that we've - * used it and have no other references to these strings, it is time - * to deallocate them. - */ - free_strmap_strings(&opti->paths); - strmap_func(&opti->paths, 1); + strmap_clear_func(&opti->paths, 0); /* * All keys and values in opti->conflicted are a subset of those in * opti->paths. 
We don't want to deallocate anything twice, so we * don't free the keys and we pass 0 for free_values. */ - strmap_func(&opti->conflicted, 0); - - /* - * opti->paths_to_free is similar to opti->paths; we created it with - * strdup_strings = 0 to avoid making _another_ copy of the fullpath - * but now that we've used it and have no other references to these - * strings, it is time to deallocate them. We do so by temporarily - * setting strdup_strings to 1. - */ - opti->paths_to_free.strdup_strings = 1; - string_list_clear(&opti->paths_to_free, 0); - opti->paths_to_free.strdup_strings = 0; + strmap_clear_func(&opti->conflicted, 0); if (opti->attr_index.cache_nr) /* true iff opt->renormalize */ discard_index(&opti->attr_index); /* Free memory used by various renames maps */ for (i = MERGE_SIDE1; i <= MERGE_SIDE2; ++i) { - strintmap_func(&renames->dirs_removed[i]); - strmap_func(&renames->dir_renames[i], 0); - strintmap_func(&renames->relevant_sources[i]); + strintmap_clear_func(&renames->dirs_removed[i]); + strmap_clear_func(&renames->dir_renames[i], 0); + strintmap_clear_func(&renames->relevant_sources[i]); if (!reinitialize) assert(renames->cached_pairs_valid_side == 0); if (i != renames->cached_pairs_valid_side && -1 != renames->cached_pairs_valid_side) { - strset_func(&renames->cached_target_names[i]); - strmap_func(&renames->cached_pairs[i], 1); - strset_func(&renames->cached_irrelevant[i]); + strset_clear_func(&renames->cached_target_names[i]); + strmap_clear_func(&renames->cached_pairs[i], 1); + strset_clear_func(&renames->cached_irrelevant[i]); partial_clear_dir_rename_count(&renames->dir_rename_count[i]); if (!reinitialize) strmap_clear(&renames->dir_rename_count[i], 1); } } for (i = MERGE_SIDE1; i <= MERGE_SIDE2; ++i) { - strintmap_func(&renames->deferred[i].possible_trivial_merges); - strset_func(&renames->deferred[i].target_dirs); + strintmap_clear_func(&renames->deferred[i].possible_trivial_merges); + strset_clear_func(&renames->deferred[i].target_dirs); renames->deferred[i].trivial_merges_okay = 1; /* 1 == maybe */ } renames->cached_pairs_valid_side = 0; @@ -603,6 +582,8 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti, strmap_clear(&opti->output, 0); } + mem_pool_discard(&opti->pool, 0); + /* Clean out callback_data as well. 
*/ FREE_AND_NULL(renames->callback_data); renames->callback_data_nr = renames->callback_data_alloc = 0; @@ -665,6 +646,36 @@ static void path_msg(struct merge_options *opt, strbuf_addch(sb, '\n'); } +static struct diff_filespec *pool_alloc_filespec(struct mem_pool *pool, + const char *path) +{ + /* Similar to alloc_filespec(), but allocate from pool and reuse path */ + struct diff_filespec *spec; + + spec = mem_pool_calloc(pool, 1, sizeof(*spec)); + spec->path = (char*)path; /* spec won't modify it */ + + spec->count = 1; + spec->is_binary = -1; + return spec; +} + +static struct diff_filepair *pool_diff_queue(struct mem_pool *pool, + struct diff_queue_struct *queue, + struct diff_filespec *one, + struct diff_filespec *two) +{ + /* Same code as diff_queue(), except allocate from pool */ + struct diff_filepair *dp; + + dp = mem_pool_calloc(pool, 1, sizeof(*dp)); + dp->one = one; + dp->two = two; + if (queue) + diff_q(queue, dp); + return dp; +} + /* add a string to a strbuf, but converting "/" to "_" */ static void add_flattened_path(struct strbuf *out, const char *s) { @@ -793,8 +804,9 @@ static void setup_path_info(struct merge_options *opt, assert(!df_conflict || !resolved); /* df_conflict implies !resolved */ assert(resolved == (merged_version != NULL)); - mi = xcalloc(1, resolved ? sizeof(struct merged_info) : - sizeof(struct conflict_info)); + mi = mem_pool_calloc(&opt->priv->pool, 1, + resolved ? sizeof(struct merged_info) : + sizeof(struct conflict_info)); mi->directory_name = current_dir_name; mi->basename_offset = current_dir_name_len; mi->clean = !!resolved; @@ -891,11 +903,11 @@ static void add_pair(struct merge_options *opt, return; } - one = alloc_filespec(pathname); - two = alloc_filespec(pathname); + one = pool_alloc_filespec(&opt->priv->pool, pathname); + two = pool_alloc_filespec(&opt->priv->pool, pathname); fill_filespec(is_add ? 
two : one, &names[names_idx].oid, 1, names[names_idx].mode); - diff_queue(&renames->pairs[side], one, two); + pool_diff_queue(&opt->priv->pool, &renames->pairs[side], one, two); } static void collect_rename_info(struct merge_options *opt, @@ -1086,7 +1098,7 @@ static int collect_merge_info_callback(int n, len = traverse_path_len(info, p->pathlen); /* +1 in both of the following lines to include the NUL byte */ - fullpath = xmalloc(len + 1); + fullpath = mem_pool_alloc(&opt->priv->pool, len + 1); make_traverse_path(fullpath, len + 1, info, p->path, p->pathlen); /* @@ -1341,7 +1353,7 @@ static int handle_deferred_entries(struct merge_options *opt, copy = renames->deferred[side].possible_trivial_merges; strintmap_init_with_options(&renames->deferred[side].possible_trivial_merges, 0, - NULL, + &opt->priv->pool, 0); strintmap_for_each_entry(©, &iter, entry) { const char *path = entry->key; @@ -2293,12 +2305,17 @@ static void apply_directory_rename_modifications(struct merge_options *opt, VERIFY_CI(ci); /* Find parent directories missing from opt->priv->paths */ - cur_path = new_path; + cur_path = mem_pool_strdup(&opt->priv->pool, new_path); + free((char*)new_path); + new_path = (char *)cur_path; + while (1) { /* Find the parent directory of cur_path */ char *last_slash = strrchr(cur_path, '/'); if (last_slash) { - parent_name = xstrndup(cur_path, last_slash - cur_path); + parent_name = mem_pool_strndup(&opt->priv->pool, + cur_path, + last_slash - cur_path); } else { parent_name = opt->priv->toplevel_dir; break; @@ -2307,7 +2324,6 @@ static void apply_directory_rename_modifications(struct merge_options *opt, /* Look it up in opt->priv->paths */ entry = strmap_get_entry(&opt->priv->paths, parent_name); if (entry) { - free((char*)parent_name); parent_name = entry->key; /* reuse known pointer */ break; } @@ -2334,13 +2350,6 @@ static void apply_directory_rename_modifications(struct merge_options *opt, parent_name = cur_dir; } - /* - * We are removing old_path from opt->priv->paths. old_path also will - * eventually need to be freed, but it may still be used by e.g. - * ci->pathnames. So, store it in another string-list for now. - */ - string_list_append(&opt->priv->paths_to_free, old_path); - assert(ci->filemask == 2 || ci->filemask == 4); assert(ci->dirmask == 0); strmap_remove(&opt->priv->paths, old_path, 0); @@ -2374,7 +2383,6 @@ static void apply_directory_rename_modifications(struct merge_options *opt, new_ci->stages[index].mode = ci->stages[index].mode; oidcpy(&new_ci->stages[index].oid, &ci->stages[index].oid); - free(ci); ci = new_ci; } @@ -2802,10 +2810,23 @@ static void use_cached_pairs(struct merge_options *opt, if (!new_name) new_name = old_name; + /* + * cached_pairs has *copies* of old_name and new_name, + * because it has to persist across merges. Since + * pool_alloc_filespec() will just re-use the existing + * filenames, which will also get re-used by + * opt->priv->paths if they become renames, and then + * get freed at the end of the merge, that would leave + * the copy in cached_pairs dangling. Avoid this by + * making a copy here. 
+ */ + old_name = mem_pool_strdup(&opt->priv->pool, old_name); + new_name = mem_pool_strdup(&opt->priv->pool, new_name); + /* We don't care about oid/mode, only filenames and status */ - one = alloc_filespec(old_name); - two = alloc_filespec(new_name); - diff_queue(pairs, one, two); + one = pool_alloc_filespec(&opt->priv->pool, old_name); + two = pool_alloc_filespec(&opt->priv->pool, new_name); + pool_diff_queue(&opt->priv->pool, pairs, one, two); pairs->queue[pairs->nr-1]->status = entry->value ? 'R' : 'D'; } } @@ -2913,6 +2934,7 @@ static int detect_regular_renames(struct merge_options *opt, diff_queued_diff = renames->pairs[side_index]; trace2_region_enter("diff", "diffcore_rename", opt->repo); diffcore_rename_extended(&diff_opts, + &opt->priv->pool, &renames->relevant_sources[side_index], &renames->dirs_removed[side_index], &renames->dir_rename_count[side_index], @@ -2963,7 +2985,7 @@ static int collect_renames(struct merge_options *opt, if (p->status != 'A' && p->status != 'R') { possibly_cache_new_pair(renames, p, side_index, NULL); - diff_free_filepair(p); + pool_diff_free_filepair(&opt->priv->pool, p); continue; } @@ -2976,7 +2998,7 @@ static int collect_renames(struct merge_options *opt, possibly_cache_new_pair(renames, p, side_index, new_path); if (p->status != 'R' && !new_path) { - diff_free_filepair(p); + pool_diff_free_filepair(&opt->priv->pool, p); continue; } @@ -3094,7 +3116,7 @@ cleanup: side_pairs = &renames->pairs[s]; for (i = 0; i < side_pairs->nr; ++i) { struct diff_filepair *p = side_pairs->queue[i]; - diff_free_filepair(p); + pool_diff_free_filepair(&opt->priv->pool, p); } } @@ -3107,7 +3129,8 @@ simple_cleanup: if (combined.nr) { int i; for (i = 0; i < combined.nr; i++) - diff_free_filepair(combined.queue[i]); + pool_diff_free_filepair(&opt->priv->pool, + combined.queue[i]); free(combined.queue); } @@ -3581,7 +3604,8 @@ static void process_entry(struct merge_options *opt, * the directory to remain here, so we need to move this * path to some new location. */ - CALLOC_ARRAY(new_ci, 1); + new_ci = mem_pool_calloc(&opt->priv->pool, 1, sizeof(*new_ci)); + /* We don't really want new_ci->merged.result copied, but it'll * be overwritten below so it doesn't matter. We also don't * want any directory mode/oid values copied, but we'll zero @@ -3673,7 +3697,8 @@ static void process_entry(struct merge_options *opt, const char *a_path = NULL, *b_path = NULL; int rename_a = 0, rename_b = 0; - new_ci = xmalloc(sizeof(*new_ci)); + new_ci = mem_pool_alloc(&opt->priv->pool, + sizeof(*new_ci)); if (S_ISREG(a_mode)) rename_a = 1; @@ -3742,17 +3767,8 @@ static void process_entry(struct merge_options *opt, b_path = path; strmap_put(&opt->priv->paths, b_path, new_ci); - if (rename_a && rename_b) { + if (rename_a && rename_b) strmap_remove(&opt->priv->paths, path, 0); - /* - * We removed path from opt->priv->paths. path - * will also eventually need to be freed, but - * it may still be used by e.g. ci->pathnames. - * So, store it in another string-list for now. - */ - string_list_append(&opt->priv->paths_to_free, - path); - } /* * Do special handling for b_path since process_entry() @@ -4058,6 +4074,21 @@ static int record_conflicted_index_entries(struct merge_options *opt) if (strmap_empty(&opt->priv->conflicted)) return 0; + /* + * We are in a conflicted state. These conflicts might be inside + * sparse-directory entries, so check if any entries are outside + * of the sparse-checkout cone preemptively. 
+ * + * We set original_cache_nr below, but that might change if + * index_name_pos() calls ask for paths within sparse directories. + */ + strmap_for_each_entry(&opt->priv->conflicted, &iter, e) { + if (!path_in_sparse_checkout(e->key, index)) { + ensure_full_index(index); + break; + } + } + /* If any entries have skip_worktree set, we'll have to check 'em out */ state.force = 1; state.quiet = 1; @@ -4065,7 +4096,7 @@ static int record_conflicted_index_entries(struct merge_options *opt) state.istate = index; original_cache_nr = index->cache_nr; - /* Put every entry from paths into plist, then sort */ + /* Append every entry from conflicted into index, then sort */ strmap_for_each_entry(&opt->priv->conflicted, &iter, e) { const char *path = e->key; struct conflict_info *ci = e->value; @@ -4293,6 +4324,7 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) { struct rename_info *renames; int i; + struct mem_pool *pool = NULL; /* Sanity checks on opt */ trace2_region_enter("merge", "sanity checks", opt->repo); @@ -4358,9 +4390,11 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) /* Initialization of various renames fields */ renames = &opt->priv->renames; + mem_pool_init(&opt->priv->pool, 0); + pool = &opt->priv->pool; for (i = MERGE_SIDE1; i <= MERGE_SIDE2; i++) { strintmap_init_with_options(&renames->dirs_removed[i], - NOT_RELEVANT, NULL, 0); + NOT_RELEVANT, pool, 0); strmap_init_with_options(&renames->dir_rename_count[i], NULL, 1); strmap_init_with_options(&renames->dir_renames[i], @@ -4374,7 +4408,7 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) */ strintmap_init_with_options(&renames->relevant_sources[i], -1 /* explicitly invalid */, - NULL, 0); + pool, 0); strmap_init_with_options(&renames->cached_pairs[i], NULL, 1); strset_init_with_options(&renames->cached_irrelevant[i], @@ -4384,9 +4418,9 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) } for (i = MERGE_SIDE1; i <= MERGE_SIDE2; i++) { strintmap_init_with_options(&renames->deferred[i].possible_trivial_merges, - 0, NULL, 0); + 0, pool, 0); strset_init_with_options(&renames->deferred[i].target_dirs, - NULL, 1); + pool, 1); renames->deferred[i].trivial_merges_okay = 1; /* 1 == maybe */ } @@ -4394,14 +4428,13 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) * Although we initialize opt->priv->paths with strdup_strings=0, * that's just to avoid making yet another copy of an allocated * string. Putting the entry into paths means we are taking - * ownership, so we will later free it. paths_to_free is similar. + * ownership, so we will later free it. * * In contrast, conflicted just has a subset of keys from paths, so * we don't want to free those (it'd be a duplicate free). 
*/ - strmap_init_with_options(&opt->priv->paths, NULL, 0); - strmap_init_with_options(&opt->priv->conflicted, NULL, 0); - string_list_init_nodup(&opt->priv->paths_to_free); + strmap_init_with_options(&opt->priv->paths, pool, 0); + strmap_init_with_options(&opt->priv->conflicted, pool, 0); /* * keys & strbufs in output will sometimes need to outlive "paths", diff --git a/merge-recursive.c b/merge-recursive.c index 3355d50e8a..e594d4c3fa 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -55,10 +55,7 @@ static int path_hashmap_cmp(const void *cmp_data, a = container_of(eptr, const struct path_hashmap_entry, e); b = container_of(entry_or_key, const struct path_hashmap_entry, e); - if (ignore_case) - return strcasecmp(a->path, key ? key : b->path); - else - return strcmp(a->path, key ? key : b->path); + return fspathcmp(a->path, key ? key : b->path); } /* @@ -3750,6 +3747,9 @@ int merge_recursive(struct merge_options *opt, assert(opt->ancestor == NULL || !strcmp(opt->ancestor, "constructed merge base")); + prepare_repo_settings(opt->repo); + opt->repo->settings.command_requires_full_index = 1; + if (merge_start(opt, repo_get_commit_tree(opt->repo, h1))) return -1; clean = merge_recursive_internal(opt, h1, h2, merge_bases, result); @@ -13,6 +13,10 @@ #include "repository.h" #include "chunk-format.h" #include "pack.h" +#include "pack-bitmap.h" +#include "refs.h" +#include "revision.h" +#include "list-objects.h" #define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */ #define MIDX_VERSION 1 @@ -48,12 +52,12 @@ static uint8_t oid_version(void) } } -static const unsigned char *get_midx_checksum(struct multi_pack_index *m) +const unsigned char *get_midx_checksum(struct multi_pack_index *m) { return m->data + m->data_len - the_hash_algo->rawsz; } -static char *get_midx_filename(const char *object_dir) +char *get_midx_filename(const char *object_dir) { return xstrfmt("%s/pack/multi-pack-index", object_dir); } @@ -195,6 +199,8 @@ void close_midx(struct multi_pack_index *m) if (!m) return; + close_midx(m->next); + munmap((unsigned char *)m->data, m->data_len); for (i = 0; i < m->num_packs; i++) { @@ -203,6 +209,7 @@ void close_midx(struct multi_pack_index *m) } FREE_AND_NULL(m->packs); FREE_AND_NULL(m->pack_names); + free(m); } int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id) @@ -276,14 +283,18 @@ uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos) (off_t)pos * MIDX_CHUNK_OFFSET_WIDTH); } -static int nth_midxed_pack_entry(struct repository *r, - struct multi_pack_index *m, - struct pack_entry *e, - uint32_t pos) +int fill_midx_entry(struct repository * r, + const struct object_id *oid, + struct pack_entry *e, + struct multi_pack_index *m) { + uint32_t pos; uint32_t pack_int_id; struct packed_git *p; + if (!bsearch_midx(oid, m, &pos)) + return 0; + if (pos >= m->num_objects) return 0; @@ -303,15 +314,9 @@ static int nth_midxed_pack_entry(struct repository *r, if (!is_pack_valid(p)) return 0; - if (p->num_bad_objects) { - uint32_t i; - struct object_id oid; - nth_midxed_object_oid(&oid, m, pos); - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(oid.hash, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) - return 0; - } + if (oidset_size(&p->bad_objects) && + oidset_contains(&p->bad_objects, oid)) + return 0; e->offset = nth_midxed_offset(m, pos); e->p = p; @@ -319,19 +324,6 @@ static int nth_midxed_pack_entry(struct repository *r, return 1; } -int fill_midx_entry(struct repository * r, - const struct object_id *oid, - struct pack_entry 
*e, - struct multi_pack_index *m) -{ - uint32_t pos; - - if (!bsearch_midx(oid, m, &pos)) - return 0; - - return nth_midxed_pack_entry(r, m, e, pos); -} - /* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */ static int cmp_idx_or_pack_name(const char *idx_or_pack_name, const char *idx_name) @@ -882,7 +874,7 @@ static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash, strbuf_release(&buf); } -static void clear_midx_files_ext(struct repository *r, const char *ext, +static void clear_midx_files_ext(const char *object_dir, const char *ext, unsigned char *keep_hash); static int midx_checksum_valid(struct multi_pack_index *m) @@ -890,7 +882,167 @@ static int midx_checksum_valid(struct multi_pack_index *m) return hashfile_checksum_valid(m->data, m->data_len); } -static int write_midx_internal(const char *object_dir, struct multi_pack_index *m, +static void prepare_midx_packing_data(struct packing_data *pdata, + struct write_midx_context *ctx) +{ + uint32_t i; + + memset(pdata, 0, sizeof(struct packing_data)); + prepare_packing_data(the_repository, pdata); + + for (i = 0; i < ctx->entries_nr; i++) { + struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]]; + struct object_entry *to = packlist_alloc(pdata, &from->oid); + + oe_set_in_pack(pdata, to, + ctx->info[ctx->pack_perm[from->pack_int_id]].p); + } +} + +static int add_ref_to_pending(const char *refname, + const struct object_id *oid, + int flag, void *cb_data) +{ + struct rev_info *revs = (struct rev_info*)cb_data; + struct object *object; + + if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) { + warning("symbolic ref is dangling: %s", refname); + return 0; + } + + object = parse_object_or_die(oid, refname); + if (object->type != OBJ_COMMIT) + return 0; + + add_pending_object(revs, object, ""); + if (bitmap_is_preferred_refname(revs->repo, refname)) + object->flags |= NEEDS_BITMAP; + return 0; +} + +struct bitmap_commit_cb { + struct commit **commits; + size_t commits_nr, commits_alloc; + + struct write_midx_context *ctx; +}; + +static const struct object_id *bitmap_oid_access(size_t index, + const void *_entries) +{ + const struct pack_midx_entry *entries = _entries; + return &entries[index].oid; +} + +static void bitmap_show_commit(struct commit *commit, void *_data) +{ + struct bitmap_commit_cb *data = _data; + int pos = oid_pos(&commit->object.oid, data->ctx->entries, + data->ctx->entries_nr, + bitmap_oid_access); + if (pos < 0) + return; + + ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc); + data->commits[data->commits_nr++] = commit; +} + +static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p, + struct write_midx_context *ctx) +{ + struct rev_info revs; + struct bitmap_commit_cb cb = {0}; + + cb.ctx = ctx; + + repo_init_revisions(the_repository, &revs, NULL); + setup_revisions(0, NULL, &revs, NULL); + for_each_ref(add_ref_to_pending, &revs); + + /* + * Skipping promisor objects here is intentional, since it only excludes + * them from the list of reachable commits that we want to select from + * when computing the selection of MIDX'd commits to receive bitmaps. + * + * Reachability bitmaps do require that their objects be closed under + * reachability, but fetching any objects missing from promisors at this + * point is too late. But, if one of those objects can be reached from + * an another object that is included in the bitmap, then we will + * complain later that we don't have reachability closure (and fail + * appropriately). 
+ */ + fetch_if_missing = 0; + revs.exclude_promisor_objects = 1; + + if (prepare_revision_walk(&revs)) + die(_("revision walk setup failed")); + + traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb); + if (indexed_commits_nr_p) + *indexed_commits_nr_p = cb.commits_nr; + + return cb.commits; +} + +static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash, + struct write_midx_context *ctx, + unsigned flags) +{ + struct packing_data pdata; + struct pack_idx_entry **index; + struct commit **commits = NULL; + uint32_t i, commits_nr; + char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name, hash_to_hex(midx_hash)); + int ret; + + prepare_midx_packing_data(&pdata, ctx); + + commits = find_commits_for_midx_bitmap(&commits_nr, ctx); + + /* + * Build the MIDX-order index based on pdata.objects (which is already + * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of + * this order). + */ + ALLOC_ARRAY(index, pdata.nr_objects); + for (i = 0; i < pdata.nr_objects; i++) + index[i] = &pdata.objects[i].idx; + + bitmap_writer_show_progress(flags & MIDX_PROGRESS); + bitmap_writer_build_type_index(&pdata, index, pdata.nr_objects); + + /* + * bitmap_writer_finish expects objects in lex order, but pack_order + * gives us exactly that. use it directly instead of re-sorting the + * array. + * + * This changes the order of objects in 'index' between + * bitmap_writer_build_type_index and bitmap_writer_finish. + * + * The same re-ordering takes place in the single-pack bitmap code via + * write_idx_file(), which is called by finish_tmp_packfile(), which + * happens between bitmap_writer_build_type_index() and + * bitmap_writer_finish(). + */ + for (i = 0; i < pdata.nr_objects; i++) + index[ctx->pack_order[i]] = &pdata.objects[i].idx; + + bitmap_writer_select_commits(commits, commits_nr, -1); + ret = bitmap_writer_build(&pdata); + if (ret < 0) + goto cleanup; + + bitmap_writer_set_checksum(midx_hash); + bitmap_writer_finish(index, pdata.nr_objects, bitmap_name, 0); + +cleanup: + free(index); + free(bitmap_name); + return ret; +} + +static int write_midx_internal(const char *object_dir, struct string_list *packs_to_drop, const char *preferred_pack_name, unsigned flags) @@ -901,20 +1053,26 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * struct hashfile *f = NULL; struct lock_file lk; struct write_midx_context ctx = { 0 }; + struct multi_pack_index *cur; int pack_name_concat_len = 0; int dropped_packs = 0; int result = 0; struct chunkfile *cf; + /* Ensure the given object_dir is local, or a known alternate. 
*/ + find_odb(the_repository, object_dir); + midx_name = get_midx_filename(object_dir); if (safe_create_leading_directories(midx_name)) die_errno(_("unable to create leading directories of %s"), midx_name); - if (m) - ctx.m = m; - else - ctx.m = load_multi_pack_index(object_dir, 1); + for (cur = get_multi_pack_index(the_repository); cur; cur = cur->next) { + if (!strcmp(object_dir, cur->object_dir)) { + ctx.m = cur; + break; + } + } if (ctx.m && !midx_checksum_valid(ctx.m)) { warning(_("ignoring existing multi-pack-index; checksum mismatch")); @@ -932,8 +1090,27 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * ctx.info[ctx.nr].orig_pack_int_id = i; ctx.info[ctx.nr].pack_name = xstrdup(ctx.m->pack_names[i]); - ctx.info[ctx.nr].p = NULL; + ctx.info[ctx.nr].p = ctx.m->packs[i]; ctx.info[ctx.nr].expired = 0; + + if (flags & MIDX_WRITE_REV_INDEX) { + /* + * If generating a reverse index, need to have + * packed_git's loaded to compare their + * mtimes and object count. + */ + if (prepare_midx_pack(the_repository, ctx.m, i)) { + error(_("could not load pack")); + result = 1; + goto cleanup; + } + + if (open_pack_index(ctx.m->packs[i])) + die(_("could not open index for %s"), + ctx.m->packs[i]->pack_name); + ctx.info[ctx.nr].p = ctx.m->packs[i]; + } + ctx.nr++; } } @@ -947,18 +1124,89 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx); stop_progress(&ctx.progress); - if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop) - goto cleanup; + if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop) { + struct bitmap_index *bitmap_git; + int bitmap_exists; + int want_bitmap = flags & MIDX_WRITE_BITMAP; + + bitmap_git = prepare_midx_bitmap_git(ctx.m); + bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git); + free_bitmap_index(bitmap_git); + + if (bitmap_exists || !want_bitmap) { + /* + * The correct MIDX already exists, and so does a + * corresponding bitmap (or one wasn't requested). + */ + if (!want_bitmap) + clear_midx_files_ext(object_dir, ".bitmap", + NULL); + goto cleanup; + } + } - ctx.preferred_pack_idx = -1; if (preferred_pack_name) { + int found = 0; for (i = 0; i < ctx.nr; i++) { if (!cmp_idx_or_pack_name(preferred_pack_name, ctx.info[i].pack_name)) { ctx.preferred_pack_idx = i; + found = 1; break; } } + + if (!found) + warning(_("unknown preferred pack: '%s'"), + preferred_pack_name); + } else if (ctx.nr && + (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) { + struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p; + ctx.preferred_pack_idx = 0; + + if (packs_to_drop && packs_to_drop->nr) + BUG("cannot write a MIDX bitmap during expiration"); + + /* + * set a preferred pack when writing a bitmap to ensure that + * the pack from which the first object is selected in pseudo + * pack-order has all of its objects selected from that pack + * (and not another pack containing a duplicate) + */ + for (i = 1; i < ctx.nr; i++) { + struct packed_git *p = ctx.info[i].p; + + if (!oldest->num_objects || p->mtime < oldest->mtime) { + oldest = p; + ctx.preferred_pack_idx = i; + } + } + + if (!oldest->num_objects) { + /* + * If all packs are empty; unset the preferred index. + * This is acceptable since there will be no duplicate + * objects to resolve, so the preferred value doesn't + * matter. 
+ */ + ctx.preferred_pack_idx = -1; + } + } else { + /* + * otherwise don't mark any pack as preferred to avoid + * interfering with expiration logic below + */ + ctx.preferred_pack_idx = -1; + } + + if (ctx.preferred_pack_idx > -1) { + struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p; + if (!preferred->num_objects) { + error(_("cannot select preferred pack %s with no objects"), + preferred->pack_name); + result = 1; + goto cleanup; + } } ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr, @@ -1029,11 +1277,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * ctx.info, ctx.nr, sizeof(*ctx.info), idx_or_pack_name_cmp); - - if (!preferred) - warning(_("unknown preferred pack: '%s'"), - preferred_pack_name); - else { + if (preferred) { uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id]; if (perm == PACK_EXPIRED) warning(_("preferred pack '%s' is expired"), @@ -1048,9 +1292,6 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR); f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk)); - if (ctx.m) - close_midx(ctx.m); - if (ctx.nr - dropped_packs == 0) { error(_("no pack files to index.")); result = 1; @@ -1081,15 +1322,27 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM); free_chunkfile(cf); - if (flags & MIDX_WRITE_REV_INDEX) + if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) ctx.pack_order = midx_pack_order(&ctx); if (flags & MIDX_WRITE_REV_INDEX) write_midx_reverse_index(midx_name, midx_hash, &ctx); - clear_midx_files_ext(the_repository, ".rev", midx_hash); + if (flags & MIDX_WRITE_BITMAP) { + if (write_midx_bitmap(midx_name, midx_hash, &ctx, flags) < 0) { + error(_("could not write multi-pack bitmap")); + result = 1; + goto cleanup; + } + } + + if (ctx.m) + close_object_store(the_repository->objects); commit_lock_file(&lk); + clear_midx_files_ext(object_dir, ".bitmap", midx_hash); + clear_midx_files_ext(object_dir, ".rev", midx_hash); + cleanup: for (i = 0; i < ctx.nr; i++) { if (ctx.info[i].p) { @@ -1104,6 +1357,7 @@ cleanup: free(ctx.pack_perm); free(ctx.pack_order); free(midx_name); + return result; } @@ -1111,8 +1365,7 @@ int write_midx_file(const char *object_dir, const char *preferred_pack_name, unsigned flags) { - return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name, - flags); + return write_midx_internal(object_dir, NULL, preferred_pack_name, flags); } struct clear_midx_data { @@ -1135,7 +1388,7 @@ static void clear_midx_file_ext(const char *full_path, size_t full_path_len, die_errno(_("failed to remove %s"), full_path); } -static void clear_midx_files_ext(struct repository *r, const char *ext, +static void clear_midx_files_ext(const char *object_dir, const char *ext, unsigned char *keep_hash) { struct clear_midx_data data; @@ -1146,7 +1399,7 @@ static void clear_midx_files_ext(struct repository *r, const char *ext, hash_to_hex(keep_hash), ext); data.ext = ext; - for_each_file_in_pack_dir(r->objects->odb->path, + for_each_file_in_pack_dir(object_dir, clear_midx_file_ext, &data); @@ -1165,7 +1418,8 @@ void clear_midx_file(struct repository *r) if (remove_path(midx)) die(_("failed to clear multi-pack-index at %s"), midx); - clear_midx_files_ext(r, ".rev", NULL); + clear_midx_files_ext(r->objects->odb->path, ".bitmap", NULL); + clear_midx_files_ext(r->objects->odb->path, ".rev", NULL); 
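With the new MIDX_WRITE_BITMAP flag (declared in the midx.h hunk below) and the reworked write_midx_internal() above, a multi-pack bitmap is produced next to the multi-pack-index whenever a caller asks for one. A rough sketch of such a call, using only the public entry point shown in this patch; the choice of object directory here is illustrative:

    #include "repository.h"
    #include "object-store.h"
    #include "midx.h"

    /*
     * Sketch: request a multi-pack-index together with its reverse index
     * and bitmap.  write_midx_internal() resolves the directory against a
     * known object database via find_odb() and, when no preferred pack is
     * named, picks the oldest non-empty pack itself.
     */
    static int write_midx_and_bitmap(struct repository *r)
    {
            unsigned flags = MIDX_PROGRESS | MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP;

            return write_midx_file(r->objects->odb->path, NULL, flags);
    }
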
free(midx); } @@ -1390,8 +1644,10 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla free(count); - if (packs_to_drop.nr) - result = write_midx_internal(object_dir, m, &packs_to_drop, NULL, flags); + if (packs_to_drop.nr) { + result = write_midx_internal(object_dir, &packs_to_drop, NULL, flags); + m = NULL; + } string_list_clear(&packs_to_drop, 0); return result; @@ -1580,7 +1836,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, goto cleanup; } - result = write_midx_internal(object_dir, m, NULL, NULL, flags); + result = write_midx_internal(object_dir, NULL, NULL, flags); m = NULL; cleanup: @@ -8,6 +8,8 @@ struct pack_entry; struct repository; #define GIT_TEST_MULTI_PACK_INDEX "GIT_TEST_MULTI_PACK_INDEX" +#define GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP \ + "GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP" struct multi_pack_index { struct multi_pack_index *next; @@ -41,7 +43,10 @@ struct multi_pack_index { #define MIDX_PROGRESS (1 << 0) #define MIDX_WRITE_REV_INDEX (1 << 1) +#define MIDX_WRITE_BITMAP (1 << 2) +const unsigned char *get_midx_checksum(struct multi_pack_index *m); +char *get_midx_filename(const char *object_dir); char *get_midx_rev_filename(struct multi_pack_index *m); struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local); diff --git a/notes-merge.c b/notes-merge.c index 46c1f7c7f1..b4a3a903e8 100644 --- a/notes-merge.c +++ b/notes-merge.c @@ -273,7 +273,7 @@ static void check_notes_merge_worktree(struct notes_merge_options *o) */ if (file_exists(git_path(NOTES_MERGE_WORKTREE)) && !is_empty_dir(git_path(NOTES_MERGE_WORKTREE))) { - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) die(_("You have not concluded your previous " "notes merge (%s exists).\nPlease, use " "'git notes merge --commit' or 'git notes " diff --git a/object-file.c b/object-file.c index a8be899481..112d9b4bad 100644 --- a/object-file.c +++ b/object-file.c @@ -32,6 +32,7 @@ #include "packfile.h" #include "object-store.h" #include "promisor-remote.h" +#include "submodule.h" /* The maximum size for an object header. */ #define MAX_HEADER_LEN 32 @@ -414,74 +415,6 @@ enum scld_error safe_create_leading_directories_const(const char *path) return result; } -int raceproof_create_file(const char *path, create_file_fn fn, void *cb) -{ - /* - * The number of times we will try to remove empty directories - * in the way of path. This is only 1 because if another - * process is racily creating directories that conflict with - * us, we don't want to fight against them. - */ - int remove_directories_remaining = 1; - - /* - * The number of times that we will try to create the - * directories containing path. We are willing to attempt this - * more than once, because another process could be trying to - * clean up empty directories at the same time as we are - * trying to create them. - */ - int create_directories_remaining = 3; - - /* A scratch copy of path, filled lazily if we need it: */ - struct strbuf path_copy = STRBUF_INIT; - - int ret, save_errno; - - /* Sanity check: */ - assert(*path); - -retry_fn: - ret = fn(path, cb); - save_errno = errno; - if (!ret) - goto out; - - if (errno == EISDIR && remove_directories_remaining-- > 0) { - /* - * A directory is in the way. 
Maybe it is empty; try - * to remove it: - */ - if (!path_copy.len) - strbuf_addstr(&path_copy, path); - - if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY)) - goto retry_fn; - } else if (errno == ENOENT && create_directories_remaining-- > 0) { - /* - * Maybe the containing directory didn't exist, or - * maybe it was just deleted by a process that is - * racing with us to clean up empty directories. Try - * to create it: - */ - enum scld_error scld_result; - - if (!path_copy.len) - strbuf_addstr(&path_copy, path); - - do { - scld_result = safe_create_leading_directories(path_copy.buf); - if (scld_result == SCLD_OK) - goto retry_fn; - } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0); - } - -out: - strbuf_release(&path_copy); - errno = save_errno; - return ret; -} - static void fill_loose_path(struct strbuf *buf, const struct object_id *oid) { int i; @@ -820,6 +753,27 @@ out: return ref_git; } +struct object_directory *find_odb(struct repository *r, const char *obj_dir) +{ + struct object_directory *odb; + char *obj_dir_real = real_pathdup(obj_dir, 1); + struct strbuf odb_path_real = STRBUF_INIT; + + prepare_alt_odb(r); + for (odb = r->objects->odb; odb; odb = odb->next) { + strbuf_realpath(&odb_path_real, odb->path, 1); + if (!strcmp(obj_dir_real, odb_path_real.buf)) + break; + } + + free(obj_dir_real); + strbuf_release(&odb_path_real); + + if (!odb) + die(_("could not find object directory matching %s"), obj_dir); + return odb; +} + static void fill_alternate_refs_command(struct child_process *cmd, const char *repo_path) { @@ -1592,6 +1546,10 @@ static int do_oid_object_info_extended(struct repository *r, break; } + if (register_all_submodule_odb_as_alternates()) + /* We added some alternates; retry */ + continue; + /* Check if it is a missing object */ if (fetch_if_missing && repo_has_promisor_remote(r) && !already_retried && @@ -1616,7 +1574,7 @@ static int do_oid_object_info_extended(struct repository *r, return 0; rtype = packed_object_info(r, e.p, e.offset, oi); if (rtype < 0) { - mark_bad_packed_object(e.p, real->hash); + mark_bad_packed_object(e.p, real); return do_oid_object_info_extended(r, real, oi, 0); } else if (oi->whence == OI_PACKED) { oi->u.packed.offset = e.offset; @@ -1725,7 +1683,7 @@ void *read_object_file_extended(struct repository *r, die(_("loose object %s (stored in %s) is corrupt"), oid_to_hex(repl), path); - if ((p = has_packed_and_bad(r, repl->hash)) != NULL) + if ((p = has_packed_and_bad(r, repl)) != NULL) die(_("packed object %s (stored in %s) is corrupt"), oid_to_hex(repl), p->pack_name); obj_read_unlock(); diff --git a/object-name.c b/object-name.c index 3263c19457..fdff4601b2 100644 --- a/object-name.c +++ b/object-name.c @@ -806,7 +806,7 @@ static int get_oid_basic(struct repository *r, const char *str, int len, refs_found = repo_dwim_ref(r, str, len, &tmp_oid, &real_ref, 0); if (refs_found > 0) { warning(warn_msg, len, str); - if (advice_object_name_warning) + if (advice_enabled(ADVICE_OBJECT_NAME_WARNING)) fprintf(stderr, "%s\n", _(object_name_msg)); } free(real_ref); diff --git a/object-store.h b/object-store.h index d24915ced1..c5130d8bae 100644 --- a/object-store.h +++ b/object-store.h @@ -10,6 +10,7 @@ #include "khash.h" #include "dir.h" #include "oidtree.h" +#include "oidset.h" struct object_directory { struct object_directory *next; @@ -38,6 +39,7 @@ KHASH_INIT(odb_path_map, const char * /* key: odb_path */, void prepare_alt_odb(struct repository *r); char *compute_alternate_path(const char *path, struct 
strbuf *err); +struct object_directory *find_odb(struct repository *r, const char *obj_dir); typedef int alt_odb_fn(struct object_directory *, void *); int foreach_alt_odb(alt_odb_fn, void*); typedef void alternate_ref_fn(const struct object_id *oid, void *); @@ -75,9 +77,8 @@ struct packed_git { const void *index_data; size_t index_size; uint32_t num_objects; - uint32_t num_bad_objects; uint32_t crc_offset; - unsigned char *bad_object_sha1; + struct oidset bad_objects; int index_version; time_t mtime; int pack_fd; @@ -455,6 +456,12 @@ enum for_each_object_flags { * Visit objects within a pack in packfile order rather than .idx order */ FOR_EACH_OBJECT_PACK_ORDER = (1<<2), + + /* Only iterate over packs that are not marked as kept in-core. */ + FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS = (1<<3), + + /* Only iterate over packs that do not have .keep files. */ + FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS = (1<<4), }; /* @@ -75,7 +75,6 @@ struct object_array { * builtin/fsck.c: 0--3 * builtin/gc.c: 0 * builtin/index-pack.c: 2021 - * builtin/pack-objects.c: 20 * builtin/reflog.c: 10--12 * builtin/show-branch.c: 0-------------------------------------------26 * builtin/unpack-objects.c: 2021 @@ -36,11 +36,6 @@ void oidset_clear(struct oidset *set) oidset_init(set, 0); } -int oidset_size(struct oidset *set) -{ - return kh_size(&set->set); -} - void oidset_parse_file(struct oidset *set, const char *path) { oidset_parse_file_carefully(set, path, NULL, NULL); @@ -57,7 +57,10 @@ int oidset_remove(struct oidset *set, const struct object_id *oid); /** * Returns the number of oids in the set. */ -int oidset_size(struct oidset *set); +static inline int oidset_size(const struct oidset *set) +{ + return kh_size(&set->set); +} /** * Remove all entries from the oidset, freeing any resources associated with diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index 88d9e696a5..9c55c1531e 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -48,7 +48,7 @@ void bitmap_writer_show_progress(int show) } /** - * Build the initial type index for the packfile + * Build the initial type index for the packfile or multi-pack-index */ void bitmap_writer_build_type_index(struct packing_data *to_pack, struct pack_idx_entry **index, @@ -125,15 +125,20 @@ static inline void push_bitmapped_commit(struct commit *commit) writer.selected_nr++; } -static uint32_t find_object_pos(const struct object_id *oid) +static uint32_t find_object_pos(const struct object_id *oid, int *found) { struct object_entry *entry = packlist_find(writer.to_pack, oid); if (!entry) { - die("Failed to write bitmap index. Packfile doesn't have full closure " + if (found) + *found = 0; + warning("Failed to write bitmap index. Packfile doesn't have full closure " "(object %s is missing)", oid_to_hex(oid)); + return 0; } + if (found) + *found = 1; return oe_in_pack_pos(writer.to_pack, entry); } @@ -331,9 +336,10 @@ static void bitmap_builder_clear(struct bitmap_builder *bb) bb->commits_nr = bb->commits_alloc = 0; } -static void fill_bitmap_tree(struct bitmap *bitmap, - struct tree *tree) +static int fill_bitmap_tree(struct bitmap *bitmap, + struct tree *tree) { + int found; uint32_t pos; struct tree_desc desc; struct name_entry entry; @@ -342,9 +348,11 @@ static void fill_bitmap_tree(struct bitmap *bitmap, * If our bit is already set, then there is nothing to do. Both this * tree and all of its children will be set. 
*/ - pos = find_object_pos(&tree->object.oid); + pos = find_object_pos(&tree->object.oid, &found); + if (!found) + return -1; if (bitmap_get(bitmap, pos)) - return; + return 0; bitmap_set(bitmap, pos); if (parse_tree(tree) < 0) @@ -355,11 +363,15 @@ static void fill_bitmap_tree(struct bitmap *bitmap, while (tree_entry(&desc, &entry)) { switch (object_type(entry.mode)) { case OBJ_TREE: - fill_bitmap_tree(bitmap, - lookup_tree(the_repository, &entry.oid)); + if (fill_bitmap_tree(bitmap, + lookup_tree(the_repository, &entry.oid)) < 0) + return -1; break; case OBJ_BLOB: - bitmap_set(bitmap, find_object_pos(&entry.oid)); + pos = find_object_pos(&entry.oid, &found); + if (!found) + return -1; + bitmap_set(bitmap, pos); break; default: /* Gitlink, etc; not reachable */ @@ -368,15 +380,18 @@ static void fill_bitmap_tree(struct bitmap *bitmap, } free_tree_buffer(tree); + return 0; } -static void fill_bitmap_commit(struct bb_commit *ent, - struct commit *commit, - struct prio_queue *queue, - struct prio_queue *tree_queue, - struct bitmap_index *old_bitmap, - const uint32_t *mapping) +static int fill_bitmap_commit(struct bb_commit *ent, + struct commit *commit, + struct prio_queue *queue, + struct prio_queue *tree_queue, + struct bitmap_index *old_bitmap, + const uint32_t *mapping) { + int found; + uint32_t pos; if (!ent->bitmap) ent->bitmap = bitmap_new(); @@ -401,11 +416,16 @@ static void fill_bitmap_commit(struct bb_commit *ent, * Mark ourselves and queue our tree. The commit * walk ensures we cover all parents. */ - bitmap_set(ent->bitmap, find_object_pos(&c->object.oid)); + pos = find_object_pos(&c->object.oid, &found); + if (!found) + return -1; + bitmap_set(ent->bitmap, pos); prio_queue_put(tree_queue, get_commit_tree(c)); for (p = c->parents; p; p = p->next) { - int pos = find_object_pos(&p->item->object.oid); + pos = find_object_pos(&p->item->object.oid, &found); + if (!found) + return -1; if (!bitmap_get(ent->bitmap, pos)) { bitmap_set(ent->bitmap, pos); prio_queue_put(queue, p->item); @@ -413,8 +433,12 @@ static void fill_bitmap_commit(struct bb_commit *ent, } } - while (tree_queue->nr) - fill_bitmap_tree(ent->bitmap, prio_queue_get(tree_queue)); + while (tree_queue->nr) { + if (fill_bitmap_tree(ent->bitmap, + prio_queue_get(tree_queue)) < 0) + return -1; + } + return 0; } static void store_selected(struct bb_commit *ent, struct commit *commit) @@ -432,7 +456,7 @@ static void store_selected(struct bb_commit *ent, struct commit *commit) kh_value(writer.bitmaps, hash_pos) = stored; } -void bitmap_writer_build(struct packing_data *to_pack) +int bitmap_writer_build(struct packing_data *to_pack) { struct bitmap_builder bb; size_t i; @@ -441,6 +465,7 @@ void bitmap_writer_build(struct packing_data *to_pack) struct prio_queue tree_queue = { NULL }; struct bitmap_index *old_bitmap; uint32_t *mapping; + int closed = 1; /* until proven otherwise */ writer.bitmaps = kh_init_oid_map(); writer.to_pack = to_pack; @@ -463,8 +488,11 @@ void bitmap_writer_build(struct packing_data *to_pack) struct commit *child; int reused = 0; - fill_bitmap_commit(ent, commit, &queue, &tree_queue, - old_bitmap, mapping); + if (fill_bitmap_commit(ent, commit, &queue, &tree_queue, + old_bitmap, mapping) < 0) { + closed = 0; + break; + } if (ent->selected) { store_selected(ent, commit); @@ -492,6 +520,7 @@ void bitmap_writer_build(struct packing_data *to_pack) clear_prio_queue(&queue); clear_prio_queue(&tree_queue); bitmap_builder_clear(&bb); + free_bitmap_index(old_bitmap); free(mapping); 
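Editorial note: with find_object_pos() now reporting a missing object through its "found" out-parameter, fill_bitmap_tree() and fill_bitmap_commit() unwind with -1 instead of die()ing, and bitmap_writer_build() can surface the broken closure to its caller. A hedged sketch of how a caller might use the new int return; the wrapper below is hypothetical, and only bitmap_writer_build(), bitmap_writer_finish(), warning() and struct packing_data are real Git symbols:

/*
 * Hypothetical caller, for illustration only: a broken closure is no
 * longer fatal, the caller simply skips writing the bitmap.
 */
static int maybe_write_bitmap(struct packing_data *to_pack)
{
	if (bitmap_writer_build(to_pack) < 0) {
		warning("bitmap closure incomplete; not writing a bitmap");
		return -1; /* do not go on to bitmap_writer_finish() */
	}
	return 0; /* safe to continue to bitmap_writer_finish() */
}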
trace2_region_leave("pack-bitmap-write", "building_bitmaps_total", @@ -499,7 +528,9 @@ void bitmap_writer_build(struct packing_data *to_pack) stop_progress(&writer.progress); - compute_xor_offsets(); + if (closed) + compute_xor_offsets(); + return closed ? 0 : -1; } /** diff --git a/pack-bitmap.c b/pack-bitmap.c index d999616c9e..8504110a4d 100644 --- a/pack-bitmap.c +++ b/pack-bitmap.c @@ -13,6 +13,7 @@ #include "repository.h" #include "object-store.h" #include "list-objects-filter-options.h" +#include "midx.h" #include "config.h" /* @@ -35,8 +36,15 @@ struct stored_bitmap { * the active bitmap index is the largest one. */ struct bitmap_index { - /* Packfile to which this bitmap index belongs to */ + /* + * The pack or multi-pack index (MIDX) that this bitmap index belongs + * to. + * + * Exactly one of these must be non-NULL; this specifies the object + * order used to interpret this bitmap. + */ struct packed_git *pack; + struct multi_pack_index *midx; /* * Mark the first `reuse_objects` in the packfile as reused: @@ -71,6 +79,9 @@ struct bitmap_index { /* If not NULL, this is a name-hash cache pointing into map. */ uint32_t *hashes; + /* The checksum of the packfile or MIDX; points into map. */ + const unsigned char *checksum; + /* * Extended index. * @@ -136,6 +147,13 @@ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) return b; } +static uint32_t bitmap_num_objects(struct bitmap_index *index) +{ + if (index->midx) + return index->midx->num_objects; + return index->pack->num_objects; +} + static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; @@ -154,7 +172,7 @@ static int load_bitmap_header(struct bitmap_index *index) /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); - size_t cache_size = st_mult(index->pack->num_objects, sizeof(uint32_t)); + size_t cache_size = st_mult(bitmap_num_objects(index), sizeof(uint32_t)); unsigned char *index_end = index->map + index->map_size - the_hash_algo->rawsz; if ((flags & BITMAP_OPT_FULL_DAG) == 0) @@ -170,6 +188,7 @@ static int load_bitmap_header(struct bitmap_index *index) } index->entry_count = ntohl(header->entry_count); + index->checksum = header->checksum; index->map_pos += header_size; return 0; } @@ -218,6 +237,15 @@ static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) #define MAX_XOR_OFFSET 160 +static int nth_bitmap_object_oid(struct bitmap_index *index, + struct object_id *oid, + uint32_t n) +{ + if (index->midx) + return nth_midxed_object_oid(oid, index->midx, n) ? 
0 : -1; + return nth_packed_object_id(oid, index->pack, n); +} + static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; @@ -237,7 +265,7 @@ static int load_bitmap_entries_v1(struct bitmap_index *index) xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); - if (nth_packed_object_id(&oid, index->pack, commit_idx_pos) < 0) + if (nth_bitmap_object_oid(index, &oid, commit_idx_pos) < 0) return error("corrupt ewah bitmap: commit index %u out of range", (unsigned)commit_idx_pos); @@ -262,7 +290,14 @@ static int load_bitmap_entries_v1(struct bitmap_index *index) return 0; } -static char *pack_bitmap_filename(struct packed_git *p) +char *midx_bitmap_filename(struct multi_pack_index *midx) +{ + return xstrfmt("%s-%s.bitmap", + get_midx_filename(midx->object_dir), + hash_to_hex(get_midx_checksum(midx))); +} + +char *pack_bitmap_filename(struct packed_git *p) { size_t len; @@ -271,6 +306,57 @@ static char *pack_bitmap_filename(struct packed_git *p) return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } +static int open_midx_bitmap_1(struct bitmap_index *bitmap_git, + struct multi_pack_index *midx) +{ + struct stat st; + char *idx_name = midx_bitmap_filename(midx); + int fd = git_open(idx_name); + + free(idx_name); + + if (fd < 0) + return -1; + + if (fstat(fd, &st)) { + close(fd); + return -1; + } + + if (bitmap_git->pack || bitmap_git->midx) { + /* ignore extra bitmap file; we can only handle one */ + warning("ignoring extra bitmap file: %s", + get_midx_filename(midx->object_dir)); + close(fd); + return -1; + } + + bitmap_git->midx = midx; + bitmap_git->map_size = xsize_t(st.st_size); + bitmap_git->map_pos = 0; + bitmap_git->map = xmmap(NULL, bitmap_git->map_size, PROT_READ, + MAP_PRIVATE, fd, 0); + close(fd); + + if (load_bitmap_header(bitmap_git) < 0) + goto cleanup; + + if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum)) + goto cleanup; + + if (load_midx_revindex(bitmap_git->midx) < 0) { + warning(_("multi-pack bitmap is missing required reverse index")); + goto cleanup; + } + return 0; + +cleanup: + munmap(bitmap_git->map, bitmap_git->map_size); + bitmap_git->map_size = 0; + bitmap_git->map = NULL; + return -1; +} + static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git *packfile) { int fd; @@ -292,7 +378,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git return -1; } - if (bitmap_git->pack) { + if (bitmap_git->pack || bitmap_git->midx) { + /* ignore extra bitmap file; we can only handle one */ warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; @@ -319,13 +406,39 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git return 0; } -static int load_pack_bitmap(struct bitmap_index *bitmap_git) +static int load_reverse_index(struct bitmap_index *bitmap_git) +{ + if (bitmap_is_midx(bitmap_git)) { + uint32_t i; + int ret; + + /* + * The multi-pack-index's .rev file is already loaded via + * open_pack_bitmap_1(). + * + * But we still need to open the individual pack .rev files, + * since we will need to make use of them in pack-objects. 
+ */ + for (i = 0; i < bitmap_git->midx->num_packs; i++) { + if (prepare_midx_pack(the_repository, bitmap_git->midx, i)) + die(_("load_reverse_index: could not open pack")); + ret = load_pack_revindex(bitmap_git->midx->packs[i]); + if (ret) + return ret; + } + return 0; + } + return load_pack_revindex(bitmap_git->pack); +} + +static int load_bitmap(struct bitmap_index *bitmap_git) { assert(bitmap_git->map); bitmap_git->bitmaps = kh_init_oid_map(); bitmap_git->ext_index.positions = kh_init_oid_pos(); - if (load_pack_revindex(bitmap_git->pack)) + + if (load_reverse_index(bitmap_git)) goto failed; if (!(bitmap_git->commits = read_bitmap_1(bitmap_git)) || @@ -369,11 +482,46 @@ static int open_pack_bitmap(struct repository *r, return ret; } +static int open_midx_bitmap(struct repository *r, + struct bitmap_index *bitmap_git) +{ + struct multi_pack_index *midx; + + assert(!bitmap_git->map); + + for (midx = get_multi_pack_index(r); midx; midx = midx->next) { + if (!open_midx_bitmap_1(bitmap_git, midx)) + return 0; + } + return -1; +} + +static int open_bitmap(struct repository *r, + struct bitmap_index *bitmap_git) +{ + assert(!bitmap_git->map); + + if (!open_midx_bitmap(r, bitmap_git)) + return 0; + return open_pack_bitmap(r, bitmap_git); +} + struct bitmap_index *prepare_bitmap_git(struct repository *r) { struct bitmap_index *bitmap_git = xcalloc(1, sizeof(*bitmap_git)); - if (!open_pack_bitmap(r, bitmap_git) && !load_pack_bitmap(bitmap_git)) + if (!open_bitmap(r, bitmap_git) && !load_bitmap(bitmap_git)) + return bitmap_git; + + free_bitmap_index(bitmap_git); + return NULL; +} + +struct bitmap_index *prepare_midx_bitmap_git(struct multi_pack_index *midx) +{ + struct bitmap_index *bitmap_git = xcalloc(1, sizeof(*bitmap_git)); + + if (!open_midx_bitmap_1(bitmap_git, midx) && !load_bitmap(bitmap_git)) return bitmap_git; free_bitmap_index(bitmap_git); @@ -404,7 +552,7 @@ static inline int bitmap_position_extended(struct bitmap_index *bitmap_git, if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); - return bitmap_pos + bitmap_git->pack->num_objects; + return bitmap_pos + bitmap_num_objects(bitmap_git); } return -1; @@ -423,10 +571,26 @@ static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git, return pos; } +static int bitmap_position_midx(struct bitmap_index *bitmap_git, + const struct object_id *oid) +{ + uint32_t want, got; + if (!bsearch_midx(oid, bitmap_git->midx, &want)) + return -1; + + if (midx_to_pack_pos(bitmap_git->midx, want, &got) < 0) + return -1; + return got; +} + static int bitmap_position(struct bitmap_index *bitmap_git, const struct object_id *oid) { - int pos = bitmap_position_packfile(bitmap_git, oid); + int pos; + if (bitmap_is_midx(bitmap_git)) + pos = bitmap_position_midx(bitmap_git, oid); + else + pos = bitmap_position_packfile(bitmap_git, oid); return (pos >= 0) ? 
pos : bitmap_position_extended(bitmap_git, oid); } @@ -456,7 +620,7 @@ static int ext_index_add_object(struct bitmap_index *bitmap_git, bitmap_pos = kh_value(eindex->positions, hash_pos); } - return bitmap_pos + bitmap_git->pack->num_objects; + return bitmap_pos + bitmap_num_objects(bitmap_git); } struct bitmap_show_data { @@ -673,7 +837,7 @@ static void show_extended_objects(struct bitmap_index *bitmap_git, for (i = 0; i < eindex->count; ++i) { struct object *obj; - if (!bitmap_get(objects, bitmap_git->pack->num_objects + i)) + if (!bitmap_get(objects, bitmap_num_objects(bitmap_git) + i)) continue; obj = eindex->objects[i]; @@ -737,6 +901,7 @@ static void show_objects_for_type( continue; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { + struct packed_git *pack; struct object_id oid; uint32_t hash = 0, index_pos; off_t ofs; @@ -746,14 +911,28 @@ static void show_objects_for_type( offset += ewah_bit_ctz64(word >> offset); - index_pos = pack_pos_to_index(bitmap_git->pack, pos + offset); - ofs = pack_pos_to_offset(bitmap_git->pack, pos + offset); - nth_packed_object_id(&oid, bitmap_git->pack, index_pos); + if (bitmap_is_midx(bitmap_git)) { + struct multi_pack_index *m = bitmap_git->midx; + uint32_t pack_id; + + index_pos = pack_pos_to_midx(m, pos + offset); + ofs = nth_midxed_offset(m, index_pos); + nth_midxed_object_oid(&oid, m, index_pos); + + pack_id = nth_midxed_pack_int_id(m, index_pos); + pack = bitmap_git->midx->packs[pack_id]; + } else { + index_pos = pack_pos_to_index(bitmap_git->pack, pos + offset); + ofs = pack_pos_to_offset(bitmap_git->pack, pos + offset); + nth_bitmap_object_oid(bitmap_git, &oid, index_pos); + + pack = bitmap_git->pack; + } if (bitmap_git->hashes) hash = get_be32(bitmap_git->hashes + index_pos); - show_reach(&oid, object_type, 0, hash, bitmap_git->pack, ofs); + show_reach(&oid, object_type, 0, hash, pack, ofs); } } } @@ -765,8 +944,13 @@ static int in_bitmapped_pack(struct bitmap_index *bitmap_git, struct object *object = roots->item; roots = roots->next; - if (find_pack_entry_one(object->oid.hash, bitmap_git->pack) > 0) - return 1; + if (bitmap_is_midx(bitmap_git)) { + if (bsearch_midx(&object->oid, bitmap_git->midx, NULL)) + return 1; + } else { + if (find_pack_entry_one(object->oid.hash, bitmap_git->pack) > 0) + return 1; + } } return 0; @@ -832,7 +1016,7 @@ static void filter_bitmap_exclude_type(struct bitmap_index *bitmap_git, * them individually. 
*/ for (i = 0; i < eindex->count; i++) { - uint32_t pos = i + bitmap_git->pack->num_objects; + uint32_t pos = i + bitmap_num_objects(bitmap_git); if (eindex->objects[i]->type == type && bitmap_get(to_filter, pos) && !bitmap_get(tips, pos)) @@ -853,23 +1037,35 @@ static void filter_bitmap_blob_none(struct bitmap_index *bitmap_git, static unsigned long get_size_by_pos(struct bitmap_index *bitmap_git, uint32_t pos) { - struct packed_git *pack = bitmap_git->pack; unsigned long size; struct object_info oi = OBJECT_INFO_INIT; oi.sizep = &size; - if (pos < pack->num_objects) { - off_t ofs = pack_pos_to_offset(pack, pos); + if (pos < bitmap_num_objects(bitmap_git)) { + struct packed_git *pack; + off_t ofs; + + if (bitmap_is_midx(bitmap_git)) { + uint32_t midx_pos = pack_pos_to_midx(bitmap_git->midx, pos); + uint32_t pack_id = nth_midxed_pack_int_id(bitmap_git->midx, midx_pos); + + pack = bitmap_git->midx->packs[pack_id]; + ofs = nth_midxed_offset(bitmap_git->midx, midx_pos); + } else { + pack = bitmap_git->pack; + ofs = pack_pos_to_offset(pack, pos); + } + if (packed_object_info(the_repository, pack, ofs, &oi) < 0) { struct object_id oid; - nth_packed_object_id(&oid, pack, - pack_pos_to_index(pack, pos)); + nth_bitmap_object_oid(bitmap_git, &oid, + pack_pos_to_index(pack, pos)); die(_("unable to get size of %s"), oid_to_hex(&oid)); } } else { struct eindex *eindex = &bitmap_git->ext_index; - struct object *obj = eindex->objects[pos - pack->num_objects]; + struct object *obj = eindex->objects[pos - bitmap_num_objects(bitmap_git)]; if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0) die(_("unable to get size of %s"), oid_to_hex(&obj->oid)); } @@ -911,7 +1107,7 @@ static void filter_bitmap_blob_limit(struct bitmap_index *bitmap_git, } for (i = 0; i < eindex->count; i++) { - uint32_t pos = i + bitmap_git->pack->num_objects; + uint32_t pos = i + bitmap_num_objects(bitmap_git); if (eindex->objects[i]->type == OBJ_BLOB && bitmap_get(to_filter, pos) && !bitmap_get(tips, pos) && @@ -1041,7 +1237,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ CALLOC_ARRAY(bitmap_git, 1); - if (open_pack_bitmap(revs->repo, bitmap_git) < 0) + if (open_bitmap(revs->repo, bitmap_git) < 0) goto cleanup; for (i = 0; i < revs->pending.nr; ++i) { @@ -1085,7 +1281,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ - if (load_pack_bitmap(bitmap_git) < 0) + if (load_bitmap(bitmap_git) < 0) goto cleanup; object_array_clear(&revs->pending); @@ -1128,22 +1324,49 @@ cleanup: return NULL; } -static void try_partial_reuse(struct bitmap_index *bitmap_git, - size_t pos, - struct bitmap *reuse, - struct pack_window **w_curs) +/* + * -1 means "stop trying further objects"; 0 means we may or may not have + * reused, but you can keep feeding bits. + */ +static int try_partial_reuse(struct packed_git *pack, + size_t pos, + struct bitmap *reuse, + struct pack_window **w_curs) { - off_t offset, header; + off_t offset, delta_obj_offset; enum object_type type; unsigned long size; - if (pos >= bitmap_git->pack->num_objects) - return; /* not actually in the pack */ + /* + * try_partial_reuse() is called either on (a) objects in the + * bitmapped pack (in the case of a single-pack bitmap) or (b) + * objects in the preferred pack of a multi-pack bitmap. 
+ * Importantly, the latter can pretend as if only a single pack + * exists because: + * + * - The first pack->num_objects bits of a MIDX bitmap are + * reserved for the preferred pack, and + * + * - Ties due to duplicate objects are always resolved in + * favor of the preferred pack. + * + * Therefore we do not need to ever ask the MIDX for its copy of + * an object by OID, since it will always select it from the + * preferred pack. Likewise, the selected copy of the base + * object for any deltas will reside in the same pack. + * + * This means that we can reuse pos when looking up the bit in + * the reuse bitmap, too, since bits corresponding to the + * preferred pack precede all bits from other packs. + */ + + if (pos >= pack->num_objects) + return -1; /* not actually in the pack or MIDX preferred pack */ - offset = header = pack_pos_to_offset(bitmap_git->pack, pos); - type = unpack_object_header(bitmap_git->pack, w_curs, &offset, &size); + offset = delta_obj_offset = pack_pos_to_offset(pack, pos); + type = unpack_object_header(pack, w_curs, &offset, &size); if (type < 0) - return; /* broken packfile, punt */ + return -1; /* broken packfile, punt */ if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA) { off_t base_offset; @@ -1157,12 +1380,12 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git, * and the normal slow path will complain about it in * more detail. */ - base_offset = get_delta_base(bitmap_git->pack, w_curs, - &offset, type, header); + base_offset = get_delta_base(pack, w_curs, &offset, type, + delta_obj_offset); if (!base_offset) - return; - if (offset_to_pack_pos(bitmap_git->pack, base_offset, &base_pos) < 0) - return; + return 0; + if (offset_to_pack_pos(pack, base_offset, &base_pos) < 0) + return 0; /* * We assume delta dependencies always point backwards. This @@ -1174,7 +1397,7 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git, * odd parameters. */ if (base_pos >= pos) - return; + return 0; /* * And finally, if we're not sending the base as part of our @@ -1185,13 +1408,22 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git, * object_entry code path handle it. */ if (!bitmap_get(reuse, base_pos)) - return; + return 0; } /* * If we got here, then the object is OK to reuse. Mark it. */ bitmap_set(reuse, pos); + return 0; +} + +static uint32_t midx_preferred_pack(struct bitmap_index *bitmap_git) +{ + struct multi_pack_index *m = bitmap_git->midx; + if (!m) + BUG("midx_preferred_pack: requires non-empty MIDX"); + return nth_midxed_pack_int_id(m, pack_pos_to_midx(bitmap_git->midx, 0)); } int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, @@ -1199,20 +1431,37 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, uint32_t *entries, struct bitmap **reuse_out) { + struct packed_git *pack; struct bitmap *result = bitmap_git->result; struct bitmap *reuse; struct pack_window *w_curs = NULL; size_t i = 0; uint32_t offset; + uint32_t objects_nr; assert(result); + load_reverse_index(bitmap_git); + + if (bitmap_is_midx(bitmap_git)) + pack = bitmap_git->midx->packs[midx_preferred_pack(bitmap_git)]; + else + pack = bitmap_git->pack; + objects_nr = pack->num_objects; + while (i < result->word_alloc && result->words[i] == (eword_t)~0) i++; - /* Don't mark objects not in the packfile */ - if (i > bitmap_git->pack->num_objects / BITS_IN_EWORD) - i = bitmap_git->pack->num_objects / BITS_IN_EWORD; + /* + * Don't mark objects not in the packfile or preferred pack. 
This bitmap + * marks objects eligible for reuse, but the pack-reuse code only + * understands how to reuse a single pack. Since the preferred pack is + * guaranteed to have all bases for its deltas (in a multi-pack bitmap), + * we use it instead of another pack. In single-pack bitmaps, the choice + * is made for us. + */ + if (i > objects_nr / BITS_IN_EWORD) + i = objects_nr / BITS_IN_EWORD; reuse = bitmap_word_alloc(i); memset(reuse->words, 0xFF, i * sizeof(eword_t)); @@ -1226,10 +1475,23 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, break; offset += ewah_bit_ctz64(word >> offset); - try_partial_reuse(bitmap_git, pos + offset, reuse, &w_curs); + if (try_partial_reuse(pack, pos + offset, + reuse, &w_curs) < 0) { + /* + * try_partial_reuse indicated we couldn't reuse + * any bits, so there is no point in trying more + * bits in the current word, or any other words + * in result. + * + * Jump out of both loops to avoid future + * unnecessary calls to try_partial_reuse. + */ + goto done; + } } } +done: unuse_pack(&w_curs); *entries = bitmap_popcount(reuse); @@ -1243,7 +1505,7 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, * need to be handled separately. */ bitmap_and_not(result, reuse); - *packfile_out = bitmap_git->pack; + *packfile_out = pack; *reuse_out = reuse; return 0; } @@ -1296,7 +1558,7 @@ static uint32_t count_object_type(struct bitmap_index *bitmap_git, for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && - bitmap_get(objects, bitmap_git->pack->num_objects + i)) + bitmap_get(objects, bitmap_num_objects(bitmap_git) + i)) count++; } @@ -1325,10 +1587,52 @@ void count_bitmap_commit_list(struct bitmap_index *bitmap_git, struct bitmap_test_data { struct bitmap_index *bitmap_git; struct bitmap *base; + struct bitmap *commits; + struct bitmap *trees; + struct bitmap *blobs; + struct bitmap *tags; struct progress *prg; size_t seen; }; +static void test_bitmap_type(struct bitmap_test_data *tdata, + struct object *obj, int pos) +{ + enum object_type bitmap_type = OBJ_NONE; + int bitmaps_nr = 0; + + if (bitmap_get(tdata->commits, pos)) { + bitmap_type = OBJ_COMMIT; + bitmaps_nr++; + } + if (bitmap_get(tdata->trees, pos)) { + bitmap_type = OBJ_TREE; + bitmaps_nr++; + } + if (bitmap_get(tdata->blobs, pos)) { + bitmap_type = OBJ_BLOB; + bitmaps_nr++; + } + if (bitmap_get(tdata->tags, pos)) { + bitmap_type = OBJ_TAG; + bitmaps_nr++; + } + + if (bitmap_type == OBJ_NONE) + die("object %s not found in type bitmaps", + oid_to_hex(&obj->oid)); + + if (bitmaps_nr > 1) + die("object %s does not have a unique type", + oid_to_hex(&obj->oid)); + + if (bitmap_type != obj->type) + die("object %s: real type %s, expected: %s", + oid_to_hex(&obj->oid), + type_name(obj->type), + type_name(bitmap_type)); +} + static void test_show_object(struct object *object, const char *name, void *data) { @@ -1338,6 +1642,7 @@ static void test_show_object(struct object *object, const char *name, bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); + test_bitmap_type(tdata, object, bitmap_pos); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); @@ -1352,6 +1657,7 @@ static void test_show_commit(struct commit *commit, void *data) &commit->object.oid); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); + test_bitmap_type(tdata, &commit->object, bitmap_pos); bitmap_set(tdata->base, 
bitmap_pos); display_progress(tdata->prg, ++tdata->seen); @@ -1399,6 +1705,10 @@ void test_bitmap_walk(struct rev_info *revs) tdata.bitmap_git = bitmap_git; tdata.base = bitmap_new(); + tdata.commits = ewah_to_bitmap(bitmap_git->commits); + tdata.trees = ewah_to_bitmap(bitmap_git->trees); + tdata.blobs = ewah_to_bitmap(bitmap_git->blobs); + tdata.tags = ewah_to_bitmap(bitmap_git->tags); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; @@ -1469,15 +1779,26 @@ uint32_t *create_bitmap_mapping(struct bitmap_index *bitmap_git, uint32_t i, num_objects; uint32_t *reposition; - num_objects = bitmap_git->pack->num_objects; + if (!bitmap_is_midx(bitmap_git)) + load_reverse_index(bitmap_git); + else if (load_midx_revindex(bitmap_git->midx) < 0) + BUG("rebuild_existing_bitmaps: missing required rev-cache " + "extension"); + + num_objects = bitmap_num_objects(bitmap_git); CALLOC_ARRAY(reposition, num_objects); for (i = 0; i < num_objects; ++i) { struct object_id oid; struct object_entry *oe; - nth_packed_object_id(&oid, bitmap_git->pack, - pack_pos_to_index(bitmap_git->pack, i)); + if (bitmap_is_midx(bitmap_git)) + nth_midxed_object_oid(&oid, + bitmap_git->midx, + pack_pos_to_midx(bitmap_git->midx, i)); + else + nth_packed_object_id(&oid, bitmap_git->pack, + pack_pos_to_index(bitmap_git->pack, i)); oe = packlist_find(mapping, &oid); if (oe) @@ -1503,6 +1824,19 @@ void free_bitmap_index(struct bitmap_index *b) free(b->ext_index.hashes); bitmap_free(b->result); bitmap_free(b->haves); + if (bitmap_is_midx(b)) { + /* + * Multi-pack bitmaps need to have resources associated with + * their on-disk reverse indexes unmapped so that stale .rev and + * .bitmap files can be removed. + * + * Unlike pack-based bitmaps, multi-pack bitmaps can be read and + * written in the same 'git multi-pack-index write --bitmap' + * process. Close resources so they can be removed safely on + * platforms like Windows. 
+ */ + close_midx_revindex(b->midx); + } free(b); } @@ -1517,7 +1851,6 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git, enum object_type object_type) { struct bitmap *result = bitmap_git->result; - struct packed_git *pack = bitmap_git->pack; off_t total = 0; struct ewah_iterator it; eword_t filter; @@ -1534,15 +1867,35 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git, continue; for (offset = 0; offset < BITS_IN_EWORD; offset++) { - size_t pos; - if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); - pos = base + offset; - total += pack_pos_to_offset(pack, pos + 1) - - pack_pos_to_offset(pack, pos); + + if (bitmap_is_midx(bitmap_git)) { + uint32_t pack_pos; + uint32_t midx_pos = pack_pos_to_midx(bitmap_git->midx, base + offset); + off_t offset = nth_midxed_offset(bitmap_git->midx, midx_pos); + + uint32_t pack_id = nth_midxed_pack_int_id(bitmap_git->midx, midx_pos); + struct packed_git *pack = bitmap_git->midx->packs[pack_id]; + + if (offset_to_pack_pos(pack, offset, &pack_pos) < 0) { + struct object_id oid; + nth_midxed_object_oid(&oid, bitmap_git->midx, midx_pos); + + die(_("could not find %s in pack %s at offset %"PRIuMAX), + oid_to_hex(&oid), + pack->pack_name, + (uintmax_t)offset); + } + + total += pack_pos_to_offset(pack, pack_pos + 1) - offset; + } else { + size_t pos = base + offset; + total += pack_pos_to_offset(bitmap_git->pack, pos + 1) - + pack_pos_to_offset(bitmap_git->pack, pos); + } } } @@ -1552,7 +1905,6 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git, static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git) { struct bitmap *result = bitmap_git->result; - struct packed_git *pack = bitmap_git->pack; struct eindex *eindex = &bitmap_git->ext_index; off_t total = 0; struct object_info oi = OBJECT_INFO_INIT; @@ -1564,7 +1916,7 @@ static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git) for (i = 0; i < eindex->count; i++) { struct object *obj = eindex->objects[i]; - if (!bitmap_get(result, pack->num_objects + i)) + if (!bitmap_get(result, bitmap_num_objects(bitmap_git) + i)) continue; if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0) @@ -1594,7 +1946,28 @@ off_t get_disk_usage_from_bitmap(struct bitmap_index *bitmap_git, return total; } +int bitmap_is_midx(struct bitmap_index *bitmap_git) +{ + return !!bitmap_git->midx; +} + const struct string_list *bitmap_preferred_tips(struct repository *r) { return repo_config_get_value_multi(r, "pack.preferbitmaptips"); } + +int bitmap_is_preferred_refname(struct repository *r, const char *refname) +{ + const struct string_list *preferred_tips = bitmap_preferred_tips(r); + struct string_list_item *item; + + if (!preferred_tips) + return 0; + + for_each_string_list_item(item, preferred_tips) { + if (starts_with(refname, item->string)) + return 1; + } + + return 0; +} diff --git a/pack-bitmap.h b/pack-bitmap.h index 99d733eb26..469090bad2 100644 --- a/pack-bitmap.h +++ b/pack-bitmap.h @@ -44,6 +44,7 @@ typedef int (*show_reachable_fn)( struct bitmap_index; struct bitmap_index *prepare_bitmap_git(struct repository *r); +struct bitmap_index *prepare_midx_bitmap_git(struct multi_pack_index *midx); void count_bitmap_commit_list(struct bitmap_index *, uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags); void traverse_bitmap_commit_list(struct bitmap_index *, @@ -87,12 +88,17 @@ struct ewah_bitmap *bitmap_for_commit(struct bitmap_index *bitmap_git, struct commit *commit); void 
bitmap_writer_select_commits(struct commit **indexed_commits, unsigned int indexed_commits_nr, int max_bitmaps); -void bitmap_writer_build(struct packing_data *to_pack); +int bitmap_writer_build(struct packing_data *to_pack); void bitmap_writer_finish(struct pack_idx_entry **index, uint32_t index_nr, const char *filename, uint16_t options); +char *midx_bitmap_filename(struct multi_pack_index *midx); +char *pack_bitmap_filename(struct packed_git *p); + +int bitmap_is_midx(struct bitmap_index *bitmap_git); const struct string_list *bitmap_preferred_tips(struct repository *r); +int bitmap_is_preferred_refname(struct repository *r, const char *refname); #endif diff --git a/pack-revindex.h b/pack-revindex.h index 479b8f2f9c..74f4eae668 100644 --- a/pack-revindex.h +++ b/pack-revindex.h @@ -109,7 +109,7 @@ off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos); * If the reverse index has not yet been loaded, or the position is out of * bounds, this function aborts. * - * This function runs in time O(log N) with the number of objects in the MIDX. + * This function runs in constant time. */ uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos); @@ -120,7 +120,7 @@ uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos); * If the reverse index has not yet been loaded, or the position is out of * bounds, this function aborts. * - * This function runs in constant time. + * This function runs in time O(log N) with the number of objects in the MIDX. */ int midx_to_pack_pos(struct multi_pack_index *midx, uint32_t at, uint32_t *pos); diff --git a/pack-write.c b/pack-write.c index f1fc3ecafa..a5846f3a34 100644 --- a/pack-write.c +++ b/pack-write.c @@ -75,9 +75,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec index_name = strbuf_detach(&tmp_file, NULL); } else { unlink(index_name); - fd = open(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600); - if (fd < 0) - die_errno("unable to create '%s'", index_name); + fd = xopen(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600); } f = hashfd(fd, index_name); } @@ -224,6 +222,9 @@ const char *write_rev_file(const char *rev_name, uint32_t i; const char *ret; + if (!(flags & WRITE_REV) && !(flags & WRITE_REV_VERIFY)) + return NULL; + ALLOC_ARRAY(pack_order, nr_objects); for (i = 0; i < nr_objects; i++) pack_order[i] = i; @@ -256,9 +257,7 @@ const char *write_rev_file_order(const char *rev_name, rev_name = strbuf_detach(&tmp_file, NULL); } else { unlink(rev_name); - fd = open(rev_name, O_CREAT|O_EXCL|O_WRONLY, 0600); - if (fd < 0) - die_errno("unable to create '%s'", rev_name); + fd = xopen(rev_name, O_CREAT|O_EXCL|O_WRONLY, 0600); } f = hashfd(fd, rev_name); } else if (flags & WRITE_REV_VERIFY) { @@ -462,49 +461,48 @@ struct hashfile *create_tmp_packfile(char **pack_tmp_name) return hashfd(fd, *pack_tmp_name); } -void finish_tmp_packfile(struct strbuf *name_buffer, +static void rename_tmp_packfile(struct strbuf *name_prefix, const char *source, + const char *ext) +{ + size_t name_prefix_len = name_prefix->len; + + strbuf_addstr(name_prefix, ext); + if (rename(source, name_prefix->buf)) + die_errno("unable to rename temporary file to '%s'", + name_prefix->buf); + strbuf_setlen(name_prefix, name_prefix_len); +} + +void rename_tmp_packfile_idx(struct strbuf *name_buffer, + char **idx_tmp_name) +{ + rename_tmp_packfile(name_buffer, *idx_tmp_name, "idx"); +} + +void stage_tmp_packfiles(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option 
*pack_idx_opts, - unsigned char hash[]) + unsigned char hash[], + char **idx_tmp_name) { - const char *idx_tmp_name, *rev_tmp_name = NULL; - int basename_len = name_buffer->len; + const char *rev_tmp_name = NULL; if (adjust_shared_perm(pack_tmp_name)) die_errno("unable to make temporary pack file readable"); - idx_tmp_name = write_idx_file(NULL, written_list, nr_written, - pack_idx_opts, hash); - if (adjust_shared_perm(idx_tmp_name)) + *idx_tmp_name = (char *)write_idx_file(NULL, written_list, nr_written, + pack_idx_opts, hash); + if (adjust_shared_perm(*idx_tmp_name)) die_errno("unable to make temporary index file readable"); rev_tmp_name = write_rev_file(NULL, written_list, nr_written, hash, pack_idx_opts->flags); - strbuf_addf(name_buffer, "%s.pack", hash_to_hex(hash)); - - if (rename(pack_tmp_name, name_buffer->buf)) - die_errno("unable to rename temporary pack file"); - - strbuf_setlen(name_buffer, basename_len); - - strbuf_addf(name_buffer, "%s.idx", hash_to_hex(hash)); - if (rename(idx_tmp_name, name_buffer->buf)) - die_errno("unable to rename temporary index file"); - - strbuf_setlen(name_buffer, basename_len); - - if (rev_tmp_name) { - strbuf_addf(name_buffer, "%s.rev", hash_to_hex(hash)); - if (rename(rev_tmp_name, name_buffer->buf)) - die_errno("unable to rename temporary reverse-index file"); - } - - strbuf_setlen(name_buffer, basename_len); - - free((void *)idx_tmp_name); + rename_tmp_packfile(name_buffer, pack_tmp_name, "pack"); + if (rev_tmp_name) + rename_tmp_packfile(name_buffer, rev_tmp_name, "rev"); } void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought) @@ -110,6 +110,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len, int read_pack_header(int fd, struct pack_header *); struct hashfile *create_tmp_packfile(char **pack_tmp_name); -void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]); +void stage_tmp_packfiles(struct strbuf *name_buffer, + const char *pack_tmp_name, + struct pack_idx_entry **written_list, + uint32_t nr_written, + struct pack_idx_option *pack_idx_opts, + unsigned char hash[], + char **idx_tmp_name); +void rename_tmp_packfile_idx(struct strbuf *basename, + char **idx_tmp_name); #endif diff --git a/packfile.c b/packfile.c index 9ef6d98292..89402cfc69 100644 --- a/packfile.c +++ b/packfile.c @@ -339,6 +339,7 @@ void close_pack(struct packed_git *p) close_pack_fd(p); close_pack_index(p); close_pack_revindex(p); + oidset_clear(&p->bad_objects); } void close_object_store(struct raw_object_store *o) @@ -860,7 +861,7 @@ static void prepare_pack(const char *full_name, size_t full_name_len, if (!strcmp(file_name, "multi-pack-index")) return; if (starts_with(file_name, "multi-pack-index") && - ends_with(file_name, ".rev")) + (ends_with(file_name, ".bitmap") || ends_with(file_name, ".rev"))) return; if (ends_with(file_name, ".idx") || ends_with(file_name, ".rev") || @@ -1161,31 +1162,19 @@ int unpack_object_header(struct packed_git *p, return type; } -void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1) +void mark_bad_packed_object(struct packed_git *p, const struct object_id *oid) { - unsigned i; - const unsigned hashsz = the_hash_algo->rawsz; - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(sha1, p->bad_object_sha1 + hashsz * i)) - return; - p->bad_object_sha1 = xrealloc(p->bad_object_sha1, - st_mult(GIT_MAX_RAWSZ, - 
st_add(p->num_bad_objects, 1))); - hashcpy(p->bad_object_sha1 + hashsz * p->num_bad_objects, sha1); - p->num_bad_objects++; + oidset_insert(&p->bad_objects, oid); } const struct packed_git *has_packed_and_bad(struct repository *r, - const unsigned char *sha1) + const struct object_id *oid) { struct packed_git *p; - unsigned i; for (p = r->objects->packed_git; p; p = p->next) - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(sha1, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) - return p; + if (oidset_contains(&p->bad_objects, oid)) + return p; return NULL; } @@ -1272,7 +1261,7 @@ static int retry_bad_packed_offset(struct repository *r, if (offset_to_pack_pos(p, obj_offset, &pos) < 0) return OBJ_BAD; nth_packed_object_id(&oid, p, pack_pos_to_index(p, pos)); - mark_bad_packed_object(p, oid.hash); + mark_bad_packed_object(p, &oid); type = oid_object_info(r, &oid, NULL); if (type <= OBJ_NONE) return OBJ_BAD; @@ -1722,7 +1711,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset, nth_packed_object_id(&oid, p, index_pos); error("bad packed object CRC for %s", oid_to_hex(&oid)); - mark_bad_packed_object(p, oid.hash); + mark_bad_packed_object(p, &oid); data = NULL; goto out; } @@ -1811,7 +1800,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset, " at offset %"PRIuMAX" from %s", oid_to_hex(&base_oid), (uintmax_t)obj_offset, p->pack_name); - mark_bad_packed_object(p, base_oid.hash); + mark_bad_packed_object(p, &base_oid); base = read_object(r, &base_oid, &type, &base_size); external_base = base; } @@ -2016,13 +2005,9 @@ static int fill_pack_entry(const struct object_id *oid, { off_t offset; - if (p->num_bad_objects) { - unsigned i; - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(oid->hash, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) - return 0; - } + if (oidset_size(&p->bad_objects) && + oidset_contains(&p->bad_objects, oid)) + return 0; offset = find_pack_entry_one(oid->hash, p); if (!offset) @@ -2205,6 +2190,12 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) && !p->pack_promisor) continue; + if ((flags & FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS) && + p->pack_keep_in_core) + continue; + if ((flags & FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS) && + p->pack_keep) + continue; if (open_pack_index(p)) { pack_errors = 1; continue; diff --git a/packfile.h b/packfile.h index 3ae117a8ae..186146779d 100644 --- a/packfile.h +++ b/packfile.h @@ -159,8 +159,8 @@ int packed_object_info(struct repository *r, struct packed_git *pack, off_t offset, struct object_info *); -void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1); -const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1); +void mark_bad_packed_object(struct packed_git *, const struct object_id *); +const struct packed_git *has_packed_and_bad(struct repository *, const struct object_id *); #define ON_DISK_KEEP_PACKS 1 #define IN_CORE_KEEP_PACKS 2 diff --git a/parse-options.c b/parse-options.c index 2abff136a1..55c5821b08 100644 --- a/parse-options.c +++ b/parse-options.c @@ -310,19 +310,6 @@ static enum parse_opt_result parse_long_opt( again: if (!skip_prefix(arg, long_name, &rest)) rest = NULL; - if (options->type == OPTION_ARGUMENT) { - if (!rest) - continue; - if (*rest == '=') - return error(_("%s takes no value"), - optname(options, flags)); - if (*rest) - continue; - if (options->value) - *(int *)options->value = options->defval; - 
p->out[p->cpidx++] = arg - 2; - return PARSE_OPT_DONE; - } if (!rest) { /* abbreviated? */ if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN) && diff --git a/parse-options.h b/parse-options.h index a845a9d952..39d9088254 100644 --- a/parse-options.h +++ b/parse-options.h @@ -8,7 +8,6 @@ enum parse_opt_type { /* special types */ OPTION_END, - OPTION_ARGUMENT, OPTION_GROUP, OPTION_NUMBER, OPTION_ALIAS, @@ -155,8 +154,6 @@ struct option { #define OPT_INTEGER_F(s, l, v, h, f) { OPTION_INTEGER, (s), (l), (v), N_("n"), (h), (f) } #define OPT_END() { OPTION_END } -#define OPT_ARGUMENT(l, v, h) { OPTION_ARGUMENT, 0, (l), (v), NULL, \ - (h), PARSE_OPT_NOARG, NULL, 1 } #define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) } #define OPT_BIT(s, l, v, h, b) OPT_BIT_F(s, l, v, h, b, 0) #define OPT_BITOP(s, l, v, h, set, clear) { OPTION_BITOP, (s), (l), (v), NULL, (h), \ @@ -12,6 +12,7 @@ #include "packfile.h" #include "object-store.h" #include "lockfile.h" +#include "exec-cmd.h" static int get_st_mode_bits(const char *path, int *mode) { @@ -719,19 +720,25 @@ static struct passwd *getpw_str(const char *username, size_t len) } /* - * Return a string with ~ and ~user expanded via getpw*. If buf != NULL, - * then it is a newly allocated string. Returns NULL on getpw failure or - * if path is NULL. + * Return a string with ~ and ~user expanded via getpw*. Returns NULL on getpw + * failure or if path is NULL. * - * If real_home is true, strbuf_realpath($HOME) is used in the expansion. + * If real_home is true, strbuf_realpath($HOME) is used in the `~/` expansion. + * + * If the path starts with `%(prefix)/`, the remainder is interpreted as + * relative to where Git is installed, and expanded to the absolute path. */ -char *expand_user_path(const char *path, int real_home) +char *interpolate_path(const char *path, int real_home) { struct strbuf user_path = STRBUF_INIT; const char *to_copy = path; if (path == NULL) goto return_null; + + if (skip_prefix(path, "%(prefix)/", &path)) + return system_path(path); + if (path[0] == '~') { const char *first_slash = strchrnul(path, '/'); const char *username = path + 1; @@ -812,7 +819,7 @@ const char *enter_repo(const char *path, int strict) strbuf_add(&validated_path, path, len); if (used_path.buf[0] == '~') { - char *newpath = expand_user_path(used_path.buf, 0); + char *newpath = interpolate_path(used_path.buf, 0); if (!newpath) return NULL; strbuf_attach(&used_path, newpath, strlen(newpath), @@ -1503,21 +1510,28 @@ int looks_like_command_line_option(const char *str) return str && str[0] == '-'; } -char *xdg_config_home(const char *filename) +char *xdg_config_home_for(const char *subdir, const char *filename) { const char *home, *config_home; + assert(subdir); assert(filename); config_home = getenv("XDG_CONFIG_HOME"); if (config_home && *config_home) - return mkpathdup("%s/git/%s", config_home, filename); + return mkpathdup("%s/%s/%s", config_home, subdir, filename); home = getenv("HOME"); if (home) - return mkpathdup("%s/.config/git/%s", home, filename); + return mkpathdup("%s/.config/%s/%s", home, subdir, filename); + return NULL; } +char *xdg_config_home(const char *filename) +{ + return xdg_config_home_for("git", filename); +} + char *xdg_cache_home(const char *filename) { const char *home, *cache_home; diff --git a/pathspec.c b/pathspec.c index 08f8d3eedc..44306fdaca 100644 --- a/pathspec.c +++ b/pathspec.c @@ -37,8 +37,6 @@ void add_pathspec_matches_against_index(const struct pathspec *pathspec, num_unmatched++; if (!num_unmatched) return; - /* TODO: audit for 
interaction with sparse-index. */ - ensure_full_index(istate); for (i = 0; i < istate->cache_nr; i++) { const struct cache_entry *ce = istate->cache[i]; if (sw_action == PS_IGNORE_SKIP_WORKTREE && ce_skip_worktree(ce)) diff --git a/pkt-line.c b/pkt-line.c index 9f63eae2e6..de4a94b437 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -243,6 +243,43 @@ void packet_write(int fd_out, const char *buf, size_t size) die("%s", err.buf); } +void packet_fwrite(FILE *f, const char *buf, size_t size) +{ + size_t packet_size; + char header[4]; + + if (size > LARGE_PACKET_DATA_MAX) + die(_("packet write failed - data exceeds max packet size")); + + packet_trace(buf, size, 1); + packet_size = size + 4; + + set_packet_header(header, packet_size); + fwrite_or_die(f, header, 4); + fwrite_or_die(f, buf, size); +} + +void packet_fwrite_fmt(FILE *fh, const char *fmt, ...) +{ + static struct strbuf buf = STRBUF_INIT; + va_list args; + + strbuf_reset(&buf); + + va_start(args, fmt); + format_packet(&buf, "", fmt, args); + va_end(args); + + fwrite_or_die(fh, buf.buf, buf.len); +} + +void packet_fflush(FILE *f) +{ + packet_trace("0000", 4, 1); + fwrite_or_die(f, "0000", 4); + fflush_or_die(f); +} + void packet_buf_write(struct strbuf *buf, const char *fmt, ...) { va_list args; diff --git a/pkt-line.h b/pkt-line.h index 5af5f45687..82b95e4bdd 100644 --- a/pkt-line.h +++ b/pkt-line.h @@ -36,6 +36,17 @@ int write_packetized_from_fd_no_flush(int fd_in, int fd_out); int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out); /* + * Stdio versions of packet_write functions. When mixing these with fd + * based functions, take care to call fflush(3) before doing fd writes or + * closing the fd. + */ +void packet_fwrite(FILE *f, const char *buf, size_t size); +void packet_fwrite_fmt(FILE *f, const char *fmt, ...) __attribute__((format (printf, 2, 3))); + +/* packet_fflush writes a flush packet and flushes the stdio buffer of f */ +void packet_fflush(FILE *f); + +/* * Read a packetized line into the buffer, which must be at least size bytes * long. The return value specifies the number of bytes read into the buffer. * @@ -671,7 +671,11 @@ const char *repo_logmsg_reencode(struct repository *r, * If the re-encoding failed, out might be NULL here; in that * case we just return the commit message verbatim. */ - return out ? 
out : msg; + if (!out) { + warning("unable to reencode commit to '%s'", output_encoding); + return msg; + } + return out; } static int mailmap_name(const char **email, size_t *email_len, diff --git a/protocol-caps.c b/protocol-caps.c index 13a9e63a04..bbde91810a 100644 --- a/protocol-caps.c +++ b/protocol-caps.c @@ -69,13 +69,13 @@ static void send_info(struct repository *r, struct packet_writer *writer, } } - packet_writer_write(writer, "%s", - strbuf_detach(&send_buffer, NULL)); + packet_writer_write(writer, "%s", send_buffer.buf); + strbuf_reset(&send_buffer); } + strbuf_release(&send_buffer); } -int cap_object_info(struct repository *r, struct strvec *keys, - struct packet_reader *request) +int cap_object_info(struct repository *r, struct packet_reader *request) { struct requested_info info; struct packet_writer writer; diff --git a/protocol-caps.h b/protocol-caps.h index 0a9f49df11..15c4550360 100644 --- a/protocol-caps.h +++ b/protocol-caps.h @@ -2,9 +2,7 @@ #define PROTOCOL_CAPS_H struct repository; -struct strvec; struct packet_reader; -int cap_object_info(struct repository *r, struct strvec *keys, - struct packet_reader *request); +int cap_object_info(struct repository *r, struct packet_reader *request); #endif /* PROTOCOL_CAPS_H */ diff --git a/protocol.c b/protocol.c index 052d7edbb9..c53f7df5be 100644 --- a/protocol.c +++ b/protocol.c @@ -73,6 +73,8 @@ enum protocol_version determine_protocol_version_server(void) string_list_clear(&list, 0); } + trace2_data_intmax("transfer", NULL, "negotiated-version", version); + return version; } @@ -471,6 +471,23 @@ void perl_quote_buf(struct strbuf *sb, const char *src) strbuf_addch(sb, sq); } +void perl_quote_buf_with_len(struct strbuf *sb, const char *src, size_t len) +{ + const char sq = '\''; + const char bq = '\\'; + const char *c = src; + const char *end = src + len; + + strbuf_addch(sb, sq); + while (c != end) { + if (*c == sq || *c == bq) + strbuf_addch(sb, bq); + strbuf_addch(sb, *c); + c++; + } + strbuf_addch(sb, sq); +} + void python_quote_buf(struct strbuf *sb, const char *src) { const char sq = '\''; @@ -95,6 +95,7 @@ char *quote_path(const char *in, const char *prefix, struct strbuf *out, unsigne /* quoting as a string literal for other languages */ void perl_quote_buf(struct strbuf *sb, const char *src); +void perl_quote_buf_with_len(struct strbuf *sb, const char *src, size_t len); void python_quote_buf(struct strbuf *sb, const char *src); void tcl_quote_buf(struct strbuf *sb, const char *src); void basic_regex_quote_buf(struct strbuf *sb, const char *src); diff --git a/range-diff.c b/range-diff.c index e9479794b4..cac89a2f4f 100644 --- a/range-diff.c +++ b/range-diff.c @@ -26,17 +26,6 @@ struct patch_util { struct object_id oid; }; -static size_t find_end_of_line(char *buffer, unsigned long size) -{ - char *eol = memchr(buffer, '\n', size); - - if (!eol) - return size; - - *eol = '\0'; - return eol + 1 - buffer; -} - /* * Reads the patches into a string list, with the `util` field being populated * as struct object_id (will need to be free()d). 
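A usage note on the perl_quote_buf_with_len() helper added a few hunks above: it exists because the NUL-terminated perl_quote_buf() cannot carry embedded NUL bytes. The driver function below is assumed, not part of the patch; STRBUF_INIT and strbuf_release() are the usual strbuf API:

/*
 * Assumed usage, not from the patch: quote a buffer containing an
 * embedded NUL as a Perl single-quoted string.
 */
static void quote_binary_example(void)
{
	struct strbuf quoted = STRBUF_INIT;
	static const char raw[] = "it's\0binary"; /* 11 bytes, NUL inside */

	perl_quote_buf_with_len(&quoted, raw, sizeof(raw) - 1);
	/*
	 * quoted.buf now holds 'it\'s<NUL>binary': only ' and \ are
	 * escaped; every other byte, including the NUL, is copied
	 * verbatim, which a strlen()-based quoter could not do.
	 */
	strbuf_release(&quoted);
}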
@@ -49,7 +38,7 @@ static int read_patches(const char *range, struct string_list *list, struct patch_util *util = NULL; int in_header = 1; char *line, *current_filename = NULL; - int offset, len; + ssize_t len; size_t size; strvec_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges", @@ -86,11 +75,18 @@ static int read_patches(const char *range, struct string_list *list, line = contents.buf; size = contents.len; - for (offset = 0; size > 0; offset += len, size -= len, line += len) { + for (; size > 0; size -= len, line += len) { const char *p; + char *eol; + + eol = memchr(line, '\n', size); + if (eol) { + *eol = '\0'; + len = eol + 1 - line; + } else { + len = size; + } - len = find_end_of_line(line, size); - line[len - 1] = '\0'; if (skip_prefix(line, "commit ", &p)) { if (util) { string_list_append(list, buf.buf)->util = util; @@ -132,7 +128,8 @@ static int read_patches(const char *range, struct string_list *list, strbuf_addch(&buf, '\n'); if (!util->diff_offset) util->diff_offset = buf.len; - line[len - 1] = '\n'; + if (eol) + *eol = '\n'; orig_len = len; len = parse_git_diff_header(&root, &linenr, 0, line, len, size, &patch); @@ -485,6 +482,7 @@ static void output(struct string_list *a, struct string_list *b, else diff_setup(&opts); + opts.no_free = 1; if (!opts.output_format) opts.output_format = DIFF_FORMAT_PATCH; opts.flags.suppress_diff_headers = 1; @@ -545,6 +543,8 @@ static void output(struct string_list *a, struct string_list *b, strbuf_release(&buf); strbuf_release(&dashes); strbuf_release(&indent); + opts.no_free = 0; + diff_free(&opts); } int show_range_diff(const char *range1, const char *range2, diff --git a/read-cache.c b/read-cache.c index 9048ef9e90..f5d4385c40 100644 --- a/read-cache.c +++ b/read-cache.c @@ -3069,7 +3069,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l int ret; int was_full = !istate->sparse_index; - ret = convert_to_sparse(istate); + ret = convert_to_sparse(istate, 0); if (ret) { warning(_("failed to convert to a sparse-index")); @@ -3182,7 +3182,7 @@ static int write_shared_index(struct index_state *istate, int ret, was_full = !istate->sparse_index; move_cache_to_base_index(istate); - convert_to_sparse(istate); + convert_to_sparse(istate, 0); trace2_region_enter_printf("index", "shared/do_write_index", the_repository, "%s", get_tempfile_path(*temp)); diff --git a/ref-filter.c b/ref-filter.c index 0cfef7b719..93ce2a6ef2 100644 --- a/ref-filter.c +++ b/ref-filter.c @@ -144,6 +144,7 @@ enum atom_type { ATOM_BODY, ATOM_TRAILERS, ATOM_CONTENTS, + ATOM_RAW, ATOM_UPSTREAM, ATOM_PUSH, ATOM_SYMREF, @@ -156,6 +157,7 @@ enum atom_type { ATOM_IF, ATOM_THEN, ATOM_ELSE, + ATOM_REST, }; /* @@ -190,6 +192,9 @@ static struct used_atom { unsigned int nlines; } contents; struct { + enum { RAW_BARE, RAW_LENGTH } option; + } raw_data; + struct { cmp_status cmp_status; const char *str; } if_then_else; @@ -223,7 +228,7 @@ static int strbuf_addf_ret(struct strbuf *sb, int ret, const char *fmt, ...) 
return ret; } -static int color_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int color_atom_parser(struct ref_format *format, struct used_atom *atom, const char *color_value, struct strbuf *err) { if (!color_value) @@ -261,7 +266,7 @@ static int refname_atom_parser_internal(struct refname_atom *atom, const char *a return 0; } -static int remote_ref_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int remote_ref_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { struct string_list params = STRING_LIST_INIT_DUP; @@ -308,7 +313,7 @@ static int remote_ref_atom_parser(const struct ref_format *format, struct used_a return 0; } -static int objecttype_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int objecttype_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (arg) @@ -320,7 +325,7 @@ static int objecttype_atom_parser(const struct ref_format *format, struct used_a return 0; } -static int objectsize_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int objectsize_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (!arg) { @@ -340,7 +345,7 @@ static int objectsize_atom_parser(const struct ref_format *format, struct used_a return 0; } -static int deltabase_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int deltabase_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (arg) @@ -352,7 +357,7 @@ static int deltabase_atom_parser(const struct ref_format *format, struct used_at return 0; } -static int body_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int body_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (arg) @@ -361,7 +366,7 @@ static int body_atom_parser(const struct ref_format *format, struct used_atom *a return 0; } -static int subject_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int subject_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (!arg) @@ -373,7 +378,7 @@ static int subject_atom_parser(const struct ref_format *format, struct used_atom return 0; } -static int trailers_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int trailers_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { atom->u.contents.trailer_opts.no_divider = 1; @@ -399,7 +404,7 @@ static int trailers_atom_parser(const struct ref_format *format, struct used_ato return 0; } -static int contents_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int contents_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (!arg) @@ -427,7 +432,19 @@ static int contents_atom_parser(const struct ref_format *format, struct used_ato return 0; } -static int oid_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int raw_atom_parser(struct ref_format *format, struct used_atom *atom, + const char *arg, struct strbuf *err) +{ + if (!arg) + atom->u.raw_data.option = RAW_BARE; + else if (!strcmp(arg, "size")) + atom->u.raw_data.option = RAW_LENGTH; + else + return strbuf_addf_ret(err, -1, _("unrecognized %%(raw) argument: %s"), arg); + return 
0; +} + +static int oid_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (!arg) @@ -446,7 +463,7 @@ static int oid_atom_parser(const struct ref_format *format, struct used_atom *at return 0; } -static int person_email_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int person_email_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (!arg) @@ -460,7 +477,7 @@ static int person_email_atom_parser(const struct ref_format *format, struct used return 0; } -static int refname_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int refname_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { return refname_atom_parser_internal(&atom->u.refname, arg, atom->name, err); @@ -477,7 +494,7 @@ static align_type parse_align_position(const char *s) return -1; } -static int align_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int align_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { struct align *align = &atom->u.align; @@ -529,7 +546,7 @@ static int align_atom_parser(const struct ref_format *format, struct used_atom * return 0; } -static int if_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int if_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err) { if (!arg) { @@ -544,7 +561,16 @@ static int if_atom_parser(const struct ref_format *format, struct used_atom *ato return 0; } -static int head_atom_parser(const struct ref_format *format, struct used_atom *atom, +static int rest_atom_parser(struct ref_format *format, struct used_atom *atom, + const char *arg, struct strbuf *err) +{ + if (arg) + return strbuf_addf_ret(err, -1, _("%%(rest) does not take arguments")); + format->use_rest = 1; + return 0; +} + +static int head_atom_parser(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *unused_err) { atom->u.head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL); @@ -555,7 +581,7 @@ static struct { const char *name; info_source source; cmp_type cmp_type; - int (*parser)(const struct ref_format *format, struct used_atom *atom, + int (*parser)(struct ref_format *format, struct used_atom *atom, const char *arg, struct strbuf *err); } valid_atom[] = { [ATOM_REFNAME] = { "refname", SOURCE_NONE, FIELD_STR, refname_atom_parser }, @@ -587,6 +613,7 @@ static struct { [ATOM_BODY] = { "body", SOURCE_OBJ, FIELD_STR, body_atom_parser }, [ATOM_TRAILERS] = { "trailers", SOURCE_OBJ, FIELD_STR, trailers_atom_parser }, [ATOM_CONTENTS] = { "contents", SOURCE_OBJ, FIELD_STR, contents_atom_parser }, + [ATOM_RAW] = { "raw", SOURCE_OBJ, FIELD_STR, raw_atom_parser }, [ATOM_UPSTREAM] = { "upstream", SOURCE_NONE, FIELD_STR, remote_ref_atom_parser }, [ATOM_PUSH] = { "push", SOURCE_NONE, FIELD_STR, remote_ref_atom_parser }, [ATOM_SYMREF] = { "symref", SOURCE_NONE, FIELD_STR, refname_atom_parser }, @@ -599,6 +626,7 @@ static struct { [ATOM_IF] = { "if", SOURCE_NONE, FIELD_STR, if_atom_parser }, [ATOM_THEN] = { "then", SOURCE_NONE }, [ATOM_ELSE] = { "else", SOURCE_NONE }, + [ATOM_REST] = { "rest", SOURCE_NONE, FIELD_STR, rest_atom_parser }, /* * Please update $__git_ref_fieldlist in git-completion.bash * when you add new atoms @@ -621,16 +649,23 @@ struct ref_formatting_state { struct atom_value { const char *s; + ssize_t s_size; int 
(*handler)(struct atom_value *atomv, struct ref_formatting_state *state, struct strbuf *err); uintmax_t value; /* used for sorting when not FIELD_STR */ struct used_atom *atom; }; +#define ATOM_SIZE_UNSPECIFIED (-1) + +#define ATOM_VALUE_INIT { \ + .s_size = ATOM_SIZE_UNSPECIFIED \ +} + /* * Used to parse format string and sort specifiers */ -static int parse_ref_filter_atom(const struct ref_format *format, +static int parse_ref_filter_atom(struct ref_format *format, const char *atom, const char *ep, struct strbuf *err) { @@ -645,13 +680,6 @@ static int parse_ref_filter_atom(const struct ref_format *format, return strbuf_addf_ret(err, -1, _("malformed field name: %.*s"), (int)(ep-atom), atom); - /* Do we have the atom already used elsewhere? */ - for (i = 0; i < used_atom_cnt; i++) { - int len = strlen(used_atom[i].name); - if (len == ep - atom && !memcmp(used_atom[i].name, atom, len)) - return i; - } - /* * If the atom name has a colon, strip it and everything after * it off - it specifies the format for this entry, and @@ -661,6 +689,13 @@ static int parse_ref_filter_atom(const struct ref_format *format, arg = memchr(sp, ':', ep - sp); atom_len = (arg ? arg : ep) - sp; + /* Do we have the atom already used elsewhere? */ + for (i = 0; i < used_atom_cnt; i++) { + int len = strlen(used_atom[i].name); + if (len == ep - atom && !memcmp(used_atom[i].name, atom, len)) + return i; + } + /* Is the atom a valid one? */ for (i = 0; i < ARRAY_SIZE(valid_atom); i++) { int len = strlen(valid_atom[i].name); @@ -710,17 +745,23 @@ static int parse_ref_filter_atom(const struct ref_format *format, return at; } -static void quote_formatting(struct strbuf *s, const char *str, int quote_style) +static void quote_formatting(struct strbuf *s, const char *str, ssize_t len, int quote_style) { switch (quote_style) { case QUOTE_NONE: - strbuf_addstr(s, str); + if (len < 0) + strbuf_addstr(s, str); + else + strbuf_add(s, str, len); break; case QUOTE_SHELL: sq_quote_buf(s, str); break; case QUOTE_PERL: - perl_quote_buf(s, str); + if (len < 0) + perl_quote_buf(s, str); + else + perl_quote_buf_with_len(s, str, len); break; case QUOTE_PYTHON: python_quote_buf(s, str); @@ -741,9 +782,11 @@ static int append_atom(struct atom_value *v, struct ref_formatting_state *state, * encountered. 
*/ if (!state->stack->prev) - quote_formatting(&state->stack->output, v->s, state->quote_style); - else + quote_formatting(&state->stack->output, v->s, v->s_size, state->quote_style); + else if (v->s_size < 0) strbuf_addstr(&state->stack->output, v->s); + else + strbuf_add(&state->stack->output, v->s, v->s_size); return 0; } @@ -843,21 +886,23 @@ static int if_atom_handler(struct atom_value *atomv, struct ref_formatting_state return 0; } -static int is_empty(const char *s) +static int is_empty(struct strbuf *buf) { - while (*s != '\0') { - if (!isspace(*s)) - return 0; - s++; - } - return 1; -} + const char *cur = buf->buf; + const char *end = buf->buf + buf->len; + + while (cur != end && (isspace(*cur))) + cur++; + + return cur == end; + } static int then_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state, struct strbuf *err) { struct ref_formatting_stack *cur = state->stack; struct if_then_else *if_then_else = NULL; + size_t str_len = 0; if (cur->at_end == if_then_else_handler) if_then_else = (struct if_then_else *)cur->at_end_data; @@ -868,18 +913,22 @@ static int then_atom_handler(struct atom_value *atomv, struct ref_formatting_sta if (if_then_else->else_atom_seen) return strbuf_addf_ret(err, -1, _("format: %%(then) atom used after %%(else)")); if_then_else->then_atom_seen = 1; + if (if_then_else->str) + str_len = strlen(if_then_else->str); /* * If the 'equals' or 'notequals' attribute is used then * perform the required comparison. If not, only non-empty * strings satisfy the 'if' condition. */ if (if_then_else->cmp_status == COMPARE_EQUAL) { - if (!strcmp(if_then_else->str, cur->output.buf)) + if (str_len == cur->output.len && + !memcmp(if_then_else->str, cur->output.buf, cur->output.len)) if_then_else->condition_satisfied = 1; } else if (if_then_else->cmp_status == COMPARE_UNEQUAL) { - if (strcmp(if_then_else->str, cur->output.buf)) + if (str_len != cur->output.len || + memcmp(if_then_else->str, cur->output.buf, cur->output.len)) if_then_else->condition_satisfied = 1; - } else if (cur->output.len && !is_empty(cur->output.buf)) + } else if (cur->output.len && !is_empty(&cur->output)) if_then_else->condition_satisfied = 1; strbuf_reset(&cur->output); return 0; @@ -925,7 +974,7 @@ static int end_atom_handler(struct atom_value *atomv, struct ref_formatting_stat * only on the topmost supporting atom. */ if (!current->prev->prev) { - quote_formatting(&s, current->output.buf, state->quote_style); + quote_formatting(&s, current->output.buf, current->output.len, state->quote_style); strbuf_swap(¤t->output, &s); } strbuf_release(&s); @@ -955,6 +1004,11 @@ static const char *find_next(const char *cp) return NULL; } +static int reject_atom(enum atom_type atom_type) +{ + return atom_type == ATOM_REST; +} + /* * Make sure the format string is well formed, and parse out * the used atoms. 
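For the new %(raw) atom, the value keeps an explicit s_size next to the string so that object contents with embedded NULs are preserved through formatting and sorting. A minimal sketch of validating a format that uses it, assuming code inside Git's tree (check_format() is a made-up caller; REF_FORMAT_INIT is the initializer shown in the ref-filter.h hunk further down):

----
#include "ref-filter.h"

/* Hypothetical caller: validate a for-each-ref style format string. */
static void check_format(void)
{
	struct ref_format format = REF_FORMAT_INIT;

	/*
	 * %(raw:size) expands to the object size; a bare %(raw) is
	 * additionally rejected by verify_ref_format() when combined
	 * with the shell, python or tcl quote styles.
	 */
	format.format = "%(objectname) %(raw:size)";

	verify_ref_format(&format);	/* die()s if the format is malformed */
}
----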
@@ -975,6 +1029,16 @@ int verify_ref_format(struct ref_format *format) at = parse_ref_filter_atom(format, sp + 2, ep, &err); if (at < 0) die("%s", err.buf); + if (reject_atom(used_atom[at].atom_type)) + die(_("this command reject atom %%(%.*s)"), (int)(ep - sp - 2), sp + 2); + + if ((format->quote_style == QUOTE_PYTHON || + format->quote_style == QUOTE_SHELL || + format->quote_style == QUOTE_TCL) && + used_atom[at].atom_type == ATOM_RAW && + used_atom[at].u.raw_data.option == RAW_BARE) + die(_("--format=%.*s cannot be used with" + "--python, --shell, --tcl"), (int)(ep - sp - 2), sp + 2); cp = ep + 1; if (skip_prefix(used_atom[at].name, "color:", &color)) @@ -1357,25 +1421,42 @@ static void append_lines(struct strbuf *out, const char *buf, unsigned long size } /* See grab_values */ -static void grab_sub_body_contents(struct atom_value *val, int deref, void *buf) +static void grab_sub_body_contents(struct atom_value *val, int deref, struct expand_data *data) { int i; const char *subpos = NULL, *bodypos = NULL, *sigpos = NULL; size_t sublen = 0, bodylen = 0, nonsiglen = 0, siglen = 0; + void *buf = data->content; for (i = 0; i < used_atom_cnt; i++) { struct used_atom *atom = &used_atom[i]; const char *name = atom->name; struct atom_value *v = &val[i]; + enum atom_type atom_type = atom->atom_type; if (!!deref != (*name == '*')) continue; if (deref) name++; - if (strcmp(name, "body") && - !starts_with(name, "subject") && - !starts_with(name, "trailers") && - !starts_with(name, "contents")) + + if (atom_type == ATOM_RAW) { + unsigned long buf_size = data->size; + + if (atom->u.raw_data.option == RAW_BARE) { + v->s = xmemdupz(buf, buf_size); + v->s_size = buf_size; + } else if (atom->u.raw_data.option == RAW_LENGTH) { + v->s = xstrfmt("%"PRIuMAX, (uintmax_t)buf_size); + } + continue; + } + + if ((data->type != OBJ_TAG && + data->type != OBJ_COMMIT) || + (strcmp(name, "body") && + !starts_with(name, "subject") && + !starts_with(name, "trailers") && + !starts_with(name, "contents"))) continue; if (!subpos) find_subpos(buf, @@ -1439,25 +1520,29 @@ static void fill_missing_values(struct atom_value *val) * pointed at by the ref itself; otherwise it is the object the * ref (which is a tag) refers to. */ -static void grab_values(struct atom_value *val, int deref, struct object *obj, void *buf) +static void grab_values(struct atom_value *val, int deref, struct object *obj, struct expand_data *data) { + void *buf = data->content; + switch (obj->type) { case OBJ_TAG: grab_tag_values(val, deref, obj); - grab_sub_body_contents(val, deref, buf); + grab_sub_body_contents(val, deref, data); grab_person("tagger", val, deref, buf); break; case OBJ_COMMIT: grab_commit_values(val, deref, obj); - grab_sub_body_contents(val, deref, buf); + grab_sub_body_contents(val, deref, data); grab_person("author", val, deref, buf); grab_person("committer", val, deref, buf); break; case OBJ_TREE: /* grab_tree_values(val, deref, obj, buf, sz); */ + grab_sub_body_contents(val, deref, data); break; case OBJ_BLOB: /* grab_blob_values(val, deref, obj, buf, sz); */ + grab_sub_body_contents(val, deref, data); break; default: die("Eh? 
Object of type %d?", obj->type); @@ -1679,7 +1764,7 @@ static int get_object(struct ref_array_item *ref, int deref, struct object **obj return strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"), oid_to_hex(&oi->oid), ref->refname); } - grab_values(ref->value, deref, *obj, oi->content); + grab_values(ref->value, deref, *obj, oi); } grab_common_values(ref->value, deref, oi); @@ -1761,6 +1846,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err) const char *refname; struct branch *branch = NULL; + v->s_size = ATOM_SIZE_UNSPECIFIED; v->handler = append_atom; v->atom = atom; @@ -1864,6 +1950,12 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err) v->handler = else_atom_handler; v->s = xstrdup(""); continue; + } else if (atom_type == ATOM_REST) { + if (ref->rest) + v->s = xstrdup(ref->rest); + else + v->s = xstrdup(""); + continue; } else continue; @@ -2081,6 +2173,7 @@ static struct ref_array_item *new_ref_array_item(const char *refname, FLEX_ALLOC_STR(ref, refname, refname); oidcpy(&ref->objectname, oid); + ref->rest = NULL; return ref; } @@ -2368,6 +2461,19 @@ static int compare_detached_head(struct ref_array_item *a, struct ref_array_item return 0; } +static int memcasecmp(const void *vs1, const void *vs2, size_t n) +{ + const char *s1 = vs1, *s2 = vs2; + const char *end = s1 + n; + + for (; s1 < end; s1++, s2++) { + int diff = tolower(*s1) - tolower(*s2); + if (diff) + return diff; + } + return 0; +} + static int cmp_ref_sorting(struct ref_sorting *s, struct ref_array_item *a, struct ref_array_item *b) { struct atom_value *va, *vb; @@ -2388,10 +2494,29 @@ static int cmp_ref_sorting(struct ref_sorting *s, struct ref_array_item *a, stru } else if (s->sort_flags & REF_SORTING_VERSION) { cmp = versioncmp(va->s, vb->s); } else if (cmp_type == FIELD_STR) { - int (*cmp_fn)(const char *, const char *); - cmp_fn = s->sort_flags & REF_SORTING_ICASE - ? strcasecmp : strcmp; - cmp = cmp_fn(va->s, vb->s); + if (va->s_size < 0 && vb->s_size < 0) { + int (*cmp_fn)(const char *, const char *); + cmp_fn = s->sort_flags & REF_SORTING_ICASE + ? strcasecmp : strcmp; + cmp = cmp_fn(va->s, vb->s); + } else { + size_t a_size = va->s_size < 0 ? + strlen(va->s) : va->s_size; + size_t b_size = vb->s_size < 0 ? + strlen(vb->s) : vb->s_size; + int (*cmp_fn)(const void *, const void *, size_t); + cmp_fn = s->sort_flags & REF_SORTING_ICASE + ? memcasecmp : memcmp; + + cmp = cmp_fn(va->s, vb->s, b_size > a_size ? 
+ a_size : b_size); + if (!cmp) { + if (a_size > b_size) + cmp = 1; + else if (a_size < b_size) + cmp = -1; + } + } } else { if (va->value < vb->value) cmp = -1; @@ -2461,9 +2586,9 @@ static void append_literal(const char *cp, const char *ep, struct ref_formatting } int format_ref_array_item(struct ref_array_item *info, - const struct ref_format *format, - struct strbuf *final_buf, - struct strbuf *error_buf) + struct ref_format *format, + struct strbuf *final_buf, + struct strbuf *error_buf) { const char *cp, *sp, *ep; struct ref_formatting_state state = REF_FORMATTING_STATE_INIT; @@ -2490,7 +2615,7 @@ int format_ref_array_item(struct ref_array_item *info, append_literal(cp, sp, &state); } if (format->need_color_reset_at_eol) { - struct atom_value resetv; + struct atom_value resetv = ATOM_VALUE_INIT; resetv.s = GIT_COLOR_RESET; if (append_atom(&resetv, &state, error_buf)) { pop_stack_element(&state.stack); @@ -2507,7 +2632,7 @@ int format_ref_array_item(struct ref_array_item *info, } void pretty_print_ref(const char *name, const struct object_id *oid, - const struct ref_format *format) + struct ref_format *format) { struct ref_array_item *ref_item; struct strbuf output = STRBUF_INIT; diff --git a/ref-filter.h b/ref-filter.h index baf72a7189..c15dee8d6b 100644 --- a/ref-filter.h +++ b/ref-filter.h @@ -38,6 +38,7 @@ struct ref_sorting { struct ref_array_item { struct object_id objectname; + const char *rest; int flag; unsigned int kind; const char *symref; @@ -76,14 +77,16 @@ struct ref_format { * verify_ref_format() afterwards to finalize. */ const char *format; + const char *rest; int quote_style; + int use_rest; int use_color; /* Internal state to ref-filter */ int need_color_reset_at_eol; }; -#define REF_FORMAT_INIT { NULL, 0, -1 } +#define REF_FORMAT_INIT { .use_color = -1 } /* Macros for checking --merged and --no-merged options */ #define _OPT_MERGED_NO_MERGED(option, filter, h) \ @@ -116,7 +119,7 @@ void ref_array_sort(struct ref_sorting *sort, struct ref_array *array); void ref_sorting_set_sort_flags_all(struct ref_sorting *sorting, unsigned int mask, int on); /* Based on the given format and quote_style, fill the strbuf */ int format_ref_array_item(struct ref_array_item *info, - const struct ref_format *format, + struct ref_format *format, struct strbuf *final_buf, struct strbuf *error_buf); /* Parse a single sort specifier and add it to the list */ @@ -137,7 +140,7 @@ void setup_ref_filter_porcelain_msg(void); * name must be a fully qualified refname. */ void pretty_print_ref(const char *name, const struct object_id *oid, - const struct ref_format *format); + struct ref_format *format); /* * Push a single ref onto the array; this can be used to construct your own diff --git a/reflog-walk.c b/reflog-walk.c index e9cd328369..8ac4b284b6 100644 --- a/reflog-walk.c +++ b/reflog-walk.c @@ -158,10 +158,9 @@ int add_reflog_for_walk(struct reflog_walk_info *info, } reflogs = read_complete_reflog(branch); if (!reflogs || reflogs->nr == 0) { - struct object_id oid; char *b; int ret = dwim_log(branch, strlen(branch), - &oid, &b); + NULL, &b); if (ret > 1) free(b); else if (ret == 1) { @@ -698,7 +698,7 @@ int repo_dwim_log(struct repository *r, const char *str, int len, strbuf_addf(&path, *p, len, str); ref = refs_resolve_ref_unsafe(refs, path.buf, RESOLVE_REF_READING, - &hash, NULL); + oid ? 
&hash : NULL, NULL); if (!ref) continue; if (refs_reflog_exists(refs, path.buf)) @@ -710,7 +710,8 @@ int repo_dwim_log(struct repository *r, const char *str, int len, continue; if (!logs_found++) { *log = xstrdup(it); - oidcpy(oid, &hash); + if (oid) + oidcpy(oid, &hash); } if (!warn_ambiguous_refs) break; @@ -1681,7 +1682,7 @@ int refs_read_raw_ref(struct ref_store *ref_store, } return ref_store->be->read_raw_ref(ref_store, refname, oid, referent, - type); + type, &errno); } /* This function needs to return a meaningful errno on failure */ @@ -2370,19 +2371,19 @@ int delete_reflog(const char *refname) } int refs_reflog_expire(struct ref_store *refs, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, void *policy_cb_data) { - return refs->be->reflog_expire(refs, refname, oid, flags, + return refs->be->reflog_expire(refs, refname, flags, prepare_fn, should_prune_fn, cleanup_fn, policy_cb_data); } -int reflog_expire(const char *refname, const struct object_id *oid, +int reflog_expire(const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, @@ -2390,7 +2391,7 @@ int reflog_expire(const char *refname, const struct object_id *oid, void *policy_cb_data) { return refs_reflog_expire(get_main_ref_store(the_repository), - refname, oid, flags, + refname, flags, prepare_fn, should_prune_fn, cleanup_fn, policy_cb_data); } @@ -796,7 +796,7 @@ enum expire_reflog_flags { * expiration policy that is desired. * * reflog_expiry_prepare_fn -- Called once after the reference is - * locked. + * locked. Called with the OID of the locked reference. * * reflog_expiry_should_prune_fn -- Called once for each entry in the * existing reflog. It should return true iff that entry should be @@ -816,20 +816,19 @@ typedef int reflog_expiry_should_prune_fn(struct object_id *ooid, typedef void reflog_expiry_cleanup_fn(void *cb_data); /* - * Expire reflog entries for the specified reference. oid is the old - * value of the reference. flags is a combination of the constants in + * Expire reflog entries for the specified reference. + * flags is a combination of the constants in * enum expire_reflog_flags. The three function pointers are described * above. On success, return zero. 
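Since the old-OID argument is gone from the expiry entry points, a caller now only names the ref and provides the policy callbacks; the locked ref's current value reaches the prepare callback instead. A rough sketch (the expire_* identifiers are placeholders and their definitions are omitted; flags is left at 0 rather than guessing at enum expire_reflog_flags values):

----
#include "refs.h"

/* Hypothetical policy callbacks, declared via the typedefs in refs.h;
 * their definitions are omitted here. */
static reflog_expiry_prepare_fn      expire_prepare;
static reflog_expiry_should_prune_fn expire_should_prune;
static reflog_expiry_cleanup_fn      expire_cleanup;

static int expire_head_reflog(void)
{
	/*
	 * No old OID is passed any more; the files backend looks up the
	 * locked ref's value itself and feeds it to expire_prepare().
	 */
	return reflog_expire("HEAD", 0 /* flags */,
			     expire_prepare,
			     expire_should_prune,
			     expire_cleanup,
			     NULL /* policy_cb_data */);
}
----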
*/ int refs_reflog_expire(struct ref_store *refs, const char *refname, - const struct object_id *oid, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, void *policy_cb_data); -int reflog_expire(const char *refname, const struct object_id *oid, +int reflog_expire(const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, diff --git a/refs/debug.c b/refs/debug.c index 1a7a9e11cf..8667c64023 100644 --- a/refs/debug.c +++ b/refs/debug.c @@ -239,15 +239,14 @@ debug_ref_iterator_begin(struct ref_store *ref_store, const char *prefix, static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname, struct object_id *oid, struct strbuf *referent, - unsigned int *type) + unsigned int *type, int *failure_errno) { struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store; int res = 0; oidcpy(oid, null_oid()); - errno = 0; res = drefs->refs->be->read_raw_ref(drefs->refs, refname, oid, referent, - type); + type, failure_errno); if (res == 0) { trace_printf_key(&trace_refs, "read_raw_ref: %s: %s (=> %s) type %x: %d\n", @@ -255,7 +254,7 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname, } else { trace_printf_key(&trace_refs, "read_raw_ref: %s: %d (errno %d)\n", refname, - res, errno); + res, *failure_errno); } return res; } @@ -365,8 +364,8 @@ struct debug_reflog_expiry_should_prune { }; static void debug_reflog_expiry_prepare(const char *refname, - const struct object_id *oid, - void *cb_data) + const struct object_id *oid, + void *cb_data) { struct debug_reflog_expiry_should_prune *prune = cb_data; trace_printf_key(&trace_refs, "reflog_expire_prepare: %s\n", refname); @@ -392,7 +391,7 @@ static void debug_reflog_expiry_cleanup(void *cb_data) } static int debug_reflog_expire(struct ref_store *ref_store, const char *refname, - const struct object_id *oid, unsigned int flags, + unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, @@ -405,7 +404,7 @@ static int debug_reflog_expire(struct ref_store *ref_store, const char *refname, .should_prune = should_prune_fn, .cb_data = policy_cb_data, }; - int res = drefs->refs->be->reflog_expire(drefs->refs, refname, oid, + int res = drefs->refs->be->reflog_expire(drefs->refs, refname, flags, &debug_reflog_expiry_prepare, &debug_reflog_expiry_should_prune_fn, &debug_reflog_expiry_cleanup, diff --git a/refs/files-backend.c b/refs/files-backend.c index 677b7e4cdd..e46178780e 100644 --- a/refs/files-backend.c +++ b/refs/files-backend.c @@ -341,9 +341,9 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs) return refs->loose; } -static int files_read_raw_ref(struct ref_store *ref_store, - const char *refname, struct object_id *oid, - struct strbuf *referent, unsigned int *type) +static int files_read_raw_ref(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno) { struct files_ref_store *refs = files_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); @@ -354,7 +354,6 @@ static int files_read_raw_ref(struct ref_store *ref_store, struct stat st; int fd; int ret = -1; - int save_errno; int remaining_retries = 3; *type = 0; @@ -459,10 +458,9 @@ stat_ref: ret = parse_loose_ref_contents(buf, oid, referent, type); out: - save_errno = errno; + *failure_errno = errno; 
strbuf_release(&sb_path); strbuf_release(&sb_contents); - errno = save_errno; return ret; } @@ -531,7 +529,6 @@ static void unlock_ref(struct ref_lock *lock) static int lock_raw_ref(struct files_ref_store *refs, const char *refname, int mustexist, const struct string_list *extras, - const struct string_list *skip, struct ref_lock **lock_p, struct strbuf *referent, unsigned int *type, @@ -541,6 +538,7 @@ static int lock_raw_ref(struct files_ref_store *refs, struct strbuf ref_file = STRBUF_INIT; int attempts_remaining = 3; int ret = TRANSACTION_GENERIC_ERROR; + int failure_errno; assert(err); files_assert_main_repository(refs, "lock_raw_ref"); @@ -568,7 +566,7 @@ retry: * reason to expect this error to be transitory. */ if (refs_verify_refname_available(&refs->base, refname, - extras, skip, err)) { + extras, NULL, err)) { if (mustexist) { /* * To the user the relevant error is @@ -611,7 +609,9 @@ retry: if (hold_lock_file_for_update_timeout( &lock->lk, ref_file.buf, LOCK_NO_DEREF, get_files_ref_lock_timeout_ms()) < 0) { - if (errno == ENOENT && --attempts_remaining > 0) { + int myerr = errno; + errno = 0; + if (myerr == ENOENT && --attempts_remaining > 0) { /* * Maybe somebody just deleted one of the * directories leading to ref_file. Try @@ -619,7 +619,7 @@ retry: */ goto retry; } else { - unable_to_lock_message(ref_file.buf, errno, err); + unable_to_lock_message(ref_file.buf, myerr, err); goto error_return; } } @@ -629,9 +629,9 @@ retry: * fear that its value will change. */ - if (files_read_raw_ref(&refs->base, refname, - &lock->old_oid, referent, type)) { - if (errno == ENOENT) { + if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent, + type, &failure_errno)) { + if (failure_errno == ENOENT) { if (mustexist) { /* Garden variety missing reference. */ strbuf_addf(err, "unable to resolve reference '%s'", @@ -655,7 +655,7 @@ retry: * reference named "refs/foo/bar/baz". */ } - } else if (errno == EISDIR) { + } else if (failure_errno == EISDIR) { /* * There is a directory in the way. It might have * contained references that have been deleted. If @@ -673,7 +673,7 @@ retry: REMOVE_DIR_EMPTY_ONLY)) { if (refs_verify_refname_available( &refs->base, refname, - extras, skip, err)) { + extras, NULL, err)) { /* * The error message set by * verify_refname_available() is OK. @@ -693,13 +693,13 @@ retry: goto error_return; } } - } else if (errno == EINVAL && (*type & REF_ISBROKEN)) { + } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) { strbuf_addf(err, "unable to resolve reference '%s': " "reference broken", refname); goto error_return; } else { strbuf_addf(err, "unable to resolve reference '%s': %s", - refname, strerror(errno)); + refname, strerror(failure_errno)); goto error_return; } @@ -710,7 +710,7 @@ retry: */ if (refs_verify_refname_available( refs->packed_ref_store, refname, - extras, skip, err)) + extras, NULL, err)) goto error_return; } @@ -854,39 +854,112 @@ static struct ref_iterator *files_ref_iterator_begin( } /* - * Verify that the reference locked by lock has the value old_oid - * (unless it is NULL). Fail if the reference doesn't exist and - * mustexist is set. Return 0 on success. On error, write an error - * message to err, set errno, and return a negative value. + * Callback function for raceproof_create_file(). This function is + * expected to do something that makes dirname(path) permanent despite + * the fact that other processes might be cleaning up empty + * directories at the same time. 
Usually it will create a file named + * path, but alternatively it could create another file in that + * directory, or even chdir() into that directory. The function should + * return 0 if the action was completed successfully. On error, it + * should return a nonzero result and set errno. + * raceproof_create_file() treats two errno values specially: + * + * - ENOENT -- dirname(path) does not exist. In this case, + * raceproof_create_file() tries creating dirname(path) + * (and any parent directories, if necessary) and calls + * the function again. + * + * - EISDIR -- the file already exists and is a directory. In this + * case, raceproof_create_file() removes the directory if + * it is empty (and recursively any empty directories that + * it contains) and calls the function again. + * + * Any other errno causes raceproof_create_file() to fail with the + * callback's return value and errno. + * + * Obviously, this function should be OK with being called again if it + * fails with ENOENT or EISDIR. In other scenarios it will not be + * called again. + */ +typedef int create_file_fn(const char *path, void *cb); + +/* + * Create a file in dirname(path) by calling fn, creating leading + * directories if necessary. Retry a few times in case we are racing + * with another process that is trying to clean up the directory that + * contains path. See the documentation for create_file_fn for more + * details. + * + * Return the value and set the errno that resulted from the most + * recent call of fn. fn is always called at least once, and will be + * called more than once if it returns ENOENT or EISDIR. */ -static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock, - const struct object_id *old_oid, int mustexist, - struct strbuf *err) +static int raceproof_create_file(const char *path, create_file_fn fn, void *cb) { - assert(err); + /* + * The number of times we will try to remove empty directories + * in the way of path. This is only 1 because if another + * process is racily creating directories that conflict with + * us, we don't want to fight against them. + */ + int remove_directories_remaining = 1; - if (refs_read_ref_full(ref_store, lock->ref_name, - mustexist ? RESOLVE_REF_READING : 0, - &lock->old_oid, NULL)) { - if (old_oid) { - int save_errno = errno; - strbuf_addf(err, "can't verify ref '%s'", lock->ref_name); - errno = save_errno; - return -1; - } else { - oidclr(&lock->old_oid); - return 0; - } - } - if (old_oid && !oideq(&lock->old_oid, old_oid)) { - strbuf_addf(err, "ref '%s' is at %s but expected %s", - lock->ref_name, - oid_to_hex(&lock->old_oid), - oid_to_hex(old_oid)); - errno = EBUSY; - return -1; + /* + * The number of times that we will try to create the + * directories containing path. We are willing to attempt this + * more than once, because another process could be trying to + * clean up empty directories at the same time as we are + * trying to create them. + */ + int create_directories_remaining = 3; + + /* A scratch copy of path, filled lazily if we need it: */ + struct strbuf path_copy = STRBUF_INIT; + + int ret, save_errno; + + /* Sanity check: */ + assert(*path); + +retry_fn: + ret = fn(path, cb); + save_errno = errno; + if (!ret) + goto out; + + if (errno == EISDIR && remove_directories_remaining-- > 0) { + /* + * A directory is in the way. 
Maybe it is empty; try + * to remove it: + */ + if (!path_copy.len) + strbuf_addstr(&path_copy, path); + + if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY)) + goto retry_fn; + } else if (errno == ENOENT && create_directories_remaining-- > 0) { + /* + * Maybe the containing directory didn't exist, or + * maybe it was just deleted by a process that is + * racing with us to clean up empty directories. Try + * to create it: + */ + enum scld_error scld_result; + + if (!path_copy.len) + strbuf_addstr(&path_copy, path); + + do { + scld_result = safe_create_leading_directories(path_copy.buf); + if (scld_result == SCLD_OK) + goto retry_fn; + } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0); } - return 0; + +out: + strbuf_release(&path_copy); + errno = save_errno; + return ret; } static int remove_empty_directories(struct strbuf *path) @@ -910,64 +983,27 @@ static int create_reflock(const char *path, void *cb) /* * Locks a ref returning the lock on success and NULL on failure. - * On failure errno is set to something meaningful. */ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, - const char *refname, - const struct object_id *old_oid, - const struct string_list *extras, - const struct string_list *skip, - unsigned int flags, int *type, + const char *refname, int *type, struct strbuf *err) { struct strbuf ref_file = STRBUF_INIT; struct ref_lock *lock; - int last_errno = 0; - int mustexist = (old_oid && !is_null_oid(old_oid)); - int resolve_flags = RESOLVE_REF_NO_RECURSE; - int resolved; files_assert_main_repository(refs, "lock_ref_oid_basic"); assert(err); CALLOC_ARRAY(lock, 1); - if (mustexist) - resolve_flags |= RESOLVE_REF_READING; - if (flags & REF_DELETING) - resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME; - files_ref_path(refs, &ref_file, refname); - resolved = !!refs_resolve_ref_unsafe(&refs->base, - refname, resolve_flags, - &lock->old_oid, type); - if (!resolved && errno == EISDIR) { - /* - * we are trying to lock foo but we used to - * have foo/bar which now does not exist; - * it is normal for the empty directory 'foo' - * to remain. 
- */ - if (remove_empty_directories(&ref_file)) { - last_errno = errno; - if (!refs_verify_refname_available( - &refs->base, - refname, extras, skip, err)) - strbuf_addf(err, "there are still refs under '%s'", - refname); - goto error_return; - } - resolved = !!refs_resolve_ref_unsafe(&refs->base, - refname, resolve_flags, - &lock->old_oid, type); - } - if (!resolved) { - last_errno = errno; - if (last_errno != ENOTDIR || - !refs_verify_refname_available(&refs->base, refname, - extras, skip, err)) + if (!refs_resolve_ref_unsafe(&refs->base, refname, + RESOLVE_REF_NO_RECURSE, + &lock->old_oid, type)) { + if (!refs_verify_refname_available(&refs->base, refname, + NULL, NULL, err)) strbuf_addf(err, "unable to resolve reference '%s': %s", - refname, strerror(last_errno)); + refname, strerror(errno)); goto error_return; } @@ -980,23 +1016,20 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, */ if (is_null_oid(&lock->old_oid) && refs_verify_refname_available(refs->packed_ref_store, refname, - extras, skip, err)) { - last_errno = ENOTDIR; + NULL, NULL, err)) goto error_return; - } lock->ref_name = xstrdup(refname); if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) { - last_errno = errno; unable_to_lock_message(ref_file.buf, errno, err); goto error_return; } - if (verify_lock(&refs->base, lock, old_oid, mustexist, err)) { - last_errno = errno; - goto error_return; - } + if (refs_read_ref_full(&refs->base, lock->ref_name, + 0, + &lock->old_oid, NULL)) + oidclr(&lock->old_oid); goto out; error_return: @@ -1005,7 +1038,6 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, out: strbuf_release(&ref_file); - errno = last_errno; return lock; } @@ -1415,8 +1447,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store, logmoved = log; - lock = lock_ref_oid_basic(refs, newrefname, NULL, NULL, NULL, - REF_NO_DEREF, NULL, &err); + lock = lock_ref_oid_basic(refs, newrefname, NULL, &err); if (!lock) { if (copy) error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf); @@ -1438,8 +1469,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store, goto out; rollback: - lock = lock_ref_oid_basic(refs, oldrefname, NULL, NULL, NULL, - REF_NO_DEREF, NULL, &err); + lock = lock_ref_oid_basic(refs, oldrefname, NULL, &err); if (!lock) { error("unable to lock %s for rollback: %s", oldrefname, err.buf); strbuf_release(&err); @@ -1569,7 +1599,7 @@ static int log_ref_setup(struct files_ref_store *refs, goto error; } } else { - *logfd = open(logfile, O_APPEND | O_WRONLY, 0666); + *logfd = open(logfile, O_APPEND | O_WRONLY); if (*logfd < 0) { if (errno == ENOENT || errno == EISDIR) { /* @@ -1846,9 +1876,7 @@ static int files_create_symref(struct ref_store *ref_store, struct ref_lock *lock; int ret; - lock = lock_ref_oid_basic(refs, refname, NULL, - NULL, NULL, REF_NO_DEREF, NULL, - &err); + lock = lock_ref_oid_basic(refs, refname, NULL, &err); if (!lock) { error("%s", err.buf); strbuf_release(&err); @@ -2416,7 +2444,7 @@ static int lock_ref_for_update(struct files_ref_store *refs, } ret = lock_raw_ref(refs, update->refname, mustexist, - affected_refnames, NULL, + affected_refnames, &lock, &referent, &update->type, err); if (ret) { @@ -3037,7 +3065,7 @@ static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid, } static int files_reflog_expire(struct ref_store *ref_store, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, 
reflog_expiry_should_prune_fn should_prune_fn, @@ -3054,6 +3082,7 @@ static int files_reflog_expire(struct ref_store *ref_store, int status = 0; int type; struct strbuf err = STRBUF_INIT; + const struct object_id *oid; memset(&cb, 0, sizeof(cb)); cb.flags = flags; @@ -3065,14 +3094,26 @@ static int files_reflog_expire(struct ref_store *ref_store, * reference itself, plus we might need to update the * reference if --updateref was specified: */ - lock = lock_ref_oid_basic(refs, refname, oid, - NULL, NULL, REF_NO_DEREF, - &type, &err); + lock = lock_ref_oid_basic(refs, refname, &type, &err); if (!lock) { error("cannot lock ref '%s': %s", refname, err.buf); strbuf_release(&err); return -1; } + oid = &lock->old_oid; + + /* + * When refs are deleted, their reflog is deleted before the + * ref itself is deleted. This is because there is no separate + * lock for reflog; instead we take a lock on the ref with + * lock_ref_oid_basic(). + * + * If a race happens and the reflog doesn't exist after we've + * acquired the lock that's OK. We've got nothing more to do; + * We were asked to delete the reflog, but someone else + * deleted it! The caller doesn't care that we deleted it, + * just that it is deleted. So we can return successfully. + */ if (!refs_reflog_exists(ref_store, refname)) { unlock_ref(lock); return 0; diff --git a/refs/packed-backend.c b/refs/packed-backend.c index f8aa97d799..47247a1491 100644 --- a/refs/packed-backend.c +++ b/refs/packed-backend.c @@ -724,9 +724,9 @@ static struct snapshot *get_snapshot(struct packed_ref_store *refs) return refs->snapshot; } -static int packed_read_raw_ref(struct ref_store *ref_store, - const char *refname, struct object_id *oid, - struct strbuf *referent, unsigned int *type) +static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno) { struct packed_ref_store *refs = packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); @@ -739,7 +739,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store, if (!rec) { /* refname is not a packed reference. 
*/ - errno = ENOENT; + *failure_errno = ENOENT; return -1; } @@ -1600,6 +1600,7 @@ static int packed_for_each_reflog_ent(struct ref_store *ref_store, const char *refname, each_reflog_ent_fn fn, void *cb_data) { + BUG("packed reference store does not support reflogs"); return 0; } @@ -1608,12 +1609,14 @@ static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store, each_reflog_ent_fn fn, void *cb_data) { + BUG("packed reference store does not support reflogs"); return 0; } static int packed_reflog_exists(struct ref_store *ref_store, const char *refname) { + BUG("packed reference store does not support reflogs"); return 0; } @@ -1627,17 +1630,19 @@ static int packed_create_reflog(struct ref_store *ref_store, static int packed_delete_reflog(struct ref_store *ref_store, const char *refname) { + BUG("packed reference store does not support reflogs"); return 0; } static int packed_reflog_expire(struct ref_store *ref_store, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, void *policy_cb_data) { + BUG("packed reference store does not support reflogs"); return 0; } diff --git a/refs/refs-internal.h b/refs/refs-internal.h index 3155708345..ddd6d7f8eb 100644 --- a/refs/refs-internal.h +++ b/refs/refs-internal.h @@ -593,7 +593,7 @@ typedef int create_reflog_fn(struct ref_store *ref_store, const char *refname, int force_create, struct strbuf *err); typedef int delete_reflog_fn(struct ref_store *ref_store, const char *refname); typedef int reflog_expire_fn(struct ref_store *ref_store, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, @@ -620,11 +620,15 @@ typedef int reflog_expire_fn(struct ref_store *ref_store, * properly-formatted or even safe reference name. NEITHER INPUT NOR * OUTPUT REFERENCE NAMES ARE VALIDATED WITHIN THIS FUNCTION. * - * Return 0 on success. If the ref doesn't exist, set errno to ENOENT - * and return -1. If the ref exists but is neither a symbolic ref nor - * an object ID, it is broken; set REF_ISBROKEN in type, set errno to - * EINVAL, and return -1. If there is another error reading the ref, - * set errno appropriately and return -1. + * Return 0 on success, or -1 on failure. If the ref exists but is neither a + * symbolic ref nor an object ID, it is broken. In this case set REF_ISBROKEN in + * type, and return -1 (failure_errno should not be ENOENT) + * + * failure_errno provides errno codes that are interpreted beyond error + * reporting. The following error codes have special meaning: + * * ENOENT: the ref doesn't exist + * * EISDIR: ref name is a directory + * * ENOTDIR: ref prefix is not a directory * * Backend-specific flags might be set in type as well, regardless of * outcome. @@ -638,9 +642,9 @@ typedef int reflog_expire_fn(struct ref_store *ref_store, * - in all other cases, referent will be untouched, and therefore * refname will still be valid and unchanged. 
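In other words, callers of a backend's read_raw_ref now receive the reason for a failure through failure_errno rather than through the global errno. A sketch of the resulting pattern, loosely mirroring what lock_raw_ref() does in the files backend (peek_raw_ref() and its messages are illustrative only):

----
#include "cache.h"
#include "refs/refs-internal.h"

/* Hypothetical caller demonstrating the failure_errno contract. */
static int peek_raw_ref(struct ref_store *refs, const char *refname)
{
	struct strbuf referent = STRBUF_INIT;
	struct object_id oid;
	unsigned int type;
	int failure_errno = 0;
	int ret;

	ret = refs->be->read_raw_ref(refs, refname, &oid, &referent,
				     &type, &failure_errno);
	if (ret) {
		if (failure_errno == ENOENT)
			warning("ref '%s' does not exist", refname);
		else if (failure_errno == EISDIR)
			warning("a directory is in the way of '%s'", refname);
		else
			warning("cannot read '%s': %s",
				refname, strerror(failure_errno));
	}
	strbuf_release(&referent);
	return ret;
}
----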
*/ -typedef int read_raw_ref_fn(struct ref_store *ref_store, - const char *refname, struct object_id *oid, - struct strbuf *referent, unsigned int *type); +typedef int read_raw_ref_fn(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno); struct ref_storage_be { struct ref_storage_be *next; diff --git a/remote-curl.c b/remote-curl.c index 6c320d5704..598cff7cde 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -185,8 +185,6 @@ static int set_option(const char *name, const char *value) strbuf_detach(&unquoted, NULL)); } return 0; - -#if LIBCURL_VERSION_NUM >= 0x070a08 } else if (!strcmp(name, "family")) { if (!strcmp(value, "ipv4")) git_curl_ipresolve = CURL_IPRESOLVE_V4; @@ -197,7 +195,6 @@ static int set_option(const char *name, const char *value) else return -1; return 0; -#endif /* LIBCURL_VERSION_NUM >= 0x070a08 */ } else if (!strcmp(name, "from-promisor")) { options.from_promisor = 1; return 0; @@ -709,7 +706,6 @@ static size_t rpc_out(void *ptr, size_t eltsize, return avail; } -#ifndef NO_CURL_IOCTL static curlioerr rpc_ioctl(CURL *handle, int cmd, void *clientp) { struct rpc_state *rpc = clientp; @@ -730,7 +726,6 @@ static curlioerr rpc_ioctl(CURL *handle, int cmd, void *clientp) return CURLIOE_UNKNOWNCMD; } } -#endif struct check_pktline_state { char len_buf[4]; @@ -858,7 +853,7 @@ static int probe_rpc(struct rpc_state *rpc, struct slot_results *results) curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE, 4); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); - curl_easy_setopt(slot->curl, CURLOPT_FILE, &buf); + curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, &buf); err = run_slot(slot, results); @@ -949,10 +944,8 @@ retry: rpc->initial_buffer = 1; curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, rpc_out); curl_easy_setopt(slot->curl, CURLOPT_INFILE, rpc); -#ifndef NO_CURL_IOCTL curl_easy_setopt(slot->curl, CURLOPT_IOCTLFUNCTION, rpc_ioctl); curl_easy_setopt(slot->curl, CURLOPT_IOCTLDATA, rpc); -#endif if (options.verbosity > 1) { fprintf(stderr, "POST %s (chunked)\n", rpc->service_name); fflush(stderr); @@ -1023,7 +1016,7 @@ retry: rpc_in_data.slot = slot; rpc_in_data.check_pktline = stateless_connect; memset(&rpc_in_data.pktline_state, 0, sizeof(rpc_in_data.pktline_state)); - curl_easy_setopt(slot->curl, CURLOPT_FILE, &rpc_in_data); + curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, &rpc_in_data); curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 0); @@ -135,7 +135,7 @@ static inline void init_remotes_hash(void) static struct remote *make_remote(const char *name, int len) { - struct remote *ret, *replaced; + struct remote *ret; struct remotes_hash_key lookup; struct hashmap_entry lookup_entry, *e; @@ -162,8 +162,8 @@ static struct remote *make_remote(const char *name, int len) remotes[remotes_nr++] = ret; hashmap_entry_init(&ret->ent, lookup_entry.hash); - replaced = hashmap_put_entry(&remotes_hash, ret, ent); - assert(replaced == NULL); /* no previous entry overwritten */ + if (hashmap_put_entry(&remotes_hash, ret, ent)) + BUG("hashmap_put overwrote entry after hashmap_get returned NULL"); return ret; } @@ -1111,7 +1111,7 @@ static void show_push_unqualified_ref_name_error(const char *dst_value, "Neither worked, so we gave up. 
You must fully qualify the ref."), dst_value, matched_src_name); - if (!advice_push_unqualified_ref_name) + if (!advice_enabled(ADVICE_PUSH_UNQUALIFIED_REF_NAME)) return; if (get_oid(matched_src_name, &oid)) @@ -2118,7 +2118,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, strbuf_addf(sb, _("Your branch is based on '%s', but the upstream is gone.\n"), base); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git branch --unset-upstream\" to fixup)\n")); } else if (!sti) { @@ -2129,7 +2129,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, strbuf_addf(sb, _("Your branch and '%s' refer to different commits.\n"), base); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addf(sb, _(" (use \"%s\" for details)\n"), "git status --ahead-behind"); } else if (!theirs) { @@ -2138,7 +2138,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, "Your branch is ahead of '%s' by %d commits.\n", ours), base, ours); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git push\" to publish your local commits)\n")); } else if (!ours) { @@ -2149,7 +2149,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, "and can be fast-forwarded.\n", theirs), base, theirs); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git pull\" to update your local branch)\n")); } else { @@ -2162,7 +2162,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, "respectively.\n", ours + theirs), base, ours, theirs); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git pull\" to merge the remote branch into yours)\n")); } diff --git a/repository.c b/repository.c index b2bf44c6fa..710a3b4bf8 100644 --- a/repository.c +++ b/repository.c @@ -213,8 +213,7 @@ int repo_submodule_init(struct repository *subrepo, * submodule would not have a worktree. */ strbuf_reset(&gitdir); - strbuf_repo_git_path(&gitdir, superproject, - "modules/%s", sub->name); + submodule_name_to_gitdir(&gitdir, superproject, sub->name); if (repo_init(subrepo, gitdir.buf, NULL)) { ret = -1; diff --git a/revision.c b/revision.c index cddd0542a6..0dabb5a0bc 100644 --- a/revision.c +++ b/revision.c @@ -360,20 +360,18 @@ static struct object *get_reference(struct rev_info *revs, const char *name, unsigned int flags) { struct object *object; + struct commit *commit; /* - * If the repository has commit graphs, repo_parse_commit() avoids - * reading the object buffer, so use it whenever possible. + * If the repository has commit graphs, we try to opportunistically + * look up the object ID in those graphs. Like this, we can avoid + * parsing commit data from disk. 
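Reduced to its core, the fast path looks like the sketch below (resolve_for_walk() is a made-up wrapper; lookup_commit_in_graph() returns NULL when the commit is not covered by a commit-graph file, and the code then falls back to parsing the object from the object database):

----
#include "cache.h"
#include "commit.h"
#include "commit-graph.h"
#include "object.h"

/* Hypothetical wrapper: prefer the commit-graph, fall back to parsing. */
static struct object *resolve_for_walk(struct repository *r,
				       const struct object_id *oid)
{
	struct commit *commit = lookup_commit_in_graph(r, oid);

	if (commit)
		return &commit->object;	/* no commit buffer was read */
	return parse_object(r, oid);	/* may be a tag, tree or blob */
}
----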
*/ - if (oid_object_info(revs->repo, oid, NULL) == OBJ_COMMIT) { - struct commit *c = lookup_commit(revs->repo, oid); - if (!repo_parse_commit(revs->repo, c)) - object = (struct object *) c; - else - object = NULL; - } else { + commit = lookup_commit_in_graph(revs->repo, oid); + if (commit) + object = &commit->object; + else object = parse_object(revs->repo, oid); - } if (!object) { if (revs->ignore_missing) @@ -1534,7 +1532,7 @@ static int handle_one_ref(const char *path, const struct object_id *oid, object = get_reference(cb->all_revs, path, oid, cb->all_flags); add_rev_cmdline(cb->all_revs, object, path, REV_CMD_REF, cb->all_flags); - add_pending_oid(cb->all_revs, path, oid, cb->all_flags); + add_pending_object(cb->all_revs, object, path); return 0; } @@ -2256,6 +2254,10 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg } else if (!strcmp(arg, "--author-date-order")) { revs->sort_order = REV_SORT_BY_AUTHOR_DATE; revs->topo_order = 1; + } else if (!strcmp(arg, "--unsorted-input")) { + if (revs->no_walk) + die(_("--unsorted-input is incompatible with --no-walk")); + revs->unsorted_input = 1; } else if (!strcmp(arg, "--early-output")) { revs->early_output = 100; revs->topo_order = 1; @@ -2651,16 +2653,22 @@ static int handle_revision_pseudo_opt(const char *submodule, } else if (!strcmp(arg, "--not")) { *flags ^= UNINTERESTING | BOTTOM; } else if (!strcmp(arg, "--no-walk")) { - revs->no_walk = REVISION_WALK_NO_WALK_SORTED; + if (!revs->no_walk && revs->unsorted_input) + die(_("--no-walk is incompatible with --unsorted-input")); + revs->no_walk = 1; } else if (skip_prefix(arg, "--no-walk=", &optarg)) { + if (!revs->no_walk && revs->unsorted_input) + die(_("--no-walk is incompatible with --unsorted-input")); + /* * Detached form ("--no-walk X" as opposed to "--no-walk=X") * not allowed, since the argument is optional. 
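At the API level the new option simply flips a bit on struct rev_info, which prepare_revision_walk() consults before sorting the pending commits. A small sketch (walk_in_input_order() is a hypothetical caller):

----
#include "cache.h"
#include "revision.h"

/* Hypothetical setup: visit start points in the order they were given. */
static void walk_in_input_order(struct rev_info *revs)
{
	revs->unsorted_input = 1;	/* what --unsorted-input now sets */

	if (prepare_revision_walk(revs))
		die("revision walk setup failed");
	/*
	 * Without unsorted_input, prepare_revision_walk() would first run
	 * commit_list_sort_by_date() on revs->commits.
	 */
}
----

On the command line this is the new --unsorted-input option, which is refused in combination with --no-walk (and vice versa).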
*/ + revs->no_walk = 1; if (!strcmp(optarg, "sorted")) - revs->no_walk = REVISION_WALK_NO_WALK_SORTED; + revs->unsorted_input = 0; else if (!strcmp(optarg, "unsorted")) - revs->no_walk = REVISION_WALK_NO_WALK_UNSORTED; + revs->unsorted_input = 1; else return error("invalid argument to --no-walk"); } else if (!strcmp(arg, "--do-walk")) { @@ -3584,7 +3592,7 @@ int prepare_revision_walk(struct rev_info *revs) if (!revs->reflog_info) prepare_to_use_bloom_filter(revs); - if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED) + if (!revs->unsorted_input) commit_list_sort_by_date(&revs->commits); if (revs->no_walk) return 0; diff --git a/revision.h b/revision.h index fbb068da9f..0c65a760ee 100644 --- a/revision.h +++ b/revision.h @@ -79,10 +79,6 @@ struct rev_cmdline_info { } *rev; }; -#define REVISION_WALK_WALK 0 -#define REVISION_WALK_NO_WALK_SORTED 1 -#define REVISION_WALK_NO_WALK_UNSORTED 2 - struct oidset; struct topo_walk_info; @@ -129,7 +125,8 @@ struct rev_info { /* Traversal flags */ unsigned int dense:1, prune:1, - no_walk:2, + no_walk:1, + unsorted_input:1, remove_empty_trees:1, simplify_history:1, show_pulls:1, diff --git a/run-command.c b/run-command.c index f72e72cce7..04a07e6366 100644 --- a/run-command.c +++ b/run-command.c @@ -8,6 +8,7 @@ #include "string-list.h" #include "quote.h" #include "config.h" +#include "packfile.h" void child_process_init(struct child_process *child) { @@ -210,9 +211,9 @@ static char *locate_in_PATH(const char *file) return NULL; } -static int exists_in_PATH(const char *file) +int exists_in_PATH(const char *command) { - char *r = locate_in_PATH(file); + char *r = locate_in_PATH(command); int found = r != NULL; free(r); return found; @@ -740,6 +741,9 @@ fail_pipe: fflush(NULL); + if (cmd->close_object_store) + close_object_store(the_repository->objects); + #ifndef GIT_WINDOWS_NATIVE { int notify_pipe[2]; @@ -761,9 +765,7 @@ fail_pipe: notify_pipe[0] = notify_pipe[1] = -1; if (cmd->no_stdin || cmd->no_stdout || cmd->no_stderr) { - null_fd = open("/dev/null", O_RDWR | O_CLOEXEC); - if (null_fd < 0) - die_errno(_("open /dev/null failed")); + null_fd = xopen("/dev/null", O_RDWR | O_CLOEXEC); set_cloexec(null_fd); } @@ -1044,6 +1046,7 @@ int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir, cmd.use_shell = opt & RUN_USING_SHELL ? 1 : 0; cmd.clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0; cmd.wait_after_clean = opt & RUN_WAIT_AFTER_CLEAN ? 1 : 0; + cmd.close_object_store = opt & RUN_CLOSE_OBJECT_STORE ? 1 : 0; cmd.dir = dir; cmd.env = env; cmd.trace2_child_class = tr2_class; @@ -1336,7 +1339,7 @@ const char *find_hook(const char *name) err = errno; #endif - if (err == EACCES && advice_ignored_hook) { + if (err == EACCES && advice_enabled(ADVICE_IGNORED_HOOK)) { static struct string_list advise_given = STRING_LIST_INIT_DUP; if (!string_list_lookup(&advise_given, name)) { @@ -1886,6 +1889,7 @@ int run_auto_maintenance(int quiet) return 0; maint.git_cmd = 1; + maint.close_object_store = 1; strvec_pushl(&maint.args, "maintenance", "run", "--auto", NULL); strvec_push(&maint.args, quiet ? "--quiet" : "--no-quiet"); diff --git a/run-command.h b/run-command.h index af1296769f..b9aff74914 100644 --- a/run-command.h +++ b/run-command.h @@ -134,6 +134,14 @@ struct child_process { */ unsigned use_shell:1; + /** + * Release any open file handles to the object store before running + * the command; This is necessary e.g. 
when the spawned process may + * want to repack because that would delete `.pack` files (and on + * Windows, you cannot delete files that are still in use). + */ + unsigned close_object_store:1; + unsigned stdout_to_stderr:1; unsigned clean_on_exit:1; unsigned wait_after_clean:1; @@ -183,6 +191,18 @@ void child_process_clear(struct child_process *); int is_executable(const char *name); /** + * Check if the command exists on $PATH. This emulates the path search that + * execvp would perform, without actually executing the command so it + * can be used before fork() to prepare to run a command using + * execve() or after execvp() to diagnose why it failed. + * + * The caller should ensure that command contains no directory separators. + * + * Returns 1 if it is found in $PATH or 0 if the command could not be found. + */ +int exists_in_PATH(const char *command); + +/** * Start a sub-process. Takes a pointer to a `struct child_process` * that specifies the details and returns pipe FDs (if requested). * See below for details. @@ -233,13 +253,14 @@ int run_hook_ve(const char *const *env, const char *name, va_list args); */ int run_auto_maintenance(int quiet); -#define RUN_COMMAND_NO_STDIN 1 -#define RUN_GIT_CMD 2 /*If this is to be git sub-command */ -#define RUN_COMMAND_STDOUT_TO_STDERR 4 -#define RUN_SILENT_EXEC_FAILURE 8 -#define RUN_USING_SHELL 16 -#define RUN_CLEAN_ON_EXIT 32 -#define RUN_WAIT_AFTER_CLEAN 64 +#define RUN_COMMAND_NO_STDIN (1<<0) +#define RUN_GIT_CMD (1<<1) +#define RUN_COMMAND_STDOUT_TO_STDERR (1<<2) +#define RUN_SILENT_EXEC_FAILURE (1<<3) +#define RUN_USING_SHELL (1<<4) +#define RUN_CLEAN_ON_EXIT (1<<5) +#define RUN_WAIT_AFTER_CLEAN (1<<6) +#define RUN_CLOSE_OBJECT_STORE (1<<7) /** * Convenience functions that encapsulate a sequence of diff --git a/send-pack.c b/send-pack.c index 5a79e0e711..b3a495b7b1 100644 --- a/send-pack.c +++ b/send-pack.c @@ -425,8 +425,10 @@ static void get_commons_through_negotiation(const char *url, child.no_stdin = 1; child.out = -1; strvec_pushl(&child.args, "fetch", "--negotiate-only", NULL); - for (ref = remote_refs; ref; ref = ref->next) - strvec_pushf(&child.args, "--negotiation-tip=%s", oid_to_hex(&ref->new_oid)); + for (ref = remote_refs; ref; ref = ref->next) { + if (!is_null_oid(&ref->new_oid)) + strvec_pushf(&child.args, "--negotiation-tip=%s", oid_to_hex(&ref->new_oid)); + } strvec_push(&child.args, url); if (start_command(&child)) diff --git a/sequencer.c b/sequencer.c index 7f07cd00f3..614d56f5e2 100644 --- a/sequencer.c +++ b/sequencer.c @@ -403,7 +403,7 @@ static void print_advice(struct repository *r, int show_hint, char *msg = getenv("GIT_CHERRY_PICK_HELP"); if (msg) { - fprintf(stderr, "%s\n", msg); + advise("%s\n", msg); /* * A conflict has occurred but the porcelain * (typically rebase --interactive) wants to take care @@ -418,10 +418,22 @@ static void print_advice(struct repository *r, int show_hint, if (opts->no_commit) advise(_("after resolving the conflicts, mark the corrected paths\n" "with 'git add <paths>' or 'git rm <paths>'")); + else if (opts->action == REPLAY_PICK) + advise(_("After resolving the conflicts, mark them with\n" + "\"git add/rm <pathspec>\", then run\n" + "\"git cherry-pick --continue\".\n" + "You can instead skip this commit with \"git cherry-pick --skip\".\n" + "To abort and get back to the state before \"git cherry-pick\",\n" + "run \"git cherry-pick --abort\".")); + else if (opts->action == REPLAY_REVERT) + advise(_("After resolving the conflicts, mark them with\n" + "\"git add/rm <pathspec>\", then 
run\n" + "\"git revert --continue\".\n" + "You can instead skip this commit with \"git revert --skip\".\n" + "To abort and get back to the state before \"git revert\",\n" + "run \"git revert --abort\".")); else - advise(_("after resolving the conflicts, mark the corrected paths\n" - "with 'git add <paths>' or 'git rm <paths>'\n" - "and commit the result with 'git commit'")); + BUG("unexpected pick action in print_advice()"); } } @@ -486,7 +498,7 @@ static int error_dirty_index(struct repository *repo, struct replay_opts *opts) error(_("your local changes would be overwritten by %s."), _(action_name(opts))); - if (advice_commit_before_merge) + if (advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)) advise(_("commit your changes or stash them to proceed.")); return -1; } @@ -636,7 +648,7 @@ static int do_recursive_merge(struct repository *r, for (i = 0; i < opts->xopts_nr; i++) parse_merge_opt(&o, opts->xopts[i]); - if (opts->strategy && !strcmp(opts->strategy, "ort")) { + if (!opts->strategy || !strcmp(opts->strategy, "ort")) { memset(&result, 0, sizeof(result)); merge_incore_nonrecursive(&o, base_tree, head_tree, next_tree, &result); @@ -652,6 +664,7 @@ static int do_recursive_merge(struct repository *r, merge_switch_to_result(&o, head_tree, &result, 1, show_output); clean = result.clean; } else { + ensure_full_index(r->index); clean = merge_trees(&o, head_tree, next_tree, base_tree); if (is_rebase_i(opts) && clean <= 0) fputs(o.obuf.buf, stdout); @@ -983,7 +996,8 @@ static int run_git_commit(const char *defmsg, cmd.git_cmd = 1; - if (is_rebase_i(opts) && read_env_script(&cmd.env_array)) { + if (is_rebase_i(opts) && !(!defmsg && (flags & AMEND_MSG)) && + read_env_script(&cmd.env_array)) { const char *gpg_opt = gpg_sign_opt_quoted(opts); return error(_(staged_changes_advice), @@ -1241,7 +1255,7 @@ N_("Your name and email address were configured automatically based\n" static const char *implicit_ident_advice(void) { - char *user_config = expand_user_path("~/.gitconfig", 0); + char *user_config = interpolate_path("~/.gitconfig", 0); char *xdg_config = xdg_config_home("config"); int config_exists = file_exists(user_config) || file_exists(xdg_config); @@ -1293,7 +1307,7 @@ void print_commit_summary(struct repository *r, if (!committer_ident_sufficiently_given()) { strbuf_addstr(&format, "\n Committer: "); strbuf_addbuf_percentquote(&format, &committer_ident); - if (advice_implicit_identity) { + if (advice_enabled(ADVICE_IMPLICIT_IDENTITY)) { strbuf_addch(&format, '\n'); strbuf_addstr(&format, implicit_ident_advice()); } @@ -2065,7 +2079,7 @@ static int do_pick_commit(struct repository *r, /* * We do not intend to commit immediately. We just want to * merge the differences in, so let's compute the tree - * that represents the "current" state for merge-recursive + * that represents the "current" state for the merge machinery * to work on. */ if (write_index_as_tree(&head, r->index, r->index_file, 0, NULL)) @@ -2346,6 +2360,7 @@ static int read_and_refresh_cache(struct repository *r, _(action_name(opts))); } refresh_index(r->index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL); + if (index_fd >= 0) { if (write_locked_index(r->index, &index_lock, COMMIT_LOCK | SKIP_IF_UNCHANGED)) { @@ -2353,6 +2368,13 @@ static int read_and_refresh_cache(struct repository *r, _(action_name(opts))); } } + + /* + * If we are resolving merges in any way other than "ort", then + * expand the sparse index. 
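Two hunks in this file change the strategy test from "explicitly ort" to "unset or ort", making ort the default, and the comment above expands the sparse index only for the other backends. A tiny sketch of that NULL-means-default check; the helper name is made up for illustration.

    #include <stdio.h>
    #include <string.h>

    /* An unset strategy means the default backend, which is now "ort". */
    static int using_default_ort(const char *strategy)
    {
        return !strategy || !strcmp(strategy, "ort");
    }

    int main(void)
    {
        const char *cases[] = { NULL, "ort", "recursive" };
        int i;

        for (i = 0; i < 3; i++)
            printf("%s -> %s\n",
                   cases[i] ? cases[i] : "(unset)",
                   using_default_ort(cases[i])
                   ? "ort: sparse index can stay sparse"
                   : "other backend: expand the sparse index first");
        return 0;
    }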
+ */ + if (opts->strategy && strcmp(opts->strategy, "ort")) + ensure_full_index(r->index); return 0; } @@ -3041,7 +3063,7 @@ static int create_seq_dir(struct repository *r) } if (in_progress_error) { error("%s", in_progress_error); - if (advice_sequencer_in_use) + if (advice_enabled(ADVICE_SEQUENCER_IN_USE)) advise(in_progress_advice, advise_skip ? "--skip | " : ""); return -1; @@ -3245,7 +3267,7 @@ int sequencer_skip(struct repository *r, struct replay_opts *opts) give_advice: error(_("there is nothing to skip")); - if (advice_resolve_conflict) { + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) { advise(_("have you committed already?\n" "try \"git %s --continue\""), action == REPLAY_REVERT ? "revert" : "cherry-pick"); @@ -3739,10 +3761,9 @@ static struct commit *lookup_label(const char *label, int len, static int do_merge(struct repository *r, struct commit *commit, const char *arg, int arg_len, - int flags, struct replay_opts *opts) + int flags, int *check_todo, struct replay_opts *opts) { - int run_commit_flags = (flags & TODO_EDIT_MERGE_MSG) ? - EDIT_MSG | VERIFY_MSG : 0; + int run_commit_flags = 0; struct strbuf ref_name = STRBUF_INIT; struct commit *head_commit, *merge_commit, *i; struct commit_list *bases, *j, *reversed = NULL; @@ -3816,6 +3837,45 @@ static int do_merge(struct repository *r, goto leave_merge; } + /* + * If HEAD is not identical to the first parent of the original merge + * commit, we cannot fast-forward. + */ + can_fast_forward = opts->allow_ff && commit && commit->parents && + oideq(&commit->parents->item->object.oid, + &head_commit->object.oid); + + /* + * If any merge head is different from the original one, we cannot + * fast-forward. + */ + if (can_fast_forward) { + struct commit_list *p = commit->parents->next; + + for (j = to_merge; j && p; j = j->next, p = p->next) + if (!oideq(&j->item->object.oid, + &p->item->object.oid)) { + can_fast_forward = 0; + break; + } + /* + * If the number of merge heads differs from the original merge + * commit, we cannot fast-forward. + */ + if (j || p) + can_fast_forward = 0; + } + + if (can_fast_forward) { + rollback_lock_file(&lock); + ret = fast_forward_to(r, &commit->object.oid, + &head_commit->object.oid, 0, opts); + if (flags & TODO_EDIT_MERGE_MSG) + goto fast_forward_edit; + + goto leave_merge; + } + if (commit) { const char *encoding = get_commit_output_encoding(); const char *message = logmsg_reencode(commit, NULL, encoding); @@ -3865,46 +3925,6 @@ static int do_merge(struct repository *r, } } - /* - * If HEAD is not identical to the first parent of the original merge - * commit, we cannot fast-forward. - */ - can_fast_forward = opts->allow_ff && commit && commit->parents && - oideq(&commit->parents->item->object.oid, - &head_commit->object.oid); - - /* - * If any merge head is different from the original one, we cannot - * fast-forward. - */ - if (can_fast_forward) { - struct commit_list *p = commit->parents->next; - - for (j = to_merge; j && p; j = j->next, p = p->next) - if (!oideq(&j->item->object.oid, - &p->item->object.oid)) { - can_fast_forward = 0; - break; - } - /* - * If the number of merge heads differs from the original merge - * commit, we cannot fast-forward. 
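The block being moved here decides whether a replayed merge can fast-forward: HEAD must equal the original merge's first parent, and the listed merge heads must match the remaining parents one-for-one with none left over. A standalone model of that comparison, using short hex strings in place of object ids rather than the sequencer's data structures:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * head:         current HEAD
     * orig_parents: parents of the merge being replayed (orig_parents[0]
     *               is its first parent), nparents entries
     * to_merge:     merge heads named on the todo line, nheads entries
     */
    static int can_fast_forward(const char *head,
                                const char **orig_parents, size_t nparents,
                                const char **to_merge, size_t nheads)
    {
        size_t i;

        if (!nparents || strcmp(head, orig_parents[0]))
            return 0;        /* HEAD is not the original first parent */
        if (nheads != nparents - 1)
            return 0;        /* different number of merge heads */
        for (i = 0; i < nheads; i++)
            if (strcmp(to_merge[i], orig_parents[i + 1]))
                return 0;    /* some merge head changed */
        return 1;
    }

    int main(void)
    {
        const char *parents[] = { "1111", "2222", "3333" };
        const char *heads[] = { "2222", "3333" };

        printf("%d\n", can_fast_forward("1111", parents, 3, heads, 2)); /* 1 */
        printf("%d\n", can_fast_forward("9999", parents, 3, heads, 2)); /* 0 */
        return 0;
    }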
- */ - if (j || p) - can_fast_forward = 0; - } - - if (can_fast_forward) { - rollback_lock_file(&lock); - ret = fast_forward_to(r, &commit->object.oid, - &head_commit->object.oid, 0, opts); - if (flags & TODO_EDIT_MERGE_MSG) { - run_commit_flags |= AMEND_MSG; - goto fast_forward_edit; - } - goto leave_merge; - } - if (strategy || to_merge->next) { /* Octopus merge */ struct child_process cmd = CHILD_PROCESS_INIT; @@ -3935,7 +3955,10 @@ static int do_merge(struct repository *r, strvec_pushf(&cmd.args, "-X%s", opts->xopts[k]); } - strvec_push(&cmd.args, "--no-edit"); + if (!(flags & TODO_EDIT_MERGE_MSG)) + strvec_push(&cmd.args, "--no-edit"); + else + strvec_push(&cmd.args, "--edit"); strvec_push(&cmd.args, "--no-ff"); strvec_push(&cmd.args, "--no-log"); strvec_push(&cmd.args, "--no-stat"); @@ -3988,7 +4011,7 @@ static int do_merge(struct repository *r, o.branch2 = ref_name.buf; o.buffer_output = 2; - if (opts->strategy && !strcmp(opts->strategy, "ort")) { + if (!opts->strategy || !strcmp(opts->strategy, "ort")) { /* * TODO: Should use merge_incore_recursive() and * merge_switch_to_result(), skipping the call to @@ -4035,10 +4058,17 @@ static int do_merge(struct repository *r, * value (a negative one would indicate that the `merge` * command needs to be rescheduled). */ - fast_forward_edit: ret = !!run_git_commit(git_path_merge_msg(r), opts, run_commit_flags); + if (!ret && flags & TODO_EDIT_MERGE_MSG) { + fast_forward_edit: + *check_todo = 1; + run_commit_flags |= AMEND_MSG | EDIT_MSG | VERIFY_MSG; + ret = !!run_git_commit(NULL, opts, run_commit_flags); + } + + leave_merge: strbuf_release(&ref_name); rollback_lock_file(&lock); @@ -4405,9 +4435,8 @@ static int pick_commits(struct repository *r, if ((res = do_reset(r, arg, item->arg_len, opts))) reschedule = 1; } else if (item->command == TODO_MERGE) { - if ((res = do_merge(r, item->commit, - arg, item->arg_len, - item->flags, opts)) < 0) + if ((res = do_merge(r, item->commit, arg, item->arg_len, + item->flags, &check_todo, opts)) < 0) reschedule = 1; else if (item->commit) record_in_rewritten(&item->commit->object.oid, @@ -4716,6 +4745,9 @@ static int commit_staged_changes(struct repository *r, refs_delete_ref(get_main_ref_store(r), "", "CHERRY_PICK_HEAD", NULL, 0)) return error(_("could not remove CHERRY_PICK_HEAD")); + if (unlink(git_path_merge_msg(r)) && errno != ENOENT) + return error_errno(_("could not remove '%s'"), + git_path_merge_msg(r)); if (!final_fixup) return 0; } @@ -5099,6 +5131,7 @@ static int make_script_with_merges(struct pretty_print_context *pp, int keep_empty = flags & TODO_LIST_KEEP_EMPTY; int rebase_cousins = flags & TODO_LIST_REBASE_COUSINS; int root_with_onto = flags & TODO_LIST_ROOT_WITH_ONTO; + int skipped_commit = 0; struct strbuf buf = STRBUF_INIT, oneline = STRBUF_INIT; struct strbuf label = STRBUF_INIT; struct commit_list *commits = NULL, **tail = &commits, *iter; @@ -5149,8 +5182,13 @@ static int make_script_with_merges(struct pretty_print_context *pp, oidset_insert(&interesting, &commit->object.oid); is_empty = is_original_commit_empty(commit); - if (!is_empty && (commit->object.flags & PATCHSAME)) + if (!is_empty && (commit->object.flags & PATCHSAME)) { + if (flags & TODO_LIST_WARN_SKIPPED_CHERRY_PICKS) + warning(_("skipped previously applied commit %s"), + short_commit_name(commit)); + skipped_commit = 1; continue; + } if (is_empty && !keep_empty) continue; @@ -5214,6 +5252,9 @@ static int make_script_with_merges(struct pretty_print_context *pp, oidcpy(&entry->entry.oid, &commit->object.oid); 
oidmap_put(&commit2todo, entry); } + if (skipped_commit) + advise_if_enabled(ADVICE_SKIPPED_CHERRY_PICKS, + _("use --reapply-cherry-picks to include skipped commits")); /* * Second phase: @@ -5334,6 +5375,7 @@ int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, const char *insn = flags & TODO_LIST_ABBREVIATE_CMDS ? "p" : "pick"; int rebase_merges = flags & TODO_LIST_REBASE_MERGES; int reapply_cherry_picks = flags & TODO_LIST_REAPPLY_CHERRY_PICKS; + int skipped_commit = 0; repo_init_revisions(r, &revs, NULL); revs.verbose_header = 1; @@ -5369,8 +5411,13 @@ int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, while ((commit = get_revision(&revs))) { int is_empty = is_original_commit_empty(commit); - if (!is_empty && (commit->object.flags & PATCHSAME)) + if (!is_empty && (commit->object.flags & PATCHSAME)) { + if (flags & TODO_LIST_WARN_SKIPPED_CHERRY_PICKS) + warning(_("skipped previously applied commit %s"), + short_commit_name(commit)); + skipped_commit = 1; continue; + } if (is_empty && !keep_empty) continue; strbuf_addf(out, "%s %s ", insn, @@ -5380,6 +5427,9 @@ int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, strbuf_addf(out, " %c empty", comment_line_char); strbuf_addch(out, '\n'); } + if (skipped_commit) + advise_if_enabled(ADVICE_SKIPPED_CHERRY_PICKS, + _("use --reapply-cherry-picks to include skipped commits")); return 0; } diff --git a/sequencer.h b/sequencer.h index d57d8ea23d..2088344cb3 100644 --- a/sequencer.h +++ b/sequencer.h @@ -156,6 +156,7 @@ int sequencer_remove_state(struct replay_opts *opts); */ #define TODO_LIST_ROOT_WITH_ONTO (1U << 6) #define TODO_LIST_REAPPLY_CHERRY_PICKS (1U << 7) +#define TODO_LIST_WARN_SKIPPED_CHERRY_PICKS (1U << 8) int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, const char **argv, unsigned flags); @@ -9,7 +9,8 @@ #include "serve.h" #include "upload-pack.h" -static int advertise_sid; +static int advertise_sid = -1; +static int client_hash_algo = GIT_HASH_SHA1; static int always_advertise(struct repository *r, struct strbuf *value) @@ -33,8 +34,22 @@ static int object_format_advertise(struct repository *r, return 1; } +static void object_format_receive(struct repository *r, + const char *algo_name) +{ + if (!algo_name) + die("object-format capability requires an argument"); + + client_hash_algo = hash_algo_by_name(algo_name); + if (client_hash_algo == GIT_HASH_UNKNOWN) + die("unknown object format '%s'", algo_name); +} + static int session_id_advertise(struct repository *r, struct strbuf *value) { + if (advertise_sid == -1 && + git_config_get_bool("transfer.advertisesid", &advertise_sid)) + advertise_sid = 0; if (!advertise_sid) return 0; if (value) @@ -42,6 +57,14 @@ static int session_id_advertise(struct repository *r, struct strbuf *value) return 1; } +static void session_id_receive(struct repository *r, + const char *client_sid) +{ + if (!client_sid) + client_sid = ""; + trace2_data_string("transfer", NULL, "client-sid", client_sid); +} + struct protocol_capability { /* * The name of the capability. The server uses this name when @@ -60,34 +83,70 @@ struct protocol_capability { /* * Function called when a client requests the capability as a command. - * The function will be provided the capabilities requested via 'keys' - * as well as a struct packet_reader 'request' which the command should + * Will be provided a struct packet_reader 'request' which it should * use to read the command specific part of the request. 
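Capability keys sent by the client are either a bare name ("server-option") or carry a value ("object-format=sha256"); the reworked get_capability() below hands the value part back to its caller so the new receive() callbacks can consume it. A minimal standalone parse of that key shape; the helper is illustrative and omits the capability-table lookup of the serve.c function.

    #include <stdio.h>
    #include <string.h>

    /*
     * Match "key" against a capability name.  On success return 1 and set
     * *value to the text after '=' for "name=value", or to NULL for a bare
     * "name".  Return 0 if the key does not refer to this capability.
     */
    static int match_capability(const char *key, const char *name,
                                const char **value)
    {
        size_t len = strlen(name);

        if (strncmp(key, name, len))
            return 0;
        if (!key[len]) {
            *value = NULL;
            return 1;
        }
        if (key[len] == '=') {
            *value = key + len + 1;
            return 1;
        }
        return 0;    /* e.g. "object-formats" must not match "object-format" */
    }

    int main(void)
    {
        const char *value;

        if (match_capability("object-format=sha256", "object-format", &value))
            printf("value: %s\n", value ? value : "(none)");
        if (match_capability("server-option", "server-option", &value))
            printf("value: %s\n", value ? value : "(none)");
        return 0;
    }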
Every command * MUST read until a flush packet is seen before sending a response. * * This field should be NULL for capabilities which are not commands. */ - int (*command)(struct repository *r, - struct strvec *keys, - struct packet_reader *request); + int (*command)(struct repository *r, struct packet_reader *request); + + /* + * Function called when a client requests the capability as a + * non-command. This may be NULL if the capability does nothing. + * + * For a capability of the form "foo=bar", the value string points to + * the content after the "=" (i.e., "bar"). For simple capabilities + * (just "foo"), it is NULL. + */ + void (*receive)(struct repository *r, const char *value); }; static struct protocol_capability capabilities[] = { - { "agent", agent_advertise, NULL }, - { "ls-refs", ls_refs_advertise, ls_refs }, - { "fetch", upload_pack_advertise, upload_pack_v2 }, - { "server-option", always_advertise, NULL }, - { "object-format", object_format_advertise, NULL }, - { "session-id", session_id_advertise, NULL }, - { "object-info", always_advertise, cap_object_info }, + { + .name = "agent", + .advertise = agent_advertise, + }, + { + .name = "ls-refs", + .advertise = ls_refs_advertise, + .command = ls_refs, + }, + { + .name = "fetch", + .advertise = upload_pack_advertise, + .command = upload_pack_v2, + }, + { + .name = "server-option", + .advertise = always_advertise, + }, + { + .name = "object-format", + .advertise = object_format_advertise, + .receive = object_format_receive, + }, + { + .name = "session-id", + .advertise = session_id_advertise, + .receive = session_id_receive, + }, + { + .name = "object-info", + .advertise = always_advertise, + .command = cap_object_info, + }, }; -static void advertise_capabilities(void) +void protocol_v2_advertise_capabilities(void) { struct strbuf capability = STRBUF_INIT; struct strbuf value = STRBUF_INIT; int i; + /* serve by default supports v2 */ + packet_write_fmt(1, "version 2\n"); + for (i = 0; i < ARRAY_SIZE(capabilities); i++) { struct protocol_capability *c = &capabilities[i]; @@ -112,7 +171,7 @@ static void advertise_capabilities(void) strbuf_release(&value); } -static struct protocol_capability *get_capability(const char *key) +static struct protocol_capability *get_capability(const char *key, const char **value) { int i; @@ -122,31 +181,46 @@ static struct protocol_capability *get_capability(const char *key) for (i = 0; i < ARRAY_SIZE(capabilities); i++) { struct protocol_capability *c = &capabilities[i]; const char *out; - if (skip_prefix(key, c->name, &out) && (!*out || *out == '=')) + if (!skip_prefix(key, c->name, &out)) + continue; + if (!*out) { + *value = NULL; return c; + } + if (*out++ == '=') { + *value = out; + return c; + } } return NULL; } -static int is_valid_capability(const char *key) +static int receive_client_capability(const char *key) { - const struct protocol_capability *c = get_capability(key); + const char *value; + const struct protocol_capability *c = get_capability(key, &value); - return c && c->advertise(the_repository, NULL); + if (!c || c->command || !c->advertise(the_repository, NULL)) + return 0; + + if (c->receive) + c->receive(the_repository, value); + return 1; } -static int is_command(const char *key, struct protocol_capability **command) +static int parse_command(const char *key, struct protocol_capability **command) { const char *out; if (skip_prefix(key, "command=", &out)) { - struct protocol_capability *cmd = get_capability(out); + const char *value; + struct protocol_capability *cmd = 
get_capability(out, &value); if (*command) die("command '%s' requested after already requesting command '%s'", out, (*command)->name); - if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command) + if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command || value) die("invalid command '%s'", out); *command = cmd; @@ -156,42 +230,6 @@ static int is_command(const char *key, struct protocol_capability **command) return 0; } -int has_capability(const struct strvec *keys, const char *capability, - const char **value) -{ - int i; - for (i = 0; i < keys->nr; i++) { - const char *out; - if (skip_prefix(keys->v[i], capability, &out) && - (!*out || *out == '=')) { - if (value) { - if (*out == '=') - out++; - *value = out; - } - return 1; - } - } - - return 0; -} - -static void check_algorithm(struct repository *r, struct strvec *keys) -{ - int client = GIT_HASH_SHA1, server = hash_algo_by_ptr(r->hash_algo); - const char *algo_name; - - if (has_capability(keys, "object-format", &algo_name)) { - client = hash_algo_by_name(algo_name); - if (client == GIT_HASH_UNKNOWN) - die("unknown object format '%s'", algo_name); - } - - if (client != server) - die("mismatched object format: server %s; client %s\n", - r->hash_algo->name, hash_algos[client].name); -} - enum request_state { PROCESS_REQUEST_KEYS, PROCESS_REQUEST_DONE, @@ -201,9 +239,8 @@ static int process_request(void) { enum request_state state = PROCESS_REQUEST_KEYS; struct packet_reader reader; - struct strvec keys = STRVEC_INIT; + int seen_capability_or_command = 0; struct protocol_capability *command = NULL; - const char *client_sid; packet_reader_init(&reader, 0, NULL, 0, PACKET_READ_CHOMP_NEWLINE | @@ -223,10 +260,9 @@ static int process_request(void) case PACKET_READ_EOF: BUG("Should have already died when seeing EOF"); case PACKET_READ_NORMAL: - /* collect request; a sequence of keys and values */ - if (is_command(reader.line, &command) || - is_valid_capability(reader.line)) - strvec_push(&keys, reader.line); + if (parse_command(reader.line, &command) || + receive_client_capability(reader.line)) + seen_capability_or_command = 1; else die("unknown capability '%s'", reader.line); @@ -238,7 +274,7 @@ static int process_request(void) * If no command and no keys were given then the client * wanted to terminate the connection. 
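The capabilities[] table earlier in this file now uses designated initializers, so each entry names only the callbacks it provides, and process_request() dispatches client input to receive() for plain capabilities and to exactly one command() callback. A toy table in the same style, with stub callbacks standing in for Git's real ones:

    #include <stddef.h>
    #include <stdio.h>

    struct capability {
        const char *name;
        int (*advertise)(void);              /* may the server offer this? */
        void (*receive)(const char *value);  /* plain capability, maybe "=value" */
        int (*command)(void);                /* capability usable as a command */
    };

    static int always(void) { return 1; }
    static void got_session_id(const char *v) { printf("session-id: %s\n", v ? v : ""); }
    static int run_ls_refs(void) { printf("running ls-refs\n"); return 0; }

    /* Designated initializers: unnamed callbacks stay NULL. */
    static struct capability capabilities[] = {
        { .name = "agent",      .advertise = always },
        { .name = "session-id", .advertise = always, .receive = got_session_id },
        { .name = "ls-refs",    .advertise = always, .command = run_ls_refs },
    };

    int main(void)
    {
        size_t i;

        for (i = 0; i < sizeof(capabilities) / sizeof(capabilities[0]); i++) {
            struct capability *c = &capabilities[i];

            if (!c->advertise())
                continue;
            if (c->receive)
                c->receive("example");
            if (c->command)
                c->command();
        }
        return 0;
    }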
*/ - if (!keys.nr) + if (!seen_capability_or_command) return 1; /* @@ -265,40 +301,26 @@ static int process_request(void) if (!command) die("no command requested"); - check_algorithm(the_repository, &keys); - - if (has_capability(&keys, "session-id", &client_sid)) - trace2_data_string("transfer", NULL, "client-sid", client_sid); + if (client_hash_algo != hash_algo_by_ptr(the_repository->hash_algo)) + die("mismatched object format: server %s; client %s\n", + the_repository->hash_algo->name, + hash_algos[client_hash_algo].name); - command->command(the_repository, &keys, &reader); + command->command(the_repository, &reader); - strvec_clear(&keys); return 0; } -/* Main serve loop for protocol version 2 */ -void serve(struct serve_options *options) +void protocol_v2_serve_loop(int stateless_rpc) { - git_config_get_bool("transfer.advertisesid", &advertise_sid); - - if (options->advertise_capabilities || !options->stateless_rpc) { - /* serve by default supports v2 */ - packet_write_fmt(1, "version 2\n"); - - advertise_capabilities(); - /* - * If only the list of capabilities was requested exit - * immediately after advertising capabilities - */ - if (options->advertise_capabilities) - return; - } + if (!stateless_rpc) + protocol_v2_advertise_capabilities(); /* * If stateless-rpc was requested then exit after * a single request/response exchange */ - if (options->stateless_rpc) { + if (stateless_rpc) { process_request(); } else { for (;;) @@ -1,15 +1,7 @@ #ifndef SERVE_H #define SERVE_H -struct strvec; -int has_capability(const struct strvec *keys, const char *capability, - const char **value); - -struct serve_options { - unsigned advertise_capabilities; - unsigned stateless_rpc; -}; -#define SERVE_OPTIONS_INIT { 0 } -void serve(struct serve_options *options); +void protocol_v2_advertise_capabilities(void); +void protocol_v2_serve_loop(int stateless_rpc); #endif /* SERVE_H */ @@ -1423,11 +1423,9 @@ const char *resolve_gitdir_gently(const char *suspect, int *return_error_code) /* if any standard file descriptor is missing open it to /dev/null */ void sanitize_stdfds(void) { - int fd = open("/dev/null", O_RDWR, 0); - while (fd != -1 && fd < 2) - fd = dup(fd); - if (fd == -1) - die_errno(_("open /dev/null or dup failed")); + int fd = xopen("/dev/null", O_RDWR); + while (fd < 2) + fd = xdup(fd); if (fd > 2) close(fd); } diff --git a/sparse-index.c b/sparse-index.c index c6b4feec41..7b7ff79e04 100644 --- a/sparse-index.c +++ b/sparse-index.c @@ -33,19 +33,14 @@ static int convert_to_sparse_rec(struct index_state *istate, { int i, can_convert = 1; int start_converted = num_converted; - enum pattern_match_result match; - int dtype = DT_UNKNOWN; struct strbuf child_path = STRBUF_INIT; - struct pattern_list *pl = istate->sparse_checkout_patterns; /* * Is the current path outside of the sparse cone? * Then check if the region can be replaced by a sparse * directory entry (everything is sparse and merged). 
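path_in_sparse_checkout() replaces the open-coded pattern match here. In cone mode the core question is whether the path lies inside one of the recursively included directories; the sketch below keeps only that prefix test and ignores the toplevel-file and parent-directory rules of the real pattern code, with an invented directory list.

    #include <stdio.h>
    #include <string.h>

    /* Directories selected by a cone-mode sparse-checkout (illustrative). */
    static const char *cone_dirs[] = { "src/", "docs/" };

    /* Greatly simplified: inside the cone iff under one of the listed dirs. */
    static int path_in_cone(const char *path)
    {
        size_t i;

        for (i = 0; i < sizeof(cone_dirs) / sizeof(cone_dirs[0]); i++)
            if (!strncmp(path, cone_dirs[i], strlen(cone_dirs[i])))
                return 1;
        return 0;
    }

    int main(void)
    {
        const char *paths[] = { "src/main.c", "vendor/lib.c" };
        size_t i;

        for (i = 0; i < 2; i++)
            printf("%-14s %s\n", paths[i],
                   path_in_cone(paths[i])
                   ? "in cone: keep individual entries"
                   : "outside cone: may collapse to a sparse directory entry");
        return 0;
    }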
*/ - match = path_matches_pattern_list(ct_path, ct_pathlen, - NULL, &dtype, pl, istate); - if (match != NOT_MATCHED) + if (path_in_sparse_checkout(ct_path, istate)) can_convert = 0; for (i = start; can_convert && i < end; i++) { @@ -127,41 +122,51 @@ static int index_has_unmerged_entries(struct index_state *istate) return 0; } -int convert_to_sparse(struct index_state *istate) +int convert_to_sparse(struct index_state *istate, int flags) { int test_env; - if (istate->split_index || istate->sparse_index || + if (istate->sparse_index || !istate->cache_nr || !core_apply_sparse_checkout || !core_sparse_checkout_cone) return 0; if (!istate->repo) istate->repo = the_repository; - /* - * The GIT_TEST_SPARSE_INDEX environment variable triggers the - * index.sparse config variable to be on. - */ - test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1); - if (test_env >= 0) - set_sparse_index_config(istate->repo, test_env); - - /* - * Only convert to sparse if index.sparse is set. - */ - prepare_repo_settings(istate->repo); - if (!istate->repo->settings.sparse_index) - return 0; + if (!(flags & SPARSE_INDEX_MEMORY_ONLY)) { + /* + * The sparse index is not (yet) integrated with a split index. + */ + if (istate->split_index) + return 0; + /* + * The GIT_TEST_SPARSE_INDEX environment variable triggers the + * index.sparse config variable to be on. + */ + test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1); + if (test_env >= 0) + set_sparse_index_config(istate->repo, test_env); - if (!istate->sparse_checkout_patterns) { - istate->sparse_checkout_patterns = xcalloc(1, sizeof(struct pattern_list)); - if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0) + /* + * Only convert to sparse if index.sparse is set. + */ + prepare_repo_settings(istate->repo); + if (!istate->repo->settings.sparse_index) return 0; } - if (!istate->sparse_checkout_patterns->use_cone_patterns) { - warning(_("attempting to use sparse-index without cone mode")); - return -1; - } + if (init_sparse_checkout_patterns(istate)) + return 0; + + /* + * We need cone-mode patterns to use sparse-index. If a user edits + * their sparse-checkout file manually, then we can detect during + * parsing that they are not actually using cone-mode patterns and + * hence we need to abort this conversion _without error_. Warnings + * already exist in the pattern parsing to inform the user of their + * bad patterns. + */ + if (!istate->sparse_checkout_patterns->use_cone_patterns) + return 0; /* * NEEDSWORK: If we have unmerged entries, then stay full. @@ -172,10 +177,15 @@ int convert_to_sparse(struct index_state *istate) /* Clear and recompute the cache-tree */ cache_tree_free(&istate->cache_tree); - if (cache_tree_update(istate, 0)) { - warning(_("unable to update cache-tree, staying full")); - return -1; - } + /* + * Silently return if there is a problem with the cache tree update, + * which might just be due to a conflict state in some entry. + * + * This might create new tree objects, so be sure to use + * WRITE_TREE_MISSING_OK. + */ + if (cache_tree_update(istate, WRITE_TREE_MISSING_OK)) + return 0; remove_fsmonitor(istate); @@ -283,6 +293,7 @@ void ensure_full_index(struct index_state *istate) /* Copy back into original index. 
*/ memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash)); + memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash)); istate->sparse_index = 0; free(istate->cache); istate->cache = full->cache; diff --git a/sparse-index.h b/sparse-index.h index 1115a0d7dd..9f3d7bc7fa 100644 --- a/sparse-index.h +++ b/sparse-index.h @@ -2,7 +2,8 @@ #define SPARSE_INDEX_H__ struct index_state; -int convert_to_sparse(struct index_state *istate); +#define SPARSE_INDEX_MEMORY_ONLY (1 << 0) +int convert_to_sparse(struct index_state *istate, int flags); /* * Some places in the codebase expect to search for a specific path. @@ -29,8 +29,8 @@ extern const char *empty_strvec[]; */ struct strvec { const char **v; - int nr; - int alloc; + size_t nr; + size_t alloc; }; #define STRVEC_INIT { empty_strvec, 0, 0 } diff --git a/submodule-config.c b/submodule-config.c index 2026120fb3..f95344028b 100644 --- a/submodule-config.c +++ b/submodule-config.c @@ -649,9 +649,10 @@ static void config_from_gitmodules(config_fn_t fn, struct repository *repo, void config_source.file = file; } else if (repo_get_oid(repo, GITMODULES_INDEX, &oid) >= 0 || repo_get_oid(repo, GITMODULES_HEAD, &oid) >= 0) { + config_source.repo = repo; config_source.blob = oidstr = xstrdup(oid_to_hex(&oid)); if (repo != the_repository) - add_to_alternates_memory(repo->objects->odb->path); + add_submodule_odb_by_path(repo->objects->odb->path); } else { goto out; } @@ -702,7 +703,7 @@ void gitmodules_config_oid(const struct object_id *commit_oid) if (gitmodule_oid_from_commit(commit_oid, &oid, &rev)) { git_config_from_blob_oid(gitmodules_cb, rev.buf, - &oid, the_repository); + the_repository, &oid, the_repository); } strbuf_release(&rev); diff --git a/submodule.c b/submodule.c index 8e611fe1db..78aed03d92 100644 --- a/submodule.c +++ b/submodule.c @@ -165,6 +165,8 @@ void stage_updated_gitmodules(struct index_state *istate) die(_("staging updated .gitmodules failed")); } +static struct string_list added_submodule_odb_paths = STRING_LIST_INIT_NODUP; + /* TODO: remove this function, use repo_submodule_init instead. */ int add_submodule_odb(const char *path) { @@ -178,12 +180,33 @@ int add_submodule_odb(const char *path) ret = -1; goto done; } - add_to_alternates_memory(objects_directory.buf); + string_list_insert(&added_submodule_odb_paths, + strbuf_detach(&objects_directory, NULL)); done: strbuf_release(&objects_directory); return ret; } +void add_submodule_odb_by_path(const char *path) +{ + string_list_insert(&added_submodule_odb_paths, xstrdup(path)); +} + +int register_all_submodule_odb_as_alternates(void) +{ + int i; + int ret = added_submodule_odb_paths.nr; + + for (i = 0; i < added_submodule_odb_paths.nr; i++) + add_to_alternates_memory(added_submodule_odb_paths.items[i].string); + if (ret) { + string_list_clear(&added_submodule_odb_paths, 0); + if (git_env_bool("GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB", 0)) + BUG("register_all_submodule_odb_as_alternates() called"); + } + return ret; +} + void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt, const char *path) { @@ -237,6 +260,11 @@ int option_parse_recurse_submodules_worktree_updater(const struct option *opt, /* * Determine if a submodule has been initialized at a given 'path' */ +/* + * NEEDSWORK: Emit a warning if submodule.active exists, but is valueless, + * ie, the config looks like: "[submodule] active\n". + * Since that is an invalid pathspec, we should inform the user. 
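With this change, add_submodule_odb() and add_submodule_odb_by_path() only record object directories; they become alternates later, when register_all_submodule_odb_as_alternates() drains the list (and GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB can turn that into a BUG for test coverage). A small model of the collect-then-flush pattern, using a fixed array and an ordinary environment check instead of Git's string_list and BUG(); all names here are made up.

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PATHS 16

    static const char *pending_paths[MAX_PATHS];
    static int pending_nr;

    /* Step 1: remember the path; a real implementation would copy it. */
    static void queue_submodule_odb(const char *path)
    {
        if (pending_nr < MAX_PATHS)
            pending_paths[pending_nr++] = path;
    }

    /* Step 2: register everything that was queued, then clear the list. */
    static int register_all_queued(void)
    {
        int i, registered = pending_nr;

        for (i = 0; i < pending_nr; i++)
            printf("adding alternate: %s\n", pending_paths[i]);
        pending_nr = 0;
        /* stand-in for the GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB check */
        if (registered && getenv("FAKE_FATAL_REGISTER"))
            fprintf(stderr, "registration happened (test mode)\n");
        return registered;
    }

    int main(void)
    {
        queue_submodule_odb("sub1/.git/objects");
        queue_submodule_odb("sub2/.git/objects");
        printf("registered %d path(s)\n", register_all_queued());
        return 0;
    }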
+ */ int is_submodule_active(struct repository *repo, const char *path) { int ret = 0; @@ -697,8 +725,20 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path, strvec_push(&cp.args, oid_to_hex(new_oid)); prepare_submodule_repo_env(&cp.env_array); - if (start_command(&cp)) + + if (!is_directory(path)) { + /* fall back to absorbed git dir, if any */ + if (!sub) + goto done; + cp.dir = sub->gitdir; + strvec_push(&cp.env_array, GIT_DIR_ENVIRONMENT "=."); + strvec_push(&cp.env_array, GIT_WORK_TREE_ENVIRONMENT "=."); + } + + if (start_command(&cp)) { diff_emit_submodule_error(o, "(diff failed)\n"); + goto done; + } while (strbuf_getwholeline_fd(&sb, cp.out, '\n') != EOF) diff_emit_submodule_pipethrough(o, sb.buf, sb.len); @@ -1819,14 +1859,16 @@ out: void submodule_unset_core_worktree(const struct submodule *sub) { - char *config_path = xstrfmt("%s/modules/%s/config", - get_git_dir(), sub->name); + struct strbuf config_path = STRBUF_INIT; + + submodule_name_to_gitdir(&config_path, the_repository, sub->name); + strbuf_addstr(&config_path, "/config"); - if (git_config_set_in_file_gently(config_path, "core.worktree", NULL)) + if (git_config_set_in_file_gently(config_path.buf, "core.worktree", NULL)) warning(_("Could not unset core.worktree setting in submodule '%s'"), sub->path); - free(config_path); + strbuf_release(&config_path); } static const char *get_super_prefix_or_empty(void) @@ -1922,20 +1964,22 @@ int submodule_move_head(const char *path, absorb_git_dir_into_superproject(path, ABSORB_GITDIR_RECURSE_SUBMODULES); } else { - char *gitdir = xstrfmt("%s/modules/%s", - get_git_dir(), sub->name); - connect_work_tree_and_git_dir(path, gitdir, 0); - free(gitdir); + struct strbuf gitdir = STRBUF_INIT; + submodule_name_to_gitdir(&gitdir, the_repository, + sub->name); + connect_work_tree_and_git_dir(path, gitdir.buf, 0); + strbuf_release(&gitdir); /* make sure the index is clean as well */ submodule_reset_index(path); } if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) { - char *gitdir = xstrfmt("%s/modules/%s", - get_git_dir(), sub->name); - connect_work_tree_and_git_dir(path, gitdir, 1); - free(gitdir); + struct strbuf gitdir = STRBUF_INIT; + submodule_name_to_gitdir(&gitdir, the_repository, + sub->name); + connect_work_tree_and_git_dir(path, gitdir.buf, 1); + strbuf_release(&gitdir); } } @@ -2050,7 +2094,7 @@ int validate_submodule_git_dir(char *git_dir, const char *submodule_name) static void relocate_single_git_dir_into_superproject(const char *path) { char *old_git_dir = NULL, *real_old_git_dir = NULL, *real_new_git_dir = NULL; - char *new_git_dir; + struct strbuf new_gitdir = STRBUF_INIT; const struct submodule *sub; if (submodule_uses_worktrees(path)) @@ -2068,14 +2112,13 @@ static void relocate_single_git_dir_into_superproject(const char *path) if (!sub) die(_("could not lookup name for submodule '%s'"), path); - new_git_dir = git_pathdup("modules/%s", sub->name); - if (validate_submodule_git_dir(new_git_dir, sub->name) < 0) + submodule_name_to_gitdir(&new_gitdir, the_repository, sub->name); + if (validate_submodule_git_dir(new_gitdir.buf, sub->name) < 0) die(_("refusing to move '%s' into an existing git dir"), real_old_git_dir); - if (safe_create_leading_directories_const(new_git_dir) < 0) - die(_("could not create directory '%s'"), new_git_dir); - real_new_git_dir = real_pathdup(new_git_dir, 1); - free(new_git_dir); + if (safe_create_leading_directories_const(new_gitdir.buf) < 0) + die(_("could not create directory '%s'"), new_gitdir.buf); + real_new_git_dir = 
real_pathdup(new_gitdir.buf, 1); fprintf(stderr, _("Migrating git directory of '%s%s' from\n'%s' to\n'%s'\n"), get_super_prefix_or_empty(), path, @@ -2086,6 +2129,7 @@ static void relocate_single_git_dir_into_superproject(const char *path) free(old_git_dir); free(real_old_git_dir); free(real_new_git_dir); + strbuf_release(&new_gitdir); } /* @@ -2105,6 +2149,7 @@ void absorb_git_dir_into_superproject(const char *path, /* Not populated? */ if (!sub_git_dir) { const struct submodule *sub; + struct strbuf sub_gitdir = STRBUF_INIT; if (err_code == READ_GITFILE_ERR_STAT_FAILED) { /* unpopulated as expected */ @@ -2126,8 +2171,9 @@ void absorb_git_dir_into_superproject(const char *path, sub = submodule_from_path(the_repository, null_oid(), path); if (!sub) die(_("could not lookup name for submodule '%s'"), path); - connect_work_tree_and_git_dir(path, - git_path("modules/%s", sub->name), 0); + submodule_name_to_gitdir(&sub_gitdir, the_repository, sub->name); + connect_work_tree_and_git_dir(path, sub_gitdir.buf, 0); + strbuf_release(&sub_gitdir); } else { /* Is it already absorbed into the superprojects git dir? */ char *real_sub_git_dir = real_pathdup(sub_git_dir, 1); @@ -2278,9 +2324,36 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule) goto cleanup; } strbuf_reset(buf); - strbuf_git_path(buf, "%s/%s", "modules", sub->name); + submodule_name_to_gitdir(buf, the_repository, sub->name); } cleanup: return ret; } + +void submodule_name_to_gitdir(struct strbuf *buf, struct repository *r, + const char *submodule_name) +{ + /* + * NEEDSWORK: The current way of mapping a submodule's name to + * its location in .git/modules/ has problems with some naming + * schemes. For example, if a submodule is named "foo" and + * another is named "foo/bar" (whether present in the same + * superproject commit or not - the problem will arise if both + * superproject commits have been checked out at any point in + * time), or if two submodule names only have different cases in + * a case-insensitive filesystem. + * + * There are several solutions, including encoding the path in + * some way, introducing a submodule.<name>.gitdir config in + * .git/config (not .gitmodules) that allows overriding what the + * gitdir of a submodule would be (and teach Git, upon noticing + * a clash, to automatically determine a non-clashing name and + * to write such a config), or introducing a + * submodule.<name>.gitdir config in .gitmodules that repo + * administrators can explicitly set. Nothing has been decided, + * so for now, just append the name at the end of the path. + */ + strbuf_repo_git_path(buf, r, "modules/"); + strbuf_addstr(buf, submodule_name); +} diff --git a/submodule.h b/submodule.h index 84640c49c1..4578e501b8 100644 --- a/submodule.h +++ b/submodule.h @@ -97,7 +97,15 @@ int submodule_uses_gitfile(const char *path); #define SUBMODULE_REMOVAL_IGNORE_IGNORED_UNTRACKED (1<<2) int bad_to_remove_submodule(const char *path, unsigned flags); +/* + * Call add_submodule_odb() to add the submodule at the given path to a list. + * When register_all_submodule_odb_as_alternates() is called, the object stores + * of all submodules in that list will be added as alternates in + * the_repository. + */ int add_submodule_odb(const char *path); +void add_submodule_odb_by_path(const char *path); +int register_all_submodule_odb_as_alternates(void); /* * Checks if there are submodule changes in a..b. 
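submodule_name_to_gitdir() centralizes the mapping that used to be spelled "%s/modules/%s" at each call site. As its NEEDSWORK comment notes, appending the raw name can collide for names such as "foo" and "foo/bar"; the sketch below shows only the mapping itself, with snprintf standing in for the strbuf helpers.

    #include <stddef.h>
    #include <stdio.h>

    /* Map a submodule name to <gitdir>/modules/<name> (simplified). */
    static void name_to_gitdir(char *buf, size_t len,
                               const char *gitdir, const char *name)
    {
        snprintf(buf, len, "%s/modules/%s", gitdir, name);
    }

    int main(void)
    {
        char a[256], b[256];

        name_to_gitdir(a, sizeof(a), ".git", "foo");
        name_to_gitdir(b, sizeof(b), ".git", "foo/bar");

        /* "foo/bar" nests inside "foo" -- the clash the comment warns about. */
        printf("%s\n%s\n", a, b);
        return 0;
    }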
If a is the null OID, @@ -125,6 +133,13 @@ int push_unpushed_submodules(struct repository *r, int submodule_to_gitdir(struct strbuf *buf, const char *submodule); /* + * Given a submodule name, create a path to where the submodule's gitdir lives + * inside of the provided repository's 'modules' directory. + */ +void submodule_name_to_gitdir(struct strbuf *buf, struct repository *r, + const char *submodule_name); + +/* * Make sure that no submodule's git dir is nested in a sibling submodule's. */ int validate_submodule_git_dir(char *git_dir, const char *submodule_name); @@ -425,6 +425,10 @@ GIT_TEST_MULTI_PACK_INDEX=<boolean>, when true, forces the multi-pack- index to be written after every 'git repack' command, and overrides the 'core.multiPackIndex' setting to true. +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=<boolean>, when true, sets the +'--bitmap' option on all invocations of 'git multi-pack-index write', +and ignores pack-objects' '--write-bitmap-index'. + GIT_TEST_SIDEBAND_ALL=<boolean>, when true, overrides the 'uploadpack.allowSidebandAll' setting to true, and when false, forces fetch-pack to not request sideband-all (even if the server advertises @@ -448,6 +452,16 @@ GIT_TEST_CHECKOUT_WORKERS=<n> overrides the 'checkout.workers' setting to <n> and 'checkout.thresholdForParallelism' to 0, forcing the execution of the parallel-checkout code. +GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=<boolean>, when true, makes +registering submodule ODBs as alternates a fatal action. Support for +this environment variable can be removed once the migration to +explicitly providing repositories when accessing submodule objects is +complete (in which case we might want to replace this with a trace2 +call so that users can make it visible if accessing submodule objects +without an explicit repository still happens) or needs to be abandoned +for whatever reason (in which case the migrated codepaths still retain +their performance benefits). + Naming Tests ------------ @@ -753,7 +767,8 @@ Test harness library -------------------- There are a handful helper functions defined in the test harness -library for your script to use. +library for your script to use. Some of them are listed below; +see test-lib-functions.sh for the full list and their options. - test_expect_success [<prereq>] <message> <script> @@ -799,10 +814,12 @@ library for your script to use. argument. This is primarily meant for use during the development of a new test script. - - debug <git-command> + - debug [options] <git-command> Run a git command inside a debugger. This is primarily meant for - use when debugging a failing test script. + use when debugging a failing test script. With '-t', use your + original TERM instead of test-lib.sh's "dumb", so that your + debugger interface has colors. - test_done @@ -989,7 +1006,7 @@ library for your script to use. EOF - - test_pause + - test_pause [options] This command is useful for writing and debugging tests and must be removed before submitting. 
It halts the execution of the test and diff --git a/t/helper/test-parse-options.c b/t/helper/test-parse-options.c index 2051ce57db..a282b6ff13 100644 --- a/t/helper/test-parse-options.c +++ b/t/helper/test-parse-options.c @@ -134,7 +134,6 @@ int cmd__parse_options(int argc, const char **argv) OPT_NOOP_NOARG(0, "obsolete"), OPT_STRING_LIST(0, "list", &list, "str", "add str to list"), OPT_GROUP("Magic arguments"), - OPT_ARGUMENT("quux", NULL, "means --quux"), OPT_NUMBER_CALLBACK(&integer, "set integer to NUM", number_callback), { OPTION_COUNTUP, '+', NULL, &boolean, NULL, "same as -b", diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c index 7c2eb11a8e..cb0d27049a 100644 --- a/t/helper/test-read-midx.c +++ b/t/helper/test-read-midx.c @@ -60,12 +60,26 @@ static int read_midx_file(const char *object_dir, int show_objects) return 0; } +static int read_midx_checksum(const char *object_dir) +{ + struct multi_pack_index *m; + + setup_git_directory(); + m = load_multi_pack_index(object_dir, 1); + if (!m) + return 1; + printf("%s\n", hash_to_hex(get_midx_checksum(m))); + return 0; +} + int cmd__read_midx(int argc, const char **argv) { if (!(argc == 2 || argc == 3)) - usage("read-midx [--show-objects] <object-dir>"); + usage("read-midx [--show-objects|--checksum] <object-dir>"); if (!strcmp(argv[1], "--show-objects")) return read_midx_file(argv[2], 1); + else if (!strcmp(argv[1], "--checksum")) + return read_midx_checksum(argv[2]); return read_midx_file(argv[1], 0); } diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c index 7ae03dc712..14c57365e7 100644 --- a/t/helper/test-run-command.c +++ b/t/helper/test-run-command.c @@ -61,7 +61,7 @@ struct testsuite { int quiet, immediate, verbose, verbose_log, trace, write_junit_xml; }; #define TESTSUITE_INIT \ - { STRING_LIST_INIT_DUP, STRING_LIST_INIT_DUP, -1, 0, 0, 0, 0, 0, 0 } + { STRING_LIST_INIT_DUP, STRING_LIST_INIT_DUP, 0, 0, 0, 0, 0, 0, 0 } static int next_test(struct child_process *cp, struct strbuf *err, void *cb, void **task_cb) @@ -142,9 +142,6 @@ static int testsuite(int argc, const char **argv) OPT_END() }; - memset(&suite, 0, sizeof(suite)); - suite.tests.strdup_strings = suite.failed.strdup_strings = 1; - argc = parse_options(argc, argv, NULL, options, testsuite_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/t/helper/test-serve-v2.c b/t/helper/test-serve-v2.c index aee35e5aef..28e905afc3 100644 --- a/t/helper/test-serve-v2.c +++ b/t/helper/test-serve-v2.c @@ -10,12 +10,12 @@ static char const * const serve_usage[] = { int cmd__serve_v2(int argc, const char **argv) { - struct serve_options opts = SERVE_OPTIONS_INIT; - + int stateless_rpc = 0; + int advertise_capabilities = 0; struct option options[] = { - OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc, + OPT_BOOL(0, "stateless-rpc", &stateless_rpc, N_("quit after a single request/response exchange")), - OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities, + OPT_BOOL(0, "advertise-capabilities", &advertise_capabilities, N_("exit immediately after advertising capabilities")), OPT_END() }; @@ -25,7 +25,11 @@ int cmd__serve_v2(int argc, const char **argv) argc = parse_options(argc, argv, prefix, options, serve_usage, PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN); - serve(&opts); + + if (advertise_capabilities) + protocol_v2_advertise_capabilities(); + else + protocol_v2_serve_loop(stateless_rpc); return 0; } diff --git a/t/lib-bitmap.sh b/t/lib-bitmap.sh index fe3f98be24..21d0392dda 100644 --- a/t/lib-bitmap.sh +++ b/t/lib-bitmap.sh @@ 
-1,3 +1,6 @@ +# Helpers for scripts testing bitmap functionality; see t5310 for +# example usage. + # Compare a file containing rev-list bitmap traversal output to its non-bitmap # counterpart. You can't just use test_cmp for this, because the two produce # subtly different output: @@ -24,3 +27,240 @@ test_bitmap_traversal () { test_cmp "$1.normalized" "$2.normalized" && rm -f "$1.normalized" "$2.normalized" } + +# To ensure the logic for "maximal commits" is exercised, make +# the repository a bit more complicated. +# +# other second +# * * +# (99 commits) (99 commits) +# * * +# |\ /| +# | * octo-other octo-second * | +# |/|\_________ ____________/|\| +# | \ \/ __________/ | +# | | ________/\ / | +# * |/ * merge-right * +# | _|__________/ \____________ | +# |/ | \| +# (l1) * * merge-left * (r1) +# | / \________________________ | +# |/ \| +# (l2) * * (r2) +# \___________________________ | +# \| +# * (base) +# +# We only push bits down the first-parent history, which +# makes some of these commits unimportant! +# +# The important part for the maximal commit algorithm is how +# the bitmasks are extended. Assuming starting bit positions +# for second (bit 0) and other (bit 1), the bitmasks at the +# end should be: +# +# second: 1 (maximal, selected) +# other: 01 (maximal, selected) +# (base): 11 (maximal) +# +# This complicated history was important for a previous +# version of the walk that guarantees never walking a +# commit multiple times. That goal might be important +# again, so preserve this complicated case. For now, this +# test will guarantee that the bitmaps are computed +# correctly, even with the repeat calculations. +setup_bitmap_history() { + test_expect_success 'setup repo with moderate-sized history' ' + test_commit_bulk --id=file 10 && + git branch -M second && + git checkout -b other HEAD~5 && + test_commit_bulk --id=side 10 && + + # add complicated history setup, including merges and + # ambiguous merge-bases + + git checkout -b merge-left other~2 && + git merge second~2 -m "merge-left" && + + git checkout -b merge-right second~1 && + git merge other~1 -m "merge-right" && + + git checkout -b octo-second second && + git merge merge-left merge-right -m "octopus-second" && + + git checkout -b octo-other other && + git merge merge-left merge-right -m "octopus-other" && + + git checkout other && + git merge octo-other -m "pull octopus" && + + git checkout second && + git merge octo-second -m "pull octopus" && + + # Remove these branches so they are not selected + # as bitmap tips + git branch -D merge-left && + git branch -D merge-right && + git branch -D octo-other && + git branch -D octo-second && + + # add padding to make these merges less interesting + # and avoid having them selected for bitmaps + test_commit_bulk --id=file 100 && + git checkout other && + test_commit_bulk --id=side 100 && + git checkout second && + + bitmaptip=$(git rev-parse second) && + blob=$(echo tagged-blob | git hash-object -w --stdin) && + git tag tagged-blob $blob + ' +} + +rev_list_tests_head () { + test_expect_success "counting commits via bitmap ($state, $branch)" ' + git rev-list --count $branch >expect && + git rev-list --use-bitmap-index --count $branch >actual && + test_cmp expect actual + ' + + test_expect_success "counting partial commits via bitmap ($state, $branch)" ' + git rev-list --count $branch~5..$branch >expect && + git rev-list --use-bitmap-index --count $branch~5..$branch >actual && + test_cmp expect actual + ' + + test_expect_success "counting commits with limit ($state, 
$branch)" ' + git rev-list --count -n 1 $branch >expect && + git rev-list --use-bitmap-index --count -n 1 $branch >actual && + test_cmp expect actual + ' + + test_expect_success "counting non-linear history ($state, $branch)" ' + git rev-list --count other...second >expect && + git rev-list --use-bitmap-index --count other...second >actual && + test_cmp expect actual + ' + + test_expect_success "counting commits with limiting ($state, $branch)" ' + git rev-list --count $branch -- 1.t >expect && + git rev-list --use-bitmap-index --count $branch -- 1.t >actual && + test_cmp expect actual + ' + + test_expect_success "counting objects via bitmap ($state, $branch)" ' + git rev-list --count --objects $branch >expect && + git rev-list --use-bitmap-index --count --objects $branch >actual && + test_cmp expect actual + ' + + test_expect_success "enumerate commits ($state, $branch)" ' + git rev-list --use-bitmap-index $branch >actual && + git rev-list $branch >expect && + test_bitmap_traversal --no-confirm-bitmaps expect actual + ' + + test_expect_success "enumerate --objects ($state, $branch)" ' + git rev-list --objects --use-bitmap-index $branch >actual && + git rev-list --objects $branch >expect && + test_bitmap_traversal expect actual + ' + + test_expect_success "bitmap --objects handles non-commit objects ($state, $branch)" ' + git rev-list --objects --use-bitmap-index $branch tagged-blob >actual && + grep $blob actual + ' +} + +rev_list_tests () { + state=$1 + + for branch in "second" "other" + do + rev_list_tests_head + done +} + +basic_bitmap_tests () { + tip="$1" + test_expect_success 'rev-list --test-bitmap verifies bitmaps' " + git rev-list --test-bitmap "${tip:-HEAD}" + " + + rev_list_tests 'full bitmap' + + test_expect_success 'clone from bitmapped repository' ' + rm -fr clone.git && + git clone --no-local --bare . clone.git && + git rev-parse HEAD >expect && + git --git-dir=clone.git rev-parse HEAD >actual && + test_cmp expect actual + ' + + test_expect_success 'partial clone from bitmapped repository' ' + test_config uploadpack.allowfilter true && + rm -fr partial-clone.git && + git clone --no-local --bare --filter=blob:none . partial-clone.git && + ( + cd partial-clone.git && + pack=$(echo objects/pack/*.pack) && + git verify-pack -v "$pack" >have && + awk "/blob/ { print \$1 }" <have >blobs && + # we expect this single blob because of the direct ref + git rev-parse refs/tags/tagged-blob >expect && + test_cmp expect blobs + ) + ' + + test_expect_success 'setup further non-bitmapped commits' ' + test_commit_bulk --id=further 10 + ' + + rev_list_tests 'partial bitmap' + + test_expect_success 'fetch (partial bitmap)' ' + git --git-dir=clone.git fetch origin second:second && + git rev-parse HEAD >expect && + git --git-dir=clone.git rev-parse HEAD >actual && + test_cmp expect actual + ' + + test_expect_success 'enumerating progress counts pack-reused objects' ' + count=$(git rev-list --objects --all --count) && + git repack -adb && + + # check first with only reused objects; confirm that our + # progress showed the right number, and also that we did + # pack-reuse as expected. Check only the final "done" + # line of the meter (there may be an arbitrary number of + # intermediate lines ending with CR). 
+ GIT_PROGRESS_DELAY=0 \ + git pack-objects --all --stdout --progress \ + </dev/null >/dev/null 2>stderr && + grep "Enumerating objects: $count, done" stderr && + grep "pack-reused $count" stderr && + + # now the same but with one non-reused object + git commit --allow-empty -m "an extra commit object" && + GIT_PROGRESS_DELAY=0 \ + git pack-objects --all --stdout --progress \ + </dev/null >/dev/null 2>stderr && + grep "Enumerating objects: $((count+1)), done" stderr && + grep "pack-reused $count" stderr + ' +} + +# have_delta <obj> <expected_base> +# +# Note that because this relies on cat-file, it might find _any_ copy of an +# object in the repository. The caller is responsible for making sure +# there's only one (e.g., via "repack -ad", or having just fetched a copy). +have_delta () { + echo $2 >expect && + echo $1 | git cat-file --batch-check="%(deltabase)" >actual && + test_cmp expect actual +} + +midx_checksum () { + test-tool read-midx --checksum "$1" +} diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf index afa91e38b0..180a41fe96 100644 --- a/t/lib-httpd/apache.conf +++ b/t/lib-httpd/apache.conf @@ -81,8 +81,6 @@ PassEnv GIT_TRACE PassEnv GIT_CONFIG_NOSYSTEM PassEnv GIT_TEST_SIDEBAND_ALL -SetEnvIf Git-Protocol ".*" GIT_PROTOCOL=$0 - Alias /dumb/ www/ Alias /auth/dumb/ www/auth/dumb/ @@ -117,6 +115,11 @@ Alias /auth/dumb/ www/auth/dumb/ SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH} SetEnv GIT_HTTP_EXPORT_ALL </LocationMatch> +<LocationMatch /smart_v0/> + SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH} + SetEnv GIT_HTTP_EXPORT_ALL + SetEnv GIT_PROTOCOL +</LocationMatch> ScriptAlias /smart/incomplete_length/git-upload-pack incomplete-length-upload-pack-v2-http.sh/ ScriptAlias /smart/incomplete_body/git-upload-pack incomplete-body-upload-pack-v2-http.sh/ ScriptAliasMatch /error_git_upload_pack/(.*)/git-upload-pack error.sh/ diff --git a/t/lib-rebase.sh b/t/lib-rebase.sh index dc75b83451..ec6b9b107d 100644 --- a/t/lib-rebase.sh +++ b/t/lib-rebase.sh @@ -151,3 +151,59 @@ test_editor_unchanged () { EOF test_cmp expect actual } + +# Set up an editor for testing reword commands +# Checks that there are no uncommitted changes when rewording and that the +# todo-list is reread after each +set_reword_editor () { + >reword-actual && + >reword-oid && + + # Check rewording keeps the original authorship + GIT_AUTHOR_NAME="Reword Author" + GIT_AUTHOR_EMAIL="reword.author@example.com" + GIT_AUTHOR_DATE=@123456 + + write_script reword-sequence-editor.sh <<-\EOF && + todo="$(cat "$1")" && + echo "exec git log -1 --pretty=format:'%an <%ae> %at%n%B%n' \ + >>reword-actual" >"$1" && + printf "%s\n" "$todo" >>"$1" + EOF + + write_script reword-editor.sh <<-EOF && + # Save the oid of the first reworded commit so we can check rebase + # fast-forwards to it. Also check that we do not write .git/MERGE_MSG + # when fast-forwarding + if ! 
test -s reword-oid + then + git rev-parse HEAD >reword-oid && + if test -f .git/MERGE_MSG + then + echo 1>&2 "error: .git/MERGE_MSG exists" + exit 1 + fi + fi && + # There should be no uncommited changes + git diff --exit-code HEAD && + # The todo-list should be re-read after a reword + GIT_SEQUENCE_EDITOR="\"$PWD/reword-sequence-editor.sh\"" \ + git rebase --edit-todo && + echo edited >>"\$1" + EOF + + test_set_editor "$PWD/reword-editor.sh" +} + +# Check the results of a rebase after calling set_reword_editor +# Pass the commits that were reworded in the order that they were picked +# Expects the first pick to be a fast-forward +check_reworded_commits () { + test_cmp_rev "$(cat reword-oid)" "$1^{commit}" && + git log --format="%an <%ae> %at%n%B%nedited%n" --no-walk=unsorted "$@" \ + >reword-expected && + test_cmp reword-expected reword-actual && + git log --format="%an <%ae> %at%n%B" -n $# --first-parent --reverse \ + >reword-log && + test_cmp reword-expected reword-log +} diff --git a/t/perf/lib-bitmap.sh b/t/perf/lib-bitmap.sh new file mode 100644 index 0000000000..63d3bc7cec --- /dev/null +++ b/t/perf/lib-bitmap.sh @@ -0,0 +1,69 @@ +# Helper functions for testing bitmap performance; see p5310. + +test_full_bitmap () { + test_perf 'simulated clone' ' + git pack-objects --stdout --all </dev/null >/dev/null + ' + + test_perf 'simulated fetch' ' + have=$(git rev-list HEAD~100 -1) && + { + echo HEAD && + echo ^$have + } | git pack-objects --revs --stdout >/dev/null + ' + + test_perf 'pack to file (bitmap)' ' + git pack-objects --use-bitmap-index --all pack1b </dev/null >/dev/null + ' + + test_perf 'rev-list (commits)' ' + git rev-list --all --use-bitmap-index >/dev/null + ' + + test_perf 'rev-list (objects)' ' + git rev-list --all --use-bitmap-index --objects >/dev/null + ' + + test_perf 'rev-list with tag negated via --not --all (objects)' ' + git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null + ' + + test_perf 'rev-list with negative tag (objects)' ' + git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null + ' + + test_perf 'rev-list count with blob:none' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=blob:none >/dev/null + ' + + test_perf 'rev-list count with blob:limit=1k' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=blob:limit=1k >/dev/null + ' + + test_perf 'rev-list count with tree:0' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=tree:0 >/dev/null + ' + + test_perf 'simulated partial clone' ' + git pack-objects --stdout --all --filter=blob:none </dev/null >/dev/null + ' +} + +test_partial_bitmap () { + test_perf 'clone (partial bitmap)' ' + git pack-objects --stdout --all </dev/null >/dev/null + ' + + test_perf 'pack to file (partial bitmap)' ' + git pack-objects --use-bitmap-index --all pack2b </dev/null >/dev/null + ' + + test_perf 'rev-list with tree filter (partial bitmap)' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=tree:0 >/dev/null + ' +} diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh index 452be01056..7ad4f237bc 100755 --- a/t/perf/p5310-pack-bitmaps.sh +++ b/t/perf/p5310-pack-bitmaps.sh @@ -2,6 +2,7 @@ test_description='Tests pack performance using bitmaps' . ./perf-lib.sh +. 
"${TEST_DIRECTORY}/perf/lib-bitmap.sh" test_perf_large_repo @@ -25,56 +26,7 @@ test_perf 'repack to disk' ' git repack -ad ' -test_perf 'simulated clone' ' - git pack-objects --stdout --all </dev/null >/dev/null -' - -test_perf 'simulated fetch' ' - have=$(git rev-list HEAD~100 -1) && - { - echo HEAD && - echo ^$have - } | git pack-objects --revs --stdout >/dev/null -' - -test_perf 'pack to file (bitmap)' ' - git pack-objects --use-bitmap-index --all pack1b </dev/null >/dev/null -' - -test_perf 'rev-list (commits)' ' - git rev-list --all --use-bitmap-index >/dev/null -' - -test_perf 'rev-list (objects)' ' - git rev-list --all --use-bitmap-index --objects >/dev/null -' - -test_perf 'rev-list with tag negated via --not --all (objects)' ' - git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null -' - -test_perf 'rev-list with negative tag (objects)' ' - git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null -' - -test_perf 'rev-list count with blob:none' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=blob:none >/dev/null -' - -test_perf 'rev-list count with blob:limit=1k' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=blob:limit=1k >/dev/null -' - -test_perf 'rev-list count with tree:0' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=tree:0 >/dev/null -' - -test_perf 'simulated partial clone' ' - git pack-objects --stdout --all --filter=blob:none </dev/null >/dev/null -' +test_full_bitmap test_expect_success 'create partial bitmap state' ' # pick a commit to represent the repo tip in the past @@ -97,17 +49,6 @@ test_expect_success 'create partial bitmap state' ' git update-ref HEAD $orig_tip ' -test_perf 'clone (partial bitmap)' ' - git pack-objects --stdout --all </dev/null >/dev/null -' - -test_perf 'pack to file (partial bitmap)' ' - git pack-objects --use-bitmap-index --all pack2b </dev/null >/dev/null -' - -test_perf 'rev-list with tree filter (partial bitmap)' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=tree:0 >/dev/null -' +test_partial_bitmap test_done diff --git a/t/perf/p5326-multi-pack-bitmaps.sh b/t/perf/p5326-multi-pack-bitmaps.sh new file mode 100755 index 0000000000..5845109ac7 --- /dev/null +++ b/t/perf/p5326-multi-pack-bitmaps.sh @@ -0,0 +1,43 @@ +#!/bin/sh + +test_description='Tests performance using midx bitmaps' +. ./perf-lib.sh +. 
"${TEST_DIRECTORY}/perf/lib-bitmap.sh" + +test_perf_large_repo + +test_expect_success 'enable multi-pack index' ' + git config core.multiPackIndex true +' + +test_perf 'setup multi-pack index' ' + git repack -ad && + git multi-pack-index write --bitmap +' + +test_full_bitmap + +test_expect_success 'create partial bitmap state' ' + # pick a commit to represent the repo tip in the past + cutoff=$(git rev-list HEAD~100 -1) && + orig_tip=$(git rev-parse HEAD) && + + # now pretend we have just one tip + rm -rf .git/logs .git/refs/* .git/packed-refs && + git update-ref HEAD $cutoff && + + # and then repack, which will leave us with a nice + # big bitmap pack of the "old" history, and all of + # the new history will be loose, as if it had been pushed + # up incrementally and exploded via unpack-objects + git repack -Ad && + git multi-pack-index write --bitmap && + + # and now restore our original tip, as if the pushes + # had happened + git update-ref HEAD $orig_tip +' + +test_partial_bitmap + +test_done diff --git a/t/perf/run b/t/perf/run index d19dec258a..55219aa405 100755 --- a/t/perf/run +++ b/t/perf/run @@ -74,7 +74,7 @@ set_git_test_installed () { mydir=$1 mydir_abs=$(cd $mydir && pwd) - mydir_abs_wrappers="$mydir_abs_wrappers/bin-wrappers" + mydir_abs_wrappers="$mydir_abs/bin-wrappers" if test -d "$mydir_abs_wrappers" then GIT_TEST_INSTALLED=$mydir_abs_wrappers diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh index cb87768513..5c342de713 100755 --- a/t/t0000-basic.sh +++ b/t/t0000-basic.sh @@ -1271,28 +1271,29 @@ P=$(test_oid root) test_expect_success 'git commit-tree records the correct tree in a commit' ' commit0=$(echo NO | git commit-tree $P) && - tree=$(git show --pretty=raw $commit0 | - sed -n -e "s/^tree //p" -e "/^author /q") && + git show --pretty=raw $commit0 >out && + tree=$(sed -n -e "s/^tree //p" -e "/^author /q" out) && test "z$tree" = "z$P" ' test_expect_success 'git commit-tree records the correct parent in a commit' ' commit1=$(echo NO | git commit-tree $P -p $commit0) && - parent=$(git show --pretty=raw $commit1 | - sed -n -e "s/^parent //p" -e "/^author /q") && + git show --pretty=raw $commit1 >out && + parent=$(sed -n -e "s/^parent //p" -e "/^author /q" out) && test "z$commit0" = "z$parent" ' test_expect_success 'git commit-tree omits duplicated parent in a commit' ' commit2=$(echo NO | git commit-tree $P -p $commit0 -p $commit0) && - parent=$(git show --pretty=raw $commit2 | - sed -n -e "s/^parent //p" -e "/^author /q" | - sort -u) && + git show --pretty=raw $commit2 >out && + cat >match.sed <<-\EOF && + s/^parent //p + /^author /q + EOF + parent=$(sed -n -f match.sed out | sort -u) && test "z$commit0" = "z$parent" && - numparent=$(git show --pretty=raw $commit2 | - sed -n -e "s/^parent //p" -e "/^author /q" | - wc -l) && - test $numparent = 1 + git show --pretty=raw $commit2 >out && + test_stdout_line_count = 1 sed -n -f match.sed out ' test_expect_success 'update-index D/F conflict' ' diff --git a/t/t0012-help.sh b/t/t0012-help.sh index 5679e29c62..913f34c8e9 100755 --- a/t/t0012-help.sh +++ b/t/t0012-help.sh @@ -73,6 +73,22 @@ test_expect_success 'git help -g' ' test_i18ngrep "^ tutorial " help.output ' +test_expect_success 'git help fails for non-existing html pages' ' + configure_help && + mkdir html-empty && + test_must_fail git -c help.htmlpath=html-empty help status && + test_must_be_empty test-browser.log +' + +test_expect_success 'git help succeeds without git.html' ' + configure_help && + mkdir html-with-docs && + touch html-with-docs/git-status.html && + 
git -c help.htmlpath=html-with-docs help status && + echo "html-with-docs/git-status.html" >expect && + test_cmp expect test-browser.log +' + test_expect_success 'generate builtin list' ' git --list-cmds=builtins >builtins ' diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index b5749f327d..33dfc9cd56 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -6,6 +6,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME . ./test-lib.sh +. "$TEST_DIRECTORY"/lib-terminal.sh TEST_ROOT="$PWD" PATH=$TEST_ROOT:$PATH @@ -1061,4 +1062,74 @@ test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \ ) ' +test_expect_success PERL 'setup for progress tests' ' + git init progress && + ( + cd progress && + git config filter.delay.process "rot13-filter.pl delay-progress.log clean smudge delay" && + git config filter.delay.required true && + + echo "*.a filter=delay" >.gitattributes && + touch test-delay10.a && + git add . && + git commit -m files + ) +' + +test_delayed_checkout_progress () { + if test "$1" = "!" + then + local expect_progress=N && + shift + else + local expect_progress= + fi && + + if test $# -lt 1 + then + BUG "no command given to test_delayed_checkout_progress" + fi && + + ( + cd progress && + GIT_PROGRESS_DELAY=0 && + export GIT_PROGRESS_DELAY && + rm -f *.a delay-progress.log && + + "$@" 2>err && + grep "IN: smudge test-delay10.a .* \\[DELAYED\\]" delay-progress.log && + if test "$expect_progress" = N + then + ! grep "Filtering content" err + else + grep "Filtering content" err + fi + ) +} + +for mode in pathspec branch +do + case "$mode" in + pathspec) opt='.' ;; + branch) opt='-f HEAD' ;; + esac + + test_expect_success PERL,TTY "delayed checkout shows progress by default on tty ($mode checkout)" ' + test_delayed_checkout_progress test_terminal git checkout $opt + ' + + test_expect_success PERL "delayed checkout ommits progress on non-tty ($mode checkout)" ' + test_delayed_checkout_progress ! git checkout $opt + ' + + test_expect_success PERL,TTY "delayed checkout ommits progress with --quiet ($mode checkout)" ' + test_delayed_checkout_progress ! test_terminal git checkout --quiet $opt + ' + + test_expect_success PERL,TTY "delayed checkout honors --[no]-progress ($mode checkout)" ' + test_delayed_checkout_progress ! 
test_terminal git checkout --no-progress $opt && + test_delayed_checkout_progress test_terminal git checkout --quiet --progress $opt + ' +done + test_done diff --git a/t/t0040-parse-options.sh b/t/t0040-parse-options.sh index ad4746d899..da310ed29b 100755 --- a/t/t0040-parse-options.sh +++ b/t/t0040-parse-options.sh @@ -37,7 +37,6 @@ String options --list <str> add str to list Magic arguments - --quux means --quux -NUM set integer to NUM + same as -b --ambiguous positive ambiguity @@ -263,10 +262,6 @@ test_expect_success 'detect possible typos' ' test_cmp typo.err output.err ' -test_expect_success 'keep some options as arguments' ' - test-tool parse-options --expect="arg 00: --quux" --quux -' - cat >expect <<\EOF Callback: "four", 0 boolean: 5 diff --git a/t/t0060-path-utils.sh b/t/t0060-path-utils.sh index de4960783f..34d1061f32 100755 --- a/t/t0060-path-utils.sh +++ b/t/t0060-path-utils.sh @@ -525,4 +525,30 @@ test_expect_success MINGW 'is_valid_path() on Windows' ' "PRN./abc" ' +test_lazy_prereq RUNTIME_PREFIX ' + test true = "$RUNTIME_PREFIX" +' + +test_lazy_prereq CAN_EXEC_IN_PWD ' + cp "$GIT_EXEC_PATH"/git$X ./ && + ./git rev-parse +' + +test_expect_success RUNTIME_PREFIX,CAN_EXEC_IN_PWD 'RUNTIME_PREFIX works' ' + mkdir -p pretend/bin pretend/libexec/git-core && + echo "echo HERE" | write_script pretend/libexec/git-core/git-here && + cp "$GIT_EXEC_PATH"/git$X pretend/bin/ && + GIT_EXEC_PATH= ./pretend/bin/git here >actual && + echo HERE >expect && + test_cmp expect actual' + +test_expect_success RUNTIME_PREFIX,CAN_EXEC_IN_PWD '%(prefix)/ works' ' + mkdir -p pretend/bin && + cp "$GIT_EXEC_PATH"/git$X pretend/bin/ && + git config yes.path "%(prefix)/yes" && + GIT_EXEC_PATH= ./pretend/bin/git config --path yes.path >actual && + echo "$(pwd)/pretend/yes" >expect && + test_cmp expect actual +' + test_done diff --git a/t/t0090-cache-tree.sh b/t/t0090-cache-tree.sh index 9bf66c9e68..9067572648 100755 --- a/t/t0090-cache-tree.sh +++ b/t/t0090-cache-tree.sh @@ -195,6 +195,7 @@ test_expect_success 'reset --hard gives cache-tree' ' test_expect_success 'reset --hard without index gives cache-tree' ' rm -f .git/index && + git clean -fd && git reset --hard && test_cache_tree ' diff --git a/t/t0210/scrub_normal.perl b/t/t0210/scrub_normal.perl index c65d1a815e..7cc4de392a 100644 --- a/t/t0210/scrub_normal.perl +++ b/t/t0210/scrub_normal.perl @@ -42,6 +42,12 @@ while (<>) { # so just omit it for testing purposes. # print "cmd_path _EXE_\n"; } + elsif ($line =~ m/^cmd_ancestry/) { + # 'cmd_ancestry' is not implemented everywhere, so for portability's + # sake, skip it when parsing normal. + # + # print "$line"; + } else { print "$line"; } diff --git a/t/t0211/scrub_perf.perl b/t/t0211/scrub_perf.perl index 351af7844e..d164b750ff 100644 --- a/t/t0211/scrub_perf.perl +++ b/t/t0211/scrub_perf.perl @@ -44,6 +44,11 @@ while (<>) { # $tokens[$col_rest] = "_EXE_"; goto SKIP_LINE; } + elsif ($tokens[$col_event] =~ m/cmd_ancestry/) { + # 'cmd_ancestry' is platform-specific and not implemented everywhere, + # so skip it. + goto SKIP_LINE; + } elsif ($tokens[$col_event] =~ m/child_exit/) { $tokens[$col_rest] =~ s/ pid:\d* / pid:_PID_ /; } diff --git a/t/t0212/parse_events.perl b/t/t0212/parse_events.perl index 6584bb5634..b6408560c0 100644 --- a/t/t0212/parse_events.perl +++ b/t/t0212/parse_events.perl @@ -132,7 +132,10 @@ while (<>) { # just omit it for testing purposes. 
# $processes->{$sid}->{'path'} = "_EXE_"; } - + elsif ($event eq 'cmd_ancestry') { + # 'cmd_ancestry' is platform-specific and not implemented everywhere, so + # just skip it for testing purposes. + } elsif ($event eq 'cmd_name') { $processes->{$sid}->{'name'} = $line->{'name'}; $processes->{$sid}->{'hierarchy'} = $line->{'hierarchy'}; diff --git a/t/t0301-credential-cache.sh b/t/t0301-credential-cache.sh index ebd5fa5249..698b7159f0 100755 --- a/t/t0301-credential-cache.sh +++ b/t/t0301-credential-cache.sh @@ -9,6 +9,21 @@ test -z "$NO_UNIX_SOCKETS" || { test_done } +uname_s=$(uname -s) +case $uname_s in +*MINGW*) + test_path_is_socket () { + # `test -S` cannot detect Win10's Unix sockets + test_path_exists "$1" + } + ;; +*) + test_path_is_socket () { + test -S "$1" + } + ;; +esac + # don't leave a stale daemon running test_atexit 'git credential-cache exit' @@ -21,7 +36,7 @@ test_expect_success 'socket defaults to ~/.cache/git/credential/socket' ' rmdir -p .cache/git/credential/ " && test_path_is_missing "$HOME/.git-credential-cache" && - test -S "$HOME/.cache/git/credential/socket" + test_path_is_socket "$HOME/.cache/git/credential/socket" ' XDG_CACHE_HOME="$HOME/xdg" @@ -31,7 +46,7 @@ helper_test cache test_expect_success "use custom XDG_CACHE_HOME if set and default sockets are not created" ' test_when_finished "git credential-cache exit" && - test -S "$XDG_CACHE_HOME/git/credential/socket" && + test_path_is_socket "$XDG_CACHE_HOME/git/credential/socket" && test_path_is_missing "$HOME/.git-credential-cache/socket" && test_path_is_missing "$HOME/.cache/git/credential/socket" ' @@ -48,7 +63,7 @@ test_expect_success 'credential-cache --socket option overrides default location username=store-user password=store-pass EOF - test -S "$HOME/dir/socket" + test_path_is_socket "$HOME/dir/socket" ' test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" ' @@ -62,7 +77,7 @@ test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" ' username=store-user password=store-pass EOF - test -S "$HOME/.cache/git/credential/socket" && + test_path_is_socket "$HOME/.cache/git/credential/socket" && XDG_CACHE_HOME="$HOME/xdg" && export XDG_CACHE_HOME && check approve cache <<-\EOF && @@ -71,7 +86,7 @@ test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" ' username=store-user password=store-pass EOF - test -S "$XDG_CACHE_HOME/git/credential/socket" + test_path_is_socket "$XDG_CACHE_HOME/git/credential/socket" ' test_expect_success 'use user socket if user directory exists' ' @@ -79,14 +94,15 @@ test_expect_success 'use user socket if user directory exists' ' git credential-cache exit && rmdir \"\$HOME/.git-credential-cache/\" " && - mkdir -p -m 700 "$HOME/.git-credential-cache/" && + mkdir -p "$HOME/.git-credential-cache/" && + chmod 700 "$HOME/.git-credential-cache/" && check approve cache <<-\EOF && protocol=https host=example.com username=store-user password=store-pass EOF - test -S "$HOME/.git-credential-cache/socket" + test_path_is_socket "$HOME/.git-credential-cache/socket" ' test_expect_success SYMLINKS 'use user socket if user directory is a symlink to a directory' ' @@ -103,7 +119,7 @@ test_expect_success SYMLINKS 'use user socket if user directory is a symlink to username=store-user password=store-pass EOF - test -S "$HOME/.git-credential-cache/socket" + test_path_is_socket "$HOME/.git-credential-cache/socket" ' helper_test_timeout cache --timeout=1 diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh index a211a66c67..bba679685f 100755 
--- a/t/t0410-partial-clone.sh +++ b/t/t0410-partial-clone.sh @@ -4,6 +4,9 @@ test_description='partial clone' . ./test-lib.sh +# missing promisor objects cause repacks which write bitmaps to fail +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 + delete_object () { rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|') } @@ -536,7 +539,13 @@ test_expect_success 'gc does not repack promisor objects if there are none' ' repack_and_check () { rm -rf repo2 && cp -r repo repo2 && - git -C repo2 repack $1 -d && + if test x"$1" = "x--must-fail" + then + shift + test_must_fail git -C repo2 repack $1 -d + else + git -C repo2 repack $1 -d + fi && git -C repo2 fsck && git -C repo2 cat-file -e $2 && @@ -561,6 +570,7 @@ test_expect_success 'repack -d does not irreversibly delete promisor objects' ' printf "$THREE\n" | pack_as_from_promisor && delete_object repo "$ONE" && + repack_and_check --must-fail -ab "$TWO" "$THREE" && repack_and_check -a "$TWO" "$THREE" && repack_and_check -A "$TWO" "$THREE" && repack_and_check -l "$TWO" "$THREE" diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh index 38fc8340f5..71236981e6 100755 --- a/t/t1091-sparse-checkout-builtin.sh +++ b/t/t1091-sparse-checkout-builtin.sh @@ -642,4 +642,63 @@ test_expect_success MINGW 'cone mode replaces backslashes with slashes' ' check_files repo/deep a deeper1 ' +test_expect_success 'cone mode clears ignored subdirectories' ' + rm repo/.git/info/sparse-checkout && + + git -C repo sparse-checkout init --cone && + git -C repo sparse-checkout set deep/deeper1 && + + cat >repo/.gitignore <<-\EOF && + obj/ + *.o + EOF + + git -C repo add .gitignore && + git -C repo commit -m ".gitignore" && + + mkdir -p repo/obj repo/folder1/obj repo/deep/deeper2/obj && + for file in folder1/obj/a obj/a folder1/file.o folder1.o \ + deep/deeper2/obj/a deep/deeper2/file.o file.o + do + echo ignored >repo/$file || return 1 + done && + + git -C repo status --porcelain=v2 >out && + test_must_be_empty out && + + git -C repo sparse-checkout reapply && + test_path_is_missing repo/folder1 && + test_path_is_missing repo/deep/deeper2 && + test_path_is_dir repo/obj && + test_path_is_file repo/file.o && + + git -C repo status --porcelain=v2 >out && + test_must_be_empty out && + + git -C repo sparse-checkout set deep/deeper2 && + test_path_is_missing repo/deep/deeper1 && + test_path_is_dir repo/deep/deeper2 && + test_path_is_dir repo/obj && + test_path_is_file repo/file.o && + + >repo/deep/deeper2/ignored.o && + >repo/deep/deeper2/untracked && + + # When an untracked file is in the way, all untracked files + # (even ignored files) are preserved. + git -C repo sparse-checkout set folder1 2>err && + grep "contains untracked files" err && + test_path_is_file repo/deep/deeper2/ignored.o && + test_path_is_file repo/deep/deeper2/untracked && + + # The rest of the cone matches expectation + test_path_is_missing repo/deep/deeper1 && + test_path_is_dir repo/obj && + test_path_is_file repo/file.o && + + git -C repo status --porcelain=v2 >out && + echo "? 
deep/deeper2/untracked" >expect && + test_cmp expect out +' + test_done diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh index 91e30d6ec2..886e78715f 100755 --- a/t/t1092-sparse-checkout-compatibility.sh +++ b/t/t1092-sparse-checkout-compatibility.sh @@ -47,7 +47,7 @@ test_expect_success 'setup' ' git checkout -b base && for dir in folder1 folder2 deep do - git checkout -b update-$dir && + git checkout -b update-$dir base && echo "updated $dir" >$dir/a && git commit -a -m "update $dir" || return 1 done && @@ -114,6 +114,16 @@ test_expect_success 'setup' ' git add . && git commit -m "file to dir" && + for side in left right + do + git checkout -b merge-$side base && + echo $side >>deep/deeper2/a && + echo $side >>folder1/a && + echo $side >>folder2/a && + git add . && + git commit -m "$side" || return 1 + done && + git checkout -b deepest base && echo "updated deepest" >deep/deeper1/deepest/a && git commit -a -m "update deepest" && @@ -312,9 +322,6 @@ test_expect_success 'commit including unstaged changes' ' test_expect_success 'status/add: outside sparse cone' ' init_repos && - # adding a "missing" file outside the cone should fail - test_sparse_match test_must_fail git add folder1/a && - # folder1 is at HEAD, but outside the sparse cone run_on_sparse mkdir folder1 && cp initial-repo/folder1/a sparse-checkout/folder1/a && @@ -330,21 +337,23 @@ test_expect_success 'status/add: outside sparse cone' ' test_sparse_match git status --porcelain=v2 && - # This "git add folder1/a" fails with a warning - # in the sparse repos, differing from the full - # repo. This is intentional. + # Adding the path outside of the sparse-checkout cone should fail. test_sparse_match test_must_fail git add folder1/a && test_sparse_match test_must_fail git add --refresh folder1/a && - test_all_match git status --porcelain=v2 && + + # NEEDSWORK: Adding a newly-tracked file outside the cone succeeds + test_sparse_match git add folder1/new && test_all_match git add . && test_all_match git status --porcelain=v2 && test_all_match git commit -m folder1/new && + test_all_match git rev-parse HEAD^{tree} && run_on_all ../edit-contents folder1/newer && test_all_match git add folder1/ && test_all_match git status --porcelain=v2 && - test_all_match git commit -m folder1/newer + test_all_match git commit -m folder1/newer && + test_all_match git rev-parse HEAD^{tree} ' test_expect_success 'checkout and reset --hard' ' @@ -472,16 +481,84 @@ test_expect_success 'checkout and reset (mixed) [sparse]' ' test_sparse_match git reset update-folder2 ' -test_expect_success 'merge' ' +test_expect_success 'merge, cherry-pick, and rebase' ' init_repos && - test_all_match git checkout -b merge update-deep && - test_all_match git merge -m "folder1" update-folder1 && - test_all_match git rev-parse HEAD^{tree} && - test_all_match git merge -m "folder2" update-folder2 && + for OPERATION in "merge -m merge" cherry-pick rebase + do + test_all_match git checkout -B temp update-deep && + test_all_match git $OPERATION update-folder1 && + test_all_match git rev-parse HEAD^{tree} && + test_all_match git $OPERATION update-folder2 && + test_all_match git rev-parse HEAD^{tree} || return 1 + done +' + +# NEEDSWORK: This test is documenting current behavior, but that +# behavior can be confusing to users so there is desire to change it. 
+# Right now, users might be using this flow to work through conflicts, +# so any solution should present advice to users who try this sequence +# of commands to follow whatever new method we create. +test_expect_success 'merge with conflict outside cone' ' + init_repos && + + test_all_match git checkout -b merge-tip merge-left && + test_all_match git status --porcelain=v2 && + test_all_match test_must_fail git merge -m merge merge-right && + test_all_match git status --porcelain=v2 && + + # Resolve the conflict in different ways: + # 1. Revert to the base + test_all_match git checkout base -- deep/deeper2/a && + test_all_match git status --porcelain=v2 && + + # 2. Add the file with conflict markers + test_all_match git add folder1/a && + test_all_match git status --porcelain=v2 && + + # 3. Rename the file to another sparse filename and + # accept conflict markers as resolved content. + run_on_all mv folder2/a folder2/z && + test_all_match git add folder2 && + test_all_match git status --porcelain=v2 && + + test_all_match git merge --continue && + test_all_match git status --porcelain=v2 && test_all_match git rev-parse HEAD^{tree} ' +test_expect_success 'cherry-pick/rebase with conflict outside cone' ' + init_repos && + + for OPERATION in cherry-pick rebase + do + test_all_match git checkout -B tip && + test_all_match git reset --hard merge-left && + test_all_match git status --porcelain=v2 && + test_all_match test_must_fail git $OPERATION merge-right && + test_all_match git status --porcelain=v2 && + + # Resolve the conflict in different ways: + # 1. Revert to the base + test_all_match git checkout base -- deep/deeper2/a && + test_all_match git status --porcelain=v2 && + + # 2. Add the file with conflict markers + test_all_match git add folder1/a && + test_all_match git status --porcelain=v2 && + + # 3. Rename the file to another sparse filename and + # accept conflict markers as resolved content. + run_on_all mv folder2/a folder2/z && + test_all_match git add folder2 && + test_all_match git status --porcelain=v2 && + + test_all_match git $OPERATION --continue && + test_all_match git status --porcelain=v2 && + test_all_match git rev-parse HEAD^{tree} || return 1 + done +' + test_expect_success 'merge with outside renames' ' init_repos && @@ -575,8 +652,17 @@ test_expect_success 'sparse-index is expanded and converted back' ' ensure_not_expanded () { rm -f trace2.txt && echo >>sparse-index/untracked.txt && - GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ - git -C sparse-index "$@" && + + if test "$1" = "!" + then + shift && + test_must_fail env \ + GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ + git -C sparse-index "$@" || return 1 + else + GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ + git -C sparse-index "$@" || return 1 + fi && test_region ! index ensure_full_index trace2.txt } @@ -598,7 +684,42 @@ test_expect_success 'sparse-index is not expanded' ' git -C sparse-index reset --hard && ensure_not_expanded checkout rename-out-to-out -- deep/deeper1 && git -C sparse-index reset --hard && - ensure_not_expanded restore -s rename-out-to-out -- deep/deeper1 + ensure_not_expanded restore -s rename-out-to-out -- deep/deeper1 && + + echo >>sparse-index/README.md && + ensure_not_expanded add -A && + echo >>sparse-index/extra.txt && + ensure_not_expanded add extra.txt && + echo >>sparse-index/untracked.txt && + ensure_not_expanded add . 
&& + + ensure_not_expanded checkout -f update-deep && + test_config -C sparse-index pull.twohead ort && + ( + sane_unset GIT_TEST_MERGE_ALGORITHM && + for OPERATION in "merge -m merge" cherry-pick rebase + do + ensure_not_expanded merge -m merge update-folder1 && + ensure_not_expanded merge -m merge update-folder2 || return 1 + done + ) +' + +test_expect_success 'sparse-index is not expanded: merge conflict in cone' ' + init_repos && + + for side in right left + do + git -C sparse-index checkout -b expand-$side base && + echo $side >sparse-index/deep/a && + git -C sparse-index commit -a -m "$side" || return 1 + done && + + ( + sane_unset GIT_TEST_MERGE_ALGORITHM && + git -C sparse-index config pull.twohead ort && + ensure_not_expanded ! merge -m merged expand-right + ) ' # NEEDSWORK: a sparse-checkout behaves differently from a full checkout diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh index 4506cd435b..0d4f73acaa 100755 --- a/t/t1400-update-ref.sh +++ b/t/t1400-update-ref.sh @@ -1598,6 +1598,40 @@ test_expect_success 'transaction cannot restart ongoing transaction' ' test_must_fail git show-ref --verify refs/heads/restart ' +test_expect_success PIPE 'transaction flushes status updates' ' + mkfifo in out && + (git update-ref --stdin <in >out &) && + + exec 9>in && + exec 8<out && + test_when_finished "exec 9>&-" && + test_when_finished "exec 8<&-" && + + echo "start" >&9 && + echo "start: ok" >expected && + read line <&8 && + echo "$line" >actual && + test_cmp expected actual && + + echo "create refs/heads/flush $A" >&9 && + + echo prepare >&9 && + echo "prepare: ok" >expected && + read line <&8 && + echo "$line" >actual && + test_cmp expected actual && + + # This must now fail given that we have locked the ref. + test_must_fail git update-ref refs/heads/flush $B 2>stderr && + grep "fatal: update_ref failed for ref ${SQ}refs/heads/flush${SQ}: cannot lock ref" stderr && + + echo commit >&9 && + echo "commit: ok" >expected && + read line <&8 && + echo "$line" >actual && + test_cmp expected actual +' + test_expect_success 'directory not created deleting packed ref' ' git branch d1/d2/r1 HEAD && git pack-refs --all && diff --git a/t/t1405-main-ref-store.sh b/t/t1405-main-ref-store.sh index a237d9880e..49718b7ea7 100755 --- a/t/t1405-main-ref-store.sh +++ b/t/t1405-main-ref-store.sh @@ -9,12 +9,18 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME RUN="test-tool ref-store main" -test_expect_success 'pack_refs(PACK_REFS_ALL | PACK_REFS_PRUNE)' ' - test_commit one && + +test_expect_success 'setup' ' + test_commit one +' + +test_expect_success REFFILES 'pack_refs(PACK_REFS_ALL | PACK_REFS_PRUNE)' ' N=`find .git/refs -type f | wc -l` && test "$N" != 0 && - $RUN pack-refs 3 && - N=`find .git/refs -type f | wc -l` + ALL_OR_PRUNE_FLAG=3 && + $RUN pack-refs ${ALL_OR_PRUNE_FLAG} && + N=`find .git/refs -type f` && + test -z "$N" ' test_expect_success 'create_symref(FOO, refs/heads/main)' ' @@ -98,12 +104,12 @@ test_expect_success 'reflog_exists(HEAD)' ' test_expect_success 'delete_reflog(HEAD)' ' $RUN delete-reflog HEAD && - ! 
test -f .git/logs/HEAD + test_must_fail git reflog exists HEAD ' test_expect_success 'create-reflog(HEAD)' ' $RUN create-reflog HEAD 1 && - test -f .git/logs/HEAD + git reflog exists HEAD ' test_expect_success 'delete_ref(refs/heads/foo)' ' diff --git a/t/t1410-reflog.sh b/t/t1410-reflog.sh index 27b9080251..d42f067ff8 100755 --- a/t/t1410-reflog.sh +++ b/t/t1410-reflog.sh @@ -374,7 +374,9 @@ test_expect_failure 'reflog with non-commit entries displays all entries' ' test_line_count = 3 actual ' -test_expect_success 'reflog expire operates on symref not referrent' ' +# This test takes a lock on an individual ref; this is not supported in +# reftable. +test_expect_success REFFILES 'reflog expire operates on symref not referrent' ' git branch --create-reflog the_symref && git branch --create-reflog referrent && git update-ref referrent HEAD && diff --git a/t/t1503-rev-parse-verify.sh b/t/t1503-rev-parse-verify.sh index bf08102391..40958615eb 100755 --- a/t/t1503-rev-parse-verify.sh +++ b/t/t1503-rev-parse-verify.sh @@ -142,7 +142,7 @@ test_expect_success 'main@{n} for various n' ' test_must_fail git rev-parse --verify main@{$Np1} ' -test_expect_success SYMLINKS 'ref resolution not confused by broken symlinks' ' +test_expect_success SYMLINKS,REFFILES 'ref resolution not confused by broken symlinks' ' ln -s does-not-exist .git/refs/heads/broken && test_must_fail git rev-parse --verify broken ' diff --git a/t/t2021-checkout-overwrite.sh b/t/t2021-checkout-overwrite.sh index 70d69263e6..660132ff8d 100755 --- a/t/t2021-checkout-overwrite.sh +++ b/t/t2021-checkout-overwrite.sh @@ -48,6 +48,7 @@ test_expect_success 'checkout commit with dir must not remove untracked a/b' ' test_expect_success SYMLINKS 'the symlink remained' ' + test_when_finished "rm a/b" && test -h a/b ' diff --git a/t/t2402-worktree-list.sh b/t/t2402-worktree-list.sh index fedcefe8de..4012bd67b0 100755 --- a/t/t2402-worktree-list.sh +++ b/t/t2402-worktree-list.sh @@ -230,7 +230,7 @@ test_expect_success 'broken main worktree still at the top' ' EOF cd linked && echo "worktree $(pwd)" >expected && - echo "ref: .broken" >../.git/HEAD && + (cd ../ && test-tool ref-store main create-symref HEAD .broken ) && git worktree list --porcelain >out && head -n 3 out >actual && test_cmp ../expected actual && diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh index cc4b10236e..e575ffb4ff 100755 --- a/t/t3200-branch.sh +++ b/t/t3200-branch.sh @@ -1272,6 +1272,19 @@ test_expect_success 'attempt to delete a branch merged to its base' ' test_must_fail git branch -d my10 ' +test_expect_success 'branch --delete --force removes dangling branch' ' + git checkout main && + test_commit unstable && + hash=$(git rev-parse HEAD) && + objpath=$(echo $hash | sed -e "s|^..|.git/objects/&/|") && + git branch --no-track dangling && + mv $objpath $objpath.x && + test_when_finished "mv $objpath.x $objpath" && + git branch --delete --force dangling && + git for-each-ref refs/heads/dangling >actual && + test_must_be_empty actual +' + test_expect_success 'use --edit-description' ' write_script editor <<-\EOF && echo "New contents" >"$1" diff --git a/t/t3203-branch-output.sh b/t/t3203-branch-output.sh index 5325b9f67a..6e94c6db7b 100755 --- a/t/t3203-branch-output.sh +++ b/t/t3203-branch-output.sh @@ -340,6 +340,10 @@ test_expect_success 'git branch --format option' ' test_cmp expect actual ' +test_expect_success 'git branch with --format=%(rest) must fail' ' + test_must_fail git branch --format="%(rest)" >actual +' + test_expect_success 'worktree colors correct' ' 
cat >expect <<-EOF && * <GREEN>(HEAD detached from fromtag)<RESET> diff --git a/t/t3320-notes-merge-worktrees.sh b/t/t3320-notes-merge-worktrees.sh index 052516e6c6..6b2d507f3e 100755 --- a/t/t3320-notes-merge-worktrees.sh +++ b/t/t3320-notes-merge-worktrees.sh @@ -46,8 +46,9 @@ test_expect_success 'create some new worktrees' ' test_expect_success 'merge z into y fails and sets NOTES_MERGE_REF' ' git config core.notesRef refs/notes/y && test_must_fail git notes merge z && - echo "ref: refs/notes/y" >expect && - test_cmp expect .git/NOTES_MERGE_REF + echo "refs/notes/y" >expect && + git symbolic-ref NOTES_MERGE_REF >actual && + test_cmp expect actual ' test_expect_success 'merge z into y while mid-merge in another workdir fails' ' @@ -57,7 +58,7 @@ test_expect_success 'merge z into y while mid-merge in another workdir fails' ' test_must_fail git notes merge z 2>err && test_i18ngrep "a notes merge into refs/notes/y is already in-progress at" err ) && - test_path_is_missing .git/worktrees/worktree/NOTES_MERGE_REF + test_must_fail git -C worktree symbolic-ref NOTES_MERGE_REF ' test_expect_success 'merge z into x while mid-merge on y succeeds' ' @@ -68,8 +69,9 @@ test_expect_success 'merge z into x while mid-merge on y succeeds' ' test_i18ngrep "Automatic notes merge failed" out && grep -v "A notes merge into refs/notes/x is already in-progress in" out ) && - echo "ref: refs/notes/x" >expect && - test_cmp expect .git/worktrees/worktree2/NOTES_MERGE_REF + echo "refs/notes/x" >expect && + git -C worktree2 symbolic-ref NOTES_MERGE_REF >actual && + test_cmp expect actual ' test_done diff --git a/t/t3403-rebase-skip.sh b/t/t3403-rebase-skip.sh index e26762d0b2..f6e4864497 100755 --- a/t/t3403-rebase-skip.sh +++ b/t/t3403-rebase-skip.sh @@ -20,6 +20,7 @@ test_expect_success setup ' git add hello && git commit -m "hello" && git branch skip-reference && + git tag hello && echo world >> hello && git commit -a -m "hello world" && @@ -36,7 +37,8 @@ test_expect_success setup ' test_tick && GIT_AUTHOR_NAME="Another Author" \ GIT_AUTHOR_EMAIL="another.author@example.com" \ - git commit --amend --no-edit -m amended-goodbye && + git commit --amend --no-edit -m amended-goodbye \ + --reset-author && test_tick && git tag amended-goodbye && @@ -51,7 +53,7 @@ test_expect_success setup ' ' test_expect_success 'rebase with git am -3 (default)' ' - test_must_fail git rebase main + test_must_fail git rebase --apply main ' test_expect_success 'rebase --skip can not be used with other options' ' @@ -95,6 +97,13 @@ test_expect_success 'moved back to branch correctly' ' test_debug 'gitk --all & sleep 1' +test_expect_success 'skipping final pick removes .git/MERGE_MSG' ' + test_must_fail git rebase --onto hello reverted-goodbye^ \ + reverted-goodbye && + git rebase --skip && + test_path_is_missing .git/MERGE_MSG +' + test_expect_success 'correct advice upon picking empty commit' ' test_when_finished "git rebase --abort" && test_must_fail git rebase -i --onto goodbye \ diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh index 66bcbbf952..972ce026bb 100755 --- a/t/t3404-rebase-interactive.sh +++ b/t/t3404-rebase-interactive.sh @@ -297,6 +297,7 @@ test_expect_success 'abort with error when new base cannot be checked out' ' output && test_i18ngrep "file1" output && test_path_is_missing .git/rebase-merge && + rm file1 && git reset --hard HEAD^ ' @@ -839,6 +840,19 @@ test_expect_success 'reword' ' git show HEAD~2 | grep "C changed" ' +test_expect_success 'no uncommited changes when rewording the todo list 
is reloaded' ' + git checkout E && + test_when_finished "git checkout @{-1}" && + ( + set_fake_editor && + GIT_SEQUENCE_EDITOR="\"$PWD/fake-editor.sh\"" && + export GIT_SEQUENCE_EDITOR && + set_reword_editor && + FAKE_LINES="reword 1 reword 2" git rebase -i C + ) && + check_reworded_commits D E +' + test_expect_success 'rebase -i can copy notes' ' git config notes.rewrite.rebase true && git config notes.rewriteRef "refs/notes/*" && diff --git a/t/t3418-rebase-continue.sh b/t/t3418-rebase-continue.sh index f4c2ee02bc..738fbae9b2 100755 --- a/t/t3418-rebase-continue.sh +++ b/t/t3418-rebase-continue.sh @@ -21,7 +21,7 @@ test_expect_success 'setup' ' git checkout main ' -test_expect_success 'interactive rebase --continue works with touched file' ' +test_expect_success 'merge based rebase --continue with works with touched file' ' rm -fr .git/rebase-* && git reset --hard && git checkout main && @@ -31,12 +31,22 @@ test_expect_success 'interactive rebase --continue works with touched file' ' git rebase --continue ' -test_expect_success 'non-interactive rebase --continue works with touched file' ' +test_expect_success 'merge based rebase --continue removes .git/MERGE_MSG' ' + git checkout -f --detach topic && + + test_must_fail git rebase --onto main HEAD^ && + git read-tree --reset -u HEAD && + test_path_is_file .git/MERGE_MSG && + git rebase --continue && + test_path_is_missing .git/MERGE_MSG +' + +test_expect_success 'apply based rebase --continue works with touched file' ' rm -fr .git/rebase-* && git reset --hard && git checkout main && - test_must_fail git rebase --onto main main topic && + test_must_fail git rebase --apply --onto main main topic && echo "Resolved" >F2 && git add F2 && test-tool chmtime =-60 F1 && @@ -254,7 +264,7 @@ test_rerere_autoupdate () { ' } -test_rerere_autoupdate +test_rerere_autoupdate --apply test_rerere_autoupdate -m GIT_SEQUENCE_EDITOR=: && export GIT_SEQUENCE_EDITOR test_rerere_autoupdate -i diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh index 6748070df5..43c82d9a33 100755 --- a/t/t3430-rebase-merges.sh +++ b/t/t3430-rebase-merges.sh @@ -172,19 +172,39 @@ test_expect_success 'failed `merge <branch>` does not crash' ' grep "^Merge branch ${SQ}G${SQ}$" .git/rebase-merge/message ' -test_expect_success 'fast-forward merge -c still rewords' ' - git checkout -b fast-forward-merge-c H && +test_expect_success 'merge -c commits before rewording and reloads todo-list' ' + cat >script-from-scratch <<-\EOF && + merge -c E B + merge -c H G + EOF + + git checkout -b merge-c H && ( - set_fake_editor && - FAKE_COMMIT_MESSAGE=edited \ - GIT_SEQUENCE_EDITOR="echo merge -c H G >" \ - git rebase -ir @^ + set_reword_editor && + GIT_SEQUENCE_EDITOR="\"$PWD/replace-editor.sh\"" \ + git rebase -i -r D ) && - echo edited >expected && - git log --pretty=format:%B -1 >actual && - test_cmp expected actual + check_reworded_commits E H ' +test_expect_success 'merge -c rewords when a strategy is given' ' + git checkout -b merge-c-with-strategy H && + write_script git-merge-override <<-\EOF && + echo overridden$1 >G.t + git add G.t + EOF + + PATH="$PWD:$PATH" \ + GIT_SEQUENCE_EDITOR="echo merge -c H G >" \ + GIT_EDITOR="echo edited >>" \ + git rebase --no-ff -ir -s override -Xxopt E && + test_write_lines overridden--xopt >expect && + test_cmp expect G.t && + test_write_lines H "" edited "" >expect && + git log --format=%B -1 >actual && + test_cmp expect actual + +' test_expect_success 'with a branch tip that was cherry-picked already' ' git checkout -b already-upstream main 
&& base="$(git rev-parse --verify HEAD)" && diff --git a/t/t3435-rebase-gpg-sign.sh b/t/t3435-rebase-gpg-sign.sh index ec10766858..5f8ba2c739 100755 --- a/t/t3435-rebase-gpg-sign.sh +++ b/t/t3435-rebase-gpg-sign.sh @@ -65,6 +65,7 @@ test_rebase_gpg_sign ! true -i --gpg-sign --no-gpg-sign test_rebase_gpg_sign false -i --no-gpg-sign --gpg-sign test_expect_failure 'rebase -p --no-gpg-sign override commit.gpgsign' ' + test_when_finished "git clean -f" && git reset --hard merged && git config commit.gpgsign true && git rebase -p --no-gpg-sign --onto=one fork-point main && diff --git a/t/t3501-revert-cherry-pick.sh b/t/t3501-revert-cherry-pick.sh index 9d100cd188..4b5b607673 100755 --- a/t/t3501-revert-cherry-pick.sh +++ b/t/t3501-revert-cherry-pick.sh @@ -158,4 +158,20 @@ test_expect_success 'cherry-pick works with dirty renamed file' ' grep -q "^modified$" renamed ' +test_expect_success 'advice from failed revert' ' + test_commit --no-tag "add dream" dream dream && + dream_oid=$(git rev-parse --short HEAD) && + cat <<-EOF >expected && + error: could not revert $dream_oid... add dream + hint: After resolving the conflicts, mark them with + hint: "git add/rm <pathspec>", then run + hint: "git revert --continue". + hint: You can instead skip this commit with "git revert --skip". + hint: To abort and get back to the state before "git revert", + hint: run "git revert --abort". + EOF + test_commit --append --no-tag "double-add dream" dream dream && + test_must_fail git revert HEAD^ 2>actual && + test_cmp expected actual +' test_done diff --git a/t/t3507-cherry-pick-conflict.sh b/t/t3507-cherry-pick-conflict.sh index 014001b8f3..979e843c65 100755 --- a/t/t3507-cherry-pick-conflict.sh +++ b/t/t3507-cherry-pick-conflict.sh @@ -47,20 +47,23 @@ test_expect_success 'failed cherry-pick does not advance HEAD' ' test "$head" = "$newhead" ' -test_expect_success 'advice from failed cherry-pick' " +test_expect_success 'advice from failed cherry-pick' ' pristine_detach initial && - picked=\$(git rev-parse --short picked) && + picked=$(git rev-parse --short picked) && cat <<-EOF >expected && - error: could not apply \$picked... picked - hint: after resolving the conflicts, mark the corrected paths - hint: with 'git add <paths>' or 'git rm <paths>' - hint: and commit the result with 'git commit' + error: could not apply $picked... picked + hint: After resolving the conflicts, mark them with + hint: "git add/rm <pathspec>", then run + hint: "git cherry-pick --continue". + hint: You can instead skip this commit with "git cherry-pick --skip". + hint: To abort and get back to the state before "git cherry-pick", + hint: run "git cherry-pick --abort". 
EOF test_must_fail git cherry-pick picked 2>actual && test_cmp expected actual -" +' test_expect_success 'advice from failed cherry-pick --no-commit' " pristine_detach initial && diff --git a/t/t3510-cherry-pick-sequence.sh b/t/t3510-cherry-pick-sequence.sh index 49010aa946..3b0fa66c33 100755 --- a/t/t3510-cherry-pick-sequence.sh +++ b/t/t3510-cherry-pick-sequence.sh @@ -238,6 +238,7 @@ test_expect_success 'allow skipping commit but not abort for a new history' ' ' test_expect_success 'allow skipping stopped cherry-pick because of untracked file modifications' ' + test_when_finished "rm unrelated" && pristine_detach initial && git rm --cached unrelated && git commit -m "untrack unrelated" && diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh index 873aa56e35..f0a82be9de 100755 --- a/t/t3903-stash.sh +++ b/t/t3903-stash.sh @@ -1307,4 +1307,62 @@ test_expect_success 'stash -c stash.useBuiltin=false warning ' ' test_must_be_empty err ' +test_expect_success 'git stash succeeds despite directory/file change' ' + test_create_repo directory_file_switch_v1 && + ( + cd directory_file_switch_v1 && + test_commit init && + + test_write_lines this file has some words >filler && + git add filler && + git commit -m filler && + + git rm filler && + mkdir filler && + echo contents >filler/file && + git stash push + ) +' + +test_expect_success 'git stash can pop file -> directory saved changes' ' + test_create_repo directory_file_switch_v2 && + ( + cd directory_file_switch_v2 && + test_commit init && + + test_write_lines this file has some words >filler && + git add filler && + git commit -m filler && + + git rm filler && + mkdir filler && + echo contents >filler/file && + cp filler/file expect && + git stash push --include-untracked && + git stash apply --index && + test_cmp expect filler/file + ) +' + +test_expect_success 'git stash can pop directory -> file saved changes' ' + test_create_repo directory_file_switch_v3 && + ( + cd directory_file_switch_v3 && + test_commit init && + + mkdir filler && + test_write_lines some words >filler/file1 && + test_write_lines and stuff >filler/file2 && + git add filler && + git commit -m filler && + + git rm -rf filler && + echo contents >filler && + cp filler expect && + git stash push --include-untracked && + git stash apply --index && + test_cmp expect filler + ) +' + test_done diff --git a/t/t4013-diff-various.sh b/t/t4013-diff-various.sh index e561a8e485..28683d059d 100755 --- a/t/t4013-diff-various.sh +++ b/t/t4013-diff-various.sh @@ -65,7 +65,7 @@ test_expect_success setup ' export GIT_AUTHOR_DATE GIT_COMMITTER_DATE && git checkout master && - git pull -s ours . side && + git pull -s ours --no-rebase . 
side && GIT_AUTHOR_DATE="2006-06-26 00:05:00 +0000" && GIT_COMMITTER_DATE="2006-06-26 00:05:00 +0000" && diff --git a/t/t4018/java-class-member-function b/t/t4018/java-class-member-function index 298bc7a71b..3b95f68b3b 100644 --- a/t/t4018/java-class-member-function +++ b/t/t4018/java-class-member-function @@ -3,6 +3,10 @@ public class Beer int special; public static void main(String RIGHT[]) { + someMethodCall(); + someOtherMethod("17") + .doThat(); + // Whatever System.out.print("ChangeMe"); } } diff --git a/t/t4018/java-enum-constant b/t/t4018/java-enum-constant new file mode 100644 index 0000000000..a1931c8379 --- /dev/null +++ b/t/t4018/java-enum-constant @@ -0,0 +1,6 @@ +private enum RIGHT { + ONE, + TWO, + THREE, + ChangeMe +} diff --git a/t/t4018/java-method-return-generic-bounded b/t/t4018/java-method-return-generic-bounded new file mode 100644 index 0000000000..66dd78c379 --- /dev/null +++ b/t/t4018/java-method-return-generic-bounded @@ -0,0 +1,9 @@ +class MyExample { + public <T extends Bar & Foo<T>, R> Map<T, R[]> foo(String[] RIGHT) { + someMethodCall(); + someOtherMethod() + .doThat(); + // Whatever... + return (List<T>) Arrays.asList("ChangeMe"); + } +} diff --git a/t/t4018/java-method-return-generic-wildcard b/t/t4018/java-method-return-generic-wildcard new file mode 100644 index 0000000000..96e9e5f2c1 --- /dev/null +++ b/t/t4018/java-method-return-generic-wildcard @@ -0,0 +1,9 @@ +class MyExample { + public List<? extends Comparable> foo(String[] RIGHT) { + someMethodCall(); + someOtherMethod() + .doThat(); + // Whatever... + return Arrays.asList("ChangeMe"); + } +} diff --git a/t/t4018/java-nested-field b/t/t4018/java-nested-field new file mode 100644 index 0000000000..d92d3ec688 --- /dev/null +++ b/t/t4018/java-nested-field @@ -0,0 +1,6 @@ +class MyExample { + private static class RIGHT { + // change an inner class field + String inner = "ChangeMe"; + } +} diff --git a/t/t4018/php-enum b/t/t4018/php-enum new file mode 100644 index 0000000000..91a69c1a2b --- /dev/null +++ b/t/t4018/php-enum @@ -0,0 +1,4 @@ +enum RIGHT: string +{ + case Foo = 'ChangeMe'; +} diff --git a/t/t4045-diff-relative.sh b/t/t4045-diff-relative.sh index 61ba5f707f..fab351b48a 100755 --- a/t/t4045-diff-relative.sh +++ b/t/t4045-diff-relative.sh @@ -162,4 +162,57 @@ check_diff_relative_option subdir file2 true --no-relative --relative check_diff_relative_option . file2 false --no-relative --relative=subdir check_diff_relative_option . 
file2 true --no-relative --relative=subdir +test_expect_success 'setup diff --relative unmerged' ' + test_commit zero file0 && + test_commit base subdir/file0 && + git switch -c br1 && + test_commit one file0 && + test_commit sub1 subdir/file0 && + git switch -c br2 base && + test_commit two file0 && + git switch -c br3 && + test_commit sub3 subdir/file0 +' + +test_expect_success 'diff --relative without change in subdir' ' + git switch br2 && + test_when_finished "git merge --abort" && + test_must_fail git merge one && + git -C subdir diff --relative >out && + test_must_be_empty out && + git -C subdir diff --relative --name-only >out && + test_must_be_empty out +' + +test_expect_success 'diff --relative --name-only with change in subdir' ' + git switch br3 && + test_when_finished "git merge --abort" && + test_must_fail git merge sub1 && + test_write_lines file0 file0 >expected && + git -C subdir diff --relative --name-only >out && + test_cmp expected out +' + +test_expect_failure 'diff --relative with change in subdir' ' + git switch br3 && + br1_blob=$(git rev-parse --short --verify br1:subdir/file0) && + br3_blob=$(git rev-parse --short --verify br3:subdir/file0) && + test_when_finished "git merge --abort" && + test_must_fail git merge br1 && + cat >expected <<-EOF && + diff --cc file0 + index $br3_blob,$br1_blob..0000000 + --- a/file0 + +++ b/file0 + @@@ -1,1 -1,1 +1,5 @@@ + ++<<<<<<< HEAD + +sub3 + ++======= + + sub1 + ++>>>>>>> br1 + EOF + git -C subdir diff --relative >out && + test_cmp expected out +' + test_done diff --git a/t/t4060-diff-submodule-option-diff-format.sh b/t/t4060-diff-submodule-option-diff-format.sh index dc7b242697..d86e38abd8 100755 --- a/t/t4060-diff-submodule-option-diff-format.sh +++ b/t/t4060-diff-submodule-option-diff-format.sh @@ -361,7 +361,6 @@ test_expect_success 'typechanged submodule(submodule->blob)' ' rm -f sm1 && test_create_repo sm1 && head6=$(add_file sm1 foo6 foo7) -fullhead6=$(cd sm1; git rev-parse --verify HEAD) test_expect_success 'nonexistent commit' ' git diff-index -p --submodule=diff HEAD >actual && cat >expected <<-EOF && @@ -704,10 +703,26 @@ test_expect_success 'path filter' ' diff_cmp expected actual ' -commit_file sm2 +cat >.gitmodules <<-EOF +[submodule "sm2"] + path = sm2 + url = bogus_url +EOF +git add .gitmodules +commit_file sm2 .gitmodules + test_expect_success 'given commit' ' git diff-index -p --submodule=diff HEAD^ >actual && cat >expected <<-EOF && + diff --git a/.gitmodules b/.gitmodules + new file mode 100644 + index 1234567..89abcde + --- /dev/null + +++ b/.gitmodules + @@ -0,0 +1,3 @@ + +[submodule "sm2"] + +path = sm2 + +url = bogus_url Submodule sm1 $head7...0000000 (submodule deleted) Submodule sm2 0000000...$head9 (new submodule) diff --git a/sm2/foo8 b/sm2/foo8 @@ -729,15 +744,21 @@ test_expect_success 'given commit' ' ' test_expect_success 'setup .git file for sm2' ' - (cd sm2 && - REAL="$(pwd)/../.real" && - mv .git "$REAL" && - echo "gitdir: $REAL" >.git) + git submodule absorbgitdirs sm2 ' test_expect_success 'diff --submodule=diff with .git file' ' git diff --submodule=diff HEAD^ >actual && cat >expected <<-EOF && + diff --git a/.gitmodules b/.gitmodules + new file mode 100644 + index 1234567..89abcde + --- /dev/null + +++ b/.gitmodules + @@ -0,0 +1,3 @@ + +[submodule "sm2"] + +path = sm2 + +url = bogus_url Submodule sm1 $head7...0000000 (submodule deleted) Submodule sm2 0000000...$head9 (new submodule) diff --git a/sm2/foo8 b/sm2/foo8 @@ -758,9 +779,67 @@ test_expect_success 'diff --submodule=diff with .git 
file' ' diff_cmp expected actual ' +mv sm2 sm2-bak + +test_expect_success 'deleted submodule with .git file' ' + git diff-index -p --submodule=diff HEAD >actual && + cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) + Submodule sm2 $head9...0000000 (submodule deleted) + diff --git a/sm2/foo8 b/sm2/foo8 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/foo9 b/sm2/foo9 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + -foo9 + EOF + diff_cmp expected actual +' + +echo submodule-to-blob>sm2 + +test_expect_success 'typechanged(submodule->blob) submodule with .git file' ' + git diff-index -p --submodule=diff HEAD >actual && + cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) + Submodule sm2 $head9...0000000 (submodule deleted) + diff --git a/sm2/foo8 b/sm2/foo8 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/foo9 b/sm2/foo9 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + -foo9 + diff --git a/sm2 b/sm2 + new file mode 100644 + index 1234567..89abcde + --- /dev/null + +++ b/sm2 + @@ -0,0 +1 @@ + +submodule-to-blob + EOF + diff_cmp expected actual +' + +rm sm2 +mv sm2-bak sm2 + test_expect_success 'setup nested submodule' ' - git submodule add -f ./sm2 && - git commit -a -m "add sm2" && git -C sm2 submodule add ../sm2 nested && git -C sm2 commit -a -m "nested sub" && head10=$(git -C sm2 rev-parse --short --verify HEAD) @@ -791,6 +870,7 @@ test_expect_success 'diff --submodule=diff with moved nested submodule HEAD' ' test_expect_success 'diff --submodule=diff recurses into nested submodules' ' cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) Submodule sm2 contains modified content Submodule sm2 $head9..$head10: diff --git a/sm2/.gitmodules b/sm2/.gitmodules @@ -830,4 +910,67 @@ test_expect_success 'diff --submodule=diff recurses into nested submodules' ' diff_cmp expected actual ' +(cd sm2; commit_file nested) +commit_file sm2 +head12=$(cd sm2; git rev-parse --short --verify HEAD) + +mv sm2 sm2-bak + +test_expect_success 'diff --submodule=diff recurses into deleted nested submodules' ' + cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) + Submodule sm2 $head12...0000000 (submodule deleted) + diff --git a/sm2/.gitmodules b/sm2/.gitmodules + deleted file mode 100644 + index 3a816b8..0000000 + --- a/sm2/.gitmodules + +++ /dev/null + @@ -1,3 +0,0 @@ + -[submodule "nested"] + - path = nested + - url = ../sm2 + diff --git a/sm2/foo8 b/sm2/foo8 + deleted file mode 100644 + index db9916b..0000000 + --- a/sm2/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/foo9 b/sm2/foo9 + deleted file mode 100644 + index 9c3b4f6..0000000 + --- a/sm2/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + -foo9 + Submodule nested $head11...0000000 (submodule deleted) + diff --git a/sm2/nested/file b/sm2/nested/file + deleted file mode 100644 + index ca281f5..0000000 + --- a/sm2/nested/file + +++ /dev/null + @@ -1 +0,0 @@ + -nested content + diff --git a/sm2/nested/foo8 b/sm2/nested/foo8 + deleted file mode 100644 + index db9916b..0000000 + --- a/sm2/nested/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/nested/foo9 b/sm2/nested/foo9 + deleted file mode 100644 + index 9c3b4f6..0000000 + --- a/sm2/nested/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + 
-foo9 + EOF + git diff --submodule=diff >actual 2>err && + test_must_be_empty err && + diff_cmp expected actual +' + +mv sm2-bak sm2 + test_done diff --git a/t/t4103-apply-binary.sh b/t/t4103-apply-binary.sh index fad6d3f542..d370ecfe0d 100755 --- a/t/t4103-apply-binary.sh +++ b/t/t4103-apply-binary.sh @@ -158,4 +158,27 @@ test_expect_success 'apply binary -p0 diff' ' test -z "$(git diff --name-status binary -- file3)" ' +test_expect_success 'reject truncated binary diff' ' + do_reset && + + # this length is calculated to get us very close to + # the 8192-byte strbuf we will use to read in the patch. + test-tool genrandom foo 6205 >file1 && + git diff --binary >patch && + + # truncate the patch at the second "literal" line, + # but exclude the trailing newline. We must use perl + # for this, since tools like "sed" cannot reliably + # produce output without the trailing newline. + perl -pe " + if (/^literal/ && \$count++ >= 1) { + chomp; + print; + exit 0; + } + " <patch >patch.trunc && + + do_reset && + test_must_fail git apply patch.trunc +' test_done diff --git a/t/t4108-apply-threeway.sh b/t/t4108-apply-threeway.sh index 65147efdea..cc3aa3314a 100755 --- a/t/t4108-apply-threeway.sh +++ b/t/t4108-apply-threeway.sh @@ -230,4 +230,49 @@ test_expect_success 'apply with --3way --cached and conflicts' ' test_cmp expect.diff actual.diff ' +test_expect_success 'apply binary file patch' ' + git reset --hard main && + cp "$TEST_DIRECTORY/test-binary-1.png" bin.png && + git add bin.png && + git commit -m "add binary file" && + + cp "$TEST_DIRECTORY/test-binary-2.png" bin.png && + + git diff --binary >bin.diff && + git reset --hard && + + # Apply must succeed. + git apply bin.diff +' + +test_expect_success 'apply binary file patch with 3way' ' + git reset --hard main && + cp "$TEST_DIRECTORY/test-binary-1.png" bin.png && + git add bin.png && + git commit -m "add binary file" && + + cp "$TEST_DIRECTORY/test-binary-2.png" bin.png && + + git diff --binary >bin.diff && + git reset --hard && + + # Apply must succeed. + git apply --3way --index bin.diff +' + +test_expect_success 'apply full-index patch with 3way' ' + git reset --hard main && + cp "$TEST_DIRECTORY/test-binary-1.png" bin.png && + git add bin.png && + git commit -m "add binary file" && + + cp "$TEST_DIRECTORY/test-binary-2.png" bin.png && + + git diff --full-index >bin.diff && + git reset --hard && + + # Apply must succeed. 
+ git apply --3way --index bin.diff +' + test_done diff --git a/t/t4151-am-abort.sh b/t/t4151-am-abort.sh index 9d8d3c72e7..2374151662 100755 --- a/t/t4151-am-abort.sh +++ b/t/t4151-am-abort.sh @@ -23,7 +23,13 @@ test_expect_success setup ' test_tick && git commit -a -m $i || return 1 done && + git branch changes && git format-patch --no-numbered initial && + git checkout -b conflicting initial && + echo different >>file-1 && + echo whatever >new-file && + git add file-1 new-file && + git commit -m different && git checkout -b side initial && echo local change >file-2-expect ' @@ -191,4 +197,37 @@ test_expect_success 'am --abort leaves index stat info alone' ' git diff-files --exit-code --quiet ' +test_expect_success 'git am --abort return failed exit status when it fails' ' + test_when_finished "rm -rf file-2/ && git reset --hard && git am --abort" && + git checkout changes && + git format-patch -1 --stdout conflicting >changes.mbox && + test_must_fail git am --3way changes.mbox && + + git rm file-2 && + mkdir file-2 && + echo precious >file-2/somefile && + test_must_fail git am --abort && + test_path_is_dir file-2/ +' + +test_expect_success 'git am --abort cleans relevant files' ' + git checkout changes && + git format-patch -1 --stdout conflicting >changes.mbox && + test_must_fail git am --3way changes.mbox && + + test_path_is_file new-file && + echo further changes >>file-1 && + echo change other file >>file-2 && + + # Abort, and expect the files touched by am to be reverted + git am --abort && + + test_path_is_missing new-file && + + # Files not involved in am operation are left modified + git diff --name-only changes >actual && + test_write_lines file-2 >expect && + test_cmp expect actual +' + test_done diff --git a/t/t4210-log-i18n.sh b/t/t4210-log-i18n.sh index d2dfcf164e..0141f36e33 100755 --- a/t/t4210-log-i18n.sh +++ b/t/t4210-log-i18n.sh @@ -131,4 +131,11 @@ do fi done +test_expect_success 'log shows warning when conversion fails' ' + enc=this-encoding-does-not-exist && + git log -1 --encoding=$enc 2>err && + echo "warning: unable to reencode commit to ${SQ}${enc}${SQ}" >expect && + test_cmp expect err +' + test_done diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh index 5c5e53f0be..e13a884207 100755 --- a/t/t5300-pack-object.sh +++ b/t/t5300-pack-object.sh @@ -34,6 +34,110 @@ test_expect_success 'setup' ' } >expect ' +test_expect_success 'setup pack-object <stdin' ' + git init pack-object-stdin && + test_commit -C pack-object-stdin one && + test_commit -C pack-object-stdin two + +' + +test_expect_success 'pack-object <stdin parsing: basic [|--revs]' ' + cat >in <<-EOF && + $(git -C pack-object-stdin rev-parse one) + EOF + + git -C pack-object-stdin pack-objects basic-stdin <in && + idx=$(echo pack-object-stdin/basic-stdin-*.idx) && + git show-index <"$idx" >actual && + test_line_count = 1 actual && + + git -C pack-object-stdin pack-objects --revs basic-stdin-revs <in && + idx=$(echo pack-object-stdin/basic-stdin-revs-*.idx) && + git show-index <"$idx" >actual && + test_line_count = 3 actual +' + +test_expect_success 'pack-object <stdin parsing: [|--revs] bad line' ' + cat >in <<-EOF && + $(git -C pack-object-stdin rev-parse one) + garbage + $(git -C pack-object-stdin rev-parse two) + EOF + + sed "s/^> //g" >err.expect <<-EOF && + fatal: expected object ID, got garbage: + > garbage + + EOF + test_must_fail git -C pack-object-stdin pack-objects bad-line-stdin <in 2>err.actual && + test_cmp err.expect err.actual && + + cat >err.expect <<-EOF && + fatal: bad revision 
'"'"'garbage'"'"' + EOF + test_must_fail git -C pack-object-stdin pack-objects --revs bad-line-stdin-revs <in 2>err.actual && + test_cmp err.expect err.actual +' + +test_expect_success 'pack-object <stdin parsing: [|--revs] empty line' ' + cat >in <<-EOF && + $(git -C pack-object-stdin rev-parse one) + + $(git -C pack-object-stdin rev-parse two) + EOF + + sed -e "s/^> //g" -e "s/Z$//g" >err.expect <<-EOF && + fatal: expected object ID, got garbage: + > Z + + EOF + test_must_fail git -C pack-object-stdin pack-objects empty-line-stdin <in 2>err.actual && + test_cmp err.expect err.actual && + + git -C pack-object-stdin pack-objects --revs empty-line-stdin-revs <in && + idx=$(echo pack-object-stdin/empty-line-stdin-revs-*.idx) && + git show-index <"$idx" >actual && + test_line_count = 3 actual +' + +test_expect_success 'pack-object <stdin parsing: [|--revs] with --stdin' ' + cat >in <<-EOF && + $(git -C pack-object-stdin rev-parse one) + $(git -C pack-object-stdin rev-parse two) + EOF + + # There is the "--stdin-packs is incompatible with --revs" + # test below, but we should make sure that the revision.c + # --stdin is not picked up + cat >err.expect <<-EOF && + fatal: disallowed abbreviated or ambiguous option '"'"'stdin'"'"' + EOF + test_must_fail git -C pack-object-stdin pack-objects stdin-with-stdin-option --stdin <in 2>err.actual && + test_cmp err.expect err.actual && + + test_must_fail git -C pack-object-stdin pack-objects --stdin --revs stdin-with-stdin-option-revs 2>err.actual <in && + test_cmp err.expect err.actual +' + +test_expect_success 'pack-object <stdin parsing: --stdin-packs handles garbage' ' + cat >in <<-EOF && + $(git -C pack-object-stdin rev-parse one) + $(git -C pack-object-stdin rev-parse two) + EOF + + # That we get "two" and not "one" has to do with OID + # ordering. It happens to be the same here under SHA-1 and + # SHA-256. See commentary in pack-objects.c + cat >err.expect <<-EOF && + fatal: could not find pack '"'"'$(git -C pack-object-stdin rev-parse two)'"'"' + EOF + test_must_fail git \ + -C pack-object-stdin \ + pack-objects stdin-with-stdin-option --stdin-packs \ + <in 2>err.actual && + test_cmp err.expect err.actual +' + # usage: check_deltas <stderr_from_pack_objects> <cmp_op> <nr_deltas> # e.g.: check_deltas stderr -gt 0 check_deltas() { diff --git a/t/t5304-prune.sh b/t/t5304-prune.sh index 7cabb85ca6..8ae314af58 100755 --- a/t/t5304-prune.sh +++ b/t/t5304-prune.sh @@ -291,6 +291,7 @@ test_expect_success 'prune: handle HEAD reflog in multiple worktrees' ' cat ../expected >blob && git add blob && git commit -m "second commit in third" && + git clean -f && # Remove untracked left behind by deleting index git reset --hard HEAD^ ) && git prune --expire=now && diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh index b02838750e..673baa5c3c 100755 --- a/t/t5310-pack-bitmaps.sh +++ b/t/t5310-pack-bitmaps.sh @@ -8,6 +8,10 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME . "$TEST_DIRECTORY"/lib-bundle.sh . "$TEST_DIRECTORY"/lib-bitmap.sh +# t5310 deals only with single-pack bitmaps, so don't write MIDX bitmaps in +# their place. +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 + objpath () { echo ".git/objects/$(echo "$1" | sed -e 's|\(..\)|\1/|')" } @@ -25,93 +29,10 @@ has_any () { grep -Ff "$1" "$2" } -# To ensure the logic for "maximal commits" is exercised, make -# the repository a bit more complicated. 
-# -# other second -# * * -# (99 commits) (99 commits) -# * * -# |\ /| -# | * octo-other octo-second * | -# |/|\_________ ____________/|\| -# | \ \/ __________/ | -# | | ________/\ / | -# * |/ * merge-right * -# | _|__________/ \____________ | -# |/ | \| -# (l1) * * merge-left * (r1) -# | / \________________________ | -# |/ \| -# (l2) * * (r2) -# \___________________________ | -# \| -# * (base) -# -# We only push bits down the first-parent history, which -# makes some of these commits unimportant! -# -# The important part for the maximal commit algorithm is how -# the bitmasks are extended. Assuming starting bit positions -# for second (bit 0) and other (bit 1), the bitmasks at the -# end should be: -# -# second: 1 (maximal, selected) -# other: 01 (maximal, selected) -# (base): 11 (maximal) -# -# This complicated history was important for a previous -# version of the walk that guarantees never walking a -# commit multiple times. That goal might be important -# again, so preserve this complicated case. For now, this -# test will guarantee that the bitmaps are computed -# correctly, even with the repeat calculations. - -test_expect_success 'setup repo with moderate-sized history' ' - test_commit_bulk --id=file 10 && - git branch -M second && - git checkout -b other HEAD~5 && - test_commit_bulk --id=side 10 && - - # add complicated history setup, including merges and - # ambiguous merge-bases - - git checkout -b merge-left other~2 && - git merge second~2 -m "merge-left" && - - git checkout -b merge-right second~1 && - git merge other~1 -m "merge-right" && - - git checkout -b octo-second second && - git merge merge-left merge-right -m "octopus-second" && - - git checkout -b octo-other other && - git merge merge-left merge-right -m "octopus-other" && - - git checkout other && - git merge octo-other -m "pull octopus" && - - git checkout second && - git merge octo-second -m "pull octopus" && - - # Remove these branches so they are not selected - # as bitmap tips - git branch -D merge-left && - git branch -D merge-right && - git branch -D octo-other && - git branch -D octo-second && - - # add padding to make these merges less interesting - # and avoid having them selected for bitmaps - test_commit_bulk --id=file 100 && - git checkout other && - test_commit_bulk --id=side 100 && - git checkout second && - - bitmaptip=$(git rev-parse second) && - blob=$(echo tagged-blob | git hash-object -w --stdin) && - git tag tagged-blob $blob && - git config repack.writebitmaps true +setup_bitmap_history + +test_expect_success 'setup writing bitmaps during repack' ' + git config repack.writeBitmaps true ' test_expect_success 'full repack creates bitmaps' ' @@ -123,109 +44,7 @@ test_expect_success 'full repack creates bitmaps' ' grep "\"key\":\"num_maximal_commits\",\"value\":\"107\"" trace ' -test_expect_success 'rev-list --test-bitmap verifies bitmaps' ' - git rev-list --test-bitmap HEAD -' - -rev_list_tests_head () { - test_expect_success "counting commits via bitmap ($state, $branch)" ' - git rev-list --count $branch >expect && - git rev-list --use-bitmap-index --count $branch >actual && - test_cmp expect actual - ' - - test_expect_success "counting partial commits via bitmap ($state, $branch)" ' - git rev-list --count $branch~5..$branch >expect && - git rev-list --use-bitmap-index --count $branch~5..$branch >actual && - test_cmp expect actual - ' - - test_expect_success "counting commits with limit ($state, $branch)" ' - git rev-list --count -n 1 $branch >expect && - git rev-list --use-bitmap-index --count 
-n 1 $branch >actual && - test_cmp expect actual - ' - - test_expect_success "counting non-linear history ($state, $branch)" ' - git rev-list --count other...second >expect && - git rev-list --use-bitmap-index --count other...second >actual && - test_cmp expect actual - ' - - test_expect_success "counting commits with limiting ($state, $branch)" ' - git rev-list --count $branch -- 1.t >expect && - git rev-list --use-bitmap-index --count $branch -- 1.t >actual && - test_cmp expect actual - ' - - test_expect_success "counting objects via bitmap ($state, $branch)" ' - git rev-list --count --objects $branch >expect && - git rev-list --use-bitmap-index --count --objects $branch >actual && - test_cmp expect actual - ' - - test_expect_success "enumerate commits ($state, $branch)" ' - git rev-list --use-bitmap-index $branch >actual && - git rev-list $branch >expect && - test_bitmap_traversal --no-confirm-bitmaps expect actual - ' - - test_expect_success "enumerate --objects ($state, $branch)" ' - git rev-list --objects --use-bitmap-index $branch >actual && - git rev-list --objects $branch >expect && - test_bitmap_traversal expect actual - ' - - test_expect_success "bitmap --objects handles non-commit objects ($state, $branch)" ' - git rev-list --objects --use-bitmap-index $branch tagged-blob >actual && - grep $blob actual - ' -} - -rev_list_tests () { - state=$1 - - for branch in "second" "other" - do - rev_list_tests_head - done -} - -rev_list_tests 'full bitmap' - -test_expect_success 'clone from bitmapped repository' ' - git clone --no-local --bare . clone.git && - git rev-parse HEAD >expect && - git --git-dir=clone.git rev-parse HEAD >actual && - test_cmp expect actual -' - -test_expect_success 'partial clone from bitmapped repository' ' - test_config uploadpack.allowfilter true && - git clone --no-local --bare --filter=blob:none . partial-clone.git && - ( - cd partial-clone.git && - pack=$(echo objects/pack/*.pack) && - git verify-pack -v "$pack" >have && - awk "/blob/ { print \$1 }" <have >blobs && - # we expect this single blob because of the direct ref - git rev-parse refs/tags/tagged-blob >expect && - test_cmp expect blobs - ) -' - -test_expect_success 'setup further non-bitmapped commits' ' - test_commit_bulk --id=further 10 -' - -rev_list_tests 'partial bitmap' - -test_expect_success 'fetch (partial bitmap)' ' - git --git-dir=clone.git fetch origin second:second && - git rev-parse HEAD >expect && - git --git-dir=clone.git rev-parse HEAD >actual && - test_cmp expect actual -' +basic_bitmap_tests test_expect_success 'incremental repack fails when bitmaps are requested' ' test_commit more-1 && @@ -461,40 +280,6 @@ test_expect_success 'truncated bitmap fails gracefully (cache)' ' test_i18ngrep corrupted.bitmap.index stderr ' -test_expect_success 'enumerating progress counts pack-reused objects' ' - count=$(git rev-list --objects --all --count) && - git repack -adb && - - # check first with only reused objects; confirm that our progress - # showed the right number, and also that we did pack-reuse as expected. - # Check only the final "done" line of the meter (there may be an - # arbitrary number of intermediate lines ending with CR). 
- GIT_PROGRESS_DELAY=0 \ - git pack-objects --all --stdout --progress \ - </dev/null >/dev/null 2>stderr && - grep "Enumerating objects: $count, done" stderr && - grep "pack-reused $count" stderr && - - # now the same but with one non-reused object - git commit --allow-empty -m "an extra commit object" && - GIT_PROGRESS_DELAY=0 \ - git pack-objects --all --stdout --progress \ - </dev/null >/dev/null 2>stderr && - grep "Enumerating objects: $((count+1)), done" stderr && - grep "pack-reused $count" stderr -' - -# have_delta <obj> <expected_base> -# -# Note that because this relies on cat-file, it might find _any_ copy of an -# object in the repository. The caller is responsible for making sure -# there's only one (e.g., via "repack -ad", or having just fetched a copy). -have_delta () { - echo $2 >expect && - echo $1 | git cat-file --batch-check="%(deltabase)" >actual && - test_cmp expect actual -} - # Create a state of history with these properties: # # - refs that allow a client to fetch some new history, while sharing some old diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh index af88f805aa..295c5bd94d 100755 --- a/t/t5318-commit-graph.sh +++ b/t/t5318-commit-graph.sh @@ -5,6 +5,25 @@ test_description='commit graph' GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0 +test_expect_success 'usage' ' + test_expect_code 129 git commit-graph write blah 2>err && + test_expect_code 129 git commit-graph write verify +' + +test_expect_success 'usage shown without sub-command' ' + test_expect_code 129 git commit-graph 2>err && + ! grep error: err +' + +test_expect_success 'usage shown with an error on unknown sub-command' ' + cat >expect <<-\EOF && + error: unrecognized subcommand: unknown + EOF + test_expect_code 129 git commit-graph unknown 2>stderr && + grep error stderr >actual && + test_cmp expect actual +' + test_expect_success 'setup full repo' ' mkdir full && cd "$TRASH_DIRECTORY/full" && diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh index 3d4d9f10c3..bb04f0f23b 100755 --- a/t/t5319-multi-pack-index.sh +++ b/t/t5319-multi-pack-index.sh @@ -201,6 +201,34 @@ test_expect_success 'write midx with twelve packs' ' compare_results_with_midx "twelve packs" +test_expect_success 'multi-pack-index *.rev cleanup with --object-dir' ' + git init repo && + git clone -s repo alternate && + + test_when_finished "rm -rf repo alternate" && + + ( + cd repo && + test_commit base && + git repack -d + ) && + + ours="alternate/.git/objects/pack/multi-pack-index-123.rev" && + theirs="repo/.git/objects/pack/multi-pack-index-abc.rev" && + touch "$ours" "$theirs" && + + ( + cd alternate && + git multi-pack-index --object-dir ../repo/.git/objects write + ) && + + # writing a midx in "repo" should not remove the .rev file in the + # alternate + test_path_is_file repo/.git/objects/pack/multi-pack-index && + test_path_is_file $ours && + test_path_is_missing $theirs +' + test_expect_success 'warn on improper hash version' ' git init --object-format=sha1 sha1 && ( @@ -277,6 +305,23 @@ test_expect_success 'midx picks objects from preferred pack' ' ) ' +test_expect_success 'preferred packs must be non-empty' ' + test_when_finished rm -rf preferred.git && + git init preferred.git && + ( + cd preferred.git && + + test_commit base && + git repack -ad && + + empty="$(git pack-objects $objdir/pack/pack </dev/null)" && + + test_must_fail git multi-pack-index write \ + --preferred-pack=pack-$empty.pack 2>err && + grep "with no objects" err + ) +' + test_expect_success 'verify multi-pack-index success' ' git 
multi-pack-index verify --object-dir=$objdir ' @@ -487,7 +532,8 @@ test_expect_success 'repack preserves multi-pack-index when creating packs' ' compare_results_with_midx "after repack" test_expect_success 'multi-pack-index and pack-bitmap' ' - git -c repack.writeBitmaps=true repack -ad && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -c repack.writeBitmaps=true repack -ad && git multi-pack-index write && git rev-list --test-bitmap HEAD ' @@ -537,7 +583,15 @@ test_expect_success 'force some 64-bit offsets with pack-objects' ' idx64=objects64/pack/test-64-$pack64.idx && chmod u+w $idx64 && corrupt_data $idx64 $(test_oid idxoff) "\02" && - midx64=$(git multi-pack-index --object-dir=objects64 write) && + # objects64 is not a real repository, but can serve as an alternate + # anyway so we can write a MIDX into it + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + ( cd ../objects64 && pwd ) >.git/objects/info/alternates && + midx64=$(git multi-pack-index --object-dir=../objects64 write) + ) && midx_read_expect 1 63 5 objects64 " large-offsets" ' @@ -842,4 +896,9 @@ test_expect_success 'usage shown without sub-command' ' ! test_i18ngrep "unrecognized subcommand" err ' +test_expect_success 'complains when run outside of a repository' ' + nongit test_must_fail git multi-pack-index write 2>err && + grep "not a git repository" err +' + test_done diff --git a/t/t5323-pack-redundant.sh b/t/t5323-pack-redundant.sh index 8b01793845..8dbbcc5e51 100755 --- a/t/t5323-pack-redundant.sh +++ b/t/t5323-pack-redundant.sh @@ -114,9 +114,9 @@ test_expect_success 'setup main repo' ' create_commits_in "$main_repo" A B C D E F G H I J K L M N O P Q R ' -test_expect_success 'master: pack-redundant works with no packfile' ' +test_expect_success 'main: pack-redundant works with no packfile' ' ( - cd "$master_repo" && + cd "$main_repo" && cat >expect <<-EOF && fatal: Zero packs found! EOF diff --git a/t/t5326-multi-pack-bitmaps.sh b/t/t5326-multi-pack-bitmaps.sh new file mode 100755 index 0000000000..4ad7c2c969 --- /dev/null +++ b/t/t5326-multi-pack-bitmaps.sh @@ -0,0 +1,286 @@ +#!/bin/sh + +test_description='exercise basic multi-pack bitmap functionality' +. ./test-lib.sh +. "${TEST_DIRECTORY}/lib-bitmap.sh" + +# We'll be writing our own midx and bitmaps, so avoid getting confused by the +# automatic ones. 
+GIT_TEST_MULTI_PACK_INDEX=0 +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 + +objdir=.git/objects +midx=$objdir/pack/multi-pack-index + +# midx_pack_source <obj> +midx_pack_source () { + test-tool read-midx --show-objects .git/objects | grep "^$1 " | cut -f2 +} + +setup_bitmap_history + +test_expect_success 'enable core.multiPackIndex' ' + git config core.multiPackIndex true +' + +test_expect_success 'create single-pack midx with bitmaps' ' + git repack -ad && + git multi-pack-index write --bitmap && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev +' + +basic_bitmap_tests + +test_expect_success 'create new additional packs' ' + for i in $(test_seq 1 16) + do + test_commit "$i" && + git repack -d || return 1 + done && + + git checkout -b other2 HEAD~8 && + for i in $(test_seq 1 8) + do + test_commit "side-$i" && + git repack -d || return 1 + done && + git checkout second +' + +test_expect_success 'create multi-pack midx with bitmaps' ' + git multi-pack-index write --bitmap && + + ls $objdir/pack/pack-*.pack >packs && + test_line_count = 25 packs && + + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev +' + +basic_bitmap_tests + +test_expect_success '--no-bitmap is respected when bitmaps exist' ' + git multi-pack-index write --bitmap && + + test_commit respect--no-bitmap && + git repack -d && + + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev && + + git multi-pack-index write --no-bitmap && + + test_path_is_file $midx && + test_path_is_missing $midx-$(midx_checksum $objdir).bitmap && + test_path_is_missing $midx-$(midx_checksum $objdir).rev +' + +test_expect_success 'setup midx with base from later pack' ' + # Write a and b so that "a" is a delta on top of base "b", since Git + # prefers to delete contents out of a base rather than add to a shorter + # object. + test_seq 1 128 >a && + test_seq 1 130 >b && + + git add a b && + git commit -m "initial commit" && + + a=$(git rev-parse HEAD:a) && + b=$(git rev-parse HEAD:b) && + + # In the first pack, "a" is stored as a delta to "b". + p1=$(git pack-objects .git/objects/pack/pack <<-EOF + $a + $b + EOF + ) && + + # In the second pack, "a" is missing, and "b" is not a delta nor base to + # any other object. + p2=$(git pack-objects .git/objects/pack/pack <<-EOF + $b + $(git rev-parse HEAD) + $(git rev-parse HEAD^{tree}) + EOF + ) && + + git prune-packed && + # Use the second pack as the preferred source, so that "b" occurs + # earlier in the MIDX object order, rendering "a" unusable for pack + # reuse. + git multi-pack-index write --bitmap --preferred-pack=pack-$p2.idx && + + have_delta $a $b && + test $(midx_pack_source $a) != $(midx_pack_source $b) +' + +rev_list_tests 'full bitmap with backwards delta' + +test_expect_success 'clone with bitmaps enabled' ' + git clone --no-local --bare . 
clone-reverse-delta.git && + test_when_finished "rm -fr clone-reverse-delta.git" && + + git rev-parse HEAD >expect && + git --git-dir=clone-reverse-delta.git rev-parse HEAD >actual && + test_cmp expect actual +' + +bitmap_reuse_tests() { + from=$1 + to=$2 + + test_expect_success "setup pack reuse tests ($from -> $to)" ' + rm -fr repo && + git init repo && + ( + cd repo && + test_commit_bulk 16 && + git tag old-tip && + + git config core.multiPackIndex true && + if test "MIDX" = "$from" + then + git repack -Ad && + git multi-pack-index write --bitmap + else + git repack -Adb + fi + ) + ' + + test_expect_success "build bitmap from existing ($from -> $to)" ' + ( + cd repo && + test_commit_bulk --id=further 16 && + git tag new-tip && + + if test "MIDX" = "$to" + then + git repack -d && + git multi-pack-index write --bitmap + else + git repack -Adb + fi + ) + ' + + test_expect_success "verify resulting bitmaps ($from -> $to)" ' + ( + cd repo && + git for-each-ref && + git rev-list --test-bitmap refs/tags/old-tip && + git rev-list --test-bitmap refs/tags/new-tip + ) + ' +} + +bitmap_reuse_tests 'pack' 'MIDX' +bitmap_reuse_tests 'MIDX' 'pack' +bitmap_reuse_tests 'MIDX' 'MIDX' + +test_expect_success 'missing object closure fails gracefully' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + + test_commit loose && + test_commit packed && + + # Do not pass "--revs"; we want a pack without the "loose" + # commit. + git pack-objects $objdir/pack/pack <<-EOF && + $(git rev-parse packed) + EOF + + test_must_fail git multi-pack-index write --bitmap 2>err && + grep "doesn.t have full closure" err && + test_path_is_missing $midx + ) +' + +test_expect_success 'setup partial bitmaps' ' + test_commit packed && + git repack && + test_commit loose && + git multi-pack-index write --bitmap 2>err && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev +' + +basic_bitmap_tests HEAD~ + +test_expect_success 'removing a MIDX clears stale bitmaps' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + test_commit base && + git repack && + git multi-pack-index write --bitmap && + + # Write a MIDX and bitmap; remove the MIDX but leave the bitmap. + stale_bitmap=$midx-$(midx_checksum $objdir).bitmap && + stale_rev=$midx-$(midx_checksum $objdir).rev && + rm $midx && + + # Then write a new MIDX. 
+ test_commit new && + git repack && + git multi-pack-index write --bitmap && + + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev && + test_path_is_missing $stale_bitmap && + test_path_is_missing $stale_rev + ) +' + +test_expect_success 'pack.preferBitmapTips' ' + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + + test_commit_bulk --message="%s" 103 && + + git log --format="%H" >commits.raw && + sort <commits.raw >commits && + + git log --format="create refs/tags/%s %H" HEAD >refs && + git update-ref --stdin <refs && + + git multi-pack-index write --bitmap && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev && + + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >before && + test_line_count = 1 before && + + perl -ne "printf(\"create refs/tags/include/%d \", $.); print" \ + <before | git update-ref --stdin && + + rm -fr $midx-$(midx_checksum $objdir).bitmap && + rm -fr $midx-$(midx_checksum $objdir).rev && + rm -fr $midx && + + git -c pack.preferBitmapTips=refs/tags/include \ + multi-pack-index write --bitmap && + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >after && + + ! test_cmp before after + ) +' + +test_done diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh index e83b2a6506..a0faf0dd94 100755 --- a/t/t5510-fetch.sh +++ b/t/t5510-fetch.sh @@ -1214,6 +1214,19 @@ test_expect_success '--negotiation-tip understands abbreviated SHA-1' ' check_negotiation_tip ' +test_expect_success '--negotiation-tip rejects missing OIDs' ' + setup_negotiation_tip server server 0 && + test_must_fail git -C client fetch \ + --negotiation-tip=alpha_1 \ + --negotiation-tip=$(test_oid zero) \ + origin alpha_s beta_s 2>err && + cat >fatal-expect <<-EOF && + fatal: the object $(test_oid zero) does not exist +EOF + grep fatal: err >fatal-actual && + test_cmp fatal-expect fatal-actual +' + . 
"$TEST_DIRECTORY"/lib-httpd.sh start_httpd diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh index 0916f76302..4db8edd9c8 100755 --- a/t/t5516-fetch-push.sh +++ b/t/t5516-fetch-push.sh @@ -201,6 +201,7 @@ test_expect_success 'push with negotiation' ' # Without negotiation mk_empty testrepo && git push testrepo $the_first_commit:refs/remotes/origin/first_commit && + test_commit -C testrepo unrelated_commit && git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit && echo now pushing without negotiation && GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 push testrepo refs/heads/main:refs/remotes/origin/main && @@ -210,6 +211,7 @@ test_expect_success 'push with negotiation' ' rm event && mk_empty testrepo && git push testrepo $the_first_commit:refs/remotes/origin/first_commit && + test_commit -C testrepo unrelated_commit && git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit && GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main && grep_wrote 2 event # 1 commit, 1 tree @@ -219,6 +221,7 @@ test_expect_success 'push with negotiation proceeds anyway even if negotiation f rm event && mk_empty testrepo && git push testrepo $the_first_commit:refs/remotes/origin/first_commit && + test_commit -C testrepo unrelated_commit && git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit && GIT_TEST_PROTOCOL_VERSION=0 GIT_TRACE2_EVENT="$(pwd)/event" \ git -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err && @@ -1767,5 +1770,4 @@ test_expect_success 'denyCurrentBranch and worktrees' ' git -C cloned push origin HEAD:new-wt && test_must_fail git -C cloned push --delete origin new-wt ' - test_done diff --git a/t/t5520-pull.sh b/t/t5520-pull.sh index e2c0c51022..672001a18b 100755 --- a/t/t5520-pull.sh +++ b/t/t5520-pull.sh @@ -136,12 +136,12 @@ test_expect_success 'the default remote . should not break explicit pull' ' git reset --hard HEAD^ && echo file >expect && test_cmp expect file && - git pull . second && + git pull --no-rebase . second && echo modified >expect && test_cmp expect file && git reflog -1 >reflog.actual && sed "s/^[0-9a-f][0-9a-f]*/OBJID/" reflog.actual >reflog.fuzzy && - echo "OBJID HEAD@{0}: pull . second: Fast-forward" >reflog.expected && + echo "OBJID HEAD@{0}: pull --no-rebase . second: Fast-forward" >reflog.expected && test_cmp reflog.expected reflog.fuzzy ' @@ -226,7 +226,7 @@ test_expect_success 'fail if the index has unresolved entries' ' test_commit modified2 file && git ls-files -u >unmerged && test_must_be_empty unmerged && - test_must_fail git pull . second && + test_must_fail git pull --no-rebase . 
second && git ls-files -u >unmerged && test_file_not_empty unmerged && cp file expected && @@ -409,37 +409,37 @@ test_expect_success 'pull --rebase --no-autostash & rebase.autostash unset' ' test_expect_success 'pull succeeds with dirty working directory and merge.autostash set' ' test_config merge.autostash true && - test_pull_autostash 2 + test_pull_autostash 2 --no-rebase ' test_expect_success 'pull --autostash & merge.autostash=true' ' test_config merge.autostash true && - test_pull_autostash 2 --autostash + test_pull_autostash 2 --autostash --no-rebase ' test_expect_success 'pull --autostash & merge.autostash=false' ' test_config merge.autostash false && - test_pull_autostash 2 --autostash + test_pull_autostash 2 --autostash --no-rebase ' test_expect_success 'pull --autostash & merge.autostash unset' ' test_unconfig merge.autostash && - test_pull_autostash 2 --autostash + test_pull_autostash 2 --autostash --no-rebase ' test_expect_success 'pull --no-autostash & merge.autostash=true' ' test_config merge.autostash true && - test_pull_autostash_fail --no-autostash + test_pull_autostash_fail --no-autostash --no-rebase ' test_expect_success 'pull --no-autostash & merge.autostash=false' ' test_config merge.autostash false && - test_pull_autostash_fail --no-autostash + test_pull_autostash_fail --no-autostash --no-rebase ' test_expect_success 'pull --no-autostash & merge.autostash unset' ' test_unconfig merge.autostash && - test_pull_autostash_fail --no-autostash + test_pull_autostash_fail --no-autostash --no-rebase ' test_expect_success 'pull.rebase' ' diff --git a/t/t5521-pull-options.sh b/t/t5521-pull-options.sh index 63a688bdbf..7601c919fd 100755 --- a/t/t5521-pull-options.sh +++ b/t/t5521-pull-options.sh @@ -113,7 +113,7 @@ test_expect_success 'git pull --force' ' git pull two && test_commit A && git branch -f origin && - git pull --all --force + git pull --no-rebase --all --force ) ' @@ -179,7 +179,7 @@ test_expect_success 'git pull --allow-unrelated-histories' ' ( cd dst && test_must_fail git pull ../src side && - git pull --allow-unrelated-histories ../src side + git pull --no-rebase --allow-unrelated-histories ../src side ) ' diff --git a/t/t5524-pull-msg.sh b/t/t5524-pull-msg.sh index c278adaa5a..b2be3605f5 100755 --- a/t/t5524-pull-msg.sh +++ b/t/t5524-pull-msg.sh @@ -28,7 +28,7 @@ test_expect_success setup ' test_expect_success pull ' ( cd cloned && - git pull --log && + git pull --no-rebase --log && git log -2 && git cat-file commit HEAD >result && grep Dollar result @@ -41,7 +41,7 @@ test_expect_success '--log=1 limits shortlog length' ' git reset --hard HEAD^ && test "$(cat afile)" = original && test "$(cat bfile)" = added && - git pull --log=1 && + git pull --no-rebase --log=1 && git log -3 && git cat-file commit HEAD >result && grep Dollar result && diff --git a/t/t5549-fetch-push-http.sh b/t/t5549-fetch-push-http.sh new file mode 100755 index 0000000000..2cdebcb735 --- /dev/null +++ b/t/t5549-fetch-push-http.sh @@ -0,0 +1,72 @@ +#!/bin/sh + +test_description='fetch/push functionality using the HTTP protocol' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh +. 
"$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" +URI="$HTTPD_URL/smart/server" + +grep_wrote () { + object_count=$1 + file_name=$2 + grep 'write_pack_file/wrote.*"value":"'$1'"' $2 +} + +setup_client_and_server () { + git init client && + test_when_finished 'rm -rf client' && + test_commit -C client first_commit && + test_commit -C client second_commit && + + git init "$SERVER" && + test_when_finished 'rm -rf "$SERVER"' && + test_config -C "$SERVER" http.receivepack true && + test_commit -C "$SERVER" unrelated_commit && + git -C client push "$URI" first_commit:refs/remotes/origin/first_commit && + git -C "$SERVER" config receive.hideRefs refs/remotes/origin/first_commit +} + +test_expect_success 'push without negotiation (for comparing object counts with the next test)' ' + setup_client_and_server && + + GIT_TRACE2_EVENT="$(pwd)/event" git -C client -c protocol.version=2 \ + push "$URI" refs/heads/main:refs/remotes/origin/main && + test_when_finished "rm -f event" && + grep_wrote 6 event # 2 commits, 2 trees, 2 blobs +' + +test_expect_success 'push with negotiation' ' + setup_client_and_server && + + GIT_TRACE2_EVENT="$(pwd)/event" git -C client -c protocol.version=2 -c push.negotiate=1 \ + push "$URI" refs/heads/main:refs/remotes/origin/main && + test_when_finished "rm -f event" && + grep_wrote 3 event # 1 commit, 1 tree, 1 blob +' + +test_expect_success 'push with negotiation proceeds anyway even if negotiation fails' ' + setup_client_and_server && + + # Use protocol v0 to make negotiation fail (because protocol v0 does + # not support the "wait-for-done" capability, which is required for + # push negotiation) + GIT_TEST_PROTOCOL_VERSION=0 GIT_TRACE2_EVENT="$(pwd)/event" git -C client -c push.negotiate=1 \ + push "$URI" refs/heads/main:refs/remotes/origin/main 2>err && + test_when_finished "rm -f event" && + grep_wrote 6 event && # 2 commits, 2 trees, 2 blobs + + cat >warning-expect <<-EOF && + warning: --negotiate-only requires protocol v2 + warning: push negotiation failed; proceeding anyway with push +EOF + grep warning: err >warning-actual && + test_cmp warning-expect warning-actual +' + +test_done diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh index 4f87d90c5b..f92c79c132 100755 --- a/t/t5551-http-fetch-smart.sh +++ b/t/t5551-http-fetch-smart.sh @@ -196,8 +196,8 @@ test_expect_success 'GIT_TRACE_CURL redacts auth details' ' # Ensure that there is no "Basic" followed by a base64 string, but that # the auth details are redacted - ! grep "Authorization: Basic [0-9a-zA-Z+/]" trace && - grep "Authorization: Basic <redacted>" trace + ! grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace && + grep -i "Authorization: Basic <redacted>" trace ' test_expect_success 'GIT_CURL_VERBOSE redacts auth details' ' @@ -208,8 +208,8 @@ test_expect_success 'GIT_CURL_VERBOSE redacts auth details' ' # Ensure that there is no "Basic" followed by a base64 string, but that # the auth details are redacted - ! grep "Authorization: Basic [0-9a-zA-Z+/]" trace && - grep "Authorization: Basic <redacted>" trace + ! 
grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace && + grep -i "Authorization: Basic <redacted>" trace ' test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_REDACT=0' ' @@ -219,7 +219,7 @@ test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_RE git clone --bare "$HTTPD_URL/auth/smart/repo.git" redact-auth && expect_askpass both user@host && - grep "Authorization: Basic [0-9a-zA-Z+/]" trace + grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace ' test_expect_success 'disable dumb http on server' ' @@ -474,10 +474,10 @@ test_expect_success 'cookies are redacted by default' ' GIT_TRACE_CURL=true \ git -c "http.cookieFile=$(pwd)/cookies" clone \ $HTTPD_URL/smart/repo.git clone 2>err && - grep "Cookie:.*Foo=<redacted>" err && - grep "Cookie:.*Bar=<redacted>" err && - ! grep "Cookie:.*Foo=1" err && - ! grep "Cookie:.*Bar=2" err + grep -i "Cookie:.*Foo=<redacted>" err && + grep -i "Cookie:.*Bar=<redacted>" err && + ! grep -i "Cookie:.*Foo=1" err && + ! grep -i "Cookie:.*Bar=2" err ' test_expect_success 'empty values of cookies are also redacted' ' @@ -486,7 +486,7 @@ test_expect_success 'empty values of cookies are also redacted' ' GIT_TRACE_CURL=true \ git -c "http.cookieFile=$(pwd)/cookies" clone \ $HTTPD_URL/smart/repo.git clone 2>err && - grep "Cookie:.*Foo=<redacted>" err + grep -i "Cookie:.*Foo=<redacted>" err ' test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' ' @@ -496,8 +496,8 @@ test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' ' GIT_TRACE_REDACT=0 GIT_TRACE_CURL=true \ git -c "http.cookieFile=$(pwd)/cookies" clone \ $HTTPD_URL/smart/repo.git clone 2>err && - grep "Cookie:.*Foo=1" err && - grep "Cookie:.*Bar=2" err + grep -i "Cookie:.*Foo=1" err && + grep -i "Cookie:.*Bar=2" err ' test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' ' @@ -558,4 +558,13 @@ test_expect_success 'http auth forgets bogus credentials' ' expect_askpass both user@host ' +test_expect_success 'client falls back from v2 to v0 to match server' ' + GIT_TRACE_PACKET=$PWD/trace \ + GIT_TEST_PROTOCOL_VERSION=2 \ + git clone $HTTPD_URL/smart_v0/repo.git repo-v0 && + # check for v0; there the HEAD symref is communicated in the capability + # line; v2 uses a different syntax on each ref advertisement line + grep symref=HEAD:refs/heads/ trace +' + test_done diff --git a/t/t5553-set-upstream.sh b/t/t5553-set-upstream.sh index b1d614ce18..9c12c0f8c3 100755 --- a/t/t5553-set-upstream.sh +++ b/t/t5553-set-upstream.sh @@ -108,27 +108,27 @@ test_expect_success 'setup commit on main and other pull' ' test_expect_success 'pull --set-upstream upstream main sets branch main but not other' ' clear_config main other && - git pull --set-upstream upstream main && + git pull --no-rebase --set-upstream upstream main && check_config main upstream refs/heads/main && check_config_missing other ' test_expect_success 'pull --set-upstream main:other2 does not set the branch other2' ' clear_config other2 && - git pull --set-upstream upstream main:other2 && + git pull --no-rebase --set-upstream upstream main:other2 && check_config_missing other2 ' test_expect_success 'pull --set-upstream upstream other sets branch main' ' clear_config main other && - git pull --set-upstream upstream other && + git pull --no-rebase --set-upstream upstream other && check_config main upstream refs/heads/other && check_config_missing other ' test_expect_success 'pull --set-upstream upstream tag does not set the tag' ' clear_config three && - git pull --tags 
--set-upstream upstream three && + git pull --no-rebase --tags --set-upstream upstream three && check_config_missing three ' @@ -144,16 +144,16 @@ test_expect_success 'pull --set-upstream http://nosuchdomain.example.com fails w test_expect_success 'pull --set-upstream upstream HEAD sets branch HEAD' ' clear_config main other && - git pull --set-upstream upstream HEAD && + git pull --no-rebase --set-upstream upstream HEAD && check_config main upstream HEAD && git checkout other && - git pull --set-upstream upstream HEAD && + git pull --no-rebase --set-upstream upstream HEAD && check_config other upstream HEAD ' test_expect_success 'pull --set-upstream upstream with more than one branch does nothing' ' clear_config main three && - git pull --set-upstream upstream main three && + git pull --no-rebase --set-upstream upstream main three && check_config_missing main && check_config_missing three ' diff --git a/t/t5555-http-smart-common.sh b/t/t5555-http-smart-common.sh new file mode 100755 index 0000000000..49faf5e283 --- /dev/null +++ b/t/t5555-http-smart-common.sh @@ -0,0 +1,161 @@ +#!/bin/sh + +test_description='test functionality common to smart fetch & push' + +. ./test-lib.sh + +test_expect_success 'setup' ' + test_commit --no-tag initial +' + +test_expect_success 'git upload-pack --http-backend-info-refs and --advertise-refs are aliased' ' + git upload-pack --http-backend-info-refs . >expected 2>err.expected && + git upload-pack --advertise-refs . >actual 2>err.actual && + test_cmp err.expected err.actual && + test_cmp expected actual +' + +test_expect_success 'git receive-pack --http-backend-info-refs and --advertise-refs are aliased' ' + git receive-pack --http-backend-info-refs . >expected 2>err.expected && + git receive-pack --advertise-refs . >actual 2>err.actual && + test_cmp err.expected err.actual && + test_cmp expected actual +' + +test_expect_success 'git upload-pack --advertise-refs' ' + cat >expect <<-EOF && + $(git rev-parse HEAD) HEAD + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + # We only care about GIT_PROTOCOL, not GIT_TEST_PROTOCOL_VERSION + sane_unset GIT_PROTOCOL && + GIT_TEST_PROTOCOL_VERSION=2 \ + git upload-pack --advertise-refs . >out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect && + + # The --advertise-refs alias works + git upload-pack --advertise-refs . >out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git upload-pack --advertise-refs: v0' ' + # With no specified protocol + cat >expect <<-EOF && + $(git rev-parse HEAD) HEAD + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + git upload-pack --advertise-refs . >out 2>err && + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect && + + # With explicit v0 + GIT_PROTOCOL=version=0 \ + git upload-pack --advertise-refs . >out 2>err && + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect + +' + +test_expect_success 'git receive-pack --advertise-refs: v0' ' + # With no specified protocol + cat >expect <<-EOF && + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + git receive-pack --advertise-refs . >out 2>err && + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect && + + # With explicit v0 + GIT_PROTOCOL=version=0 \ + git receive-pack --advertise-refs . 
>out 2>err && + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect + +' + +test_expect_success 'git upload-pack --advertise-refs: v1' ' + # With no specified protocol + cat >expect <<-EOF && + version 1 + $(git rev-parse HEAD) HEAD + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + GIT_PROTOCOL=version=1 \ + git upload-pack --advertise-refs . >out && + + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git receive-pack --advertise-refs: v1' ' + # With no specified protocol + cat >expect <<-EOF && + version 1 + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + GIT_PROTOCOL=version=1 \ + git receive-pack --advertise-refs . >out && + + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git upload-pack --advertise-refs: v2' ' + cat >expect <<-EOF && + version 2 + agent=FAKE + ls-refs=unborn + fetch=shallow wait-for-done + server-option + object-format=$(test_oid algo) + object-info + 0000 + EOF + + GIT_PROTOCOL=version=2 \ + GIT_USER_AGENT=FAKE \ + git upload-pack --advertise-refs . >out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git receive-pack --advertise-refs: v2' ' + # There is no v2 yet for receive-pack, implicit v0 + cat >expect <<-EOF && + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + GIT_PROTOCOL=version=2 \ + git receive-pack --advertise-refs . >out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect +' + +test_done diff --git a/t/t5562/invoke-with-content-length.pl b/t/t5562/invoke-with-content-length.pl index 0943474af2..718dd9b49d 100644 --- a/t/t5562/invoke-with-content-length.pl +++ b/t/t5562/invoke-with-content-length.pl @@ -13,11 +13,6 @@ my $body_data; defined read($body_fh, $body_data, $body_size) or die "Cannot read $body_filename: $!"; close($body_fh); -my $exited = 0; -$SIG{"CHLD"} = sub { - $exited = 1; -}; - # write data my $pid = open(my $out, "|-", @command); { @@ -29,8 +24,13 @@ my $pid = open(my $out, "|-", @command); } print $out $body_data or die "Cannot write data: $!"; -sleep 60; # is interrupted by SIGCHLD -if (!$exited) { - close($out); +$SIG{ALRM} = sub { + kill 'KILL', $pid; die "Command did not exit after reading whole body"; +}; +alarm 60; + +my $ret = waitpid($pid, 0); +if ($ret != $pid) { + die "confusing return from waitpid: $ret"; } diff --git a/t/t5582-fetch-negative-refspec.sh b/t/t5582-fetch-negative-refspec.sh index e5d2e79ad3..7a80e47c2b 100755 --- a/t/t5582-fetch-negative-refspec.sh +++ b/t/t5582-fetch-negative-refspec.sh @@ -105,7 +105,6 @@ test_expect_success "fetch with negative pattern refspec does not expand prefix" ' test_expect_success "fetch with negative refspec avoids duplicate conflict" ' - cd "$D" && ( cd one && git branch dups/a && diff --git a/t/t5604-clone-reference.sh b/t/t5604-clone-reference.sh index e845d621f6..24340e6d56 100755 --- a/t/t5604-clone-reference.sh +++ b/t/t5604-clone-reference.sh @@ -87,7 +87,7 @@ test_expect_success 'updating origin' ' ' test_expect_success 'pulling changes from origin' ' - git -C C pull origin + git -C C pull --no-rebase origin ' # the 2 local objects are commit and tree from the merge @@ -96,7 +96,7 @@ test_expect_success 'that alternate to origin gets used' ' ' test_expect_success 'pulling changes 
from origin' ' - git -C D pull origin + git -C D pull --no-rebase origin ' # the 5 local objects are expected; file3 blob, commit in A to add it diff --git a/t/t5606-clone-options.sh b/t/t5606-clone-options.sh index 3a595c0f82..d822153e4d 100755 --- a/t/t5606-clone-options.sh +++ b/t/t5606-clone-options.sh @@ -16,6 +16,18 @@ test_expect_success 'setup' ' ' +test_expect_success 'submodule.stickyRecursiveClone flag manipulates submodule.recurse value' ' + + test_config_global submodule.stickyRecursiveClone true && + git clone --recurse-submodules parent clone_recurse_true && + test_cmp_config -C clone_recurse_true true submodule.recurse && + + test_config_global submodule.stickyRecursiveClone false && + git clone --recurse-submodules parent clone_recurse_false && + test_expect_code 1 git -C clone_recurse_false config --get submodule.recurse + +' + test_expect_success 'clone -o' ' git clone -o foo parent clone-o && diff --git a/t/t5607-clone-bundle.sh b/t/t5607-clone-bundle.sh index ed0d911e95..51705aa86a 100755 --- a/t/t5607-clone-bundle.sh +++ b/t/t5607-clone-bundle.sh @@ -91,7 +91,8 @@ test_expect_success 'ridiculously long subject in boundary' ' git fetch long-subject-bundle.bdl && - if ! test_have_prereq SHA1 + algo=$(test_oid algo) && + if test "$algo" != sha1 then echo "@object-format=sha256" fi >expect && @@ -100,7 +101,7 @@ test_expect_success 'ridiculously long subject in boundary' ' $(git rev-parse HEAD) HEAD EOF - if test_have_prereq SHA1 + if test "$algo" = sha1 then head -n 3 long-subject-bundle.bdl else diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh index 930721f053..aa1827d841 100755 --- a/t/t5701-git-serve.sh +++ b/t/t5701-git-serve.sh @@ -72,6 +72,37 @@ test_expect_success 'request invalid command' ' test_i18ngrep "invalid command" err ' +test_expect_success 'request capability as command' ' + test-tool pkt-line pack >in <<-EOF && + command=agent + object-format=$(test_oid algo) + 0000 + EOF + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep invalid.command.*agent err +' + +test_expect_success 'request command as capability' ' + test-tool pkt-line pack >in <<-EOF && + command=ls-refs + object-format=$(test_oid algo) + fetch + 0000 + EOF + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep unknown.capability err +' + +test_expect_success 'requested command is command=value' ' + test-tool pkt-line pack >in <<-EOF && + command=ls-refs=whatever + object-format=$(test_oid algo) + 0000 + EOF + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep invalid.command.*ls-refs=whatever err +' + test_expect_success 'wrong object-format' ' test-tool pkt-line pack >in <<-EOF && command=fetch @@ -116,6 +147,19 @@ test_expect_success 'basics of ls-refs' ' test_cmp expect actual ' +test_expect_success 'ls-refs complains about unknown options' ' + test-tool pkt-line pack >in <<-EOF && + command=ls-refs + object-format=$(test_oid algo) + 0001 + no-such-arg + 0000 + EOF + + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep unexpected.line.*no-such-arg err +' + test_expect_success 'basic ref-prefixes' ' test-tool pkt-line pack >in <<-EOF && command=ls-refs @@ -158,6 +202,37 @@ test_expect_success 'refs/heads prefix' ' test_cmp expect actual ' +test_expect_success 'ignore very large set of prefixes' ' + # generate a large number of ref-prefixes that we expect + # to match nothing; the value here exceeds TOO_MANY_PREFIXES + # from ls-refs.c. 
+ { + echo command=ls-refs && + echo object-format=$(test_oid algo) && + echo 0001 && + perl -le "print \"ref-prefix refs/heads/\$_\" for (1..65536)" && + echo 0000 + } | + test-tool pkt-line pack >in && + + # and then confirm that we see unmatched prefixes anyway (i.e., + # that the prefix was not applied). + cat >expect <<-EOF && + $(git rev-parse HEAD) HEAD + $(git rev-parse refs/heads/dev) refs/heads/dev + $(git rev-parse refs/heads/main) refs/heads/main + $(git rev-parse refs/heads/release) refs/heads/release + $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag + $(git rev-parse refs/tags/one) refs/tags/one + $(git rev-parse refs/tags/two) refs/tags/two + 0000 + EOF + + test-tool serve-v2 --stateless-rpc <in >out && + test-tool pkt-line unpack <out >actual && + test_cmp expect actual +' + test_expect_success 'peel parameter' ' test-tool pkt-line pack >in <<-EOF && command=ls-refs diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh index 78de1ff2ad..d527cf6c49 100755 --- a/t/t5702-protocol-v2.sh +++ b/t/t5702-protocol-v2.sh @@ -27,9 +27,9 @@ test_expect_success 'list refs with git:// using protocol v2' ' ls-remote --symref "$GIT_DAEMON_URL/parent" >actual && # Client requested to use protocol v2 - grep "git> .*\\\0\\\0version=2\\\0$" log && + grep "ls-remote> .*\\\0\\\0version=2\\\0$" log && # Server responded using protocol v2 - grep "git< version 2" log && + grep "ls-remote< version 2" log && git ls-remote --symref "$GIT_DAEMON_URL/parent" >expect && test_cmp expect actual @@ -151,7 +151,7 @@ test_expect_success 'list refs with file:// using protocol v2' ' ls-remote --symref "file://$(pwd)/file_parent" >actual && # Server responded using protocol v2 - grep "git< version 2" log && + grep "ls-remote< version 2" log && git ls-remote --symref "file://$(pwd)/file_parent" >expect && test_cmp expect actual @@ -237,6 +237,19 @@ test_expect_success '...but not if explicitly forbidden by config' ' ! grep "refs/heads/mydefaultbranch" file_empty_child/.git/HEAD ' +test_expect_success 'bare clone propagates empty default branch' ' + test_when_finished "rm -rf file_empty_parent file_empty_child.git" && + + GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME= \ + git -c init.defaultBranch=mydefaultbranch init file_empty_parent && + + GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME= \ + git -c init.defaultBranch=main -c protocol.version=2 \ + clone --bare \ + "file://$(pwd)/file_empty_parent" file_empty_child.git && + grep "refs/heads/mydefaultbranch" file_empty_child.git/HEAD +' + test_expect_success 'fetch with file:// using protocol v2' ' test_when_finished "rm -f log" && diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh index e9e471621d..220098523a 100755 --- a/t/t5703-upload-pack-ref-in-want.sh +++ b/t/t5703-upload-pack-ref-in-want.sh @@ -40,6 +40,30 @@ write_command () { fi } +# Write a complete fetch command to stdout, suitable for use with `test-tool +# pkt-line`. "want-ref", "want", and "have" lines are read from stdin. 
+# +# Examples: +# +# write_fetch_command <<-EOF +# want-ref refs/heads/main +# have $(git rev-parse a) +# EOF +# +# write_fetch_command <<-EOF +# want $(git rev-parse b) +# have $(git rev-parse a) +# EOF +# +write_fetch_command () { + write_command fetch && + echo "0001" && + echo "no-progress" && + cat && + echo "done" && + echo "0000" +} + # c(o/foo) d(o/bar) # \ / # b e(baz) f(main) @@ -77,15 +101,11 @@ test_expect_success 'config controls ref-in-want advertisement' ' ' test_expect_success 'invalid want-ref line' ' - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/non-existent - done - 0000 EOF + test-tool pkt-line pack <pkt >in && test_must_fail test-tool serve-v2 --stateless-rpc 2>out <in && grep "unknown ref" out ' @@ -97,16 +117,11 @@ test_expect_success 'basic want-ref' ' EOF git rev-parse f >expected_commits && - oid=$(git rev-parse a) && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/main - have $oid - done - 0000 + have $(git rev-parse a) EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -121,17 +136,12 @@ test_expect_success 'multiple want-ref lines' ' EOF git rev-parse c d >expected_commits && - oid=$(git rev-parse b) && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/o/foo want-ref refs/heads/o/bar - have $oid - done - 0000 + have $(git rev-parse b) EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -144,16 +154,12 @@ test_expect_success 'mix want and want-ref' ' EOF git rev-parse e f >expected_commits && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/main want $(git rev-parse e) have $(git rev-parse a) - done - 0000 EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -166,16 +172,11 @@ test_expect_success 'want-ref with ref we already have commit for' ' EOF >expected_commits && - oid=$(git rev-parse c) && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/o/foo - have $oid - done - 0000 + have $(git rev-parse c) EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -298,6 +299,141 @@ test_expect_success 'fetching with wildcard that matches multiple refs' ' grep "want-ref refs/heads/o/bar" log ' +REPO="$(pwd)/repo-ns" + +test_expect_success 'setup namespaced repo' ' + ( + git init -b main "$REPO" && + cd "$REPO" && + test_commit a && + test_commit b && + git checkout a && + test_commit c && + git checkout a && + test_commit d && + git update-ref refs/heads/ns-no b && + git update-ref refs/namespaces/ns/refs/heads/ns-yes c && + git update-ref refs/namespaces/ns/refs/heads/hidden d + ) && + git -C "$REPO" config uploadpack.allowRefInWant true +' + +test_expect_success 'with namespace: want-ref is considered relative to namespace' ' + wanted_ref=refs/heads/ns-yes && + + oid=$(git -C "$REPO" rev-parse "refs/namespaces/ns/$wanted_ref") && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C "$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF 
&& + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + GIT_NAMESPACE=ns test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + +test_expect_success 'with namespace: want-ref outside namespace is unknown' ' + wanted_ref=refs/heads/ns-no && + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test_must_fail env GIT_NAMESPACE=ns \ + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + grep "unknown ref" out +' + +# Cross-check refs/heads/ns-no indeed exists +test_expect_success 'without namespace: want-ref outside namespace succeeds' ' + wanted_ref=refs/heads/ns-no && + + oid=$(git -C "$REPO" rev-parse $wanted_ref) && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C "$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + +test_expect_success 'with namespace: hideRefs is matched, relative to namespace' ' + wanted_ref=refs/heads/hidden && + git -C "$REPO" config transfer.hideRefs $wanted_ref && + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test_must_fail env GIT_NAMESPACE=ns \ + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + grep "unknown ref" out +' + +# Cross-check refs/heads/hidden indeed exists +test_expect_success 'with namespace: want-ref succeeds if hideRefs is removed' ' + wanted_ref=refs/heads/hidden && + git -C "$REPO" config --unset transfer.hideRefs $wanted_ref && + + oid=$(git -C "$REPO" rev-parse "refs/namespaces/ns/$wanted_ref") && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C "$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + GIT_NAMESPACE=ns test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + +test_expect_success 'without namespace: relative hideRefs does not match' ' + wanted_ref=refs/namespaces/ns/refs/heads/hidden && + git -C "$REPO" config transfer.hideRefs refs/heads/hidden && + + oid=$(git -C "$REPO" rev-parse $wanted_ref) && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C "$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + + . 
"$TEST_DIRECTORY"/lib-httpd.sh start_httpd diff --git a/t/t5705-session-id-in-capabilities.sh b/t/t5705-session-id-in-capabilities.sh index f1d189d5bc..eb8c79aafd 100755 --- a/t/t5705-session-id-in-capabilities.sh +++ b/t/t5705-session-id-in-capabilities.sh @@ -73,6 +73,17 @@ do grep \"key\":\"server-sid\" tr2-client-events && grep \"key\":\"client-sid\" tr2-server-events ' + + test_expect_success "client & server log negotiated version (v${PROTO})" ' + test_when_finished "rm -rf local tr2-client-events tr2-server-events" && + cp -r "$LOCAL_PRISTINE" local && + GIT_TRACE2_EVENT="$(pwd)/tr2-client-events" \ + git -c protocol.version=$PROTO -C local fetch \ + --upload-pack "GIT_TRACE2_EVENT=\"$(pwd)/tr2-server-events\" git-upload-pack" \ + origin && + grep \"key\":\"negotiated-version\",\"value\":\"$PROTO\" tr2-client-events && + grep \"key\":\"negotiated-version\",\"value\":\"$PROTO\" tr2-server-events + ' done test_done diff --git a/t/t6000-rev-list-misc.sh b/t/t6000-rev-list-misc.sh index 12def7bcbf..ef849e5bc8 100755 --- a/t/t6000-rev-list-misc.sh +++ b/t/t6000-rev-list-misc.sh @@ -169,4 +169,35 @@ test_expect_success 'rev-list --count --objects' ' test_line_count = $count actual ' +test_expect_success 'rev-list --unsorted-input results in different sorting' ' + git rev-list --unsorted-input HEAD HEAD~ >first && + git rev-list --unsorted-input HEAD~ HEAD >second && + ! test_cmp first second && + sort first >first.sorted && + sort second >second.sorted && + test_cmp first.sorted second.sorted +' + +test_expect_success 'rev-list --unsorted-input incompatible with --no-walk' ' + cat >expect <<-EOF && + fatal: --no-walk is incompatible with --unsorted-input + EOF + test_must_fail git rev-list --unsorted-input --no-walk HEAD 2>error && + test_cmp expect error && + test_must_fail git rev-list --unsorted-input --no-walk=sorted HEAD 2>error && + test_cmp expect error && + test_must_fail git rev-list --unsorted-input --no-walk=unsorted HEAD 2>error && + test_cmp expect error && + + cat >expect <<-EOF && + fatal: --unsorted-input is incompatible with --no-walk + EOF + test_must_fail git rev-list --no-walk --unsorted-input HEAD 2>error && + test_cmp expect error && + test_must_fail git rev-list --no-walk=sorted --unsorted-input HEAD 2>error && + test_cmp expect error && + test_must_fail git rev-list --no-walk=unsorted --unsorted-input HEAD 2>error && + test_cmp expect error +' + test_done diff --git a/t/t6001-rev-list-graft.sh b/t/t6001-rev-list-graft.sh index 90d93f77fa..7294147334 100755 --- a/t/t6001-rev-list-graft.sh +++ b/t/t6001-rev-list-graft.sh @@ -23,7 +23,8 @@ test_expect_success setup ' git commit -a -m "Third in one history." 
&& A2=$(git rev-parse --verify HEAD) && - rm -f .git/refs/heads/main .git/index && + git update-ref -d refs/heads/main && + rm -f .git/index && echo >fileA fileA again && echo >subdir/fileB fileB again && diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh index a1baf4e451..1be85d064e 100755 --- a/t/t6030-bisect-porcelain.sh +++ b/t/t6030-bisect-porcelain.sh @@ -962,4 +962,22 @@ test_expect_success 'bisect handles annotated tags' ' grep "$bad is the first bad commit" output ' +test_expect_success 'bisect run fails with exit code equals or greater than 128' ' + write_script test_script.sh <<-\EOF && + exit 128 + EOF + test_must_fail git bisect run ./test_script.sh && + write_script test_script.sh <<-\EOF && + exit 255 + EOF + test_must_fail git bisect run ./test_script.sh +' + +test_expect_success 'bisect visualize with a filename with dash and space' ' + echo "My test line" >>"./-hello 2" && + git add -- "./-hello 2" && + git commit --quiet -m "Add test line" -- "./-hello 2" && + git bisect visualize -p -- "-hello 2" +' + test_done diff --git a/t/t6050-replace.sh b/t/t6050-replace.sh index e33d512ec1..2500acc2ef 100755 --- a/t/t6050-replace.sh +++ b/t/t6050-replace.sh @@ -132,7 +132,7 @@ tagger T A Gger <> 0 +0000 EOF test_expect_success 'tag replaced commit' ' - git mktag <tag.sig >.git/refs/tags/mytag + git update-ref refs/tags/mytag $(git mktag <tag.sig) ' test_expect_success '"git fsck" works' ' diff --git a/t/t6120-describe.sh b/t/t6120-describe.sh index 1a501ee09e..bae2419150 100755 --- a/t/t6120-describe.sh +++ b/t/t6120-describe.sh @@ -107,7 +107,8 @@ test_expect_success 'describe --contains defaults to HEAD without commit-ish' ' check_describe tags/A --all A^0 test_expect_success 'renaming tag A to Q locally produces a warning' " - mv .git/refs/tags/A .git/refs/tags/Q && + git update-ref refs/tags/Q $(git rev-parse refs/tags/A) && + git update-ref -d refs/tags/A && git describe HEAD 2>err >out && cat >expected <<-\EOF && warning: tag 'Q' is externally known as 'A' @@ -135,7 +136,8 @@ test_expect_success 'abbrev=0 will not break misplaced tag (2)' ' ' test_expect_success 'rename tag Q back to A' ' - mv .git/refs/tags/Q .git/refs/tags/A + git update-ref refs/tags/A $(git rev-parse refs/tags/Q) && + git update-ref -d refs/tags/Q ' test_expect_success 'pack tag refs' 'git pack-refs' diff --git a/t/t6300-for-each-ref.sh b/t/t6300-for-each-ref.sh index 9e0214076b..80679d5e12 100755 --- a/t/t6300-for-each-ref.sh +++ b/t/t6300-for-each-ref.sh @@ -59,18 +59,25 @@ test_atom() { # Automatically test "contents:size" atom after testing "contents" if test "$2" = "contents" then - case $(git cat-file -t "$ref") in - tag) - # We cannot use $3 as it expects sanitize_pgp to run - expect=$(git cat-file tag $ref | tail -n +6 | wc -c) ;; - tree | blob) - expect='' ;; - commit) - expect=$(printf '%s' "$3" | wc -c) ;; - esac - # Leave $expect unquoted to lose possible leading whitespaces - echo $expect >expected + # for commit leg, $3 is changed there + expect=$(printf '%s' "$3" | wc -c) test_expect_${4:-success} $PREREQ "basic atom: $1 contents:size" ' + type=$(git cat-file -t "$ref") && + case $type in + tag) + # We cannot use $3 as it expects sanitize_pgp to run + git cat-file tag $ref >out && + expect=$(tail -n +6 out | wc -c) && + rm -f out ;; + tree | blob) + expect="" ;; + commit) + : "use the calculated expect" ;; + *) + BUG "unknown object type" ;; + esac && + # Leave $expect unquoted to lose possible leading whitespaces + echo $expect >expected && git for-each-ref 
--format="%(contents:size)" "$ref" >actual && test_cmp expected actual ' @@ -130,6 +137,8 @@ test_atom head parent:short=10 '' test_atom head numparent 0 test_atom head object '' test_atom head type '' +test_atom head raw "$(git cat-file commit refs/heads/main) +" test_atom head '*objectname' '' test_atom head '*objecttype' '' test_atom head author 'A U Thor <author@example.com> 1151968724 +0200' @@ -221,6 +230,15 @@ test_atom tag contents 'Tagging at 1151968727 ' test_atom tag HEAD ' ' +test_expect_success 'basic atom: refs/tags/testtag *raw' ' + git cat-file commit refs/tags/testtag^{} >expected && + git for-each-ref --format="%(*raw)" refs/tags/testtag >actual && + sanitize_pgp <expected >expected.clean && + echo >>expected.clean && + sanitize_pgp <actual >actual.clean && + test_cmp expected.clean actual.clean +' + test_expect_success 'Check invalid atoms names are errors' ' test_must_fail git for-each-ref --format="%(INVALID)" refs/heads ' @@ -686,6 +704,15 @@ test_atom refs/tags/signed-empty contents:body '' test_atom refs/tags/signed-empty contents:signature "$sig" test_atom refs/tags/signed-empty contents "$sig" +test_expect_success GPG 'basic atom: refs/tags/signed-empty raw' ' + git cat-file tag refs/tags/signed-empty >expected && + git for-each-ref --format="%(raw)" refs/tags/signed-empty >actual && + sanitize_pgp <expected >expected.clean && + echo >>expected.clean && + sanitize_pgp <actual >actual.clean && + test_cmp expected.clean actual.clean +' + test_atom refs/tags/signed-short subject 'subject line' test_atom refs/tags/signed-short subject:sanitize 'subject-line' test_atom refs/tags/signed-short contents:subject 'subject line' @@ -695,6 +722,15 @@ test_atom refs/tags/signed-short contents:signature "$sig" test_atom refs/tags/signed-short contents "subject line $sig" +test_expect_success GPG 'basic atom: refs/tags/signed-short raw' ' + git cat-file tag refs/tags/signed-short >expected && + git for-each-ref --format="%(raw)" refs/tags/signed-short >actual && + sanitize_pgp <expected >expected.clean && + echo >>expected.clean && + sanitize_pgp <actual >actual.clean && + test_cmp expected.clean actual.clean +' + test_atom refs/tags/signed-long subject 'subject line' test_atom refs/tags/signed-long subject:sanitize 'subject-line' test_atom refs/tags/signed-long contents:subject 'subject line' @@ -708,6 +744,15 @@ test_atom refs/tags/signed-long contents "subject line body contents $sig" +test_expect_success GPG 'basic atom: refs/tags/signed-long raw' ' + git cat-file tag refs/tags/signed-long >expected && + git for-each-ref --format="%(raw)" refs/tags/signed-long >actual && + sanitize_pgp <expected >expected.clean && + echo >>expected.clean && + sanitize_pgp <actual >actual.clean && + test_cmp expected.clean actual.clean +' + test_expect_success 'set up refs pointing to tree and blob' ' git update-ref refs/mytrees/first refs/heads/main^{tree} && git update-ref refs/myblobs/first refs/heads/main:one @@ -720,6 +765,16 @@ test_atom refs/mytrees/first contents:body "" test_atom refs/mytrees/first contents:signature "" test_atom refs/mytrees/first contents "" +test_expect_success 'basic atom: refs/mytrees/first raw' ' + git cat-file tree refs/mytrees/first >expected && + echo >>expected && + git for-each-ref --format="%(raw)" refs/mytrees/first >actual && + test_cmp expected actual && + git cat-file -s refs/mytrees/first >expected && + git for-each-ref --format="%(raw:size)" refs/mytrees/first >actual && + test_cmp expected actual +' + test_atom refs/myblobs/first subject "" 
test_atom refs/myblobs/first contents:subject "" test_atom refs/myblobs/first body "" @@ -727,6 +782,189 @@ test_atom refs/myblobs/first contents:body "" test_atom refs/myblobs/first contents:signature "" test_atom refs/myblobs/first contents "" +test_expect_success 'basic atom: refs/myblobs/first raw' ' + git cat-file blob refs/myblobs/first >expected && + echo >>expected && + git for-each-ref --format="%(raw)" refs/myblobs/first >actual && + test_cmp expected actual && + git cat-file -s refs/myblobs/first >expected && + git for-each-ref --format="%(raw:size)" refs/myblobs/first >actual && + test_cmp expected actual +' + +test_expect_success 'set up refs pointing to binary blob' ' + printf "a\0b\0c" >blob1 && + printf "a\0c\0b" >blob2 && + printf "\0a\0b\0c" >blob3 && + printf "abc" >blob4 && + printf "\0 \0 \0 " >blob5 && + printf "\0 \0a\0 " >blob6 && + printf " " >blob7 && + >blob8 && + obj=$(git hash-object -w blob1) && + git update-ref refs/myblobs/blob1 "$obj" && + obj=$(git hash-object -w blob2) && + git update-ref refs/myblobs/blob2 "$obj" && + obj=$(git hash-object -w blob3) && + git update-ref refs/myblobs/blob3 "$obj" && + obj=$(git hash-object -w blob4) && + git update-ref refs/myblobs/blob4 "$obj" && + obj=$(git hash-object -w blob5) && + git update-ref refs/myblobs/blob5 "$obj" && + obj=$(git hash-object -w blob6) && + git update-ref refs/myblobs/blob6 "$obj" && + obj=$(git hash-object -w blob7) && + git update-ref refs/myblobs/blob7 "$obj" && + obj=$(git hash-object -w blob8) && + git update-ref refs/myblobs/blob8 "$obj" +' + +test_expect_success 'Verify sorts with raw' ' + cat >expected <<-EOF && + refs/myblobs/blob8 + refs/myblobs/blob5 + refs/myblobs/blob6 + refs/myblobs/blob3 + refs/myblobs/blob7 + refs/mytrees/first + refs/myblobs/first + refs/myblobs/blob1 + refs/myblobs/blob2 + refs/myblobs/blob4 + refs/heads/main + EOF + git for-each-ref --format="%(refname)" --sort=raw \ + refs/heads/main refs/myblobs/ refs/mytrees/first >actual && + test_cmp expected actual +' + +test_expect_success 'Verify sorts with raw:size' ' + cat >expected <<-EOF && + refs/myblobs/blob8 + refs/myblobs/first + refs/myblobs/blob7 + refs/heads/main + refs/myblobs/blob4 + refs/myblobs/blob1 + refs/myblobs/blob2 + refs/myblobs/blob3 + refs/myblobs/blob5 + refs/myblobs/blob6 + refs/mytrees/first + EOF + git for-each-ref --format="%(refname)" --sort=raw:size \ + refs/heads/main refs/myblobs/ refs/mytrees/first >actual && + test_cmp expected actual +' + +test_expect_success 'validate raw atom with %(if:equals)' ' + cat >expected <<-EOF && + not equals + not equals + not equals + not equals + not equals + not equals + refs/myblobs/blob4 + not equals + not equals + not equals + not equals + not equals + EOF + git for-each-ref --format="%(if:equals=abc)%(raw)%(then)%(refname)%(else)not equals%(end)" \ + refs/myblobs/ refs/heads/ >actual && + test_cmp expected actual +' + +test_expect_success 'validate raw atom with %(if:notequals)' ' + cat >expected <<-EOF && + refs/heads/ambiguous + refs/heads/main + refs/heads/newtag + refs/myblobs/blob1 + refs/myblobs/blob2 + refs/myblobs/blob3 + equals + refs/myblobs/blob5 + refs/myblobs/blob6 + refs/myblobs/blob7 + refs/myblobs/blob8 + refs/myblobs/first + EOF + git for-each-ref --format="%(if:notequals=abc)%(raw)%(then)%(refname)%(else)equals%(end)" \ + refs/myblobs/ refs/heads/ >actual && + test_cmp expected actual +' + +test_expect_success 'empty raw refs with %(if)' ' + cat >expected <<-EOF && + refs/myblobs/blob1 not empty + refs/myblobs/blob2 not empty + 
refs/myblobs/blob3 not empty + refs/myblobs/blob4 not empty + refs/myblobs/blob5 not empty + refs/myblobs/blob6 not empty + refs/myblobs/blob7 empty + refs/myblobs/blob8 empty + refs/myblobs/first not empty + EOF + git for-each-ref --format="%(refname) %(if)%(raw)%(then)not empty%(else)empty%(end)" \ + refs/myblobs/ >actual && + test_cmp expected actual +' + +test_expect_success '%(raw) with --python must fail' ' + test_must_fail git for-each-ref --format="%(raw)" --python +' + +test_expect_success '%(raw) with --tcl must fail' ' + test_must_fail git for-each-ref --format="%(raw)" --tcl +' + +test_expect_success '%(raw) with --perl' ' + git for-each-ref --format="\$name= %(raw); +print \"\$name\"" refs/myblobs/blob1 --perl | perl >actual && + cmp blob1 actual && + git for-each-ref --format="\$name= %(raw); +print \"\$name\"" refs/myblobs/blob3 --perl | perl >actual && + cmp blob3 actual && + git for-each-ref --format="\$name= %(raw); +print \"\$name\"" refs/myblobs/blob8 --perl | perl >actual && + cmp blob8 actual && + git for-each-ref --format="\$name= %(raw); +print \"\$name\"" refs/myblobs/first --perl | perl >actual && + cmp one actual && + git cat-file tree refs/mytrees/first > expected && + git for-each-ref --format="\$name= %(raw); +print \"\$name\"" refs/mytrees/first --perl | perl >actual && + cmp expected actual +' + +test_expect_success '%(raw) with --shell must fail' ' + test_must_fail git for-each-ref --format="%(raw)" --shell +' + +test_expect_success '%(raw) with --shell and --sort=raw must fail' ' + test_must_fail git for-each-ref --format="%(raw)" --sort=raw --shell +' + +test_expect_success '%(raw:size) with --shell' ' + git for-each-ref --format="%(raw:size)" | while read line + do + echo "'\''$line'\''" >>expect + done && + git for-each-ref --format="%(raw:size)" --shell >actual && + test_cmp expect actual +' + +test_expect_success 'for-each-ref --format compare with cat-file --batch' ' + git rev-parse refs/mytrees/first | git cat-file --batch >expected && + git for-each-ref --format="%(objectname) %(objecttype) %(objectsize) +%(raw)" refs/mytrees/first >actual && + test_cmp expected actual +' + test_expect_success 'set up multiple-sort tags' ' for when in 100000 200000 do @@ -980,6 +1218,10 @@ test_expect_success 'basic atom: head contents:trailers' ' test_cmp expect actual.clean ' +test_expect_success 'basic atom: rest must fail' ' + test_must_fail git for-each-ref --format="%(rest)" refs/heads/main +' + test_expect_success 'trailer parsing not fooled by --- line' ' git commit --allow-empty -F - <<-\EOF && this is the subject diff --git a/t/t6402-merge-rename.sh b/t/t6402-merge-rename.sh index 3da2896e3b..3a32b1a45c 100755 --- a/t/t6402-merge-rename.sh +++ b/t/t6402-merge-rename.sh @@ -103,7 +103,7 @@ test_expect_success 'setup' ' test_expect_success 'pull renaming branch into unrenaming one' \ ' git show-branch && - test_expect_code 1 git pull . white && + test_expect_code 1 git pull --no-rebase . white && git ls-files -s && test_stdout_line_count = 3 git ls-files -u B && test_stdout_line_count = 1 git ls-files -s N && @@ -119,7 +119,7 @@ test_expect_success 'pull renaming branch into another renaming one' \ rm -f B && git reset --hard && git checkout red && - test_expect_code 1 git pull . white && + test_expect_code 1 git pull --no-rebase . 
white && test_stdout_line_count = 3 git ls-files -u B && test_stdout_line_count = 1 git ls-files -s N && sed -ne "/^g/{ @@ -133,7 +133,7 @@ test_expect_success 'pull unrenaming branch into renaming one' \ ' git reset --hard && git show-branch && - test_expect_code 1 git pull . main && + test_expect_code 1 git pull --no-rebase . main && test_stdout_line_count = 3 git ls-files -u B && test_stdout_line_count = 1 git ls-files -s N && sed -ne "/^g/{ @@ -147,7 +147,7 @@ test_expect_success 'pull conflicting renames' \ ' git reset --hard && git show-branch && - test_expect_code 1 git pull . blue && + test_expect_code 1 git pull --no-rebase . blue && test_stdout_line_count = 1 git ls-files -u A && test_stdout_line_count = 1 git ls-files -u B && test_stdout_line_count = 1 git ls-files -u C && @@ -163,7 +163,7 @@ test_expect_success 'interference with untracked working tree file' ' git reset --hard && git show-branch && echo >A this file should not matter && - test_expect_code 1 git pull . white && + test_expect_code 1 git pull --no-rebase . white && test_path_is_file A ' @@ -173,7 +173,7 @@ test_expect_success 'interference with untracked working tree file' ' git show-branch && rm -f A && echo >A this file should not matter && - test_expect_code 1 git pull . red && + test_expect_code 1 git pull --no-rebase . red && test_path_is_file A ' @@ -183,7 +183,7 @@ test_expect_success 'interference with untracked working tree file' ' git checkout -f main && git tag -f anchor && git show-branch && - git pull . yellow && + git pull --no-rebase . yellow && test_path_is_missing M && git reset --hard anchor ' @@ -210,7 +210,7 @@ test_expect_success 'updated working tree file should prevent the merge' ' echo >>M one line addition && cat M >M.saved && git update-index M && - test_expect_code 128 git pull . yellow && + test_expect_code 128 git pull --no-rebase . yellow && test_cmp M M.saved && rm -f M.saved ' @@ -222,7 +222,7 @@ test_expect_success 'interference with untracked working tree file' ' git tag -f anchor && git show-branch && echo >M this file should not matter && - git pull . main && + git pull --no-rebase . main && test_path_is_file M && ! 
{ git ls-files -s | diff --git a/t/t6409-merge-subtree.sh b/t/t6409-merge-subtree.sh index d406b2343c..ba7890ec52 100755 --- a/t/t6409-merge-subtree.sh +++ b/t/t6409-merge-subtree.sh @@ -100,7 +100,7 @@ test_expect_success 'merge update' ' git checkout -b topic_2 && git commit -m "update git-gui" && cd ../git && - git pull -s subtree gui topic_2 && + git pull --no-rebase -s subtree gui topic_2 && git ls-files -s >actual && ( echo "100644 $o3 0 git-gui/git-gui.sh" && @@ -129,7 +129,7 @@ test_expect_success 'initial ambiguous subtree' ' test_expect_success 'merge using explicit' ' cd ../git && git reset --hard topic_2 && - git pull -Xsubtree=git-gui gui topic_2 && + git pull --no-rebase -Xsubtree=git-gui gui topic_2 && git ls-files -s >actual && ( echo "100644 $o3 0 git-gui/git-gui.sh" && @@ -142,7 +142,7 @@ test_expect_success 'merge using explicit' ' test_expect_success 'merge2 using explicit' ' cd ../git && git reset --hard topic_2 && - git pull -Xsubtree=git-gui2 gui topic_2 && + git pull --no-rebase -Xsubtree=git-gui2 gui topic_2 && git ls-files -s >actual && ( echo "100644 $o1 0 git-gui/git-gui.sh" && diff --git a/t/t6415-merge-dir-to-symlink.sh b/t/t6415-merge-dir-to-symlink.sh index 2ce104aca7..2655e295f5 100755 --- a/t/t6415-merge-dir-to-symlink.sh +++ b/t/t6415-merge-dir-to-symlink.sh @@ -25,7 +25,8 @@ test_expect_success 'checkout does not clobber untracked symlink' ' git reset --hard main && git rm --cached a/b && git commit -m "untracked symlink remains" && - test_must_fail git checkout start^0 + test_must_fail git checkout start^0 && + git clean -fd # Do not leave the untracked symlink in the way ' test_expect_success 'a/b-2/c/d is kept when clobbering symlink b' ' @@ -34,7 +35,8 @@ test_expect_success 'a/b-2/c/d is kept when clobbering symlink b' ' git rm --cached a/b && git commit -m "untracked symlink remains" && git checkout -f start^0 && - test_path_is_file a/b-2/c/d + test_path_is_file a/b-2/c/d && + git clean -fd # Do not leave the untracked symlink in the way ' test_expect_success 'checkout should not have deleted a/b-2/c/d' ' diff --git a/t/t6417-merge-ours-theirs.sh b/t/t6417-merge-ours-theirs.sh index ac9aee9a66..ec065d6a65 100755 --- a/t/t6417-merge-ours-theirs.sh +++ b/t/t6417-merge-ours-theirs.sh @@ -69,11 +69,11 @@ test_expect_success 'binary file with -Xours/-Xtheirs' ' ' test_expect_success 'pull passes -X to underlying merge' ' - git reset --hard main && git pull -s recursive -Xours . side && - git reset --hard main && git pull -s recursive -X ours . side && - git reset --hard main && git pull -s recursive -Xtheirs . side && - git reset --hard main && git pull -s recursive -X theirs . side && - git reset --hard main && test_must_fail git pull -s recursive -X bork . side + git reset --hard main && git pull --no-rebase -s recursive -Xours . side && + git reset --hard main && git pull --no-rebase -s recursive -X ours . side && + git reset --hard main && git pull --no-rebase -s recursive -Xtheirs . side && + git reset --hard main && git pull --no-rebase -s recursive -X theirs . side && + git reset --hard main && test_must_fail git pull --no-rebase -s recursive -X bork . 
side ' test_expect_success SYMLINKS 'symlink with -Xours/-Xtheirs' ' diff --git a/t/t6424-merge-unrelated-index-changes.sh b/t/t6424-merge-unrelated-index-changes.sh index 5e3779ebc9..89dd544f38 100755 --- a/t/t6424-merge-unrelated-index-changes.sh +++ b/t/t6424-merge-unrelated-index-changes.sh @@ -132,6 +132,7 @@ test_expect_success 'merge-recursive, when index==head but head!=HEAD' ' # Make index match B git diff C B -- | git apply --cached && + test_when_finished "git clean -fd" && # Do not leave untracked around # Merge B & F, with B as "head" git merge-recursive A -- B F > out && test_i18ngrep "Already up to date" out diff --git a/t/t6430-merge-recursive.sh b/t/t6430-merge-recursive.sh index ffcc01fe65..a0efe7cb6d 100755 --- a/t/t6430-merge-recursive.sh +++ b/t/t6430-merge-recursive.sh @@ -718,7 +718,9 @@ test_expect_success 'merge-recursive remembers the names of all base trees' ' # merge-recursive prints in reverse order, but we do not care sort <trees >expect && sed -n "s/^virtual //p" out | sort >actual && - test_cmp expect actual + test_cmp expect actual && + + git clean -fd ' test_expect_success 'merge-recursive internal merge resolves to the sameness' ' diff --git a/t/t6436-merge-overwrite.sh b/t/t6436-merge-overwrite.sh index 84b4aacf49..c0b7bd7c3f 100755 --- a/t/t6436-merge-overwrite.sh +++ b/t/t6436-merge-overwrite.sh @@ -68,7 +68,8 @@ test_expect_success 'will not overwrite removed file' ' git commit -m "rm c1.c" && cp important c1.c && test_must_fail git merge c1a && - test_cmp important c1.c + test_cmp important c1.c && + rm c1.c # Do not leave untracked file in way of future tests ' test_expect_success 'will not overwrite re-added file' ' diff --git a/t/t6500-gc.sh b/t/t6500-gc.sh index 10c7ae7f09..c2021267f2 100755 --- a/t/t6500-gc.sh +++ b/t/t6500-gc.sh @@ -241,7 +241,7 @@ test_expect_success 'background auto gc respects lock for all operations' ' # create a ref whose loose presence we can use to detect a pack-refs run git update-ref refs/heads/should-be-loose HEAD && - test_path_is_file .git/refs/heads/should-be-loose && + (ls -1 .git/refs/heads .git/reftable >expect || true) && # now fake a concurrent gc that holds the lock; we can use our # shell pid so that it looks valid. 
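Editorial aside, not part of the patch: the two t6500-gc.sh hunks above and below replace a files-backend-only check (a loose ref file under .git/refs/heads) with a before/after snapshot of whatever ref storage is present, so the test keeps working with a reftable backend as well. A minimal sketch of the same idea, using a hypothetical helper name and only the two paths that appear in the hunks:

	# Hypothetical helper, not in the patch: list whichever ref storage
	# exists; either path may be missing, hence the "|| true", mirroring
	# the "(ls -1 ... || true)" construct used in the hunks.
	snapshot_ref_storage () {
		ls -1 .git/refs/heads .git/reftable || true
	}

	snapshot_ref_storage >expect &&
	run_and_wait_for_auto_gc &&
	snapshot_ref_storage >actual &&
	test_cmp expect actual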
@@ -258,7 +258,8 @@ test_expect_success 'background auto gc respects lock for all operations' ' # our gc should exit zero without doing anything run_and_wait_for_auto_gc && - test_path_is_file .git/refs/heads/should-be-loose + (ls -1 .git/refs/heads .git/reftable >actual || true) && + test_cmp expect actual ' # DO NOT leave a detached auto gc process running near the end of the diff --git a/t/t7004-tag.sh b/t/t7004-tag.sh index 2f72c5c688..082be85dff 100755 --- a/t/t7004-tag.sh +++ b/t/t7004-tag.sh @@ -1998,6 +1998,10 @@ test_expect_success '--format should list tags as per format given' ' test_cmp expect actual ' +test_expect_success 'git tag -l with --format="%(rest)" must fail' ' + test_must_fail git tag -l --format="%(rest)" "v1*" +' + test_expect_success "set up color tests" ' echo "<RED>v1.0<RESET>" >expect.color && echo "v1.0" >expect.bare && diff --git a/t/t7030-verify-tag.sh b/t/t7030-verify-tag.sh index 3cefde9602..10faa64515 100755 --- a/t/t7030-verify-tag.sh +++ b/t/t7030-verify-tag.sh @@ -194,6 +194,10 @@ test_expect_success GPG 'verifying tag with --format' ' test_cmp expect actual ' +test_expect_success GPG 'verifying tag with --format="%(rest)" must fail' ' + test_must_fail git verify-tag --format="%(rest)" "fourth-signed" +' + test_expect_success GPG 'verifying a forged tag with --format should fail silently' ' test_must_fail git verify-tag --format="tagname : %(tag)" $(cat forged1.tag) >actual-forged && test_must_be_empty actual-forged diff --git a/t/t7064-wtstatus-pv2.sh b/t/t7064-wtstatus-pv2.sh index 4613882caf..eeb0534163 100755 --- a/t/t7064-wtstatus-pv2.sh +++ b/t/t7064-wtstatus-pv2.sh @@ -373,10 +373,7 @@ test_expect_success 'verify upstream fields in branch header' ' ## Test upstream-gone case. Fake this by pointing ## origin/initial-branch at a non-existing commit. - OLD=$(git rev-parse origin/initial-branch) && - NEW=$ZERO_OID && - mv .git/packed-refs .git/old-packed-refs && - sed "s/$OLD/$NEW/g" <.git/old-packed-refs >.git/packed-refs && + git update-ref -d refs/remotes/origin/initial-branch && HUF=$(git rev-parse HEAD) && diff --git a/t/t7201-co.sh b/t/t7201-co.sh index 7f6e23a4bb..b7ba1c3268 100755 --- a/t/t7201-co.sh +++ b/t/t7201-co.sh @@ -585,6 +585,7 @@ test_expect_success 'checkout --conflict=diff3' ' ' test_expect_success 'failing checkout -b should not break working tree' ' + git clean -fd && # Remove untracked files in the way git reset --hard main && git symbolic-ref HEAD refs/heads/main && test_must_fail git checkout -b renamer side^ && diff --git a/t/t7500-commit-template-squash-signoff.sh b/t/t7500-commit-template-squash-signoff.sh index 54c2082acb..8dd0f98812 100755 --- a/t/t7500-commit-template-squash-signoff.sh +++ b/t/t7500-commit-template-squash-signoff.sh @@ -270,7 +270,7 @@ EOF test_expect_success 'commit --fixup provides correct one-line commit message' ' commit_for_rebase_autosquash_setup && - git commit --fixup HEAD~1 && + EDITOR="echo ignored >>" git commit --fixup HEAD~1 && commit_msg_is "fixup! target message subject line" ' @@ -281,6 +281,13 @@ test_expect_success 'commit --fixup -m"something" -m"extra"' ' extra" ' +test_expect_success 'commit --fixup --edit' ' + commit_for_rebase_autosquash_setup && + EDITOR="printf \"something\nextra\" >>" git commit --fixup HEAD~1 --edit && + commit_msg_is "fixup! 
target message subject linesomething +extra" +' + get_commit_msg () { rev="$1" && git log -1 --pretty=format:"%B" "$rev" diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh index deea88d443..f1463197b9 100755 --- a/t/t7519-status-fsmonitor.sh +++ b/t/t7519-status-fsmonitor.sh @@ -389,43 +389,47 @@ test_expect_success 'status succeeds after staging/unstaging' ' # If "!" is supplied, then we verify that we do not call ensure_full_index # during a call to 'git status'. Otherwise, we verify that we _do_ call it. check_sparse_index_behavior () { - git status --porcelain=v2 >expect && - git sparse-checkout init --cone --sparse-index && - git sparse-checkout set dir1 dir2 && + git -C full status --porcelain=v2 >expect && GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ - git status --porcelain=v2 >actual && + git -C sparse status --porcelain=v2 >actual && test_region $1 index ensure_full_index trace2.txt && test_region fsm_hook query trace2.txt && test_cmp expect actual && - rm trace2.txt && - git sparse-checkout disable + rm trace2.txt } test_expect_success 'status succeeds with sparse index' ' - git reset --hard && + git clone . full && + git clone --sparse . sparse && + git -C sparse sparse-checkout init --cone --sparse-index && + git -C sparse sparse-checkout set dir1 dir2 && - test_config core.fsmonitor "$TEST_DIRECTORY/t7519/fsmonitor-all" && - check_sparse_index_behavior ! && - - write_script .git/hooks/fsmonitor-test<<-\EOF && + write_script .git/hooks/fsmonitor-test <<-\EOF && printf "last_update_token\0" EOF - git config core.fsmonitor .git/hooks/fsmonitor-test && + git -C full config core.fsmonitor ../.git/hooks/fsmonitor-test && + git -C sparse config core.fsmonitor ../.git/hooks/fsmonitor-test && check_sparse_index_behavior ! && - write_script .git/hooks/fsmonitor-test<<-\EOF && + write_script .git/hooks/fsmonitor-test <<-\EOF && printf "last_update_token\0" printf "dir1/modified\0" EOF check_sparse_index_behavior ! && - cp -r dir1 dir1a && - git add dir1a && - git commit -m "add dir1a" && + git -C sparse sparse-checkout add dir1a && + + for repo in full sparse + do + cp -r $repo/dir1 $repo/dir1a && + git -C $repo add dir1a && + git -C $repo commit -m "add dir1a" || return 1 + done && + git -C sparse sparse-checkout set dir1 dir2 && # This one modifies outside the sparse-checkout definition # and hence we expect to expand the sparse-index. - write_script .git/hooks/fsmonitor-test<<-\EOF && + write_script .git/hooks/fsmonitor-test <<-\EOF && printf "last_update_token\0" printf "dir1a/modified\0" EOF diff --git a/t/t7600-merge.sh b/t/t7600-merge.sh index 2ef39d3088..c773e30b3f 100755 --- a/t/t7600-merge.sh +++ b/t/t7600-merge.sh @@ -717,6 +717,7 @@ test_expect_success 'failed fast-forward merge with --autostash' ' git reset --hard c0 && git merge-file file file.orig file.5 && cp file.5 other && + test_when_finished "rm other" && test_must_fail git merge --autostash c1 2>err && test_i18ngrep "Applied autostash." err && test_cmp file.5 file diff --git a/t/t7601-merge-pull-config.sh b/t/t7601-merge-pull-config.sh index 52e8ccc933..1f652f433e 100755 --- a/t/t7601-merge-pull-config.sh +++ b/t/t7601-merge-pull-config.sh @@ -27,120 +27,324 @@ test_expect_success 'setup' ' git tag c3 ' -test_expect_success 'pull.rebase not set' ' +test_expect_success 'pull.rebase not set, ff possible' ' git reset --hard c0 && git pull . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! 
"You have divergent branches" err ' test_expect_success 'pull.rebase not set and pull.ff=true' ' git reset --hard c0 && test_config pull.ff true && git pull . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and pull.ff=false' ' git reset --hard c0 && test_config pull.ff false && git pull . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and pull.ff=only' ' git reset --hard c0 && test_config pull.ff only && git pull . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --rebase given' ' git reset --hard c0 && git pull --rebase . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --no-rebase given' ' git reset --hard c0 && git pull --no-rebase . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --ff given' ' git reset --hard c0 && git pull --ff . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --no-ff given' ' git reset --hard c0 && git pull --no-ff . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --ff-only given' ' git reset --hard c0 && git pull --ff-only . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set (not-fast-forward)' ' git reset --hard c2 && - git -c color.advice=always pull . c1 2>err && + test_must_fail git -c color.advice=always pull . c1 2>err && test_decode_color <err >decoded && test_i18ngrep "<YELLOW>hint: " decoded && - test_i18ngrep "Pulling without specifying how to reconcile" decoded + test_i18ngrep "You have divergent branches" decoded ' test_expect_success 'pull.rebase not set and pull.ff=true (not-fast-forward)' ' git reset --hard c2 && test_config pull.ff true && git pull . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and pull.ff=false (not-fast-forward)' ' git reset --hard c2 && test_config pull.ff false && git pull . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and pull.ff=only (not-fast-forward)' ' git reset --hard c2 && test_config pull.ff only && test_must_fail git pull . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --rebase given (not-fast-forward)' ' git reset --hard c2 && git pull --rebase . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! 
"You have divergent branches" err ' test_expect_success 'pull.rebase not set and --no-rebase given (not-fast-forward)' ' git reset --hard c2 && git pull --no-rebase . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --ff given (not-fast-forward)' ' git reset --hard c2 && git pull --ff . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --no-ff given (not-fast-forward)' ' git reset --hard c2 && git pull --no-ff . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err ' test_expect_success 'pull.rebase not set and --ff-only given (not-fast-forward)' ' git reset --hard c2 && test_must_fail git pull --ff-only . c1 2>err && - test_i18ngrep ! "Pulling without specifying how to reconcile" err + test_i18ngrep ! "You have divergent branches" err +' + +test_does_rebase () { + git reset --hard c2 && + git "$@" . c1 && + # Check that we actually did a rebase + git rev-list --count HEAD >actual && + git rev-list --merges --count HEAD >>actual && + test_write_lines 3 0 >expect && + test_cmp expect actual && + rm actual expect +} + +# Prefers merge over fast-forward +test_does_merge_when_ff_possible () { + git reset --hard c0 && + git "$@" . c1 && + # Check that we actually did a merge + git rev-list --count HEAD >actual && + git rev-list --merges --count HEAD >>actual && + test_write_lines 3 1 >expect && + test_cmp expect actual && + rm actual expect +} + +# Prefers fast-forward over merge or rebase +test_does_fast_forward () { + git reset --hard c0 && + git "$@" . c1 && + + # Check that we did not get any merges + git rev-list --count HEAD >actual && + git rev-list --merges --count HEAD >>actual && + test_write_lines 2 0 >expect && + test_cmp expect actual && + + # Check that we ended up at c1 + git rev-parse HEAD >actual && + git rev-parse c1^{commit} >expect && + test_cmp actual expect && + + # Remove temporary files + rm actual expect +} + +# Doesn't fail when fast-forward not possible; does a merge +test_falls_back_to_full_merge () { + git reset --hard c2 && + git "$@" . c1 && + # Check that we actually did a merge + git rev-list --count HEAD >actual && + git rev-list --merges --count HEAD >>actual && + test_write_lines 4 1 >expect && + test_cmp expect actual && + rm actual expect +} + +# Attempts fast forward, which is impossible, and bails +test_attempts_fast_forward () { + git reset --hard c2 && + test_must_fail git "$@" . 
c1 2>err && + test_i18ngrep "Not possible to fast-forward, aborting" err +} + +# +# Group 1: Interaction of --ff-only with --[no-]rebase +# (And related interaction of pull.ff=only with pull.rebase) +# +test_expect_success '--ff-only overrides --rebase' ' + test_attempts_fast_forward pull --rebase --ff-only +' + +test_expect_success '--ff-only overrides --rebase even if first' ' + test_attempts_fast_forward pull --ff-only --rebase +' + +test_expect_success '--ff-only overrides --no-rebase' ' + test_attempts_fast_forward pull --ff-only --no-rebase +' + +test_expect_success 'pull.ff=only overrides pull.rebase=true' ' + test_attempts_fast_forward -c pull.ff=only -c pull.rebase=true pull +' + +test_expect_success 'pull.ff=only overrides pull.rebase=false' ' + test_attempts_fast_forward -c pull.ff=only -c pull.rebase=false pull +' + +# Group 2: --rebase=[!false] overrides --no-ff and --ff +# (And related interaction of pull.rebase=!false and pull.ff=!only) +test_expect_success '--rebase overrides --no-ff' ' + test_does_rebase pull --rebase --no-ff +' + +test_expect_success '--rebase overrides --ff' ' + test_does_rebase pull --rebase --ff +' + +test_expect_success '--rebase fast-forwards when possible' ' + test_does_fast_forward pull --rebase --ff +' + +test_expect_success 'pull.rebase=true overrides pull.ff=false' ' + test_does_rebase -c pull.rebase=true -c pull.ff=false pull +' + +test_expect_success 'pull.rebase=true overrides pull.ff=true' ' + test_does_rebase -c pull.rebase=true -c pull.ff=true pull +' + +# Group 3: command line flags take precedence over config +test_expect_success '--ff-only takes precedence over pull.rebase=true' ' + test_attempts_fast_forward -c pull.rebase=true pull --ff-only +' + +test_expect_success '--ff-only takes precedence over pull.rebase=false' ' + test_attempts_fast_forward -c pull.rebase=false pull --ff-only +' + +test_expect_success '--no-rebase takes precedence over pull.ff=only' ' + test_falls_back_to_full_merge -c pull.ff=only pull --no-rebase +' + +test_expect_success '--rebase takes precedence over pull.ff=only' ' + test_does_rebase -c pull.ff=only pull --rebase +' + +test_expect_success '--rebase overrides pull.ff=true' ' + test_does_rebase -c pull.ff=true pull --rebase +' + +test_expect_success '--rebase overrides pull.ff=false' ' + test_does_rebase -c pull.ff=false pull --rebase +' + +test_expect_success '--rebase overrides pull.ff unset' ' + test_does_rebase pull --rebase +' + +# Group 4: --no-rebase heeds pull.ff=!only or explict --ff or --no-ff + +test_expect_success '--no-rebase works with --no-ff' ' + test_does_merge_when_ff_possible pull --no-rebase --no-ff +' + +test_expect_success '--no-rebase works with --ff' ' + test_does_fast_forward pull --no-rebase --ff +' + +test_expect_success '--no-rebase does ff if pull.ff unset' ' + test_does_fast_forward pull --no-rebase +' + +test_expect_success '--no-rebase heeds pull.ff=true' ' + test_does_fast_forward -c pull.ff=true pull --no-rebase +' + +test_expect_success '--no-rebase heeds pull.ff=false' ' + test_does_merge_when_ff_possible -c pull.ff=false pull --no-rebase +' + +# Group 5: pull.rebase=!false in combination with --no-ff or --ff +test_expect_success 'pull.rebase=true and --no-ff' ' + test_does_rebase -c pull.rebase=true pull --no-ff +' + +test_expect_success 'pull.rebase=true and --ff' ' + test_does_rebase -c pull.rebase=true pull --ff +' + +test_expect_success 'pull.rebase=false and --no-ff' ' + test_does_merge_when_ff_possible -c pull.rebase=false pull --no-ff +' + +test_expect_success 
'pull.rebase=false and --ff, ff possible' ' + test_does_fast_forward -c pull.rebase=false pull --ff +' + +test_expect_success 'pull.rebase=false and --ff, ff not possible' ' + test_falls_back_to_full_merge -c pull.rebase=false pull --ff +' + +# End of groupings for conflicting merge vs. rebase flags/options + +test_expect_success 'Multiple heads warns about inability to fast forward' ' + git reset --hard c1 && + test_must_fail git pull . c2 c3 2>err && + test_i18ngrep "You have divergent branches" err +' + +test_expect_success 'Multiple can never be fast forwarded' ' + git reset --hard c0 && + test_must_fail git -c pull.ff=only pull . c1 c2 c3 2>err && + test_i18ngrep ! "You have divergent branches" err && + # In addition to calling out "cannot fast-forward", we very much + # want the "multiple branches" piece to be called out to users. + test_i18ngrep "Cannot fast-forward to multiple branches" err +' + +test_expect_success 'Cannot rebase with multiple heads' ' + git reset --hard c0 && + test_must_fail git -c pull.rebase=true pull . c1 c2 c3 2>err && + test_i18ngrep ! "You have divergent branches" err && + test_i18ngrep "Cannot rebase onto multiple branches." err ' test_expect_success 'merge c1 with c2' ' diff --git a/t/t7603-merge-reduce-heads.sh b/t/t7603-merge-reduce-heads.sh index 98948955ae..27cd94ad6f 100755 --- a/t/t7603-merge-reduce-heads.sh +++ b/t/t7603-merge-reduce-heads.sh @@ -68,7 +68,7 @@ test_expect_success 'merge c1 with c2, c3, c4, c5' ' test_expect_success 'pull c2, c3, c4, c5 into c1' ' git reset --hard c1 && - git pull . c2 c3 c4 c5 && + git pull --no-rebase . c2 c3 c4 c5 && test "$(git rev-parse c1)" != "$(git rev-parse HEAD)" && test "$(git rev-parse c1)" = "$(git rev-parse HEAD^1)" && test "$(git rev-parse c2)" = "$(git rev-parse HEAD^2)" && diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh index 25b235c063..98eda3bfeb 100755 --- a/t/t7700-repack.sh +++ b/t/t7700-repack.sh @@ -63,13 +63,14 @@ test_expect_success 'objects in packs marked .keep are not repacked' ' test_expect_success 'writing bitmaps via command-line can duplicate .keep objects' ' # build on $oid, $packid, and .keep state from previous - git repack -Adbl && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 git repack -Adbl && test_has_duplicate_object true ' test_expect_success 'writing bitmaps via config can duplicate .keep objects' ' # build on $oid, $packid, and .keep state from previous - git -c repack.writebitmaps=true repack -Adl && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -c repack.writebitmaps=true repack -Adl && test_has_duplicate_object true ' @@ -189,7 +190,9 @@ test_expect_success 'repack --keep-pack' ' test_expect_success 'bitmaps are created by default in bare repos' ' git clone --bare .git bare.git && - git -C bare.git repack -ad && + rm -f bare.git/objects/pack/*.bitmap && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -C bare.git repack -ad && bitmap=$(ls bare.git/objects/pack/*.bitmap) && test_path_is_file "$bitmap" ' @@ -200,7 +203,8 @@ test_expect_success 'incremental repack does not complain' ' ' test_expect_success 'bitmaps can be disabled on bare repos' ' - git -c repack.writeBitmaps=false -C bare.git repack -ad && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -c repack.writeBitmaps=false -C bare.git repack -ad && bitmap=$(ls bare.git/objects/pack/*.bitmap || :) && test -z "$bitmap" ' @@ -211,7 +215,8 @@ test_expect_success 'no bitmaps created if .keep files present' ' keep=${pack%.pack}.keep && test_when_finished "rm -f \"\$keep\"" && >"$keep" && - git -C bare.git 
repack -ad 2>stderr && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -C bare.git repack -ad 2>stderr && test_must_be_empty stderr && find bare.git/objects/pack/ -type f -name "*.bitmap" >actual && test_must_be_empty actual @@ -222,7 +227,8 @@ test_expect_success 'auto-bitmaps do not complain if unavailable' ' blob=$(test-tool genrandom big $((1024*1024)) | git -C bare.git hash-object -w --stdin) && git -C bare.git update-ref refs/tags/big $blob && - git -C bare.git repack -ad 2>stderr && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -C bare.git repack -ad 2>stderr && test_must_be_empty stderr && find bare.git/objects/pack -type f -name "*.bitmap" >actual && test_must_be_empty actual diff --git a/t/t7800-difftool.sh b/t/t7800-difftool.sh index a173f564bc..528e0dabf0 100755 --- a/t/t7800-difftool.sh +++ b/t/t7800-difftool.sh @@ -674,7 +674,6 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' ' rm c && ln -s d c && cat >expect <<-EOF && - b c c @@ -710,7 +709,6 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' ' # Deleted symlinks rm -f c && cat >expect <<-EOF && - b c EOF @@ -723,6 +721,71 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' ' test_cmp expect actual ' +test_expect_success SYMLINKS 'difftool --dir-diff writes symlinks as raw text' ' + # Start out on a branch called "branch-init". + git init -b branch-init symlink-files && + ( + cd symlink-files && + # This test ensures that symlinks are written as raw text. + # The "cat" tools output link and file contents. + git config difftool.cat-left-link.cmd "cat \"\$LOCAL/link\"" && + git config difftool.cat-left-a.cmd "cat \"\$LOCAL/file-a\"" && + git config difftool.cat-right-link.cmd "cat \"\$REMOTE/link\"" && + git config difftool.cat-right-b.cmd "cat \"\$REMOTE/file-b\"" && + + # Record the empty initial state so that we can come back here + # later and not have to consider the any cases where difftool + # will create symlinks back into the worktree. + test_tick && + git commit --allow-empty -m init && + + # Create a file called "file-a" with a symlink pointing to it. + git switch -c branch-a && + echo a >file-a && + ln -s file-a link && + git add file-a link && + test_tick && + git commit -m link-to-file-a && + + # Create a file called "file-b" and point the symlink to it. + git switch -c branch-b && + echo b >file-b && + rm link && + ln -s file-b link && + git add file-b link && + git rm file-a && + test_tick && + git commit -m link-to-file-b && + + # Checkout the initial branch so that the --symlinks behavior is + # not activated. The two directories should be completely + # independent with no symlinks pointing back here. + git switch branch-init && + + # The left link must be "file-a" and "file-a" must contain "a". + echo file-a >expect && + git difftool --symlinks --dir-diff --tool cat-left-link \ + branch-a branch-b >actual && + test_cmp expect actual && + + echo a >expect && + git difftool --symlinks --dir-diff --tool cat-left-a \ + branch-a branch-b >actual && + test_cmp expect actual && + + # The right link must be "file-b" and "file-b" must contain "b". 
+ echo file-b >expect && + git difftool --symlinks --dir-diff --tool cat-right-link \ + branch-a branch-b >actual && + test_cmp expect actual && + + echo b >expect && + git difftool --symlinks --dir-diff --tool cat-right-b \ + branch-a branch-b >actual && + test_cmp expect actual + ) +' + test_expect_success 'add -N and difftool -d' ' test_when_finished git reset --hard && diff --git a/t/t7814-grep-recurse-submodules.sh b/t/t7814-grep-recurse-submodules.sh index 828cb3ba58..3172f5b936 100755 --- a/t/t7814-grep-recurse-submodules.sh +++ b/t/t7814-grep-recurse-submodules.sh @@ -8,6 +8,9 @@ submodules. . ./test-lib.sh +GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=1 +export GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB + test_expect_success 'setup directory structure and submodule' ' echo "(1|2)d(3|4)" >a && mkdir b && diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh index 58f46c77e6..36a4218745 100755 --- a/t/t7900-maintenance.sh +++ b/t/t7900-maintenance.sh @@ -20,6 +20,18 @@ test_xmllint () { fi } +test_lazy_prereq SYSTEMD_ANALYZE ' + systemd-analyze --help >out && + grep verify out +' + +test_systemd_analyze_verify () { + if test_have_prereq SYSTEMD_ANALYZE + then + systemd-analyze verify "$@" + fi +} + test_expect_success 'help text' ' test_expect_code 129 git maintenance -h 2>err && test_i18ngrep "usage: git maintenance <subcommand>" err && @@ -492,8 +504,21 @@ test_expect_success !MINGW 'register and unregister with regex metacharacters' ' maintenance.repo "$(pwd)/$META" ' +test_expect_success 'start --scheduler=<scheduler>' ' + test_expect_code 129 git maintenance start --scheduler=foo 2>err && + test_i18ngrep "unrecognized --scheduler argument" err && + + test_expect_code 129 git maintenance start --no-scheduler 2>err && + test_i18ngrep "unknown option" err && + + test_expect_code 128 \ + env GIT_TEST_MAINT_SCHEDULER="launchctl:true,schtasks:true" \ + git maintenance start --scheduler=crontab 2>err && + test_i18ngrep "fatal: crontab scheduler is not available" err +' + test_expect_success 'start from empty cron table' ' - GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start && + GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start --scheduler=crontab && # start registers the repo git config --get --global --fixed-value maintenance.repo "$(pwd)" && @@ -516,7 +541,7 @@ test_expect_success 'stop from existing schedule' ' test_expect_success 'start preserves existing schedule' ' echo "Important information!" >cron.txt && - GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start && + GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start --scheduler=crontab && grep "Important information!" 
cron.txt ' @@ -545,7 +570,7 @@ test_expect_success 'start and stop macOS maintenance' ' EOF rm -f args && - GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start && + GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl && # start registers the repo git config --get --global --fixed-value maintenance.repo "$(pwd)" && @@ -582,6 +607,23 @@ test_expect_success 'start and stop macOS maintenance' ' test_line_count = 0 actual ' +test_expect_success 'use launchctl list to prevent extra work' ' + # ensure we are registered + GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl && + + # do it again on a fresh args file + rm -f args && + GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl && + + ls "$HOME/Library/LaunchAgents" >actual && + cat >expect <<-\EOF && + list org.git-scm.git.hourly + list org.git-scm.git.daily + list org.git-scm.git.weekly + EOF + test_cmp expect args +' + test_expect_success 'start and stop Windows maintenance' ' write_script print-args <<-\EOF && echo $* >>args @@ -596,7 +638,7 @@ test_expect_success 'start and stop Windows maintenance' ' EOF rm -f args && - GIT_TEST_MAINT_SCHEDULER="schtasks:./print-args" git maintenance start && + GIT_TEST_MAINT_SCHEDULER="schtasks:./print-args" git maintenance start --scheduler=schtasks && # start registers the repo git config --get --global --fixed-value maintenance.repo "$(pwd)" && @@ -619,6 +661,83 @@ test_expect_success 'start and stop Windows maintenance' ' test_cmp expect args ' +test_expect_success 'start and stop Linux/systemd maintenance' ' + write_script print-args <<-\EOF && + printf "%s\n" "$*" >>args + EOF + + XDG_CONFIG_HOME="$PWD" && + export XDG_CONFIG_HOME && + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args" git maintenance start --scheduler=systemd-timer && + + # start registers the repo + git config --get --global --fixed-value maintenance.repo "$(pwd)" && + + test_systemd_analyze_verify "systemd/user/git-maintenance@.service" && + + printf -- "--user enable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args" git maintenance stop && + + # stop does not unregister the repo + git config --get --global --fixed-value maintenance.repo "$(pwd)" && + + test_path_is_missing "systemd/user/git-maintenance@.timer" && + test_path_is_missing "systemd/user/git-maintenance@.service" && + + printf -- "--user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + test_cmp expect args +' + +test_expect_success 'start and stop when several schedulers are available' ' + write_script print-args <<-\EOF && + printf "%s\n" "$*" | sed "s:gui/[0-9][0-9]*:gui/[UID]:; s:\(schtasks /create .* /xml\).*:\1:;" >>args + EOF + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=systemd-timer && + printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \ + hourly daily weekly >expect && + printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \ + hourly daily weekly >>expect && + printf -- "systemctl --user enable --now git-maintenance@%s.timer\n" hourly daily weekly >>expect && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args 
launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=launchctl && + printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \ + hourly daily weekly >>expect && + for frequency in hourly daily weekly + do + PLIST="$pfx/Library/LaunchAgents/org.git-scm.git.$frequency.plist" && + echo "launchctl bootout gui/[UID] $PLIST" >>expect && + echo "launchctl bootstrap gui/[UID] $PLIST" >>expect || return 1 + done && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=schtasks && + printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \ + hourly daily weekly >>expect && + printf "schtasks /create /tn Git Maintenance (%s) /f /xml\n" \ + hourly daily weekly >>expect && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance stop && + printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \ + hourly daily weekly >>expect && + printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \ + hourly daily weekly >>expect && + test_cmp expect args +' + test_expect_success 'register preserves existing strategy' ' git config maintenance.strategy none && git maintenance register && diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh index 57fc10e7f8..aa0c20499b 100755 --- a/t/t9001-send-email.sh +++ b/t/t9001-send-email.sh @@ -1533,6 +1533,21 @@ test_expect_success $PREREQ 'sendemail.8bitEncoding works' ' test_cmp content-type-decl actual ' +test_expect_success $PREREQ 'sendemail.8bitEncoding in .git/config overrides --global .gitconfig' ' + clean_fake_sendmail && + git config sendemail.assume8bitEncoding UTF-8 && + test_when_finished "rm -rf home" && + mkdir home && + git config -f home/.gitconfig sendemail.assume8bitEncoding "bogus too" && + echo bogus | + env HOME="$(pwd)/home" DEBUG=1 \ + git send-email --from=author@example.com --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + email-using-8bit >stdout && + egrep "Content|MIME" msgtxt1 >actual && + test_cmp content-type-decl actual +' + test_expect_success $PREREQ '--8bit-encoding overrides sendemail.8bitEncoding' ' clean_fake_sendmail && git config sendemail.assume8bitEncoding "bogus too" && @@ -2198,7 +2213,7 @@ test_expect_success $PREREQ 'leading and trailing whitespaces are removed' ' test_expect_success $PREREQ 'test using command name with --sendmail-cmd' ' clean_fake_sendmail && - PATH="$(pwd):$PATH" \ + PATH="$PWD:$PATH" \ git send-email \ --from="Example <nobody@example.com>" \ --to=nobody@example.com \ @@ -2227,6 +2242,51 @@ test_expect_success $PREREQ 'test shell expression with --sendmail-cmd' ' test_path_is_file commandline1 ' +test_expect_success $PREREQ 'set up in-reply-to/references patches' ' + cat >has-reply.patch <<-\EOF && + From: A U Thor <author@example.com> + Subject: patch with in-reply-to + Message-ID: <patch.with.in.reply.to@example.com> + In-Reply-To: <replied.to@example.com> + References: <replied.to@example.com> + + This is the body. 
+ EOF + cat >no-reply.patch <<-\EOF + From: A U Thor <author@example.com> + Subject: patch without in-reply-to + Message-ID: <patch.without.in.reply.to@example.com> + + This is the body. + EOF +' + +test_expect_success $PREREQ 'patch reply headers correct with --no-thread' ' + clean_fake_sendmail && + git send-email \ + --no-thread \ + --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + has-reply.patch no-reply.patch && + grep "In-Reply-To: <replied.to@example.com>" msgtxt1 && + grep "References: <replied.to@example.com>" msgtxt1 && + ! grep replied.to@example.com msgtxt2 +' + +test_expect_success $PREREQ 'cmdline in-reply-to used with --no-thread' ' + clean_fake_sendmail && + git send-email \ + --no-thread \ + --in-reply-to="<cmdline.reply@example.com>" \ + --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + has-reply.patch no-reply.patch && + grep "In-Reply-To: <cmdline.reply@example.com>" msgtxt1 && + grep "References: <cmdline.reply@example.com>" msgtxt1 && + grep "In-Reply-To: <cmdline.reply@example.com>" msgtxt2 && + grep "References: <cmdline.reply@example.com>" msgtxt2 +' + test_expect_success $PREREQ 'invoke hook' ' mkdir -p .git/hooks && diff --git a/t/t9002-column.sh b/t/t9002-column.sh index 89983527b6..6d3dbde3fe 100755 --- a/t/t9002-column.sh +++ b/t/t9002-column.sh @@ -42,6 +42,24 @@ EOF test_cmp expected actual ' +test_expect_success '--nl' ' + cat >expected <<\EOF && +oneZ +twoZ +threeZ +fourZ +fiveZ +sixZ +sevenZ +eightZ +nineZ +tenZ +elevenZ +EOF + git column --nl="Z$LF" --mode=plain <lista >actual && + test_cmp expected actual +' + test_expect_success '80 columns' ' cat >expected <<\EOF && one two three four five six seven eight nine ten eleven diff --git a/t/t9351-fast-export-anonymize.sh b/t/t9351-fast-export-anonymize.sh index 1c6e6fcdaf..77047e250d 100755 --- a/t/t9351-fast-export-anonymize.sh +++ b/t/t9351-fast-export-anonymize.sh @@ -18,7 +18,8 @@ test_expect_success 'setup simple repo' ' git update-index --add --cacheinfo 160000,$fake_commit,link1 && git update-index --add --cacheinfo 160000,$fake_commit,link2 && git commit -m "add gitlink" && - git tag -m "annotated tag" mytag + git tag -m "annotated tag" mytag && + git tag -m "annotated tag with long message" longtag ' test_expect_success 'export anonymized stream' ' @@ -55,7 +56,8 @@ test_expect_success 'stream retains other as refname' ' test_expect_success 'stream omits other refnames' ' ! grep main stream && - ! grep mytag stream + ! grep mytag stream && + ! grep longtag stream ' test_expect_success 'stream omits identities' ' @@ -118,9 +120,9 @@ test_expect_success 'identical gitlinks got identical oid' ' test_line_count = 1 commits ' -test_expect_success 'tag points to branch tip' ' +test_expect_success 'all tags point to branch tip' ' git rev-parse $other_branch >expect && - git for-each-ref --format="%(*objectname)" | grep . >actual && + git for-each-ref --format="%(*objectname)" | grep . 
| uniq >actual && test_cmp expect actual ' diff --git a/t/t9400-git-cvsserver-server.sh b/t/t9400-git-cvsserver-server.sh index 2d29d486ee..17f988edd2 100755 --- a/t/t9400-git-cvsserver-server.sh +++ b/t/t9400-git-cvsserver-server.sh @@ -36,6 +36,13 @@ CVSWORK="$PWD/cvswork" CVS_SERVER=git-cvsserver export CVSROOT CVS_SERVER +if perl -e 'exit(1) if not defined crypt("", "cv")' +then + PWDHASH='lac2ItudM3.KM' +else + PWDHASH='$2b$10$t8fGvE/a9eLmfOLzsZme2uOa2QtoMYwIxq9wZA6aBKtF1Yb7FJIzi' +fi + rm -rf "$CVSWORK" "$SERVERDIR" test_expect_success 'setup' ' git config push.default matching && @@ -54,7 +61,7 @@ test_expect_success 'setup' ' GIT_DIR="$SERVERDIR" git config --bool gitcvs.enabled true && GIT_DIR="$SERVERDIR" git config gitcvs.logfile "$SERVERDIR/gitcvs.log" && GIT_DIR="$SERVERDIR" git config gitcvs.authdb "$SERVERDIR/auth.db" && - echo cvsuser:cvGVEarMLnhlA > "$SERVERDIR/auth.db" + echo "cvsuser:$PWDHASH" >"$SERVERDIR/auth.db" ' # note that cvs doesn't accept absolute pathnames diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh index 11573936d5..5decc3b269 100755 --- a/t/t9902-completion.sh +++ b/t/t9902-completion.sh @@ -540,6 +540,15 @@ test_expect_success '__gitcomp - expand/narrow all negative options' ' EOF ' +test_expect_success '__gitcomp - equal skip' ' + test_gitcomp "--option=" "--option=" <<-\EOF && + + EOF + test_gitcomp "option=" "option=" <<-\EOF + + EOF +' + test_expect_success '__gitcomp - doesnt fail because of invalid variable name' ' __gitcomp "$invalid_variable_name" ' @@ -2380,6 +2389,12 @@ test_expect_success 'git clone --config= - value' ' EOF ' +test_expect_success 'options with value' ' + test_completion "git merge -X diff-algorithm=" <<-\EOF + + EOF +' + test_expect_success 'sourcing the completion script clears cached commands' ' __git_compute_all_commands && verbose test -n "$__git_all_commands" && diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh index e28411bb75..eef2262a36 100644 --- a/t/test-lib-functions.sh +++ b/t/test-lib-functions.sh @@ -137,33 +137,110 @@ test_tick () { # Stop execution and start a shell. This is useful for debugging tests. # # Be sure to remove all invocations of this command before submitting. +# WARNING: the shell invoked by this helper does not have the same environment +# as the one running the tests (shell variables and functions are not +# available, and the options below further modify the environment). As such, +# commands copied from a test script might behave differently than when +# running the test. +# +# Usage: test_pause [options] +# -t +# Use your original TERM instead of test-lib.sh's "dumb". +# This usually restores color output in the invoked shell. +# -s +# Invoke $SHELL instead of $TEST_SHELL_PATH. +# -h +# Use your original HOME instead of test-lib.sh's "$TRASH_DIRECTORY". +# This allows you to use your regular shell environment and Git aliases. +# CAUTION: running commands copied from a test script into the paused shell +# might result in files in your HOME being overwritten. 
+# -a +# Shortcut for -t -s -h test_pause () { - "$SHELL_PATH" <&6 >&5 2>&7 + PAUSE_TERM=$TERM && + PAUSE_SHELL=$TEST_SHELL_PATH && + PAUSE_HOME=$HOME && + while test $# != 0 + do + case "$1" in + -t) + PAUSE_TERM="$USER_TERM" + ;; + -s) + PAUSE_SHELL="$SHELL" + ;; + -h) + PAUSE_HOME="$USER_HOME" + ;; + -a) + PAUSE_TERM="$USER_TERM" + PAUSE_SHELL="$SHELL" + PAUSE_HOME="$USER_HOME" + ;; + *) + break + ;; + esac + shift + done && + TERM="$PAUSE_TERM" HOME="$PAUSE_HOME" "$PAUSE_SHELL" <&6 >&5 2>&7 } # Wrap git with a debugger. Adding this to a command can make it easier # to understand what is going on in a failing test. # +# Usage: debug [options] <git command> +# -d <debugger> +# --debugger=<debugger> +# Use <debugger> instead of GDB +# -t +# Use your original TERM instead of test-lib.sh's "dumb". +# This usually restores color output in the debugger. +# WARNING: the command being debugged might behave differently than when +# running the test. +# # Examples: # debug git checkout master # debug --debugger=nemiver git $ARGS # debug -d "valgrind --tool=memcheck --track-origins=yes" git $ARGS debug () { - case "$1" in - -d) - GIT_DEBUGGER="$2" && - shift 2 - ;; - --debugger=*) - GIT_DEBUGGER="${1#*=}" && - shift 1 - ;; - *) - GIT_DEBUGGER=1 - ;; - esac && - GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7 + GIT_DEBUGGER=1 && + DEBUG_TERM=$TERM && + while test $# != 0 + do + case "$1" in + -t) + DEBUG_TERM="$USER_TERM" + ;; + -d) + GIT_DEBUGGER="$2" && + shift + ;; + --debugger=*) + GIT_DEBUGGER="${1#*=}" + ;; + *) + break + ;; + esac + shift + done && + + dotfiles=".gdbinit .lldbinit" + + for dotfile in $dotfiles + do + dotfile="$USER_HOME/$dotfile" && + test -f "$dotfile" && cp "$dotfile" "$HOME" || : + done && + + TERM="$DEBUG_TERM" GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7 && + + for dotfile in $dotfiles + do + rm -f "$HOME/$dotfile" + done } # Usage: test_commit [options] <message> [<file> [<contents> [<tag>]]] diff --git a/t/test-lib.sh b/t/test-lib.sh index abcfbed6d6..aa1ad8180e 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -534,7 +534,7 @@ SQ=\' # when case-folding filenames u200c=$(printf '\342\200\214') -export _x05 _x35 _x40 _z40 LF u200c EMPTY_TREE EMPTY_BLOB ZERO_OID OID_REGEX +export _x05 _x35 LF u200c EMPTY_TREE EMPTY_BLOB ZERO_OID OID_REGEX # Each test should start with something like this, after copyright notices: # @@ -585,8 +585,9 @@ else } fi +USER_TERM="$TERM" TERM=dumb -export TERM +export TERM USER_TERM error () { say_color error "error: $*" @@ -1343,7 +1344,8 @@ fi GIT_TEMPLATE_DIR="$GIT_BUILD_DIR"/templates/blt GIT_CONFIG_NOSYSTEM=1 GIT_ATTR_NOSYSTEM=1 -export PATH GIT_EXEC_PATH GIT_TEMPLATE_DIR GIT_CONFIG_NOSYSTEM GIT_ATTR_NOSYSTEM +GIT_CEILING_DIRECTORIES="$TRASH_DIRECTORY/.." 
+export PATH GIT_EXEC_PATH GIT_TEMPLATE_DIR GIT_CONFIG_NOSYSTEM GIT_ATTR_NOSYSTEM GIT_CEILING_DIRECTORIES if test -z "$GIT_TEST_CMP" then @@ -1380,9 +1382,10 @@ then fi # Last-minute variable setup +USER_HOME="$HOME" HOME="$TRASH_DIRECTORY" GNUPGHOME="$HOME/gnupg-home-not-used" -export HOME GNUPGHOME +export HOME GNUPGHOME USER_HOME # Test repository rm -fr "$TRASH_DIRECTORY" || { @@ -1422,10 +1425,9 @@ then fi # Convenience -# A regexp to match 5, 35 and 40 hexdigits +# A regexp to match 5 and 35 hexdigits _x05='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]' _x35="$_x05$_x05$_x05$_x05$_x05$_x05$_x05" -_x40="$_x35$_x05" test_oid_init @@ -1434,7 +1436,6 @@ OID_REGEX=$(echo $ZERO_OID | sed -e 's/0/[0-9a-f]/g') OIDPATH_REGEX=$(test_oid_to_path $ZERO_OID | sed -e 's/0/[0-9a-f]/g') EMPTY_TREE=$(test_oid empty_tree) EMPTY_BLOB=$(test_oid empty_blob) -_z40=$ZERO_OID # Provide an implementation of the 'yes' utility; the upper bound # limit is there to help Windows that cannot stop this loop from @@ -260,6 +260,19 @@ void trace2_cmd_path_fl(const char *file, int line, const char *pathname) tgt_j->pfn_command_path_fl(file, line, pathname); } +void trace2_cmd_ancestry_fl(const char *file, int line, const char **parent_names) +{ + struct tr2_tgt *tgt_j; + int j; + + if (!trace2_enabled) + return; + + for_each_wanted_builtin (j, tgt_j) + if (tgt_j->pfn_command_ancestry_fl) + tgt_j->pfn_command_ancestry_fl(file, line, parent_names); +} + void trace2_cmd_name_fl(const char *file, int line, const char *name) { struct tr2_tgt *tgt_j; @@ -134,6 +134,16 @@ void trace2_cmd_path_fl(const char *file, int line, const char *pathname); #define trace2_cmd_path(p) trace2_cmd_path_fl(__FILE__, __LINE__, (p)) /* + * Emit an 'ancestry' event with the process name of the current process's + * parent process. + * This gives post-processors a way to determine what invoked the command and + * learn more about usage patterns. + */ +void trace2_cmd_ancestry_fl(const char *file, int line, const char **parent_names); + +#define trace2_cmd_ancestry(v) trace2_cmd_ancestry_fl(__FILE__, __LINE__, (v)) + +/* * Emit a 'cmd_name' event with the canonical name of the command. * This gives post-processors a simple field to identify the command * without having to parse the argv. @@ -340,7 +350,7 @@ void trace2_def_repo_fl(const char *file, int line, struct repository *repo); * being started, such as "read_recursive" or "do_read_index". * * The `repo` field, if set, will be used to get the "repo-id", so that - * recursive oerations can be attributed to the correct repository. + * recursive operations can be attributed to the correct repository. 
can be attributed to the correct repository.
*/ void trace2_region_enter_fl(const char *file, int line, const char *category, const char *label, const struct repository *repo, ...); @@ -492,13 +502,7 @@ enum trace2_process_info_reason { TRACE2_PROCESS_INFO_EXIT, }; -#if defined(GIT_WINDOWS_NATIVE) void trace2_collect_process_info(enum trace2_process_info_reason reason); -#else -#define trace2_collect_process_info(reason) \ - do { \ - } while (0) -#endif const char *trace2_session_id(void); diff --git a/trace2/tr2_tgt.h b/trace2/tr2_tgt.h index 7b90469212..1f66fd6573 100644 --- a/trace2/tr2_tgt.h +++ b/trace2/tr2_tgt.h @@ -27,6 +27,8 @@ typedef void(tr2_tgt_evt_error_va_fl_t)(const char *file, int line, typedef void(tr2_tgt_evt_command_path_fl_t)(const char *file, int line, const char *command_path); +typedef void(tr2_tgt_evt_command_ancestry_fl_t)(const char *file, int line, + const char **parent_names); typedef void(tr2_tgt_evt_command_name_fl_t)(const char *file, int line, const char *name, const char *hierarchy); @@ -108,6 +110,7 @@ struct tr2_tgt { tr2_tgt_evt_atexit_t *pfn_atexit; tr2_tgt_evt_error_va_fl_t *pfn_error_va_fl; tr2_tgt_evt_command_path_fl_t *pfn_command_path_fl; + tr2_tgt_evt_command_ancestry_fl_t *pfn_command_ancestry_fl; tr2_tgt_evt_command_name_fl_t *pfn_command_name_fl; tr2_tgt_evt_command_mode_fl_t *pfn_command_mode_fl; tr2_tgt_evt_alias_fl_t *pfn_alias_fl; diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c index 6353e8ad91..578a9a5287 100644 --- a/trace2/tr2_tgt_event.c +++ b/trace2/tr2_tgt_event.c @@ -261,6 +261,26 @@ static void fn_command_path_fl(const char *file, int line, const char *pathname) jw_release(&jw); } +static void fn_command_ancestry_fl(const char *file, int line, const char **parent_names) +{ + const char *event_name = "cmd_ancestry"; + const char *parent_name = NULL; + struct json_writer jw = JSON_WRITER_INIT; + + jw_object_begin(&jw, 0); + event_fmt_prepare(event_name, file, line, NULL, &jw); + jw_object_inline_begin_array(&jw, "ancestry"); + + while ((parent_name = *parent_names++)) + jw_array_string(&jw, parent_name); + + jw_end(&jw); /* 'ancestry' array */ + jw_end(&jw); /* event object */ + + tr2_dst_write_line(&tr2dst_event, &jw.json); + jw_release(&jw); +} + static void fn_command_name_fl(const char *file, int line, const char *name, const char *hierarchy) { @@ -584,6 +604,7 @@ struct tr2_tgt tr2_tgt_event = { fn_atexit, fn_error_va_fl, fn_command_path_fl, + fn_command_ancestry_fl, fn_command_name_fl, fn_command_mode_fl, fn_alias_fl, diff --git a/trace2/tr2_tgt_normal.c b/trace2/tr2_tgt_normal.c index 31b602c171..a5751c8864 100644 --- a/trace2/tr2_tgt_normal.c +++ b/trace2/tr2_tgt_normal.c @@ -160,6 +160,24 @@ static void fn_command_path_fl(const char *file, int line, const char *pathname) strbuf_release(&buf_payload); } +static void fn_command_ancestry_fl(const char *file, int line, const char **parent_names) +{ + const char *parent_name = NULL; + struct strbuf buf_payload = STRBUF_INIT; + + /* cmd_ancestry parent <- grandparent <- great-grandparent */ + strbuf_addstr(&buf_payload, "cmd_ancestry "); + while ((parent_name = *parent_names++)) { + strbuf_addstr(&buf_payload, parent_name); + /* if we'll write another one after this, add a delimiter */ + if (parent_names && *parent_names) + strbuf_addstr(&buf_payload, " <- "); + } + + normal_io_write_fl(file, line, &buf_payload); + strbuf_release(&buf_payload); +} + static void fn_command_name_fl(const char *file, int line, const char *name, const char *hierarchy) { @@ -306,6 +324,7 @@ struct tr2_tgt tr2_tgt_normal = { fn_atexit, 
fn_error_va_fl, fn_command_path_fl, + fn_command_ancestry_fl, fn_command_name_fl, fn_command_mode_fl, fn_alias_fl, diff --git a/trace2/tr2_tgt_perf.c b/trace2/tr2_tgt_perf.c index a8018f18cc..af4d65a0a5 100644 --- a/trace2/tr2_tgt_perf.c +++ b/trace2/tr2_tgt_perf.c @@ -253,6 +253,21 @@ static void fn_command_path_fl(const char *file, int line, const char *pathname) strbuf_release(&buf_payload); } +static void fn_command_ancestry_fl(const char *file, int line, const char **parent_names) +{ + const char *event_name = "cmd_ancestry"; + struct strbuf buf_payload = STRBUF_INIT; + + strbuf_addstr(&buf_payload, "ancestry:["); + /* It's not an argv but the rules are basically the same. */ + sq_append_quote_argv_pretty(&buf_payload, parent_names); + strbuf_addch(&buf_payload, ']'); + + perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL, + &buf_payload); + strbuf_release(&buf_payload); +} + static void fn_command_name_fl(const char *file, int line, const char *name, const char *hierarchy) { @@ -532,6 +547,7 @@ struct tr2_tgt tr2_tgt_perf = { fn_atexit, fn_error_va_fl, fn_command_path_fl, + fn_command_ancestry_fl, fn_command_name_fl, fn_command_mode_fl, fn_alias_fl, diff --git a/trace2/tr2_tls.c b/trace2/tr2_tls.c index 067c23755f..7da94aba52 100644 --- a/trace2/tr2_tls.c +++ b/trace2/tr2_tls.c @@ -95,6 +95,7 @@ void tr2tls_unset_self(void) pthread_setspecific(tr2tls_key, NULL); + strbuf_release(&ctx->thread_name); free(ctx->array_us_start); free(ctx); } diff --git a/transport-helper.c b/transport-helper.c index 4be035edb8..e8dbdd1153 100644 --- a/transport-helper.c +++ b/transport-helper.c @@ -671,8 +671,8 @@ static int connect_helper(struct transport *transport, const char *name, static struct ref *get_refs_list_using_list(struct transport *transport, int for_push); -static int fetch(struct transport *transport, - int nr_heads, struct ref **to_fetch) +static int fetch_refs(struct transport *transport, + int nr_heads, struct ref **to_fetch) { struct helper_data *data = transport->data; int i, count; @@ -681,7 +681,7 @@ static int fetch(struct transport *transport, if (process_connect(transport, 0)) { do_take_over(transport); - return transport->vtable->fetch(transport, nr_heads, to_fetch); + return transport->vtable->fetch_refs(transport, nr_heads, to_fetch); } /* @@ -1261,12 +1261,12 @@ static struct ref *get_refs_list_using_list(struct transport *transport, } static struct transport_vtable vtable = { - set_helper_option, - get_refs_list, - fetch, - push_refs, - connect_helper, - release_helper + .set_option = set_helper_option, + .get_refs_list = get_refs_list, + .fetch_refs = fetch_refs, + .push_refs = push_refs, + .connect = connect_helper, + .disconnect = release_helper }; int transport_helper_init(struct transport *transport, const char *name) diff --git a/transport-internal.h b/transport-internal.h index b60f1ba907..c4ca0b733a 100644 --- a/transport-internal.h +++ b/transport-internal.h @@ -34,7 +34,7 @@ struct transport_vtable { * get_refs_list(), it should set the old_sha1 fields in the * provided refs now. **/ - int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs); + int (*fetch_refs)(struct transport *transport, int refs_nr, struct ref **refs); /** * Push the objects and refs. 
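The vtable update in transport-helper.c just above (and the ones in transport.c further on) switches from positional to designated initializers, so hooks a transport does not implement can simply be left out and default to NULL, and the initializers stay correct if struct transport_vtable ever gains or reorders members. A minimal standalone sketch of the same idiom, using made-up names rather than Git's actual structs:

#include <stdio.h>

/* Illustrative stand-in for a transport-style vtable. */
struct demo_vtable {
	int (*fetch_refs)(const char *url);
	int (*push_refs)(const char *url);
	void (*disconnect)(void);
};

static int demo_fetch(const char *url)
{
	printf("fetching from %s\n", url);
	return 0;
}

static void demo_close(void)
{
	printf("closed\n");
}

/*
 * Members that are not named default to NULL, and the initializer
 * keeps working even if the struct's field order changes later.
 */
static struct demo_vtable read_only_vtable = {
	.fetch_refs = demo_fetch,
	.disconnect = demo_close,
};

int main(void)
{
	read_only_vtable.fetch_refs("https://example.com/repo.git");
	if (!read_only_vtable.push_refs)
		printf("push not supported by this vtable\n");
	read_only_vtable.disconnect();
	return 0;
}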
Send the necessary objects, and diff --git a/transport.c b/transport.c index 17e9629710..b37664ba87 100644 --- a/transport.c +++ b/transport.c @@ -162,12 +162,16 @@ static int fetch_refs_from_bundle(struct transport *transport, int nr_heads, struct ref **to_fetch) { struct bundle_transport_data *data = transport->data; + struct strvec extra_index_pack_args = STRVEC_INIT; int ret; + if (transport->progress) + strvec_push(&extra_index_pack_args, "-v"); + if (!data->get_refs_from_bundle_called) get_refs_from_bundle(transport, 0, NULL); ret = unbundle(the_repository, &data->header, data->fd, - transport->progress ? BUNDLE_VERBOSE : 0); + &extra_index_pack_args); transport->hash_algo = data->header.hash_algo; return ret; } @@ -883,12 +887,10 @@ static int disconnect_git(struct transport *transport) } static struct transport_vtable taken_over_vtable = { - NULL, - get_refs_via_connect, - fetch_refs_via_pack, - git_transport_push, - NULL, - disconnect_git + .get_refs_list = get_refs_via_connect, + .fetch_refs = fetch_refs_via_pack, + .push_refs = git_transport_push, + .disconnect = disconnect_git }; void transport_take_over(struct transport *transport, @@ -1032,21 +1034,17 @@ void transport_check_allowed(const char *type) } static struct transport_vtable bundle_vtable = { - NULL, - get_refs_from_bundle, - fetch_refs_from_bundle, - NULL, - NULL, - close_bundle + .get_refs_list = get_refs_from_bundle, + .fetch_refs = fetch_refs_from_bundle, + .disconnect = close_bundle }; static struct transport_vtable builtin_smart_vtable = { - NULL, - get_refs_via_connect, - fetch_refs_via_pack, - git_transport_push, - connect_git, - disconnect_git + .get_refs_list = get_refs_via_connect, + .fetch_refs = fetch_refs_via_pack, + .push_refs = git_transport_push, + .connect = connect_git, + .disconnect = disconnect_git }; struct transport *transport_get(struct remote *remote, const char *url) @@ -1453,7 +1451,7 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs) heads[nr_heads++] = rm; } - rc = transport->vtable->fetch(transport, nr_heads, heads); + rc = transport->vtable->fetch_refs(transport, nr_heads, heads); free(heads); return rc; diff --git a/tree-diff.c b/tree-diff.c index 1572615bd9..437c98a70e 100644 --- a/tree-diff.c +++ b/tree-diff.c @@ -21,7 +21,9 @@ ALLOC_ARRAY((x), nr); \ } while(0) #define FAST_ARRAY_FREE(x, nr) do { \ - if ((nr) > 2) \ + if ((nr) <= 2) \ + xalloca_free((x)); \ + else \ free((x)); \ } while(0) diff --git a/unicode-width.h b/unicode-width.h index b50e686bae..97c851b27d 100644 --- a/unicode-width.h +++ b/unicode-width.h @@ -26,7 +26,9 @@ static const struct interval zero_width[] = { { 0x0825, 0x0827 }, { 0x0829, 0x082D }, { 0x0859, 0x085B }, -{ 0x08D3, 0x0902 }, +{ 0x0890, 0x0891 }, +{ 0x0898, 0x089F }, +{ 0x08CA, 0x0902 }, { 0x093A, 0x093A }, { 0x093C, 0x093C }, { 0x0941, 0x0948 }, @@ -66,6 +68,7 @@ static const struct interval zero_width[] = { { 0x0BCD, 0x0BCD }, { 0x0C00, 0x0C00 }, { 0x0C04, 0x0C04 }, +{ 0x0C3C, 0x0C3C }, { 0x0C3E, 0x0C40 }, { 0x0C46, 0x0C48 }, { 0x0C4A, 0x0C4D }, @@ -116,7 +119,7 @@ static const struct interval zero_width[] = { { 0x1160, 0x11FF }, { 0x135D, 0x135F }, { 0x1712, 0x1714 }, -{ 0x1732, 0x1734 }, +{ 0x1732, 0x1733 }, { 0x1752, 0x1753 }, { 0x1772, 0x1773 }, { 0x17B4, 0x17B5 }, @@ -124,7 +127,7 @@ static const struct interval zero_width[] = { { 0x17C6, 0x17C6 }, { 0x17C9, 0x17D3 }, { 0x17DD, 0x17DD }, -{ 0x180B, 0x180E }, +{ 0x180B, 0x180F }, { 0x1885, 0x1886 }, { 0x18A9, 0x18A9 }, { 0x1920, 0x1922 }, @@ -140,7 +143,7 @@ static const 
struct interval zero_width[] = { { 0x1A65, 0x1A6C }, { 0x1A73, 0x1A7C }, { 0x1A7F, 0x1A7F }, -{ 0x1AB0, 0x1AC0 }, +{ 0x1AB0, 0x1ACE }, { 0x1B00, 0x1B03 }, { 0x1B34, 0x1B34 }, { 0x1B36, 0x1B3A }, @@ -163,8 +166,7 @@ static const struct interval zero_width[] = { { 0x1CED, 0x1CED }, { 0x1CF4, 0x1CF4 }, { 0x1CF8, 0x1CF9 }, -{ 0x1DC0, 0x1DF9 }, -{ 0x1DFB, 0x1DFF }, +{ 0x1DC0, 0x1DFF }, { 0x200B, 0x200F }, { 0x202A, 0x202E }, { 0x2060, 0x2064 }, @@ -227,12 +229,16 @@ static const struct interval zero_width[] = { { 0x10D24, 0x10D27 }, { 0x10EAB, 0x10EAC }, { 0x10F46, 0x10F50 }, +{ 0x10F82, 0x10F85 }, { 0x11001, 0x11001 }, { 0x11038, 0x11046 }, +{ 0x11070, 0x11070 }, +{ 0x11073, 0x11074 }, { 0x1107F, 0x11081 }, { 0x110B3, 0x110B6 }, { 0x110B9, 0x110BA }, { 0x110BD, 0x110BD }, +{ 0x110C2, 0x110C2 }, { 0x110CD, 0x110CD }, { 0x11100, 0x11102 }, { 0x11127, 0x1112B }, @@ -315,6 +321,8 @@ static const struct interval zero_width[] = { { 0x16FE4, 0x16FE4 }, { 0x1BC9D, 0x1BC9E }, { 0x1BCA0, 0x1BCA3 }, +{ 0x1CF00, 0x1CF2D }, +{ 0x1CF30, 0x1CF46 }, { 0x1D167, 0x1D169 }, { 0x1D173, 0x1D182 }, { 0x1D185, 0x1D18B }, @@ -332,6 +340,7 @@ static const struct interval zero_width[] = { { 0x1E023, 0x1E024 }, { 0x1E026, 0x1E02A }, { 0x1E130, 0x1E136 }, +{ 0x1E2AE, 0x1E2AE }, { 0x1E2EC, 0x1E2EF }, { 0x1E8D0, 0x1E8D6 }, { 0x1E944, 0x1E94A }, @@ -404,7 +413,10 @@ static const struct interval double_width[] = { { 0x17000, 0x187F7 }, { 0x18800, 0x18CD5 }, { 0x18D00, 0x18D08 }, -{ 0x1B000, 0x1B11E }, +{ 0x1AFF0, 0x1AFF3 }, +{ 0x1AFF5, 0x1AFFB }, +{ 0x1AFFD, 0x1AFFE }, +{ 0x1B000, 0x1B122 }, { 0x1B150, 0x1B152 }, { 0x1B164, 0x1B167 }, { 0x1B170, 0x1B2FB }, @@ -439,21 +451,23 @@ static const struct interval double_width[] = { { 0x1F6CC, 0x1F6CC }, { 0x1F6D0, 0x1F6D2 }, { 0x1F6D5, 0x1F6D7 }, +{ 0x1F6DD, 0x1F6DF }, { 0x1F6EB, 0x1F6EC }, { 0x1F6F4, 0x1F6FC }, { 0x1F7E0, 0x1F7EB }, +{ 0x1F7F0, 0x1F7F0 }, { 0x1F90C, 0x1F93A }, { 0x1F93C, 0x1F945 }, -{ 0x1F947, 0x1F978 }, -{ 0x1F97A, 0x1F9CB }, -{ 0x1F9CD, 0x1F9FF }, +{ 0x1F947, 0x1F9FF }, { 0x1FA70, 0x1FA74 }, -{ 0x1FA78, 0x1FA7A }, +{ 0x1FA78, 0x1FA7C }, { 0x1FA80, 0x1FA86 }, -{ 0x1FA90, 0x1FAA8 }, -{ 0x1FAB0, 0x1FAB6 }, -{ 0x1FAC0, 0x1FAC2 }, -{ 0x1FAD0, 0x1FAD6 }, +{ 0x1FA90, 0x1FAAC }, +{ 0x1FAB0, 0x1FABA }, +{ 0x1FAC0, 0x1FAC5 }, +{ 0x1FAD0, 0x1FAD9 }, +{ 0x1FAE0, 0x1FAE7 }, +{ 0x1FAF0, 0x1FAF6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD } }; diff --git a/unpack-trees.c b/unpack-trees.c index 5786645f31..8ea0a542da 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -111,17 +111,17 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, strvec_init(&opts->msgs_to_free); if (!strcmp(cmd, "checkout")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("Your local changes to the following files would be overwritten by checkout:\n%%s" "Please commit your changes or stash them before you switch branches.") : _("Your local changes to the following files would be overwritten by checkout:\n%%s"); else if (!strcmp(cmd, "merge")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("Your local changes to the following files would be overwritten by merge:\n%%s" "Please commit your changes or stash them before you merge.") : _("Your local changes to the following files would be overwritten by merge:\n%%s"); else - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? 
_("Your local changes to the following files would be overwritten by %s:\n%%s" "Please commit your changes or stash them before you %s.") : _("Your local changes to the following files would be overwritten by %s:\n%%s"); @@ -132,17 +132,17 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, _("Updating the following directories would lose untracked files in them:\n%s"); if (!strcmp(cmd, "checkout")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be removed by checkout:\n%%s" "Please move or remove them before you switch branches.") : _("The following untracked working tree files would be removed by checkout:\n%%s"); else if (!strcmp(cmd, "merge")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be removed by merge:\n%%s" "Please move or remove them before you merge.") : _("The following untracked working tree files would be removed by merge:\n%%s"); else - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be removed by %s:\n%%s" "Please move or remove them before you %s.") : _("The following untracked working tree files would be removed by %s:\n%%s"); @@ -150,17 +150,17 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd); if (!strcmp(cmd, "checkout")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be overwritten by checkout:\n%%s" "Please move or remove them before you switch branches.") : _("The following untracked working tree files would be overwritten by checkout:\n%%s"); else if (!strcmp(cmd, "merge")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be overwritten by merge:\n%%s" "Please move or remove them before you merge.") : _("The following untracked working tree files would be overwritten by merge:\n%%s"); else - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be overwritten by %s:\n%%s" "Please move or remove them before you %s.") : _("The following untracked working tree files would be overwritten by %s:\n%%s"); @@ -479,7 +479,7 @@ static int check_updates(struct unpack_trees_options *o, errs |= run_parallel_checkout(&state, pc_workers, pc_threshold, progress, &cnt); stop_progress(&progress); - errs |= finish_delayed_checkout(&state, NULL); + errs |= finish_delayed_checkout(&state, NULL, o->verbose_update); git_attr_set_direction(GIT_ATTR_CHECKIN); if (o->clone) @@ -1255,7 +1255,7 @@ static int sparse_dir_matches_path(const struct cache_entry *ce, static struct cache_entry *find_cache_entry(struct traverse_info *info, const struct name_entry *p) { - struct cache_entry *ce; + const char *path; int pos = find_cache_pos(info, p->path, p->pathlen); struct unpack_trees_options *o = info->data; @@ -1281,9 +1281,11 @@ static struct cache_entry *find_cache_entry(struct traverse_info *info, * paths (e.g. "subdir-"). 
*/ while (pos >= 0) { - ce = o->src_index->cache[pos]; + struct cache_entry *ce = o->src_index->cache[pos]; - if (strncmp(ce->name, p->path, p->pathlen)) + if (!skip_prefix(ce->name, info->traverse_path, &path) || + strncmp(path, p->path, p->pathlen) || + path[p->pathlen] != '/') return NULL; if (S_ISSPARSEDIR(ce->ce_mode) && diff --git a/upload-pack.c b/upload-pack.c index 297b76fcb4..c78d55bc67 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -1207,14 +1207,14 @@ static int send_ref(const char *refname, const struct object_id *oid, format_symref_info(&symref_info, &data->symref); format_session_id(&session_id, data); - packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s%s object-format=%s agent=%s\n", + packet_fwrite_fmt(stdout, "%s %s%c%s%s%s%s%s%s%s object-format=%s agent=%s\n", oid_to_hex(oid), refname_nons, 0, capabilities, (data->allow_uor & ALLOW_TIP_SHA1) ? " allow-tip-sha1-in-want" : "", (data->allow_uor & ALLOW_REACHABLE_SHA1) ? " allow-reachable-sha1-in-want" : "", - data->stateless_rpc ? " no-done" : "", + data->no_done ? " no-done" : "", symref_info.buf, data->allow_filter ? " filter" : "", session_id.buf, @@ -1223,11 +1223,11 @@ static int send_ref(const char *refname, const struct object_id *oid, strbuf_release(&symref_info); strbuf_release(&session_id); } else { - packet_write_fmt(1, "%s %s\n", oid_to_hex(oid), refname_nons); + packet_fwrite_fmt(stdout, "%s %s\n", oid_to_hex(oid), refname_nons); } capabilities = NULL; if (!peel_iterated_oid(oid, &peeled)) - packet_write_fmt(1, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons); + packet_fwrite_fmt(stdout, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons); return 0; } @@ -1329,7 +1329,8 @@ static int upload_pack_config(const char *var, const char *value, void *cb_data) return parse_hide_refs_config(var, value, "uploadpack"); } -void upload_pack(struct upload_pack_options *options) +void upload_pack(const int advertise_refs, const int stateless_rpc, + const int timeout) { struct packet_reader reader; struct upload_pack_data data; @@ -1338,16 +1339,24 @@ void upload_pack(struct upload_pack_options *options) git_config(upload_pack_config, &data); - data.stateless_rpc = options->stateless_rpc; - data.daemon_mode = options->daemon_mode; - data.timeout = options->timeout; + data.stateless_rpc = stateless_rpc; + data.timeout = timeout; + if (data.timeout) + data.daemon_mode = 1; head_ref_namespaced(find_symref, &data.symref); - if (options->advertise_refs || !data.stateless_rpc) { + if (advertise_refs || !data.stateless_rpc) { reset_timeout(data.timeout); + if (advertise_refs) + data.no_done = 1; head_ref_namespaced(send_ref, &data); for_each_namespaced_ref(send_ref, &data); + /* + * fflush stdout before calling advertise_shallow_grafts because send_ref + * uses stdio. 
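Because send_ref() now writes through buffered stdio (packet_fwrite_fmt()) while advertise_shallow_grafts() and packet_flush() still write directly to file descriptor 1, the buffer has to be flushed before switching write paths, or the ref advertisement could be reordered on the wire. A minimal standalone illustration of that pitfall, not Git's packet code:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *raw = "second line via write(2)\n";

	/* Buffered: may sit in the stdio buffer until flushed. */
	fprintf(stdout, "first line via stdio\n");

	/*
	 * Without this fflush(), the write(2) below could reach the
	 * pipe before the buffered line when stdout is not a tty.
	 */
	if (fflush(stdout))
		return 1;

	/* Unbuffered: goes straight to file descriptor 1. */
	if (write(1, raw, strlen(raw)) < 0)
		return 1;
	return 0;
}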
+ */ + fflush_or_die(stdout); advertise_shallow_grafts(1); packet_flush(1); } else { @@ -1355,7 +1364,7 @@ void upload_pack(struct upload_pack_options *options) for_each_namespaced_ref(check_ref, NULL); } - if (!options->advertise_refs) { + if (!advertise_refs) { packet_reader_init(&reader, 0, NULL, 0, PACKET_READ_CHOMP_NEWLINE | PACKET_READ_DIE_ON_ERR_PACKET); @@ -1417,21 +1426,25 @@ static int parse_want_ref(struct packet_writer *writer, const char *line, struct string_list *wanted_refs, struct object_array *want_obj) { - const char *arg; - if (skip_prefix(line, "want-ref ", &arg)) { + const char *refname_nons; + if (skip_prefix(line, "want-ref ", &refname_nons)) { struct object_id oid; struct string_list_item *item; struct object *o; + struct strbuf refname = STRBUF_INIT; - if (read_ref(arg, &oid)) { - packet_writer_error(writer, "unknown ref %s", arg); - die("unknown ref %s", arg); + strbuf_addf(&refname, "%s%s", get_git_namespace(), refname_nons); + if (ref_is_hidden(refname_nons, refname.buf) || + read_ref(refname.buf, &oid)) { + packet_writer_error(writer, "unknown ref %s", refname_nons); + die("unknown ref %s", refname_nons); } + strbuf_release(&refname); - item = string_list_append(wanted_refs, arg); + item = string_list_append(wanted_refs, refname_nons); item->util = oiddup(&oid); - o = parse_object_or_die(&oid, arg); + o = parse_object_or_die(&oid, refname_nons); if (!(o->flags & WANTED)) { o->flags |= WANTED; add_object_array(o, NULL, want_obj); @@ -1655,8 +1668,7 @@ enum fetch_state { FETCH_DONE, }; -int upload_pack_v2(struct repository *r, struct strvec *keys, - struct packet_reader *request) +int upload_pack_v2(struct repository *r, struct packet_reader *request) { enum fetch_state state = FETCH_PROCESS_ARGS; struct upload_pack_data data; diff --git a/upload-pack.h b/upload-pack.h index 27ddcdc6cb..d6ee25ea98 100644 --- a/upload-pack.h +++ b/upload-pack.h @@ -1,20 +1,12 @@ #ifndef UPLOAD_PACK_H #define UPLOAD_PACK_H -struct upload_pack_options { - int stateless_rpc; - int advertise_refs; - unsigned int timeout; - int daemon_mode; -}; - -void upload_pack(struct upload_pack_options *options); +void upload_pack(const int advertise_refs, const int stateless_rpc, + const int timeout); struct repository; -struct strvec; struct packet_reader; -int upload_pack_v2(struct repository *r, struct strvec *keys, - struct packet_reader *request); +int upload_pack_v2(struct repository *r, struct packet_reader *request); struct strbuf; int upload_pack_advertise(struct repository *r, diff --git a/userdiff.c b/userdiff.c index d9b2ba752f..af02b1878c 100644 --- a/userdiff.c +++ b/userdiff.c @@ -13,6 +13,16 @@ static int drivers_alloc; #define IPATTERN(name, pattern, word_regex) \ { name, NULL, -1, { pattern, REG_EXTENDED | REG_ICASE }, \ word_regex "|[^[:space:]]|[\xc0-\xff][\x80-\xbf]+" } + +/* + * Built-in drivers for various languages, sorted by their names + * (except that the "default" is left at the end). + * + * When writing or updating patterns, assume that the contents these + * patterns are applied to are syntactically correct. The patterns + * can be simple without implementing all syntactical corner cases, as + * long as they are sufficiently permissive. 
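To make the comment above concrete, here is a small sketch that compiles a simplified form of the new Java class/enum/interface hunk-header pattern with POSIX regcomp() and runs it over two sample lines. The pattern is trimmed for brevity and the Java lines are invented, so treat it as an illustration of how permissive these patterns are allowed to be, not as the exact driver regex:

#include <regex.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Simplified form of the Java class/enum/interface pattern; it
	 * assumes the input is valid Java, so it does not try to reject
	 * every malformed declaration.
	 */
	const char *pattern =
		"^[ \t]*(([a-z]+[ \t]+)*(class|enum|interface)[ \t]+"
		"[A-Za-z][A-Za-z0-9_$]*)";
	const char *lines[] = {
		"public final class FooBar implements Runnable {",
		"    int refCount = 0;",
	};
	regex_t re;
	int i;

	if (regcomp(&re, pattern, REG_EXTENDED))
		return 1;
	for (i = 0; i < 2; i++)
		printf("%s -> %s\n", lines[i],
		       regexec(&re, lines[i], 0, NULL, 0) ?
		       "no match" : "hunk header candidate");
	regfree(&re);
	return 0;
}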
+ */ static struct userdiff_driver builtin_drivers[] = { IPATTERN("ada", "!^(.*[ \t])?(is[ \t]+new|renames|is[ \t]+separate)([ \t].*)?$\n" @@ -142,7 +152,11 @@ PATTERNS("html", "[^<>= \t]+"), PATTERNS("java", "!^[ \t]*(catch|do|for|if|instanceof|new|return|switch|throw|while)\n" - "^[ \t]*(([A-Za-z_][A-Za-z_0-9]*[ \t]+)+[A-Za-z_][A-Za-z_0-9]*[ \t]*\\([^;]*)$", + /* Class, enum, and interface declarations */ + "^[ \t]*(([a-z]+[ \t]+)*(class|enum|interface)[ \t]+[A-Za-z][A-Za-z0-9_$]*[ \t]+.*)$\n" + /* Method definitions; note that constructor signatures are not */ + /* matched because they are indistinguishable from method calls. */ + "^[ \t]*(([A-Za-z_<>&][][?&<>.,A-Za-z_0-9]*[ \t]+)+[A-Za-z_][A-Za-z_0-9]*[ \t]*\\([^;]*)$", /* -- */ "[a-zA-Z_][a-zA-Z0-9_]*" "|[-+0-9.e]+[fFlL]?|0[xXbB]?[0-9a-fA-F]+[lL]?" @@ -214,7 +228,7 @@ PATTERNS("perl", "|<<|<>|<=>|>>"), PATTERNS("php", "^[\t ]*(((public|protected|private|static|abstract|final)[\t ]+)*function.*)$\n" - "^[\t ]*((((final|abstract)[\t ]+)?class|interface|trait).*)$", + "^[\t ]*((((final|abstract)[\t ]+)?class|enum|interface|trait).*)$", /* -- */ "[a-zA-Z_][a-zA-Z0-9_]*" "|[-+0-9.e]+|0[xXbB]?[0-9a-fA-F]+" @@ -193,7 +193,9 @@ int xopen(const char *path, int oflag, ...) if (errno == EINTR) continue; - if ((oflag & O_RDWR) == O_RDWR) + if ((oflag & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + die_errno(_("unable to create '%s'"), path); + else if ((oflag & O_RDWR) == O_RDWR) die_errno(_("could not open '%s' for reading and writing"), path); else if ((oflag & O_WRONLY) == O_WRONLY) die_errno(_("could not open '%s' for writing"), path); diff --git a/write-or-die.c b/write-or-die.c index d33e68f6ab..0b1ec8190b 100644 --- a/write-or-die.c +++ b/write-or-die.c @@ -70,3 +70,15 @@ void write_or_die(int fd, const void *buf, size_t count) die_errno("write error"); } } + +void fwrite_or_die(FILE *f, const void *buf, size_t count) +{ + if (fwrite(buf, 1, count, f) != count) + die_errno("fwrite error"); +} + +void fflush_or_die(FILE *f) +{ + if (fflush(f)) + die_errno("fflush error"); +} diff --git a/wt-status.c b/wt-status.c index eaed30eafb..e4f29b2b4c 100644 --- a/wt-status.c +++ b/wt-status.c @@ -787,7 +787,7 @@ static void wt_status_collect_untracked(struct wt_status *s) dir_clear(&dir); - if (advice_status_u_option) + if (advice_enabled(ADVICE_STATUS_U_OPTION)) s->untracked_in_ms = (getnanotime() - t_begin) / 1000000; } @@ -1158,7 +1158,7 @@ static void wt_longstatus_print_tracking(struct wt_status *s) if (!format_tracking_info(branch, &sb, s->ahead_behind_flags)) return; - if (advice_status_ahead_behind_warning && + if (advice_enabled(ADVICE_STATUS_AHEAD_BEHIND_WARNING) && s->ahead_behind_flags == AHEAD_BEHIND_FULL) { uint64_t t_delta_in_ms = (getnanotime() - t_begin) / 1000000; if (t_delta_in_ms > AB_DELAY_WARNING_IN_MS) { @@ -1845,7 +1845,7 @@ static void wt_longstatus_print(struct wt_status *s) wt_longstatus_print_other(s, &s->untracked, _("Untracked files"), "add"); if (s->show_ignored_mode) wt_longstatus_print_other(s, &s->ignored, _("Ignored files"), "add -f"); - if (advice_status_u_option && 2000 < s->untracked_in_ms) { + if (advice_enabled(ADVICE_STATUS_U_OPTION) && 2000 < s->untracked_in_ms) { status_printf_ln(s, GIT_COLOR_NORMAL, "%s", ""); status_printf_ln(s, GIT_COLOR_NORMAL, _("It took %.2f seconds to enumerate untracked files. 'status -uno'\n" |
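The xopen() hunk earlier in this patch adds a dedicated message for the O_CREAT|O_EXCL case, since a failure there usually means the file already exists rather than that it could not be read or written. A standalone sketch of the behavior being improved, with an illustrative file name; this is not Git's wrapper code:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "lockfile.tmp"; /* illustrative path */
	int fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0666);

	if (fd < 0) {
		/*
		 * With O_CREAT|O_EXCL the interesting failure is
		 * "already exists" (EEXIST), so report it as a creation
		 * failure rather than a generic read/write error.
		 */
		fprintf(stderr, "unable to create '%s': %s\n",
			path, strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}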