292 files changed, 7973 insertions, 3315 deletions
diff --git a/.github/workflows/l10n.yml b/.github/workflows/l10n.yml new file mode 100644 index 0000000000..27f72f0ff3 --- /dev/null +++ b/.github/workflows/l10n.yml @@ -0,0 +1,105 @@ +name: git-l10n + +on: [push, pull_request_target] + +jobs: + git-po-helper: + if: >- + endsWith(github.repository, '/git-po') || + contains(github.head_ref, 'l10n') || + contains(github.ref, 'l10n') + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Setup base and head objects + id: setup-tips + run: | + if test "${{ github.event_name }}" = "pull_request_target" + then + base=${{ github.event.pull_request.base.sha }} + head=${{ github.event.pull_request.head.sha }} + else + base=${{ github.event.before }} + head=${{ github.event.after }} + fi + echo "::set-output name=base::$base" + echo "::set-output name=head::$head" + - name: Run partial clone + run: | + git -c init.defaultBranch=master init --bare . + git remote add \ + --mirror=fetch \ + origin \ + https://github.com/${{ github.repository }} + # Fetch tips that may be unreachable from github.ref: + # - For a forced push, "$base" may be unreachable. + # - For a "pull_request_target" event, "$head" may be unreachable. + args= + for commit in \ + ${{ steps.setup-tips.outputs.base }} \ + ${{ steps.setup-tips.outputs.head }} + do + case $commit in + *[^0]*) + args="$args $commit" + ;; + *) + # Should not fetch ZERO-OID. + ;; + esac + done + git -c protocol.version=2 fetch \ + --progress \ + --no-tags \ + --no-write-fetch-head \ + --filter=blob:none \ + origin \ + ${{ github.ref }} \ + $args + - uses: actions/setup-go@v2 + with: + go-version: '>=1.16' + - name: Install git-po-helper + run: go install github.com/git-l10n/git-po-helper@main + - name: Install other dependencies + run: | + sudo apt-get update -q && + sudo apt-get install -q -y gettext + - name: Run git-po-helper + id: check-commits + run: | + exit_code=0 + git-po-helper check-commits \ + --github-action-event="${{ github.event_name }}" -- \ + ${{ steps.setup-tips.outputs.base }}..${{ steps.setup-tips.outputs.head }} \ + >git-po-helper.out 2>&1 || exit_code=$? + if test $exit_code -ne 0 || grep -q WARNING git-po-helper.out + then + # Remove ANSI colors which are proper for console logs but not + # proper for PR comment. 
+ echo "COMMENT_BODY<<EOF" >>$GITHUB_ENV + perl -pe 's/\e\[[0-9;]*m//g; s/\bEOF$//g' git-po-helper.out >>$GITHUB_ENV + echo "EOF" >>$GITHUB_ENV + fi + cat git-po-helper.out + exit $exit_code + - name: Create comment in pull request for report + uses: mshick/add-pr-comment@v1 + if: >- + always() && + github.event_name == 'pull_request_target' && + env.COMMENT_BODY != '' + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + repo-token-user-login: 'github-actions[bot]' + message: > + ${{ steps.check-commits.outcome == 'failure' && 'Errors and warnings' || 'Warnings' }} + found by [git-po-helper](https://github.com/git-l10n/git-po-helper#readme) in workflow + [#${{ github.run_number }}](${{ env.GITHUB_SERVER_URL }}/${{ github.repository }}/actions/runs/${{ github.run_id }}): + + ``` + + ${{ env.COMMENT_BODY }} + + ``` diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index be5cbb3774..4728168478 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -262,6 +262,8 @@ jobs: image: alpine - jobname: Linux32 image: daald/ubuntu32:xenial + - jobname: pedantic + image: fedora env: jobname: ${{matrix.vector.jobname}} runs-on: ubuntu-latest diff --git a/Documentation/MyFirstContribution.txt b/Documentation/MyFirstContribution.txt index 015cf24631..b20bc8e914 100644 --- a/Documentation/MyFirstContribution.txt +++ b/Documentation/MyFirstContribution.txt @@ -1029,22 +1029,42 @@ kidding - be patient!) [[v2-git-send-email]] === Sending v2 -Skip ahead to <<reviewing,Responding to Reviews>> for information on how to -handle comments from reviewers. Continue this section when your topic branch is -shaped the way you want it to look for your patchset v2. +This section will focus on how to send a v2 of your patchset. To learn what +should go into v2, skip ahead to <<reviewing,Responding to Reviews>> for +information on how to handle comments from reviewers. + +We'll reuse our `psuh` topic branch for v2. Before we make any changes, we'll +mark the tip of our v1 branch for easy reference: -When you're ready with the next iteration of your patch, the process is fairly -similar. +---- +$ git checkout psuh +$ git branch psuh-v1 +---- -First, generate your v2 patches again: +Refine your patch series by using `git rebase -i` to adjust commits based upon +reviewer comments. Once the patch series is ready for submission, generate your +patches again, but with some new flags: ---- -$ git format-patch -v2 --cover-letter -o psuh/ master..psuh +$ git format-patch -v2 --cover-letter -o psuh/ --range-diff master..psuh-v1 master.. ---- -This will add your v2 patches, all named like `v2-000n-my-commit-subject.patch`, -to the `psuh/` directory. You may notice that they are sitting alongside the v1 -patches; that's fine, but be careful when you are ready to send them. +The `--range-diff master..psuh-v1` parameter tells `format-patch` to include a +range-diff between `psuh-v1` and `psuh` in the cover letter (see +linkgit:git-range-diff[1]). This helps tell reviewers about the differences +between your v1 and v2 patches. + +The `-v2` parameter tells `format-patch` to output your patches +as version "2". For instance, you may notice that your v2 patches are +all named like `v2-000n-my-commit-subject.patch`. `-v2` will also format +your patches by prefixing them with "[PATCH v2]" instead of "[PATCH]", +and your range-diff will be prefaced with "Range-diff against v1". + +Afer you run this command, `format-patch` will output the patches to the `psuh/` +directory, alongside the v1 patches. 
Using a single directory makes it easy to +refer to the old v1 patches while proofreading the v2 patches, but you will need +to be careful to send out only the v2 patches. We will use a pattern like +"psuh/v2-*.patch" (not "psuh/*.patch", which would match v1 and v2 patches). Edit your cover letter again. Now is a good time to mention what's different between your last version and now, if it's something significant. You do not @@ -1082,7 +1102,7 @@ to the command: ---- $ git send-email --to=target@example.com --in-reply-to="<foo.12345.author@example.com>" - psuh/v2* + psuh/v2-*.patch ---- [[single-patch]] diff --git a/Documentation/MyFirstObjectWalk.txt b/Documentation/MyFirstObjectWalk.txt index 2d10eea7a9..45eb84d8b4 100644 --- a/Documentation/MyFirstObjectWalk.txt +++ b/Documentation/MyFirstObjectWalk.txt @@ -691,7 +691,7 @@ help understand. In our case, that means we omit trees and blobs not directly referenced by `HEAD` or `HEAD`'s history, because we begin the walk with only `HEAD` in the `pending` list.) -First, we'll need to `#include "list-objects-filter-options.h`" and set up the +First, we'll need to `#include "list-objects-filter-options.h"` and set up the `struct list_objects_filter_options` at the top of the function. ---- @@ -779,7 +779,7 @@ Count all the objects within and modify the print statement: while ((oid = oidset_iter_next(&oit))) omitted_count++; - printf("commits %d\nblobs %d\ntags %d\ntrees%d\nomitted %d\n", + printf("commits %d\nblobs %d\ntags %d\ntrees %d\nomitted %d\n", commit_count, blob_count, tag_count, tree_count, omitted_count); ---- diff --git a/Documentation/RelNotes/2.34.0.txt b/Documentation/RelNotes/2.34.0.txt index 2adefa153d..9d583ead04 100644 --- a/Documentation/RelNotes/2.34.0.txt +++ b/Documentation/RelNotes/2.34.0.txt @@ -14,6 +14,54 @@ UI, Workflows & Features * The userdiff pattern for "java" language has been updated. + * "git rebase" by default skips changes that are equivalent to + commits that are already in the history the branch is rebased onto; + give messages when this happens to let the users be aware of + skipped commits, and also teach them how to tell "rebase" to keep + duplicated changes. + + * The advice message that "git cherry-pick" gives when it asks + conflicted replay of a commit to be resolved by the end user has + been updated. + + * After "git clone --recurse-submodules", all submodules are cloned + but they are not by default recursed into by other commands. With + submodule.stickyRecursiveClone configuration set, submodule.recurse + configuration is set to true in a repository created by "clone" + with "--recurse-submodules" option. + + * The logic for auto-correction of misspelt subcommands learned to go + interactive when the help.autocorrect configuration variable is set + to 'prompt'. + + * "git maintenance" scheduler learned to use systemd timers as a + possible backend. + + * "git diff --submodule=diff" showed failure from run_command() when + trying to run diff inside a submodule, when the user manually + removes the submodule directory. + + * "git bundle unbundle" learned to show progress display. + + * In cone mode, the sparse-index code path learned to remove ignored + files (like build artifacts) outside the sparse cone, allowing the + entire directory outside the sparse cone to be removed, which is + especially useful when the sparse patterns change. + + * Taking advantage of the CGI interface, http-backend has been + updated to enable protocol v2 automatically when the other side + asks for it. 
+ + * The credential-cache helper has been adjusted to Windows. + + * The error in "git help no-such-git-command" is handled better. + + * The unicode character width table (used for output alignment) has + been updated. + + * The ref iteration code used to optionally allow dangling refs to be + shown, which has been tightened up. + Performance, Internal Implementation, Development Support etc. @@ -48,6 +96,51 @@ Performance, Internal Implementation, Development Support etc. of the commit-graph, when available, to determine if a commit is reachable from any of the existing refs. + * "git fetch --quiet" optimization to avoid useless computation of + info that will never be displayed. + + * Callers from older advice_config[] based API has been updated to + use the newer advice_if_enabled() and advice_enabled() API. + + * Teach "test_pause" and "debug" helpers to allow using the HOME and + TERM environment variables the user usually uses. + + * "make INSTALL_STRIP=-s install" allows the installation step to use + "install -s" to strip the binaries as they get installed. + + * Code that handles large number of refs in the "git fetch" code + path has been optimized. + + * The reachability bitmap file used to be generated only for a single + pack, but now we've learned to generate bitmaps for history that + span across multiple packfiles. + + * The code to make "git grep" recurse into submodules has been + updated to migrate away from the "add submodule's object store as + an alternate object store" mechanism (which is suboptimal). + + * The tracing of process ancestry information has been enhanced. + + * Reduce number of write(2) system calls while sending the + ref advertisement. + + * Update the build procedure to use the "-pedantic" build when + DEVELOPER makefile macro is in effect. + + * Large part of "git submodule add" gets rewritten in C. + + * The run-command API has been updated so that the callers can easily + ask the file descriptors open for packfiles to be closed immediately + before spawning commands that may trigger auto-gc. + + * An oddball OPTION_ARGUMENT feature has been removed from the + parse-options API. + + * The mergesort implementation used to sort linked list has been + optimized. + + * Remove external declaration of functions that no longer exist. + Fixes since v2.33 ----------------- @@ -91,6 +184,130 @@ Fixes since v2.33 * Various bugs in "git rebase -r" have been fixed. (merge f2563c9ef3 pw/rebase-r-fixes later to maint). + * mmap() imitation used to call xmalloc() that dies upon malloc() + failure, which has been corrected to just return an error to the + caller to be handled. + (merge 95b4ff3931 rs/git-mmap-uses-malloc later to maint). + + * "git diff --relative" segfaulted and/or produced incorrect result + when there are unmerged paths. + (merge 8174627b3d dd/diff-files-unmerged-fix later to maint). + + * The delayed checkout code path in "git checkout" etc. were chatty + even when --quiet and/or --no-progress options were given. + (merge 7a132c628e mt/quiet-with-delayed-checkout later to maint). + + * "git branch -D <branch>" used to refuse to remove a broken branch + ref that points at a missing commit, which has been corrected. + (merge 597a977489 rs/branch-allow-deleting-dangling later to maint). + + * Build update for Apple clang. + (merge f32c5d3716 cb/makefile-apple-clang later to maint). + + * The parser for the "--nl" option of "git column" has been + corrected. + (merge c93ca46cf5 sg/column-nl later to maint). 
+ + * "git upload-pack" which runs on the other side of "git fetch" + forgot to take the ref namespaces into account when handling + want-ref requests. + (merge 53a66ec37c ka/want-ref-in-namespace later to maint). + + * The sparse-index support can corrupt the index structure by storing + a stale and/or uninitialized data, which has been corrected. + (merge d9e9b44d7a jh/sparse-index-resize-fix later to maint). + + * Buggy tests could damage repositories outside the throw-away test + area we created. We now by default export GIT_CEILING_DIRECTORIES + to limit the damage from such a stray test. + (merge 614c3d8f2e sg/set-ceiling-during-tests later to maint). + + * Even when running "git send-email" without its own threaded + discussion support, a threading related header in one message is + carried over to the subsequent message to result in an unwanted + threading, which has been corrected. + (merge e082113484 mh/send-email-reset-in-reply-to later to maint). + + * The output from "git fast-export", when its anonymization feature + is in use, showed an annotated tag incorrectly. + (merge 2f040a9671 tk/fast-export-anonymized-tag-fix later to maint). + + * Doc update plus improved error reporting. + (merge 1e93770888 jk/log-warn-on-bogus-encoding later to maint). + + * Recent "diff -m" changes broke "gitk", which has been corrected. + (merge 5acffd3473 so/diff-index-regression-fix later to maint). + + * Regression fix. + (merge b996f84989 ab/send-email-config-fix later to maint). + + * The "git apply -3" code path learned not to bother the lower level + merge machinery when the three-way merge can be trivially resolved + without the content level merge. This fixes a regression caused by + recent "-3way first and fall back to direct application" change. + (merge 57f183b698 jc/trivial-threeway-binary-merge later to maint). + + * The code that optionally creates the *.rev reverse index file has + been optimized to avoid needless computation when it is not writing + the file out. + (merge 8fe8bae9d2 ab/reverse-midx-optim later to maint). + + * "git range-diff -I... <range> <range>" segfaulted, which has been + corrected. + (merge 709b3f32d3 rs/range-diff-avoid-segfault-with-I later to maint). + + * The order in which various files that make up a single (conceptual) + packfile has been reevaluated and straightened up. This matters in + correctness, as an incomplete set of files must not be shown to a + running Git. + (merge 4bc1fd6e39 tb/pack-finalize-ordering later to maint). + + * The "mode" word is useless in a call to open(2) that does not + create a new file. Such a call in the files backend of the ref + subsystem has been cleaned up. + (merge 35cf94eaf6 rs/no-mode-to-open-when-appending later to maint). + + * "git update-ref --stdin" failed to flush its output as needed, + which potentially led the conversation to a deadlock. + (merge 7c1200745b ps/update-ref-batch-flush later to maint). + + * When "git am --abort" fails to abort correctly, it still exited + with exit status of 0, which has been corrected. + (merge c5ead19ea2 en/am-abort-fix later to maint). + + * Correct nr and alloc members of strvec struct to be of type size_t. + (merge 8d133a4653 jk/strvec-typefix later to maint). + + * "git stash", where the tentative change involves changing a + directory to a file (or vice versa), was confused, which has been + corrected. + (merge bee8691f19 en/stash-df-fix later to maint). 
+ + * "git clone" from a repository whose HEAD is unborn into a bare + repository didn't follow the branch name the other side used, which + is corrected. + (merge 6b58df54cf jk/clone-unborn-head-in-bare later to maint). + + * "git cvsserver" had a long-standing bug in its authentication code, + which has finally been corrected (it is unclear and is a separate + question if anybody is seriously using it, though). + (merge 4b81f690f6 cb/cvsserver later to maint). + + * "git difftool --dir-diff" mishandled symbolic links. + (merge 5bafb3576a da/difftool-dir-diff-symlink-fix later to maint). + + * Sensitive data in the HTTP trace were supposed to be redacted, but + we failed to do so in HTTP/2 requests. + (merge b66c77a64e jk/http-redact-fix later to maint). + + * "make clean" has been updated to remove leftover .depend/ + directories, even when it is not told to use them to compute header + dependencies. + (merge f0a74bcb03 ab/make-clean-depend-dirs later to maint). + + * Protocol v0 clients can get stuck parsing a malformed feature line. + (merge 44d2aec6e8 ah/connect-parse-feature-v0-fix later to maint). + * Other code cleanup, docfix, build fix, etc. (merge 1d9c8daef8 ab/bundle-doc later to maint). (merge 81483fe613 en/merge-strategy-docs later to maint). @@ -100,3 +317,37 @@ Fixes since v2.33 (merge be6444d1ca fc/completion-updates later to maint). (merge ff7b83f562 ti/tcsh-completion-regression-fix later to maint). (merge 325b06deda sg/make-fix-ar-invocation later to maint). + (merge bd72824c60 me/t5582-cleanup later to maint). + (merge f6a5af0f62 ga/send-email-sendmail-cmd later to maint). + (merge f58c7468cd ab/ls-remote-packet-trace later to maint). + (merge 0160f7e725 ab/rebase-fatal-fatal-fix later to maint). + (merge a16eb6b1ff js/maintenance-launchctl-fix later to maint). + (merge c21b2511c2 jk/t5323-no-pack-test-fix later to maint). + (merge 5146c2f148 mh/credential-leakfix later to maint). + (merge 1549577338 dd/t6300-wo-gpg-fix later to maint). + (merge 66e905b7dd rs/xopen-reports-open-failures later to maint). + (merge 469888e6a5 es/walken-tutorial-fix later to maint). + (merge 88682b016d ba/object-info later to maint). + (merge b45c172e51 ab/gc-log-rephrase later to maint). + (merge ccdd5d1eb1 ab/mailmap-leakfix later to maint). + (merge 6540b71614 cb/remote-ndebug-fix later to maint). + (merge e4f8d27585 rs/show-branch-simplify later to maint). + (merge e124ecf7f7 rs/archive-use-object-id later to maint). + (merge cebead1ebf cb/ci-build-pedantic later to maint). + (merge ca0cc98e03 bs/doc-bugreport-outdir later to maint). + (merge 72b113e562 ab/no-more-check-bindir later to maint). + (merge 92a5d1c9b4 jc/prefix-filename-allocates later to maint). + (merge d9a65b6c0a rs/setup-use-xopen-and-xdup later to maint). + (merge e8f55568de jk/t5562-racefix later to maint). + (merge 8f0f110156 rs/drop-core-compression-vars later to maint). + (merge b6d8887d3d ma/doc-git-version later to maint). + (merge 66c0c44df6 cb/plug-leaks-in-alloca-emu-users later to maint). + (merge afb32e8101 kz/revindex-comment-fix later to maint). + (merge ae578de926 po/git-config-doc-mentions-help-c later to maint). + (merge 187fc8b8b6 cb/unicode-14 later to maint). + (merge 3584cff71c en/typofixes later to maint). + (merge f188160be9 ab/bundle-remove-verbose-option later to maint). + (merge 8c6b4332b4 rs/close-pack-leakfix later to maint). + (merge 51b04c05b7 bs/difftool-msg-tweak later to maint). + (merge dd20e4a6db ab/make-compdb-fix later to maint). + (merge 6ffb990dc4 os/status-docfix later to maint). 
diff --git a/Documentation/config/advice.txt b/Documentation/config/advice.txt index 8b2849ff7b..063eec2511 100644 --- a/Documentation/config/advice.txt +++ b/Documentation/config/advice.txt @@ -44,6 +44,9 @@ advice.*:: Shown when linkgit:git-push[1] rejects a forced update of a branch when its remote-tracking ref has updates that we do not have locally. + skippedCherryPicks:: + Shown when linkgit:git-rebase[1] skips a commit that has already + been cherry-picked onto the upstream branch. statusAheadBehind:: Shown when linkgit:git-status[1] computes the ahead/behind counts for a local ref compared to its remote tracking ref, diff --git a/Documentation/config/gui.txt b/Documentation/config/gui.txt index d30831a130..0c087fd8c9 100644 --- a/Documentation/config/gui.txt +++ b/Documentation/config/gui.txt @@ -11,7 +11,7 @@ gui.displayUntracked:: in the file list. The default is "true". gui.encoding:: - Specifies the default encoding to use for displaying of + Specifies the default character encoding to use for displaying of file contents in linkgit:git-gui[1] and linkgit:gitk[1]. It can be overridden by setting the 'encoding' attribute for relevant files (see linkgit:gitattributes[5]). diff --git a/Documentation/config/help.txt b/Documentation/config/help.txt index 783a90a0f9..610701f9a3 100644 --- a/Documentation/config/help.txt +++ b/Documentation/config/help.txt @@ -9,13 +9,15 @@ help.format:: help.autoCorrect:: If git detects typos and can identify exactly one valid command similar - to the error, git will automatically run the intended command after - waiting a duration of time defined by this configuration value in - deciseconds (0.1 sec). If this value is 0, the suggested corrections - will be shown, but not executed. If it is a negative integer, or - "immediate", the suggested command - is run immediately. If "never", suggestions are not shown at all. The - default value is zero. + to the error, git will try to suggest the correct command or even + run the suggestion automatically. Possible config values are: + - 0 (default): show the suggested command. + - positive number: run the suggested command after specified +deciseconds (0.1 sec). + - "immediate": run the suggested command immediately. + - "prompt": show the suggestion and prompt for confirmation to run +the command. + - "never": don't run or show any suggested command. help.htmlPath:: Specify the path where the HTML documentation resides. File system paths diff --git a/Documentation/config/transfer.txt b/Documentation/config/transfer.txt index 505126a780..b49429eb4d 100644 --- a/Documentation/config/transfer.txt +++ b/Documentation/config/transfer.txt @@ -52,13 +52,17 @@ If you have multiple hideRefs values, later entries override earlier ones (and entries in more-specific config files override less-specific ones). + If a namespace is in use, the namespace prefix is stripped from each -reference before it is matched against `transfer.hiderefs` patterns. +reference before it is matched against `transfer.hiderefs` patterns. In +order to match refs before stripping, add a `^` in front of the ref name. If +you combine `!` and `^`, `!` must be specified first. ++ For example, if `refs/heads/master` is specified in `transfer.hideRefs` and the current namespace is `foo`, then `refs/namespaces/foo/refs/heads/master` -is omitted from the advertisements but `refs/heads/master` and -`refs/namespaces/bar/refs/heads/master` are still advertised as so-called -"have" lines. 
In order to match refs before stripping, add a `^` in front of -the ref name. If you combine `!` and `^`, `!` must be specified first. +is omitted from the advertisements. If `uploadpack.allowRefInWant` is set, +`upload-pack` will treat `want-ref refs/heads/master` in a protocol v2 +`fetch` command as if `refs/namespaces/foo/refs/heads/master` did not exist. +`receive-pack`, on the other hand, will still advertise the object id the +ref is pointing to without mentioning its name (a so-called ".have" line). + Even if you hide refs, a client may still be able to steal the target objects via the techniques described in the "SECURITY" section of the diff --git a/Documentation/git-am.txt b/Documentation/git-am.txt index 8714dfcb76..0a4a984dfd 100644 --- a/Documentation/git-am.txt +++ b/Documentation/git-am.txt @@ -178,6 +178,8 @@ default. You can use `--no-utf8` to override this. --abort:: Restore the original branch and abort the patching operation. + Revert contents of files involved in the am operation to their + pre-am state. --quit:: Abort the patching operation but keep HEAD and the index diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt index 94dc9a54f2..5449767121 100644 --- a/Documentation/git-branch.txt +++ b/Documentation/git-branch.txt @@ -118,7 +118,8 @@ OPTIONS Reset <branchname> to <startpoint>, even if <branchname> exists already. Without `-f`, 'git branch' refuses to change an existing branch. In combination with `-d` (or `--delete`), allow deleting the - branch irrespective of its merged status. In combination with + branch irrespective of its merged status, or whether it even + points to a valid commit. In combination with `-m` (or `--move`), allow renaming the branch even if the new branch name already exists, the same applies for `-c` (or `--copy`). diff --git a/Documentation/git-bugreport.txt b/Documentation/git-bugreport.txt index 66e88c2e31..d8817bf3ce 100644 --- a/Documentation/git-bugreport.txt +++ b/Documentation/git-bugreport.txt @@ -40,8 +40,8 @@ OPTIONS ------- -o <path>:: --output-directory <path>:: - Place the resulting bug report file in `<path>` instead of the root of - the Git repository. + Place the resulting bug report file in `<path>` instead of the current + directory. -s <format>:: --suffix <format>:: diff --git a/Documentation/git-bundle.txt b/Documentation/git-bundle.txt index ac0d003835..71b5ecabd1 100644 --- a/Documentation/git-bundle.txt +++ b/Documentation/git-bundle.txt @@ -13,7 +13,7 @@ SYNOPSIS [--version=<version>] <file> <git-rev-list-args> 'git bundle' verify [-q | --quiet] <file> 'git bundle' list-heads <file> [<refname>...] -'git bundle' unbundle <file> [<refname>...] +'git bundle' unbundle [--progress] <file> [<refname>...] DESCRIPTION ----------- diff --git a/Documentation/git-column.txt b/Documentation/git-column.txt index f58e9c43e6..6cea9ab463 100644 --- a/Documentation/git-column.txt +++ b/Documentation/git-column.txt @@ -39,7 +39,7 @@ OPTIONS --indent=<string>:: String to be printed at the beginning of each line. ---nl=<N>:: +--nl=<string>:: String to be printed at the end of each line, including newline character. diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt index 2dc4bae6da..992225f612 100644 --- a/Documentation/git-config.txt +++ b/Documentation/git-config.txt @@ -71,6 +71,9 @@ codes are: On success, the command returns the exit code 0. +A list of all available configuration variables can be obtained using the +`git help --config` command. 
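The `transfer.hideRefs` stripping rules in the hunk above are easier to follow with a concrete configuration. A sketch, run in the server-side repository; the namespace `foo` and the `refs/secret` hierarchy are made-up placeholders:

----
# Matched after namespace stripping: with namespace "foo" in effect,
# this hides refs/namespaces/foo/refs/heads/master from advertisement.
$ git config --add transfer.hideRefs refs/heads/master

# A leading '^' matches the full ref name, before any stripping.
$ git config --add transfer.hideRefs ^refs/namespaces/foo/refs/secret

# '!' (un-hide) and '^' can be combined, but '!' must come first.
$ git config --add transfer.hideRefs '!^refs/namespaces/foo/refs/secret/public'
----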
+ [[OPTIONS]] OPTIONS ------- diff --git a/Documentation/git-cvsserver.txt b/Documentation/git-cvsserver.txt index f2e4a47ebe..4dc57ed254 100644 --- a/Documentation/git-cvsserver.txt +++ b/Documentation/git-cvsserver.txt @@ -99,7 +99,7 @@ looks like ------ -Only anonymous access is provided by pserve by default. To commit you +Only anonymous access is provided by pserver by default. To commit you will have to create pserver accounts, simply add a gitcvs.authdb setting in the config file of the repositories you want the cvsserver to allow writes to, for example: @@ -114,21 +114,20 @@ The format of these files is username followed by the encrypted password, for example: ------ - myuser:$1Oyx5r9mdGZ2 - myuser:$1$BA)@$vbnMJMDym7tA32AamXrm./ + myuser:sqkNi8zPf01HI + myuser:$1$9K7FzU28$VfF6EoPYCJEYcVQwATgOP/ + myuser:$5$.NqmNH1vwfzGpV8B$znZIcumu1tNLATgV2l6e1/mY8RzhUDHMOaVOeL1cxV3 ------ You can use the 'htpasswd' facility that comes with Apache to make these -files, but Apache's MD5 crypt method differs from the one used by most C -library's crypt() function, so don't use the -m option. +files, but only with the -d option (or -B if your system suports it). -Alternatively you can produce the password with perl's crypt() operator: ------ - perl -e 'my ($user, $pass) = @ARGV; printf "%s:%s\n", $user, crypt($user, $pass)' $USER password ------ +Preferably use the system specific utility that manages password hash +creation in your platform (e.g. mkpasswd in Linux, encrypt in OpenBSD or +pwhash in NetBSD) and paste it in the right location. Then provide your password via the pserver method, for example: ------ - cvs -d:pserver:someuser:somepassword <at> server/path/repo.git co <HEAD_name> + cvs -d:pserver:someuser:somepassword@server:/path/repo.git co <HEAD_name> ------ No special setup is needed for SSH access, other than having Git tools in the PATH. If you have clients that do not accept the CVS_SERVER @@ -138,7 +137,7 @@ Note: Newer CVS versions (>= 1.12.11) also support specifying CVS_SERVER directly in CVSROOT like ------ -cvs -d ":ext;CVS_SERVER=git cvsserver:user@server/path/repo.git" co <HEAD_name> + cvs -d ":ext;CVS_SERVER=git cvsserver:user@server/path/repo.git" co <HEAD_name> ------ This has the advantage that it will be saved in your 'CVS/Root' files and you don't need to worry about always setting the correct environment @@ -186,8 +185,8 @@ allowing access over SSH. + -- ------ - export CVSROOT=:ext:user@server:/var/git/project.git - export CVS_SERVER="git cvsserver" + export CVSROOT=:ext:user@server:/var/git/project.git + export CVS_SERVER="git cvsserver" ------ -- 4. For SSH clients that will make commits, make sure their server-side @@ -203,7 +202,7 @@ allowing access over SSH. `project-master` directory: + ------ - cvs co -d project-master master + cvs co -d project-master master ------ [[dbbackend]] diff --git a/Documentation/git-http-backend.txt b/Documentation/git-http-backend.txt index 558966aa83..0c5c0dde19 100644 --- a/Documentation/git-http-backend.txt +++ b/Documentation/git-http-backend.txt @@ -16,7 +16,9 @@ A simple CGI program to serve the contents of a Git repository to Git clients accessing the repository over http:// and https:// protocols. The program supports clients fetching using both the smart HTTP protocol and the backwards-compatible dumb HTTP protocol, as well as clients -pushing using the smart HTTP protocol. +pushing using the smart HTTP protocol. 
It also supports Git's +more-efficient "v2" protocol if properly configured; see the +discussion of `GIT_PROTOCOL` in the ENVIRONMENT section below. It verifies that the directory has the magic file "git-daemon-export-ok", and it will refuse to export any Git directory @@ -77,6 +79,18 @@ Apache 2.x:: SetEnv GIT_PROJECT_ROOT /var/www/git SetEnv GIT_HTTP_EXPORT_ALL ScriptAlias /git/ /usr/libexec/git-core/git-http-backend/ + +# This is not strictly necessary using Apache and a modern version of +# git-http-backend, as the webserver will pass along the header in the +# environment as HTTP_GIT_PROTOCOL, and http-backend will copy that into +# GIT_PROTOCOL. But you may need this line (or something similar if you +# are using a different webserver), or if you want to support older Git +# versions that did not do that copying. +# +# Having the webserver set up GIT_PROTOCOL is perfectly fine even with +# modern versions (and will take precedence over HTTP_GIT_PROTOCOL, +# which means it can be used to override the client's request). +SetEnvIf Git-Protocol ".*" GIT_PROTOCOL=$0 ---------------------------------------------------------------- + To enable anonymous read access but authenticated write access, @@ -264,6 +278,16 @@ a repository with an extremely large number of refs. The value can be specified with a unit (e.g., `100M` for 100 megabytes). The default is 10 megabytes. +Clients may probe for optional protocol capabilities (like the v2 +protocol) using the `Git-Protocol` HTTP header. In order to support +these, the contents of that header must appear in the `GIT_PROTOCOL` +environment variable. Most webservers will pass this header to the CGI +via the `HTTP_GIT_PROTOCOL` variable, and `git-http-backend` will +automatically copy that to `GIT_PROTOCOL`. However, some webservers may +be more selective about which headers they'll pass, in which case they +need to be configured explicitly (see the mention of `Git-Protocol` in +the Apache config from the earlier EXAMPLES section). + The backend process sets GIT_COMMITTER_NAME to '$REMOTE_USER' and GIT_COMMITTER_EMAIL to '$\{REMOTE_USER}@http.$\{REMOTE_ADDR\}', ensuring that any reflogs created by 'git-receive-pack' contain some diff --git a/Documentation/git-index-pack.txt b/Documentation/git-index-pack.txt index 7fa74b9e79..1f1e359225 100644 --- a/Documentation/git-index-pack.txt +++ b/Documentation/git-index-pack.txt @@ -82,6 +82,12 @@ OPTIONS --strict:: Die, if the pack contains broken objects or links. +--progress-title:: + For internal use only. ++ +Set the title of the progress bar. The title is "Receiving objects" by +default and "Indexing objects" when `--stdin` is specified. + --check-self-contained-and-connected:: Die if the pack contains broken links. For internal use only. diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt index 1e738ad398..e2cfb68ab5 100644 --- a/Documentation/git-maintenance.txt +++ b/Documentation/git-maintenance.txt @@ -179,6 +179,17 @@ OPTIONS `maintenance.<task>.enabled` configured as `true` are considered. See the 'TASKS' section for the list of accepted `<task>` values. +--scheduler=auto|crontab|systemd-timer|launchctl|schtasks:: + When combined with the `start` subcommand, specify the scheduler + for running the hourly, daily and weekly executions of + `git maintenance run`. + Possible values for `<scheduler>` are `auto`, `crontab` + (POSIX), `systemd-timer` (Linux), `launchctl` (macOS), and + `schtasks` (Windows). 
When `auto` is specified, the + appropriate platform-specific scheduler is used; on Linux, + `systemd-timer` is used if available, otherwise + `crontab`. Default is `auto`. + TROUBLESHOOTING --------------- @@ -277,6 +288,52 @@ schedule to ensure you are executing the correct binaries in your schedule. +BACKGROUND MAINTENANCE ON LINUX SYSTEMD SYSTEMS +----------------------------------------------- + +While Linux supports `cron`, depending on the distribution, `cron` may +be an optional package not necessarily installed. On modern Linux +distributions, systemd timers are superseding it. + +If user systemd timers are available, they will be used as a replacement +of `cron`. + +In this case, `git maintenance start` will create user systemd timer units +and start the timers. The current list of user-scheduled tasks can be found +by running `systemctl --user list-timers`. The timers written by `git +maintenance start` are similar to this: + +----------------------------------------------------------------------- +$ systemctl --user list-timers +NEXT LEFT LAST PASSED UNIT ACTIVATES +Thu 2021-04-29 19:00:00 CEST 42min left Thu 2021-04-29 18:00:11 CEST 17min ago git-maintenance@hourly.timer git-maintenance@hourly.service +Fri 2021-04-30 00:00:00 CEST 5h 42min left Thu 2021-04-29 00:00:11 CEST 18h ago git-maintenance@daily.timer git-maintenance@daily.service +Mon 2021-05-03 00:00:00 CEST 3 days left Mon 2021-04-26 00:00:11 CEST 3 days ago git-maintenance@weekly.timer git-maintenance@weekly.service +----------------------------------------------------------------------- + +One timer is registered for each `--schedule=<frequency>` option. + +The definition of the systemd units can be inspected in the following files: + +----------------------------------------------------------------------- +~/.config/systemd/user/git-maintenance@.timer +~/.config/systemd/user/git-maintenance@.service +~/.config/systemd/user/timers.target.wants/git-maintenance@hourly.timer +~/.config/systemd/user/timers.target.wants/git-maintenance@daily.timer +~/.config/systemd/user/timers.target.wants/git-maintenance@weekly.timer +----------------------------------------------------------------------- + +`git maintenance start` will overwrite these files and start the timer +again with `systemctl --user`, so any customization should be done by +creating a drop-in file, i.e. a `.conf` suffixed file in the +`~/.config/systemd/user/git-maintenance@.service.d` directory. + +`git maintenance stop` will stop the user systemd timers and delete +the above mentioned files. + +For more details, see `systemd.timer(5)`. + + BACKGROUND MAINTENANCE ON MACOS SYSTEMS --------------------------------------- diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt index ffd601bc17..3b0b55cd75 100644 --- a/Documentation/git-multi-pack-index.txt +++ b/Documentation/git-multi-pack-index.txt @@ -9,8 +9,7 @@ git-multi-pack-index - Write and verify multi-pack-indexes SYNOPSIS -------- [verse] -'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress] - [--preferred-pack=<pack>] <subcommand> +'git multi-pack-index' [--object-dir=<dir>] [--[no-]bitmap] <sub-command> DESCRIPTION ----------- @@ -23,10 +22,13 @@ OPTIONS Use given directory for the location of Git objects. We check `<dir>/packs/multi-pack-index` for the current MIDX file, and `<dir>/packs` for the pack-files to index. ++ +`<dir>` must be an alternate of the current repository. --[no-]progress:: Turn progress on/off explicitly. 
If neither is specified, progress is - shown if standard error is connected to a terminal. + shown if standard error is connected to a terminal. Supported by + sub-commands `write`, `verify`, `expire`, and `repack. The following subcommands are available: @@ -37,9 +39,12 @@ write:: -- --preferred-pack=<pack>:: Optionally specify the tie-breaking pack used when - multiple packs contain the same object. If not given, - ties are broken in favor of the pack with the lowest - mtime. + multiple packs contain the same object. `<pack>` must + contain at least one object. If not given, ties are + broken in favor of the pack with the lowest mtime. + + --[no-]bitmap:: + Control whether or not a multi-pack bitmap is written. -- verify:: @@ -81,6 +86,13 @@ EXAMPLES $ git multi-pack-index write ----------------------------------------------- +* Write a MIDX file for the packfiles in the current .git folder with a +corresponding bitmap. ++ +------------------------------------------------------------- +$ git multi-pack-index write --preferred-pack=<pack> --bitmap +------------------------------------------------------------- + * Write a MIDX file for the packfiles in an alternate object store. + ----------------------------------------------- diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index 3f1030df70..c116dbf4bb 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -79,9 +79,10 @@ remain the checked-out branch. If the upstream branch already contains a change you have made (e.g., because you mailed a patch which was applied upstream), then that commit -will be skipped. For example, running `git rebase master` on the -following history (in which `A'` and `A` introduce the same set of changes, -but have different committer information): +will be skipped and warnings will be issued (if the `merge` backend is +used). For example, running `git rebase master` on the following +history (in which `A'` and `A` introduce the same set of changes, but +have different committer information): ------------ A---B---C topic @@ -312,7 +313,10 @@ See also INCOMPATIBLE OPTIONS below. By default (or if `--no-reapply-cherry-picks` is given), these commits will be automatically dropped. Because this necessitates reading all upstream commits, this can be expensive in repos with a large number -of upstream commits that need to be read. +of upstream commits that need to be read. When using the `merge` +backend, warnings will be issued for each dropped commit (unless +`--quiet` is given). Advice will also be issued unless +`advice.skippedCherryPicks` is set to false (see linkgit:git-config[1]). + `--reapply-cherry-picks` allows rebase to forgo reading all upstream commits, potentially improving performance. @@ -442,7 +446,8 @@ When --fork-point is active, 'fork_point' will be used instead of ends up being empty, the <upstream> will be used as a fallback. + If <upstream> is given on the command line, then the default is -`--no-fork-point`, otherwise the default is `--fork-point`. +`--no-fork-point`, otherwise the default is `--fork-point`. See also +`rebase.forkpoint` in linkgit:git-config[1]. 
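The cherry-pick skipping described in the `git-rebase.txt` hunk above can be exercised directly. A sketch, assuming a `topic` branch containing a commit whose changes were already applied upstream on `master`:

----
# With the default merge backend, commits already present upstream are
# dropped; a message names each skipped commit, plus a hint pointing at
# --reapply-cherry-picks (unless --quiet is given).
$ git rebase master topic

# Keep the duplicated commits instead of dropping them (this also lets
# rebase skip reading all upstream commits).
$ git rebase --reapply-cherry-picks master topic

# Silence only the advice, keeping the per-commit messages.
$ git config advice.skippedCherryPicks false
----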
+ If your branch was based on <upstream> but <upstream> was rewound and your branch contains commits which were dropped, this option can be used diff --git a/Documentation/git-receive-pack.txt b/Documentation/git-receive-pack.txt index 25702ed730..014a78409b 100644 --- a/Documentation/git-receive-pack.txt +++ b/Documentation/git-receive-pack.txt @@ -41,6 +41,11 @@ OPTIONS <directory>:: The repository to sync into. +--http-backend-info-refs:: + Used by linkgit:git-http-backend[1] to serve up + `$GIT_URL/info/refs?service=git-receive-pack` requests. See + `--http-backend-info-refs` in linkgit:git-upload-pack[1]. + PRE-RECEIVE HOOK ---------------- Before any ref is updated, if $GIT_DIR/hooks/pre-receive file exists diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt index fdcf43f87c..42056ee9ff 100644 --- a/Documentation/git-sparse-checkout.txt +++ b/Documentation/git-sparse-checkout.txt @@ -210,6 +210,16 @@ case-insensitive check. This corrects for case mismatched filenames in the 'git sparse-checkout set' command to reflect the expected cone in the working directory. +When changing the sparse-checkout patterns in cone mode, Git will inspect each +tracked directory that is not within the sparse-checkout cone to see if it +contains any untracked files. If all of those files are ignored due to the +`.gitignore` patterns, then the directory will be deleted. If any of the +untracked files within that directory is not ignored, then no deletions will +occur within that directory and a warning message will appear. If these files +are important, then reset your sparse-checkout definition so they are included, +use `git add` and `git commit` to store them, then remove any remaining files +manually to ensure Git can behave optimally. + SUBMODULES ---------- diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt index 83f38e3198..c33a3d8d53 100644 --- a/Documentation/git-status.txt +++ b/Documentation/git-status.txt @@ -363,7 +363,7 @@ Field Meaning Unmerged entries have the following format; the first character is a "u" to distinguish from ordinary changed entries. - u <xy> <sub> <m1> <m2> <m3> <mW> <h1> <h2> <h3> <path> + u <XY> <sub> <m1> <m2> <m3> <mW> <h1> <h2> <h3> <path> .... Field Meaning diff --git a/Documentation/git-upload-pack.txt b/Documentation/git-upload-pack.txt index 9822c1eb1a..8f87b23ea8 100644 --- a/Documentation/git-upload-pack.txt +++ b/Documentation/git-upload-pack.txt @@ -36,14 +36,26 @@ OPTIONS This fits with the HTTP POST request processing model where a program may read the request, write a response, and must exit. ---advertise-refs:: - Only the initial ref advertisement is output, and the program exits - immediately. This fits with the HTTP GET request model, where - no request content is received but a response must be produced. +--http-backend-info-refs:: + Used by linkgit:git-http-backend[1] to serve up + `$GIT_URL/info/refs?service=git-upload-pack` requests. See + "Smart Clients" in link:technical/http-protocol.html[the HTTP + transfer protocols] documentation and "HTTP Transport" in + link:technical/protocol-v2.html[the Git Wire Protocol, Version + 2] documentation. Also understood by + linkgit:git-receive-pack[1]. <directory>:: The repository to sync from. +ENVIRONMENT +----------- + +`GIT_PROTOCOL`:: + Internal variable used for handshaking the wire protocol. Server + admins may need to configure some transports to allow this + variable to be passed. See the discussion in linkgit:git[1]. 
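For the `GIT_PROTOCOL` handshake mentioned above, a quick way to see whether a server ends up speaking v2 is to trace the packet exchange. A sketch (the URL is a placeholder):

----
# Request protocol v2 explicitly and watch the advertisement; a
# v2-capable server should answer with "version 2" in the trace,
# otherwise the client quietly falls back to the original protocol.
$ GIT_TRACE_PACKET=1 git -c protocol.version=2 \
        ls-remote https://example.com/repo.git 2>&1 | head

# Over ssh:// the server must let the variable through, e.g. with
# "AcceptEnv GIT_PROTOCOL" in OpenSSH's sshd_config.
----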
+ SEE ALSO -------- linkgit:gitnamespaces[7] diff --git a/Documentation/git-version.txt b/Documentation/git-version.txt new file mode 100644 index 0000000000..80fa7754a6 --- /dev/null +++ b/Documentation/git-version.txt @@ -0,0 +1,28 @@ +git-version(1) +============== + +NAME +---- +git-version - Display version information about Git + +SYNOPSIS +-------- +[verse] +'git version' [--build-options] + +DESCRIPTION +----------- +With no options given, the version of 'git' is printed on the standard output. + +Note that `git --version` is identical to `git version` because the +former is internally converted into the latter. + +OPTIONS +------- +--build-options:: + Include additional information about how git was built for diagnostic + purposes. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/git.txt b/Documentation/git.txt index 6dd241ef83..abace9eac2 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -41,6 +41,10 @@ OPTIONS ------- --version:: Prints the Git suite version that the 'git' program came from. ++ +This option is internaly converted to `git version ...` and accepts +the same options as the linkgit:git-version[1] command. If `--help` is +also given, it takes precedence over `--version`. --help:: Prints the synopsis and a list of the most commonly used @@ -894,6 +898,21 @@ for full details. Contains a colon ':' separated list of keys with optional values 'key[=value]'. Presence of unknown keys and values must be ignored. ++ +Note that servers may need to be configured to allow this variable to +pass over some transports. It will be propagated automatically when +accessing local repositories (i.e., `file://` or a filesystem path), as +well as over the `git://` protocol. For git-over-http, it should work +automatically in most configurations, but see the discussion in +linkgit:git-http-backend[1]. For git-over-ssh, the ssh server may need +to be configured to allow clients to pass this variable (e.g., by using +`AcceptEnv GIT_PROTOCOL` with OpenSSH). ++ +This configuration is optional. If the variable is not propagated, then +clients will fall back to the original "v0" protocol (but may miss out +on some performance improvements or features). This variable currently +only affects clones and fetches; it is not yet used for pushes (but may +be in the future). `GIT_OPTIONAL_LOCKS`:: If set to `0`, Git will complete any requested operation without diff --git a/Documentation/pretty-options.txt b/Documentation/pretty-options.txt index 27ddaf84a1..b3af850608 100644 --- a/Documentation/pretty-options.txt +++ b/Documentation/pretty-options.txt @@ -33,14 +33,16 @@ people using 80-column terminals. used together. --encoding=<encoding>:: - The commit objects record the encoding used for the log message + Commit objects record the character encoding used for the log message in their encoding header; this option can be used to tell the command to re-code the commit log message in the encoding preferred by the user. For non plumbing commands this defaults to UTF-8. Note that if an object claims to be encoded in `X` and we are outputting in `X`, we will output the object verbatim; this means that invalid sequences in the original - commit may be copied to the output. + commit may be copied to the output. Likewise, if iconv(3) fails + to convert the commit, we will output the original object + verbatim, along with a warning. 
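The `--encoding` behaviour described above can be observed with plain `git log`; a small sketch, assuming a history containing commits whose encoding header is not UTF-8:

----
# Re-code log messages to UTF-8 (the default for non-plumbing commands).
$ git log -1 --encoding=UTF-8

# Request the same encoding a commit claims to be in: the message is
# then passed through verbatim, without conversion or validation.
$ git log -1 --encoding=ISO-8859-1

# If iconv(3) cannot convert a message, it is emitted verbatim,
# together with a warning.
----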
--expand-tabs=<n>:: --expand-tabs:: diff --git a/Documentation/technical/api-parse-options.txt b/Documentation/technical/api-parse-options.txt index 5a60bbfa7f..acfd5dc1d8 100644 --- a/Documentation/technical/api-parse-options.txt +++ b/Documentation/technical/api-parse-options.txt @@ -198,11 +198,6 @@ There are some macros to easily define options: The filename will be prefixed by passing the filename along with the prefix argument of `parse_options()` to `prefix_filename()`. -`OPT_ARGUMENT(long, &int_var, description)`:: - Introduce a long-option argument that will be kept in `argv[]`. - If this option was seen, `int_var` will be set to one (except - if a `NULL` pointer was passed). - `OPT_NUMBER_CALLBACK(&var, description, func_ptr)`:: Recognize numerical options like -123 and feed the integer as if it was an argument to the function given by `func_ptr`. diff --git a/Documentation/technical/bitmap-format.txt b/Documentation/technical/bitmap-format.txt index f8c18a0f7a..04b3ec2178 100644 --- a/Documentation/technical/bitmap-format.txt +++ b/Documentation/technical/bitmap-format.txt @@ -1,6 +1,44 @@ GIT bitmap v1 format ==================== +== Pack and multi-pack bitmaps + +Bitmaps store reachability information about the set of objects in a packfile, +or a multi-pack index (MIDX). The former is defined obviously, and the latter is +defined as the union of objects in packs contained in the MIDX. + +A bitmap may belong to either one pack, or the repository's multi-pack index (if +it exists). A repository may have at most one bitmap. + +An object is uniquely described by its bit position within a bitmap: + + - If the bitmap belongs to a packfile, the __n__th bit corresponds to + the __n__th object in pack order. For a function `offset` which maps + objects to their byte offset within a pack, pack order is defined as + follows: + + o1 <= o2 <==> offset(o1) <= offset(o2) + + - If the bitmap belongs to a MIDX, the __n__th bit corresponds to the + __n__th object in MIDX order. With an additional function `pack` which + maps objects to the pack they were selected from by the MIDX, MIDX order + is defined as follows: + + o1 <= o2 <==> pack(o1) <= pack(o2) /\ offset(o1) <= offset(o2) + + The ordering between packs is done according to the MIDX's .rev file. + Notably, the preferred pack sorts ahead of all other packs. + +The on-disk representation (described below) of a bitmap is the same regardless +of whether or not that bitmap belongs to a packfile or a MIDX. The only +difference is the interpretation of the bits, which is described above. + +Certain bitmap extensions are supported (see: Appendix B). No extensions are +required for bitmaps corresponding to packfiles. For bitmaps that correspond to +MIDXs, both the bit-cache and rev-cache extensions are required. + +== On-disk format + - A header appears at the beginning: 4-byte signature: {'B', 'I', 'T', 'M'} @@ -14,17 +52,19 @@ GIT bitmap v1 format The following flags are supported: - BITMAP_OPT_FULL_DAG (0x1) REQUIRED - This flag must always be present. It implies that the bitmap - index has been generated for a packfile with full closure - (i.e. where every single object in the packfile can find - its parent links inside the same packfile). This is a - requirement for the bitmap index format, also present in JGit, - that greatly reduces the complexity of the implementation. + This flag must always be present. It implies that the + bitmap index has been generated for a packfile or + multi-pack index (MIDX) with full closure (i.e. 
where + every single object in the packfile/MIDX can find its + parent links inside the same packfile/MIDX). This is a + requirement for the bitmap index format, also present in + JGit, that greatly reduces the complexity of the + implementation. - BITMAP_OPT_HASH_CACHE (0x4) If present, the end of the bitmap file contains `N` 32-bit name-hash values, one per object in the - pack. The format and meaning of the name-hash is + pack/MIDX. The format and meaning of the name-hash is described below. 4-byte entry count (network byte order) @@ -33,7 +73,8 @@ GIT bitmap v1 format 20-byte checksum - The SHA1 checksum of the pack this bitmap index belongs to. + The SHA1 checksum of the pack/MIDX this bitmap index + belongs to. - 4 EWAH bitmaps that act as type indexes @@ -50,7 +91,7 @@ GIT bitmap v1 format - Tags In each bitmap, the `n`th bit is set to true if the `n`th object - in the packfile is of that type. + in the packfile or multi-pack index is of that type. The obvious consequence is that the OR of all 4 bitmaps will result in a full set (all bits set), and the AND of all 4 bitmaps will @@ -62,8 +103,9 @@ GIT bitmap v1 format Each entry contains the following: - 4-byte object position (network byte order) - The position **in the index for the packfile** where the - bitmap for this commit is found. + The position **in the index for the packfile or + multi-pack index** where the bitmap for this commit is + found. - 1-byte XOR-offset The xor offset used to compress this bitmap. For an entry @@ -146,10 +188,11 @@ Name-hash cache --------------- If the BITMAP_OPT_HASH_CACHE flag is set, the end of the bitmap contains -a cache of 32-bit values, one per object in the pack. The value at +a cache of 32-bit values, one per object in the pack/MIDX. The value at position `i` is the hash of the pathname at which the `i`th object -(counting in index order) in the pack can be found. This can be fed -into the delta heuristics to compare objects with similar pathnames. +(counting in index or multi-pack index order) in the pack/MIDX can be found. +This can be fed into the delta heuristics to compare objects with similar +pathnames. The hash algorithm used is: diff --git a/Documentation/technical/http-protocol.txt b/Documentation/technical/http-protocol.txt index 96d89ea9b2..cc5126cfed 100644 --- a/Documentation/technical/http-protocol.txt +++ b/Documentation/technical/http-protocol.txt @@ -225,6 +225,9 @@ The client may send Extra Parameters (see Documentation/technical/pack-protocol.txt) as a colon-separated string in the Git-Protocol HTTP header. +Uses the `--http-backend-info-refs` option to +linkgit:git-upload-pack[1]. + Dumb Server Response ^^^^^^^^^^^^^^^^^^^^ Dumb servers MUST respond with the dumb server reply format. diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt index fb688976c4..86f40f2490 100644 --- a/Documentation/technical/multi-pack-index.txt +++ b/Documentation/technical/multi-pack-index.txt @@ -36,7 +36,9 @@ Design Details directory of an alternate. It refers only to packfiles in that same directory. -- The core.multiPackIndex config setting must be on to consume MIDX files. +- The core.multiPackIndex config setting must be on (which is the + default) to consume MIDX files. Setting it to `false` prevents + Git from reading a MIDX file, even if one exists. 
- The file format includes parameters for the object ID hash function, so a future change of hash algorithm does not require @@ -71,14 +73,10 @@ Future Work still reducing the number of binary searches required for object lookups. -- The reachability bitmap is currently paired directly with a single - packfile, using the pack-order as the object order to hopefully - compress the bitmaps well using run-length encoding. This could be - extended to pair a reachability bitmap with a multi-pack-index. If - the multi-pack-index is extended to store a "stable object order" +- If the multi-pack-index is extended to store a "stable object order" (a function Order(hash) = integer that is constant for a given hash, - even as the multi-pack-index is updated) then a reachability bitmap - could point to a multi-pack-index and be updated independently. + even as the multi-pack-index is updated) then MIDX bitmaps could be + updated independently of the MIDX. - Packfiles can be marked as "special" using empty files that share the initial name but replace ".pack" with ".keep" or ".promisor". diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/technical/protocol-v2.txt index 1040d85319..21e8258ccf 100644 --- a/Documentation/technical/protocol-v2.txt +++ b/Documentation/technical/protocol-v2.txt @@ -42,7 +42,8 @@ Initial Client Request In general a client can request to speak protocol v2 by sending `version=2` through the respective side-channel for the transport being used which inevitably sets `GIT_PROTOCOL`. More information can be -found in `pack-protocol.txt` and `http-protocol.txt`. In all cases the +found in `pack-protocol.txt` and `http-protocol.txt`, as well as the +`GIT_PROTOCOL` definition in `git.txt`. In all cases the response from the server is the capability advertisement. Git Transport @@ -58,6 +59,8 @@ SSH and File Transport When using either the ssh:// or file:// transport, the GIT_PROTOCOL environment variable must be set explicitly to include "version=2". +The server may need to be configured to allow this environment variable +to pass. HTTP Transport ~~~~~~~~~~~~~~ @@ -81,6 +84,12 @@ A v2 server would reply: Subsequent requests are then made directly to the service `$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack). +Uses the `--http-backend-info-refs` option to +linkgit:git-upload-pack[1]. + +The server may need to be configured to pass this header's contents via +the `GIT_PROTOCOL` variable. See the discussion in `git-http-backend.txt`. + Capability Advertisement ------------------------ @@ -190,7 +199,11 @@ ls-refs takes in the following arguments: Show peeled tags. ref-prefix <prefix> When specified, only references having a prefix matching one of - the provided prefixes are displayed. + the provided prefixes are displayed. Multiple instances may be + given, in which case references matching any prefix will be + shown. Note that this is purely for optimization; a server MAY + show refs not matching the prefix if it chooses, and clients + should filter the result themselves. If the 'unborn' feature is advertised the following argument can be included in the client's request. @@ -138,12 +138,15 @@ Issues of note: BLK_SHA1. Also included is a version optimized for PowerPC (PPC_SHA1). - - "libcurl" library is used by git-http-fetch, git-fetch, and, if - the curl version >= 7.34.0, for git-imap-send. You might also - want the "curl" executable for debugging purposes. 
If you do not - use http:// or https:// repositories, and do not want to put - patches into an IMAP mailbox, you do not have to have them - (use NO_CURL). + - "libcurl" library is used for fetching and pushing + repositories over http:// or https://, as well as by + git-imap-send if the curl version is >= 7.34.0. If you do + not need that functionality, use NO_CURL to build without + it. + + Git requires version "7.19.4" or later of "libcurl" to build + without NO_CURL. This version requirement may be bumped in + the future. - "expat" library; git-http-push uses it for remote lock management over DAV. Similar to "curl" above, this is optional @@ -409,15 +409,6 @@ all:: # Define NEEDS_LIBRT if your platform requires linking with librt (glibc version # before 2.17) for clock_gettime and CLOCK_MONOTONIC. # -# Define USE_PARENS_AROUND_GETTEXT_N to "yes" if your compiler happily -# compiles the following initialization: -# -# static const char s[] = ("FOO"); -# -# and define it to "no" if you need to remove the parentheses () around the -# constant. The default is "auto", which means to use parentheses if your -# compiler is detected to support it. -# # Define HAVE_BSD_SYSCTL if your platform has a BSD-compatible sysctl function. # # Define HAVE_GETDELIM if your system has the getdelim() function. @@ -465,6 +456,9 @@ all:: # the global variable _wpgmptr containing the absolute path of the current # executable (this is the case on Windows). # +# INSTALL_STRIP can be set to "-s" to strip binaries during installation, +# if your $(INSTALL) command supports the option. +# # Define GENERATE_COMPILATION_DATABASE to "yes" to generate JSON compilation # database entries during compilation if your compiler supports it, using the # `-MJ` flag. The JSON entries will be placed in the `compile_commands/` @@ -495,10 +489,9 @@ all:: # setting this flag the exceptions are removed, and all of # -Wextra is used. # -# pedantic: +# no-pedantic: # -# Enable -pedantic compilation. This also disables -# USE_PARENS_AROUND_GETTEXT_N to produce only relevant warnings. +# Disable -pedantic compilation. GIT-VERSION-FILE: FORCE @$(SHELL_PATH) ./GIT-VERSION-GEN @@ -1289,6 +1282,7 @@ endif ifeq ($(COMPUTE_HEADER_DEPENDENCIES),auto) dep_check = $(shell $(CC) $(ALL_CFLAGS) \ + -Wno-pedantic \ -c -MF /dev/null -MQ /dev/null -MMD -MP \ -x c /dev/null -o /dev/null 2>&1; \ echo $$?) @@ -1314,6 +1308,7 @@ endif ifeq ($(GENERATE_COMPILATION_DATABASE),yes) compdb_check = $(shell $(CC) $(ALL_CFLAGS) \ + -Wno-pedantic \ -c -MJ /dev/null \ -x c /dev/null -o /dev/null 2>&1; \ echo $$?) 
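The removed USE_PARENS_AROUND_GETTEXT_N knob and the new -Wno-pedantic flags added to the dep_check/compdb_check probes revolve around the same compiler quirk the deleted Makefile comment quoted: initializing a char array from a parenthesized string literal is not strict ISO C, so a parenthesized N_() expansion draws a -pedantic diagnostic. Below is a self-contained sketch of that construct; N_PLAIN and N_PARENS are illustrative stand-ins, not git's real gettext.h definitions.

----
/*
 * Stand-in macros for illustration only; git's real N_() lives in
 * gettext.h.  N_PARENS mimics what USE_PARENS_AROUND_GETTEXT_N=yes
 * used to produce.
 */
#define N_PLAIN(msgid)  msgid
#define N_PARENS(msgid) (msgid)

static const char ok[] = N_PLAIN("FOO");	/* valid ISO C initializer */
#ifdef DEMO_PARENS
/*
 * gcc -pedantic warns "array initialized from parenthesized string
 * constant"; some compilers reject it outright, which is what the
 * removed Makefile knob was probing for.
 */
static const char bad[] = N_PARENS("FOO");
#endif

int main(void)
{
	return ok[0] == 'F' ? 0 : 1;
}
----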
@@ -1351,14 +1346,6 @@ ifneq (,$(SOCKLEN_T)) BASIC_CFLAGS += -Dsocklen_t=$(SOCKLEN_T) endif -ifeq (yes,$(USE_PARENS_AROUND_GETTEXT_N)) - BASIC_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=1 -else -ifeq (no,$(USE_PARENS_AROUND_GETTEXT_N)) - BASIC_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0 -endif -endif - ifeq ($(uname_S),Darwin) ifndef NO_FINK ifeq ($(shell test -d /sw/lib && echo y),y) @@ -1440,15 +1427,8 @@ else REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES) PROGRAM_OBJS += http-fetch.o PROGRAMS += $(REMOTE_CURL_NAMES) - curl_check := $(shell (echo 070908; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p) - ifeq "$(curl_check)" "070908" - ifndef NO_EXPAT - PROGRAM_OBJS += http-push.o - else - EXCLUDED_PROGRAMS += git-http-push - endif - else - EXCLUDED_PROGRAMS += git-http-push + ifndef NO_EXPAT + PROGRAM_OBJS += http-push.o endif curl_check := $(shell (echo 072200; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p) ifeq "$(curl_check)" "072200" @@ -2485,7 +2465,6 @@ dep_args = -MF $(dep_file) -MQ $@ -MMD -MP endif ifneq ($(COMPUTE_HEADER_DEPENDENCIES),yes) -dep_dirs = missing_dep_dirs = dep_args = endif @@ -2754,19 +2733,25 @@ FIND_SOURCE_FILES = ( \ | sed -e 's|^\./||' \ ) -$(ETAGS_TARGET): FORCE - $(QUIET_GEN)$(RM) "$(ETAGS_TARGET)+" && \ - $(FIND_SOURCE_FILES) | xargs etags -a -o "$(ETAGS_TARGET)+" && \ - mv "$(ETAGS_TARGET)+" "$(ETAGS_TARGET)" +FOUND_SOURCE_FILES = $(shell $(FIND_SOURCE_FILES)) + +$(ETAGS_TARGET): $(FOUND_SOURCE_FILES) + $(QUIET_GEN)$(RM) $@+ && \ + echo $(FOUND_SOURCE_FILES) | xargs etags -a -o $@+ && \ + mv $@+ $@ + +tags: $(FOUND_SOURCE_FILES) + $(QUIET_GEN)$(RM) $@+ && \ + echo $(FOUND_SOURCE_FILES) | xargs ctags -a -o $@+ && \ + mv $@+ $@ -tags: FORCE - $(QUIET_GEN)$(RM) tags+ && \ - $(FIND_SOURCE_FILES) | xargs ctags -a -o tags+ && \ - mv tags+ tags +cscope.out: $(FOUND_SOURCE_FILES) + $(QUIET_GEN)$(RM) $@+ && \ + echo $(FOUND_SOURCE_FILES) | xargs cscope -f$@+ -b && \ + mv $@+ $@ -cscope: - $(RM) cscope* - $(FIND_SOURCE_FILES) | xargs cscope -b +.PHONY: cscope +cscope: cscope.out ### Detect prefix changes TRACK_PREFIX = $(bindir_SQ):$(gitexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):\ @@ -2956,7 +2941,7 @@ check: config-list.h command-list.h exit 1; \ fi -FOUND_C_SOURCES = $(filter %.c,$(shell $(FIND_SOURCE_FILES))) +FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES)) COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES)) %.cocci.patch: %.cocci $(COCCI_SOURCES) @@ -3009,7 +2994,8 @@ mergetools_instdir = $(prefix)/$(mergetoolsdir) endif mergetools_instdir_SQ = $(subst ','\'',$(mergetools_instdir)) -install_bindir_programs := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)) $(BINDIR_PROGRAMS_NO_X) +install_bindir_xprograms := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)) +install_bindir_programs := $(install_bindir_xprograms) $(BINDIR_PROGRAMS_NO_X) .PHONY: profile-install profile-fast-install profile-install: profile @@ -3018,12 +3004,17 @@ profile-install: profile profile-fast-install: profile-fast $(MAKE) install +INSTALL_STRIP = + install: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' - $(INSTALL) $(ALL_PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' + $(INSTALL) $(INSTALL_STRIP) $(PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' + $(INSTALL) $(SCRIPTS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' $(INSTALL) -m 644 $(SCRIPT_LIB) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' - $(INSTALL) $(install_bindir_programs) 
'$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) $(INSTALL_STRIP) $(install_bindir_xprograms) '$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) $(BINDIR_PROGRAMS_NO_X) '$(DESTDIR_SQ)$(bindir_SQ)' + ifdef MSVC # We DO NOT install the individual foo.o.pdb files because they # have already been rolled up into the exe's pdb file. @@ -3104,8 +3095,7 @@ endif ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \ ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \ cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \ - done && \ - ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X" + done .PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf .PHONY: quick-install-doc quick-install-man quick-install-html @@ -3281,7 +3271,7 @@ endif .PHONY: all install profile-clean cocciclean clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell -.PHONY: FORCE cscope +.PHONY: FORCE ### Check documentation # @@ -4,37 +4,6 @@ #include "help.h" #include "string-list.h" -int advice_fetch_show_forced_updates = 1; -int advice_push_update_rejected = 1; -int advice_push_non_ff_current = 1; -int advice_push_non_ff_matching = 1; -int advice_push_already_exists = 1; -int advice_push_fetch_first = 1; -int advice_push_needs_force = 1; -int advice_push_unqualified_ref_name = 1; -int advice_push_ref_needs_update = 1; -int advice_status_hints = 1; -int advice_status_u_option = 1; -int advice_status_ahead_behind_warning = 1; -int advice_commit_before_merge = 1; -int advice_reset_quiet_warning = 1; -int advice_resolve_conflict = 1; -int advice_sequencer_in_use = 1; -int advice_implicit_identity = 1; -int advice_detached_head = 1; -int advice_set_upstream_failure = 1; -int advice_object_name_warning = 1; -int advice_amworkdir = 1; -int advice_rm_hints = 1; -int advice_add_embedded_repo = 1; -int advice_ignored_hook = 1; -int advice_waiting_for_editor = 1; -int advice_graft_file_deprecated = 1; -int advice_checkout_ambiguous_remote_branch_name = 1; -int advice_submodule_alternate_error_strategy_die = 1; -int advice_add_ignored_file = 1; -int advice_add_empty_pathspec = 1; - static int advice_use_color = -1; static char advice_colors[][COLOR_MAXLEN] = { GIT_COLOR_RESET, @@ -63,49 +32,12 @@ static const char *advise_get_color(enum color_advice ix) } static struct { - const char *name; - int *preference; -} advice_config[] = { - { "fetchShowForcedUpdates", &advice_fetch_show_forced_updates }, - { "pushUpdateRejected", &advice_push_update_rejected }, - { "pushNonFFCurrent", &advice_push_non_ff_current }, - { "pushNonFFMatching", &advice_push_non_ff_matching }, - { "pushAlreadyExists", &advice_push_already_exists }, - { "pushFetchFirst", &advice_push_fetch_first }, - { "pushNeedsForce", &advice_push_needs_force }, - { "pushUnqualifiedRefName", &advice_push_unqualified_ref_name }, - { "pushRefNeedsUpdate", &advice_push_ref_needs_update }, - { "statusHints", &advice_status_hints }, - { "statusUoption", &advice_status_u_option }, - { "statusAheadBehindWarning", &advice_status_ahead_behind_warning }, - { "commitBeforeMerge", &advice_commit_before_merge }, - { "resetQuiet", &advice_reset_quiet_warning }, - { "resolveConflict", &advice_resolve_conflict }, - { "sequencerInUse", &advice_sequencer_in_use }, - { "implicitIdentity", &advice_implicit_identity }, - { "detachedHead", &advice_detached_head }, - { "setUpstreamFailure", &advice_set_upstream_failure }, - { "objectNameWarning", &advice_object_name_warning }, - { "amWorkDir", 
&advice_amworkdir }, - { "rmHints", &advice_rm_hints }, - { "addEmbeddedRepo", &advice_add_embedded_repo }, - { "ignoredHook", &advice_ignored_hook }, - { "waitingForEditor", &advice_waiting_for_editor }, - { "graftFileDeprecated", &advice_graft_file_deprecated }, - { "checkoutAmbiguousRemoteBranchName", &advice_checkout_ambiguous_remote_branch_name }, - { "submoduleAlternateErrorStrategyDie", &advice_submodule_alternate_error_strategy_die }, - { "addIgnoredFile", &advice_add_ignored_file }, - { "addEmptyPathspec", &advice_add_empty_pathspec }, - - /* make this an alias for backward compatibility */ - { "pushNonFastForward", &advice_push_update_rejected } -}; - -static struct { const char *key; int enabled; } advice_setting[] = { [ADVICE_ADD_EMBEDDED_REPO] = { "addEmbeddedRepo", 1 }, + [ADVICE_ADD_EMPTY_PATHSPEC] = { "addEmptyPathspec", 1 }, + [ADVICE_ADD_IGNORED_FILE] = { "addIgnoredFile", 1 }, [ADVICE_AM_WORK_DIR] = { "amWorkDir", 1 }, [ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME] = { "checkoutAmbiguousRemoteBranchName", 1 }, [ADVICE_COMMIT_BEFORE_MERGE] = { "commitBeforeMerge", 1 }, @@ -133,6 +65,7 @@ static struct { [ADVICE_RM_HINTS] = { "rmHints", 1 }, [ADVICE_SEQUENCER_IN_USE] = { "sequencerInUse", 1 }, [ADVICE_SET_UPSTREAM_FAILURE] = { "setUpstreamFailure", 1 }, + [ADVICE_SKIPPED_CHERRY_PICKS] = { "skippedCherryPicks", 1 }, [ADVICE_STATUS_AHEAD_BEHIND_WARNING] = { "statusAheadBehindWarning", 1 }, [ADVICE_STATUS_HINTS] = { "statusHints", 1 }, [ADVICE_STATUS_U_OPTION] = { "statusUoption", 1 }, @@ -221,13 +154,6 @@ int git_default_advice_config(const char *var, const char *value) if (!skip_prefix(var, "advice.", &k)) return 0; - for (i = 0; i < ARRAY_SIZE(advice_config); i++) { - if (strcasecmp(k, advice_config[i].name)) - continue; - *advice_config[i].preference = git_config_bool(var, value); - break; - } - for (i = 0; i < ARRAY_SIZE(advice_setting); i++) { if (strcasecmp(k, advice_setting[i].key)) continue; @@ -262,7 +188,7 @@ int error_resolve_conflict(const char *me) error(_("It is not possible to %s because you have unmerged files."), me); - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) /* * Message used both when 'git commit' fails and when * other commands doing a merge do. 
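The advice.c and advice.h hunks in this series replace one global int per advice knob with a single table indexed by enum advice_type, and convert callers from `advice_foo` checks to `advice_enabled(ADVICE_FOO)`. The following is a self-contained miniature of that table-driven shape, using simplified names; git's real implementation additionally wires the table into the config parser and the colored advise() output.

----
#include <stdio.h>
#include <strings.h>

/*
 * Miniature of the scheme: one enum value per knob, a parallel table of
 * config keys and enabled bits, and a single lookup helper instead of a
 * separate global int for every piece of advice.
 */
enum advice_type {
	ADVICE_DETACHED_HEAD,
	ADVICE_RESOLVE_CONFLICT,
	ADVICE_NR
};

static struct {
	const char *key;
	int enabled;
} advice_setting[ADVICE_NR] = {
	[ADVICE_DETACHED_HEAD]    = { "detachedHead", 1 },
	[ADVICE_RESOLVE_CONFLICT] = { "resolveConflict", 1 },
};

static int advice_enabled(enum advice_type type)
{
	return advice_setting[type].enabled;
}

/* Parsing "advice.<key>" from the config becomes one case-insensitive loop. */
static void set_advice(const char *key, int value)
{
	int i;
	for (i = 0; i < ADVICE_NR; i++)
		if (!strcasecmp(key, advice_setting[i].key))
			advice_setting[i].enabled = value;
}

int main(void)
{
	set_advice("detachedhead", 0);
	printf("detachedHead=%d resolveConflict=%d\n",
	       advice_enabled(ADVICE_DETACHED_HEAD),
	       advice_enabled(ADVICE_RESOLVE_CONFLICT));
	return 0;
}
----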
@@ -281,7 +207,7 @@ void NORETURN die_resolve_conflict(const char *me) void NORETURN die_conclude_merge(void) { error(_("You have not concluded your merge (MERGE_HEAD exists).")); - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) advise(_("Please, commit your changes before merging.")); die(_("Exiting because of unfinished merge.")); } @@ -5,37 +5,6 @@ struct string_list; -extern int advice_fetch_show_forced_updates; -extern int advice_push_update_rejected; -extern int advice_push_non_ff_current; -extern int advice_push_non_ff_matching; -extern int advice_push_already_exists; -extern int advice_push_fetch_first; -extern int advice_push_needs_force; -extern int advice_push_unqualified_ref_name; -extern int advice_push_ref_needs_update; -extern int advice_status_hints; -extern int advice_status_u_option; -extern int advice_status_ahead_behind_warning; -extern int advice_commit_before_merge; -extern int advice_reset_quiet_warning; -extern int advice_resolve_conflict; -extern int advice_sequencer_in_use; -extern int advice_implicit_identity; -extern int advice_detached_head; -extern int advice_set_upstream_failure; -extern int advice_object_name_warning; -extern int advice_amworkdir; -extern int advice_rm_hints; -extern int advice_add_embedded_repo; -extern int advice_ignored_hook; -extern int advice_waiting_for_editor; -extern int advice_graft_file_deprecated; -extern int advice_checkout_ambiguous_remote_branch_name; -extern int advice_submodule_alternate_error_strategy_die; -extern int advice_add_ignored_file; -extern int advice_add_empty_pathspec; - /* * To add a new advice, you need to: * Define a new advice_type. @@ -45,6 +14,8 @@ extern int advice_add_empty_pathspec; */ enum advice_type { ADVICE_ADD_EMBEDDED_REPO, + ADVICE_ADD_EMPTY_PATHSPEC, + ADVICE_ADD_IGNORED_FILE, ADVICE_AM_WORK_DIR, ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME, ADVICE_COMMIT_BEFORE_MERGE, @@ -75,6 +46,7 @@ extern int advice_add_empty_pathspec; ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE, ADVICE_UPDATE_SPARSE_PATH, ADVICE_WAITING_FOR_EDITOR, + ADVICE_SKIPPED_CHERRY_PICKS, }; int git_default_advice_config(const char *var, const char *value); @@ -3468,6 +3468,21 @@ static int load_preimage(struct apply_state *state, return 0; } +static int resolve_to(struct image *image, const struct object_id *result_id) +{ + unsigned long size; + enum object_type type; + + clear_image(image); + + image->buf = read_object_file(result_id, &type, &size); + if (!image->buf || type != OBJ_BLOB) + die("unable to read blob object %s", oid_to_hex(result_id)); + image->len = size; + + return 0; +} + static int three_way_merge(struct apply_state *state, struct image *image, char *path, @@ -3479,6 +3494,12 @@ static int three_way_merge(struct apply_state *state, mmbuffer_t result = { NULL }; int status; + /* resolve trivial cases first */ + if (oideq(base, ours)) + return resolve_to(image, theirs); + else if (oideq(base, theirs) || oideq(ours, theirs)) + return resolve_to(image, ours); + read_mmblob(&base_file, base); read_mmblob(&our_file, ours); read_mmblob(&their_file, theirs); @@ -191,7 +191,7 @@ static int write_archive_entry(const struct object_id *oid, const char *base, return err; } -static void queue_directory(const unsigned char *sha1, +static void queue_directory(const struct object_id *oid, struct strbuf *base, const char *filename, unsigned mode, struct archiver_context *c) { @@ -203,7 +203,7 @@ static void queue_directory(const unsigned char *sha1, d->mode = mode; c->bottom = d; d->len = 
xsnprintf(d->path, len, "%.*s%s/", (int)base->len, base->buf, filename); - oidread(&d->oid, sha1); + oidcpy(&d->oid, oid); } static int write_directory(struct archiver_context *c) @@ -250,8 +250,7 @@ static int queue_or_write_archive_entry(const struct object_id *oid, if (check_attr_export_ignore(check)) return 0; - queue_directory(oid->hash, base, filename, - mode, c); + queue_directory(oid, base, filename, mode, c); return READ_TREE_RECURSIVE; } @@ -14,6 +14,7 @@ #include "utf8.h" #include "quote.h" #include "thread-utils.h" +#include "dir.h" const char git_attr__true[] = "(builtin)true"; const char git_attr__false[] = "\0(builtin)false"; @@ -744,6 +745,20 @@ static struct attr_stack *read_attr_from_index(struct index_state *istate, if (!istate) return NULL; + /* + * The .gitattributes file only applies to files within its + * parent directory. In the case of cone-mode sparse-checkout, + * the .gitattributes file is sparse if and only if all paths + * within that directory are also sparse. Thus, don't load the + * .gitattributes file since it will not matter. + * + * In the case of a sparse index, it is critical that we don't go + * looking for a .gitattributes file, as doing so would cause the + * index to expand. + */ + if (!path_in_cone_mode_sparse_checkout(path, istate)) + return NULL; + buf = read_blob_data_from_index(istate, path, NULL); if (!buf) return NULL; @@ -271,7 +271,7 @@ void create_branch(struct repository *r, real_ref = NULL; if (get_oid_mb(start_name, &oid)) { if (explicit_tracking) { - if (advice_set_upstream_failure) { + if (advice_enabled(ADVICE_SET_UPSTREAM_FAILURE)) { error(_(upstream_missing), start_name); advise(_(upstream_advice)); exit(1); @@ -225,7 +225,6 @@ int cmd_submodule__helper(int argc, const char **argv, const char *prefix); int cmd_switch(int argc, const char **argv, const char *prefix); int cmd_symbolic_ref(int argc, const char **argv, const char *prefix); int cmd_tag(int argc, const char **argv, const char *prefix); -int cmd_tar_tree(int argc, const char **argv, const char *prefix); int cmd_unpack_file(int argc, const char **argv, const char *prefix); int cmd_unpack_objects(int argc, const char **argv, const char *prefix); int cmd_update_index(int argc, const char **argv, const char *prefix); diff --git a/builtin/add.c b/builtin/add.c index 17528e8f92..24da07578f 100644 --- a/builtin/add.c +++ b/builtin/add.c @@ -190,8 +190,6 @@ static int refresh(int verbose, const struct pathspec *pathspec) struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP; int flags = REFRESH_IGNORE_SKIP_WORKTREE | (verbose ? 
REFRESH_IN_PORCELAIN : REFRESH_QUIET); - struct pattern_list pl = { 0 }; - int sparse_checkout_enabled = !get_sparse_checkout_patterns(&pl); seen = xcalloc(pathspec->nr, 1); refresh_index(&the_index, flags, pathspec, seen, @@ -199,12 +197,9 @@ static int refresh(int verbose, const struct pathspec *pathspec) for (i = 0; i < pathspec->nr; i++) { if (!seen[i]) { const char *path = pathspec->items[i].original; - int dtype = DT_REG; if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) || - (sparse_checkout_enabled && - !path_matches_pattern_list(path, strlen(path), NULL, - &dtype, &pl, &the_index))) { + !path_in_sparse_checkout(path, &the_index)) { string_list_append(&only_match_skip_worktree, pathspec->items[i].original); } else { @@ -319,9 +314,7 @@ static int edit_patch(int argc, const char **argv, const char *prefix) rev.diffopt.output_format = DIFF_FORMAT_PATCH; rev.diffopt.use_color = 0; rev.diffopt.flags.ignore_dirty_submodules = 1; - out = open(file, O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (out < 0) - die(_("Could not open '%s' for writing."), file); + out = xopen(file, O_CREAT | O_WRONLY | O_TRUNC, 0666); rev.diffopt.file = xfdopen(out, "w"); rev.diffopt.close_file = 1; if (run_diff_files(&rev, 0)) @@ -425,6 +418,7 @@ static const char embedded_advice[] = N_( static void check_embedded_repo(const char *path) { struct strbuf name = STRBUF_INIT; + static int adviced_on_embedded_repo = 0; if (!warn_on_embedded_repo) return; @@ -436,10 +430,10 @@ static void check_embedded_repo(const char *path) strbuf_strip_suffix(&name, "/"); warning(_("adding embedded git repository: %s"), name.buf); - if (advice_add_embedded_repo) { + if (!adviced_on_embedded_repo && + advice_enabled(ADVICE_ADD_EMBEDDED_REPO)) { advise(embedded_advice, name.buf, name.buf); - /* there may be multiple entries; advise only once */ - advice_add_embedded_repo = 0; + adviced_on_embedded_repo = 1; } strbuf_release(&name); @@ -453,7 +447,7 @@ static int add_files(struct dir_struct *dir, int flags) fprintf(stderr, _(ignore_error)); for (i = 0; i < dir->ignored_nr; i++) fprintf(stderr, "%s\n", dir->ignored[i]->name); - if (advice_add_ignored_file) + if (advice_enabled(ADVICE_ADD_IGNORED_FILE)) advise(_("Use -f if you really want to add them.\n" "Turn this message off by running\n" "\"git config advice.addIgnoredFile false\"")); @@ -562,7 +556,7 @@ int cmd_add(int argc, const char **argv, const char *prefix) if (require_pathspec && pathspec.nr == 0) { fprintf(stderr, _("Nothing specified, nothing added.\n")); - if (advice_add_empty_pathspec) + if (advice_enabled(ADVICE_ADD_EMPTY_PATHSPEC)) advise( _("Maybe you wanted to say 'git add .'?\n" "Turn this message off by running\n" "\"git config advice.addEmptyPathspec false\"")); diff --git a/builtin/am.c b/builtin/am.c index 0c2ad96b70..e4a0ff9cd7 100644 --- a/builtin/am.c +++ b/builtin/am.c @@ -1820,7 +1820,7 @@ static void am_run(struct am_state *state, int resume) printf_ln(_("Patch failed at %s %.*s"), msgnum(state), linelen(state->msg), state->msg); - if (advice_amworkdir) + if (advice_enabled(ADVICE_AM_WORK_DIR)) advise(_("Use 'git am --show-current-patch=diff' to see the failed patch")); die_user_resolve(state); @@ -1848,7 +1848,6 @@ next: */ if (!state->rebasing) { am_destroy(state); - close_object_store(the_repository->objects); run_auto_maintenance(state->quiet); } } @@ -2106,7 +2105,8 @@ static void am_abort(struct am_state *state) if (!has_orig_head) oidcpy(&orig_head, the_hash_algo->empty_tree); - clean_index(&curr_head, &orig_head); + if 
(clean_index(&curr_head, &orig_head)) + die(_("failed to clean index")); if (has_orig_head) update_ref("am --abort", "HEAD", &orig_head, diff --git a/builtin/archive.c b/builtin/archive.c index 45d11669aa..7176b041b6 100644 --- a/builtin/archive.c +++ b/builtin/archive.c @@ -12,9 +12,7 @@ static void create_output_file(const char *output_file) { - int output_fd = open(output_file, O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (output_fd < 0) - die_errno(_("could not create archive file '%s'"), output_file); + int output_fd = xopen(output_file, O_CREAT | O_WRONLY | O_TRUNC, 0666); if (output_fd != 1) { if (dup2(output_fd, 1) < 0) die_errno(_("could not redirect output")); diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c index f184eaeac6..bc210b23c8 100644 --- a/builtin/bisect--helper.c +++ b/builtin/bisect--helper.c @@ -18,10 +18,10 @@ static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG") static GIT_PATH_FUNC(git_path_head_name, "head-name") static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES") static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT") +static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN") static const char * const git_bisect_helper_usage[] = { N_("git bisect--helper --bisect-reset [<commit>]"), - N_("git bisect--helper --bisect-next-check <good_term> <bad_term> [<term>]"), N_("git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]"), N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]" " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"), @@ -30,6 +30,8 @@ static const char * const git_bisect_helper_usage[] = { N_("git bisect--helper --bisect-state (good|old) [<rev>...]"), N_("git bisect--helper --bisect-replay <filename>"), N_("git bisect--helper --bisect-skip [(<rev>|<range>)...]"), + N_("git bisect--helper --bisect-visualize"), + N_("git bisect--helper --bisect-run <cmd>..."), NULL }; @@ -143,6 +145,19 @@ static int append_to_file(const char *path, const char *format, ...) 
return res; } +static int print_file_to_stdout(const char *path) +{ + int fd = open(path, O_RDONLY); + int ret = 0; + + if (fd < 0) + return error_errno(_("cannot open file '%s' for reading"), path); + if (copy_fd(fd, 1) < 0) + ret = error_errno(_("failed to read '%s'"), path); + close(fd); + return ret; +} + static int check_term_format(const char *term, const char *orig_term) { int res; @@ -1036,6 +1051,125 @@ static enum bisect_error bisect_skip(struct bisect_terms *terms, const char **ar return res; } +static int bisect_visualize(struct bisect_terms *terms, const char **argv, int argc) +{ + struct strvec args = STRVEC_INIT; + int flags = RUN_COMMAND_NO_STDIN, res = 0; + struct strbuf sb = STRBUF_INIT; + + if (bisect_next_check(terms, NULL) != 0) + return BISECT_FAILED; + + if (!argc) { + if ((getenv("DISPLAY") || getenv("SESSIONNAME") || getenv("MSYSTEM") || + getenv("SECURITYSESSIONID")) && exists_in_PATH("gitk")) { + strvec_push(&args, "gitk"); + } else { + strvec_push(&args, "log"); + flags |= RUN_GIT_CMD; + } + } else { + if (argv[0][0] == '-') { + strvec_push(&args, "log"); + flags |= RUN_GIT_CMD; + } else if (strcmp(argv[0], "tig") && !starts_with(argv[0], "git")) + flags |= RUN_GIT_CMD; + + strvec_pushv(&args, argv); + } + + strvec_pushl(&args, "--bisect", "--", NULL); + + strbuf_read_file(&sb, git_path_bisect_names(), 0); + sq_dequote_to_strvec(sb.buf, &args); + strbuf_release(&sb); + + res = run_command_v_opt(args.v, flags); + strvec_clear(&args); + return res; +} + +static int bisect_run(struct bisect_terms *terms, const char **argv, int argc) +{ + int res = BISECT_OK; + struct strbuf command = STRBUF_INIT; + struct strvec args = STRVEC_INIT; + struct strvec run_args = STRVEC_INIT; + const char *new_state; + int temporary_stdout_fd, saved_stdout; + + if (bisect_next_check(terms, NULL)) + return BISECT_FAILED; + + if (argc) + sq_quote_argv(&command, argv); + else { + error(_("bisect run failed: no command provided.")); + return BISECT_FAILED; + } + + strvec_push(&run_args, command.buf); + + while (1) { + strvec_clear(&args); + + printf(_("running %s\n"), command.buf); + res = run_command_v_opt(run_args.v, RUN_USING_SHELL); + + if (res < 0 || 128 <= res) { + error(_("bisect run failed: exit code %d from" + " '%s' is < 0 or >= 128"), res, command.buf); + strbuf_release(&command); + return res; + } + + if (res == 125) + new_state = "skip"; + else if (!res) + new_state = terms->term_good; + else + new_state = terms->term_bad; + + temporary_stdout_fd = open(git_path_bisect_run(), O_CREAT | O_WRONLY | O_TRUNC, 0666); + + if (temporary_stdout_fd < 0) + return error_errno(_("cannot open file '%s' for writing"), git_path_bisect_run()); + + fflush(stdout); + saved_stdout = dup(1); + dup2(temporary_stdout_fd, 1); + + res = bisect_state(terms, &new_state, 1); + + fflush(stdout); + dup2(saved_stdout, 1); + close(saved_stdout); + close(temporary_stdout_fd); + + print_file_to_stdout(git_path_bisect_run()); + + if (res == BISECT_ONLY_SKIPPED_LEFT) + error(_("bisect run cannot continue any more")); + else if (res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) { + printf(_("bisect run success")); + res = BISECT_OK; + } else if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) { + printf(_("bisect found first bad commit")); + res = BISECT_OK; + } else if (res) { + error(_("bisect run failed:'git bisect--helper --bisect-state" + " %s' exited with error code %d"), args.v[0], res); + } else { + continue; + } + + strbuf_release(&command); + strvec_clear(&args); + strvec_clear(&run_args); + return res; + } +} + 
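bisect_run() above captures the output of bisect_state() by temporarily pointing fd 1 at BISECT_RUN with dup()/dup2(), restoring stdout afterwards and replaying the file. A minimal standalone sketch of that redirection dance follows; "capture.out" and the printed text are arbitrary, and error handling is abbreviated.

----
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int tmp_fd = open("capture.out", O_CREAT | O_WRONLY | O_TRUNC, 0666);
	int saved_stdout = dup(1);
	char buf[4096];
	ssize_t n;

	if (tmp_fd < 0 || saved_stdout < 0)
		return 1;

	fflush(stdout);			/* flush before switching fds */
	dup2(tmp_fd, 1);

	printf("this line lands in capture.out, not on the terminal\n");

	fflush(stdout);
	dup2(saved_stdout, 1);		/* restore the real stdout */
	close(saved_stdout);
	close(tmp_fd);

	/* replay the captured output, as print_file_to_stdout() does above */
	tmp_fd = open("capture.out", O_RDONLY);
	if (tmp_fd < 0)
		return 1;
	while ((n = read(tmp_fd, buf, sizeof(buf))) > 0)
		if (write(1, buf, (size_t)n) < 0)
			break;
	close(tmp_fd);
	return 0;
}
----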
int cmd_bisect__helper(int argc, const char **argv, const char *prefix) { enum { @@ -1048,7 +1182,9 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) BISECT_STATE, BISECT_LOG, BISECT_REPLAY, - BISECT_SKIP + BISECT_SKIP, + BISECT_VISUALIZE, + BISECT_RUN, } cmdmode = 0; int res = 0, nolog = 0; struct option options[] = { @@ -1070,6 +1206,10 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) N_("replay the bisection process from the given file"), BISECT_REPLAY), OPT_CMDMODE(0, "bisect-skip", &cmdmode, N_("skip some commits for checkout"), BISECT_SKIP), + OPT_CMDMODE(0, "bisect-visualize", &cmdmode, + N_("visualize the bisection"), BISECT_VISUALIZE), + OPT_CMDMODE(0, "bisect-run", &cmdmode, + N_("use <cmd>... to automatically bisect."), BISECT_RUN), OPT_BOOL(0, "no-log", &nolog, N_("no log for BISECT_WRITE")), OPT_END() @@ -1089,12 +1229,6 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) return error(_("--bisect-reset requires either no argument or a commit")); res = bisect_reset(argc ? argv[0] : NULL); break; - case BISECT_NEXT_CHECK: - if (argc != 2 && argc != 3) - return error(_("--bisect-next-check requires 2 or 3 arguments")); - set_terms(&terms, argv[1], argv[0]); - res = bisect_next_check(&terms, argc == 3 ? argv[2] : NULL); - break; case BISECT_TERMS: if (argc > 1) return error(_("--bisect-terms requires 0 or 1 argument")); @@ -1131,6 +1265,16 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) get_terms(&terms); res = bisect_skip(&terms, argv, argc); break; + case BISECT_VISUALIZE: + get_terms(&terms); + res = bisect_visualize(&terms, argv, argc); + break; + case BISECT_RUN: + if (!argc) + return error(_("bisect run failed: no command provided.")); + get_terms(&terms); + res = bisect_run(&terms, argv, argc); + break; default: BUG("unknown subcommand %d", cmdmode); } diff --git a/builtin/branch.c b/builtin/branch.c index b23b1d1752..03c7b7253a 100644 --- a/builtin/branch.c +++ b/builtin/branch.c @@ -168,7 +168,7 @@ static int check_branch_commit(const char *branchname, const char *refname, int kinds, int force) { struct commit *rev = lookup_commit_reference(the_repository, oid); - if (!rev) { + if (!force && !rev) { error(_("Couldn't look up commit object for '%s'"), refname); return -1; } diff --git a/builtin/bugreport.c b/builtin/bugreport.c index 9915a5841d..06ed10dc92 100644 --- a/builtin/bugreport.c +++ b/builtin/bugreport.c @@ -171,10 +171,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) get_populated_hooks(&buffer, !startup_info->have_repository); /* fopen doesn't offer us an O_EXCL alternative, except with glibc. 
*/ - report = open(report_path.buf, O_CREAT | O_EXCL | O_WRONLY, 0666); - - if (report < 0) - die(_("couldn't create a new file at '%s'"), report_path.buf); + report = xopen(report_path.buf, O_CREAT | O_EXCL | O_WRONLY, 0666); if (write_in_full(report, buffer.buf, buffer.len) < 0) die_errno(_("unable to write to %s"), report_path.buf); diff --git a/builtin/bundle.c b/builtin/bundle.c index 053a51bea1..5a85d7cd0f 100644 --- a/builtin/bundle.c +++ b/builtin/bundle.c @@ -39,8 +39,6 @@ static const char * const builtin_bundle_unbundle_usage[] = { NULL }; -static int verbose; - static int parse_options_cmd_bundle(int argc, const char **argv, const char* prefix, @@ -162,10 +160,15 @@ static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix) struct bundle_header header = BUNDLE_HEADER_INIT; int bundle_fd = -1; int ret; + int progress = isatty(2); + struct option options[] = { + OPT_BOOL(0, "progress", &progress, + N_("show progress meter")), OPT_END() }; char *bundle_file; + struct strvec extra_index_pack_args = STRVEC_INIT; argc = parse_options_cmd_bundle(argc, argv, prefix, builtin_bundle_unbundle_usage, options, &bundle_file); @@ -177,7 +180,11 @@ static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix) } if (!startup_info->have_repository) die(_("Need a repository to unbundle.")); - ret = !!unbundle(the_repository, &header, bundle_fd, 0) || + if (progress) + strvec_pushl(&extra_index_pack_args, "-v", "--progress-title", + _("Unbundling objects"), NULL); + ret = !!unbundle(the_repository, &header, bundle_fd, + &extra_index_pack_args) || list_bundle_refs(&header, argc, argv); bundle_header_release(&header); cleanup: @@ -188,7 +195,6 @@ cleanup: int cmd_bundle(int argc, const char **argv, const char *prefix) { struct option options[] = { - OPT__VERBOSE(&verbose, N_("be verbose; must be placed before a subcommand")), OPT_END() }; int result; diff --git a/builtin/checkout.c b/builtin/checkout.c index b5d477919a..8c69dcdf72 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -404,7 +404,7 @@ static int checkout_worktree(const struct checkout_opts *opts, mem_pool_discard(&ce_mem_pool, should_validate_cache_entries()); remove_marked_cache_entries(&the_index, 1); remove_scheduled_dirs(); - errs |= finish_delayed_checkout(&state, &nr_checkouts); + errs |= finish_delayed_checkout(&state, &nr_checkouts, opts->show_progress); if (opts->count_checkout_paths) { if (nr_unmerged) @@ -918,7 +918,7 @@ static void update_refs_for_switch(const struct checkout_opts *opts, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR); if (!opts->quiet) { if (old_branch_info->path && - advice_detached_head && !opts->force_detach) + advice_enabled(ADVICE_DETACHED_HEAD) && !opts->force_detach) detach_advice(new_branch_info->name); describe_detached_head(_("HEAD is now at"), new_branch_info->commit); } @@ -1011,7 +1011,7 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs) sb.buf); strbuf_release(&sb); - if (advice_detached_head) + if (advice_enabled(ADVICE_DETACHED_HEAD)) fprintf(stderr, Q_( /* The singular version */ @@ -1182,7 +1182,7 @@ static const char *parse_remote_branch(const char *arg, } if (!remote && num_matches > 1) { - if (advice_checkout_ambiguous_remote_branch_name) { + if (advice_enabled(ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME)) { advise(_("If you meant to check out a remote tracking branch on, e.g. 
'origin',\n" "you can do so by fully qualifying the name with the --track option:\n" "\n" diff --git a/builtin/clone.c b/builtin/clone.c index 66fe66679c..c9d4ca2664 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -217,120 +217,6 @@ static char *get_repo_path(const char *repo, int *is_bundle) return canon; } -static char *guess_dir_name(const char *repo, int is_bundle, int is_bare) -{ - const char *end = repo + strlen(repo), *start, *ptr; - size_t len; - char *dir; - - /* - * Skip scheme. - */ - start = strstr(repo, "://"); - if (start == NULL) - start = repo; - else - start += 3; - - /* - * Skip authentication data. The stripping does happen - * greedily, such that we strip up to the last '@' inside - * the host part. - */ - for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) { - if (*ptr == '@') - start = ptr + 1; - } - - /* - * Strip trailing spaces, slashes and /.git - */ - while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1]))) - end--; - if (end - start > 5 && is_dir_sep(end[-5]) && - !strncmp(end - 4, ".git", 4)) { - end -= 5; - while (start < end && is_dir_sep(end[-1])) - end--; - } - - /* - * Strip trailing port number if we've got only a - * hostname (that is, there is no dir separator but a - * colon). This check is required such that we do not - * strip URI's like '/foo/bar:2222.git', which should - * result in a dir '2222' being guessed due to backwards - * compatibility. - */ - if (memchr(start, '/', end - start) == NULL - && memchr(start, ':', end - start) != NULL) { - ptr = end; - while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':') - ptr--; - if (start < ptr && ptr[-1] == ':') - end = ptr - 1; - } - - /* - * Find last component. To remain backwards compatible we - * also regard colons as path separators, such that - * cloning a repository 'foo:bar.git' would result in a - * directory 'bar' being guessed. - */ - ptr = end; - while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':') - ptr--; - start = ptr; - - /* - * Strip .{bundle,git}. - */ - len = end - start; - strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git"); - - if (!len || (len == 1 && *start == '/')) - die(_("No directory name could be guessed.\n" - "Please specify a directory on the command line")); - - if (is_bare) - dir = xstrfmt("%.*s.git", (int)len, start); - else - dir = xstrndup(start, len); - /* - * Replace sequences of 'control' characters and whitespace - * with one ascii space, remove leading and trailing spaces. 
- */ - if (*dir) { - char *out = dir; - int prev_space = 1 /* strip leading whitespace */; - for (end = dir; *end; ++end) { - char ch = *end; - if ((unsigned char)ch < '\x20') - ch = '\x20'; - if (isspace(ch)) { - if (prev_space) - continue; - prev_space = 1; - } else - prev_space = 0; - *out++ = ch; - } - *out = '\0'; - if (out > dir && prev_space) - out[-1] = '\0'; - } - return dir; -} - -static void strip_trailing_slashes(char *dir) -{ - char *end = dir + strlen(dir); - - while (dir < end - 1 && is_dir_sep(end[-1])) - end--; - *end = '\0'; -} - static int add_one_reference(struct string_list_item *item, void *cb_data) { struct strbuf err = STRBUF_INIT; @@ -657,7 +543,7 @@ static void write_followtags(const struct ref *refs, const char *msg) } } -static int iterate_ref_map(void *cb_data, struct object_id *oid) +static const struct object_id *iterate_ref_map(void *cb_data) { struct ref **rm = cb_data; struct ref *ref = *rm; @@ -668,13 +554,11 @@ static int iterate_ref_map(void *cb_data, struct object_id *oid) */ while (ref && !ref->peer_ref) ref = ref->next; - /* Returning -1 notes "end of list" to the caller. */ if (!ref) - return -1; + return NULL; - oidcpy(oid, &ref->old_oid); *rm = ref->next; - return 0; + return &ref->old_oid; } static void update_remote_refs(const struct ref *refs, @@ -786,7 +670,7 @@ static int checkout(int submodule_progress) return 0; } if (!strcmp(head, "HEAD")) { - if (advice_detached_head) + if (advice_enabled(ADVICE_DETACHED_HEAD)) detach_advice(oid_to_hex(&oid)); FREE_AND_NULL(head); } else { @@ -1041,8 +925,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix) if (argc == 2) dir = xstrdup(argv[1]); else - dir = guess_dir_name(repo_name, is_bundle, option_bare); - strip_trailing_slashes(dir); + dir = git_url_basename(repo_name, is_bundle, option_bare); + strip_dir_trailing_slashes(dir); dest_exists = path_exists(dir); if (dest_exists && !is_empty_dir(dir)) @@ -1114,6 +998,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix) if (option_recurse_submodules.nr > 0) { struct string_list_item *item; struct strbuf sb = STRBUF_INIT; + int val; /* remove duplicates */ string_list_sort(&option_recurse_submodules); @@ -1130,6 +1015,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix) strbuf_detach(&sb, NULL)); } + if (!git_config_get_bool("submodule.stickyRecursiveClone", &val) && + val) + string_list_append(&option_config, "submodule.recurse=true"); + if (option_required_reference.nr && option_optional_reference.nr) die(_("clone --recursive is not compatible with " @@ -1340,6 +1229,9 @@ int cmd_clone(int argc, const char **argv, const char *prefix) our_head_points_at = remote_head_points_at; } else { + const char *branch; + char *ref; + if (option_branch) die(_("Remote branch %s not found in upstream %s"), option_branch, remote_name); @@ -1350,24 +1242,22 @@ int cmd_clone(int argc, const char **argv, const char *prefix) remote_head_points_at = NULL; remote_head = NULL; option_no_checkout = 1; - if (!option_bare) { - const char *branch; - char *ref; - - if (transport_ls_refs_options.unborn_head_target && - skip_prefix(transport_ls_refs_options.unborn_head_target, - "refs/heads/", &branch)) { - ref = transport_ls_refs_options.unborn_head_target; - transport_ls_refs_options.unborn_head_target = NULL; - create_symref("HEAD", ref, reflog_msg.buf); - } else { - branch = git_default_branch_name(0); - ref = xstrfmt("refs/heads/%s", branch); - } - install_branch_config(0, branch, remote_name, ref); - free(ref); + if 
(transport_ls_refs_options.unborn_head_target && + skip_prefix(transport_ls_refs_options.unborn_head_target, + "refs/heads/", &branch)) { + ref = transport_ls_refs_options.unborn_head_target; + transport_ls_refs_options.unborn_head_target = NULL; + create_symref("HEAD", ref, reflog_msg.buf); + } else { + branch = git_default_branch_name(0); + ref = xstrfmt("refs/heads/%s", branch); } + + if (!option_bare) + install_branch_config(0, branch, remote_name, ref); + + free(ref); } write_refspec_config(src_ref_prefix, our_head_points_at, diff --git a/builtin/column.c b/builtin/column.c index 40d4b3bee2..158fdf53d9 100644 --- a/builtin/column.c +++ b/builtin/column.c @@ -29,7 +29,7 @@ int cmd_column(int argc, const char **argv, const char *prefix) OPT_INTEGER(0, "raw-mode", &colopts, N_("layout to use")), OPT_INTEGER(0, "width", &copts.width, N_("maximum width")), OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("padding space on left border")), - OPT_INTEGER(0, "nl", &copts.nl, N_("padding space on right border")), + OPT_STRING(0, "nl", &copts.nl, N_("string"), N_("padding space on right border")), OPT_INTEGER(0, "padding", &copts.padding, N_("padding space between columns")), OPT_END() }; diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c index cd86315221..3c3de3a156 100644 --- a/builtin/commit-graph.c +++ b/builtin/commit-graph.c @@ -9,26 +9,29 @@ #include "progress.h" #include "tag.h" -static char const * const builtin_commit_graph_usage[] = { - N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]"), - N_("git commit-graph write [--object-dir <objdir>] [--append] " - "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] " - "[--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress] " - "<split options>"), +#define BUILTIN_COMMIT_GRAPH_VERIFY_USAGE \ + N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]") + +#define BUILTIN_COMMIT_GRAPH_WRITE_USAGE \ + N_("git commit-graph write [--object-dir <objdir>] [--append] " \ + "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] " \ + "[--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress] " \ + "<split options>") + +static const char * builtin_commit_graph_verify_usage[] = { + BUILTIN_COMMIT_GRAPH_VERIFY_USAGE, NULL }; -static const char * const builtin_commit_graph_verify_usage[] = { - N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]"), +static const char * builtin_commit_graph_write_usage[] = { + BUILTIN_COMMIT_GRAPH_WRITE_USAGE, NULL }; -static const char * const builtin_commit_graph_write_usage[] = { - N_("git commit-graph write [--object-dir <objdir>] [--append] " - "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] " - "[--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress] " - "<split options>"), - NULL +static char const * const builtin_commit_graph_usage[] = { + BUILTIN_COMMIT_GRAPH_VERIFY_USAGE, + BUILTIN_COMMIT_GRAPH_WRITE_USAGE, + NULL, }; static struct opts_commit_graph { @@ -43,26 +46,16 @@ static struct opts_commit_graph { int enable_changed_paths; } opts; -static struct object_directory *find_odb(struct repository *r, - const char *obj_dir) -{ - struct object_directory *odb; - char *obj_dir_real = real_pathdup(obj_dir, 1); - struct strbuf odb_path_real = STRBUF_INIT; - - prepare_alt_odb(r); - for (odb = r->objects->odb; odb; odb = odb->next) { - strbuf_realpath(&odb_path_real, odb->path, 1); - if (!strcmp(obj_dir_real, odb_path_real.buf)) - break; - } - - 
free(obj_dir_real); - strbuf_release(&odb_path_real); +static struct option common_opts[] = { + OPT_STRING(0, "object-dir", &opts.obj_dir, + N_("dir"), + N_("the object directory to store the graph")), + OPT_END() +}; - if (!odb) - die(_("could not find object directory matching %s"), obj_dir); - return odb; +static struct option *add_common_options(struct option *to) +{ + return parse_options_concat(common_opts, to); } static int graph_verify(int argc, const char **argv) @@ -76,21 +69,22 @@ static int graph_verify(int argc, const char **argv) int flags = 0; static struct option builtin_commit_graph_verify_options[] = { - OPT_STRING(0, "object-dir", &opts.obj_dir, - N_("dir"), - N_("the object directory to store the graph")), OPT_BOOL(0, "shallow", &opts.shallow, N_("if the commit-graph is split, only verify the tip file")), - OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")), + OPT_BOOL(0, "progress", &opts.progress, + N_("force progress reporting")), OPT_END(), }; + struct option *options = add_common_options(builtin_commit_graph_verify_options); trace2_cmd_mode("verify"); opts.progress = isatty(2); argc = parse_options(argc, argv, NULL, - builtin_commit_graph_verify_options, + options, builtin_commit_graph_verify_usage, 0); + if (argc) + usage_with_options(builtin_commit_graph_verify_usage, options); if (!opts.obj_dir) opts.obj_dir = get_object_directory(); @@ -106,6 +100,7 @@ static int graph_verify(int argc, const char **argv) die_errno(_("Could not open commit-graph '%s'"), graph_name); FREE_AND_NULL(graph_name); + FREE_AND_NULL(options); if (open_ok) graph = load_commit_graph_one_fd_st(the_repository, fd, &st, odb); @@ -206,9 +201,6 @@ static int graph_write(int argc, const char **argv) struct progress *progress = NULL; static struct option builtin_commit_graph_write_options[] = { - OPT_STRING(0, "object-dir", &opts.obj_dir, - N_("dir"), - N_("the object directory to store the graph")), OPT_BOOL(0, "reachable", &opts.reachable, N_("start walk at all refs")), OPT_BOOL(0, "stdin-packs", &opts.stdin_packs, @@ -219,7 +211,6 @@ static int graph_write(int argc, const char **argv) N_("include all commits already in the commit-graph file")), OPT_BOOL(0, "changed-paths", &opts.enable_changed_paths, N_("enable computation for changed paths")), - OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")), OPT_CALLBACK_F(0, "split", &write_opts.split_flags, NULL, N_("allow writing an incremental commit-graph file"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, @@ -233,8 +224,11 @@ static int graph_write(int argc, const char **argv) OPT_CALLBACK_F(0, "max-new-filters", &write_opts.max_new_filters, NULL, N_("maximum number of changed-path Bloom filters to compute"), 0, write_option_max_new_filters), + OPT_BOOL(0, "progress", &opts.progress, + N_("force progress reporting")), OPT_END(), }; + struct option *options = add_common_options(builtin_commit_graph_write_options); opts.progress = isatty(2); opts.enable_changed_paths = -1; @@ -248,8 +242,10 @@ static int graph_write(int argc, const char **argv) git_config(git_commit_graph_write_config, &opts); argc = parse_options(argc, argv, NULL, - builtin_commit_graph_write_options, + options, builtin_commit_graph_write_usage, 0); + if (argc) + usage_with_options(builtin_commit_graph_write_usage, options); if (opts.reachable + opts.stdin_packs + opts.stdin_commits > 1) die(_("use at most one of --reachable, --stdin-commits, or --stdin-packs")); @@ -304,6 +300,7 @@ static int graph_write(int argc, const char **argv) result = 1; 
cleanup: + FREE_AND_NULL(options); string_list_clear(&pack_indexes, 0); strbuf_release(&buf); return result; @@ -311,32 +308,25 @@ cleanup: int cmd_commit_graph(int argc, const char **argv, const char *prefix) { - static struct option builtin_commit_graph_options[] = { - OPT_STRING(0, "object-dir", &opts.obj_dir, - N_("dir"), - N_("the object directory to store the graph")), - OPT_END(), - }; - - if (argc == 2 && !strcmp(argv[1], "-h")) - usage_with_options(builtin_commit_graph_usage, - builtin_commit_graph_options); + struct option *builtin_commit_graph_options = common_opts; git_config(git_default_config, NULL); argc = parse_options(argc, argv, prefix, builtin_commit_graph_options, builtin_commit_graph_usage, PARSE_OPT_STOP_AT_NON_OPTION); + if (!argc) + goto usage; save_commit_buffer = 0; - if (argc > 0) { - if (!strcmp(argv[0], "verify")) - return graph_verify(argc, argv); - if (!strcmp(argv[0], "write")) - return graph_write(argc, argv); - } + if (!strcmp(argv[0], "verify")) + return graph_verify(argc, argv); + else if (argc && !strcmp(argv[0], "write")) + return graph_write(argc, argv); + error(_("unrecognized subcommand: %s"), argv[0]); +usage: usage_with_options(builtin_commit_graph_usage, builtin_commit_graph_options); } diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c index 1031b9a491..63ea322933 100644 --- a/builtin/commit-tree.c +++ b/builtin/commit-tree.c @@ -88,9 +88,7 @@ static int parse_file_arg_callback(const struct option *opt, if (!strcmp(arg, "-")) fd = 0; else { - fd = open(arg, O_RDONLY); - if (fd < 0) - die_errno(_("git commit-tree: failed to open '%s'"), arg); + fd = xopen(arg, O_RDONLY); } if (strbuf_read(buf, fd, 0) < 0) die_errno(_("git commit-tree: failed to read '%s'"), arg); diff --git a/builtin/commit.c b/builtin/commit.c index 7c9b1e7be3..e7320f66f9 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -203,7 +203,7 @@ static void status_init_config(struct wt_status *s, config_fn_t fn) init_diff_ui_defaults(); git_config(fn, s); determine_whence(s); - s->hints = advice_status_hints; /* must come after git_config() */ + s->hints = advice_enabled(ADVICE_STATUS_HINTS); /* must come after git_config() */ } static void rollback_index_files(void) @@ -1033,7 +1033,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, */ if (!committable && whence != FROM_MERGE && !allow_empty && !(amend && is_a_merge(current_head))) { - s->hints = advice_status_hints; + s->hints = advice_enabled(ADVICE_STATUS_HINTS); s->display_comment_prefix = old_display_comment_prefix; run_status(stdout, index_file, prefix, 0, s); if (amend) diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c index e8a7415747..78c02ad531 100644 --- a/builtin/credential-cache.c +++ b/builtin/credential-cache.c @@ -11,6 +11,32 @@ #define FLAG_SPAWN 0x1 #define FLAG_RELAY 0x2 +#ifdef GIT_WINDOWS_NATIVE + +static int connection_closed(int error) +{ + return (error == EINVAL); +} + +static int connection_fatally_broken(int error) +{ + return (error != ENOENT) && (error != ENETDOWN); +} + +#else + +static int connection_closed(int error) +{ + return (error == ECONNRESET); +} + +static int connection_fatally_broken(int error) +{ + return (error != ENOENT) && (error != ECONNREFUSED); +} + +#endif + static int send_request(const char *socket, const struct strbuf *out) { int got_data = 0; @@ -28,7 +54,7 @@ static int send_request(const char *socket, const struct strbuf *out) int r; r = read_in_full(fd, in, sizeof(in)); - if (r == 0 || (r < 0 && errno == ECONNRESET)) + 
if (r == 0 || (r < 0 && connection_closed(errno))) break; if (r < 0) die_errno("read error from cache daemon"); @@ -75,7 +101,7 @@ static void do_cache(const char *socket, const char *action, int timeout, } if (send_request(socket, &buf) < 0) { - if (errno != ENOENT && errno != ECONNREFUSED) + if (connection_fatally_broken(errno)) die_errno("unable to connect to cache daemon"); if (flags & FLAG_SPAWN) { spawn_daemon(socket); diff --git a/builtin/diff-index.c b/builtin/diff-index.c index cf09559e42..5fd23ab5b6 100644 --- a/builtin/diff-index.c +++ b/builtin/diff-index.c @@ -29,10 +29,10 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix) prefix = precompose_argv_prefix(argc, argv, prefix); /* - * We need no diff for merges options, and we need to avoid conflict - * with our own meaning of "-m". + * We need (some of) diff for merges options (e.g., --cc), and we need + * to avoid conflict with our own meaning of "-m". */ - diff_merges_suppress_options_parsing(); + diff_merges_suppress_m_parsing(); argc = setup_revisions(argc, argv, &rev, NULL); for (i = 1; i < argc; i++) { diff --git a/builtin/difftool.c b/builtin/difftool.c index 6a9242a803..210da03908 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -331,7 +331,7 @@ static int checkout_path(unsigned mode, struct object_id *oid, } static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, - int argc, const char **argv) + struct child_process *child) { char tmpdir[PATH_MAX]; struct strbuf info = STRBUF_INIT, lpath = STRBUF_INIT; @@ -352,7 +352,6 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, struct index_state wtindex; struct checkout lstate, rstate; int rc, flags = RUN_GIT_CMD, err = 0; - struct child_process child = CHILD_PROCESS_INIT; const char *helper_argv[] = { "difftool--helper", NULL, NULL, NULL }; struct hashmap wt_modified, tmp_modified; int indices_loaded = 0; @@ -387,19 +386,15 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, rdir_len = rdir.len; wtdir_len = wtdir.len; - child.no_stdin = 1; - child.git_cmd = 1; - child.use_shell = 0; - child.clean_on_exit = 1; - child.dir = prefix; - child.out = -1; - strvec_pushl(&child.args, "diff", "--raw", "--no-abbrev", "-z", - NULL); - for (i = 0; i < argc; i++) - strvec_push(&child.args, argv[i]); - if (start_command(&child)) + child->no_stdin = 1; + child->git_cmd = 1; + child->use_shell = 0; + child->clean_on_exit = 1; + child->dir = prefix; + child->out = -1; + if (start_command(child)) die("could not obtain raw diff"); - fp = xfdopen(child.out, "r"); + fp = xfdopen(child->out, "r"); /* Build index info for left and right sides of the diff */ i = 0; @@ -410,9 +405,9 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, const char *src_path, *dst_path; if (starts_with(info.buf, "::")) - die(N_("combined diff formats('-c' and '--cc') are " + die(N_("combined diff formats ('-c' and '--cc') are " "not supported in\n" - "directory diff mode('-d' and '--dir-diff').")); + "directory diff mode ('-d' and '--dir-diff').")); if (parse_index_info(info.buf, &lmode, &rmode, &loid, &roid, &status)) @@ -525,7 +520,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, fclose(fp); fp = NULL; - if (finish_command(&child)) { + if (finish_command(child)) { ret = error("error occurred running diff --raw"); goto finish; } @@ -562,11 +557,13 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, if (*entry->left) { 
add_path(&ldir, ldir_len, entry->path); ensure_leading_directories(ldir.buf); + unlink(ldir.buf); write_file(ldir.buf, "%s", entry->left); } if (*entry->right) { add_path(&rdir, rdir_len, entry->path); ensure_leading_directories(rdir.buf); + unlink(rdir.buf); write_file(rdir.buf, "%s", entry->right); } } @@ -668,25 +665,23 @@ finish: } static int run_file_diff(int prompt, const char *prefix, - int argc, const char **argv) + struct child_process *child) { - struct strvec args = STRVEC_INIT; const char *env[] = { "GIT_PAGER=", "GIT_EXTERNAL_DIFF=git-difftool--helper", NULL, NULL }; - int i; if (prompt > 0) env[2] = "GIT_DIFFTOOL_PROMPT=true"; else if (!prompt) env[2] = "GIT_DIFFTOOL_NO_PROMPT=true"; + child->git_cmd = 1; + child->dir = prefix; + strvec_pushv(&child->env_array, env); - strvec_push(&args, "diff"); - for (i = 0; i < argc; i++) - strvec_push(&args, argv[i]); - return run_command_v_opt_cd_env(args.v, RUN_GIT_CMD, prefix, env); + return run_command(child); } int cmd_difftool(int argc, const char **argv, const char *prefix) @@ -713,12 +708,13 @@ int cmd_difftool(int argc, const char **argv, const char *prefix) "`--tool`")), OPT_BOOL(0, "trust-exit-code", &trust_exit_code, N_("make 'git-difftool' exit when an invoked diff " - "tool returns a non - zero exit code")), + "tool returns a non-zero exit code")), OPT_STRING('x', "extcmd", &extcmd, N_("command"), N_("specify a custom command for viewing diffs")), - OPT_ARGUMENT("no-index", &no_index, N_("passed to `diff`")), + OPT_BOOL(0, "no-index", &no_index, N_("passed to `diff`")), OPT_END() }; + struct child_process child = CHILD_PROCESS_INIT; git_config(difftool_config, NULL); symlinks = has_symlinks; @@ -768,7 +764,14 @@ int cmd_difftool(int argc, const char **argv, const char *prefix) * will invoke a separate instance of 'git-difftool--helper' for * each file that changed. 
*/ + strvec_push(&child.args, "diff"); + if (no_index) + strvec_push(&child.args, "--no-index"); + if (dir_diff) + strvec_pushl(&child.args, "--raw", "--no-abbrev", "-z", NULL); + strvec_pushv(&child.args, argv); + if (dir_diff) - return run_dir_diff(extcmd, symlinks, prefix, argc, argv); - return run_file_diff(prompt, prefix, argc, argv); + return run_dir_diff(extcmd, symlinks, prefix, &child); + return run_file_diff(prompt, prefix, &child); } diff --git a/builtin/fast-export.c b/builtin/fast-export.c index 3c20f164f0..95e8e89e81 100644 --- a/builtin/fast-export.c +++ b/builtin/fast-export.c @@ -821,6 +821,7 @@ static void handle_tag(const char *name, struct tag *tag) static struct hashmap tags; message = anonymize_str(&tags, anonymize_tag, message, message_size, NULL); + message_size = strlen(message); } } diff --git a/builtin/fetch.c b/builtin/fetch.c index e064687dbd..f7abbc31ff 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -712,7 +712,7 @@ static void adjust_refcol_width(const struct ref *ref) int max, rlen, llen, len; /* uptodate lines are only shown on high verbosity level */ - if (!verbosity && oideq(&ref->peer_ref->old_oid, &ref->old_oid)) + if (verbosity <= 0 && oideq(&ref->peer_ref->old_oid, &ref->old_oid)) return; max = term_columns(); @@ -748,6 +748,9 @@ static void prepare_format_display(struct ref *ref_map) struct ref *rm; const char *format = "full"; + if (verbosity < 0) + return; + git_config_get_string_tmp("fetch.output", &format); if (!strcasecmp(format, "full")) compact_format = 0; @@ -827,7 +830,12 @@ static void format_display(struct strbuf *display, char code, const char *remote, const char *local, int summary_width) { - int width = (summary_width + strlen(summary) - gettext_width(summary)); + int width; + + if (verbosity < 0) + return; + + width = (summary_width + strlen(summary) - gettext_width(summary)); strbuf_addf(display, "%c %-*s ", code, width, summary); if (!compact_format) @@ -846,13 +854,11 @@ static int update_local_ref(struct ref *ref, int summary_width) { struct commit *current = NULL, *updated; - enum object_type type; struct branch *current_branch = branch_get(NULL); const char *pretty_ref = prettify_refname(ref->name); int fast_forward = 0; - type = oid_object_info(the_repository, &ref->new_oid, NULL); - if (type < 0) + if (!repo_has_object_file(the_repository, &ref->new_oid)) die(_("object %s not found"), oid_to_hex(&ref->new_oid)); if (oideq(&ref->old_oid, &ref->new_oid)) { @@ -964,7 +970,7 @@ static int update_local_ref(struct ref *ref, } } -static int iterate_ref_map(void *cb_data, struct object_id *oid) +static const struct object_id *iterate_ref_map(void *cb_data) { struct ref **rm = cb_data; struct ref *ref = *rm; @@ -972,10 +978,9 @@ static int iterate_ref_map(void *cb_data, struct object_id *oid) while (ref && ref->status == REF_STATUS_REJECT_SHALLOW) ref = ref->next; if (!ref) - return -1; /* end of the list */ + return NULL; *rm = ref->next; - oidcpy(oid, &ref->old_oid); - return 0; + return &ref->old_oid; } struct fetch_head { @@ -1074,7 +1079,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, int connectivity_checked, struct ref *ref_map) { struct fetch_head fetch_head; - struct commit *commit; int url_len, i, rc = 0; struct strbuf note = STRBUF_INIT, err = STRBUF_INIT; struct ref_transaction *transaction = NULL; @@ -1122,6 +1126,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, want_status <= FETCH_HEAD_IGNORE; want_status++) { for (rm = ref_map; rm; rm = rm->next) { + 
struct commit *commit = NULL; struct ref *ref = NULL; if (rm->status == REF_STATUS_REJECT_SHALLOW) { @@ -1131,11 +1136,23 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, continue; } - commit = lookup_commit_reference_gently(the_repository, - &rm->old_oid, - 1); - if (!commit) - rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE; + /* + * References in "refs/tags/" are often going to point + * to annotated tags, which are not part of the + * commit-graph. We thus only try to look up refs in + * the graph which are not in that namespace to not + * regress performance in repositories with many + * annotated tags. + */ + if (!starts_with(rm->name, "refs/tags/")) + commit = lookup_commit_in_graph(the_repository, &rm->old_oid); + if (!commit) { + commit = lookup_commit_reference_gently(the_repository, + &rm->old_oid, + 1); + if (!commit) + rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE; + } if (rm->fetch_head_status != want_status) continue; @@ -1202,13 +1219,12 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, "FETCH_HEAD", summary_width); } if (note.len) { - if (verbosity >= 0 && !shown_url) { + if (!shown_url) { fprintf(stderr, _("From %.*s\n"), url_len, url); shown_url = 1; } - if (verbosity >= 0) - fprintf(stderr, " %s\n", note.buf); + fprintf(stderr, " %s\n", note.buf); } } } @@ -1229,7 +1245,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, " 'git remote prune %s' to remove any old, conflicting " "branches"), remote_name); - if (advice_fetch_show_forced_updates) { + if (advice_enabled(ADVICE_FETCH_SHOW_FORCED_UPDATES)) { if (!fetch_show_forced_updates) { warning(_(warn_show_forced_updates)); } else if (forced_updates_ms > FORCED_UPDATES_DELAY_WARNING_IN_MS) { @@ -1282,37 +1298,35 @@ static int check_exist_and_connected(struct ref *ref_map) return check_connected(iterate_ref_map, &rm, &opt); } -static int fetch_refs(struct transport *transport, struct ref *ref_map) +static int fetch_and_consume_refs(struct transport *transport, struct ref *ref_map) { - int ret = check_exist_and_connected(ref_map); + int connectivity_checked = 1; + int ret; + + /* + * We don't need to perform a fetch in case we can already satisfy all + * refs. + */ + ret = check_exist_and_connected(ref_map); if (ret) { trace2_region_enter("fetch", "fetch_refs", the_repository); ret = transport_fetch_refs(transport, ref_map); trace2_region_leave("fetch", "fetch_refs", the_repository); + if (ret) + goto out; + connectivity_checked = transport->smart_options ? + transport->smart_options->connectivity_checked : 0; } - if (!ret) - /* - * Keep the new pack's ".keep" file around to allow the caller - * time to update refs to reference the new objects. - */ - return 0; - transport_unlock_pack(transport); - return ret; -} -/* Update local refs based on the ref values fetched from a remote */ -static int consume_refs(struct transport *transport, struct ref *ref_map) -{ - int connectivity_checked = transport->smart_options - ? 
transport->smart_options->connectivity_checked : 0; - int ret; trace2_region_enter("fetch", "consume_refs", the_repository); ret = store_updated_refs(transport->url, transport->remote->name, connectivity_checked, ref_map); - transport_unlock_pack(transport); trace2_region_leave("fetch", "consume_refs", the_repository); + +out: + transport_unlock_pack(transport); return ret; } @@ -1501,8 +1515,7 @@ static void backfill_tags(struct transport *transport, struct ref *ref_map) transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL); transport_set_option(transport, TRANS_OPT_DEPTH, "0"); transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL); - if (!fetch_refs(transport, ref_map)) - consume_refs(transport, ref_map); + fetch_and_consume_refs(transport, ref_map); if (gsecondary) { transport_disconnect(gsecondary); @@ -1593,7 +1606,7 @@ static int do_fetch(struct transport *transport, transport->url); } } - if (fetch_refs(transport, ref_map) || consume_refs(transport, ref_map)) { + if (fetch_and_consume_refs(transport, ref_map)) { free_refs(ref_map); retcode = 1; goto cleanup; @@ -2135,8 +2148,6 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) NULL); } - close_object_store(the_repository->objects); - if (enable_auto_gc) run_auto_maintenance(verbosity < 0); diff --git a/builtin/gc.c b/builtin/gc.c index 6ce5ca4512..6b3de3dd51 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -502,7 +502,7 @@ static int report_last_gc_error(void) */ warning(_("The last gc run reported the following. " "Please correct the root cause\n" - "and remove %s.\n" + "and remove %s\n" "Automatic cleanup will not be performed " "until the file is removed.\n\n" "%s"), @@ -663,8 +663,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix) gc_before_repack(); if (!repository_format_precious_objects) { - close_object_store(the_repository->objects); - if (run_command_v_opt(repack.v, RUN_GIT_CMD)) + if (run_command_v_opt(repack.v, + RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE)) die(FAILED_RUN, repack.v[0]); if (prune_expire) { @@ -848,7 +848,7 @@ static int run_write_commit_graph(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_pushl(&child.args, "commit-graph", "write", "--split", "--reachable", NULL); @@ -864,7 +864,6 @@ static int maintenance_task_commit_graph(struct maintenance_run_opts *opts) if (!the_repository->settings.core_commit_graph) return 0; - close_object_store(the_repository->objects); if (run_write_commit_graph(opts)) { error(_("failed to write commit-graph")); return 1; @@ -913,7 +912,7 @@ static int maintenance_task_gc(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_push(&child.args, "gc"); if (opts->auto_flag) @@ -923,7 +922,6 @@ static int maintenance_task_gc(struct maintenance_run_opts *opts) else strvec_push(&child.args, "--no-quiet"); - close_object_store(the_repository->objects); return run_command(&child); } @@ -1097,14 +1095,12 @@ static int multi_pack_index_expire(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_pushl(&child.args, "multi-pack-index", "expire", NULL); if (opts->quiet) strvec_push(&child.args, "--no-progress"); - close_object_store(the_repository->objects); - if (run_command(&child)) return error(_("'git multi-pack-index 
expire' failed")); @@ -1155,7 +1151,7 @@ static int multi_pack_index_repack(struct maintenance_run_opts *opts) { struct child_process child = CHILD_PROCESS_INIT; - child.git_cmd = 1; + child.git_cmd = child.close_object_store = 1; strvec_pushl(&child.args, "multi-pack-index", "repack", NULL); if (opts->quiet) @@ -1164,8 +1160,6 @@ static int multi_pack_index_repack(struct maintenance_run_opts *opts) strvec_pushf(&child.args, "--batch-size=%"PRIuMAX, (uintmax_t)get_auto_pack_size()); - close_object_store(the_repository->objects); - if (run_command(&child)) return error(_("'git multi-pack-index repack' failed")); @@ -1529,6 +1523,93 @@ static const char *get_frequency(enum schedule_priority schedule) } } +/* + * get_schedule_cmd` reads the GIT_TEST_MAINT_SCHEDULER environment variable + * to mock the schedulers that `git maintenance start` rely on. + * + * For test purpose, GIT_TEST_MAINT_SCHEDULER can be set to a comma-separated + * list of colon-separated key/value pairs where each pair contains a scheduler + * and its corresponding mock. + * + * * If $GIT_TEST_MAINT_SCHEDULER is not set, return false and leave the + * arguments unmodified. + * + * * If $GIT_TEST_MAINT_SCHEDULER is set, return true. + * In this case, the *cmd value is read as input. + * + * * if the input value *cmd is the key of one of the comma-separated list + * item, then *is_available is set to true and *cmd is modified and becomes + * the mock command. + * + * * if the input value *cmd isn’t the key of any of the comma-separated list + * item, then *is_available is set to false. + * + * Ex.: + * GIT_TEST_MAINT_SCHEDULER not set + * +-------+-------------------------------------------------+ + * | Input | Output | + * | *cmd | return code | *cmd | *is_available | + * +-------+-------------+-------------------+---------------+ + * | "foo" | false | "foo" (unchanged) | (unchanged) | + * +-------+-------------+-------------------+---------------+ + * + * GIT_TEST_MAINT_SCHEDULER set to “foo:./mock_foo.sh,bar:./mock_bar.sh” + * +-------+-------------------------------------------------+ + * | Input | Output | + * | *cmd | return code | *cmd | *is_available | + * +-------+-------------+-------------------+---------------+ + * | "foo" | true | "./mock.foo.sh" | true | + * | "qux" | true | "qux" (unchanged) | false | + * +-------+-------------+-------------------+---------------+ + */ +static int get_schedule_cmd(const char **cmd, int *is_available) +{ + char *testing = xstrdup_or_null(getenv("GIT_TEST_MAINT_SCHEDULER")); + struct string_list_item *item; + struct string_list list = STRING_LIST_INIT_NODUP; + + if (!testing) + return 0; + + if (is_available) + *is_available = 0; + + string_list_split_in_place(&list, testing, ',', -1); + for_each_string_list_item(item, &list) { + struct string_list pair = STRING_LIST_INIT_NODUP; + + if (string_list_split_in_place(&pair, item->string, ':', 2) != 2) + continue; + + if (!strcmp(*cmd, pair.items[0].string)) { + *cmd = pair.items[1].string; + if (is_available) + *is_available = 1; + string_list_clear(&list, 0); + UNLEAK(testing); + return 1; + } + } + + string_list_clear(&list, 0); + free(testing); + return 1; +} + +static int is_launchctl_available(void) +{ + const char *cmd = "launchctl"; + int is_available; + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + +#ifdef __APPLE__ + return 1; +#else + return 0; +#endif +} + static char *launchctl_service_name(const char *frequency) { struct strbuf label = STRBUF_INIT; @@ -1555,19 +1636,17 @@ static char 
*launchctl_get_uid(void) return xstrfmt("gui/%d", getuid()); } -static int launchctl_boot_plist(int enable, const char *filename, const char *cmd) +static int launchctl_boot_plist(int enable, const char *filename) { + const char *cmd = "launchctl"; int result; struct child_process child = CHILD_PROCESS_INIT; char *uid = launchctl_get_uid(); + get_schedule_cmd(&cmd, NULL); strvec_split(&child.args, cmd); - if (enable) - strvec_push(&child.args, "bootstrap"); - else - strvec_push(&child.args, "bootout"); - strvec_push(&child.args, uid); - strvec_push(&child.args, filename); + strvec_pushl(&child.args, enable ? "bootstrap" : "bootout", uid, + filename, NULL); child.no_stderr = 1; child.no_stdout = 1; @@ -1581,38 +1660,56 @@ static int launchctl_boot_plist(int enable, const char *filename, const char *cm return result; } -static int launchctl_remove_plist(enum schedule_priority schedule, const char *cmd) +static int launchctl_remove_plist(enum schedule_priority schedule) { const char *frequency = get_frequency(schedule); char *name = launchctl_service_name(frequency); char *filename = launchctl_service_filename(name); - int result = launchctl_boot_plist(0, filename, cmd); + int result = launchctl_boot_plist(0, filename); unlink(filename); free(filename); free(name); return result; } -static int launchctl_remove_plists(const char *cmd) +static int launchctl_remove_plists(void) { - return launchctl_remove_plist(SCHEDULE_HOURLY, cmd) || - launchctl_remove_plist(SCHEDULE_DAILY, cmd) || - launchctl_remove_plist(SCHEDULE_WEEKLY, cmd); + return launchctl_remove_plist(SCHEDULE_HOURLY) || + launchctl_remove_plist(SCHEDULE_DAILY) || + launchctl_remove_plist(SCHEDULE_WEEKLY); } -static int launchctl_schedule_plist(const char *exec_path, enum schedule_priority schedule, const char *cmd) +static int launchctl_list_contains_plist(const char *name, const char *cmd) { - FILE *plist; - int i; + struct child_process child = CHILD_PROCESS_INIT; + + strvec_split(&child.args, cmd); + strvec_pushl(&child.args, "list", name, NULL); + + child.no_stderr = 1; + child.no_stdout = 1; + + if (start_command(&child)) + die(_("failed to start launchctl")); + + /* Returns failure if 'name' doesn't exist. 
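
The get_schedule_cmd() calls introduced in these maintenance hunks consult the GIT_TEST_MAINT_SCHEDULER mapping documented in the comment further up. As a rough standalone sketch of that lookup semantics, using plain libc and hypothetical names rather than git's string_list helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Minimal illustration of the "scheduler:mock-command" lookup described
 * in the gc.c comment above; not git's implementation.  Returns 1 when
 * the variable is set (test mode) and reports via *is_available whether
 * `name` has a mock; *cmd is replaced only when a mock is found.
 */
static int lookup_mock(const char *name, const char **cmd, int *is_available)
{
	const char *env = getenv("GIT_TEST_MAINT_SCHEDULER");
	char *copy, *item, *save = NULL;

	if (!env)
		return 0;			/* not testing: leave *cmd alone */

	*is_available = 0;
	copy = strdup(env);
	for (item = strtok_r(copy, ",", &save); item;
	     item = strtok_r(NULL, ",", &save)) {
		char *colon = strchr(item, ':');
		if (!colon)
			continue;		/* malformed pair, ignore it */
		*colon = '\0';
		if (!strcmp(item, name)) {
			*cmd = strdup(colon + 1);	/* caller owns the copy */
			*is_available = 1;
			break;
		}
	}
	free(copy);
	return 1;				/* test mode is active */
}

int main(void)
{
	const char *cmd = "crontab";
	int available;

	if (lookup_mock("crontab", &cmd, &available))
		printf("mocked: %s (available=%d)\n", cmd, available);
	else
		printf("not in test mode, using real %s\n", cmd);
	return 0;
}

Run with GIT_TEST_MAINT_SCHEDULER=crontab:./mock-crontab.sh in the environment, this prints the mock command; with the variable unset it falls through to the real scheduler, matching the table in the comment.
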
*/ + return !finish_command(&child); +} + +static int launchctl_schedule_plist(const char *exec_path, enum schedule_priority schedule) +{ + int i, fd; const char *preamble, *repeat; const char *frequency = get_frequency(schedule); char *name = launchctl_service_name(frequency); char *filename = launchctl_service_filename(name); + struct lock_file lk = LOCK_INIT; + static unsigned long lock_file_timeout_ms = ULONG_MAX; + struct strbuf plist = STRBUF_INIT, plist2 = STRBUF_INIT; + struct stat st; + const char *cmd = "launchctl"; - if (safe_create_leading_directories(filename)) - die(_("failed to create directories for '%s'"), filename); - plist = xfopen(filename, "w"); - + get_schedule_cmd(&cmd, NULL); preamble = "<?xml version=\"1.0\"?>\n" "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n" "<plist version=\"1.0\">" @@ -1630,7 +1727,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit "</array>\n" "<key>StartCalendarInterval</key>\n" "<array>\n"; - fprintf(plist, preamble, name, exec_path, exec_path, frequency); + strbuf_addf(&plist, preamble, name, exec_path, exec_path, frequency); switch (schedule) { case SCHEDULE_HOURLY: @@ -1639,7 +1736,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit "<key>Minute</key><integer>0</integer>\n" "</dict>\n"; for (i = 1; i <= 23; i++) - fprintf(plist, repeat, i); + strbuf_addf(&plist, repeat, i); break; case SCHEDULE_DAILY: @@ -1649,50 +1746,91 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit "<key>Minute</key><integer>0</integer>\n" "</dict>\n"; for (i = 1; i <= 6; i++) - fprintf(plist, repeat, i); + strbuf_addf(&plist, repeat, i); break; case SCHEDULE_WEEKLY: - fprintf(plist, - "<dict>\n" - "<key>Day</key><integer>0</integer>\n" - "<key>Hour</key><integer>0</integer>\n" - "<key>Minute</key><integer>0</integer>\n" - "</dict>\n"); + strbuf_addstr(&plist, + "<dict>\n" + "<key>Day</key><integer>0</integer>\n" + "<key>Hour</key><integer>0</integer>\n" + "<key>Minute</key><integer>0</integer>\n" + "</dict>\n"); break; default: /* unreachable */ break; } - fprintf(plist, "</array>\n</dict>\n</plist>\n"); - fclose(plist); + strbuf_addstr(&plist, "</array>\n</dict>\n</plist>\n"); + + if (safe_create_leading_directories(filename)) + die(_("failed to create directories for '%s'"), filename); - /* bootout might fail if not already running, so ignore */ - launchctl_boot_plist(0, filename, cmd); - if (launchctl_boot_plist(1, filename, cmd)) - die(_("failed to bootstrap service %s"), filename); + if ((long)lock_file_timeout_ms < 0 && + git_config_get_ulong("gc.launchctlplistlocktimeoutms", + &lock_file_timeout_ms)) + lock_file_timeout_ms = 150; + + fd = hold_lock_file_for_update_timeout(&lk, filename, LOCK_DIE_ON_ERROR, + lock_file_timeout_ms); + + /* + * Does this file already exist? With the intended contents? Is it + * registered already? Then it does not need to be re-registered. 
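
The question the comment above asks (same contents already on disk, already registered with launchd?) is essentially a "write only if changed" guard. A minimal plain-C sketch of that guard, without git's strbuf, lockfile or launchctl plumbing, might look like:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>

/*
 * Illustrative only: return 1 if `path` already holds exactly `buf`
 * (same size, same bytes), so the caller can skip rewriting it.
 */
static int file_matches(const char *path, const char *buf, size_t len)
{
	struct stat st;
	FILE *f;
	char *old;
	int same = 0;

	if (stat(path, &st) || (size_t)st.st_size != len)
		return 0;
	f = fopen(path, "rb");
	if (!f)
		return 0;
	old = malloc(len);
	if (old && fread(old, 1, len, f) == len && !memcmp(old, buf, len))
		same = 1;
	free(old);
	fclose(f);
	return same;
}

int main(void)
{
	const char *contents = "<plist/>\n";

	if (file_matches("demo.plist", contents, strlen(contents)))
		puts("up to date, nothing to do");
	else
		puts("stale or missing, rewrite and re-register");
	return 0;
}

The hunk itself layers a lock file, the launchctl list probe and the bootout/bootstrap calls on top of this basic check.
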
+ */ + if (!stat(filename, &st) && st.st_size == plist.len && + strbuf_read_file(&plist2, filename, plist.len) == plist.len && + !strbuf_cmp(&plist, &plist2) && + launchctl_list_contains_plist(name, cmd)) + rollback_lock_file(&lk); + else { + if (write_in_full(fd, plist.buf, plist.len) < 0 || + commit_lock_file(&lk)) + die_errno(_("could not write '%s'"), filename); + + /* bootout might fail if not already running, so ignore */ + launchctl_boot_plist(0, filename); + if (launchctl_boot_plist(1, filename)) + die(_("failed to bootstrap service %s"), filename); + } free(filename); free(name); + strbuf_release(&plist); + strbuf_release(&plist2); return 0; } -static int launchctl_add_plists(const char *cmd) +static int launchctl_add_plists(void) { const char *exec_path = git_exec_path(); - return launchctl_schedule_plist(exec_path, SCHEDULE_HOURLY, cmd) || - launchctl_schedule_plist(exec_path, SCHEDULE_DAILY, cmd) || - launchctl_schedule_plist(exec_path, SCHEDULE_WEEKLY, cmd); + return launchctl_schedule_plist(exec_path, SCHEDULE_HOURLY) || + launchctl_schedule_plist(exec_path, SCHEDULE_DAILY) || + launchctl_schedule_plist(exec_path, SCHEDULE_WEEKLY); } -static int launchctl_update_schedule(int run_maintenance, int fd, const char *cmd) +static int launchctl_update_schedule(int run_maintenance, int fd) { if (run_maintenance) - return launchctl_add_plists(cmd); + return launchctl_add_plists(); else - return launchctl_remove_plists(cmd); + return launchctl_remove_plists(); +} + +static int is_schtasks_available(void) +{ + const char *cmd = "schtasks"; + int is_available; + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + +#ifdef GIT_WINDOWS_NATIVE + return 1; +#else + return 0; +#endif } static char *schtasks_task_name(const char *frequency) @@ -1702,13 +1840,15 @@ static char *schtasks_task_name(const char *frequency) return strbuf_detach(&label, NULL); } -static int schtasks_remove_task(enum schedule_priority schedule, const char *cmd) +static int schtasks_remove_task(enum schedule_priority schedule) { + const char *cmd = "schtasks"; int result; struct strvec args = STRVEC_INIT; const char *frequency = get_frequency(schedule); char *name = schtasks_task_name(frequency); + get_schedule_cmd(&cmd, NULL); strvec_split(&args, cmd); strvec_pushl(&args, "/delete", "/tn", name, "/f", NULL); @@ -1719,15 +1859,16 @@ static int schtasks_remove_task(enum schedule_priority schedule, const char *cmd return result; } -static int schtasks_remove_tasks(const char *cmd) +static int schtasks_remove_tasks(void) { - return schtasks_remove_task(SCHEDULE_HOURLY, cmd) || - schtasks_remove_task(SCHEDULE_DAILY, cmd) || - schtasks_remove_task(SCHEDULE_WEEKLY, cmd); + return schtasks_remove_task(SCHEDULE_HOURLY) || + schtasks_remove_task(SCHEDULE_DAILY) || + schtasks_remove_task(SCHEDULE_WEEKLY); } -static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule, const char *cmd) +static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule) { + const char *cmd = "schtasks"; int result; struct child_process child = CHILD_PROCESS_INIT; const char *xml; @@ -1736,6 +1877,8 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority char *name = schtasks_task_name(frequency); struct strbuf tfilename = STRBUF_INIT; + get_schedule_cmd(&cmd, NULL); + strbuf_addf(&tfilename, "%s/schedule_%s_XXXXXX", get_git_common_dir(), frequency); tfile = xmks_tempfile(tfilename.buf); @@ -1840,28 +1983,52 @@ static int schtasks_schedule_task(const char 
*exec_path, enum schedule_priority return result; } -static int schtasks_schedule_tasks(const char *cmd) +static int schtasks_schedule_tasks(void) { const char *exec_path = git_exec_path(); - return schtasks_schedule_task(exec_path, SCHEDULE_HOURLY, cmd) || - schtasks_schedule_task(exec_path, SCHEDULE_DAILY, cmd) || - schtasks_schedule_task(exec_path, SCHEDULE_WEEKLY, cmd); + return schtasks_schedule_task(exec_path, SCHEDULE_HOURLY) || + schtasks_schedule_task(exec_path, SCHEDULE_DAILY) || + schtasks_schedule_task(exec_path, SCHEDULE_WEEKLY); } -static int schtasks_update_schedule(int run_maintenance, int fd, const char *cmd) +static int schtasks_update_schedule(int run_maintenance, int fd) { if (run_maintenance) - return schtasks_schedule_tasks(cmd); + return schtasks_schedule_tasks(); else - return schtasks_remove_tasks(cmd); + return schtasks_remove_tasks(); +} + +static int is_crontab_available(void) +{ + const char *cmd = "crontab"; + int is_available; + struct child_process child = CHILD_PROCESS_INIT; + + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + + strvec_split(&child.args, cmd); + strvec_push(&child.args, "-l"); + child.no_stdin = 1; + child.no_stdout = 1; + child.no_stderr = 1; + child.silent_exec_failure = 1; + + if (start_command(&child)) + return 0; + /* Ignore exit code, as an empty crontab will return error. */ + finish_command(&child); + return 1; } #define BEGIN_LINE "# BEGIN GIT MAINTENANCE SCHEDULE" #define END_LINE "# END GIT MAINTENANCE SCHEDULE" -static int crontab_update_schedule(int run_maintenance, int fd, const char *cmd) +static int crontab_update_schedule(int run_maintenance, int fd) { + const char *cmd = "crontab"; int result = 0; int in_old_region = 0; struct child_process crontab_list = CHILD_PROCESS_INIT; @@ -1869,6 +2036,7 @@ static int crontab_update_schedule(int run_maintenance, int fd, const char *cmd) FILE *cron_list, *cron_in; struct strbuf line = STRBUF_INIT; + get_schedule_cmd(&cmd, NULL); strvec_split(&crontab_list.args, cmd); strvec_push(&crontab_list.args, "-l"); crontab_list.in = -1; @@ -1945,66 +2113,376 @@ done_editing: return result; } +static int real_is_systemd_timer_available(void) +{ + struct child_process child = CHILD_PROCESS_INIT; + + strvec_pushl(&child.args, "systemctl", "--user", "list-timers", NULL); + child.no_stdin = 1; + child.no_stdout = 1; + child.no_stderr = 1; + child.silent_exec_failure = 1; + + if (start_command(&child)) + return 0; + if (finish_command(&child)) + return 0; + return 1; +} + +static int is_systemd_timer_available(void) +{ + const char *cmd = "systemctl"; + int is_available; + + if (get_schedule_cmd(&cmd, &is_available)) + return is_available; + + return real_is_systemd_timer_available(); +} + +static char *xdg_config_home_systemd(const char *filename) +{ + return xdg_config_home_for("systemd/user", filename); +} + +static int systemd_timer_enable_unit(int enable, + enum schedule_priority schedule) +{ + const char *cmd = "systemctl"; + struct child_process child = CHILD_PROCESS_INIT; + const char *frequency = get_frequency(schedule); + + /* + * Disabling the systemd unit while it is already disabled makes + * systemctl print an error. + * Let's ignore it since it means we already are in the expected state: + * the unit is disabled. + * + * On the other hand, enabling a systemd unit which is already enabled + * produces no error. + */ + if (!enable) + child.no_stderr = 1; + + get_schedule_cmd(&cmd, NULL); + strvec_split(&child.args, cmd); + strvec_pushl(&child.args, "--user", enable ? 
"enable" : "disable", + "--now", NULL); + strvec_pushf(&child.args, "git-maintenance@%s.timer", frequency); + + if (start_command(&child)) + return error(_("failed to start systemctl")); + if (finish_command(&child)) + /* + * Disabling an already disabled systemd unit makes + * systemctl fail. + * Let's ignore this failure. + * + * Enabling an enabled systemd unit doesn't fail. + */ + if (enable) + return error(_("failed to run systemctl")); + return 0; +} + +static int systemd_timer_delete_unit_templates(void) +{ + int ret = 0; + char *filename = xdg_config_home_systemd("git-maintenance@.timer"); + if (unlink(filename) && !is_missing_file_error(errno)) + ret = error_errno(_("failed to delete '%s'"), filename); + FREE_AND_NULL(filename); + + filename = xdg_config_home_systemd("git-maintenance@.service"); + if (unlink(filename) && !is_missing_file_error(errno)) + ret = error_errno(_("failed to delete '%s'"), filename); + + free(filename); + return ret; +} + +static int systemd_timer_delete_units(void) +{ + return systemd_timer_enable_unit(0, SCHEDULE_HOURLY) || + systemd_timer_enable_unit(0, SCHEDULE_DAILY) || + systemd_timer_enable_unit(0, SCHEDULE_WEEKLY) || + systemd_timer_delete_unit_templates(); +} + +static int systemd_timer_write_unit_templates(const char *exec_path) +{ + char *filename; + FILE *file; + const char *unit; + + filename = xdg_config_home_systemd("git-maintenance@.timer"); + if (safe_create_leading_directories(filename)) { + error(_("failed to create directories for '%s'"), filename); + goto error; + } + file = fopen_or_warn(filename, "w"); + if (file == NULL) + goto error; + + unit = "# This file was created and is maintained by Git.\n" + "# Any edits made in this file might be replaced in the future\n" + "# by a Git command.\n" + "\n" + "[Unit]\n" + "Description=Optimize Git repositories data\n" + "\n" + "[Timer]\n" + "OnCalendar=%i\n" + "Persistent=true\n" + "\n" + "[Install]\n" + "WantedBy=timers.target\n"; + if (fputs(unit, file) == EOF) { + error(_("failed to write to '%s'"), filename); + fclose(file); + goto error; + } + if (fclose(file) == EOF) { + error_errno(_("failed to flush '%s'"), filename); + goto error; + } + free(filename); + + filename = xdg_config_home_systemd("git-maintenance@.service"); + file = fopen_or_warn(filename, "w"); + if (file == NULL) + goto error; + + unit = "# This file was created and is maintained by Git.\n" + "# Any edits made in this file might be replaced in the future\n" + "# by a Git command.\n" + "\n" + "[Unit]\n" + "Description=Optimize Git repositories data\n" + "\n" + "[Service]\n" + "Type=oneshot\n" + "ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%i\n" + "LockPersonality=yes\n" + "MemoryDenyWriteExecute=yes\n" + "NoNewPrivileges=yes\n" + "RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6\n" + "RestrictNamespaces=yes\n" + "RestrictRealtime=yes\n" + "RestrictSUIDSGID=yes\n" + "SystemCallArchitectures=native\n" + "SystemCallFilter=@system-service\n"; + if (fprintf(file, unit, exec_path, exec_path) < 0) { + error(_("failed to write to '%s'"), filename); + fclose(file); + goto error; + } + if (fclose(file) == EOF) { + error_errno(_("failed to flush '%s'"), filename); + goto error; + } + free(filename); + return 0; + +error: + free(filename); + systemd_timer_delete_unit_templates(); + return -1; +} + +static int systemd_timer_setup_units(void) +{ + const char *exec_path = git_exec_path(); + + int ret = systemd_timer_write_unit_templates(exec_path) || + 
systemd_timer_enable_unit(1, SCHEDULE_HOURLY) || + systemd_timer_enable_unit(1, SCHEDULE_DAILY) || + systemd_timer_enable_unit(1, SCHEDULE_WEEKLY); + if (ret) + systemd_timer_delete_units(); + return ret; +} + +static int systemd_timer_update_schedule(int run_maintenance, int fd) +{ + if (run_maintenance) + return systemd_timer_setup_units(); + else + return systemd_timer_delete_units(); +} + +enum scheduler { + SCHEDULER_INVALID = -1, + SCHEDULER_AUTO, + SCHEDULER_CRON, + SCHEDULER_SYSTEMD, + SCHEDULER_LAUNCHCTL, + SCHEDULER_SCHTASKS, +}; + +static const struct { + const char *name; + int (*is_available)(void); + int (*update_schedule)(int run_maintenance, int fd); +} scheduler_fn[] = { + [SCHEDULER_CRON] = { + .name = "crontab", + .is_available = is_crontab_available, + .update_schedule = crontab_update_schedule, + }, + [SCHEDULER_SYSTEMD] = { + .name = "systemctl", + .is_available = is_systemd_timer_available, + .update_schedule = systemd_timer_update_schedule, + }, + [SCHEDULER_LAUNCHCTL] = { + .name = "launchctl", + .is_available = is_launchctl_available, + .update_schedule = launchctl_update_schedule, + }, + [SCHEDULER_SCHTASKS] = { + .name = "schtasks", + .is_available = is_schtasks_available, + .update_schedule = schtasks_update_schedule, + }, +}; + +static enum scheduler parse_scheduler(const char *value) +{ + if (!value) + return SCHEDULER_INVALID; + else if (!strcasecmp(value, "auto")) + return SCHEDULER_AUTO; + else if (!strcasecmp(value, "cron") || !strcasecmp(value, "crontab")) + return SCHEDULER_CRON; + else if (!strcasecmp(value, "systemd") || + !strcasecmp(value, "systemd-timer")) + return SCHEDULER_SYSTEMD; + else if (!strcasecmp(value, "launchctl")) + return SCHEDULER_LAUNCHCTL; + else if (!strcasecmp(value, "schtasks")) + return SCHEDULER_SCHTASKS; + else + return SCHEDULER_INVALID; +} + +static int maintenance_opt_scheduler(const struct option *opt, const char *arg, + int unset) +{ + enum scheduler *scheduler = opt->value; + + BUG_ON_OPT_NEG(unset); + + *scheduler = parse_scheduler(arg); + if (*scheduler == SCHEDULER_INVALID) + return error(_("unrecognized --scheduler argument '%s'"), arg); + return 0; +} + +struct maintenance_start_opts { + enum scheduler scheduler; +}; + +static enum scheduler resolve_scheduler(enum scheduler scheduler) +{ + if (scheduler != SCHEDULER_AUTO) + return scheduler; + #if defined(__APPLE__) -static const char platform_scheduler[] = "launchctl"; + return SCHEDULER_LAUNCHCTL; + #elif defined(GIT_WINDOWS_NATIVE) -static const char platform_scheduler[] = "schtasks"; + return SCHEDULER_SCHTASKS; + +#elif defined(__linux__) + if (is_systemd_timer_available()) + return SCHEDULER_SYSTEMD; + else if (is_crontab_available()) + return SCHEDULER_CRON; + else + die(_("neither systemd timers nor crontab are available")); + #else -static const char platform_scheduler[] = "crontab"; + return SCHEDULER_CRON; #endif +} -static int update_background_schedule(int enable) +static void validate_scheduler(enum scheduler scheduler) { - int result; - const char *scheduler = platform_scheduler; - const char *cmd = scheduler; - char *testing; + if (scheduler == SCHEDULER_INVALID) + BUG("invalid scheduler"); + if (scheduler == SCHEDULER_AUTO) + BUG("resolve_scheduler should have been called before"); + + if (!scheduler_fn[scheduler].is_available()) + die(_("%s scheduler is not available"), + scheduler_fn[scheduler].name); +} + +static int update_background_schedule(const struct maintenance_start_opts *opts, + int enable) +{ + unsigned int i; + int result = 0; struct 
lock_file lk; char *lock_path = xstrfmt("%s/schedule", the_repository->objects->odb->path); - testing = xstrdup_or_null(getenv("GIT_TEST_MAINT_SCHEDULER")); - if (testing) { - char *sep = strchr(testing, ':'); - if (!sep) - die("GIT_TEST_MAINT_SCHEDULER unparseable: %s", testing); - *sep = '\0'; - scheduler = testing; - cmd = sep + 1; + if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) { + free(lock_path); + return error(_("another process is scheduling background maintenance")); } - if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) { - result = error(_("another process is scheduling background maintenance")); - goto cleanup; + for (i = 1; i < ARRAY_SIZE(scheduler_fn); i++) { + if (enable && opts->scheduler == i) + continue; + if (!scheduler_fn[i].is_available()) + continue; + scheduler_fn[i].update_schedule(0, get_lock_file_fd(&lk)); } - if (!strcmp(scheduler, "launchctl")) - result = launchctl_update_schedule(enable, get_lock_file_fd(&lk), cmd); - else if (!strcmp(scheduler, "schtasks")) - result = schtasks_update_schedule(enable, get_lock_file_fd(&lk), cmd); - else if (!strcmp(scheduler, "crontab")) - result = crontab_update_schedule(enable, get_lock_file_fd(&lk), cmd); - else - die("unknown background scheduler: %s", scheduler); + if (enable) + result = scheduler_fn[opts->scheduler].update_schedule( + 1, get_lock_file_fd(&lk)); rollback_lock_file(&lk); -cleanup: free(lock_path); - free(testing); return result; } -static int maintenance_start(void) +static const char *const builtin_maintenance_start_usage[] = { + N_("git maintenance start [--scheduler=<scheduler>]"), + NULL +}; + +static int maintenance_start(int argc, const char **argv, const char *prefix) { + struct maintenance_start_opts opts = { 0 }; + struct option options[] = { + OPT_CALLBACK_F( + 0, "scheduler", &opts.scheduler, N_("scheduler"), + N_("scheduler to trigger git maintenance run"), + PARSE_OPT_NONEG, maintenance_opt_scheduler), + OPT_END() + }; + + argc = parse_options(argc, argv, prefix, options, + builtin_maintenance_start_usage, 0); + if (argc) + usage_with_options(builtin_maintenance_start_usage, options); + + opts.scheduler = resolve_scheduler(opts.scheduler); + validate_scheduler(opts.scheduler); + if (maintenance_register()) warning(_("failed to add repo to global config")); - - return update_background_schedule(1); + return update_background_schedule(&opts, 1); } static int maintenance_stop(void) { - return update_background_schedule(0); + return update_background_schedule(NULL, 0); } static const char builtin_maintenance_usage[] = N_("git maintenance <subcommand> [<options>]"); @@ -2018,7 +2496,7 @@ int cmd_maintenance(int argc, const char **argv, const char *prefix) if (!strcmp(argv[1], "run")) return maintenance_run(argc - 1, argv + 1, prefix); if (!strcmp(argv[1], "start")) - return maintenance_start(); + return maintenance_start(argc - 1, argv + 1, prefix); if (!strcmp(argv[1], "stop")) return maintenance_stop(); if (!strcmp(argv[1], "register")) diff --git a/builtin/grep.c b/builtin/grep.c index 7d2f8e5adb..8af5249a7b 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -65,6 +65,9 @@ static int todo_done; /* Has all work items been added? */ static int all_work_added; +static struct repository **repos_to_free; +static size_t repos_to_free_nr, repos_to_free_alloc; + /* This lock protects all the variables above. 
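
The repos_to_free bookkeeping added at the top of builtin/grep.c defers freeing submodule repositories until all work items are done (note that repo_clear() is no longer called inside grep_submodule()). A standalone sketch of that deferred-cleanup pattern, using plain libc and hypothetical names rather than git's ALLOC_GROW and struct repository:

#include <stdlib.h>
#include <string.h>

/* Stand-in for git's "struct repository"; names here are made up. */
struct repo { char name[64]; };

static struct repo **to_free;
static size_t to_free_nr, to_free_alloc;

/* Remember a heap-allocated repo so it can be released later. */
static void remember_repo(struct repo *r)
{
	if (to_free_nr == to_free_alloc) {
		size_t new_alloc = to_free_alloc ? 2 * to_free_alloc : 8;
		struct repo **grown = realloc(to_free, new_alloc * sizeof(*grown));
		if (!grown)
			abort();
		to_free = grown;
		to_free_alloc = new_alloc;
	}
	to_free[to_free_nr++] = r;
}

/* Release everything in one place, once no work item can use it anymore. */
static void release_repos(void)
{
	size_t i;
	for (i = 0; i < to_free_nr; i++)
		free(to_free[i]);
	free(to_free);
	to_free = NULL;
	to_free_nr = to_free_alloc = 0;
}

int main(void)
{
	struct repo *r = calloc(1, sizeof(*r));
	if (!r)
		return 1;
	strncpy(r->name, "sub/module", sizeof(r->name) - 1);
	remember_repo(r);
	/* ... r may still be referenced by queued work items here ... */
	release_repos();
	return 0;
}
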
*/ static pthread_mutex_t grep_mutex; @@ -168,6 +171,19 @@ static void work_done(struct work_item *w) grep_unlock(); } +static void free_repos(void) +{ + int i; + + for (i = 0; i < repos_to_free_nr; i++) { + repo_clear(repos_to_free[i]); + free(repos_to_free[i]); + } + FREE_AND_NULL(repos_to_free); + repos_to_free_nr = 0; + repos_to_free_alloc = 0; +} + static void *run(void *arg) { int hit = 0; @@ -333,7 +349,7 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid, struct grep_source gs; grep_source_name(opt, filename, tree_name_len, &pathbuf); - grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid); + grep_source_init_oid(&gs, pathbuf.buf, path, oid, opt->repo); strbuf_release(&pathbuf); if (num_threads > 1) { @@ -359,7 +375,7 @@ static int grep_file(struct grep_opt *opt, const char *filename) struct grep_source gs; grep_source_name(opt, filename, 0, &buf); - grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename); + grep_source_init_file(&gs, buf.buf, filename); strbuf_release(&buf); if (num_threads > 1) { @@ -415,19 +431,21 @@ static int grep_submodule(struct grep_opt *opt, const struct object_id *oid, const char *filename, const char *path, int cached) { - struct repository subrepo; + struct repository *subrepo; struct repository *superproject = opt->repo; - const struct submodule *sub; struct grep_opt subopt; - int hit; - - sub = submodule_from_path(superproject, null_oid(), path); + int hit = 0; if (!is_submodule_active(superproject, path)) return 0; - if (repo_submodule_init(&subrepo, superproject, sub)) + subrepo = xmalloc(sizeof(*subrepo)); + if (repo_submodule_init(subrepo, superproject, path, null_oid())) { + free(subrepo); return 0; + } + ALLOC_GROW(repos_to_free, repos_to_free_nr + 1, repos_to_free_alloc); + repos_to_free[repos_to_free_nr++] = subrepo; /* * NEEDSWORK: repo_read_gitmodules() might call @@ -438,53 +456,49 @@ static int grep_submodule(struct grep_opt *opt, * subrepo's odbs to the in-memory alternates list. */ obj_read_lock(); - repo_read_gitmodules(&subrepo, 0); + repo_read_gitmodules(subrepo, 0); /* - * NEEDSWORK: This adds the submodule's object directory to the list of - * alternates for the single in-memory object store. This has some bad - * consequences for memory (processed objects will never be freed) and - * performance (this increases the number of pack files git has to pay - * attention to, to the sum of the number of pack files in all the - * repositories processed so far). This can be removed once the object - * store is no longer global and instead is a member of the repository - * object. + * All code paths tested by test code no longer need submodule ODBs to + * be added as alternates, but add it to the list just in case. + * Submodule ODBs added through add_submodule_odb_by_path() will be + * lazily registered as alternates when needed (and except in an + * unexpected code interaction, it won't be needed). 
*/ - add_to_alternates_memory(subrepo.objects->odb->path); + add_submodule_odb_by_path(subrepo->objects->odb->path); obj_read_unlock(); memcpy(&subopt, opt, sizeof(subopt)); - subopt.repo = &subrepo; + subopt.repo = subrepo; if (oid) { - struct object *object; + enum object_type object_type; struct tree_desc tree; void *data; unsigned long size; struct strbuf base = STRBUF_INIT; obj_read_lock(); - object = parse_object_or_die(oid, NULL); + object_type = oid_object_info(subrepo, oid, NULL); obj_read_unlock(); - data = read_object_with_reference(&subrepo, - &object->oid, tree_type, + data = read_object_with_reference(subrepo, + oid, tree_type, &size, NULL); if (!data) - die(_("unable to read tree (%s)"), oid_to_hex(&object->oid)); + die(_("unable to read tree (%s)"), oid_to_hex(oid)); strbuf_addstr(&base, filename); strbuf_addch(&base, '/'); init_tree_desc(&tree, data, size); hit = grep_tree(&subopt, pathspec, &tree, &base, base.len, - object->type == OBJ_COMMIT); + object_type == OBJ_COMMIT); strbuf_release(&base); free(data); } else { hit = grep_cache(&subopt, pathspec, cached); } - repo_clear(&subrepo); return hit; } @@ -1182,5 +1196,6 @@ int cmd_grep(int argc, const char **argv, const char *prefix) run_pager(&opt, prefix); clear_pathspec(&pathspec); free_grep_patterns(&opt); + free_repos(); return !hit; } diff --git a/builtin/hash-object.c b/builtin/hash-object.c index 640ef4ded5..c7b3ad74c6 100644 --- a/builtin/hash-object.c +++ b/builtin/hash-object.c @@ -53,9 +53,7 @@ static void hash_object(const char *path, const char *type, const char *vpath, unsigned flags, int literally) { int fd; - fd = open(path, O_RDONLY); - if (fd < 0) - die_errno("Cannot open '%s'", path); + fd = xopen(path, O_RDONLY); hash_fd(fd, type, vpath, flags, literally); } @@ -117,7 +115,7 @@ int cmd_hash_object(int argc, const char **argv, const char *prefix) prefix = setup_git_directory_gently(&nongit); if (vpath && prefix) - vpath = xstrdup(prefix_filename(prefix, vpath)); + vpath = prefix_filename(prefix, vpath); git_config(git_default_config, NULL); diff --git a/builtin/help.c b/builtin/help.c index b7eec06c3d..7731659765 100644 --- a/builtin/help.c +++ b/builtin/help.c @@ -467,11 +467,14 @@ static void get_html_page_path(struct strbuf *page_path, const char *page) if (!html_path) html_path = to_free = system_path(GIT_HTML_PATH); - /* Check that we have a git documentation directory. */ + /* + * Check that the page we're looking for exists. 
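
The builtin/help.c hunk nearby stops probing a hard-coded git.html and instead checks for the page actually being requested. A minimal sketch of such a check, with snprintf() and stat() standing in for git's mkpath(), and a documentation path that is purely illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* Return 1 if "<dir>/<page>.html" exists as a regular file. */
static int html_page_exists(const char *dir, const char *page)
{
	char path[4096];
	struct stat st;

	if (strstr(dir, "://"))
		return 1;	/* remote URL: skip the check, as the hunk does */
	if (snprintf(path, sizeof(path), "%s/%s.html", dir, page) >= (int)sizeof(path))
		return 0;	/* path too long */
	return !stat(path, &st) && S_ISREG(st.st_mode);
}

int main(void)
{
	if (!html_page_exists("/usr/share/doc/git/html", "git-commit"))
		fprintf(stderr, "documentation file not found\n");
	return 0;
}
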
+ */ if (!strstr(html_path, "://")) { - if (stat(mkpath("%s/git.html", html_path), &st) + if (stat(mkpath("%s/%s.html", html_path, page), &st) || !S_ISREG(st.st_mode)) - die("'%s': not a documentation directory.", html_path); + die("'%s/%s.html': documentation file not found.", + html_path, page); } strbuf_init(page_path, 0); diff --git a/builtin/index-pack.c b/builtin/index-pack.c index 8336466865..7ce69c087e 100644 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -122,6 +122,7 @@ static int strict; static int do_fsck_object; static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES; static int verbose; +static const char *progress_title; static int show_resolving_progress; static int show_stat; static int check_self_contained_and_connected; @@ -187,9 +188,7 @@ static void init_thread(void) pthread_key_create(&key, NULL); CALLOC_ARRAY(thread_data, nr_threads); for (i = 0; i < nr_threads; i++) { - thread_data[i].pack_fd = open(curr_pack, O_RDONLY); - if (thread_data[i].pack_fd == -1) - die_errno(_("unable to open %s"), curr_pack); + thread_data[i].pack_fd = xopen(curr_pack, O_RDONLY); } threads_active = 1; @@ -338,15 +337,11 @@ static const char *open_pack_file(const char *pack_name) "pack/tmp_pack_XXXXXX"); pack_name = strbuf_detach(&tmp_file, NULL); } else { - output_fd = open(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600); - if (output_fd < 0) - die_errno(_("unable to create '%s'"), pack_name); + output_fd = xopen(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600); } nothread_data.pack_fd = output_fd; } else { - input_fd = open(pack_name, O_RDONLY); - if (input_fd < 0) - die_errno(_("cannot open packfile '%s'"), pack_name); + input_fd = xopen(pack_name, O_RDONLY); output_fd = -1; nothread_data.pack_fd = input_fd; } @@ -1157,6 +1152,7 @@ static void parse_pack_objects(unsigned char *hash) if (verbose) progress = start_progress( + progress_title ? progress_title : from_stdin ? 
_("Receiving objects") : _("Indexing objects"), nr_objects); for (i = 0; i < nr_objects; i++) { @@ -1481,6 +1477,22 @@ static void write_special_file(const char *suffix, const char *msg, strbuf_release(&name_buf); } +static void rename_tmp_packfile(const char **final_name, + const char *curr_name, + struct strbuf *name, unsigned char *hash, + const char *ext, int make_read_only_if_same) +{ + if (*final_name != curr_name) { + if (!*final_name) + *final_name = odb_pack_name(name, hash, ext); + if (finalize_object_file(curr_name, *final_name)) + die(_("unable to rename temporary '*.%s' file to '%s"), + ext, *final_name); + } else if (make_read_only_if_same) { + chmod(*final_name, 0444); + } +} + static void final(const char *final_pack_name, const char *curr_pack_name, const char *final_index_name, const char *curr_index_name, const char *final_rev_index_name, const char *curr_rev_index_name, @@ -1509,31 +1521,13 @@ static void final(const char *final_pack_name, const char *curr_pack_name, write_special_file("promisor", promisor_msg, final_pack_name, hash, NULL); - if (final_pack_name != curr_pack_name) { - if (!final_pack_name) - final_pack_name = odb_pack_name(&pack_name, hash, "pack"); - if (finalize_object_file(curr_pack_name, final_pack_name)) - die(_("cannot store pack file")); - } else if (from_stdin) - chmod(final_pack_name, 0444); - - if (final_index_name != curr_index_name) { - if (!final_index_name) - final_index_name = odb_pack_name(&index_name, hash, "idx"); - if (finalize_object_file(curr_index_name, final_index_name)) - die(_("cannot store index file")); - } else - chmod(final_index_name, 0444); - - if (curr_rev_index_name) { - if (final_rev_index_name != curr_rev_index_name) { - if (!final_rev_index_name) - final_rev_index_name = odb_pack_name(&rev_index_name, hash, "rev"); - if (finalize_object_file(curr_rev_index_name, final_rev_index_name)) - die(_("cannot store reverse index file")); - } else - chmod(final_rev_index_name, 0444); - } + rename_tmp_packfile(&final_pack_name, curr_pack_name, &pack_name, + hash, "pack", from_stdin); + if (curr_rev_index_name) + rename_tmp_packfile(&final_rev_index_name, curr_rev_index_name, + &rev_index_name, hash, "rev", 1); + rename_tmp_packfile(&final_index_name, curr_index_name, &index_name, + hash, "idx", 1); if (do_fsck_object) { struct packed_git *p; @@ -1806,6 +1800,10 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) input_len = sizeof(*hdr); } else if (!strcmp(arg, "-v")) { verbose = 1; + } else if (!strcmp(arg, "--progress-title")) { + if (progress_title || (i+1) >= argc) + usage(index_pack_usage); + progress_title = argv[++i]; } else if (!strcmp(arg, "--show-resolving-progress")) { show_resolving_progress = 1; } else if (!strcmp(arg, "--report-end-of-input")) { diff --git a/builtin/ls-files.c b/builtin/ls-files.c index 29a26ad8ae..a2000ed6bf 100644 --- a/builtin/ls-files.c +++ b/builtin/ls-files.c @@ -209,10 +209,8 @@ static void show_submodule(struct repository *superproject, struct dir_struct *dir, const char *path) { struct repository subrepo; - const struct submodule *sub = submodule_from_path(superproject, - null_oid(), path); - if (repo_submodule_init(&subrepo, superproject, sub)) + if (repo_submodule_init(&subrepo, superproject, path, null_oid())) return; if (repo_read_index(&subrepo) < 0) @@ -614,7 +612,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix) struct option builtin_ls_files_options[] = { /* Think twice before adding "--nul" synonym to this */ OPT_SET_INT('z', NULL, 
&line_terminator, - N_("paths are separated with NUL character"), '\0'), + N_("separate paths with the NUL character"), '\0'), OPT_BOOL('t', NULL, &show_tag, N_("identify the file status with tags")), OPT_BOOL('v', NULL, &show_valid_bit, @@ -651,7 +649,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix) N_("skip files matching pattern"), PARSE_OPT_NONEG, option_parse_exclude), OPT_CALLBACK_F('X', "exclude-from", &dir, N_("file"), - N_("exclude patterns are read from <file>"), + N_("read exclude patterns from <file>"), PARSE_OPT_NONEG, option_parse_exclude_from), OPT_STRING(0, "exclude-per-directory", &dir.exclude_per_dir, N_("file"), N_("read additional per-directory exclude patterns in <file>")), diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c index 1794548c71..f4fd823af8 100644 --- a/builtin/ls-remote.c +++ b/builtin/ls-remote.c @@ -84,6 +84,8 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix) PARSE_OPT_STOP_AT_NON_OPTION); dest = argv[0]; + packet_trace_identity("ls-remote"); + UNLEAK(sorting); if (argc > 1) { diff --git a/builtin/mailsplit.c b/builtin/mailsplit.c index 664400b816..7baef30569 100644 --- a/builtin/mailsplit.c +++ b/builtin/mailsplit.c @@ -75,9 +75,7 @@ static int split_one(FILE *mbox, const char *name, int allow_bare) fprintf(stderr, "corrupt mailbox\n"); exit(1); } - fd = open(name, O_WRONLY | O_CREAT | O_EXCL, 0666); - if (fd < 0) - die_errno("cannot open output file '%s'", name); + fd = xopen(name, O_WRONLY | O_CREAT | O_EXCL, 0666); output = xfdopen(fd, "w"); /* Copy it out, while searching for a line that begins with diff --git a/builtin/merge.c b/builtin/merge.c index febb0c99c9..3fbdacc7db 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -469,7 +469,6 @@ static void finish(struct commit *head_commit, * We ignore errors in 'gc --auto', since the * user should see them. */ - close_object_store(the_repository->objects); run_auto_maintenance(verbosity < 0); } } @@ -1138,9 +1137,7 @@ static void handle_fetch_head(struct commit_list **remotes, struct strbuf *merge merge_names = &fetch_head_file; filename = git_path_fetch_head(the_repository); - fd = open(filename, O_RDONLY); - if (fd < 0) - die_errno(_("could not open '%s' for reading"), filename); + fd = xopen(filename, O_RDONLY); if (strbuf_read(merge_names, fd, 0) < 0) die_errno(_("could not read '%s'"), filename); @@ -1278,6 +1275,9 @@ int cmd_merge(int argc, const char **argv, const char *prefix) if (argc == 2 && !strcmp(argv[1], "-h")) usage_with_options(builtin_merge_usage, builtin_merge_options); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + /* * Check if we are _not_ on a detached HEAD, i.e. if there is a * current branch. @@ -1370,14 +1370,14 @@ int cmd_merge(int argc, const char **argv, const char *prefix) * There is no unmerged entry, don't advise 'git * add/rm <file>', just 'git commit'. 
*/ - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) die(_("You have not concluded your merge (MERGE_HEAD exists).\n" "Please, commit your changes before you merge.")); else die(_("You have not concluded your merge (MERGE_HEAD exists).")); } if (ref_exists("CHERRY_PICK_HEAD")) { - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) die(_("You have not concluded your cherry-pick (CHERRY_PICK_HEAD exists).\n" "Please, commit your changes before you merge.")); else diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c index 8ff0dee2ec..0fb2439b0f 100644 --- a/builtin/multi-pack-index.c +++ b/builtin/multi-pack-index.c @@ -52,7 +52,6 @@ static struct opts_multi_pack_index { static struct option common_opts[] = { OPT_FILENAME(0, "object-dir", &opts.object_dir, N_("object directory containing set of packfile and pack-index pairs")), - OPT_BIT(0, "progress", &opts.flags, N_("force progress reporting"), MIDX_PROGRESS), OPT_END(), }; @@ -68,6 +67,10 @@ static int cmd_multi_pack_index_write(int argc, const char **argv) OPT_STRING(0, "preferred-pack", &opts.preferred_pack, N_("preferred-pack"), N_("pack for reuse when computing a multi-pack bitmap")), + OPT_BIT(0, "bitmap", &opts.flags, N_("write multi-pack bitmap"), + MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX), + OPT_BIT(0, "progress", &opts.flags, + N_("force progress reporting"), MIDX_PROGRESS), OPT_END(), }; @@ -75,6 +78,8 @@ static int cmd_multi_pack_index_write(int argc, const char **argv) trace2_cmd_mode(argv[0]); + if (isatty(2)) + opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_write_usage, PARSE_OPT_KEEP_UNKNOWN); @@ -90,10 +95,18 @@ static int cmd_multi_pack_index_write(int argc, const char **argv) static int cmd_multi_pack_index_verify(int argc, const char **argv) { - struct option *options = common_opts; + struct option *options; + static struct option builtin_multi_pack_index_verify_options[] = { + OPT_BIT(0, "progress", &opts.flags, + N_("force progress reporting"), MIDX_PROGRESS), + OPT_END(), + }; + options = add_common_options(builtin_multi_pack_index_verify_options); trace2_cmd_mode(argv[0]); + if (isatty(2)) + opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_verify_usage, PARSE_OPT_KEEP_UNKNOWN); @@ -106,10 +119,18 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv) static int cmd_multi_pack_index_expire(int argc, const char **argv) { - struct option *options = common_opts; + struct option *options; + static struct option builtin_multi_pack_index_expire_options[] = { + OPT_BIT(0, "progress", &opts.flags, + N_("force progress reporting"), MIDX_PROGRESS), + OPT_END(), + }; + options = add_common_options(builtin_multi_pack_index_expire_options); trace2_cmd_mode(argv[0]); + if (isatty(2)) + opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_expire_usage, PARSE_OPT_KEEP_UNKNOWN); @@ -126,6 +147,8 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv) static struct option builtin_multi_pack_index_repack_options[] = { OPT_MAGNITUDE(0, "batch-size", &opts.batch_size, N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")), + OPT_BIT(0, "progress", &opts.flags, + N_("force progress reporting"), MIDX_PROGRESS), OPT_END(), }; @@ -133,6 +156,8 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv) trace2_cmd_mode(argv[0]); + if 
(isatty(2)) + opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_repack_usage, @@ -154,8 +179,6 @@ int cmd_multi_pack_index(int argc, const char **argv, git_config(git_default_config, NULL); - if (isatty(2)) - opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, prefix, builtin_multi_pack_index_options, builtin_multi_pack_index_usage, @@ -164,7 +187,7 @@ int cmd_multi_pack_index(int argc, const char **argv, if (!opts.object_dir) opts.object_dir = get_object_directory(); - if (argc == 0) + if (!argc) goto usage; if (!strcmp(argv[0], "repack")) @@ -175,10 +198,9 @@ int cmd_multi_pack_index(int argc, const char **argv, return cmd_multi_pack_index_verify(argc, argv); else if (!strcmp(argv[0], "expire")) return cmd_multi_pack_index_expire(argc, argv); - else { - error(_("unrecognized subcommand: %s"), argv[0]); + + error(_("unrecognized subcommand: %s"), argv[0]); usage: - usage_with_options(builtin_multi_pack_index_usage, - builtin_multi_pack_index_options); - } + usage_with_options(builtin_multi_pack_index_usage, + builtin_multi_pack_index_options); } diff --git a/builtin/notes.c b/builtin/notes.c index 74bba39ca8..71c59583a1 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -172,9 +172,7 @@ static void prepare_note_data(const struct object_id *object, struct note_data * /* write the template message before editing: */ d->edit_path = git_pathdup("NOTES_EDITMSG"); - fd = open(d->edit_path, O_CREAT | O_TRUNC | O_WRONLY, 0600); - if (fd < 0) - die_errno(_("could not create file '%s'"), d->edit_path); + fd = xopen(d->edit_path, O_CREAT | O_TRUNC | O_WRONLY, 0600); if (d->given) write_or_die(fd, d->buf.buf, d->buf.len); diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index df49f656b9..1a3dd445f8 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -1124,6 +1124,11 @@ static void write_reused_pack(struct hashfile *f) break; offset += ewah_bit_ctz64(word >> offset); + /* + * Can use bit positions directly, even for MIDX + * bitmaps. See comment in try_partial_reuse() + * for why. 
+ */ write_reused_pack_one(pos + offset, f, &w_curs); display_progress(progress_state, ++written); } @@ -1217,6 +1222,7 @@ static void write_pack_file(void) if (!pack_to_stdout) { struct stat st; struct strbuf tmpname = STRBUF_INIT; + char *idx_tmp_name = NULL; /* * Packs are runtime accessed in their mtime @@ -1237,7 +1243,8 @@ static void write_pack_file(void) warning_errno(_("failed utime() on %s"), pack_tmp_name); } - strbuf_addf(&tmpname, "%s-", base_name); + strbuf_addf(&tmpname, "%s-%s.", base_name, + hash_to_hex(hash)); if (write_bitmap_index) { bitmap_writer_set_checksum(hash); @@ -1245,23 +1252,29 @@ static void write_pack_file(void) &to_pack, written_list, nr_written); } - finish_tmp_packfile(&tmpname, pack_tmp_name, + stage_tmp_packfiles(&tmpname, pack_tmp_name, written_list, nr_written, - &pack_idx_opts, hash); + &pack_idx_opts, hash, &idx_tmp_name); if (write_bitmap_index) { - strbuf_addf(&tmpname, "%s.bitmap", hash_to_hex(hash)); + size_t tmpname_len = tmpname.len; + strbuf_addstr(&tmpname, "bitmap"); stop_progress(&progress_state); bitmap_writer_show_progress(progress); bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); - bitmap_writer_build(&to_pack); + if (bitmap_writer_build(&to_pack) < 0) + die(_("failed to write bitmap index")); bitmap_writer_finish(written_list, nr_written, tmpname.buf, write_bitmap_options); write_bitmap_index = 0; + strbuf_setlen(&tmpname, tmpname_len); } + rename_tmp_packfile_idx(&tmpname, &idx_tmp_name); + + free(idx_tmp_name); strbuf_release(&tmpname); free(pack_tmp_name); puts(hash_to_hex(hash)); @@ -3405,13 +3418,9 @@ static void read_object_list_from_stdin(void) } } -/* Remember to update object flag allocation in object.h */ -#define OBJECT_ADDED (1u<<20) - static void show_commit(struct commit *commit, void *data) { add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0); - commit->object.flags |= OBJECT_ADDED; if (write_bitmap_index) index_commit_for_bitmap(commit); @@ -3424,7 +3433,6 @@ static void show_object(struct object *obj, const char *name, void *data) { add_preferred_base_object(name); add_object_entry(&obj->oid, obj->type, name, 0); - obj->flags |= OBJECT_ADDED; if (use_delta_islands) { const char *p; @@ -3505,79 +3513,23 @@ static void show_edge(struct commit *commit) add_preferred_base(&commit->object.oid); } -struct in_pack_object { - off_t offset; - struct object *object; -}; - -struct in_pack { - unsigned int alloc; - unsigned int nr; - struct in_pack_object *array; -}; - -static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack) -{ - in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p); - in_pack->array[in_pack->nr].object = object; - in_pack->nr++; -} - -/* - * Compare the objects in the offset order, in order to emulate the - * "git rev-list --objects" output that produced the pack originally. 
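
The builtin/pack-objects.c hunk around here drops the collect-into-an-array-and-sort-by-offset approach in favor of a flag-filtered callback walk (git's for_each_packed_object() with the PACK_ORDER, LOCAL_ONLY and SKIP_*_KEPT_PACKS flags). A toy sketch of that callback shape, with made-up types and none of git's APIs:

#include <stdio.h>

/* Hypothetical stand-ins for the real pack iterator and its filters. */
struct packed_obj { unsigned id; int local; int kept; };

typedef int (*each_obj_fn)(const struct packed_obj *obj, void *data);

/*
 * Walk objects in pack order, skipping kept and non-local ones, and hand
 * each survivor to the callback.  A non-zero callback result aborts the walk.
 */
static int for_each_obj(const struct packed_obj *objs, size_t nr,
			each_obj_fn fn, void *data)
{
	size_t i;
	for (i = 0; i < nr; i++) {
		if (objs[i].kept || !objs[i].local)
			continue;
		if (fn(&objs[i], data))
			return -1;
	}
	return 0;
}

static int add_entry(const struct packed_obj *obj, void *data)
{
	unsigned *added = data;
	printf("add object %u\n", obj->id);
	(*added)++;
	return 0;
}

int main(void)
{
	struct packed_obj objs[] = {
		{ 1, 1, 0 }, { 2, 0, 0 }, { 3, 1, 1 }, { 4, 1, 0 },
	};
	unsigned added = 0;

	for_each_obj(objs, sizeof(objs) / sizeof(objs[0]), add_entry, &added);
	printf("%u objects added\n", added);
	return 0;
}
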
- */ -static int ofscmp(const void *a_, const void *b_) +static int add_object_in_unpacked_pack(const struct object_id *oid, + struct packed_git *pack, + uint32_t pos, + void *_data) { - struct in_pack_object *a = (struct in_pack_object *)a_; - struct in_pack_object *b = (struct in_pack_object *)b_; - - if (a->offset < b->offset) - return -1; - else if (a->offset > b->offset) - return 1; - else - return oidcmp(&a->object->oid, &b->object->oid); + add_object_entry(oid, OBJ_NONE, "", 0); + return 0; } static void add_objects_in_unpacked_packs(void) { - struct packed_git *p; - struct in_pack in_pack; - uint32_t i; - - memset(&in_pack, 0, sizeof(in_pack)); - - for (p = get_all_packs(the_repository); p; p = p->next) { - struct object_id oid; - struct object *o; - - if (!p->pack_local || p->pack_keep || p->pack_keep_in_core) - continue; - if (open_pack_index(p)) - die(_("cannot open pack index")); - - ALLOC_GROW(in_pack.array, - in_pack.nr + p->num_objects, - in_pack.alloc); - - for (i = 0; i < p->num_objects; i++) { - nth_packed_object_id(&oid, p, i); - o = lookup_unknown_object(the_repository, &oid); - if (!(o->flags & OBJECT_ADDED)) - mark_in_pack_object(o, p, &in_pack); - o->flags |= OBJECT_ADDED; - } - } - - if (in_pack.nr) { - QSORT(in_pack.array, in_pack.nr, ofscmp); - for (i = 0; i < in_pack.nr; i++) { - struct object *o = in_pack.array[i].object; - add_object_entry(&o->oid, o->type, "", 0); - } - } - free(in_pack.array); + if (for_each_packed_object(add_object_in_unpacked_pack, NULL, + FOR_EACH_OBJECT_PACK_ORDER | + FOR_EACH_OBJECT_LOCAL_ONLY | + FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS | + FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS)) + die(_("cannot open pack index")); } static int add_loose_object(const struct object_id *oid, const char *path, diff --git a/builtin/pull.c b/builtin/pull.c index b311ea6b9d..cf6c56e2d8 100644 --- a/builtin/pull.c +++ b/builtin/pull.c @@ -26,6 +26,7 @@ #include "wt-status.h" #include "commit-reach.h" #include "sequencer.h" +#include "packfile.h" /** * Parses the value of --rebase. 
If value is a false value, returns @@ -577,7 +578,7 @@ static int run_fetch(const char *repo, const char **refspecs) strvec_pushv(&args, refspecs); } else if (*refspecs) BUG("refspecs without repo?"); - ret = run_command_v_opt(args.v, RUN_GIT_CMD); + ret = run_command_v_opt(args.v, RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE); strvec_clear(&args); return ret; } diff --git a/builtin/push.c b/builtin/push.c index e8b10a9b7e..4b026ce6c6 100644 --- a/builtin/push.c +++ b/builtin/push.c @@ -289,42 +289,42 @@ static const char message_advice_ref_needs_update[] = static void advise_pull_before_push(void) { - if (!advice_push_non_ff_current || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_NON_FF_CURRENT) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_pull_before_push)); } static void advise_checkout_pull_push(void) { - if (!advice_push_non_ff_matching || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_NON_FF_MATCHING) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_checkout_pull_push)); } static void advise_ref_already_exists(void) { - if (!advice_push_already_exists || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_ALREADY_EXISTS) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_already_exists)); } static void advise_ref_fetch_first(void) { - if (!advice_push_fetch_first || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_FETCH_FIRST) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_fetch_first)); } static void advise_ref_needs_force(void) { - if (!advice_push_needs_force || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_NEEDS_FORCE) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_needs_force)); } static void advise_ref_needs_update(void) { - if (!advice_push_ref_needs_update || !advice_push_update_rejected) + if (!advice_enabled(ADVICE_PUSH_REF_NEEDS_UPDATE) || !advice_enabled(ADVICE_PUSH_UPDATE_REJECTED)) return; advise(_(message_advice_ref_needs_update)); } diff --git a/builtin/rebase.c b/builtin/rebase.c index c284a7ace1..8c6393f6d7 100644 --- a/builtin/rebase.c +++ b/builtin/rebase.c @@ -405,6 +405,7 @@ static int run_sequencer_rebase(struct rebase_options *opts, flags |= opts->root_with_onto ? TODO_LIST_ROOT_WITH_ONTO : 0; flags |= command == ACTION_SHORTEN_OIDS ? TODO_LIST_SHORTEN_IDS : 0; flags |= opts->reapply_cherry_picks ? TODO_LIST_REAPPLY_CHERRY_PICKS : 0; + flags |= opts->flags & REBASE_NO_QUIET ? TODO_LIST_WARN_SKIPPED_CHERRY_PICKS : 0; switch (command) { case ACTION_NONE: { @@ -559,6 +560,9 @@ int cmd_rebase__interactive(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, options, builtin_rebase_interactive_usage, PARSE_OPT_KEEP_ARGV0); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + if (!is_null_oid(&squash_onto)) opts.squash_onto = &squash_onto; @@ -740,7 +744,6 @@ static int finish_rebase(struct rebase_options *opts) delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); unlink(git_path_auto_merge(the_repository)); apply_autostash(state_dir_path("autostash", opts)); - close_object_store(the_repository->objects); /* * We ignore errors in 'git maintenance run --auto', since the * user should see them. 
@@ -762,17 +765,6 @@ static int finish_rebase(struct rebase_options *opts) return ret; } -static struct commit *peel_committish(const char *name) -{ - struct object *obj; - struct object_id oid; - - if (get_oid(name, &oid)) - return NULL; - obj = parse_object(the_repository, &oid); - return (struct commit *)peel_to_type(name, 0, obj, OBJ_COMMIT); -} - static void add_var(struct strbuf *buf, const char *name, const char *value) { if (!value) @@ -1430,6 +1422,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) usage_with_options(builtin_rebase_usage, builtin_rebase_options); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + options.allow_empty_message = 1; git_config(rebase_config, &options); /* options.gpg_sign_opt will be either "-S" or NULL */ @@ -1574,7 +1569,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) die(_("could not move back to %s"), oid_to_hex(&options.orig_head)); remove_branch_state(the_repository, 0); - ret = !!finish_rebase(&options); + ret = finish_rebase(&options); goto cleanup; } case ACTION_QUIT: { @@ -1583,11 +1578,11 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) struct replay_opts replay = REPLAY_OPTS_INIT; replay.action = REPLAY_INTERACTIVE_REBASE; - ret = !!sequencer_remove_state(&replay); + ret = sequencer_remove_state(&replay); } else { strbuf_reset(&buf); strbuf_addstr(&buf, options.state_dir); - ret = !!remove_dir_recursively(&buf, 0); + ret = remove_dir_recursively(&buf, 0); if (ret) error(_("could not remove '%s'"), options.state_dir); @@ -1845,7 +1840,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) if (!strcmp(options.upstream_name, "-")) options.upstream_name = "@{-1}"; } - options.upstream = peel_committish(options.upstream_name); + options.upstream = + lookup_commit_reference_by_name(options.upstream_name); if (!options.upstream) die(_("invalid upstream '%s'"), options.upstream_name); options.upstream_arg = options.upstream_name; @@ -1888,7 +1884,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) options.onto = lookup_commit_or_die(&merge_base, options.onto_name); } else { - options.onto = peel_committish(options.onto_name); + options.onto = + lookup_commit_reference_by_name(options.onto_name); if (!options.onto) die(_("Does not point to a valid commit '%s'"), options.onto_name); @@ -1913,13 +1910,15 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) die_if_checked_out(buf.buf, 1); options.head_name = xstrdup(buf.buf); /* If not is it a valid ref (branch or commit)? */ - } else if (!get_oid(branch_name, &options.orig_head) && - lookup_commit_reference(the_repository, - &options.orig_head)) + } else { + struct commit *commit = + lookup_commit_reference_by_name(branch_name); + if (!commit) + die(_("no such branch/commit '%s'"), + branch_name); + oidcpy(&options.orig_head, &commit->object.oid); options.head_name = NULL; - else - die(_("fatal: no such branch/commit '%s'"), - branch_name); + } } else if (argc == 0) { /* Do not need to switch branches, we are already on it. 
*/ options.head_name = @@ -1959,7 +1958,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) if (require_clean_work_tree(the_repository, "rebase", _("Please commit or stash them."), 1, 1)) { - ret = 1; + ret = -1; goto cleanup; } @@ -1994,7 +1993,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) RESET_HEAD_RUN_POST_CHECKOUT_HOOK, NULL, buf.buf, DEFAULT_REFLOG_ACTION) < 0) { - ret = !!error(_("could not switch to " + ret = error(_("could not switch to " "%s"), options.switch_to); goto cleanup; @@ -2009,7 +2008,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) else printf(_("Current branch %s is up to date.\n"), branch_name); - ret = !!finish_rebase(&options); + ret = finish_rebase(&options); goto cleanup; } else if (!(options.flags & REBASE_NO_QUIET)) ; /* be quiet */ @@ -2087,7 +2086,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) RESET_HEAD_REFS_ONLY, "HEAD", msg.buf, DEFAULT_REFLOG_ACTION); strbuf_release(&msg); - ret = !!finish_rebase(&options); + ret = finish_rebase(&options); goto cleanup; } @@ -2101,7 +2100,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) options.revisions = revisions.buf; run_rebase: - ret = !!run_specific_rebase(&options, action); + ret = run_specific_rebase(&options, action); cleanup: strbuf_release(&buf); @@ -2112,5 +2111,5 @@ cleanup: free(options.strategy); strbuf_release(&options.git_format_patch_opt); free(squash_onto_name); - return ret; + return !!ret; } diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c index 2d1f97e1ca..48960a9575 100644 --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@ -1306,7 +1306,7 @@ static void refuse_unconfigured_deny_delete_current(void) rp_error("%s", _(refuse_unconfigured_deny_delete_current_msg)); } -static int command_singleton_iterator(void *cb_data, struct object_id *oid); +static const struct object_id *command_singleton_iterator(void *cb_data); static int update_shallow_ref(struct command *cmd, struct shallow_info *si) { struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT; @@ -1731,16 +1731,15 @@ static void check_aliased_updates(struct command *commands) string_list_clear(&ref_list, 0); } -static int command_singleton_iterator(void *cb_data, struct object_id *oid) +static const struct object_id *command_singleton_iterator(void *cb_data) { struct command **cmd_list = cb_data; struct command *cmd = *cmd_list; if (!cmd || is_null_oid(&cmd->new_oid)) - return -1; /* end of list */ + return NULL; *cmd_list = NULL; /* this returns only one */ - oidcpy(oid, &cmd->new_oid); - return 0; + return &cmd->new_oid; } static void set_connectivity_errors(struct command *commands, @@ -1770,7 +1769,7 @@ struct iterate_data { struct shallow_info *si; }; -static int iterate_receive_command_list(void *cb_data, struct object_id *oid) +static const struct object_id *iterate_receive_command_list(void *cb_data) { struct iterate_data *data = cb_data; struct command **cmd_list = &data->cmds; @@ -1781,13 +1780,11 @@ static int iterate_receive_command_list(void *cb_data, struct object_id *oid) /* to be checked in update_shallow_ref() */ continue; if (!is_null_oid(&cmd->new_oid) && !cmd->skip_update) { - oidcpy(oid, &cmd->new_oid); *cmd_list = cmd->next; - return 0; + return &cmd->new_oid; } } - *cmd_list = NULL; - return -1; /* end of list */ + return NULL; } static void reject_updates_to_hidden(struct command *commands) @@ -2477,7 +2474,8 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix) struct option options[] 
= { OPT__QUIET(&quiet, N_("quiet")), OPT_HIDDEN_BOOL(0, "stateless-rpc", &stateless_rpc, NULL), - OPT_HIDDEN_BOOL(0, "advertise-refs", &advertise_refs, NULL), + OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs, NULL), + OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"), OPT_HIDDEN_BOOL(0, "reject-thin-pack-for-testing", &reject_thin, NULL), OPT_END() }; @@ -2580,10 +2578,9 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix) proc.no_stdin = 1; proc.stdout_to_stderr = 1; proc.err = use_sideband ? -1 : 0; - proc.git_cmd = 1; + proc.git_cmd = proc.close_object_store = 1; proc.argv = argv_gc_auto; - close_object_store(the_repository->objects); if (!start_command(&proc)) { if (use_sideband) copy_to_sideband(proc.err, -1, NULL); diff --git a/builtin/reflog.c b/builtin/reflog.c index 09541d1c80..bd4c669918 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -629,8 +629,9 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) free_worktrees(worktrees); for (i = 0; i < collected.nr; i++) { struct collected_reflog *e = collected.e[i]; + set_reflog_expiry_param(&cb.cmd, explicit_expiry, e->reflog); - status |= reflog_expire(e->reflog, &e->oid, flags, + status |= reflog_expire(e->reflog, flags, reflog_expiry_prepare, should_expire_reflog_ent, reflog_expiry_cleanup, @@ -642,13 +643,12 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) for (; i < argc; i++) { char *ref; - struct object_id oid; - if (!dwim_log(argv[i], strlen(argv[i]), &oid, &ref)) { + if (!dwim_log(argv[i], strlen(argv[i]), NULL, &ref)) { status |= error(_("%s points nowhere!"), argv[i]); continue; } set_reflog_expiry_param(&cb.cmd, explicit_expiry, ref); - status |= reflog_expire(ref, &oid, flags, + status |= reflog_expire(ref, flags, reflog_expiry_prepare, should_expire_reflog_ent, reflog_expiry_cleanup, @@ -700,7 +700,6 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) for ( ; i < argc; i++) { const char *spec = strstr(argv[i], "@{"); - struct object_id oid; char *ep, *ref; int recno; @@ -709,7 +708,7 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) continue; } - if (!dwim_log(argv[i], spec - argv[i], &oid, &ref)) { + if (!dwim_log(argv[i], spec - argv[i], NULL, &ref)) { status |= error(_("no reflog for '%s'"), argv[i]); continue; } @@ -724,7 +723,7 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) cb.cmd.expire_total = 0; } - status |= reflog_expire(ref, &oid, flags, + status |= reflog_expire(ref, flags, reflog_expiry_prepare, should_expire_reflog_ent, reflog_expiry_cleanup, diff --git a/builtin/repack.c b/builtin/repack.c index 5f9bc74adc..c1a209013b 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -208,10 +208,10 @@ static struct { unsigned optional:1; } exts[] = { {".pack"}, - {".idx"}, {".rev", 1}, {".bitmap", 1}, {".promisor", 1}, + {".idx"}, }; static unsigned populate_pack_exts(char *name) @@ -515,6 +515,10 @@ int cmd_repack(int argc, const char **argv, const char *prefix) if (!(pack_everything & ALL_INTO_ONE) || !is_bare_repository()) write_bitmaps = 0; + } else if (write_bitmaps && + git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0) && + git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 0)) { + write_bitmaps = 0; } if (pack_kept_objects < 0) pack_kept_objects = write_bitmaps > 0; @@ -725,8 +729,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix) update_server_info(0); remove_temporary_files(); - if 
(git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) - write_midx_file(get_object_directory(), NULL, 0); + if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) { + unsigned flags = 0; + if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP, 0)) + flags |= MIDX_WRITE_BITMAP | MIDX_WRITE_REV_INDEX; + write_midx_file(get_object_directory(), NULL, flags); + } string_list_clear(&names, 0); string_list_clear(&rollback, 0); diff --git a/builtin/replace.c b/builtin/replace.c index cd48765911..946938d011 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -507,7 +507,7 @@ static int convert_graft_file(int force) if (!fp) return -1; - advice_graft_file_deprecated = 0; + no_graft_file_deprecated_advice = 1; while (strbuf_getline(&buf, fp) != EOF) { if (*buf.buf == '#') continue; diff --git a/builtin/reset.c b/builtin/reset.c index 43e855cb88..51c9e2f43f 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -412,7 +412,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix) refresh_index(&the_index, flags, NULL, NULL, _("Unstaged changes after reset:")); t_delta_in_ms = (getnanotime() - t_begin) / 1000000; - if (advice_reset_quiet_warning && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) { + if (advice_enabled(ADVICE_RESET_QUIET_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) { printf(_("\nIt took %.2f seconds to enumerate unstaged changes after reset. You can\n" "use '--quiet' to avoid this. Set the config setting reset.quiet to true\n" "to make this the default.\n"), t_delta_in_ms / 1000.0); diff --git a/builtin/revert.c b/builtin/revert.c index 2e13660e4b..51776abea6 100644 --- a/builtin/revert.c +++ b/builtin/revert.c @@ -136,6 +136,9 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts) PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + /* implies allow_empty */ if (opts->keep_redundant_commits) opts->allow_empty = 1; diff --git a/builtin/rm.c b/builtin/rm.c index 8a24c715e0..3b44b807e5 100644 --- a/builtin/rm.c +++ b/builtin/rm.c @@ -55,7 +55,7 @@ static void print_error_files(struct string_list *files_list, strbuf_addf(&err_msg, "\n %s", files_list->items[i].string); - if (advice_rm_hints) + if (advice_enabled(ADVICE_RM_HINTS)) strbuf_addstr(&err_msg, hints_msg); *errs = error("%s", err_msg.buf); strbuf_release(&err_msg); diff --git a/builtin/show-branch.c b/builtin/show-branch.c index d77ce7aeb3..bea4bbf468 100644 --- a/builtin/show-branch.c +++ b/builtin/show-branch.c @@ -482,10 +482,9 @@ static void snarf_refs(int head, int remotes) } } -static int rev_is_head(const char *head, const char *name, - unsigned char *head_sha1, unsigned char *sha1) +static int rev_is_head(const char *head, const char *name) { - if (!head || (head_sha1 && sha1 && !hasheq(head_sha1, sha1))) + if (!head) return 0; skip_prefix(head, "refs/heads/", &head); if (!skip_prefix(name, "refs/heads/", &name)) @@ -806,9 +805,7 @@ int cmd_show_branch(int ac, const char **av, const char *prefix) /* We are only interested in adding the branch * HEAD points at. 
*/ - if (rev_is_head(head, - ref_name[i], - head_oid.hash, NULL)) + if (rev_is_head(head, ref_name[i])) has_head++; } if (!has_head) { @@ -867,10 +864,8 @@ int cmd_show_branch(int ac, const char **av, const char *prefix) if (1 < num_rev || extra < 0) { for (i = 0; i < num_rev; i++) { int j; - int is_head = rev_is_head(head, - ref_name[i], - head_oid.hash, - rev[i]->object.oid.hash); + int is_head = rev_is_head(head, ref_name[i]) && + oideq(&head_oid, &rev[i]->object.oid); if (extra < 0) printf("%c [%s] ", is_head ? '*' : ' ', ref_name[i]); diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c index 8ba9f13787..d0f5c4702b 100644 --- a/builtin/sparse-checkout.c +++ b/builtin/sparse-checkout.c @@ -100,6 +100,98 @@ static int sparse_checkout_list(int argc, const char **argv) return 0; } +static void clean_tracked_sparse_directories(struct repository *r) +{ + int i, was_full = 0; + struct strbuf path = STRBUF_INIT; + size_t pathlen; + struct string_list_item *item; + struct string_list sparse_dirs = STRING_LIST_INIT_DUP; + + /* + * If we are not using cone mode patterns, then we cannot + * delete directories outside of the sparse cone. + */ + if (!r || !r->index || !r->worktree) + return; + if (init_sparse_checkout_patterns(r->index) || + !r->index->sparse_checkout_patterns->use_cone_patterns) + return; + + /* + * Use the sparse index as a data structure to assist finding + * directories that are safe to delete. This conversion to a + * sparse index will not delete directories that contain + * conflicted entries or submodules. + */ + if (!r->index->sparse_index) { + /* + * If something, such as a merge conflict or other concern, + * prevents us from converting to a sparse index, then do + * not try deleting files. + */ + if (convert_to_sparse(r->index, SPARSE_INDEX_MEMORY_ONLY)) + return; + was_full = 1; + } + + strbuf_addstr(&path, r->worktree); + strbuf_complete(&path, '/'); + pathlen = path.len; + + /* + * Collect directories that have gone out of scope but also + * exist on disk, so there is some work to be done. We need to + * store the entries in a list before exploring, since that might + * expand the sparse-index again. + */ + for (i = 0; i < r->index->cache_nr; i++) { + struct cache_entry *ce = r->index->cache[i]; + + if (S_ISSPARSEDIR(ce->ce_mode) && + repo_file_exists(r, ce->name)) + string_list_append(&sparse_dirs, ce->name); + } + + for_each_string_list_item(item, &sparse_dirs) { + struct dir_struct dir = DIR_INIT; + struct pathspec p = { 0 }; + struct strvec s = STRVEC_INIT; + + strbuf_setlen(&path, pathlen); + strbuf_addstr(&path, item->string); + + dir.flags |= DIR_SHOW_IGNORED_TOO; + + setup_standard_excludes(&dir); + strvec_push(&s, path.buf); + + parse_pathspec(&p, PATHSPEC_GLOB, 0, NULL, s.v); + fill_directory(&dir, r->index, &p); + + if (dir.nr) { + warning(_("directory '%s' contains untracked files," + " but is not in the sparse-checkout cone"), + item->string); + } else if (remove_dir_recursively(&path, 0)) { + /* + * Removal is "best effort". If something blocks + * the deletion, then continue with a warning. 
+ */ + warning(_("failed to remove directory '%s'"), + item->string); + } + + dir_clear(&dir); + } + + string_list_clear(&sparse_dirs, 0); + strbuf_release(&path); + + if (was_full) + ensure_full_index(r->index); +} + static int update_working_directory(struct pattern_list *pl) { enum update_sparsity_result result; @@ -141,6 +233,8 @@ static int update_working_directory(struct pattern_list *pl) else rollback_lock_file(&lock_file); + clean_tracked_sparse_directories(r); + r->index->sparse_checkout_patterns = NULL; return result; } diff --git a/builtin/stash.c b/builtin/stash.c index 8f42360ca9..5512f4942c 100644 --- a/builtin/stash.c +++ b/builtin/stash.c @@ -313,6 +313,17 @@ static int reset_head(void) return run_command(&cp); } +static int is_path_a_directory(const char *path) +{ + /* + * This function differs from abspath.c:is_directory() in that + * here we use lstat() instead of stat(); we do not want to + * follow symbolic links here. + */ + struct stat st; + return (!lstat(path, &st) && S_ISDIR(st.st_mode)); +} + static void add_diff_to_buf(struct diff_queue_struct *q, struct diff_options *options, void *data) @@ -320,6 +331,9 @@ static void add_diff_to_buf(struct diff_queue_struct *q, int i; for (i = 0; i < q->nr; i++) { + if (is_path_a_directory(q->queue[i]->one->path)) + continue; + strbuf_addstr(data, q->queue[i]->one->path); /* NUL-terminate: will be fed to update-index -z */ @@ -521,9 +535,6 @@ static int do_apply_stash(const char *prefix, struct stash_info *info, } } - if (info->has_u && restore_untracked(&info->u_tree)) - return error(_("could not restore untracked files from stash")); - init_merge_options(&o, the_repository); o.branch1 = "Updated upstream"; @@ -558,6 +569,9 @@ static int do_apply_stash(const char *prefix, struct stash_info *info, unstage_changes_unless_new(&c_tree); } + if (info->has_u && restore_untracked(&info->u_tree)) + return error(_("could not restore untracked files from stash")); + if (!quiet) { struct child_process cp = CHILD_PROCESS_INIT; diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index ef2776a9e4..88ce6be69c 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -199,34 +199,28 @@ static char *relative_url(const char *remote_url, return strbuf_detach(&sb, NULL); } -static int resolve_relative_url(int argc, const char **argv, const char *prefix) +static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet) { - char *remoteurl = NULL; + char *remoteurl, *resolved_url; char *remote = get_default_remote(); - const char *up_path = NULL; - char *res; - const char *url; - struct strbuf sb = STRBUF_INIT; - - if (argc != 2 && argc != 3) - die("resolve-relative-url only accepts one or two arguments"); - - url = argv[1]; - strbuf_addf(&sb, "remote.%s.url", remote); - free(remote); + struct strbuf remotesb = STRBUF_INIT; - if (git_config_get_string(sb.buf, &remoteurl)) - /* the repository is its own authoritative upstream */ + strbuf_addf(&remotesb, "remote.%s.url", remote); + if (git_config_get_string(remotesb.buf, &remoteurl)) { + if (!quiet) + warning(_("could not look up configuration '%s'. 
" + "Assuming this repository is its own " + "authoritative upstream."), + remotesb.buf); remoteurl = xgetcwd(); + } + resolved_url = relative_url(remoteurl, rel_url, up_path); - if (argc == 3) - up_path = argv[2]; - - res = relative_url(remoteurl, url, up_path); - puts(res); - free(res); + free(remote); free(remoteurl); - return 0; + strbuf_release(&remotesb); + + return resolved_url; } static int resolve_relative_url_test(int argc, const char **argv, const char *prefix) @@ -590,26 +584,6 @@ static int module_foreach(int argc, const char **argv, const char *prefix) return 0; } -static char *compute_submodule_clone_url(const char *rel_url) -{ - char *remoteurl, *relurl; - char *remote = get_default_remote(); - struct strbuf remotesb = STRBUF_INIT; - - strbuf_addf(&remotesb, "remote.%s.url", remote); - if (git_config_get_string(remotesb.buf, &remoteurl)) { - warning(_("could not look up configuration '%s'. Assuming this repository is its own authoritative upstream."), remotesb.buf); - remoteurl = xgetcwd(); - } - relurl = relative_url(remoteurl, rel_url, NULL); - - free(remote); - free(remoteurl); - strbuf_release(&remotesb); - - return relurl; -} - struct init_cb { const char *prefix; unsigned int flags; @@ -660,7 +634,7 @@ static void init_submodule(const char *path, const char *prefix, if (starts_with_dot_dot_slash(url) || starts_with_dot_slash(url)) { char *oldurl = url; - url = compute_submodule_clone_url(oldurl); + url = resolve_relative_url(oldurl, NULL, 0); free(oldurl); } @@ -1380,20 +1354,10 @@ static void sync_submodule(const char *path, const char *prefix, if (sub && sub->url) { if (starts_with_dot_dot_slash(sub->url) || starts_with_dot_slash(sub->url)) { - char *remote_url, *up_path; - char *remote = get_default_remote(); - strbuf_addf(&sb, "remote.%s.url", remote); - - if (git_config_get_string(sb.buf, &remote_url)) - remote_url = xgetcwd(); - - up_path = get_up_path(path); - sub_origin_url = relative_url(remote_url, sub->url, up_path); - super_config_url = relative_url(remote_url, sub->url, NULL); - - free(remote); + char *up_path = get_up_path(path); + sub_origin_url = resolve_relative_url(sub->url, up_path, 1); + super_config_url = resolve_relative_url(sub->url, NULL, 1); free(up_path); - free(remote_url); } else { sub_origin_url = xstrdup(sub->url); super_config_url = xstrdup(sub->url); @@ -1704,18 +1668,24 @@ static int add_possible_reference_from_superproject( * standard layout with .git/(modules/<name>)+/objects */ if (strip_suffix(odb->path, "/objects", &len)) { + struct repository alternate; char *sm_alternate; struct strbuf sb = STRBUF_INIT; struct strbuf err = STRBUF_INIT; strbuf_add(&sb, odb->path, len); + repo_init(&alternate, sb.buf, NULL); + /* * We need to end the new path with '/' to mark it as a dir, * otherwise a submodule name containing '/' will be broken * as the last part of a missing submodule reference would * be taken as a file name. 
*/ - strbuf_addf(&sb, "/modules/%s/", sas->submodule_name); + strbuf_reset(&sb); + submodule_name_to_gitdir(&sb, &alternate, sas->submodule_name); + strbuf_addch(&sb, '/'); + repo_clear(&alternate); sm_alternate = compute_alternate_path(sb.buf, &err); if (sm_alternate) { @@ -1724,7 +1694,7 @@ static int add_possible_reference_from_superproject( } else { switch (sas->error_mode) { case SUBMODULE_ALTERNATE_ERROR_DIE: - if (advice_submodule_alternate_error_strategy_die) + if (advice_enabled(ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE)) advise(_(alternate_error_advice)); die(_("submodule '%s' cannot add alternate: %s"), sas->submodule_name, err.buf); @@ -1785,7 +1755,7 @@ static int clone_submodule(struct module_clone_data *clone_data) struct strbuf sb = STRBUF_INIT; struct child_process cp = CHILD_PROCESS_INIT; - strbuf_addf(&sb, "%s/modules/%s", get_git_dir(), clone_data->name); + submodule_name_to_gitdir(&sb, the_repository, clone_data->name); sm_gitdir = absolute_pathdup(sb.buf); strbuf_reset(&sb); @@ -2045,6 +2015,20 @@ struct submodule_update_clone { .max_jobs = 1, \ } +struct update_data { + const char *recursive_prefix; + const char *sm_path; + const char *displaypath; + struct object_id oid; + struct object_id suboid; + struct submodule_update_strategy update_strategy; + int depth; + unsigned int force: 1; + unsigned int quiet: 1; + unsigned int nofetch: 1; + unsigned int just_cloned: 1; +}; +#define UPDATE_DATA_INIT { .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT } static void next_submodule_warn_missing(struct submodule_update_clone *suc, struct strbuf *out, const char *displaypath) @@ -2134,7 +2118,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, if (repo_config_get_string_tmp(the_repository, sb.buf, &url)) { if (starts_with_dot_slash(sub->url) || starts_with_dot_dot_slash(sub->url)) { - url = compute_submodule_clone_url(sub->url); + url = resolve_relative_url(sub->url, NULL, 0); need_free_url = 1; } else url = sub->url; @@ -2298,6 +2282,181 @@ static int git_update_clone_config(const char *var, const char *value, return 0; } +static int is_tip_reachable(const char *path, struct object_id *oid) +{ + struct child_process cp = CHILD_PROCESS_INIT; + struct strbuf rev = STRBUF_INIT; + char *hex = oid_to_hex(oid); + + cp.git_cmd = 1; + cp.dir = xstrdup(path); + cp.no_stderr = 1; + strvec_pushl(&cp.args, "rev-list", "-n", "1", hex, "--not", "--all", NULL); + + prepare_submodule_repo_env(&cp.env_array); + + if (capture_command(&cp, &rev, GIT_MAX_HEXSZ + 1) || rev.len) + return 0; + + return 1; +} + +static int fetch_in_submodule(const char *module_path, int depth, int quiet, struct object_id *oid) +{ + struct child_process cp = CHILD_PROCESS_INIT; + + prepare_submodule_repo_env(&cp.env_array); + cp.git_cmd = 1; + cp.dir = xstrdup(module_path); + + strvec_push(&cp.args, "fetch"); + if (quiet) + strvec_push(&cp.args, "--quiet"); + if (depth) + strvec_pushf(&cp.args, "--depth=%d", depth); + if (oid) { + char *hex = oid_to_hex(oid); + char *remote = get_default_remote(); + strvec_pushl(&cp.args, remote, hex, NULL); + } + + return run_command(&cp); +} + +static int run_update_command(struct update_data *ud, int subforce) +{ + struct strvec args = STRVEC_INIT; + struct strvec child_env = STRVEC_INIT; + char *oid = oid_to_hex(&ud->oid); + int must_die_on_failure = 0; + int git_cmd; + + switch (ud->update_strategy.type) { + case SM_UPDATE_CHECKOUT: + git_cmd = 1; + strvec_pushl(&args, "checkout", "-q", NULL); + if (subforce) + strvec_push(&args, "-f"); + break; 
+ case SM_UPDATE_REBASE: + git_cmd = 1; + strvec_push(&args, "rebase"); + if (ud->quiet) + strvec_push(&args, "--quiet"); + must_die_on_failure = 1; + break; + case SM_UPDATE_MERGE: + git_cmd = 1; + strvec_push(&args, "merge"); + if (ud->quiet) + strvec_push(&args, "--quiet"); + must_die_on_failure = 1; + break; + case SM_UPDATE_COMMAND: + git_cmd = 0; + strvec_push(&args, ud->update_strategy.command); + must_die_on_failure = 1; + break; + default: + BUG("unexpected update strategy type: %s", + submodule_strategy_to_string(&ud->update_strategy)); + } + strvec_push(&args, oid); + + prepare_submodule_repo_env(&child_env); + if (run_command_v_opt_cd_env(args.v, git_cmd ? RUN_GIT_CMD : RUN_USING_SHELL, + ud->sm_path, child_env.v)) { + switch (ud->update_strategy.type) { + case SM_UPDATE_CHECKOUT: + printf(_("Unable to checkout '%s' in submodule path '%s'"), + oid, ud->displaypath); + break; + case SM_UPDATE_REBASE: + printf(_("Unable to rebase '%s' in submodule path '%s'"), + oid, ud->displaypath); + break; + case SM_UPDATE_MERGE: + printf(_("Unable to merge '%s' in submodule path '%s'"), + oid, ud->displaypath); + break; + case SM_UPDATE_COMMAND: + printf(_("Execution of '%s %s' failed in submodule path '%s'"), + ud->update_strategy.command, oid, ud->displaypath); + break; + default: + BUG("unexpected update strategy type: %s", + submodule_strategy_to_string(&ud->update_strategy)); + } + /* + * NEEDSWORK: We are currently printing to stdout with error + * return so that the shell caller handles the error output + * properly. Once we start handling the error messages within + * C, we should use die() instead. + */ + if (must_die_on_failure) + return 2; + /* + * This signifies to the caller in shell that the command + * failed without dying + */ + return 1; + } + + switch (ud->update_strategy.type) { + case SM_UPDATE_CHECKOUT: + printf(_("Submodule path '%s': checked out '%s'\n"), + ud->displaypath, oid); + break; + case SM_UPDATE_REBASE: + printf(_("Submodule path '%s': rebased into '%s'\n"), + ud->displaypath, oid); + break; + case SM_UPDATE_MERGE: + printf(_("Submodule path '%s': merged in '%s'\n"), + ud->displaypath, oid); + break; + case SM_UPDATE_COMMAND: + printf(_("Submodule path '%s': '%s %s'\n"), + ud->displaypath, ud->update_strategy.command, oid); + break; + default: + BUG("unexpected update strategy type: %s", + submodule_strategy_to_string(&ud->update_strategy)); + } + + return 0; +} + +static int do_run_update_procedure(struct update_data *ud) +{ + int subforce = is_null_oid(&ud->suboid) || ud->force; + + if (!ud->nofetch) { + /* + * Run fetch only if `oid` isn't present or it + * is not reachable from a ref. + */ + if (!is_tip_reachable(ud->sm_path, &ud->oid) && + fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, NULL) && + !ud->quiet) + fprintf_ln(stderr, + _("Unable to fetch in submodule path '%s'; " + "trying to directly fetch %s:"), + ud->displaypath, oid_to_hex(&ud->oid)); + /* + * Now we tried the usual fetch, but `oid` may + * not be reachable from any of the refs. + */ + if (!is_tip_reachable(ud->sm_path, &ud->oid) && + fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, &ud->oid)) + die(_("Fetched in submodule path '%s', but it did not " + "contain %s. 
Direct fetching of that commit failed."), + ud->displaypath, oid_to_hex(&ud->oid)); + } + + return run_update_command(ud, subforce); +} + static void update_submodule(struct update_clone_data *ucd) { fprintf(stdout, "dummy %s %d\t%s\n", @@ -2395,6 +2554,73 @@ static int update_clone(int argc, const char **argv, const char *prefix) return update_submodules(&suc); } +static int run_update_procedure(int argc, const char **argv, const char *prefix) +{ + int force = 0, quiet = 0, nofetch = 0, just_cloned = 0; + char *prefixed_path, *update = NULL; + struct update_data update_data = UPDATE_DATA_INIT; + + struct option options[] = { + OPT__QUIET(&quiet, N_("suppress output for update by rebase or merge")), + OPT__FORCE(&force, N_("force checkout updates"), 0), + OPT_BOOL('N', "no-fetch", &nofetch, + N_("don't fetch new objects from the remote site")), + OPT_BOOL(0, "just-cloned", &just_cloned, + N_("overrides update mode in case the repository is a fresh clone")), + OPT_INTEGER(0, "depth", &update_data.depth, N_("depth for shallow fetch")), + OPT_STRING(0, "prefix", &prefix, + N_("path"), + N_("path into the working tree")), + OPT_STRING(0, "update", &update, + N_("string"), + N_("rebase, merge, checkout or none")), + OPT_STRING(0, "recursive-prefix", &update_data.recursive_prefix, N_("path"), + N_("path into the working tree, across nested " + "submodule boundaries")), + OPT_CALLBACK_F(0, "oid", &update_data.oid, N_("sha1"), + N_("SHA1 expected by superproject"), PARSE_OPT_NONEG, + parse_opt_object_id), + OPT_CALLBACK_F(0, "suboid", &update_data.suboid, N_("subsha1"), + N_("SHA1 of submodule's HEAD"), PARSE_OPT_NONEG, + parse_opt_object_id), + OPT_END() + }; + + const char *const usage[] = { + N_("git submodule--helper run-update-procedure [<options>] <path>"), + NULL + }; + + argc = parse_options(argc, argv, prefix, options, usage, 0); + + if (argc != 1) + usage_with_options(usage, options); + + update_data.force = !!force; + update_data.quiet = !!quiet; + update_data.nofetch = !!nofetch; + update_data.just_cloned = !!just_cloned; + update_data.sm_path = argv[0]; + + if (update_data.recursive_prefix) + prefixed_path = xstrfmt("%s%s", update_data.recursive_prefix, update_data.sm_path); + else + prefixed_path = xstrdup(update_data.sm_path); + + update_data.displaypath = get_submodule_displaypath(prefixed_path, prefix); + + determine_submodule_update_strategy(the_repository, update_data.just_cloned, + update_data.sm_path, update, + &update_data.update_strategy); + + free(prefixed_path); + + if (!oideq(&update_data.oid, &update_data.suboid) || update_data.force) + return do_run_update_procedure(&update_data); + + return 3; +} + static int resolve_relative_path(int argc, const char **argv, const char *prefix) { struct strbuf sb = STRBUF_INIT; @@ -2540,7 +2766,6 @@ static int push_check(int argc, const char **argv, const char *prefix) static int ensure_core_worktree(int argc, const char **argv, const char *prefix) { - const struct submodule *sub; const char *path; const char *cw; struct repository subrepo; @@ -2550,11 +2775,7 @@ static int ensure_core_worktree(int argc, const char **argv, const char *prefix) path = argv[1]; - sub = submodule_from_path(the_repository, null_oid(), path); - if (!sub) - BUG("We could get the submodule handle before?"); - - if (repo_submodule_init(&subrepo, the_repository, sub)) + if (repo_submodule_init(&subrepo, the_repository, path, null_oid())) die(_("could not get a repository handle for submodule '%s'"), path); if (!repo_config_get_string_tmp(&subrepo, 
"core.worktree", &cw)) { @@ -2765,7 +2986,7 @@ struct add_data { const char *prefix; const char *branch; const char *reference_path; - const char *sm_path; + char *sm_path; const char *sm_name; const char *repo; const char *realrepo; @@ -2877,61 +3098,244 @@ static int add_submodule(const struct add_data *add_data) return 0; } -static int add_clone(int argc, const char **argv, const char *prefix) +static int config_submodule_in_gitmodules(const char *name, const char *var, const char *value) +{ + char *key; + int ret; + + if (!is_writing_gitmodules_ok()) + die(_("please make sure that the .gitmodules file is in the working tree")); + + key = xstrfmt("submodule.%s.%s", name, var); + ret = config_set_in_gitmodules_file_gently(key, value); + free(key); + + return ret; +} + +static void configure_added_submodule(struct add_data *add_data) { - int force = 0, quiet = 0, dissociate = 0, progress = 0; + char *key; + char *val = NULL; + struct child_process add_submod = CHILD_PROCESS_INIT; + struct child_process add_gitmodules = CHILD_PROCESS_INIT; + + key = xstrfmt("submodule.%s.url", add_data->sm_name); + git_config_set_gently(key, add_data->realrepo); + free(key); + + add_submod.git_cmd = 1; + strvec_pushl(&add_submod.args, "add", + "--no-warn-embedded-repo", NULL); + if (add_data->force) + strvec_push(&add_submod.args, "--force"); + strvec_pushl(&add_submod.args, "--", add_data->sm_path, NULL); + + if (run_command(&add_submod)) + die(_("Failed to add submodule '%s'"), add_data->sm_path); + + if (config_submodule_in_gitmodules(add_data->sm_name, "path", add_data->sm_path) || + config_submodule_in_gitmodules(add_data->sm_name, "url", add_data->repo)) + die(_("Failed to register submodule '%s'"), add_data->sm_path); + + if (add_data->branch) { + if (config_submodule_in_gitmodules(add_data->sm_name, + "branch", add_data->branch)) + die(_("Failed to register submodule '%s'"), add_data->sm_path); + } + + add_gitmodules.git_cmd = 1; + strvec_pushl(&add_gitmodules.args, + "add", "--force", "--", ".gitmodules", NULL); + + if (run_command(&add_gitmodules)) + die(_("Failed to register submodule '%s'"), add_data->sm_path); + + /* + * NEEDSWORK: In a multi-working-tree world this needs to be + * set in the per-worktree config. + */ + /* + * NEEDSWORK: In the longer run, we need to get rid of this + * pattern of querying "submodule.active" before calling + * is_submodule_active(), since that function needs to find + * out the value of "submodule.active" again anyway. + */ + if (!git_config_get_string("submodule.active", &val) && val) { + /* + * If the submodule being added isn't already covered by the + * current configured pathspec, set the submodule's active flag + */ + if (!is_submodule_active(the_repository, add_data->sm_path)) { + key = xstrfmt("submodule.%s.active", add_data->sm_name); + git_config_set_gently(key, "true"); + free(key); + } + } else { + key = xstrfmt("submodule.%s.active", add_data->sm_name); + git_config_set_gently(key, "true"); + free(key); + } +} + +static void die_on_index_match(const char *path, int force) +{ + struct pathspec ps; + const char *args[] = { path, NULL }; + parse_pathspec(&ps, 0, PATHSPEC_PREFER_CWD, NULL, args); + + if (read_cache_preload(NULL) < 0) + die(_("index file corrupt")); + + if (ps.nr) { + int i; + char *ps_matched = xcalloc(ps.nr, 1); + + /* TODO: audit for interaction with sparse-index. */ + ensure_full_index(&the_index); + + /* + * Since there is only one pathspec, we just need + * need to check ps_matched[0] to know if a cache + * entry matched. 
+ */ + for (i = 0; i < active_nr; i++) { + ce_path_match(&the_index, active_cache[i], &ps, + ps_matched); + + if (ps_matched[0]) { + if (!force) + die(_("'%s' already exists in the index"), + path); + if (!S_ISGITLINK(active_cache[i]->ce_mode)) + die(_("'%s' already exists in the index " + "and is not a submodule"), path); + break; + } + } + free(ps_matched); + } +} + +static void die_on_repo_without_commits(const char *path) +{ + struct strbuf sb = STRBUF_INIT; + strbuf_addstr(&sb, path); + if (is_nonbare_repository_dir(&sb)) { + struct object_id oid; + if (resolve_gitlink_ref(path, "HEAD", &oid) < 0) + die(_("'%s' does not have a commit checked out"), path); + } +} + +static int module_add(int argc, const char **argv, const char *prefix) +{ + int force = 0, quiet = 0, progress = 0, dissociate = 0; struct add_data add_data = ADD_DATA_INIT; struct option options[] = { - OPT_STRING('b', "branch", &add_data.branch, - N_("branch"), - N_("branch of repository to checkout on cloning")), - OPT_STRING(0, "prefix", &prefix, - N_("path"), - N_("alternative anchor for relative paths")), - OPT_STRING(0, "path", &add_data.sm_path, - N_("path"), - N_("where the new submodule will be cloned to")), - OPT_STRING(0, "name", &add_data.sm_name, - N_("string"), - N_("name of the new submodule")), - OPT_STRING(0, "url", &add_data.realrepo, - N_("string"), - N_("url where to clone the submodule from")), - OPT_STRING(0, "reference", &add_data.reference_path, - N_("repo"), - N_("reference repository")), - OPT_BOOL(0, "dissociate", &dissociate, - N_("use --reference only while cloning")), - OPT_INTEGER(0, "depth", &add_data.depth, - N_("depth for shallow clones")), - OPT_BOOL(0, "progress", &progress, - N_("force cloning progress")), + OPT_STRING('b', "branch", &add_data.branch, N_("branch"), + N_("branch of repository to add as submodule")), OPT__FORCE(&force, N_("allow adding an otherwise ignored submodule path"), PARSE_OPT_NOCOMPLETE), - OPT__QUIET(&quiet, "suppress output for cloning a submodule"), + OPT__QUIET(&quiet, N_("print only error messages")), + OPT_BOOL(0, "progress", &progress, N_("force cloning progress")), + OPT_STRING(0, "reference", &add_data.reference_path, N_("repository"), + N_("reference repository")), + OPT_BOOL(0, "dissociate", &dissociate, N_("borrow the objects from reference repositories")), + OPT_STRING(0, "name", &add_data.sm_name, N_("name"), + N_("sets the submodule’s name to the given string " + "instead of defaulting to its path")), + OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")), OPT_END() }; const char *const usage[] = { - N_("git submodule--helper add-clone [<options>...] 
" - "--url <url> --path <path> --name <name>"), + N_("git submodule--helper add [<options>] [--] <repository> [<path>]"), NULL }; argc = parse_options(argc, argv, prefix, options, usage, 0); - if (argc != 0) + if (!is_writing_gitmodules_ok()) + die(_("please make sure that the .gitmodules file is in the working tree")); + + if (prefix && *prefix && + add_data.reference_path && !is_absolute_path(add_data.reference_path)) + add_data.reference_path = xstrfmt("%s%s", prefix, add_data.reference_path); + + if (argc == 0 || argc > 2) usage_with_options(usage, options); + add_data.repo = argv[0]; + if (argc == 1) + add_data.sm_path = git_url_basename(add_data.repo, 0, 0); + else + add_data.sm_path = xstrdup(argv[1]); + + if (prefix && *prefix && !is_absolute_path(add_data.sm_path)) + add_data.sm_path = xstrfmt("%s%s", prefix, add_data.sm_path); + + if (starts_with_dot_dot_slash(add_data.repo) || + starts_with_dot_slash(add_data.repo)) { + if (prefix) + die(_("Relative path can only be used from the toplevel " + "of the working tree")); + + /* dereference source url relative to parent's url */ + add_data.realrepo = resolve_relative_url(add_data.repo, NULL, 1); + } else if (is_dir_sep(add_data.repo[0]) || strchr(add_data.repo, ':')) { + add_data.realrepo = add_data.repo; + } else { + die(_("repo URL: '%s' must be absolute or begin with ./|../"), + add_data.repo); + } + + /* + * normalize path: + * multiple //; leading ./; /./; /../; + */ + normalize_path_copy(add_data.sm_path, add_data.sm_path); + strip_dir_trailing_slashes(add_data.sm_path); + + die_on_index_match(add_data.sm_path, force); + die_on_repo_without_commits(add_data.sm_path); + + if (!force) { + int exit_code = -1; + struct strbuf sb = STRBUF_INIT; + struct child_process cp = CHILD_PROCESS_INIT; + cp.git_cmd = 1; + cp.no_stdout = 1; + strvec_pushl(&cp.args, "add", "--dry-run", "--ignore-missing", + "--no-warn-embedded-repo", add_data.sm_path, NULL); + if ((exit_code = pipe_command(&cp, NULL, 0, NULL, 0, &sb, 0))) { + strbuf_complete_line(&sb); + fputs(sb.buf, stderr); + free(add_data.sm_path); + return exit_code; + } + strbuf_release(&sb); + } + + if(!add_data.sm_name) + add_data.sm_name = add_data.sm_path; + + if (check_submodule_name(add_data.sm_name)) + die(_("'%s' is not a valid submodule name"), add_data.sm_name); + add_data.prefix = prefix; - add_data.progress = !!progress; - add_data.dissociate = !!dissociate; add_data.force = !!force; add_data.quiet = !!quiet; + add_data.progress = !!progress; + add_data.dissociate = !!dissociate; - if (add_submodule(&add_data)) + if (add_submodule(&add_data)) { + free(add_data.sm_path); return 1; + } + configure_added_submodule(&add_data); + free(add_data.sm_path); return 0; } @@ -2948,12 +3352,12 @@ static struct cmd_struct commands[] = { {"list", module_list, 0}, {"name", module_name, 0}, {"clone", module_clone, 0}, - {"add-clone", add_clone, 0}, + {"add", module_add, SUPPORT_SUPER_PREFIX}, {"update-module-mode", module_update_module_mode, 0}, {"update-clone", update_clone, 0}, + {"run-update-procedure", run_update_procedure, 0}, {"ensure-core-worktree", ensure_core_worktree, 0}, {"relative-path", resolve_relative_path, 0}, - {"resolve-relative-url", resolve_relative_url, 0}, {"resolve-relative-url-test", resolve_relative_url_test, 0}, {"foreach", module_foreach, SUPPORT_SUPER_PREFIX}, {"init", module_init, SUPPORT_SUPER_PREFIX}, diff --git a/builtin/tag.c b/builtin/tag.c index 452558ec95..065b6bf093 100644 --- a/builtin/tag.c +++ b/builtin/tag.c @@ -293,9 +293,7 @@ static void 
create_tag(const struct object_id *object, const char *object_ref, /* write the template message before editing: */ path = git_pathdup("TAG_EDITMSG"); - fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0600); - if (fd < 0) - die_errno(_("could not create file '%s'"), path); + fd = xopen(path, O_CREAT | O_TRUNC | O_WRONLY, 0600); if (opt->message_given) { write_or_die(fd, buf->buf, buf->len); diff --git a/builtin/update-index.c b/builtin/update-index.c index f1f16f2de5..187203e8bb 100644 --- a/builtin/update-index.c +++ b/builtin/update-index.c @@ -95,9 +95,7 @@ static int create_file(const char *path) { int fd; path = get_mtime_path(path); - fd = open(path, O_CREAT | O_RDWR, 0644); - if (fd < 0) - die_errno(_("failed to create file %s"), path); + fd = xopen(path, O_CREAT | O_RDWR, 0644); return fd; } diff --git a/builtin/update-ref.c b/builtin/update-ref.c index 6029a80544..a84e7b47a2 100644 --- a/builtin/update-ref.c +++ b/builtin/update-ref.c @@ -302,6 +302,12 @@ static void parse_cmd_verify(struct ref_transaction *transaction, strbuf_release(&err); } +static void report_ok(const char *command) +{ + fprintf(stdout, "%s: ok\n", command); + fflush(stdout); +} + static void parse_cmd_option(struct ref_transaction *transaction, const char *next, const char *end) { @@ -317,7 +323,7 @@ static void parse_cmd_start(struct ref_transaction *transaction, { if (*next != line_termination) die("start: extra input: %s", next); - puts("start: ok"); + report_ok("start"); } static void parse_cmd_prepare(struct ref_transaction *transaction, @@ -328,7 +334,7 @@ static void parse_cmd_prepare(struct ref_transaction *transaction, die("prepare: extra input: %s", next); if (ref_transaction_prepare(transaction, &error)) die("prepare: %s", error.buf); - puts("prepare: ok"); + report_ok("prepare"); } static void parse_cmd_abort(struct ref_transaction *transaction, @@ -339,7 +345,7 @@ static void parse_cmd_abort(struct ref_transaction *transaction, die("abort: extra input: %s", next); if (ref_transaction_abort(transaction, &error)) die("abort: %s", error.buf); - puts("abort: ok"); + report_ok("abort"); } static void parse_cmd_commit(struct ref_transaction *transaction, @@ -350,7 +356,7 @@ static void parse_cmd_commit(struct ref_transaction *transaction, die("commit: extra input: %s", next); if (ref_transaction_commit(transaction, &error)) die("commit: %s", error.buf); - puts("commit: ok"); + report_ok("commit"); ref_transaction_free(transaction); } diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c index 6da8fa2607..125af53885 100644 --- a/builtin/upload-pack.c +++ b/builtin/upload-pack.c @@ -16,16 +16,18 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix) { const char *dir; int strict = 0; - struct upload_pack_options opts = { 0 }; - struct serve_options serve_opts = SERVE_OPTIONS_INIT; + int advertise_refs = 0; + int stateless_rpc = 0; + int timeout = 0; struct option options[] = { - OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc, + OPT_BOOL(0, "stateless-rpc", &stateless_rpc, N_("quit after a single request/response exchange")), - OPT_BOOL(0, "advertise-refs", &opts.advertise_refs, - N_("exit immediately after initial ref advertisement")), + OPT_HIDDEN_BOOL(0, "http-backend-info-refs", &advertise_refs, + N_("serve up the info/refs for git-http-backend")), + OPT_ALIAS(0, "advertise-refs", "http-backend-info-refs"), OPT_BOOL(0, "strict", &strict, N_("do not try <directory>/.git/ if <directory> is no Git directory")), - OPT_INTEGER(0, "timeout", &opts.timeout, + OPT_INTEGER(0, "timeout", 
&timeout, N_("interrupt transfer after <n> seconds of inactivity")), OPT_END() }; @@ -38,9 +40,6 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix) if (argc != 1) usage_with_options(upload_pack_usage, options); - if (opts.timeout) - opts.daemon_mode = 1; - setup_path(); dir = argv[0]; @@ -50,21 +49,22 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix) switch (determine_protocol_version_server()) { case protocol_v2: - serve_opts.advertise_capabilities = opts.advertise_refs; - serve_opts.stateless_rpc = opts.stateless_rpc; - serve(&serve_opts); + if (advertise_refs) + protocol_v2_advertise_capabilities(); + else + protocol_v2_serve_loop(stateless_rpc); break; case protocol_v1: /* * v1 is just the original protocol with a version string, * so just fall through after writing the version string. */ - if (opts.advertise_refs || !opts.stateless_rpc) + if (advertise_refs || !stateless_rpc) packet_write_fmt(1, "version 1\n"); /* fallthrough */ case protocol_v0: - upload_pack(&opts); + upload_pack(advertise_refs, stateless_rpc, timeout); break; case protocol_unknown_version: BUG("unknown protocol version"); diff --git a/bulk-checkin.c b/bulk-checkin.c index b023d9959a..8785b2ac80 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -23,9 +23,25 @@ static struct bulk_checkin_state { uint32_t nr_written; } state; +static void finish_tmp_packfile(struct strbuf *basename, + const char *pack_tmp_name, + struct pack_idx_entry **written_list, + uint32_t nr_written, + struct pack_idx_option *pack_idx_opts, + unsigned char hash[]) +{ + char *idx_tmp_name = NULL; + + stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written, + pack_idx_opts, hash, &idx_tmp_name); + rename_tmp_packfile_idx(basename, &idx_tmp_name); + + free(idx_tmp_name); +} + static void finish_bulk_checkin(struct bulk_checkin_state *state) { - struct object_id oid; + unsigned char hash[GIT_MAX_RAWSZ]; struct strbuf packname = STRBUF_INIT; int i; @@ -37,19 +53,20 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state) unlink(state->pack_tmp_name); goto clear_exit; } else if (state->nr_written == 1) { - finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); + finalize_hashfile(state->f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); } else { - int fd = finalize_hashfile(state->f, oid.hash, 0); - fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name, - state->nr_written, oid.hash, + int fd = finalize_hashfile(state->f, hash, 0); + fixup_pack_header_footer(fd, hash, state->pack_tmp_name, + state->nr_written, hash, state->offset); close(fd); } - strbuf_addf(&packname, "%s/pack/pack-", get_object_directory()); + strbuf_addf(&packname, "%s/pack/pack-%s.", get_object_directory(), + hash_to_hex(hash)); finish_tmp_packfile(&packname, state->pack_tmp_name, state->written, state->nr_written, - &state->pack_idx_opts, oid.hash); + &state->pack_idx_opts, hash); for (i = 0; i < state->nr_written; i++) free(state->written[i]); @@ -569,18 +569,18 @@ err: } int unbundle(struct repository *r, struct bundle_header *header, - int bundle_fd, int flags) + int bundle_fd, struct strvec *extra_index_pack_args) { - const char *argv_index_pack[] = {"index-pack", - "--fix-thin", "--stdin", NULL, NULL}; struct child_process ip = CHILD_PROCESS_INIT; + strvec_pushl(&ip.args, "index-pack", "--fix-thin", "--stdin", NULL); - if (flags & BUNDLE_VERBOSE) - argv_index_pack[3] = "-v"; + if (extra_index_pack_args) { + strvec_pushv(&ip.args, extra_index_pack_args->v); + 
strvec_clear(extra_index_pack_args); + } if (verify_bundle(r, header, 0)) return -1; - ip.argv = argv_index_pack; ip.in = bundle_fd; ip.no_stdout = 1; ip.git_cmd = 1; @@ -26,9 +26,19 @@ int create_bundle(struct repository *r, const char *path, int argc, const char **argv, struct strvec *pack_options, int version); int verify_bundle(struct repository *r, struct bundle_header *header, int verbose); -#define BUNDLE_VERBOSE 1 + +/** + * Unbundle after reading the header with read_bundle_header(). + * + * We'll invoke "git index-pack --stdin --fix-thin" for you on the + * provided `bundle_fd` from read_bundle_header(). + * + * Provide "extra_index_pack_args" to pass any extra arguments + * (e.g. "-v" for verbose/progress), NULL otherwise. The provided + * "extra_index_pack_args" (if any) will be strvec_clear()'d for you. + */ int unbundle(struct repository *r, struct bundle_header *header, - int bundle_fd, int flags); + int bundle_fd, struct strvec *extra_index_pack_args); int list_bundle_refs(struct bundle_header *header, int argc, const char **argv); @@ -958,7 +958,6 @@ extern char *apply_default_ignorewhitespace; extern const char *git_attributes_file; extern const char *git_hooks_path; extern int zlib_compression_level; -extern int core_compression_level; extern int pack_compression_level; extern size_t packed_git_window_size; extern size_t packed_git_limit; @@ -1211,49 +1210,6 @@ enum scld_error safe_create_leading_directories(char *path); enum scld_error safe_create_leading_directories_const(const char *path); enum scld_error safe_create_leading_directories_no_share(char *path); -/* - * Callback function for raceproof_create_file(). This function is - * expected to do something that makes dirname(path) permanent despite - * the fact that other processes might be cleaning up empty - * directories at the same time. Usually it will create a file named - * path, but alternatively it could create another file in that - * directory, or even chdir() into that directory. The function should - * return 0 if the action was completed successfully. On error, it - * should return a nonzero result and set errno. - * raceproof_create_file() treats two errno values specially: - * - * - ENOENT -- dirname(path) does not exist. In this case, - * raceproof_create_file() tries creating dirname(path) - * (and any parent directories, if necessary) and calls - * the function again. - * - * - EISDIR -- the file already exists and is a directory. In this - * case, raceproof_create_file() removes the directory if - * it is empty (and recursively any empty directories that - * it contains) and calls the function again. - * - * Any other errno causes raceproof_create_file() to fail with the - * callback's return value and errno. - * - * Obviously, this function should be OK with being called again if it - * fails with ENOENT or EISDIR. In other scenarios it will not be - * called again. - */ -typedef int create_file_fn(const char *path, void *cb); - -/* - * Create a file in dirname(path) by calling fn, creating leading - * directories if necessary. Retry a few times in case we are racing - * with another process that is trying to clean up the directory that - * contains path. See the documentation for create_file_fn for more - * details. - * - * Return the value and set the errno that resulted from the most - * recent call of fn. fn is always called at least once, and will be - * called more than once if it returns ENOENT or EISDIR. 
- */ -int raceproof_create_file(const char *path, create_file_fn fn, void *cb); - int mkdir_in_gitdir(const char *path); char *interpolate_path(const char *path, int real_home); /* NEEDSWORK: remove this synonym once in-flight topics have migrated */ @@ -1299,6 +1255,13 @@ int looks_like_command_line_option(const char *str); /** * Return a newly allocated string with the evaluation of + * "$XDG_CONFIG_HOME/$subdir/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise + * "$HOME/.config/$subdir/$filename". Return NULL upon error. + */ +char *xdg_config_home_for(const char *subdir, const char *filename); + +/** + * Return a newly allocated string with the evaluation of * "$XDG_CONFIG_HOME/git/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise * "$HOME/.config/git/$filename". Return NULL upon error. */ @@ -1719,13 +1682,6 @@ int update_server_info(int); const char *get_log_output_encoding(void); const char *get_commit_output_encoding(void); -/* - * This is a hack for test programs like test-dump-untracked-cache to - * ensure that they do not modify the untracked cache when reading it. - * Do not use it otherwise! - */ -extern int ignore_untracked_cache_config; - int committer_ident_sufficiently_given(void); int author_ident_sufficiently_given(void); @@ -1738,6 +1694,8 @@ extern const char *git_mailmap_blob; void maybe_flush_or_die(FILE *, const char *); __attribute__((format (printf, 2, 3))) void fprintf_or_die(FILE *, const char *fmt, ...); +void fwrite_or_die(FILE *f, const void *buf, size_t count); +void fflush_or_die(FILE *f); #define COPY_READ_ERROR (-2) #define COPY_WRITE_ERROR (-3) diff --git a/check_bindir b/check_bindir deleted file mode 100755 index 623eadcbb7..0000000000 --- a/check_bindir +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -bindir="$1" -gitexecdir="$2" -gitcmd="$3" -if test "$bindir" != "$gitexecdir" && test -x "$gitcmd" -then - echo - echo "!! You have installed git-* commands to new gitexecdir." - echo "!! Old version git-* commands still remain in bindir." - echo "!! Mixing two versions of Git will lead to problems." - echo "!! Please remove old version commands in bindir now." 
- echo -fi diff --git a/ci/install-docker-dependencies.sh b/ci/install-docker-dependencies.sh index 26a6689766..07a8c6b199 100755 --- a/ci/install-docker-dependencies.sh +++ b/ci/install-docker-dependencies.sh @@ -15,4 +15,8 @@ linux-musl) apk add --update build-base curl-dev openssl-dev expat-dev gettext \ pcre2-dev python3 musl-libintl perl-utils ncurses >/dev/null ;; +pedantic) + dnf -yq update >/dev/null && + dnf -yq install make gcc findutils diffutils perl python3 gettext zlib-devel expat-devel openssl-devel curl-devel pcre2-devel >/dev/null + ;; esac diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh index 3ce81ffee9..cc62616d80 100755 --- a/ci/run-build-and-tests.sh +++ b/ci/run-build-and-tests.sh @@ -10,6 +10,11 @@ windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";; *) ln -s "$cache_dir/.prove" t/.prove;; esac +if test "$jobname" = "pedantic" +then + export DEVOPTS=pedantic +fi + make case "$jobname" in linux-gcc) @@ -23,6 +28,7 @@ linux-gcc) export GIT_TEST_COMMIT_GRAPH=1 export GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=1 export GIT_TEST_MULTI_PACK_INDEX=1 + export GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=1 export GIT_TEST_ADD_I_USE_BUILTIN=1 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master export GIT_TEST_WRITE_REV_INDEX=1 @@ -35,10 +41,9 @@ linux-clang) export GIT_TEST_DEFAULT_HASH=sha256 make test ;; -linux-gcc-4.8) +linux-gcc-4.8|pedantic) # Don't run the tests; we only care about whether Git can be - # built with GCC 4.8, as it errors out on some undesired (C99) - # constructs that newer compilers seem to quietly accept. + # built with GCC 4.8 or with pedantic ;; *) make test diff --git a/commit-graph.c b/commit-graph.c index 00614acd65..2706683acf 100644 --- a/commit-graph.c +++ b/commit-graph.c @@ -713,6 +713,7 @@ static void close_commit_graph_one(struct commit_graph *g) if (!g) return; + clear_commit_graph_data_slab(&commit_graph_data_slab); close_commit_graph_one(g->base_graph); free_commit_graph(g); } @@ -2125,7 +2126,7 @@ static void sort_and_scan_merged_commits(struct write_commit_graph_context *ctx) ctx->num_extra_edges = 0; for (i = 0; i < ctx->commits.nr; i++) { - display_progress(ctx->progress, i); + display_progress(ctx->progress, i + 1); if (i && oideq(&ctx->commits.list[i - 1]->object.oid, &ctx->commits.list[i]->object.oid)) { @@ -25,6 +25,7 @@ static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **); int save_commit_buffer = 1; +int no_graft_file_deprecated_advice; const char *commit_type = "commit"; @@ -190,7 +191,8 @@ static int read_graft_file(struct repository *r, const char *graft_file) struct strbuf buf = STRBUF_INIT; if (!fp) return -1; - if (advice_graft_file_deprecated) + if (!no_graft_file_deprecated_advice && + advice_enabled(ADVICE_GRAFT_FILE_DEPRECATED)) advise(_("Support for <GIT_DIR>/info/grafts is deprecated\n" "and will be removed in a future Git version.\n" "\n" @@ -41,6 +41,7 @@ struct commit { }; extern int save_commit_buffer; +extern int no_graft_file_deprecated_advice; extern const char *commit_type; /* While we can decorate any object with a name, it's only used for commits.. 
*/ diff --git a/compat/linux/procinfo.c b/compat/linux/procinfo.c index 578fed4cd3..bc2f9382a1 100644 --- a/compat/linux/procinfo.c +++ b/compat/linux/procinfo.c @@ -4,51 +4,172 @@ #include "strvec.h" #include "trace2.h" -static void get_ancestry_names(struct strvec *names) +/* + * We need more complex parsing in stat_parent_pid() and + * parse_proc_stat() below than a dumb fscanf(). That's because while + * the statcomm field is surrounded by parentheses, the process itself + * is free to insert any arbitrary byte sequence its its name. That + * can include newlines, spaces, closing parentheses etc. + * + * See do_task_stat() in fs/proc/array.c in linux.git, this is in + * contrast with the escaped version of the name found in + * /proc/%d/status. + * + * So instead of using fscanf() we'll read N bytes from it, look for + * the first "(", and then the last ")", anything in-between is our + * process name. + * + * How much N do we need? On Linux /proc/sys/kernel/pid_max is 2^15 by + * default, but it can be raised set to values of up to 2^22. So + * that's 7 digits for a PID. We have 2 PIDs in the first four fields + * we're interested in, so 2 * 7 = 14. + * + * We then have 3 spaces between those four values, and we'd like to + * get to the space between the 4th and the 5th (the "pgrp" field) to + * make sure we read the entire "ppid" field. So that brings us up to + * 14 + 3 + 1 = 18. Add the two parentheses around the "comm" value + * and it's 20. The "state" value itself is then one character (now at + * 21). + * + * Finally the maximum length of the "comm" name itself is 15 + * characters, e.g. a setting of "123456789abcdefg" will be truncated + * to "123456789abcdef". See PR_SET_NAME in prctl(2). So all in all + * we'd need to read 21 + 15 = 36 bytes. + * + * Let's just read 2^6 (64) instead for good measure. If PID_MAX ever + * grows past 2^22 we'll be future-proof. We'll then anchor at the + * last ")" we find to locate the parent PID. + */ +#define STAT_PARENT_PID_READ_N 64 + +static int parse_proc_stat(struct strbuf *sb, struct strbuf *name, + int *statppid) { + const char *comm_lhs = strchr(sb->buf, '('); + const char *comm_rhs = strrchr(sb->buf, ')'); + const char *ppid_lhs, *ppid_rhs; + char *p; + pid_t ppid; + + if (!comm_lhs || !comm_rhs) + goto bad_kernel; + + /* + * We're at the ")", that's followed by " X ", where X is a + * single "state" character. So advance by 4 bytes. + */ + ppid_lhs = comm_rhs + 4; + + /* + * Read until the space between the "ppid" and "pgrp" fields + * to make sure we're anchored after the untruncated "ppid" + * field.. + */ + ppid_rhs = strchr(ppid_lhs, ' '); + if (!ppid_rhs) + goto bad_kernel; + + ppid = strtol(ppid_lhs, &p, 10); + if (ppid_rhs == p) { + const char *comm = comm_lhs + 1; + size_t commlen = comm_rhs - comm; + + strbuf_add(name, comm, commlen); + *statppid = ppid; + + return 0; + } + +bad_kernel: /* - * NEEDSWORK: We could gather the entire pstree into an array to match - * functionality with compat/win32/trace2_win32_process_info.c. - * To do so, we may want to examine /proc/<pid>/stat. For now, just - * gather the immediate parent name which is readily accessible from - * /proc/$(getppid())/comm. + * We were able to read our STAT_PARENT_PID_READ_N bytes from + * /proc/%d/stat, but the content is bad. Broken kernel? + * Should not happen, but handle it gracefully. 
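+	 *
+	 * For reference (an illustration only, not part of this change):
+	 * on a healthy kernel the buffer read from /proc/<pid>/stat
+	 * typically begins like
+	 *
+	 *     1234 (sh) S 1000 1234 1234 ...
+	 *
+	 * i.e. the pid, the "(comm)" field in parentheses, a one-character
+	 * state, and then the "ppid" field that parse_proc_stat() extracts.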
*/ + return -1; +} + +static int stat_parent_pid(pid_t pid, struct strbuf *name, int *statppid) +{ struct strbuf procfs_path = STRBUF_INIT; - struct strbuf name = STRBUF_INIT; + struct strbuf sb = STRBUF_INIT; + FILE *fp; + int ret = -1; /* try to use procfs if it's present. */ - strbuf_addf(&procfs_path, "/proc/%d/comm", getppid()); - if (strbuf_read_file(&name, procfs_path.buf, 0)) { - strbuf_release(&procfs_path); - strbuf_trim_trailing_newline(&name); - strvec_push(names, strbuf_detach(&name, NULL)); - } + strbuf_addf(&procfs_path, "/proc/%d/stat", pid); + fp = fopen(procfs_path.buf, "r"); + if (!fp) + goto cleanup; + + /* + * We could be more strict here and assert that we read at + * least STAT_PARENT_PID_READ_N. My reading of procfs(5) is + * that on any modern kernel (at least since 2.6.0 released in + * 2003) even if all the mandatory numeric fields were zero'd + * out we'd get at least 100 bytes, but let's just check that + * we got anything at all and trust the parse_proc_stat() + * function to handle its "Bad Kernel?" error checking. + */ + if (!strbuf_fread(&sb, STAT_PARENT_PID_READ_N, fp)) + goto cleanup; + if (parse_proc_stat(&sb, name, statppid) < 0) + goto cleanup; + + ret = 0; +cleanup: + if (fp) + fclose(fp); + strbuf_release(&procfs_path); + strbuf_release(&sb); + + return ret; +} + +static void push_ancestry_name(struct strvec *names, pid_t pid) +{ + struct strbuf name = STRBUF_INIT; + int ppid; + + if (stat_parent_pid(pid, &name, &ppid) < 0) + goto cleanup; + + strvec_push(names, name.buf); + + /* + * Both errors and reaching the end of the process chain are + * reported as fields of 0 by proc(5) + */ + if (ppid) + push_ancestry_name(names, ppid); +cleanup: + strbuf_release(&name); return; - /* NEEDSWORK: add non-procfs-linux implementations here */ } void trace2_collect_process_info(enum trace2_process_info_reason reason) { - if (!trace2_is_enabled()) - return; + struct strvec names = STRVEC_INIT; - /* someday we may want to write something extra here, but not today */ - if (reason == TRACE2_PROCESS_INFO_EXIT) + if (!trace2_is_enabled()) return; - if (reason == TRACE2_PROCESS_INFO_STARTUP) { + switch (reason) { + case TRACE2_PROCESS_INFO_EXIT: /* - * NEEDSWORK: we could do the entire ptree in an array instead, - * see compat/win32/trace2_win32_process_info.c. + * The Windows version of this calls its + * get_peak_memory_info() here. We may want to insert + * similar process-end statistics here in the future. 
*/ - struct strvec names = STRVEC_INIT; - - get_ancestry_names(&names); + break; + case TRACE2_PROCESS_INFO_STARTUP: + push_ancestry_name(&names, getppid()); if (names.nr) trace2_cmd_ancestry(names.v); strvec_clear(&names); + break; } return; diff --git a/compat/mmap.c b/compat/mmap.c index 14d31010df..8d6c02d4bc 100644 --- a/compat/mmap.c +++ b/compat/mmap.c @@ -7,7 +7,12 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t of if (start != NULL || flags != MAP_PRIVATE || prot != PROT_READ) die("Invalid usage of mmap when built with NO_MMAP"); - start = xmalloc(length); + if (length == 0) { + errno = EINVAL; + return MAP_FAILED; + } + + start = malloc(length); if (start == NULL) { errno = ENOMEM; return MAP_FAILED; diff --git a/compat/nedmalloc/nedmalloc.c b/compat/nedmalloc/nedmalloc.c index 1cc31c3502..edb438a777 100644 --- a/compat/nedmalloc/nedmalloc.c +++ b/compat/nedmalloc/nedmalloc.c @@ -510,7 +510,7 @@ static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *me assert(idx<=THREADCACHEMAXBINS); if(tck==*binsptr) { - fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", tck); + fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", (void *)tck); abort(); } #ifdef FULLSANITYCHECKS diff --git a/compat/win32/lazyload.h b/compat/win32/lazyload.h index 9e631c8593..d2056cdadf 100644 --- a/compat/win32/lazyload.h +++ b/compat/win32/lazyload.h @@ -37,7 +37,7 @@ struct proc_addr { #define INIT_PROC_ADDR(function) \ (function = get_proc_addr(&proc_addr_##function)) -static inline void *get_proc_addr(struct proc_addr *proc) +static inline FARPROC get_proc_addr(struct proc_addr *proc) { /* only do this once */ if (!proc->initialized) { @@ -76,7 +76,6 @@ static struct key_value_info *current_config_kvi; */ static enum config_scope current_parsing_scope; -static int core_compression_seen; static int pack_compression_seen; static int zlib_compression_seen; @@ -1400,8 +1399,6 @@ static int git_default_core_config(const char *var, const char *value, void *cb) level = Z_DEFAULT_COMPRESSION; else if (level < 0 || level > Z_BEST_COMPRESSION) die(_("bad zlib compression level %d"), level); - core_compression_level = level; - core_compression_seen = 1; if (!zlib_compression_seen) zlib_compression_level = level; if (!pack_compression_seen) @@ -1796,6 +1793,7 @@ int git_config_from_mem(config_fn_t fn, int git_config_from_blob_oid(config_fn_t fn, const char *name, + struct repository *repo, const struct object_id *oid, void *data) { @@ -1804,7 +1802,7 @@ int git_config_from_blob_oid(config_fn_t fn, unsigned long size; int ret; - buf = read_object_file(oid, &type, &size); + buf = repo_read_object_file(repo, oid, &type, &size); if (!buf) return error(_("unable to load config blob object '%s'"), name); if (type != OBJ_BLOB) { @@ -1820,14 +1818,15 @@ int git_config_from_blob_oid(config_fn_t fn, } static int git_config_from_blob_ref(config_fn_t fn, + struct repository *repo, const char *name, void *data) { struct object_id oid; - if (get_oid(name, &oid) < 0) + if (repo_get_oid(repo, name, &oid) < 0) return error(_("unable to resolve config blob '%s'"), name); - return git_config_from_blob_oid(fn, name, &oid, data); + return git_config_from_blob_oid(fn, name, repo, &oid, data); } char *git_system_config(void) @@ -1958,12 +1957,16 @@ int config_with_options(config_fn_t fn, void *data, * If we have a specific filename, use it. Otherwise, follow the * regular lookup sequence. 
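	 *
	 * For example (hypothetical sketch, not part of this change), a
	 * caller that wants to read configuration from a blob in another
	 * repository would set up something like:
	 *
	 *     struct git_config_source src = { .repo = subrepo,
	 *                                      .blob = "HEAD:.gitmodules" };
	 *     config_with_options(fn, data, &src, &opts);
	 *
	 * Leaving .repo NULL keeps the historical behaviour of reading the
	 * blob from the_repository.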
*/ - if (config_source && config_source->use_stdin) + if (config_source && config_source->use_stdin) { return git_config_from_stdin(fn, data); - else if (config_source && config_source->file) + } else if (config_source && config_source->file) { return git_config_from_file(fn, config_source->file, data); - else if (config_source && config_source->blob) - return git_config_from_blob_ref(fn, config_source->blob, data); + } else if (config_source && config_source->blob) { + struct repository *repo = config_source->repo ? + config_source->repo : the_repository; + return git_config_from_blob_ref(fn, repo, config_source->blob, + data); + } return do_git_config_sequence(opts, fn, data); } @@ -49,6 +49,8 @@ const char *config_scope_name(enum config_scope scope); struct git_config_source { unsigned int use_stdin:1; const char *file; + /* The repository if blob is not NULL; leave blank for the_repository */ + struct repository *repo; const char *blob; enum config_scope scope; }; @@ -136,6 +138,7 @@ int git_config_from_mem(config_fn_t fn, const char *buf, size_t len, void *data, const struct config_options *opts); int git_config_from_blob_oid(config_fn_t fn, const char *name, + struct repository *repo, const struct object_id *oid, void *data); void git_config_push_parameter(const char *text); void git_config_push_env(const char *spec); @@ -606,7 +609,6 @@ int git_config_get_maybe_bool(const char *key, int *dest); int git_config_get_pathname(const char *key, const char **dest); int git_config_get_index_threads(int *dest); -int git_config_get_untracked_cache(void); int git_config_get_split_index(void); int git_config_get_max_percent_split_change(void); int git_config_get_fsmonitor(void); diff --git a/config.mak.dev b/config.mak.dev index 022fb58218..c080ac0231 100644 --- a/config.mak.dev +++ b/config.mak.dev @@ -1,13 +1,20 @@ +ifndef COMPILER_FEATURES +COMPILER_FEATURES := $(shell ./detect-compiler $(CC)) +endif + ifeq ($(filter no-error,$(DEVOPTS)),) DEVELOPER_CFLAGS += -Werror SPARSE_FLAGS += -Wsparse-error endif -ifneq ($(filter pedantic,$(DEVOPTS)),) +DEVELOPER_CFLAGS += -Wall +ifeq ($(filter no-pedantic,$(DEVOPTS)),) DEVELOPER_CFLAGS += -pedantic -# don't warn for each N_ use -DEVELOPER_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0 +DEVELOPER_CFLAGS += -Wpedantic +ifneq ($(filter gcc5,$(COMPILER_FEATURES)),) +DEVELOPER_CFLAGS += -Wno-pedantic-ms-format +DEVELOPER_CFLAGS += -Wno-incompatible-pointer-types +endif endif -DEVELOPER_CFLAGS += -Wall DEVELOPER_CFLAGS += -Wdeclaration-after-statement DEVELOPER_CFLAGS += -Wformat-security DEVELOPER_CFLAGS += -Wold-style-definition @@ -18,10 +25,6 @@ DEVELOPER_CFLAGS += -Wunused DEVELOPER_CFLAGS += -Wvla DEVELOPER_CFLAGS += -fno-common -ifndef COMPILER_FEATURES -COMPILER_FEATURES := $(shell ./detect-compiler $(CC)) -endif - ifneq ($(filter clang4,$(COMPILER_FEATURES)),) DEVELOPER_CFLAGS += -Wtautological-constant-out-of-range-compare endif @@ -557,6 +557,8 @@ const char *parse_feature_value(const char *feature_list, const char *feature, i if (!*value || isspace(*value)) { if (lenp) *lenp = 0; + if (offset) + *offset = found + len - feature_list; return value; } /* feature with a value (e.g., "agent=git/1.2.3") */ diff --git a/connected.c b/connected.c index b5f9523a5f..cf68e37a97 100644 --- a/connected.c +++ b/connected.c @@ -24,7 +24,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data, struct child_process rev_list = CHILD_PROCESS_INIT; FILE *rev_list_in; struct check_connected_options defaults = CHECK_CONNECTED_INIT; - struct object_id oid; + const 
struct object_id *oid; int err = 0; struct packed_git *new_pack = NULL; struct transport *transport; @@ -34,7 +34,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data, opt = &defaults; transport = opt->transport; - if (fn(cb_data, &oid)) { + oid = fn(cb_data); + if (!oid) { if (opt->err_fd) close(opt->err_fd); return err; @@ -73,7 +74,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data, for (p = get_all_packs(the_repository); p; p = p->next) { if (!p->pack_promisor) continue; - if (find_pack_entry_one(oid.hash, p)) + if (find_pack_entry_one(oid->hash, p)) goto promisor_pack_found; } /* @@ -83,7 +84,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data, goto no_promisor_pack_found; promisor_pack_found: ; - } while (!fn(cb_data, &oid)); + } while ((oid = fn(cb_data)) != NULL); return 0; } @@ -133,12 +134,12 @@ no_promisor_pack_found: * are sure the ref is good and not sending it to * rev-list for verification. */ - if (new_pack && find_pack_entry_one(oid.hash, new_pack)) + if (new_pack && find_pack_entry_one(oid->hash, new_pack)) continue; - if (fprintf(rev_list_in, "%s\n", oid_to_hex(&oid)) < 0) + if (fprintf(rev_list_in, "%s\n", oid_to_hex(oid)) < 0) break; - } while (!fn(cb_data, &oid)); + } while ((oid = fn(cb_data)) != NULL); if (ferror(rev_list_in) || fflush(rev_list_in)) { if (errno != EPIPE && errno != EINVAL) diff --git a/connected.h b/connected.h index 8d5a6b3ad6..6e59c92aa3 100644 --- a/connected.h +++ b/connected.h @@ -9,7 +9,7 @@ struct transport; * When called after returning the name for the last object, return -1 * to signal EOF, otherwise return 0. */ -typedef int (*oid_iterate_fn)(void *, struct object_id *oid); +typedef const struct object_id *(*oid_iterate_fn)(void *); /* * Named-arguments struct for check_connected. 
All arguments are diff --git a/contrib/coccinelle/xopen.cocci b/contrib/coccinelle/xopen.cocci new file mode 100644 index 0000000000..b71db67019 --- /dev/null +++ b/contrib/coccinelle/xopen.cocci @@ -0,0 +1,19 @@ +@@ +identifier fd; +identifier die_fn =~ "^(die|die_errno)$"; +@@ + int fd = +- open ++ xopen + (...); +- if ( \( fd < 0 \| fd == -1 \) ) { die_fn(...); } + +@@ +expression fd; +identifier die_fn =~ "^(die|die_errno)$"; +@@ + fd = +- open ++ xopen + (...); +- if ( \( fd < 0 \| fd == -1 \) ) { die_fn(...); } diff --git a/credential.c b/credential.c index 3c05c7c669..000ac7a8d4 100644 --- a/credential.c +++ b/credential.c @@ -128,6 +128,7 @@ static void credential_apply_config(struct credential *c) normalized_url = url_normalize(url.buf, &config.url); git_config(urlmatch_config_entry, &config); + string_list_clear(&config.vars, 1); free(normalized_url); strbuf_release(&url); diff --git a/csum-file.c b/csum-file.c index c951cf8277..26e8a6df44 100644 --- a/csum-file.c +++ b/csum-file.c @@ -131,12 +131,8 @@ struct hashfile *hashfd_check(const char *name) int sink, check; struct hashfile *f; - sink = open("/dev/null", O_WRONLY); - if (sink < 0) - die_errno("unable to open /dev/null"); - check = open(name, O_RDONLY); - if (check < 0) - die_errno("unable to open '%s'", name); + sink = xopen("/dev/null", O_WRONLY); + check = xopen(name, O_RDONLY); f = hashfd(sink, name); f->check_fd = check; f->check_buffer = xmalloc(f->buffer_len); diff --git a/detect-compiler b/detect-compiler index 70b754481c..11d60da5b7 100755 --- a/detect-compiler +++ b/detect-compiler @@ -13,11 +13,11 @@ get_version_line() { } get_family() { - get_version_line | sed 's/^\(.*\) version [0-9][^ ]* .*/\1/' + get_version_line | sed 's/^\(.*\) version [0-9].*/\1/' } get_version() { - get_version_line | sed 's/^.* version \([0-9][^ ]*\) .*/\1/' + get_version_line | sed 's/^.* version \([0-9][^ ]*\).*/\1/' } print_flags() { @@ -38,10 +38,7 @@ case "$(get_family)" in gcc) print_flags gcc ;; -clang) - print_flags clang - ;; -"FreeBSD clang") +clang | *" clang") print_flags clang ;; "Apple LLVM") diff --git a/diff-lib.c b/diff-lib.c index f9eadc4fc1..ca085a03ef 100644 --- a/diff-lib.c +++ b/diff-lib.c @@ -117,6 +117,10 @@ int run_diff_files(struct rev_info *revs, unsigned int option) if (!ce_path_match(istate, ce, &revs->prune_data, NULL)) continue; + if (revs->diffopt.prefix && + strncmp(ce->name, revs->diffopt.prefix, revs->diffopt.prefix_length)) + continue; + if (ce_stage(ce)) { struct combine_diff_path *dpath; struct diff_filepair *pair; diff --git a/diff-merges.c b/diff-merges.c index d897fd8a29..5060ccd890 100644 --- a/diff-merges.c +++ b/diff-merges.c @@ -6,7 +6,7 @@ typedef void (*diff_merges_setup_func_t)(struct rev_info *); static void set_separate(struct rev_info *revs); static diff_merges_setup_func_t set_to_default = set_separate; -static int suppress_parsing; +static int suppress_m_parsing; static void suppress(struct rev_info *revs) { @@ -91,9 +91,9 @@ int diff_merges_config(const char *value) return 0; } -void diff_merges_suppress_options_parsing(void) +void diff_merges_suppress_m_parsing(void) { - suppress_parsing = 1; + suppress_m_parsing = 1; } int diff_merges_parse_opts(struct rev_info *revs, const char **argv) @@ -102,10 +102,7 @@ int diff_merges_parse_opts(struct rev_info *revs, const char **argv) const char *optarg; const char *arg = argv[0]; - if (suppress_parsing) - return 0; - - if (!strcmp(arg, "-m")) { + if (!suppress_m_parsing && !strcmp(arg, "-m")) { set_to_default(revs); } else if (!strcmp(arg, 
"-c")) { set_combined(revs); @@ -153,9 +150,6 @@ void diff_merges_set_dense_combined_if_unset(struct rev_info *revs) void diff_merges_setup_revs(struct rev_info *revs) { - if (suppress_parsing) - return; - if (revs->combine_merges == 0) revs->dense_combined_merges = 0; if (revs->separate_merges == 0) diff --git a/diff-merges.h b/diff-merges.h index b5d57f6563..19639689bb 100644 --- a/diff-merges.h +++ b/diff-merges.h @@ -11,7 +11,7 @@ struct rev_info; int diff_merges_config(const char *value); -void diff_merges_suppress_options_parsing(void); +void diff_merges_suppress_m_parsing(void); int diff_merges_parse_opts(struct rev_info *revs, const char **argv); @@ -26,6 +26,7 @@ #include "parse-options.h" #include "help.h" #include "promisor-remote.h" +#include "dir.h" #ifdef NO_FAST_WORKING_DIRECTORY #define FAST_WORKING_DIRECTORY 0 @@ -3907,6 +3908,13 @@ static int reuse_worktree_file(struct index_state *istate, if (!want_file && would_convert_to_git(istate, name)) return 0; + /* + * If this path does not match our sparse-checkout definition, + * then the file will not be in the working directory. + */ + if (!path_in_sparse_checkout(name, istate)) + return 0; + len = strlen(name); pos = index_name_pos(istate, name, len); if (pos < 0) @@ -1439,6 +1439,58 @@ done: return result; } +int init_sparse_checkout_patterns(struct index_state *istate) +{ + if (!core_apply_sparse_checkout) + return 1; + if (istate->sparse_checkout_patterns) + return 0; + + CALLOC_ARRAY(istate->sparse_checkout_patterns, 1); + + if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0) { + FREE_AND_NULL(istate->sparse_checkout_patterns); + return -1; + } + + return 0; +} + +static int path_in_sparse_checkout_1(const char *path, + struct index_state *istate, + int require_cone_mode) +{ + const char *base; + int dtype = DT_REG; + + /* + * We default to accepting a path if there are no patterns or + * they are of the wrong type. + */ + if (init_sparse_checkout_patterns(istate) || + (require_cone_mode && + !istate->sparse_checkout_patterns->use_cone_patterns)) + return 1; + + base = strrchr(path, '/'); + return path_matches_pattern_list(path, strlen(path), base ? base + 1 : path, + &dtype, + istate->sparse_checkout_patterns, + istate) > 0; +} + +int path_in_sparse_checkout(const char *path, + struct index_state *istate) +{ + return path_in_sparse_checkout_1(path, istate, 0); +} + +int path_in_cone_mode_sparse_checkout(const char *path, + struct index_state *istate) +{ + return path_in_sparse_checkout_1(path, istate, 1); +} + static struct path_pattern *last_matching_pattern_from_lists( struct dir_struct *dir, struct index_state *istate, const char *pathname, int pathlen, @@ -2970,6 +3022,120 @@ int is_empty_dir(const char *path) return ret; } +char *git_url_basename(const char *repo, int is_bundle, int is_bare) +{ + const char *end = repo + strlen(repo), *start, *ptr; + size_t len; + char *dir; + + /* + * Skip scheme. + */ + start = strstr(repo, "://"); + if (start == NULL) + start = repo; + else + start += 3; + + /* + * Skip authentication data. The stripping does happen + * greedily, such that we strip up to the last '@' inside + * the host part. 
+ */ + for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) { + if (*ptr == '@') + start = ptr + 1; + } + + /* + * Strip trailing spaces, slashes and /.git + */ + while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1]))) + end--; + if (end - start > 5 && is_dir_sep(end[-5]) && + !strncmp(end - 4, ".git", 4)) { + end -= 5; + while (start < end && is_dir_sep(end[-1])) + end--; + } + + /* + * Strip trailing port number if we've got only a + * hostname (that is, there is no dir separator but a + * colon). This check is required such that we do not + * strip URI's like '/foo/bar:2222.git', which should + * result in a dir '2222' being guessed due to backwards + * compatibility. + */ + if (memchr(start, '/', end - start) == NULL + && memchr(start, ':', end - start) != NULL) { + ptr = end; + while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':') + ptr--; + if (start < ptr && ptr[-1] == ':') + end = ptr - 1; + } + + /* + * Find last component. To remain backwards compatible we + * also regard colons as path separators, such that + * cloning a repository 'foo:bar.git' would result in a + * directory 'bar' being guessed. + */ + ptr = end; + while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':') + ptr--; + start = ptr; + + /* + * Strip .{bundle,git}. + */ + len = end - start; + strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git"); + + if (!len || (len == 1 && *start == '/')) + die(_("No directory name could be guessed.\n" + "Please specify a directory on the command line")); + + if (is_bare) + dir = xstrfmt("%.*s.git", (int)len, start); + else + dir = xstrndup(start, len); + /* + * Replace sequences of 'control' characters and whitespace + * with one ascii space, remove leading and trailing spaces. + */ + if (*dir) { + char *out = dir; + int prev_space = 1 /* strip leading whitespace */; + for (end = dir; *end; ++end) { + char ch = *end; + if ((unsigned char)ch < '\x20') + ch = '\x20'; + if (isspace(ch)) { + if (prev_space) + continue; + prev_space = 1; + } else + prev_space = 0; + *out++ = ch; + } + *out = '\0'; + if (out > dir && prev_space) + out[-1] = '\0'; + } + return dir; +} + +void strip_dir_trailing_slashes(char *dir) +{ + char *end = dir + strlen(dir); + + while (dir < end - 1 && is_dir_sep(end[-1])) + end--; + *end = '\0'; +} + static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up) { DIR *dir; @@ -3633,7 +3799,7 @@ static void connect_wt_gitdir_in_nested(const char *sub_worktree, strbuf_reset(&sub_wt); strbuf_reset(&sub_gd); strbuf_addf(&sub_wt, "%s/%s", sub_worktree, sub->path); - strbuf_addf(&sub_gd, "%s/modules/%s", sub_gitdir, sub->name); + submodule_name_to_gitdir(&sub_gd, &subrepo, sub->name); connect_work_tree_and_git_dir(sub_wt.buf, sub_gd.buf, 1); } @@ -394,6 +394,14 @@ enum pattern_match_result path_matches_pattern_list(const char *pathname, const char *basename, int *dtype, struct pattern_list *pl, struct index_state *istate); + +int init_sparse_checkout_patterns(struct index_state *state); + +int path_in_sparse_checkout(const char *path, + struct index_state *istate); +int path_in_cone_mode_sparse_checkout(const char *path, + struct index_state *istate); + struct dir_entry *dir_add_ignored(struct dir_struct *dir, struct index_state *istate, const char *pathname, int len); @@ -453,6 +461,17 @@ static inline int is_dot_or_dotdot(const char *name) int is_empty_dir(const char *dir); +/* + * Retrieve the "humanish" basename of the given Git URL. 
+ * + * For example: + * /path/to/repo.git => "repo" + * host.xz:foo/.git => "foo" + * http://example.com/user/bar.baz => "bar.baz" + */ +char *git_url_basename(const char *repo, int is_bundle, int is_bare); +void strip_dir_trailing_slashes(char *dir); + void setup_standard_excludes(struct dir_struct *dir); char *get_sparse_checkout_filename(void); @@ -58,7 +58,7 @@ static int launch_specified_editor(const char *editor, const char *path, const char *args[] = { editor, NULL, NULL }; struct child_process p = CHILD_PROCESS_INIT; int ret, sig; - int print_waiting_for_editor = advice_waiting_for_editor && isatty(2); + int print_waiting_for_editor = advice_enabled(ADVICE_WAITING_FOR_EDITOR) && isatty(2); if (print_waiting_for_editor) { /* @@ -159,25 +159,25 @@ static int remove_available_paths(struct string_list_item *item, void *cb_data) return !available; } -int finish_delayed_checkout(struct checkout *state, int *nr_checkouts) +int finish_delayed_checkout(struct checkout *state, int *nr_checkouts, + int show_progress) { int errs = 0; - unsigned delayed_object_count; + unsigned processed_paths = 0; off_t filtered_bytes = 0; struct string_list_item *filter, *path; - struct progress *progress; + struct progress *progress = NULL; struct delayed_checkout *dco = state->delayed_checkout; if (!state->delayed_checkout) return errs; dco->state = CE_RETRY; - delayed_object_count = dco->paths.nr; - progress = start_delayed_progress(_("Filtering content"), delayed_object_count); + if (show_progress) + progress = start_delayed_progress(_("Filtering content"), dco->paths.nr); while (dco->filters.nr > 0) { for_each_string_list_item(filter, &dco->filters) { struct string_list available_paths = STRING_LIST_INIT_NODUP; - display_progress(progress, delayed_object_count - dco->paths.nr); if (!async_query_available_blobs(filter->string, &available_paths)) { /* Filter reported an error */ @@ -224,6 +224,7 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts) ce = index_file_exists(state->istate, path->string, strlen(path->string), 0); if (ce) { + display_progress(progress, ++processed_paths); errs |= checkout_entry(ce, state, NULL, nr_checkouts); filtered_bytes += ce->ce_stat_data.sd_size; display_throughput(progress, filtered_bytes); @@ -43,7 +43,8 @@ static inline int checkout_entry(struct cache_entry *ce, } void enable_delayed_checkout(struct checkout *state); -int finish_delayed_checkout(struct checkout *state, int *nr_checkouts); +int finish_delayed_checkout(struct checkout *state, int *nr_checkouts, + int show_progress); /* * Unlink the last component and schedule the leading directories for diff --git a/environment.c b/environment.c index d6b22ede7e..43bb1b35ff 100644 --- a/environment.c +++ b/environment.c @@ -41,7 +41,6 @@ char *apply_default_ignorewhitespace; const char *git_attributes_file; const char *git_hooks_path; int zlib_compression_level = Z_BEST_SPEED; -int core_compression_level; int pack_compression_level = Z_DEFAULT_COMPRESSION; int fsync_object_files; size_t packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE; @@ -96,13 +95,6 @@ int auto_comment_line_char; /* Parallel index stat data preload? */ int core_preload_index = 1; -/* - * This is a hack for test programs like test-dump-untracked-cache to - * ensure that they do not modify the untracked cache when reading it. - * Do not use it otherwise! 
- */ -int ignore_untracked_cache_config; - /* This is set by setup_git_dir_gently() and/or git_default_config() */ char *git_work_tree_cfg; @@ -330,8 +322,7 @@ char *get_graft_file(struct repository *r) static void set_git_dir_1(const char *path) { - if (setenv(GIT_DIR_ENVIRONMENT, path, 1)) - die(_("could not set GIT_DIR to '%s'"), path); + xsetenv(GIT_DIR_ENVIRONMENT, path, 1); setup_git_env(path); } diff --git a/fetch-negotiator.c b/fetch-negotiator.c index 57ed5784e1..273390229f 100644 --- a/fetch-negotiator.c +++ b/fetch-negotiator.c @@ -19,7 +19,6 @@ void fetch_negotiator_init(struct repository *r, return; case FETCH_NEGOTIATION_DEFAULT: - default: default_negotiator_init(negotiator); return; } diff --git a/fetch-pack.c b/fetch-pack.c index 0bf7ed7e47..a9604f35a3 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -119,6 +119,11 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid, { enum object_type type; struct object_info info = { .typep = &type }; + struct commit *commit; + + commit = lookup_commit_in_graph(the_repository, oid); + if (commit) + return commit; while (1) { if (oid_object_info_extended(the_repository, oid, &info, @@ -1912,16 +1917,15 @@ static void update_shallow(struct fetch_pack_args *args, oid_array_clear(&ref); } -static int iterate_ref_map(void *cb_data, struct object_id *oid) +static const struct object_id *iterate_ref_map(void *cb_data) { struct ref **rm = cb_data; struct ref *ref = *rm; if (!ref) - return -1; /* end of the list */ + return NULL; *rm = ref->next; - oidcpy(oid, &ref->old_oid); - return 0; + return &ref->old_oid; } struct ref *fetch_pack(struct fetch_pack_args *args, @@ -55,31 +55,7 @@ const char *Q_(const char *msgid, const char *plu, unsigned long n) } /* Mark msgid for translation but do not translate it. */ -#if !USE_PARENS_AROUND_GETTEXT_N #define N_(msgid) msgid -#else -/* - * Strictly speaking, this will lead to invalid C when - * used this way: - * static const char s[] = N_("FOO"); - * which will expand to - * static const char s[] = ("FOO"); - * and in valid C, the initializer on the right hand side must - * be without the parentheses. But many compilers do accept it - * as a language extension and it will allow us to catch mistakes - * like: - * static const char *msgs[] = { - * N_("one") - * N_("two"), - * N_("three"), - * NULL - * }; - * (notice the missing comma on one of the lines) by forcing - * a compilation error, because parenthesised ("one") ("two") - * will not get silently turned into ("onetwo"). - */ -#define N_(msgid) (msgid) -#endif const char *get_preferred_languages(void); int is_utf8_locale(void); diff --git a/git-bisect.sh b/git-bisect.sh index 6a7afaea8d..405cf76f2a 100755 --- a/git-bisect.sh +++ b/git-bisect.sh @@ -34,94 +34,9 @@ Please use "git help bisect" to get the full man page.' OPTIONS_SPEC= . 
git-sh-setup -_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]' -_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40" TERM_BAD=bad TERM_GOOD=good -bisect_visualize() { - git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit - - if test $# = 0 - then - if test -n "${DISPLAY+set}${SESSIONNAME+set}${MSYSTEM+set}${SECURITYSESSIONID+set}" && - type gitk >/dev/null 2>&1 - then - set gitk - else - set git log - fi - else - case "$1" in - git*|tig) ;; - -*) set git log "$@" ;; - *) set git "$@" ;; - esac - fi - - eval '"$@"' --bisect -- $(cat "$GIT_DIR/BISECT_NAMES") -} - -bisect_run () { - git bisect--helper --bisect-next-check $TERM_GOOD $TERM_BAD fail || exit - - test -n "$*" || die "$(gettext "bisect run failed: no command provided.")" - - while true - do - command="$@" - eval_gettextln "running \$command" - "$@" - res=$? - - # Check for really bad run error. - if [ $res -lt 0 -o $res -ge 128 ] - then - eval_gettextln "bisect run failed: -exit code \$res from '\$command' is < 0 or >= 128" >&2 - exit $res - fi - - # Find current state depending on run success or failure. - # A special exit code of 125 means cannot test. - if [ $res -eq 125 ] - then - state='skip' - elif [ $res -gt 0 ] - then - state="$TERM_BAD" - else - state="$TERM_GOOD" - fi - - git bisect--helper --bisect-state $state >"$GIT_DIR/BISECT_RUN" - res=$? - - cat "$GIT_DIR/BISECT_RUN" - - if sane_grep "first $TERM_BAD commit could be any of" "$GIT_DIR/BISECT_RUN" \ - >/dev/null - then - gettextln "bisect run cannot continue any more" >&2 - exit $res - fi - - if [ $res -ne 0 ] - then - eval_gettextln "bisect run failed: -'bisect-state \$state' exited with error code \$res" >&2 - exit $res - fi - - if sane_grep "is the first $TERM_BAD commit" "$GIT_DIR/BISECT_RUN" >/dev/null - then - gettextln "bisect run success" - exit 0; - fi - - done -} - get_terms () { if test -s "$GIT_DIR/BISECT_TERMS" then @@ -152,7 +67,7 @@ case "$#" in # Not sure we want "next" at the UI level anymore. 
git bisect--helper --bisect-next "$@" || exit ;; visualize|view) - bisect_visualize "$@" ;; + git bisect--helper --bisect-visualize "$@" || exit;; reset) git bisect--helper --bisect-reset "$@" ;; replay) @@ -160,7 +75,7 @@ case "$#" in log) git bisect--helper --bisect-log || exit ;; run) - bisect_run "$@" ;; + git bisect--helper --bisect-run "$@" || exit;; terms) git bisect--helper --bisect-terms "$@" || exit;; *) diff --git a/git-compat-util.h b/git-compat-util.h index b46605300a..141bb86351 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -160,6 +160,9 @@ # endif #define WIN32_LEAN_AND_MEAN /* stops windows.h including winsock.h */ #include <winsock2.h> +#ifndef NO_UNIX_SOCKETS +#include <afunix.h> +#endif #include <windows.h> #define GIT_WINDOWS_NATIVE #endif @@ -875,6 +878,8 @@ void *xmemdupz(const void *data, size_t len); char *xstrndup(const char *str, size_t len); void *xrealloc(void *ptr, size_t size); void *xcalloc(size_t nmemb, size_t size); +void xsetenv(const char *name, const char *value, int overwrite); +void xunsetenv(const char *name); void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); const char *mmap_os_err(void); void *xmmap_gently(void *start, size_t length, int prot, int flags, int fd, off_t offset); @@ -1253,10 +1258,6 @@ int warn_on_fopen_errors(const char *path); */ int open_nofollow(const char *path, int flags); -#if !defined(USE_PARENS_AROUND_GETTEXT_N) && defined(__GNUC__) -#define USE_PARENS_AROUND_GETTEXT_N 1 -#endif - #ifndef SHELL_PATH # define SHELL_PATH "/bin/sh" #endif diff --git a/git-curl-compat.h b/git-curl-compat.h new file mode 100644 index 0000000000..a308bdb3b9 --- /dev/null +++ b/git-curl-compat.h @@ -0,0 +1,128 @@ +#ifndef GIT_CURL_COMPAT_H +#define GIT_CURL_COMPAT_H +#include <curl/curl.h> + +/** + * This header centralizes the declaration of our libcurl dependencies + * to make it easy to discover the oldest versions we support, and to + * inform decisions about removing support for older libcurl in the + * future. + * + * The oldest supported version of curl is documented in the "INSTALL" + * document. + * + * The source of truth for what versions have which symbols is + * https://github.com/curl/curl/blob/master/docs/libcurl/symbols-in-versions; + * the release dates are taken from curl.git (at + * https://github.com/curl/curl/). + * + * For each X symbol we need from curl we define our own + * GIT_CURL_HAVE_X. If multiple similar symbols with the same prefix + * were defined in the same version we pick one and check for that name. + * + * We may also define a missing CURL_* symbol to its known value, if + * doing so is sufficient to add support for it to older versions that + * don't have it. + * + * Keep any symbols in date order of when their support was + * introduced, oldest first, in the official version of cURL library. + */ + +/** + * CURL_SOCKOPT_OK was added in 7.21.5, released in April 2011. + */ +#if LIBCURL_VERSION_NUM < 0x071505 +#define CURL_SOCKOPT_OK 0 +#endif + +/** + * CURLOPT_TCP_KEEPALIVE was added in 7.25.0, released in March 2012. + */ +#if LIBCURL_VERSION_NUM >= 0x071900 +#define GITCURL_HAVE_CURLOPT_TCP_KEEPALIVE 1 +#endif + + +/** + * CURLOPT_LOGIN_OPTIONS was added in 7.34.0, released in December + * 2013. + * + * If we start requiring 7.34.0 we might also be able to remove the + * code conditional on USE_CURL_FOR_IMAP_SEND in imap-send.c, see + * 1e16b255b95 (git-imap-send: use libcurl for implementation, + * 2014-11-09) and the check it added for "072200" in the Makefile. 
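+ *
+ * As with the other GIT_CURL_HAVE_X macros in this header, the
+ * intended usage pattern at a call site is a plain guard, e.g.
+ * (sketch only):
+ *
+ *     #ifdef GIT_CURL_HAVE_CURLOPT_LOGIN_OPTIONS
+ *             curl_easy_setopt(curl, CURLOPT_LOGIN_OPTIONS, "AUTH=NTLM");
+ *     #endif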
+ + */ +#if LIBCURL_VERSION_NUM >= 0x072200 +#define GIT_CURL_HAVE_CURLOPT_LOGIN_OPTIONS 1 +#endif + +/** + * CURL_SSLVERSION_TLSv1_[012] was added in 7.34.0, released in + * December 2013. + */ +#if LIBCURL_VERSION_NUM >= 0x072200 +#define GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_0 +#endif + +/** + * CURLOPT_PINNEDPUBLICKEY was added in 7.39.0, released in November + * 2014. + */ +#if LIBCURL_VERSION_NUM >= 0x072c00 +#define GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY 1 +#endif + +/** + * CURL_HTTP_VERSION_2 was added in 7.43.0, released in June 2015. + * + * The CURL_HTTP_VERSION_2 alias (but not CURL_HTTP_VERSION_2_0) has + * always been a macro, not an enum field (checked on curl version + * 7.78.0) + */ +#if LIBCURL_VERSION_NUM >= 0x072b00 +#define GIT_CURL_HAVE_CURL_HTTP_VERSION_2 1 +#endif + +/** + * CURLSSLOPT_NO_REVOKE was added in 7.44.0, released in August 2015. + * + * The CURLSSLOPT_NO_REVOKE is, has always been a macro, not an enum + * field (checked on curl version 7.78.0) + */ +#if LIBCURL_VERSION_NUM >= 0x072c00 +#define GIT_CURL_HAVE_CURLSSLOPT_NO_REVOKE 1 +#endif + +/** + * CURLOPT_PROXY_CAINFO was added in 7.52.0, released in August 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073400 +#define GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO 1 +#endif + +/** + * CURLOPT_PROXY_{KEYPASSWD,SSLCERT,SSLKEY} was added in 7.52.0, + * released in August 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073400 +#define GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD 1 +#endif + +/** + * CURL_SSLVERSION_TLSv1_3 was added in 7.53.0, released in February + * 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073400 +#define GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_3 1 +#endif + +/** + * CURLSSLSET_{NO_BACKENDS,OK,TOO_LATE,UNKNOWN_BACKEND} were added in + * 7.56.0, released in September 2017. + */ +#if LIBCURL_VERSION_NUM >= 0x073800 +#define GIT_CURL_HAVE_CURLSSLSET_NO_BACKENDS +#endif + +#endif diff --git a/git-cvsserver.perl b/git-cvsserver.perl index ed035f32c2..64319bed43 100755 --- a/git-cvsserver.perl +++ b/git-cvsserver.perl @@ -222,10 +222,11 @@ if ($state->{method} eq 'pserver') { open my $passwd, "<", $authdb or die $!; while (<$passwd>) { if (m{^\Q$user\E:(.*)}) { - if (crypt($user, descramble($password)) eq $1) { + my $hash = crypt(descramble($password), $1); + if (defined $hash and $hash eq $1) { $auth_ok = 1; } - }; + } } close $passwd; diff --git a/git-send-email.perl b/git-send-email.perl index e65d969d0b..5262d88ee3 100755 --- a/git-send-email.perl +++ b/git-send-email.perl @@ -376,7 +376,7 @@ sub read_config { @$target = @values; } else { - my $v = $known_keys->{$key}->[0]; + my $v = $known_keys->{$key}->[-1]; next unless defined $v; next if $configured->{$setting}++; $$target = $v; @@ -1697,7 +1697,6 @@ EOF $in_reply_to = $initial_in_reply_to; $references = $initial_in_reply_to || ''; -$subject = $initial_subject; $message_num = 0; # Prepares the email, prompts the user, sends it out @@ -1720,6 +1719,7 @@ sub process_file { @xh = (); my $input_format = undef; my @header = (); + $subject = $initial_subject; $message = ""; $message_num++; # First unfold multiline header fields @@ -1926,15 +1926,23 @@ sub process_file { } # set up for the next message - if ($thread && $message_was_sent && - ($chain_reply_to || !defined $in_reply_to || length($in_reply_to) == 0 || - $message_num == 1)) { - $in_reply_to = $message_id; - if (length $references > 0) { - $references .= "\n $message_id"; - } else { - $references = "$message_id"; + if ($thread) { + if ($message_was_sent && + ($chain_reply_to || !defined $in_reply_to || 
length($in_reply_to) == 0 || + $message_num == 1)) { + $in_reply_to = $message_id; + if (length $references > 0) { + $references .= "\n $message_id"; + } else { + $references = "$message_id"; + } } + } elsif (!defined $initial_in_reply_to) { + # --thread and --in-reply-to manage the "In-Reply-To" header and by + # extension the "References" header. If these commands are not used, reset + # the header values to their defaults. + $in_reply_to = undef; + $references = ''; } $message_id = undef; $num_sent++; diff --git a/git-sh-setup.sh b/git-sh-setup.sh index 10d9764185..cee053cdc3 100644 --- a/git-sh-setup.sh +++ b/git-sh-setup.sh @@ -223,9 +223,6 @@ require_clean_work_tree () { "rewrite branches") gettextln "Cannot rewrite branches: You have unstaged changes." >&2 ;; - "pull with rebase") - gettextln "Cannot pull with rebase: You have unstaged changes." >&2 - ;; *) eval_gettextln "Cannot \$action: You have unstaged changes." >&2 ;; @@ -242,9 +239,6 @@ require_clean_work_tree () { rebase) gettextln "Cannot rebase: Your index contains uncommitted changes." >&2 ;; - "pull with rebase") - gettextln "Cannot pull with rebase: Your index contains uncommitted changes." >&2 - ;; *) eval_gettextln "Cannot \$action: Your index contains uncommitted changes." >&2 ;; diff --git a/git-submodule.sh b/git-submodule.sh index dbd2ec2050..652861aa66 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -63,11 +63,6 @@ isnumber() n=$(($1 + 0)) 2>/dev/null && test "$n" = "$1" } -# Given a full hex object ID, is this the zero OID? -is_zero_oid () { - echo "$1" | sane_egrep '^0+$' >/dev/null 2>&1 -} - # Sanitize the local git environment for use within a submodule. We # can't simply use clear_local_git_env since we want to preserve some # of the settings from GIT_CONFIG_PARAMETERS. @@ -145,130 +140,12 @@ cmd_add() shift done - if ! 
git submodule--helper config --check-writeable >/dev/null 2>&1 - then - die "fatal: $(eval_gettext "please make sure that the .gitmodules file is in the working tree")" - fi - - if test -n "$reference_path" + if test -z "$1" then - is_absolute_path "$reference_path" || - reference_path="$wt_prefix$reference_path" - - reference="--reference=$reference_path" - fi - - repo=$1 - sm_path=$2 - - if test -z "$sm_path"; then - sm_path=$(printf '%s\n' "$repo" | - sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g') - fi - - if test -z "$repo" || test -z "$sm_path"; then usage fi - is_absolute_path "$sm_path" || sm_path="$wt_prefix$sm_path" - - # assure repo is absolute or relative to parent - case "$repo" in - ./*|../*) - test -z "$wt_prefix" || - die "fatal: $(gettext "Relative path can only be used from the toplevel of the working tree")" - - # dereference source url relative to parent's url - realrepo=$(git submodule--helper resolve-relative-url "$repo") || exit - ;; - *:*|/*) - # absolute url - realrepo=$repo - ;; - *) - die "fatal: $(eval_gettext "repo URL: '\$repo' must be absolute or begin with ./|../")" - ;; - esac - - # normalize path: - # multiple //; leading ./; /./; /../; trailing / - sm_path=$(printf '%s/\n' "$sm_path" | - sed -e ' - s|//*|/|g - s|^\(\./\)*|| - s|/\(\./\)*|/|g - :start - s|\([^/]*\)/\.\./|| - tstart - s|/*$|| - ') - if test -z "$force" - then - git ls-files --error-unmatch "$sm_path" > /dev/null 2>&1 && - die "fatal: $(eval_gettext "'\$sm_path' already exists in the index")" - else - git ls-files -s "$sm_path" | sane_grep -v "^160000" > /dev/null 2>&1 && - die "fatal: $(eval_gettext "'\$sm_path' already exists in the index and is not a submodule")" - fi - - if test -d "$sm_path" && - test -z $(git -C "$sm_path" rev-parse --show-cdup 2>/dev/null) - then - git -C "$sm_path" rev-parse --verify -q HEAD >/dev/null || - die "fatal: $(eval_gettext "'\$sm_path' does not have a commit checked out")" - fi - - if test -z "$force" - then - dryerr=$(git add --dry-run --ignore-missing --no-warn-embedded-repo "$sm_path" 2>&1 >/dev/null) - res=$? - if test $res -ne 0 - then - echo >&2 "$dryerr" - exit $res - fi - fi - - if test -n "$custom_name" - then - sm_name="$custom_name" - else - sm_name="$sm_path" - fi - - if ! git submodule--helper check-name "$sm_name" - then - die "fatal: $(eval_gettext "'$sm_name' is not a valid submodule name")" - fi - - git submodule--helper add-clone ${GIT_QUIET:+--quiet} ${force:+"--force"} ${progress:+"--progress"} ${branch:+--branch "$branch"} --prefix "$wt_prefix" --path "$sm_path" --name "$sm_name" --url "$realrepo" ${reference:+"$reference"} ${dissociate:+"--dissociate"} ${depth:+"$depth"} || exit - git config submodule."$sm_name".url "$realrepo" - - git add --no-warn-embedded-repo $force "$sm_path" || - die "fatal: $(eval_gettext "Failed to add submodule '\$sm_path'")" - - git submodule--helper config submodule."$sm_name".path "$sm_path" && - git submodule--helper config submodule."$sm_name".url "$repo" && - if test -n "$branch" - then - git submodule--helper config submodule."$sm_name".branch "$branch" - fi && - git add --force .gitmodules || - die "fatal: $(eval_gettext "Failed to register submodule '\$sm_path'")" - - # NEEDSWORK: In a multi-working-tree world, this needs to be - # set in the per-worktree config. - if git config --get submodule.active >/dev/null - then - # If the submodule being adding isn't already covered by the - # current configured pathspec, set the submodule's active flag - if ! 
git submodule--helper is-active "$sm_path" - then - git config submodule."$sm_name".active "true" - fi - else - git config submodule."$sm_name".active "true" - fi + git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper add ${GIT_QUIET:+--quiet} ${force:+--force} ${progress:+"--progress"} ${branch:+--branch "$branch"} ${reference_path:+--reference "$reference_path"} ${dissociate:+--dissociate} ${custom_name:+--name "$custom_name"} ${depth:+"$depth"} -- "$@" } # @@ -369,13 +246,6 @@ cmd_deinit() git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@" } -is_tip_reachable () ( - sanitize_submodule_env && - cd "$1" && - rev=$(git rev-list -n 1 "$2" --not --all 2>/dev/null) && - test -z "$rev" -) - # usage: fetch_in_submodule <module_path> [<depth>] [<sha1>] # Because arguments are positional, use an empty string to omit <depth> # but include <sha1>. @@ -519,14 +389,13 @@ cmd_update() git submodule--helper ensure-core-worktree "$sm_path" || exit 1 - update_module=$(git submodule--helper update-module-mode $just_cloned "$sm_path" $update) - displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix") if test $just_cloned -eq 1 then subsha1= else + just_cloned= subsha1=$(sanitize_submodule_env; cd "$sm_path" && git rev-parse --verify HEAD) || die "fatal: $(eval_gettext "Unable to find current revision in submodule path '\$displaypath'")" @@ -547,70 +416,38 @@ cmd_update() die "fatal: $(eval_gettext "Unable to find current \${remote_name}/\${branch} revision in submodule path '\$sm_path'")" fi - if test "$subsha1" != "$sha1" || test -n "$force" - then - subforce=$force - # If we don't already have a -f flag and the submodule has never been checked out - if test -z "$subsha1" && test -z "$force" - then - subforce="-f" - fi - - if test -z "$nofetch" - then - # Run fetch only if $sha1 isn't present or it - # is not reachable from a ref. - is_tip_reachable "$sm_path" "$sha1" || - fetch_in_submodule "$sm_path" $depth || - say "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'; trying to directly fetch \$sha1:")" - - # Now we tried the usual fetch, but $sha1 may - # not be reachable from any of the refs - is_tip_reachable "$sm_path" "$sha1" || - fetch_in_submodule "$sm_path" "$depth" "$sha1" || - die "fatal: $(eval_gettext "Fetched in submodule path '\$displaypath', but it did not contain \$sha1. 
Direct fetching of that commit failed.")" - fi - - must_die_on_failure= - case "$update_module" in - checkout) - command="git checkout $subforce -q" - die_msg="fatal: $(eval_gettext "Unable to checkout '\$sha1' in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': checked out '\$sha1'")" - ;; - rebase) - command="git rebase ${GIT_QUIET:+--quiet}" - die_msg="fatal: $(eval_gettext "Unable to rebase '\$sha1' in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': rebased into '\$sha1'")" - must_die_on_failure=yes - ;; - merge) - command="git merge ${GIT_QUIET:+--quiet}" - die_msg="fatal: $(eval_gettext "Unable to merge '\$sha1' in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': merged in '\$sha1'")" - must_die_on_failure=yes - ;; - !*) - command="${update_module#!}" - die_msg="fatal: $(eval_gettext "Execution of '\$command \$sha1' failed in submodule path '\$displaypath'")" - say_msg="$(eval_gettext "Submodule path '\$displaypath': '\$command \$sha1'")" - must_die_on_failure=yes - ;; - *) - die "fatal: $(eval_gettext "Invalid update mode '$update_module' for submodule path '$path'")" - esac - - if (sanitize_submodule_env; cd "$sm_path" && $command "$sha1") - then - say "$say_msg" - elif test -n "$must_die_on_failure" - then - die_with_status 2 "$die_msg" - else - err="${err};$die_msg" - continue - fi - fi + out=$(git submodule--helper run-update-procedure \ + ${wt_prefix:+--prefix "$wt_prefix"} \ + ${GIT_QUIET:+--quiet} \ + ${force:+--force} \ + ${just_cloned:+--just-cloned} \ + ${nofetch:+--no-fetch} \ + ${depth:+"$depth"} \ + ${update:+--update "$update"} \ + ${prefix:+--recursive-prefix "$prefix"} \ + ${sha1:+--oid "$sha1"} \ + ${subsha1:+--suboid "$subsha1"} \ + "--" \ + "$sm_path") + + # exit codes for run-update-procedure: + # 0: update was successful, say command output + # 1: update procedure failed, but should not die + # 2 or 128: subcommand died during execution + # 3: no update procedure was run + res="$?" + case $res in + 0) + say "$out" + ;; + 1) + err="${err};fatal: $out" + continue + ;; + 2|128) + die_with_status $res "fatal: $out" + ;; + esac if test -n "$recursive" then @@ -561,7 +561,7 @@ static struct cmd_struct commands[] = { { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT }, { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT }, { "mktree", cmd_mktree, RUN_SETUP }, - { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP_GENTLY }, + { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP }, { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE }, { "name-rev", cmd_name_rev, RUN_SETUP }, { "notes", cmd_notes, RUN_SETUP }, diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl index e09e024a09..fbd1c20a23 100755 --- a/gitweb/gitweb.perl +++ b/gitweb/gitweb.perl @@ -3796,7 +3796,8 @@ sub git_get_heads_list { my @headslist; open my $fd, '-|', git_cmd(), 'for-each-ref', - ($limit ? '--count='.($limit+1) : ()), '--sort=-committerdate', + ($limit ? 
'--count='.($limit+1) : ()), + '--sort=-HEAD', '--sort=-committerdate', '--format=%(objectname) %(refname) %(subject)%00%(committer)', @patterns or return; @@ -867,7 +867,7 @@ void free_grep_patterns(struct grep_opt *opt) free_pattern_expr(opt->pattern_expression); } -static char *end_of_line(char *cp, unsigned long *left) +static const char *end_of_line(const char *cp, unsigned long *left) { unsigned long l = *left; while (l && *cp != '\n') { @@ -908,7 +908,8 @@ static void show_name(struct grep_opt *opt, const char *name) opt->output(opt, opt->null_following_name ? "\0" : "\n", 1); } -static int patmatch(struct grep_pat *p, char *line, char *eol, +static int patmatch(struct grep_pat *p, + const char *line, const char *eol, regmatch_t *match, int eflags) { int hit; @@ -922,20 +923,16 @@ static int patmatch(struct grep_pat *p, char *line, char *eol, return hit; } -static int strip_timestamp(char *bol, char **eol_p) +static void strip_timestamp(const char *bol, const char **eol_p) { - char *eol = *eol_p; - int ch; + const char *eol = *eol_p; while (bol < --eol) { if (*eol != '>') continue; *eol_p = ++eol; - ch = *eol; - *eol = '\0'; - return ch; + break; } - return 0; } static struct { @@ -947,12 +944,12 @@ static struct { { "reflog ", 7 }, }; -static int match_one_pattern(struct grep_pat *p, char *bol, char *eol, +static int match_one_pattern(struct grep_pat *p, + const char *bol, const char *eol, enum grep_context ctx, regmatch_t *pmatch, int eflags) { int hit = 0; - int saved_ch = 0; const char *start = bol; if ((p->token != GREP_PATTERN) && @@ -971,7 +968,7 @@ static int match_one_pattern(struct grep_pat *p, char *bol, char *eol, switch (p->field) { case GREP_HEADER_AUTHOR: case GREP_HEADER_COMMITTER: - saved_ch = strip_timestamp(bol, &eol); + strip_timestamp(bol, &eol); break; default: break; @@ -1021,8 +1018,6 @@ static int match_one_pattern(struct grep_pat *p, char *bol, char *eol, goto again; } } - if (p->token == GREP_PATTERN_HEAD && saved_ch) - *eol = saved_ch; if (hit) { pmatch[0].rm_so += bol - start; pmatch[0].rm_eo += bol - start; @@ -1030,8 +1025,9 @@ static int match_one_pattern(struct grep_pat *p, char *bol, char *eol, return hit; } -static int match_expr_eval(struct grep_opt *opt, struct grep_expr *x, char *bol, - char *eol, enum grep_context ctx, ssize_t *col, +static int match_expr_eval(struct grep_opt *opt, struct grep_expr *x, + const char *bol, const char *eol, + enum grep_context ctx, ssize_t *col, ssize_t *icol, int collect_hits) { int h = 0; @@ -1098,7 +1094,8 @@ static int match_expr_eval(struct grep_opt *opt, struct grep_expr *x, char *bol, return h; } -static int match_expr(struct grep_opt *opt, char *bol, char *eol, +static int match_expr(struct grep_opt *opt, + const char *bol, const char *eol, enum grep_context ctx, ssize_t *col, ssize_t *icol, int collect_hits) { @@ -1106,7 +1103,8 @@ static int match_expr(struct grep_opt *opt, char *bol, char *eol, return match_expr_eval(opt, x, bol, eol, ctx, col, icol, collect_hits); } -static int match_line(struct grep_opt *opt, char *bol, char *eol, +static int match_line(struct grep_opt *opt, + const char *bol, const char *eol, ssize_t *col, ssize_t *icol, enum grep_context ctx, int collect_hits) { @@ -1138,7 +1136,8 @@ static int match_line(struct grep_opt *opt, char *bol, char *eol, return hit; } -static int match_next_pattern(struct grep_pat *p, char *bol, char *eol, +static int match_next_pattern(struct grep_pat *p, + const char *bol, const char *eol, enum grep_context ctx, regmatch_t *pmatch, int eflags) { @@ 
-1159,7 +1158,8 @@ static int match_next_pattern(struct grep_pat *p, char *bol, char *eol, return 1; } -static int next_match(struct grep_opt *opt, char *bol, char *eol, +static int next_match(struct grep_opt *opt, + const char *bol, const char *eol, enum grep_context ctx, regmatch_t *pmatch, int eflags) { struct grep_pat *p; @@ -1215,7 +1215,8 @@ static void show_line_header(struct grep_opt *opt, const char *name, } } -static void show_line(struct grep_opt *opt, char *bol, char *eol, +static void show_line(struct grep_opt *opt, + const char *bol, const char *eol, const char *name, unsigned lno, ssize_t cno, char sign) { int rest = eol - bol; @@ -1246,7 +1247,6 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol, if (opt->color || opt->only_matching) { regmatch_t match; enum grep_context ctx = GREP_CONTEXT_BODY; - int ch = *eol; int eflags = 0; if (opt->color) { @@ -1261,7 +1261,6 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol, else if (sign == '=') line_color = opt->colors[GREP_COLOR_FUNCTION]; } - *eol = '\0'; while (next_match(opt, bol, eol, ctx, &match, eflags)) { if (match.rm_so == match.rm_eo) break; @@ -1279,7 +1278,6 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol, rest -= match.rm_eo; eflags = REG_NOTBOL; } - *eol = ch; } if (!opt->only_matching) { output_color(opt, bol, rest, line_color); @@ -1307,7 +1305,8 @@ static inline void grep_attr_unlock(void) pthread_mutex_unlock(&grep_attr_mutex); } -static int match_funcname(struct grep_opt *opt, struct grep_source *gs, char *bol, char *eol) +static int match_funcname(struct grep_opt *opt, struct grep_source *gs, + const char *bol, const char *eol) { xdemitconf_t *xecfg = opt->priv; if (xecfg && !xecfg->find_func) { @@ -1334,10 +1333,10 @@ static int match_funcname(struct grep_opt *opt, struct grep_source *gs, char *bo } static void show_funcname_line(struct grep_opt *opt, struct grep_source *gs, - char *bol, unsigned lno) + const char *bol, unsigned lno) { while (bol > gs->buf) { - char *eol = --bol; + const char *eol = --bol; while (bol > gs->buf && bol[-1] != '\n') bol--; @@ -1356,7 +1355,7 @@ static void show_funcname_line(struct grep_opt *opt, struct grep_source *gs, static int is_empty_line(const char *bol, const char *eol); static void show_pre_context(struct grep_opt *opt, struct grep_source *gs, - char *bol, char *end, unsigned lno) + const char *bol, const char *end, unsigned lno) { unsigned cur = lno, from = 1, funcname_lno = 0, orig_from; int funcname_needed = !!opt->funcname, comment_needed = 0; @@ -1376,8 +1375,8 @@ static void show_pre_context(struct grep_opt *opt, struct grep_source *gs, /* Rewind. */ while (bol > gs->buf && cur > from) { - char *next_bol = bol; - char *eol = --bol; + const char *next_bol = bol; + const char *eol = --bol; while (bol > gs->buf && bol[-1] != '\n') bol--; @@ -1408,7 +1407,7 @@ static void show_pre_context(struct grep_opt *opt, struct grep_source *gs, /* Back forward. */ while (cur < lno) { - char *eol = bol, sign = (cur == funcname_lno) ? '=' : '-'; + const char *eol = bol, sign = (cur == funcname_lno) ? 
'=' : '-'; while (*eol != '\n') eol++; @@ -1436,12 +1435,12 @@ static int should_lookahead(struct grep_opt *opt) static int look_ahead(struct grep_opt *opt, unsigned long *left_p, unsigned *lno_p, - char **bol_p) + const char **bol_p) { unsigned lno = *lno_p; - char *bol = *bol_p; + const char *bol = *bol_p; struct grep_pat *p; - char *sp, *last_bol; + const char *sp, *last_bol; regoff_t earliest = -1; for (p = opt->pattern_list; p; p = p->next) { @@ -1543,8 +1542,8 @@ static int is_empty_line(const char *bol, const char *eol) static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int collect_hits) { - char *bol; - char *peek_bol = NULL; + const char *bol; + const char *peek_bol = NULL; unsigned long left; unsigned lno = 1; unsigned last_hit = 0; @@ -1626,7 +1625,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle bol = gs->buf; left = gs->size; while (left) { - char *eol, ch; + const char *eol; int hit; ssize_t cno; ssize_t col = -1, icol = -1; @@ -1647,14 +1646,11 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle && look_ahead(opt, &left, &lno, &bol)) break; eol = end_of_line(bol, &left); - ch = *eol; - *eol = 0; if ((ctx == GREP_CONTEXT_HEAD) && (eol == bol)) ctx = GREP_CONTEXT_BODY; hit = match_line(opt, bol, eol, &col, &icol, ctx, collect_hits); - *eol = ch; if (collect_hits) goto next_line; @@ -1713,7 +1709,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle } if (show_function && (!peek_bol || peek_bol < bol)) { unsigned long peek_left = left; - char *peek_eol = eol; + const char *peek_eol = eol; /* * Trailing empty lines are not interesting. @@ -1825,14 +1821,25 @@ int grep_source(struct grep_opt *opt, struct grep_source *gs) return grep_source_1(opt, gs, 0); } -int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size) +static void grep_source_init_buf(struct grep_source *gs, + const char *buf, + unsigned long size) +{ + gs->type = GREP_SOURCE_BUF; + gs->name = NULL; + gs->path = NULL; + gs->buf = buf; + gs->size = size; + gs->driver = NULL; + gs->identifier = NULL; +} + +int grep_buffer(struct grep_opt *opt, const char *buf, unsigned long size) { struct grep_source gs; int r; - grep_source_init(&gs, GREP_SOURCE_BUF, NULL, NULL, NULL); - gs.buf = buf; - gs.size = size; + grep_source_init_buf(&gs, buf, size); r = grep_source(opt, &gs); @@ -1840,28 +1847,30 @@ int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size) return r; } -void grep_source_init(struct grep_source *gs, enum grep_source_type type, - const char *name, const char *path, - const void *identifier) +void grep_source_init_file(struct grep_source *gs, const char *name, + const char *path) { - gs->type = type; + gs->type = GREP_SOURCE_FILE; gs->name = xstrdup_or_null(name); gs->path = xstrdup_or_null(path); gs->buf = NULL; gs->size = 0; gs->driver = NULL; + gs->identifier = xstrdup(path); +} - switch (type) { - case GREP_SOURCE_FILE: - gs->identifier = xstrdup(identifier); - break; - case GREP_SOURCE_OID: - gs->identifier = oiddup(identifier); - break; - case GREP_SOURCE_BUF: - gs->identifier = NULL; - break; - } +void grep_source_init_oid(struct grep_source *gs, const char *name, + const char *path, const struct object_id *oid, + struct repository *repo) +{ + gs->type = GREP_SOURCE_OID; + gs->name = xstrdup_or_null(name); + gs->path = xstrdup_or_null(path); + gs->buf = NULL; + gs->size = 0; + gs->driver = NULL; + gs->identifier = oiddup(oid); + gs->repo = repo; } void 
grep_source_clear(struct grep_source *gs) @@ -1877,7 +1886,9 @@ void grep_source_clear_data(struct grep_source *gs) switch (gs->type) { case GREP_SOURCE_FILE: case GREP_SOURCE_OID: - FREE_AND_NULL(gs->buf); + /* these types own the buffer */ + free((char *)gs->buf); + gs->buf = NULL; gs->size = 0; break; case GREP_SOURCE_BUF: @@ -1890,7 +1901,8 @@ static int grep_source_load_oid(struct grep_source *gs) { enum object_type type; - gs->buf = read_object_file(gs->identifier, &type, &gs->size); + gs->buf = repo_read_object_file(gs->repo, gs->identifier, &type, + &gs->size); if (!gs->buf) return error(_("'%s': unable to read %s"), gs->name, @@ -120,7 +120,20 @@ struct grep_opt { struct grep_pat *header_list; struct grep_pat **header_tail; struct grep_expr *pattern_expression; + + /* + * NEEDSWORK: See if we can remove this field, because the repository + * should probably be per-source. That is, grep.c functions using this + * field should probably start using "repo" in "struct grep_source" + * instead. + * + * This is potentially the cause of at least one bug - "git grep" + * ignoring the textconv attributes from submodules. See [1] for more + * information. + * [1] https://lore.kernel.org/git/CAHd-oW5iEQarYVxEXoTG-ua2zdoybTrSjCBKtO0YT292fm0NQQ@mail.gmail.com/ + */ struct repository *repo; + const char *prefix; int prefix_length; regex_t regexp; @@ -176,7 +189,7 @@ void append_grep_pattern(struct grep_opt *opt, const char *pat, const char *orig void append_header_grep_pattern(struct grep_opt *, enum grep_header_field, const char *); void compile_grep_patterns(struct grep_opt *opt); void free_grep_patterns(struct grep_opt *opt); -int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size); +int grep_buffer(struct grep_opt *opt, const char *buf, unsigned long size); struct grep_source { char *name; @@ -187,17 +200,20 @@ struct grep_source { GREP_SOURCE_BUF, } type; void *identifier; + struct repository *repo; /* if GREP_SOURCE_OID */ - char *buf; + const char *buf; unsigned long size; char *path; /* for attribute lookups */ struct userdiff_driver *driver; }; -void grep_source_init(struct grep_source *gs, enum grep_source_type type, - const char *name, const char *path, - const void *identifier); +void grep_source_init_file(struct grep_source *gs, const char *name, + const char *path); +void grep_source_init_oid(struct grep_source *gs, const char *name, + const char *path, const struct object_id *oid, + struct repository *repo); void grep_source_clear_data(struct grep_source *gs); void grep_source_clear(struct grep_source *gs); void grep_source_load_driver(struct grep_source *gs, @@ -207,7 +223,6 @@ void grep_source_load_driver(struct grep_source *gs, int grep_source(struct grep_opt *opt, struct grep_source *gs); struct grep_opt *grep_opt_dup(const struct grep_opt *opt); -int grep_threads_ok(const struct grep_opt *opt); /* * Mutex used around access to the attributes machinery if @@ -11,6 +11,7 @@ #include "version.h" #include "refs.h" #include "parse-options.h" +#include "prompt.h" struct category_description { uint32_t category; @@ -472,6 +473,7 @@ int is_in_cmdlist(struct cmdnames *c, const char *s) static int autocorrect; static struct cmdnames aliases; +#define AUTOCORRECT_PROMPT (-3) #define AUTOCORRECT_NEVER (-2) #define AUTOCORRECT_IMMEDIATELY (-1) @@ -486,6 +488,8 @@ static int git_unknown_cmd_config(const char *var, const char *value, void *cb) autocorrect = AUTOCORRECT_NEVER; } else if (!strcmp(value, "immediate")) { autocorrect = AUTOCORRECT_IMMEDIATELY; + } else if 
(!strcmp(value, "prompt")) { + autocorrect = AUTOCORRECT_PROMPT; } else { int v = git_config_int(var, value); autocorrect = (v < 0) @@ -539,6 +543,12 @@ const char *help_unknown_cmd(const char *cmd) read_early_config(git_unknown_cmd_config, NULL); + /* + * Disable autocorrection prompt in a non-interactive session + */ + if ((autocorrect == AUTOCORRECT_PROMPT) && (!isatty(0) || !isatty(2))) + autocorrect = AUTOCORRECT_NEVER; + if (autocorrect == AUTOCORRECT_NEVER) { fprintf_ln(stderr, _("git: '%s' is not a git command. See 'git --help'."), cmd); exit(1); @@ -618,7 +628,16 @@ const char *help_unknown_cmd(const char *cmd) _("Continuing under the assumption that " "you meant '%s'."), assumed); - else { + else if (autocorrect == AUTOCORRECT_PROMPT) { + char *answer; + struct strbuf msg = STRBUF_INIT; + strbuf_addf(&msg, _("Run '%s' instead? (y/N)"), assumed); + answer = git_prompt(msg.buf, PROMPT_ECHO); + strbuf_release(&msg); + if (!(starts_with(answer, "y") || + starts_with(answer, "Y"))) + exit(1); + } else { fprintf_ln(stderr, _("Continuing in %0.1f seconds, " "assuming that you meant '%s'."), diff --git a/http-backend.c b/http-backend.c index b329bf63f0..e7c0eeab23 100644 --- a/http-backend.c +++ b/http-backend.c @@ -534,7 +534,7 @@ static void get_info_refs(struct strbuf *hdr, char *arg) if (service_name) { const char *argv[] = {NULL /* service name */, - "--stateless-rpc", "--advertise-refs", + "--http-backend-info-refs", ".", NULL}; struct rpc_service *svc = select_service(hdr, service_name); @@ -739,6 +739,7 @@ static int bad_request(struct strbuf *hdr, const struct service_cmd *c) int cmd_main(int argc, const char **argv) { char *method = getenv("REQUEST_METHOD"); + const char *proto_header; char *dir; struct service_cmd *cmd = NULL; char *cmd_arg = NULL; @@ -789,6 +790,9 @@ int cmd_main(int argc, const char **argv) http_config(); max_request_buffer = git_env_ulong("GIT_HTTP_MAX_REQUEST_BUFFER", max_request_buffer); + proto_header = getenv("HTTP_GIT_PROTOCOL"); + if (proto_header) + setenv(GIT_PROTOCOL_ENVIRONMENT, proto_header, 0); cmd->imp(&hdr, cmd_arg); return 0; @@ -1,4 +1,5 @@ #include "git-compat-util.h" +#include "git-curl-compat.h" #include "http.h" #include "config.h" #include "pack.h" @@ -47,19 +48,19 @@ static struct { { "sslv2", CURL_SSLVERSION_SSLv2 }, { "sslv3", CURL_SSLVERSION_SSLv3 }, { "tlsv1", CURL_SSLVERSION_TLSv1 }, -#if LIBCURL_VERSION_NUM >= 0x072200 +#ifdef GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_0 { "tlsv1.0", CURL_SSLVERSION_TLSv1_0 }, { "tlsv1.1", CURL_SSLVERSION_TLSv1_1 }, { "tlsv1.2", CURL_SSLVERSION_TLSv1_2 }, #endif -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURL_SSLVERSION_TLSv1_3 { "tlsv1.3", CURL_SSLVERSION_TLSv1_3 }, #endif }; static const char *ssl_key; static const char *ssl_capath; static const char *curl_no_proxy; -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY static const char *ssl_pinnedkey; #endif static const char *ssl_cainfo; @@ -373,10 +374,10 @@ static int http_options(const char *var, const char *value, void *cb) } if (!strcmp("http.pinnedpubkey", var)) { -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY return git_config_pathname(&ssl_pinnedkey, var, value); #else - warning(_("Public key pinning not supported with cURL < 7.44.0")); + warning(_("Public key pinning not supported with cURL < 7.39.0")); return 0; #endif } @@ -500,7 +501,7 @@ static int has_cert_password(void) return 1; } -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef 
GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD static int has_proxy_cert_password(void) { if (http_proxy_ssl_cert == NULL || proxy_ssl_cert_password_required != 1) @@ -516,7 +517,7 @@ static int has_proxy_cert_password(void) } #endif -#if LIBCURL_VERSION_NUM >= 0x071900 +#ifdef GITCURL_HAVE_CURLOPT_TCP_KEEPALIVE static void set_curl_keepalive(CURL *c) { curl_easy_setopt(c, CURLOPT_TCP_KEEPALIVE, 1); @@ -536,7 +537,7 @@ static int sockopt_callback(void *client, curl_socket_t fd, curlsocktype type) if (rc < 0) warning_errno("unable to set SO_KEEPALIVE on socket"); - return 0; /* CURL_SOCKOPT_OK only exists since curl 7.21.5 */ + return CURL_SOCKOPT_OK; } static void set_curl_keepalive(CURL *c) @@ -550,8 +551,8 @@ static void redact_sensitive_header(struct strbuf *header) const char *sensitive_header; if (trace_curl_redact && - (skip_prefix(header->buf, "Authorization:", &sensitive_header) || - skip_prefix(header->buf, "Proxy-Authorization:", &sensitive_header))) { + (skip_iprefix(header->buf, "Authorization:", &sensitive_header) || + skip_iprefix(header->buf, "Proxy-Authorization:", &sensitive_header))) { /* The first token is the type, which is OK to log */ while (isspace(*sensitive_header)) sensitive_header++; @@ -561,7 +562,7 @@ static void redact_sensitive_header(struct strbuf *header) strbuf_setlen(header, sensitive_header - header->buf); strbuf_addstr(header, " <redacted>"); } else if (trace_curl_redact && - skip_prefix(header->buf, "Cookie:", &sensitive_header)) { + skip_iprefix(header->buf, "Cookie:", &sensitive_header)) { struct strbuf redacted_header = STRBUF_INIT; const char *cookie; @@ -732,7 +733,7 @@ static long get_curl_allowed_protocols(int from_user) return allowed_protocols; } -#if LIBCURL_VERSION_NUM >=0x072f00 +#ifdef GIT_CURL_HAVE_CURL_HTTP_VERSION_2 static int get_curl_http_version_opt(const char *version_string, long *opt) { int i; @@ -774,7 +775,7 @@ static CURL *get_curl_handle(void) curl_easy_setopt(result, CURLOPT_SSL_VERIFYHOST, 2); } -#if LIBCURL_VERSION_NUM >= 0x072f00 // 7.47.0 +#ifdef GIT_CURL_HAVE_CURL_HTTP_VERSION_2 if (curl_http_version) { long opt; if (!get_curl_http_version_opt(curl_http_version, &opt)) { @@ -805,7 +806,7 @@ static CURL *get_curl_handle(void) if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) && !http_schannel_check_revoke) { -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLSSLOPT_NO_REVOKE curl_easy_setopt(result, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NO_REVOKE); #else warning(_("CURLSSLOPT_NO_REVOKE not supported with cURL < 7.44.0")); @@ -845,20 +846,20 @@ static CURL *get_curl_handle(void) curl_easy_setopt(result, CURLOPT_SSLKEY, ssl_key); if (ssl_capath != NULL) curl_easy_setopt(result, CURLOPT_CAPATH, ssl_capath); -#if LIBCURL_VERSION_NUM >= 0x072c00 +#ifdef GIT_CURL_HAVE_CURLOPT_PINNEDPUBLICKEY if (ssl_pinnedkey != NULL) curl_easy_setopt(result, CURLOPT_PINNEDPUBLICKEY, ssl_pinnedkey); #endif if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) && !http_schannel_use_ssl_cainfo) { curl_easy_setopt(result, CURLOPT_CAINFO, NULL); -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO curl_easy_setopt(result, CURLOPT_PROXY_CAINFO, NULL); #endif } else if (ssl_cainfo != NULL || http_proxy_ssl_ca_info != NULL) { if (ssl_cainfo != NULL) curl_easy_setopt(result, CURLOPT_CAINFO, ssl_cainfo); -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_CAINFO if (http_proxy_ssl_ca_info != NULL) curl_easy_setopt(result, CURLOPT_PROXY_CAINFO, http_proxy_ssl_ca_info); #endif @@ -927,7 
+928,6 @@ static CURL *get_curl_handle(void) */ curl_easy_setopt(result, CURLOPT_PROXY, ""); } else if (curl_http_proxy) { -#if LIBCURL_VERSION_NUM >= 0x071800 if (starts_with(curl_http_proxy, "socks5h")) curl_easy_setopt(result, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME); @@ -940,8 +940,7 @@ static CURL *get_curl_handle(void) else if (starts_with(curl_http_proxy, "socks")) curl_easy_setopt(result, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); -#endif -#if LIBCURL_VERSION_NUM >= 0x073400 +#ifdef GIT_CURL_HAVE_CURLOPT_PROXY_KEYPASSWD else if (starts_with(curl_http_proxy, "https")) { curl_easy_setopt(result, CURLOPT_PROXYTYPE, CURLPROXY_HTTPS); @@ -1006,7 +1005,7 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) free(normalized_url); string_list_clear(&config.vars, 1); -#if LIBCURL_VERSION_NUM >= 0x073800 +#ifdef GIT_CURL_HAVE_CURLSSLSET_NO_BACKENDS if (http_ssl_backend) { const curl_ssl_backend **backends; struct strbuf buf = STRBUF_INIT; diff --git a/imap-send.c b/imap-send.c index 49a5f8aa59..e6090a0346 100644 --- a/imap-send.c +++ b/imap-send.c @@ -1441,7 +1441,7 @@ static CURL *setup_curl(struct imap_server_conf *srvc, struct credential *cred) curl_easy_setopt(curl, CURLOPT_PORT, server.port); if (server.auth_method) { -#if LIBCURL_VERSION_NUM < 0x072200 +#ifndef GIT_CURL_HAVE_CURLOPT_LOGIN_OPTIONS warning("No LOGIN_OPTIONS support in this cURL version"); #else struct strbuf auth = STRBUF_INIT; diff --git a/log-tree.h b/log-tree.h index 1e8c91dbf2..e7e4641cf8 100644 --- a/log-tree.h +++ b/log-tree.h @@ -14,10 +14,8 @@ struct decoration_filter { }; int parse_decorate_color_config(const char *var, const char *slot_name, const char *value); -void init_log_tree_opt(struct rev_info *); int log_tree_diff_flush(struct rev_info *); int log_tree_commit(struct rev_info *, struct commit *); -int log_tree_opt_parse(struct rev_info *, const char **, int); void show_log(struct rev_info *opt); void format_decorations_extended(struct strbuf *sb, const struct commit *commit, int use_color, @@ -41,6 +41,12 @@ static void ensure_config_read(void) } /* + * If we see this many or more "ref-prefix" lines from the client, we consider + * it "too many" and will avoid using the prefix feature entirely. + */ +#define TOO_MANY_PREFIXES 65536 + +/* * Check if one of the prefixes is a prefix of the ref. * If no prefixes were provided, all refs match. 
*/ @@ -65,6 +71,7 @@ struct ls_refs_data { unsigned peel; unsigned symrefs; struct strvec prefixes; + struct strbuf buf; unsigned unborn : 1; }; @@ -73,7 +80,8 @@ static int send_ref(const char *refname, const struct object_id *oid, { struct ls_refs_data *data = cb_data; const char *refname_nons = strip_namespace(refname); - struct strbuf refline = STRBUF_INIT; + + strbuf_reset(&data->buf); if (ref_is_hidden(refname_nons, refname)) return 0; @@ -82,9 +90,9 @@ static int send_ref(const char *refname, const struct object_id *oid, return 0; if (oid) - strbuf_addf(&refline, "%s %s", oid_to_hex(oid), refname_nons); + strbuf_addf(&data->buf, "%s %s", oid_to_hex(oid), refname_nons); else - strbuf_addf(&refline, "unborn %s", refname_nons); + strbuf_addf(&data->buf, "unborn %s", refname_nons); if (data->symrefs && flag & REF_ISSYMREF) { struct object_id unused; const char *symref_target = resolve_ref_unsafe(refname, 0, @@ -94,20 +102,19 @@ static int send_ref(const char *refname, const struct object_id *oid, if (!symref_target) die("'%s' is a symref but it is not?", refname); - strbuf_addf(&refline, " symref-target:%s", + strbuf_addf(&data->buf, " symref-target:%s", strip_namespace(symref_target)); } if (data->peel && oid) { struct object_id peeled; if (!peel_iterated_oid(oid, &peeled)) - strbuf_addf(&refline, " peeled:%s", oid_to_hex(&peeled)); + strbuf_addf(&data->buf, " peeled:%s", oid_to_hex(&peeled)); } - strbuf_addch(&refline, '\n'); - packet_write(1, refline.buf, refline.len); + strbuf_addch(&data->buf, '\n'); + packet_fwrite(stdout, data->buf.buf, data->buf.len); - strbuf_release(&refline); return 0; } @@ -138,13 +145,13 @@ static int ls_refs_config(const char *var, const char *value, void *data) return parse_hide_refs_config(var, value, "uploadpack"); } -int ls_refs(struct repository *r, struct strvec *keys, - struct packet_reader *request) +int ls_refs(struct repository *r, struct packet_reader *request) { struct ls_refs_data data; memset(&data, 0, sizeof(data)); strvec_init(&data.prefixes); + strbuf_init(&data.buf, 0); ensure_config_read(); git_config(ls_refs_config, NULL); @@ -157,22 +164,35 @@ int ls_refs(struct repository *r, struct strvec *keys, data.peel = 1; else if (!strcmp("symrefs", arg)) data.symrefs = 1; - else if (skip_prefix(arg, "ref-prefix ", &out)) - strvec_push(&data.prefixes, out); + else if (skip_prefix(arg, "ref-prefix ", &out)) { + if (data.prefixes.nr < TOO_MANY_PREFIXES) + strvec_push(&data.prefixes, out); + } else if (!strcmp("unborn", arg)) data.unborn = allow_unborn; + else + die(_("unexpected line: '%s'"), arg); } if (request->status != PACKET_READ_FLUSH) die(_("expected flush after ls-refs arguments")); + /* + * If we saw too many prefixes, we must avoid using them at all; as + * soon as we have any prefix, they are meant to form a comprehensive + * list. 
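For reference, the `ref-prefix` arguments being capped here belong to the protocol v2 `ls-refs` command: a client may send any number of prefixes, and the server is expected to advertise every ref matching at least one of them, so a silently truncated prefix list would hide refs. That is why the code clears the list entirely once TOO_MANY_PREFIXES is reached rather than keeping a partial set. A typical request body looks roughly like this (`0001` is the delim packet, `0000` the flush packet; pkt-line length prefixes omitted):

----
command=ls-refs
object-format=sha1
0001
peel
symrefs
ref-prefix refs/heads/
ref-prefix refs/tags/
0000
----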
+ */ + if (data.prefixes.nr >= TOO_MANY_PREFIXES) + strvec_clear(&data.prefixes); + send_possibly_unborn_head(&data); if (!data.prefixes.nr) strvec_push(&data.prefixes, ""); for_each_fullref_in_prefixes(get_git_namespace(), data.prefixes.v, send_ref, &data, 0); - packet_flush(1); + packet_fflush(stdout); strvec_clear(&data.prefixes); + strbuf_release(&data.buf); return 0; } @@ -2,10 +2,8 @@ #define LS_REFS_H struct repository; -struct strvec; struct packet_reader; -int ls_refs(struct repository *r, struct strvec *keys, - struct packet_reader *request); +int ls_refs(struct repository *r, struct packet_reader *request); int ls_refs_advertise(struct repository *r, struct strbuf *value); #endif /* LS_REFS_H */ @@ -37,6 +37,7 @@ static void free_mailmap_info(void *p, const char *s) s, debug_str(mi->name), debug_str(mi->email)); free(mi->name); free(mi->email); + free(mi); } static void free_mailmap_entry(void *p, const char *s) @@ -52,6 +53,7 @@ static void free_mailmap_entry(void *p, const char *s) me->namemap.strdup_strings = 1; string_list_clear_func(&me->namemap, free_mailmap_info); + free(me); } /* diff --git a/merge-ort.c b/merge-ort.c index 515dc39b7f..ab42c5d847 100644 --- a/merge-ort.c +++ b/merge-ort.c @@ -32,6 +32,7 @@ #include "promisor-remote.h" #include "revision.h" #include "strmap.h" +#include "submodule-config.h" #include "submodule.h" #include "tree.h" #include "unpack-trees.h" @@ -1511,7 +1512,6 @@ static int find_first_merges(struct repository *repo, xsnprintf(merged_revision, sizeof(merged_revision), "^%s", oid_to_hex(&a->object.oid)); repo_init_revisions(repo, &revs, NULL); - rev_opts.submodule = path; /* FIXME: can't handle linked worktrees in submodules yet */ revs.single_worktree = path != NULL; setup_revisions(ARRAY_SIZE(rev_args)-1, rev_args, &revs, &rev_opts); @@ -1521,7 +1521,7 @@ static int find_first_merges(struct repository *repo, die("revision walk setup failed"); while ((commit = get_revision(&revs)) != NULL) { struct object *o = &(commit->object); - if (in_merge_bases(b, commit)) + if (repo_in_merge_bases(repo, b, commit)) add_object_array(o, NULL, &merges); } reset_revision_walk(); @@ -1536,7 +1536,7 @@ static int find_first_merges(struct repository *repo, contains_another = 0; for (j = 0; j < merges.nr; j++) { struct commit *m2 = (struct commit *) merges.objects[j].item; - if (i != j && in_merge_bases(m2, m1)) { + if (i != j && repo_in_merge_bases(repo, m2, m1)) { contains_another = 1; break; } @@ -1557,10 +1557,12 @@ static int merge_submodule(struct merge_options *opt, const struct object_id *b, struct object_id *result) { + struct repository subrepo; + struct strbuf sb = STRBUF_INIT; + int ret = 0; struct commit *commit_o, *commit_a, *commit_b; int parent_count; struct object_array merges; - struct strbuf sb = STRBUF_INIT; int i; int search = !opt->priv->call_depth; @@ -1576,6 +1578,10 @@ static int merge_submodule(struct merge_options *opt, if (is_null_oid(b)) return 0; + /* + * NEEDSWORK: Remove this when all submodule object accesses are + * through explicitly specified repositores. 
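The merge-ort hunk that follows replaces the process-global `add_submodule_odb()` lookup with an explicitly opened submodule repository, in line with the NEEDSWORK note above. Condensed into a sketch (the function name is made up; error handling and the merge-base search are omitted), the pattern used by this patch is:

----
static int submodule_ff_sketch(struct merge_options *opt, const char *path,
			       const struct object_id *a,
			       const struct object_id *b,
			       struct object_id *result)
{
	struct repository subrepo;
	struct commit *commit_a, *commit_b;
	int ret = 0;

	/* Open the submodule at 'path' as its own repository. */
	if (repo_submodule_init(&subrepo, opt->repo, path, null_oid()))
		return 0;	/* submodule not checked out */

	/* All object accesses name the submodule repository explicitly. */
	commit_a = lookup_commit_reference(&subrepo, a);
	commit_b = lookup_commit_reference(&subrepo, b);

	/* Fast-forward if one side already contains the other. */
	if (commit_a && commit_b &&
	    repo_in_merge_bases(&subrepo, commit_a, commit_b)) {
		oidcpy(result, b);
		ret = 1;
	}

	repo_clear(&subrepo);
	return ret;
}
----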
+ */ if (add_submodule_odb(path)) { path_msg(opt, path, 0, _("Failed to merge submodule %s (not checked out)"), @@ -1583,39 +1589,48 @@ static int merge_submodule(struct merge_options *opt, return 0; } - if (!(commit_o = lookup_commit_reference(opt->repo, o)) || - !(commit_a = lookup_commit_reference(opt->repo, a)) || - !(commit_b = lookup_commit_reference(opt->repo, b))) { + if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) { + path_msg(opt, path, 0, + _("Failed to merge submodule %s (not checked out)"), + path); + return 0; + } + + if (!(commit_o = lookup_commit_reference(&subrepo, o)) || + !(commit_a = lookup_commit_reference(&subrepo, a)) || + !(commit_b = lookup_commit_reference(&subrepo, b))) { path_msg(opt, path, 0, _("Failed to merge submodule %s (commits not present)"), path); - return 0; + goto cleanup; } /* check whether both changes are forward */ - if (!in_merge_bases(commit_o, commit_a) || - !in_merge_bases(commit_o, commit_b)) { + if (!repo_in_merge_bases(&subrepo, commit_o, commit_a) || + !repo_in_merge_bases(&subrepo, commit_o, commit_b)) { path_msg(opt, path, 0, _("Failed to merge submodule %s " "(commits don't follow merge-base)"), path); - return 0; + goto cleanup; } /* Case #1: a is contained in b or vice versa */ - if (in_merge_bases(commit_a, commit_b)) { + if (repo_in_merge_bases(&subrepo, commit_a, commit_b)) { oidcpy(result, b); path_msg(opt, path, 1, _("Note: Fast-forwarding submodule %s to %s"), path, oid_to_hex(b)); - return 1; + ret = 1; + goto cleanup; } - if (in_merge_bases(commit_b, commit_a)) { + if (repo_in_merge_bases(&subrepo, commit_b, commit_a)) { oidcpy(result, a); path_msg(opt, path, 1, _("Note: Fast-forwarding submodule %s to %s"), path, oid_to_hex(a)); - return 1; + ret = 1; + goto cleanup; } /* @@ -1627,10 +1642,10 @@ static int merge_submodule(struct merge_options *opt, /* Skip the search if makes no sense to the calling context. */ if (!search) - return 0; + goto cleanup; /* find commit which merges them */ - parent_count = find_first_merges(opt->repo, path, commit_a, commit_b, + parent_count = find_first_merges(&subrepo, path, commit_a, commit_b, &merges); switch (parent_count) { case 0: @@ -1664,7 +1679,9 @@ static int merge_submodule(struct merge_options *opt, } object_array_clear(&merges); - return 0; +cleanup: + repo_clear(&subrepo); + return ret; } static void initialize_attr_index(struct merge_options *opt) @@ -4074,6 +4091,21 @@ static int record_conflicted_index_entries(struct merge_options *opt) if (strmap_empty(&opt->priv->conflicted)) return 0; + /* + * We are in a conflicted state. These conflicts might be inside + * sparse-directory entries, so check if any entries are outside + * of the sparse-checkout cone preemptively. + * + * We set original_cache_nr below, but that might change if + * index_name_pos() calls ask for paths within sparse directories. 
+ */ + strmap_for_each_entry(&opt->priv->conflicted, &iter, e) { + if (!path_in_sparse_checkout(e->key, index)) { + ensure_full_index(index); + break; + } + } + /* If any entries have skip_worktree set, we'll have to check 'em out */ state.force = 1; state.quiet = 1; @@ -4081,7 +4113,7 @@ static int record_conflicted_index_entries(struct merge_options *opt) state.istate = index; original_cache_nr = index->cache_nr; - /* Put every entry from paths into plist, then sort */ + /* Append every entry from conflicted into index, then sort */ strmap_for_each_entry(&opt->priv->conflicted, &iter, e) { const char *path = e->key; struct conflict_info *ci = e->value; diff --git a/merge-recursive.c b/merge-recursive.c index 3355d50e8a..5a2d8a60c0 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -24,6 +24,7 @@ #include "repository.h" #include "revision.h" #include "string-list.h" +#include "submodule-config.h" #include "submodule.h" #include "tag.h" #include "tree-walk.h" @@ -55,10 +56,7 @@ static int path_hashmap_cmp(const void *cmp_data, a = container_of(eptr, const struct path_hashmap_entry, e); b = container_of(entry_or_key, const struct path_hashmap_entry, e); - if (ignore_case) - return strcasecmp(a->path, key ? key : b->path); - else - return strcmp(a->path, key ? key : b->path); + return fspathcmp(a->path, key ? key : b->path); } /* @@ -1113,7 +1111,6 @@ static int find_first_merges(struct repository *repo, xsnprintf(merged_revision, sizeof(merged_revision), "^%s", oid_to_hex(&a->object.oid)); repo_init_revisions(repo, &revs, NULL); - rev_opts.submodule = path; /* FIXME: can't handle linked worktrees in submodules yet */ revs.single_worktree = path != NULL; setup_revisions(ARRAY_SIZE(rev_args)-1, rev_args, &revs, &rev_opts); @@ -1123,7 +1120,7 @@ static int find_first_merges(struct repository *repo, die("revision walk setup failed"); while ((commit = get_revision(&revs)) != NULL) { struct object *o = &(commit->object); - if (in_merge_bases(b, commit)) + if (repo_in_merge_bases(repo, b, commit)) add_object_array(o, NULL, &merges); } reset_revision_walk(); @@ -1138,7 +1135,7 @@ static int find_first_merges(struct repository *repo, contains_another = 0; for (j = 0; j < merges.nr; j++) { struct commit *m2 = (struct commit *) merges.objects[j].item; - if (i != j && in_merge_bases(m2, m1)) { + if (i != j && repo_in_merge_bases(repo, m2, m1)) { contains_another = 1; break; } @@ -1174,6 +1171,8 @@ static int merge_submodule(struct merge_options *opt, const struct object_id *base, const struct object_id *a, const struct object_id *b) { + struct repository subrepo; + int ret = 0; struct commit *commit_base, *commit_a, *commit_b; int parent_count; struct object_array merges; @@ -1197,27 +1196,36 @@ static int merge_submodule(struct merge_options *opt, if (is_null_oid(b)) return 0; + /* + * NEEDSWORK: Remove this when all submodule object accesses are + * through explicitly specified repositores. 
+ */ if (add_submodule_odb(path)) { output(opt, 1, _("Failed to merge submodule %s (not checked out)"), path); return 0; } - if (!(commit_base = lookup_commit_reference(opt->repo, base)) || - !(commit_a = lookup_commit_reference(opt->repo, a)) || - !(commit_b = lookup_commit_reference(opt->repo, b))) { - output(opt, 1, _("Failed to merge submodule %s (commits not present)"), path); + if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) { + output(opt, 1, _("Failed to merge submodule %s (not checked out)"), path); return 0; } + if (!(commit_base = lookup_commit_reference(&subrepo, base)) || + !(commit_a = lookup_commit_reference(&subrepo, a)) || + !(commit_b = lookup_commit_reference(&subrepo, b))) { + output(opt, 1, _("Failed to merge submodule %s (commits not present)"), path); + goto cleanup; + } + /* check whether both changes are forward */ - if (!in_merge_bases(commit_base, commit_a) || - !in_merge_bases(commit_base, commit_b)) { + if (!repo_in_merge_bases(&subrepo, commit_base, commit_a) || + !repo_in_merge_bases(&subrepo, commit_base, commit_b)) { output(opt, 1, _("Failed to merge submodule %s (commits don't follow merge-base)"), path); - return 0; + goto cleanup; } /* Case #1: a is contained in b or vice versa */ - if (in_merge_bases(commit_a, commit_b)) { + if (repo_in_merge_bases(&subrepo, commit_a, commit_b)) { oidcpy(result, b); if (show(opt, 3)) { output(opt, 3, _("Fast-forwarding submodule %s to the following commit:"), path); @@ -1227,9 +1235,10 @@ static int merge_submodule(struct merge_options *opt, else ; /* no output */ - return 1; + ret = 1; + goto cleanup; } - if (in_merge_bases(commit_b, commit_a)) { + if (repo_in_merge_bases(&subrepo, commit_b, commit_a)) { oidcpy(result, a); if (show(opt, 3)) { output(opt, 3, _("Fast-forwarding submodule %s to the following commit:"), path); @@ -1239,7 +1248,8 @@ static int merge_submodule(struct merge_options *opt, else ; /* no output */ - return 1; + ret = 1; + goto cleanup; } /* @@ -1251,10 +1261,10 @@ static int merge_submodule(struct merge_options *opt, /* Skip the search if makes no sense to the calling context. 
*/ if (!search) - return 0; + goto cleanup; /* find commit which merges them */ - parent_count = find_first_merges(opt->repo, &merges, path, + parent_count = find_first_merges(&subrepo, &merges, path, commit_a, commit_b); switch (parent_count) { case 0: @@ -1281,7 +1291,9 @@ static int merge_submodule(struct merge_options *opt, } object_array_clear(&merges); - return 0; +cleanup: + repo_clear(&subrepo); + return ret; } static int merge_mode_and_contents(struct merge_options *opt, @@ -3750,6 +3762,9 @@ int merge_recursive(struct merge_options *opt, assert(opt->ancestor == NULL || !strcmp(opt->ancestor, "constructed merge base")); + prepare_repo_settings(opt->repo); + opt->repo->settings.command_requires_full_index = 1; + if (merge_start(opt, repo_get_commit_tree(opt->repo, h1))) return -1; clean = merge_recursive_internal(opt, h1, h2, merge_bases, result); @@ -13,6 +13,10 @@ #include "repository.h" #include "chunk-format.h" #include "pack.h" +#include "pack-bitmap.h" +#include "refs.h" +#include "revision.h" +#include "list-objects.h" #define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */ #define MIDX_VERSION 1 @@ -48,12 +52,12 @@ static uint8_t oid_version(void) } } -static const unsigned char *get_midx_checksum(struct multi_pack_index *m) +const unsigned char *get_midx_checksum(struct multi_pack_index *m) { return m->data + m->data_len - the_hash_algo->rawsz; } -static char *get_midx_filename(const char *object_dir) +char *get_midx_filename(const char *object_dir) { return xstrfmt("%s/pack/multi-pack-index", object_dir); } @@ -195,6 +199,8 @@ void close_midx(struct multi_pack_index *m) if (!m) return; + close_midx(m->next); + munmap((unsigned char *)m->data, m->data_len); for (i = 0; i < m->num_packs; i++) { @@ -203,6 +209,7 @@ void close_midx(struct multi_pack_index *m) } FREE_AND_NULL(m->packs); FREE_AND_NULL(m->pack_names); + free(m); } int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id) @@ -276,14 +283,18 @@ uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos) (off_t)pos * MIDX_CHUNK_OFFSET_WIDTH); } -static int nth_midxed_pack_entry(struct repository *r, - struct multi_pack_index *m, - struct pack_entry *e, - uint32_t pos) +int fill_midx_entry(struct repository * r, + const struct object_id *oid, + struct pack_entry *e, + struct multi_pack_index *m) { + uint32_t pos; uint32_t pack_int_id; struct packed_git *p; + if (!bsearch_midx(oid, m, &pos)) + return 0; + if (pos >= m->num_objects) return 0; @@ -303,15 +314,9 @@ static int nth_midxed_pack_entry(struct repository *r, if (!is_pack_valid(p)) return 0; - if (p->num_bad_objects) { - uint32_t i; - struct object_id oid; - nth_midxed_object_oid(&oid, m, pos); - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(oid.hash, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) - return 0; - } + if (oidset_size(&p->bad_objects) && + oidset_contains(&p->bad_objects, oid)) + return 0; e->offset = nth_midxed_offset(m, pos); e->p = p; @@ -319,19 +324,6 @@ static int nth_midxed_pack_entry(struct repository *r, return 1; } -int fill_midx_entry(struct repository * r, - const struct object_id *oid, - struct pack_entry *e, - struct multi_pack_index *m) -{ - uint32_t pos; - - if (!bsearch_midx(oid, m, &pos)) - return 0; - - return nth_midxed_pack_entry(r, m, e, pos); -} - /* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". 
*/ static int cmp_idx_or_pack_name(const char *idx_or_pack_name, const char *idx_name) @@ -882,7 +874,7 @@ static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash, strbuf_release(&buf); } -static void clear_midx_files_ext(struct repository *r, const char *ext, +static void clear_midx_files_ext(const char *object_dir, const char *ext, unsigned char *keep_hash); static int midx_checksum_valid(struct multi_pack_index *m) @@ -890,7 +882,167 @@ static int midx_checksum_valid(struct multi_pack_index *m) return hashfile_checksum_valid(m->data, m->data_len); } -static int write_midx_internal(const char *object_dir, struct multi_pack_index *m, +static void prepare_midx_packing_data(struct packing_data *pdata, + struct write_midx_context *ctx) +{ + uint32_t i; + + memset(pdata, 0, sizeof(struct packing_data)); + prepare_packing_data(the_repository, pdata); + + for (i = 0; i < ctx->entries_nr; i++) { + struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]]; + struct object_entry *to = packlist_alloc(pdata, &from->oid); + + oe_set_in_pack(pdata, to, + ctx->info[ctx->pack_perm[from->pack_int_id]].p); + } +} + +static int add_ref_to_pending(const char *refname, + const struct object_id *oid, + int flag, void *cb_data) +{ + struct rev_info *revs = (struct rev_info*)cb_data; + struct object *object; + + if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) { + warning("symbolic ref is dangling: %s", refname); + return 0; + } + + object = parse_object_or_die(oid, refname); + if (object->type != OBJ_COMMIT) + return 0; + + add_pending_object(revs, object, ""); + if (bitmap_is_preferred_refname(revs->repo, refname)) + object->flags |= NEEDS_BITMAP; + return 0; +} + +struct bitmap_commit_cb { + struct commit **commits; + size_t commits_nr, commits_alloc; + + struct write_midx_context *ctx; +}; + +static const struct object_id *bitmap_oid_access(size_t index, + const void *_entries) +{ + const struct pack_midx_entry *entries = _entries; + return &entries[index].oid; +} + +static void bitmap_show_commit(struct commit *commit, void *_data) +{ + struct bitmap_commit_cb *data = _data; + int pos = oid_pos(&commit->object.oid, data->ctx->entries, + data->ctx->entries_nr, + bitmap_oid_access); + if (pos < 0) + return; + + ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc); + data->commits[data->commits_nr++] = commit; +} + +static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p, + struct write_midx_context *ctx) +{ + struct rev_info revs; + struct bitmap_commit_cb cb = {0}; + + cb.ctx = ctx; + + repo_init_revisions(the_repository, &revs, NULL); + setup_revisions(0, NULL, &revs, NULL); + for_each_ref(add_ref_to_pending, &revs); + + /* + * Skipping promisor objects here is intentional, since it only excludes + * them from the list of reachable commits that we want to select from + * when computing the selection of MIDX'd commits to receive bitmaps. + * + * Reachability bitmaps do require that their objects be closed under + * reachability, but fetching any objects missing from promisors at this + * point is too late. But, if one of those objects can be reached from + * an another object that is included in the bitmap, then we will + * complain later that we don't have reachability closure (and fail + * appropriately). 
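As a usage note: `find_commits_for_midx_bitmap()` is what selects the commits eligible for bitmaps, and `write_midx_bitmap()` further down writes them out. End users reach this code through `git multi-pack-index write --bitmap` (the command-line option lives in the builtin changes elsewhere in this series, not in this hunk), e.g.:

----
$ git multi-pack-index write --bitmap
$ git multi-pack-index verify
----

The GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP knob declared in midx.h below is presumably how the test suite switches this path on globally.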
+ */ + fetch_if_missing = 0; + revs.exclude_promisor_objects = 1; + + if (prepare_revision_walk(&revs)) + die(_("revision walk setup failed")); + + traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb); + if (indexed_commits_nr_p) + *indexed_commits_nr_p = cb.commits_nr; + + return cb.commits; +} + +static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash, + struct write_midx_context *ctx, + unsigned flags) +{ + struct packing_data pdata; + struct pack_idx_entry **index; + struct commit **commits = NULL; + uint32_t i, commits_nr; + char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name, hash_to_hex(midx_hash)); + int ret; + + prepare_midx_packing_data(&pdata, ctx); + + commits = find_commits_for_midx_bitmap(&commits_nr, ctx); + + /* + * Build the MIDX-order index based on pdata.objects (which is already + * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of + * this order). + */ + ALLOC_ARRAY(index, pdata.nr_objects); + for (i = 0; i < pdata.nr_objects; i++) + index[i] = &pdata.objects[i].idx; + + bitmap_writer_show_progress(flags & MIDX_PROGRESS); + bitmap_writer_build_type_index(&pdata, index, pdata.nr_objects); + + /* + * bitmap_writer_finish expects objects in lex order, but pack_order + * gives us exactly that. use it directly instead of re-sorting the + * array. + * + * This changes the order of objects in 'index' between + * bitmap_writer_build_type_index and bitmap_writer_finish. + * + * The same re-ordering takes place in the single-pack bitmap code via + * write_idx_file(), which is called by finish_tmp_packfile(), which + * happens between bitmap_writer_build_type_index() and + * bitmap_writer_finish(). + */ + for (i = 0; i < pdata.nr_objects; i++) + index[ctx->pack_order[i]] = &pdata.objects[i].idx; + + bitmap_writer_select_commits(commits, commits_nr, -1); + ret = bitmap_writer_build(&pdata); + if (ret < 0) + goto cleanup; + + bitmap_writer_set_checksum(midx_hash); + bitmap_writer_finish(index, pdata.nr_objects, bitmap_name, 0); + +cleanup: + free(index); + free(bitmap_name); + return ret; +} + +static int write_midx_internal(const char *object_dir, struct string_list *packs_to_drop, const char *preferred_pack_name, unsigned flags) @@ -901,20 +1053,26 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * struct hashfile *f = NULL; struct lock_file lk; struct write_midx_context ctx = { 0 }; + struct multi_pack_index *cur; int pack_name_concat_len = 0; int dropped_packs = 0; int result = 0; struct chunkfile *cf; + /* Ensure the given object_dir is local, or a known alternate. 
*/ + find_odb(the_repository, object_dir); + midx_name = get_midx_filename(object_dir); if (safe_create_leading_directories(midx_name)) die_errno(_("unable to create leading directories of %s"), midx_name); - if (m) - ctx.m = m; - else - ctx.m = load_multi_pack_index(object_dir, 1); + for (cur = get_multi_pack_index(the_repository); cur; cur = cur->next) { + if (!strcmp(object_dir, cur->object_dir)) { + ctx.m = cur; + break; + } + } if (ctx.m && !midx_checksum_valid(ctx.m)) { warning(_("ignoring existing multi-pack-index; checksum mismatch")); @@ -932,8 +1090,27 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * ctx.info[ctx.nr].orig_pack_int_id = i; ctx.info[ctx.nr].pack_name = xstrdup(ctx.m->pack_names[i]); - ctx.info[ctx.nr].p = NULL; + ctx.info[ctx.nr].p = ctx.m->packs[i]; ctx.info[ctx.nr].expired = 0; + + if (flags & MIDX_WRITE_REV_INDEX) { + /* + * If generating a reverse index, need to have + * packed_git's loaded to compare their + * mtimes and object count. + */ + if (prepare_midx_pack(the_repository, ctx.m, i)) { + error(_("could not load pack")); + result = 1; + goto cleanup; + } + + if (open_pack_index(ctx.m->packs[i])) + die(_("could not open index for %s"), + ctx.m->packs[i]->pack_name); + ctx.info[ctx.nr].p = ctx.m->packs[i]; + } + ctx.nr++; } } @@ -947,18 +1124,89 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx); stop_progress(&ctx.progress); - if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop) - goto cleanup; + if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop) { + struct bitmap_index *bitmap_git; + int bitmap_exists; + int want_bitmap = flags & MIDX_WRITE_BITMAP; + + bitmap_git = prepare_midx_bitmap_git(ctx.m); + bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git); + free_bitmap_index(bitmap_git); + + if (bitmap_exists || !want_bitmap) { + /* + * The correct MIDX already exists, and so does a + * corresponding bitmap (or one wasn't requested). + */ + if (!want_bitmap) + clear_midx_files_ext(object_dir, ".bitmap", + NULL); + goto cleanup; + } + } - ctx.preferred_pack_idx = -1; if (preferred_pack_name) { + int found = 0; for (i = 0; i < ctx.nr; i++) { if (!cmp_idx_or_pack_name(preferred_pack_name, ctx.info[i].pack_name)) { ctx.preferred_pack_idx = i; + found = 1; break; } } + + if (!found) + warning(_("unknown preferred pack: '%s'"), + preferred_pack_name); + } else if (ctx.nr && + (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) { + struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p; + ctx.preferred_pack_idx = 0; + + if (packs_to_drop && packs_to_drop->nr) + BUG("cannot write a MIDX bitmap during expiration"); + + /* + * set a preferred pack when writing a bitmap to ensure that + * the pack from which the first object is selected in pseudo + * pack-order has all of its objects selected from that pack + * (and not another pack containing a duplicate) + */ + for (i = 1; i < ctx.nr; i++) { + struct packed_git *p = ctx.info[i].p; + + if (!oldest->num_objects || p->mtime < oldest->mtime) { + oldest = p; + ctx.preferred_pack_idx = i; + } + } + + if (!oldest->num_objects) { + /* + * If all packs are empty; unset the preferred index. + * This is acceptable since there will be no duplicate + * objects to resolve, so the preferred value doesn't + * matter. 
+ */ + ctx.preferred_pack_idx = -1; + } + } else { + /* + * otherwise don't mark any pack as preferred to avoid + * interfering with expiration logic below + */ + ctx.preferred_pack_idx = -1; + } + + if (ctx.preferred_pack_idx > -1) { + struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p; + if (!preferred->num_objects) { + error(_("cannot select preferred pack %s with no objects"), + preferred->pack_name); + result = 1; + goto cleanup; + } } ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr, @@ -1029,11 +1277,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * ctx.info, ctx.nr, sizeof(*ctx.info), idx_or_pack_name_cmp); - - if (!preferred) - warning(_("unknown preferred pack: '%s'"), - preferred_pack_name); - else { + if (preferred) { uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id]; if (perm == PACK_EXPIRED) warning(_("preferred pack '%s' is expired"), @@ -1048,9 +1292,6 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR); f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk)); - if (ctx.m) - close_midx(ctx.m); - if (ctx.nr - dropped_packs == 0) { error(_("no pack files to index.")); result = 1; @@ -1081,15 +1322,27 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index * finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM); free_chunkfile(cf); - if (flags & MIDX_WRITE_REV_INDEX) + if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) ctx.pack_order = midx_pack_order(&ctx); if (flags & MIDX_WRITE_REV_INDEX) write_midx_reverse_index(midx_name, midx_hash, &ctx); - clear_midx_files_ext(the_repository, ".rev", midx_hash); + if (flags & MIDX_WRITE_BITMAP) { + if (write_midx_bitmap(midx_name, midx_hash, &ctx, flags) < 0) { + error(_("could not write multi-pack bitmap")); + result = 1; + goto cleanup; + } + } + + if (ctx.m) + close_object_store(the_repository->objects); commit_lock_file(&lk); + clear_midx_files_ext(object_dir, ".bitmap", midx_hash); + clear_midx_files_ext(object_dir, ".rev", midx_hash); + cleanup: for (i = 0; i < ctx.nr; i++) { if (ctx.info[i].p) { @@ -1104,6 +1357,7 @@ cleanup: free(ctx.pack_perm); free(ctx.pack_order); free(midx_name); + return result; } @@ -1111,8 +1365,7 @@ int write_midx_file(const char *object_dir, const char *preferred_pack_name, unsigned flags) { - return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name, - flags); + return write_midx_internal(object_dir, NULL, preferred_pack_name, flags); } struct clear_midx_data { @@ -1135,7 +1388,7 @@ static void clear_midx_file_ext(const char *full_path, size_t full_path_len, die_errno(_("failed to remove %s"), full_path); } -static void clear_midx_files_ext(struct repository *r, const char *ext, +static void clear_midx_files_ext(const char *object_dir, const char *ext, unsigned char *keep_hash) { struct clear_midx_data data; @@ -1146,7 +1399,7 @@ static void clear_midx_files_ext(struct repository *r, const char *ext, hash_to_hex(keep_hash), ext); data.ext = ext; - for_each_file_in_pack_dir(r->objects->odb->path, + for_each_file_in_pack_dir(object_dir, clear_midx_file_ext, &data); @@ -1165,7 +1418,8 @@ void clear_midx_file(struct repository *r) if (remove_path(midx)) die(_("failed to clear multi-pack-index at %s"), midx); - clear_midx_files_ext(r, ".rev", NULL); + clear_midx_files_ext(r->objects->odb->path, ".bitmap", NULL); + clear_midx_files_ext(r->objects->odb->path, ".rev", NULL); 
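For context on the cleanup calls just above: the MIDX and its auxiliary files all live in the pack directory, and the `.rev` and `.bitmap` files embed the MIDX checksum in their names (cf. the `xstrfmt("%s-%s.bitmap", ...)` call in `write_midx_bitmap()` and `get_midx_rev_filename()`). A repository with a freshly written bitmapped MIDX would contain roughly the following (checksums shown as placeholders):

----
$ ls .git/objects/pack/
multi-pack-index
multi-pack-index-<midx-checksum>.bitmap
multi-pack-index-<midx-checksum>.rev
pack-<pack-hash>.idx
pack-<pack-hash>.pack
----

`clear_midx_files_ext()` deletes every file with the given extension except the one matching `keep_hash`, so `write_midx_internal()` keeps only the pair belonging to the MIDX it just wrote, while `clear_midx_file()` passes NULL to remove them all.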
free(midx); } @@ -1390,8 +1644,10 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla free(count); - if (packs_to_drop.nr) - result = write_midx_internal(object_dir, m, &packs_to_drop, NULL, flags); + if (packs_to_drop.nr) { + result = write_midx_internal(object_dir, &packs_to_drop, NULL, flags); + m = NULL; + } string_list_clear(&packs_to_drop, 0); return result; @@ -1580,7 +1836,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, goto cleanup; } - result = write_midx_internal(object_dir, m, NULL, NULL, flags); + result = write_midx_internal(object_dir, NULL, NULL, flags); m = NULL; cleanup: @@ -8,6 +8,8 @@ struct pack_entry; struct repository; #define GIT_TEST_MULTI_PACK_INDEX "GIT_TEST_MULTI_PACK_INDEX" +#define GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP \ + "GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP" struct multi_pack_index { struct multi_pack_index *next; @@ -41,7 +43,10 @@ struct multi_pack_index { #define MIDX_PROGRESS (1 << 0) #define MIDX_WRITE_REV_INDEX (1 << 1) +#define MIDX_WRITE_BITMAP (1 << 2) +const unsigned char *get_midx_checksum(struct multi_pack_index *m); +char *get_midx_filename(const char *object_dir); char *get_midx_rev_filename(struct multi_pack_index *m); struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local); diff --git a/notes-merge.c b/notes-merge.c index 46c1f7c7f1..b4a3a903e8 100644 --- a/notes-merge.c +++ b/notes-merge.c @@ -273,7 +273,7 @@ static void check_notes_merge_worktree(struct notes_merge_options *o) */ if (file_exists(git_path(NOTES_MERGE_WORKTREE)) && !is_empty_dir(git_path(NOTES_MERGE_WORKTREE))) { - if (advice_resolve_conflict) + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) die(_("You have not concluded your previous " "notes merge (%s exists).\nPlease, use " "'git notes merge --commit' or 'git notes " diff --git a/object-file.c b/object-file.c index a8be899481..112d9b4bad 100644 --- a/object-file.c +++ b/object-file.c @@ -32,6 +32,7 @@ #include "packfile.h" #include "object-store.h" #include "promisor-remote.h" +#include "submodule.h" /* The maximum size for an object header. */ #define MAX_HEADER_LEN 32 @@ -414,74 +415,6 @@ enum scld_error safe_create_leading_directories_const(const char *path) return result; } -int raceproof_create_file(const char *path, create_file_fn fn, void *cb) -{ - /* - * The number of times we will try to remove empty directories - * in the way of path. This is only 1 because if another - * process is racily creating directories that conflict with - * us, we don't want to fight against them. - */ - int remove_directories_remaining = 1; - - /* - * The number of times that we will try to create the - * directories containing path. We are willing to attempt this - * more than once, because another process could be trying to - * clean up empty directories at the same time as we are - * trying to create them. - */ - int create_directories_remaining = 3; - - /* A scratch copy of path, filled lazily if we need it: */ - struct strbuf path_copy = STRBUF_INIT; - - int ret, save_errno; - - /* Sanity check: */ - assert(*path); - -retry_fn: - ret = fn(path, cb); - save_errno = errno; - if (!ret) - goto out; - - if (errno == EISDIR && remove_directories_remaining-- > 0) { - /* - * A directory is in the way. 
Maybe it is empty; try - * to remove it: - */ - if (!path_copy.len) - strbuf_addstr(&path_copy, path); - - if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY)) - goto retry_fn; - } else if (errno == ENOENT && create_directories_remaining-- > 0) { - /* - * Maybe the containing directory didn't exist, or - * maybe it was just deleted by a process that is - * racing with us to clean up empty directories. Try - * to create it: - */ - enum scld_error scld_result; - - if (!path_copy.len) - strbuf_addstr(&path_copy, path); - - do { - scld_result = safe_create_leading_directories(path_copy.buf); - if (scld_result == SCLD_OK) - goto retry_fn; - } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0); - } - -out: - strbuf_release(&path_copy); - errno = save_errno; - return ret; -} - static void fill_loose_path(struct strbuf *buf, const struct object_id *oid) { int i; @@ -820,6 +753,27 @@ out: return ref_git; } +struct object_directory *find_odb(struct repository *r, const char *obj_dir) +{ + struct object_directory *odb; + char *obj_dir_real = real_pathdup(obj_dir, 1); + struct strbuf odb_path_real = STRBUF_INIT; + + prepare_alt_odb(r); + for (odb = r->objects->odb; odb; odb = odb->next) { + strbuf_realpath(&odb_path_real, odb->path, 1); + if (!strcmp(obj_dir_real, odb_path_real.buf)) + break; + } + + free(obj_dir_real); + strbuf_release(&odb_path_real); + + if (!odb) + die(_("could not find object directory matching %s"), obj_dir); + return odb; +} + static void fill_alternate_refs_command(struct child_process *cmd, const char *repo_path) { @@ -1592,6 +1546,10 @@ static int do_oid_object_info_extended(struct repository *r, break; } + if (register_all_submodule_odb_as_alternates()) + /* We added some alternates; retry */ + continue; + /* Check if it is a missing object */ if (fetch_if_missing && repo_has_promisor_remote(r) && !already_retried && @@ -1616,7 +1574,7 @@ static int do_oid_object_info_extended(struct repository *r, return 0; rtype = packed_object_info(r, e.p, e.offset, oi); if (rtype < 0) { - mark_bad_packed_object(e.p, real->hash); + mark_bad_packed_object(e.p, real); return do_oid_object_info_extended(r, real, oi, 0); } else if (oi->whence == OI_PACKED) { oi->u.packed.offset = e.offset; @@ -1725,7 +1683,7 @@ void *read_object_file_extended(struct repository *r, die(_("loose object %s (stored in %s) is corrupt"), oid_to_hex(repl), path); - if ((p = has_packed_and_bad(r, repl->hash)) != NULL) + if ((p = has_packed_and_bad(r, repl)) != NULL) die(_("packed object %s (stored in %s) is corrupt"), oid_to_hex(repl), p->pack_name); obj_read_unlock(); diff --git a/object-name.c b/object-name.c index 3263c19457..fdff4601b2 100644 --- a/object-name.c +++ b/object-name.c @@ -806,7 +806,7 @@ static int get_oid_basic(struct repository *r, const char *str, int len, refs_found = repo_dwim_ref(r, str, len, &tmp_oid, &real_ref, 0); if (refs_found > 0) { warning(warn_msg, len, str); - if (advice_object_name_warning) + if (advice_enabled(ADVICE_OBJECT_NAME_WARNING)) fprintf(stderr, "%s\n", _(object_name_msg)); } free(real_ref); diff --git a/object-store.h b/object-store.h index d24915ced1..c5130d8bae 100644 --- a/object-store.h +++ b/object-store.h @@ -10,6 +10,7 @@ #include "khash.h" #include "dir.h" #include "oidtree.h" +#include "oidset.h" struct object_directory { struct object_directory *next; @@ -38,6 +39,7 @@ KHASH_INIT(odb_path_map, const char * /* key: odb_path */, void prepare_alt_odb(struct repository *r); char *compute_alternate_path(const char *path, struct 
strbuf *err); +struct object_directory *find_odb(struct repository *r, const char *obj_dir); typedef int alt_odb_fn(struct object_directory *, void *); int foreach_alt_odb(alt_odb_fn, void*); typedef void alternate_ref_fn(const struct object_id *oid, void *); @@ -75,9 +77,8 @@ struct packed_git { const void *index_data; size_t index_size; uint32_t num_objects; - uint32_t num_bad_objects; uint32_t crc_offset; - unsigned char *bad_object_sha1; + struct oidset bad_objects; int index_version; time_t mtime; int pack_fd; @@ -455,6 +456,12 @@ enum for_each_object_flags { * Visit objects within a pack in packfile order rather than .idx order */ FOR_EACH_OBJECT_PACK_ORDER = (1<<2), + + /* Only iterate over packs that are not marked as kept in-core. */ + FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS = (1<<3), + + /* Only iterate over packs that do not have .keep files. */ + FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS = (1<<4), }; /* @@ -75,7 +75,6 @@ struct object_array { * builtin/fsck.c: 0--3 * builtin/gc.c: 0 * builtin/index-pack.c: 2021 - * builtin/pack-objects.c: 20 * builtin/reflog.c: 10--12 * builtin/show-branch.c: 0-------------------------------------------26 * builtin/unpack-objects.c: 2021 @@ -36,11 +36,6 @@ void oidset_clear(struct oidset *set) oidset_init(set, 0); } -int oidset_size(struct oidset *set) -{ - return kh_size(&set->set); -} - void oidset_parse_file(struct oidset *set, const char *path) { oidset_parse_file_carefully(set, path, NULL, NULL); @@ -57,7 +57,10 @@ int oidset_remove(struct oidset *set, const struct object_id *oid); /** * Returns the number of oids in the set. */ -int oidset_size(struct oidset *set); +static inline int oidset_size(const struct oidset *set) +{ + return kh_size(&set->set); +} /** * Remove all entries from the oidset, freeing any resources associated with diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index 88d9e696a5..9c55c1531e 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -48,7 +48,7 @@ void bitmap_writer_show_progress(int show) } /** - * Build the initial type index for the packfile + * Build the initial type index for the packfile or multi-pack-index */ void bitmap_writer_build_type_index(struct packing_data *to_pack, struct pack_idx_entry **index, @@ -125,15 +125,20 @@ static inline void push_bitmapped_commit(struct commit *commit) writer.selected_nr++; } -static uint32_t find_object_pos(const struct object_id *oid) +static uint32_t find_object_pos(const struct object_id *oid, int *found) { struct object_entry *entry = packlist_find(writer.to_pack, oid); if (!entry) { - die("Failed to write bitmap index. Packfile doesn't have full closure " + if (found) + *found = 0; + warning("Failed to write bitmap index. Packfile doesn't have full closure " "(object %s is missing)", oid_to_hex(oid)); + return 0; } + if (found) + *found = 1; return oe_in_pack_pos(writer.to_pack, entry); } @@ -331,9 +336,10 @@ static void bitmap_builder_clear(struct bitmap_builder *bb) bb->commits_nr = bb->commits_alloc = 0; } -static void fill_bitmap_tree(struct bitmap *bitmap, - struct tree *tree) +static int fill_bitmap_tree(struct bitmap *bitmap, + struct tree *tree) { + int found; uint32_t pos; struct tree_desc desc; struct name_entry entry; @@ -342,9 +348,11 @@ static void fill_bitmap_tree(struct bitmap *bitmap, * If our bit is already set, then there is nothing to do. Both this * tree and all of its children will be set. 
*/ - pos = find_object_pos(&tree->object.oid); + pos = find_object_pos(&tree->object.oid, &found); + if (!found) + return -1; if (bitmap_get(bitmap, pos)) - return; + return 0; bitmap_set(bitmap, pos); if (parse_tree(tree) < 0) @@ -355,11 +363,15 @@ static void fill_bitmap_tree(struct bitmap *bitmap, while (tree_entry(&desc, &entry)) { switch (object_type(entry.mode)) { case OBJ_TREE: - fill_bitmap_tree(bitmap, - lookup_tree(the_repository, &entry.oid)); + if (fill_bitmap_tree(bitmap, + lookup_tree(the_repository, &entry.oid)) < 0) + return -1; break; case OBJ_BLOB: - bitmap_set(bitmap, find_object_pos(&entry.oid)); + pos = find_object_pos(&entry.oid, &found); + if (!found) + return -1; + bitmap_set(bitmap, pos); break; default: /* Gitlink, etc; not reachable */ @@ -368,15 +380,18 @@ static void fill_bitmap_tree(struct bitmap *bitmap, } free_tree_buffer(tree); + return 0; } -static void fill_bitmap_commit(struct bb_commit *ent, - struct commit *commit, - struct prio_queue *queue, - struct prio_queue *tree_queue, - struct bitmap_index *old_bitmap, - const uint32_t *mapping) +static int fill_bitmap_commit(struct bb_commit *ent, + struct commit *commit, + struct prio_queue *queue, + struct prio_queue *tree_queue, + struct bitmap_index *old_bitmap, + const uint32_t *mapping) { + int found; + uint32_t pos; if (!ent->bitmap) ent->bitmap = bitmap_new(); @@ -401,11 +416,16 @@ static void fill_bitmap_commit(struct bb_commit *ent, * Mark ourselves and queue our tree. The commit * walk ensures we cover all parents. */ - bitmap_set(ent->bitmap, find_object_pos(&c->object.oid)); + pos = find_object_pos(&c->object.oid, &found); + if (!found) + return -1; + bitmap_set(ent->bitmap, pos); prio_queue_put(tree_queue, get_commit_tree(c)); for (p = c->parents; p; p = p->next) { - int pos = find_object_pos(&p->item->object.oid); + pos = find_object_pos(&p->item->object.oid, &found); + if (!found) + return -1; if (!bitmap_get(ent->bitmap, pos)) { bitmap_set(ent->bitmap, pos); prio_queue_put(queue, p->item); @@ -413,8 +433,12 @@ static void fill_bitmap_commit(struct bb_commit *ent, } } - while (tree_queue->nr) - fill_bitmap_tree(ent->bitmap, prio_queue_get(tree_queue)); + while (tree_queue->nr) { + if (fill_bitmap_tree(ent->bitmap, + prio_queue_get(tree_queue)) < 0) + return -1; + } + return 0; } static void store_selected(struct bb_commit *ent, struct commit *commit) @@ -432,7 +456,7 @@ static void store_selected(struct bb_commit *ent, struct commit *commit) kh_value(writer.bitmaps, hash_pos) = stored; } -void bitmap_writer_build(struct packing_data *to_pack) +int bitmap_writer_build(struct packing_data *to_pack) { struct bitmap_builder bb; size_t i; @@ -441,6 +465,7 @@ void bitmap_writer_build(struct packing_data *to_pack) struct prio_queue tree_queue = { NULL }; struct bitmap_index *old_bitmap; uint32_t *mapping; + int closed = 1; /* until proven otherwise */ writer.bitmaps = kh_init_oid_map(); writer.to_pack = to_pack; @@ -463,8 +488,11 @@ void bitmap_writer_build(struct packing_data *to_pack) struct commit *child; int reused = 0; - fill_bitmap_commit(ent, commit, &queue, &tree_queue, - old_bitmap, mapping); + if (fill_bitmap_commit(ent, commit, &queue, &tree_queue, + old_bitmap, mapping) < 0) { + closed = 0; + break; + } if (ent->selected) { store_selected(ent, commit); @@ -492,6 +520,7 @@ void bitmap_writer_build(struct packing_data *to_pack) clear_prio_queue(&queue); clear_prio_queue(&tree_queue); bitmap_builder_clear(&bb); + free_bitmap_index(old_bitmap); free(mapping); 
trace2_region_leave("pack-bitmap-write", "building_bitmaps_total", @@ -499,7 +528,9 @@ void bitmap_writer_build(struct packing_data *to_pack) stop_progress(&writer.progress); - compute_xor_offsets(); + if (closed) + compute_xor_offsets(); + return closed ? 0 : -1; } /** diff --git a/pack-bitmap.c b/pack-bitmap.c index d999616c9e..8504110a4d 100644 --- a/pack-bitmap.c +++ b/pack-bitmap.c @@ -13,6 +13,7 @@ #include "repository.h" #include "object-store.h" #include "list-objects-filter-options.h" +#include "midx.h" #include "config.h" /* @@ -35,8 +36,15 @@ struct stored_bitmap { * the active bitmap index is the largest one. */ struct bitmap_index { - /* Packfile to which this bitmap index belongs to */ + /* + * The pack or multi-pack index (MIDX) that this bitmap index belongs + * to. + * + * Exactly one of these must be non-NULL; this specifies the object + * order used to interpret this bitmap. + */ struct packed_git *pack; + struct multi_pack_index *midx; /* * Mark the first `reuse_objects` in the packfile as reused: @@ -71,6 +79,9 @@ struct bitmap_index { /* If not NULL, this is a name-hash cache pointing into map. */ uint32_t *hashes; + /* The checksum of the packfile or MIDX; points into map. */ + const unsigned char *checksum; + /* * Extended index. * @@ -136,6 +147,13 @@ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) return b; } +static uint32_t bitmap_num_objects(struct bitmap_index *index) +{ + if (index->midx) + return index->midx->num_objects; + return index->pack->num_objects; +} + static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; @@ -154,7 +172,7 @@ static int load_bitmap_header(struct bitmap_index *index) /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); - size_t cache_size = st_mult(index->pack->num_objects, sizeof(uint32_t)); + size_t cache_size = st_mult(bitmap_num_objects(index), sizeof(uint32_t)); unsigned char *index_end = index->map + index->map_size - the_hash_algo->rawsz; if ((flags & BITMAP_OPT_FULL_DAG) == 0) @@ -170,6 +188,7 @@ static int load_bitmap_header(struct bitmap_index *index) } index->entry_count = ntohl(header->entry_count); + index->checksum = header->checksum; index->map_pos += header_size; return 0; } @@ -218,6 +237,15 @@ static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) #define MAX_XOR_OFFSET 160 +static int nth_bitmap_object_oid(struct bitmap_index *index, + struct object_id *oid, + uint32_t n) +{ + if (index->midx) + return nth_midxed_object_oid(oid, index->midx, n) ? 
0 : -1; + return nth_packed_object_id(oid, index->pack, n); +} + static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; @@ -237,7 +265,7 @@ static int load_bitmap_entries_v1(struct bitmap_index *index) xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); - if (nth_packed_object_id(&oid, index->pack, commit_idx_pos) < 0) + if (nth_bitmap_object_oid(index, &oid, commit_idx_pos) < 0) return error("corrupt ewah bitmap: commit index %u out of range", (unsigned)commit_idx_pos); @@ -262,7 +290,14 @@ static int load_bitmap_entries_v1(struct bitmap_index *index) return 0; } -static char *pack_bitmap_filename(struct packed_git *p) +char *midx_bitmap_filename(struct multi_pack_index *midx) +{ + return xstrfmt("%s-%s.bitmap", + get_midx_filename(midx->object_dir), + hash_to_hex(get_midx_checksum(midx))); +} + +char *pack_bitmap_filename(struct packed_git *p) { size_t len; @@ -271,6 +306,57 @@ static char *pack_bitmap_filename(struct packed_git *p) return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } +static int open_midx_bitmap_1(struct bitmap_index *bitmap_git, + struct multi_pack_index *midx) +{ + struct stat st; + char *idx_name = midx_bitmap_filename(midx); + int fd = git_open(idx_name); + + free(idx_name); + + if (fd < 0) + return -1; + + if (fstat(fd, &st)) { + close(fd); + return -1; + } + + if (bitmap_git->pack || bitmap_git->midx) { + /* ignore extra bitmap file; we can only handle one */ + warning("ignoring extra bitmap file: %s", + get_midx_filename(midx->object_dir)); + close(fd); + return -1; + } + + bitmap_git->midx = midx; + bitmap_git->map_size = xsize_t(st.st_size); + bitmap_git->map_pos = 0; + bitmap_git->map = xmmap(NULL, bitmap_git->map_size, PROT_READ, + MAP_PRIVATE, fd, 0); + close(fd); + + if (load_bitmap_header(bitmap_git) < 0) + goto cleanup; + + if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum)) + goto cleanup; + + if (load_midx_revindex(bitmap_git->midx) < 0) { + warning(_("multi-pack bitmap is missing required reverse index")); + goto cleanup; + } + return 0; + +cleanup: + munmap(bitmap_git->map, bitmap_git->map_size); + bitmap_git->map_size = 0; + bitmap_git->map = NULL; + return -1; +} + static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git *packfile) { int fd; @@ -292,7 +378,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git return -1; } - if (bitmap_git->pack) { + if (bitmap_git->pack || bitmap_git->midx) { + /* ignore extra bitmap file; we can only handle one */ warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; @@ -319,13 +406,39 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git return 0; } -static int load_pack_bitmap(struct bitmap_index *bitmap_git) +static int load_reverse_index(struct bitmap_index *bitmap_git) +{ + if (bitmap_is_midx(bitmap_git)) { + uint32_t i; + int ret; + + /* + * The multi-pack-index's .rev file is already loaded via + * open_pack_bitmap_1(). + * + * But we still need to open the individual pack .rev files, + * since we will need to make use of them in pack-objects. 
+ */ + for (i = 0; i < bitmap_git->midx->num_packs; i++) { + if (prepare_midx_pack(the_repository, bitmap_git->midx, i)) + die(_("load_reverse_index: could not open pack")); + ret = load_pack_revindex(bitmap_git->midx->packs[i]); + if (ret) + return ret; + } + return 0; + } + return load_pack_revindex(bitmap_git->pack); +} + +static int load_bitmap(struct bitmap_index *bitmap_git) { assert(bitmap_git->map); bitmap_git->bitmaps = kh_init_oid_map(); bitmap_git->ext_index.positions = kh_init_oid_pos(); - if (load_pack_revindex(bitmap_git->pack)) + + if (load_reverse_index(bitmap_git)) goto failed; if (!(bitmap_git->commits = read_bitmap_1(bitmap_git)) || @@ -369,11 +482,46 @@ static int open_pack_bitmap(struct repository *r, return ret; } +static int open_midx_bitmap(struct repository *r, + struct bitmap_index *bitmap_git) +{ + struct multi_pack_index *midx; + + assert(!bitmap_git->map); + + for (midx = get_multi_pack_index(r); midx; midx = midx->next) { + if (!open_midx_bitmap_1(bitmap_git, midx)) + return 0; + } + return -1; +} + +static int open_bitmap(struct repository *r, + struct bitmap_index *bitmap_git) +{ + assert(!bitmap_git->map); + + if (!open_midx_bitmap(r, bitmap_git)) + return 0; + return open_pack_bitmap(r, bitmap_git); +} + struct bitmap_index *prepare_bitmap_git(struct repository *r) { struct bitmap_index *bitmap_git = xcalloc(1, sizeof(*bitmap_git)); - if (!open_pack_bitmap(r, bitmap_git) && !load_pack_bitmap(bitmap_git)) + if (!open_bitmap(r, bitmap_git) && !load_bitmap(bitmap_git)) + return bitmap_git; + + free_bitmap_index(bitmap_git); + return NULL; +} + +struct bitmap_index *prepare_midx_bitmap_git(struct multi_pack_index *midx) +{ + struct bitmap_index *bitmap_git = xcalloc(1, sizeof(*bitmap_git)); + + if (!open_midx_bitmap_1(bitmap_git, midx) && !load_bitmap(bitmap_git)) return bitmap_git; free_bitmap_index(bitmap_git); @@ -404,7 +552,7 @@ static inline int bitmap_position_extended(struct bitmap_index *bitmap_git, if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); - return bitmap_pos + bitmap_git->pack->num_objects; + return bitmap_pos + bitmap_num_objects(bitmap_git); } return -1; @@ -423,10 +571,26 @@ static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git, return pos; } +static int bitmap_position_midx(struct bitmap_index *bitmap_git, + const struct object_id *oid) +{ + uint32_t want, got; + if (!bsearch_midx(oid, bitmap_git->midx, &want)) + return -1; + + if (midx_to_pack_pos(bitmap_git->midx, want, &got) < 0) + return -1; + return got; +} + static int bitmap_position(struct bitmap_index *bitmap_git, const struct object_id *oid) { - int pos = bitmap_position_packfile(bitmap_git, oid); + int pos; + if (bitmap_is_midx(bitmap_git)) + pos = bitmap_position_midx(bitmap_git, oid); + else + pos = bitmap_position_packfile(bitmap_git, oid); return (pos >= 0) ? 
pos : bitmap_position_extended(bitmap_git, oid); } @@ -456,7 +620,7 @@ static int ext_index_add_object(struct bitmap_index *bitmap_git, bitmap_pos = kh_value(eindex->positions, hash_pos); } - return bitmap_pos + bitmap_git->pack->num_objects; + return bitmap_pos + bitmap_num_objects(bitmap_git); } struct bitmap_show_data { @@ -673,7 +837,7 @@ static void show_extended_objects(struct bitmap_index *bitmap_git, for (i = 0; i < eindex->count; ++i) { struct object *obj; - if (!bitmap_get(objects, bitmap_git->pack->num_objects + i)) + if (!bitmap_get(objects, bitmap_num_objects(bitmap_git) + i)) continue; obj = eindex->objects[i]; @@ -737,6 +901,7 @@ static void show_objects_for_type( continue; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { + struct packed_git *pack; struct object_id oid; uint32_t hash = 0, index_pos; off_t ofs; @@ -746,14 +911,28 @@ static void show_objects_for_type( offset += ewah_bit_ctz64(word >> offset); - index_pos = pack_pos_to_index(bitmap_git->pack, pos + offset); - ofs = pack_pos_to_offset(bitmap_git->pack, pos + offset); - nth_packed_object_id(&oid, bitmap_git->pack, index_pos); + if (bitmap_is_midx(bitmap_git)) { + struct multi_pack_index *m = bitmap_git->midx; + uint32_t pack_id; + + index_pos = pack_pos_to_midx(m, pos + offset); + ofs = nth_midxed_offset(m, index_pos); + nth_midxed_object_oid(&oid, m, index_pos); + + pack_id = nth_midxed_pack_int_id(m, index_pos); + pack = bitmap_git->midx->packs[pack_id]; + } else { + index_pos = pack_pos_to_index(bitmap_git->pack, pos + offset); + ofs = pack_pos_to_offset(bitmap_git->pack, pos + offset); + nth_bitmap_object_oid(bitmap_git, &oid, index_pos); + + pack = bitmap_git->pack; + } if (bitmap_git->hashes) hash = get_be32(bitmap_git->hashes + index_pos); - show_reach(&oid, object_type, 0, hash, bitmap_git->pack, ofs); + show_reach(&oid, object_type, 0, hash, pack, ofs); } } } @@ -765,8 +944,13 @@ static int in_bitmapped_pack(struct bitmap_index *bitmap_git, struct object *object = roots->item; roots = roots->next; - if (find_pack_entry_one(object->oid.hash, bitmap_git->pack) > 0) - return 1; + if (bitmap_is_midx(bitmap_git)) { + if (bsearch_midx(&object->oid, bitmap_git->midx, NULL)) + return 1; + } else { + if (find_pack_entry_one(object->oid.hash, bitmap_git->pack) > 0) + return 1; + } } return 0; @@ -832,7 +1016,7 @@ static void filter_bitmap_exclude_type(struct bitmap_index *bitmap_git, * them individually. 
*/ for (i = 0; i < eindex->count; i++) { - uint32_t pos = i + bitmap_git->pack->num_objects; + uint32_t pos = i + bitmap_num_objects(bitmap_git); if (eindex->objects[i]->type == type && bitmap_get(to_filter, pos) && !bitmap_get(tips, pos)) @@ -853,23 +1037,35 @@ static void filter_bitmap_blob_none(struct bitmap_index *bitmap_git, static unsigned long get_size_by_pos(struct bitmap_index *bitmap_git, uint32_t pos) { - struct packed_git *pack = bitmap_git->pack; unsigned long size; struct object_info oi = OBJECT_INFO_INIT; oi.sizep = &size; - if (pos < pack->num_objects) { - off_t ofs = pack_pos_to_offset(pack, pos); + if (pos < bitmap_num_objects(bitmap_git)) { + struct packed_git *pack; + off_t ofs; + + if (bitmap_is_midx(bitmap_git)) { + uint32_t midx_pos = pack_pos_to_midx(bitmap_git->midx, pos); + uint32_t pack_id = nth_midxed_pack_int_id(bitmap_git->midx, midx_pos); + + pack = bitmap_git->midx->packs[pack_id]; + ofs = nth_midxed_offset(bitmap_git->midx, midx_pos); + } else { + pack = bitmap_git->pack; + ofs = pack_pos_to_offset(pack, pos); + } + if (packed_object_info(the_repository, pack, ofs, &oi) < 0) { struct object_id oid; - nth_packed_object_id(&oid, pack, - pack_pos_to_index(pack, pos)); + nth_bitmap_object_oid(bitmap_git, &oid, + pack_pos_to_index(pack, pos)); die(_("unable to get size of %s"), oid_to_hex(&oid)); } } else { struct eindex *eindex = &bitmap_git->ext_index; - struct object *obj = eindex->objects[pos - pack->num_objects]; + struct object *obj = eindex->objects[pos - bitmap_num_objects(bitmap_git)]; if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0) die(_("unable to get size of %s"), oid_to_hex(&obj->oid)); } @@ -911,7 +1107,7 @@ static void filter_bitmap_blob_limit(struct bitmap_index *bitmap_git, } for (i = 0; i < eindex->count; i++) { - uint32_t pos = i + bitmap_git->pack->num_objects; + uint32_t pos = i + bitmap_num_objects(bitmap_git); if (eindex->objects[i]->type == OBJ_BLOB && bitmap_get(to_filter, pos) && !bitmap_get(tips, pos) && @@ -1041,7 +1237,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ CALLOC_ARRAY(bitmap_git, 1); - if (open_pack_bitmap(revs->repo, bitmap_git) < 0) + if (open_bitmap(revs->repo, bitmap_git) < 0) goto cleanup; for (i = 0; i < revs->pending.nr; ++i) { @@ -1085,7 +1281,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ - if (load_pack_bitmap(bitmap_git) < 0) + if (load_bitmap(bitmap_git) < 0) goto cleanup; object_array_clear(&revs->pending); @@ -1128,22 +1324,49 @@ cleanup: return NULL; } -static void try_partial_reuse(struct bitmap_index *bitmap_git, - size_t pos, - struct bitmap *reuse, - struct pack_window **w_curs) +/* + * -1 means "stop trying further objects"; 0 means we may or may not have + * reused, but you can keep feeding bits. + */ +static int try_partial_reuse(struct packed_git *pack, + size_t pos, + struct bitmap *reuse, + struct pack_window **w_curs) { - off_t offset, header; + off_t offset, delta_obj_offset; enum object_type type; unsigned long size; - if (pos >= bitmap_git->pack->num_objects) - return; /* not actually in the pack */ + /* + * try_partial_reuse() is called either on (a) objects in the + * bitmapped pack (in the case of a single-pack bitmap) or (b) + * objects in the preferred pack of a multi-pack bitmap. 
+ * Importantly, the latter can pretend as if only a single pack + * exists because: + * + * - The first pack->num_objects bits of a MIDX bitmap are + * reserved for the preferred pack, and + * + * - Ties due to duplicate objects are always resolved in + * favor of the preferred pack. + * + * Therefore we do not need to ever ask the MIDX for its copy of + * an object by OID, since it will always select it from the + * preferred pack. Likewise, the selected copy of the base + * object for any deltas will reside in the same pack. + * + * This means that we can reuse pos when looking up the bit in + * the reuse bitmap, too, since bits corresponding to the + * preferred pack precede all bits from other packs. + */ + + if (pos >= pack->num_objects) + return -1; /* not actually in the pack or MIDX preferred pack */ - offset = header = pack_pos_to_offset(bitmap_git->pack, pos); - type = unpack_object_header(bitmap_git->pack, w_curs, &offset, &size); + offset = delta_obj_offset = pack_pos_to_offset(pack, pos); + type = unpack_object_header(pack, w_curs, &offset, &size); if (type < 0) - return; /* broken packfile, punt */ + return -1; /* broken packfile, punt */ if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA) { off_t base_offset; @@ -1157,12 +1380,12 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git, * and the normal slow path will complain about it in * more detail. */ - base_offset = get_delta_base(bitmap_git->pack, w_curs, - &offset, type, header); + base_offset = get_delta_base(pack, w_curs, &offset, type, + delta_obj_offset); if (!base_offset) - return; - if (offset_to_pack_pos(bitmap_git->pack, base_offset, &base_pos) < 0) - return; + return 0; + if (offset_to_pack_pos(pack, base_offset, &base_pos) < 0) + return 0; /* * We assume delta dependencies always point backwards. This @@ -1174,7 +1397,7 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git, * odd parameters. */ if (base_pos >= pos) - return; + return 0; /* * And finally, if we're not sending the base as part of our @@ -1185,13 +1408,22 @@ static void try_partial_reuse(struct bitmap_index *bitmap_git, * object_entry code path handle it. */ if (!bitmap_get(reuse, base_pos)) - return; + return 0; } /* * If we got here, then the object is OK to reuse. Mark it. */ bitmap_set(reuse, pos); + return 0; +} + +static uint32_t midx_preferred_pack(struct bitmap_index *bitmap_git) +{ + struct multi_pack_index *m = bitmap_git->midx; + if (!m) + BUG("midx_preferred_pack: requires non-empty MIDX"); + return nth_midxed_pack_int_id(m, pack_pos_to_midx(bitmap_git->midx, 0)); } int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, @@ -1199,20 +1431,37 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, uint32_t *entries, struct bitmap **reuse_out) { + struct packed_git *pack; struct bitmap *result = bitmap_git->result; struct bitmap *reuse; struct pack_window *w_curs = NULL; size_t i = 0; uint32_t offset; + uint32_t objects_nr; assert(result); + load_reverse_index(bitmap_git); + + if (bitmap_is_midx(bitmap_git)) + pack = bitmap_git->midx->packs[midx_preferred_pack(bitmap_git)]; + else + pack = bitmap_git->pack; + objects_nr = pack->num_objects; + while (i < result->word_alloc && result->words[i] == (eword_t)~0) i++; - /* Don't mark objects not in the packfile */ - if (i > bitmap_git->pack->num_objects / BITS_IN_EWORD) - i = bitmap_git->pack->num_objects / BITS_IN_EWORD; + /* + * Don't mark objects not in the packfile or preferred pack. 
This bitmap + * marks objects eligible for reuse, but the pack-reuse code only + * understands how to reuse a single pack. Since the preferred pack is + * guaranteed to have all bases for its deltas (in a multi-pack bitmap), + * we use it instead of another pack. In single-pack bitmaps, the choice + * is made for us. + */ + if (i > objects_nr / BITS_IN_EWORD) + i = objects_nr / BITS_IN_EWORD; reuse = bitmap_word_alloc(i); memset(reuse->words, 0xFF, i * sizeof(eword_t)); @@ -1226,10 +1475,23 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, break; offset += ewah_bit_ctz64(word >> offset); - try_partial_reuse(bitmap_git, pos + offset, reuse, &w_curs); + if (try_partial_reuse(pack, pos + offset, + reuse, &w_curs) < 0) { + /* + * try_partial_reuse indicated we couldn't reuse + * any bits, so there is no point in trying more + * bits in the current word, or any other words + * in result. + * + * Jump out of both loops to avoid future + * unnecessary calls to try_partial_reuse. + */ + goto done; + } } } +done: unuse_pack(&w_curs); *entries = bitmap_popcount(reuse); @@ -1243,7 +1505,7 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git, * need to be handled separately. */ bitmap_and_not(result, reuse); - *packfile_out = bitmap_git->pack; + *packfile_out = pack; *reuse_out = reuse; return 0; } @@ -1296,7 +1558,7 @@ static uint32_t count_object_type(struct bitmap_index *bitmap_git, for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && - bitmap_get(objects, bitmap_git->pack->num_objects + i)) + bitmap_get(objects, bitmap_num_objects(bitmap_git) + i)) count++; } @@ -1325,10 +1587,52 @@ void count_bitmap_commit_list(struct bitmap_index *bitmap_git, struct bitmap_test_data { struct bitmap_index *bitmap_git; struct bitmap *base; + struct bitmap *commits; + struct bitmap *trees; + struct bitmap *blobs; + struct bitmap *tags; struct progress *prg; size_t seen; }; +static void test_bitmap_type(struct bitmap_test_data *tdata, + struct object *obj, int pos) +{ + enum object_type bitmap_type = OBJ_NONE; + int bitmaps_nr = 0; + + if (bitmap_get(tdata->commits, pos)) { + bitmap_type = OBJ_COMMIT; + bitmaps_nr++; + } + if (bitmap_get(tdata->trees, pos)) { + bitmap_type = OBJ_TREE; + bitmaps_nr++; + } + if (bitmap_get(tdata->blobs, pos)) { + bitmap_type = OBJ_BLOB; + bitmaps_nr++; + } + if (bitmap_get(tdata->tags, pos)) { + bitmap_type = OBJ_TAG; + bitmaps_nr++; + } + + if (bitmap_type == OBJ_NONE) + die("object %s not found in type bitmaps", + oid_to_hex(&obj->oid)); + + if (bitmaps_nr > 1) + die("object %s does not have a unique type", + oid_to_hex(&obj->oid)); + + if (bitmap_type != obj->type) + die("object %s: real type %s, expected: %s", + oid_to_hex(&obj->oid), + type_name(obj->type), + type_name(bitmap_type)); +} + static void test_show_object(struct object *object, const char *name, void *data) { @@ -1338,6 +1642,7 @@ static void test_show_object(struct object *object, const char *name, bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); + test_bitmap_type(tdata, object, bitmap_pos); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); @@ -1352,6 +1657,7 @@ static void test_show_commit(struct commit *commit, void *data) &commit->object.oid); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); + test_bitmap_type(tdata, &commit->object, bitmap_pos); bitmap_set(tdata->base, 
bitmap_pos); display_progress(tdata->prg, ++tdata->seen); @@ -1399,6 +1705,10 @@ void test_bitmap_walk(struct rev_info *revs) tdata.bitmap_git = bitmap_git; tdata.base = bitmap_new(); + tdata.commits = ewah_to_bitmap(bitmap_git->commits); + tdata.trees = ewah_to_bitmap(bitmap_git->trees); + tdata.blobs = ewah_to_bitmap(bitmap_git->blobs); + tdata.tags = ewah_to_bitmap(bitmap_git->tags); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; @@ -1469,15 +1779,26 @@ uint32_t *create_bitmap_mapping(struct bitmap_index *bitmap_git, uint32_t i, num_objects; uint32_t *reposition; - num_objects = bitmap_git->pack->num_objects; + if (!bitmap_is_midx(bitmap_git)) + load_reverse_index(bitmap_git); + else if (load_midx_revindex(bitmap_git->midx) < 0) + BUG("rebuild_existing_bitmaps: missing required rev-cache " + "extension"); + + num_objects = bitmap_num_objects(bitmap_git); CALLOC_ARRAY(reposition, num_objects); for (i = 0; i < num_objects; ++i) { struct object_id oid; struct object_entry *oe; - nth_packed_object_id(&oid, bitmap_git->pack, - pack_pos_to_index(bitmap_git->pack, i)); + if (bitmap_is_midx(bitmap_git)) + nth_midxed_object_oid(&oid, + bitmap_git->midx, + pack_pos_to_midx(bitmap_git->midx, i)); + else + nth_packed_object_id(&oid, bitmap_git->pack, + pack_pos_to_index(bitmap_git->pack, i)); oe = packlist_find(mapping, &oid); if (oe) @@ -1503,6 +1824,19 @@ void free_bitmap_index(struct bitmap_index *b) free(b->ext_index.hashes); bitmap_free(b->result); bitmap_free(b->haves); + if (bitmap_is_midx(b)) { + /* + * Multi-pack bitmaps need to have resources associated with + * their on-disk reverse indexes unmapped so that stale .rev and + * .bitmap files can be removed. + * + * Unlike pack-based bitmaps, multi-pack bitmaps can be read and + * written in the same 'git multi-pack-index write --bitmap' + * process. Close resources so they can be removed safely on + * platforms like Windows. 
+ */ + close_midx_revindex(b->midx); + } free(b); } @@ -1517,7 +1851,6 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git, enum object_type object_type) { struct bitmap *result = bitmap_git->result; - struct packed_git *pack = bitmap_git->pack; off_t total = 0; struct ewah_iterator it; eword_t filter; @@ -1534,15 +1867,35 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git, continue; for (offset = 0; offset < BITS_IN_EWORD; offset++) { - size_t pos; - if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); - pos = base + offset; - total += pack_pos_to_offset(pack, pos + 1) - - pack_pos_to_offset(pack, pos); + + if (bitmap_is_midx(bitmap_git)) { + uint32_t pack_pos; + uint32_t midx_pos = pack_pos_to_midx(bitmap_git->midx, base + offset); + off_t offset = nth_midxed_offset(bitmap_git->midx, midx_pos); + + uint32_t pack_id = nth_midxed_pack_int_id(bitmap_git->midx, midx_pos); + struct packed_git *pack = bitmap_git->midx->packs[pack_id]; + + if (offset_to_pack_pos(pack, offset, &pack_pos) < 0) { + struct object_id oid; + nth_midxed_object_oid(&oid, bitmap_git->midx, midx_pos); + + die(_("could not find %s in pack %s at offset %"PRIuMAX), + oid_to_hex(&oid), + pack->pack_name, + (uintmax_t)offset); + } + + total += pack_pos_to_offset(pack, pack_pos + 1) - offset; + } else { + size_t pos = base + offset; + total += pack_pos_to_offset(bitmap_git->pack, pos + 1) - + pack_pos_to_offset(bitmap_git->pack, pos); + } } } @@ -1552,7 +1905,6 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git, static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git) { struct bitmap *result = bitmap_git->result; - struct packed_git *pack = bitmap_git->pack; struct eindex *eindex = &bitmap_git->ext_index; off_t total = 0; struct object_info oi = OBJECT_INFO_INIT; @@ -1564,7 +1916,7 @@ static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git) for (i = 0; i < eindex->count; i++) { struct object *obj = eindex->objects[i]; - if (!bitmap_get(result, pack->num_objects + i)) + if (!bitmap_get(result, bitmap_num_objects(bitmap_git) + i)) continue; if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0) @@ -1594,7 +1946,28 @@ off_t get_disk_usage_from_bitmap(struct bitmap_index *bitmap_git, return total; } +int bitmap_is_midx(struct bitmap_index *bitmap_git) +{ + return !!bitmap_git->midx; +} + const struct string_list *bitmap_preferred_tips(struct repository *r) { return repo_config_get_value_multi(r, "pack.preferbitmaptips"); } + +int bitmap_is_preferred_refname(struct repository *r, const char *refname) +{ + const struct string_list *preferred_tips = bitmap_preferred_tips(r); + struct string_list_item *item; + + if (!preferred_tips) + return 0; + + for_each_string_list_item(item, preferred_tips) { + if (starts_with(refname, item->string)) + return 1; + } + + return 0; +} diff --git a/pack-bitmap.h b/pack-bitmap.h index 99d733eb26..469090bad2 100644 --- a/pack-bitmap.h +++ b/pack-bitmap.h @@ -44,6 +44,7 @@ typedef int (*show_reachable_fn)( struct bitmap_index; struct bitmap_index *prepare_bitmap_git(struct repository *r); +struct bitmap_index *prepare_midx_bitmap_git(struct multi_pack_index *midx); void count_bitmap_commit_list(struct bitmap_index *, uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags); void traverse_bitmap_commit_list(struct bitmap_index *, @@ -87,12 +88,17 @@ struct ewah_bitmap *bitmap_for_commit(struct bitmap_index *bitmap_git, struct commit *commit); void 
bitmap_writer_select_commits(struct commit **indexed_commits, unsigned int indexed_commits_nr, int max_bitmaps); -void bitmap_writer_build(struct packing_data *to_pack); +int bitmap_writer_build(struct packing_data *to_pack); void bitmap_writer_finish(struct pack_idx_entry **index, uint32_t index_nr, const char *filename, uint16_t options); +char *midx_bitmap_filename(struct multi_pack_index *midx); +char *pack_bitmap_filename(struct packed_git *p); + +int bitmap_is_midx(struct bitmap_index *bitmap_git); const struct string_list *bitmap_preferred_tips(struct repository *r); +int bitmap_is_preferred_refname(struct repository *r, const char *refname); #endif diff --git a/pack-revindex.h b/pack-revindex.h index 479b8f2f9c..74f4eae668 100644 --- a/pack-revindex.h +++ b/pack-revindex.h @@ -109,7 +109,7 @@ off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos); * If the reverse index has not yet been loaded, or the position is out of * bounds, this function aborts. * - * This function runs in time O(log N) with the number of objects in the MIDX. + * This function runs in constant time. */ uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos); @@ -120,7 +120,7 @@ uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos); * If the reverse index has not yet been loaded, or the position is out of * bounds, this function aborts. * - * This function runs in constant time. + * This function runs in time O(log N) with the number of objects in the MIDX. */ int midx_to_pack_pos(struct multi_pack_index *midx, uint32_t at, uint32_t *pos); diff --git a/pack-write.c b/pack-write.c index f1fc3ecafa..a5846f3a34 100644 --- a/pack-write.c +++ b/pack-write.c @@ -75,9 +75,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec index_name = strbuf_detach(&tmp_file, NULL); } else { unlink(index_name); - fd = open(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600); - if (fd < 0) - die_errno("unable to create '%s'", index_name); + fd = xopen(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600); } f = hashfd(fd, index_name); } @@ -224,6 +222,9 @@ const char *write_rev_file(const char *rev_name, uint32_t i; const char *ret; + if (!(flags & WRITE_REV) && !(flags & WRITE_REV_VERIFY)) + return NULL; + ALLOC_ARRAY(pack_order, nr_objects); for (i = 0; i < nr_objects; i++) pack_order[i] = i; @@ -256,9 +257,7 @@ const char *write_rev_file_order(const char *rev_name, rev_name = strbuf_detach(&tmp_file, NULL); } else { unlink(rev_name); - fd = open(rev_name, O_CREAT|O_EXCL|O_WRONLY, 0600); - if (fd < 0) - die_errno("unable to create '%s'", rev_name); + fd = xopen(rev_name, O_CREAT|O_EXCL|O_WRONLY, 0600); } f = hashfd(fd, rev_name); } else if (flags & WRITE_REV_VERIFY) { @@ -462,49 +461,48 @@ struct hashfile *create_tmp_packfile(char **pack_tmp_name) return hashfd(fd, *pack_tmp_name); } -void finish_tmp_packfile(struct strbuf *name_buffer, +static void rename_tmp_packfile(struct strbuf *name_prefix, const char *source, + const char *ext) +{ + size_t name_prefix_len = name_prefix->len; + + strbuf_addstr(name_prefix, ext); + if (rename(source, name_prefix->buf)) + die_errno("unable to rename temporary file to '%s'", + name_prefix->buf); + strbuf_setlen(name_prefix, name_prefix_len); +} + +void rename_tmp_packfile_idx(struct strbuf *name_buffer, + char **idx_tmp_name) +{ + rename_tmp_packfile(name_buffer, *idx_tmp_name, "idx"); +} + +void stage_tmp_packfiles(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option 
*pack_idx_opts, - unsigned char hash[]) + unsigned char hash[], + char **idx_tmp_name) { - const char *idx_tmp_name, *rev_tmp_name = NULL; - int basename_len = name_buffer->len; + const char *rev_tmp_name = NULL; if (adjust_shared_perm(pack_tmp_name)) die_errno("unable to make temporary pack file readable"); - idx_tmp_name = write_idx_file(NULL, written_list, nr_written, - pack_idx_opts, hash); - if (adjust_shared_perm(idx_tmp_name)) + *idx_tmp_name = (char *)write_idx_file(NULL, written_list, nr_written, + pack_idx_opts, hash); + if (adjust_shared_perm(*idx_tmp_name)) die_errno("unable to make temporary index file readable"); rev_tmp_name = write_rev_file(NULL, written_list, nr_written, hash, pack_idx_opts->flags); - strbuf_addf(name_buffer, "%s.pack", hash_to_hex(hash)); - - if (rename(pack_tmp_name, name_buffer->buf)) - die_errno("unable to rename temporary pack file"); - - strbuf_setlen(name_buffer, basename_len); - - strbuf_addf(name_buffer, "%s.idx", hash_to_hex(hash)); - if (rename(idx_tmp_name, name_buffer->buf)) - die_errno("unable to rename temporary index file"); - - strbuf_setlen(name_buffer, basename_len); - - if (rev_tmp_name) { - strbuf_addf(name_buffer, "%s.rev", hash_to_hex(hash)); - if (rename(rev_tmp_name, name_buffer->buf)) - die_errno("unable to rename temporary reverse-index file"); - } - - strbuf_setlen(name_buffer, basename_len); - - free((void *)idx_tmp_name); + rename_tmp_packfile(name_buffer, pack_tmp_name, "pack"); + if (rev_tmp_name) + rename_tmp_packfile(name_buffer, rev_tmp_name, "rev"); } void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought) @@ -110,6 +110,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len, int read_pack_header(int fd, struct pack_header *); struct hashfile *create_tmp_packfile(char **pack_tmp_name); -void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]); +void stage_tmp_packfiles(struct strbuf *name_buffer, + const char *pack_tmp_name, + struct pack_idx_entry **written_list, + uint32_t nr_written, + struct pack_idx_option *pack_idx_opts, + unsigned char hash[], + char **idx_tmp_name); +void rename_tmp_packfile_idx(struct strbuf *basename, + char **idx_tmp_name); #endif diff --git a/packfile.c b/packfile.c index 9ef6d98292..89402cfc69 100644 --- a/packfile.c +++ b/packfile.c @@ -339,6 +339,7 @@ void close_pack(struct packed_git *p) close_pack_fd(p); close_pack_index(p); close_pack_revindex(p); + oidset_clear(&p->bad_objects); } void close_object_store(struct raw_object_store *o) @@ -860,7 +861,7 @@ static void prepare_pack(const char *full_name, size_t full_name_len, if (!strcmp(file_name, "multi-pack-index")) return; if (starts_with(file_name, "multi-pack-index") && - ends_with(file_name, ".rev")) + (ends_with(file_name, ".bitmap") || ends_with(file_name, ".rev"))) return; if (ends_with(file_name, ".idx") || ends_with(file_name, ".rev") || @@ -1161,31 +1162,19 @@ int unpack_object_header(struct packed_git *p, return type; } -void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1) +void mark_bad_packed_object(struct packed_git *p, const struct object_id *oid) { - unsigned i; - const unsigned hashsz = the_hash_algo->rawsz; - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(sha1, p->bad_object_sha1 + hashsz * i)) - return; - p->bad_object_sha1 = xrealloc(p->bad_object_sha1, - st_mult(GIT_MAX_RAWSZ, - 
st_add(p->num_bad_objects, 1))); - hashcpy(p->bad_object_sha1 + hashsz * p->num_bad_objects, sha1); - p->num_bad_objects++; + oidset_insert(&p->bad_objects, oid); } const struct packed_git *has_packed_and_bad(struct repository *r, - const unsigned char *sha1) + const struct object_id *oid) { struct packed_git *p; - unsigned i; for (p = r->objects->packed_git; p; p = p->next) - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(sha1, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) - return p; + if (oidset_contains(&p->bad_objects, oid)) + return p; return NULL; } @@ -1272,7 +1261,7 @@ static int retry_bad_packed_offset(struct repository *r, if (offset_to_pack_pos(p, obj_offset, &pos) < 0) return OBJ_BAD; nth_packed_object_id(&oid, p, pack_pos_to_index(p, pos)); - mark_bad_packed_object(p, oid.hash); + mark_bad_packed_object(p, &oid); type = oid_object_info(r, &oid, NULL); if (type <= OBJ_NONE) return OBJ_BAD; @@ -1722,7 +1711,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset, nth_packed_object_id(&oid, p, index_pos); error("bad packed object CRC for %s", oid_to_hex(&oid)); - mark_bad_packed_object(p, oid.hash); + mark_bad_packed_object(p, &oid); data = NULL; goto out; } @@ -1811,7 +1800,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset, " at offset %"PRIuMAX" from %s", oid_to_hex(&base_oid), (uintmax_t)obj_offset, p->pack_name); - mark_bad_packed_object(p, base_oid.hash); + mark_bad_packed_object(p, &base_oid); base = read_object(r, &base_oid, &type, &base_size); external_base = base; } @@ -2016,13 +2005,9 @@ static int fill_pack_entry(const struct object_id *oid, { off_t offset; - if (p->num_bad_objects) { - unsigned i; - for (i = 0; i < p->num_bad_objects; i++) - if (hasheq(oid->hash, - p->bad_object_sha1 + the_hash_algo->rawsz * i)) - return 0; - } + if (oidset_size(&p->bad_objects) && + oidset_contains(&p->bad_objects, oid)) + return 0; offset = find_pack_entry_one(oid->hash, p); if (!offset) @@ -2205,6 +2190,12 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) && !p->pack_promisor) continue; + if ((flags & FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS) && + p->pack_keep_in_core) + continue; + if ((flags & FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS) && + p->pack_keep) + continue; if (open_pack_index(p)) { pack_errors = 1; continue; diff --git a/packfile.h b/packfile.h index 3ae117a8ae..186146779d 100644 --- a/packfile.h +++ b/packfile.h @@ -159,8 +159,8 @@ int packed_object_info(struct repository *r, struct packed_git *pack, off_t offset, struct object_info *); -void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1); -const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1); +void mark_bad_packed_object(struct packed_git *, const struct object_id *); +const struct packed_git *has_packed_and_bad(struct repository *, const struct object_id *); #define ON_DISK_KEEP_PACKS 1 #define IN_CORE_KEEP_PACKS 2 diff --git a/parse-options.c b/parse-options.c index 2abff136a1..55c5821b08 100644 --- a/parse-options.c +++ b/parse-options.c @@ -310,19 +310,6 @@ static enum parse_opt_result parse_long_opt( again: if (!skip_prefix(arg, long_name, &rest)) rest = NULL; - if (options->type == OPTION_ARGUMENT) { - if (!rest) - continue; - if (*rest == '=') - return error(_("%s takes no value"), - optname(options, flags)); - if (*rest) - continue; - if (options->value) - *(int *)options->value = options->defval; - 
p->out[p->cpidx++] = arg - 2; - return PARSE_OPT_DONE; - } if (!rest) { /* abbreviated? */ if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN) && diff --git a/parse-options.h b/parse-options.h index a845a9d952..39d9088254 100644 --- a/parse-options.h +++ b/parse-options.h @@ -8,7 +8,6 @@ enum parse_opt_type { /* special types */ OPTION_END, - OPTION_ARGUMENT, OPTION_GROUP, OPTION_NUMBER, OPTION_ALIAS, @@ -155,8 +154,6 @@ struct option { #define OPT_INTEGER_F(s, l, v, h, f) { OPTION_INTEGER, (s), (l), (v), N_("n"), (h), (f) } #define OPT_END() { OPTION_END } -#define OPT_ARGUMENT(l, v, h) { OPTION_ARGUMENT, 0, (l), (v), NULL, \ - (h), PARSE_OPT_NOARG, NULL, 1 } #define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) } #define OPT_BIT(s, l, v, h, b) OPT_BIT_F(s, l, v, h, b, 0) #define OPT_BITOP(s, l, v, h, set, clear) { OPTION_BITOP, (s), (l), (v), NULL, (h), \ @@ -1510,21 +1510,28 @@ int looks_like_command_line_option(const char *str) return str && str[0] == '-'; } -char *xdg_config_home(const char *filename) +char *xdg_config_home_for(const char *subdir, const char *filename) { const char *home, *config_home; + assert(subdir); assert(filename); config_home = getenv("XDG_CONFIG_HOME"); if (config_home && *config_home) - return mkpathdup("%s/git/%s", config_home, filename); + return mkpathdup("%s/%s/%s", config_home, subdir, filename); home = getenv("HOME"); if (home) - return mkpathdup("%s/.config/git/%s", home, filename); + return mkpathdup("%s/.config/%s/%s", home, subdir, filename); + return NULL; } +char *xdg_config_home(const char *filename) +{ + return xdg_config_home_for("git", filename); +} + char *xdg_cache_home(const char *filename) { const char *home, *cache_home; diff --git a/pkt-line.c b/pkt-line.c index 9f63eae2e6..de4a94b437 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -243,6 +243,43 @@ void packet_write(int fd_out, const char *buf, size_t size) die("%s", err.buf); } +void packet_fwrite(FILE *f, const char *buf, size_t size) +{ + size_t packet_size; + char header[4]; + + if (size > LARGE_PACKET_DATA_MAX) + die(_("packet write failed - data exceeds max packet size")); + + packet_trace(buf, size, 1); + packet_size = size + 4; + + set_packet_header(header, packet_size); + fwrite_or_die(f, header, 4); + fwrite_or_die(f, buf, size); +} + +void packet_fwrite_fmt(FILE *fh, const char *fmt, ...) +{ + static struct strbuf buf = STRBUF_INIT; + va_list args; + + strbuf_reset(&buf); + + va_start(args, fmt); + format_packet(&buf, "", fmt, args); + va_end(args); + + fwrite_or_die(fh, buf.buf, buf.len); +} + +void packet_fflush(FILE *f) +{ + packet_trace("0000", 4, 1); + fwrite_or_die(f, "0000", 4); + fflush_or_die(f); +} + void packet_buf_write(struct strbuf *buf, const char *fmt, ...) { va_list args; diff --git a/pkt-line.h b/pkt-line.h index 5af5f45687..82b95e4bdd 100644 --- a/pkt-line.h +++ b/pkt-line.h @@ -36,6 +36,17 @@ int write_packetized_from_fd_no_flush(int fd_in, int fd_out); int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out); /* + * Stdio versions of packet_write functions. When mixing these with fd + * based functions, take care to call fflush(3) before doing fd writes or + * closing the fd. + */ +void packet_fwrite(FILE *f, const char *buf, size_t size); +void packet_fwrite_fmt(FILE *f, const char *fmt, ...) __attribute__((format (printf, 2, 3))); + +/* packet_fflush writes a flush packet and flushes the stdio buffer of f */ +void packet_fflush(FILE *f); + +/* * Read a packetized line into the buffer, which must be at least size bytes * long. 
The return value specifies the number of bytes read into the buffer. * @@ -671,7 +671,11 @@ const char *repo_logmsg_reencode(struct repository *r, * If the re-encoding failed, out might be NULL here; in that * case we just return the commit message verbatim. */ - return out ? out : msg; + if (!out) { + warning("unable to reencode commit to '%s'", output_encoding); + return msg; + } + return out; } static int mailmap_name(const char **email, size_t *email_len, diff --git a/protocol-caps.c b/protocol-caps.c index 13a9e63a04..bbde91810a 100644 --- a/protocol-caps.c +++ b/protocol-caps.c @@ -69,13 +69,13 @@ static void send_info(struct repository *r, struct packet_writer *writer, } } - packet_writer_write(writer, "%s", - strbuf_detach(&send_buffer, NULL)); + packet_writer_write(writer, "%s", send_buffer.buf); + strbuf_reset(&send_buffer); } + strbuf_release(&send_buffer); } -int cap_object_info(struct repository *r, struct strvec *keys, - struct packet_reader *request) +int cap_object_info(struct repository *r, struct packet_reader *request) { struct requested_info info; struct packet_writer writer; diff --git a/protocol-caps.h b/protocol-caps.h index 0a9f49df11..15c4550360 100644 --- a/protocol-caps.h +++ b/protocol-caps.h @@ -2,9 +2,7 @@ #define PROTOCOL_CAPS_H struct repository; -struct strvec; struct packet_reader; -int cap_object_info(struct repository *r, struct strvec *keys, - struct packet_reader *request); +int cap_object_info(struct repository *r, struct packet_reader *request); #endif /* PROTOCOL_CAPS_H */ diff --git a/range-diff.c b/range-diff.c index e731525e66..cac89a2f4f 100644 --- a/range-diff.c +++ b/range-diff.c @@ -482,6 +482,7 @@ static void output(struct string_list *a, struct string_list *b, else diff_setup(&opts); + opts.no_free = 1; if (!opts.output_format) opts.output_format = DIFF_FORMAT_PATCH; opts.flags.suppress_diff_headers = 1; @@ -542,6 +543,8 @@ static void output(struct string_list *a, struct string_list *b, strbuf_release(&buf); strbuf_release(&dashes); strbuf_release(&indent); + opts.no_free = 0; + diff_free(&opts); } int show_range_diff(const char *range1, const char *range2, diff --git a/read-cache.c b/read-cache.c index 9048ef9e90..b0a06db5c5 100644 --- a/read-cache.c +++ b/read-cache.c @@ -1944,13 +1944,22 @@ static void tweak_untracked_cache(struct index_state *istate) prepare_repo_settings(r); - if (r->settings.core_untracked_cache == UNTRACKED_CACHE_REMOVE) { + switch (r->settings.core_untracked_cache) { + case UNTRACKED_CACHE_REMOVE: remove_untracked_cache(istate); - return; - } - - if (r->settings.core_untracked_cache == UNTRACKED_CACHE_WRITE) + break; + case UNTRACKED_CACHE_WRITE: add_untracked_cache(istate); + break; + case UNTRACKED_CACHE_KEEP: + /* + * Either an explicit "core.untrackedCache=keep", the + * default if "core.untrackedCache" isn't configured, + * or a fallback on an unknown "core.untrackedCache" + * value. 
+ */ + break; + } } static void tweak_split_index(struct index_state *istate) @@ -3069,7 +3078,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l int ret; int was_full = !istate->sparse_index; - ret = convert_to_sparse(istate); + ret = convert_to_sparse(istate, 0); if (ret) { warning(_("failed to convert to a sparse-index")); @@ -3182,7 +3191,7 @@ static int write_shared_index(struct index_state *istate, int ret, was_full = !istate->sparse_index; move_cache_to_base_index(istate); - convert_to_sparse(istate); + convert_to_sparse(istate, 0); trace2_region_enter_printf("index", "shared/do_write_index", the_repository, "%s", get_tempfile_path(*temp)); diff --git a/reflog-walk.c b/reflog-walk.c index e9cd328369..8ac4b284b6 100644 --- a/reflog-walk.c +++ b/reflog-walk.c @@ -158,10 +158,9 @@ int add_reflog_for_walk(struct reflog_walk_info *info, } reflogs = read_complete_reflog(branch); if (!reflogs || reflogs->nr == 0) { - struct object_id oid; char *b; int ret = dwim_log(branch, strlen(branch), - &oid, &b); + NULL, &b); if (ret > 1) free(b); else if (ret == 1) { @@ -33,11 +33,6 @@ static struct ref_storage_be *find_ref_storage_backend(const char *name) return NULL; } -int ref_storage_backend_exists(const char *name) -{ - return find_ref_storage_backend(name) != NULL; -} - /* * How to handle various characters in refnames: * 0: An acceptable character for refs @@ -698,7 +693,7 @@ int repo_dwim_log(struct repository *r, const char *str, int len, strbuf_addf(&path, *p, len, str); ref = refs_resolve_ref_unsafe(refs, path.buf, RESOLVE_REF_READING, - &hash, NULL); + oid ? &hash : NULL, NULL); if (!ref) continue; if (refs_reflog_exists(refs, path.buf)) @@ -710,7 +705,8 @@ int repo_dwim_log(struct repository *r, const char *str, int len, continue; if (!logs_found++) { *log = xstrdup(it); - oidcpy(oid, &hash); + if (oid) + oidcpy(oid, &hash); } if (!warn_ambiguous_refs) break; @@ -1681,7 +1677,7 @@ int refs_read_raw_ref(struct ref_store *ref_store, } return ref_store->be->read_raw_ref(ref_store, refname, oid, referent, - type); + type, &errno); } /* This function needs to return a meaningful errno on failure */ @@ -2370,19 +2366,19 @@ int delete_reflog(const char *refname) } int refs_reflog_expire(struct ref_store *refs, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, void *policy_cb_data) { - return refs->be->reflog_expire(refs, refname, oid, flags, + return refs->be->reflog_expire(refs, refname, flags, prepare_fn, should_prune_fn, cleanup_fn, policy_cb_data); } -int reflog_expire(const char *refname, const struct object_id *oid, +int reflog_expire(const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, @@ -2390,7 +2386,7 @@ int reflog_expire(const char *refname, const struct object_id *oid, void *policy_cb_data) { return refs_reflog_expire(get_main_ref_store(the_repository), - refname, oid, flags, + refname, flags, prepare_fn, should_prune_fn, cleanup_fn, policy_cb_data); } @@ -796,7 +796,7 @@ enum expire_reflog_flags { * expiration policy that is desired. * * reflog_expiry_prepare_fn -- Called once after the reference is - * locked. + * locked. Called with the OID of the locked reference. * * reflog_expiry_should_prune_fn -- Called once for each entry in the * existing reflog. 
It should return true iff that entry should be @@ -816,28 +816,25 @@ typedef int reflog_expiry_should_prune_fn(struct object_id *ooid, typedef void reflog_expiry_cleanup_fn(void *cb_data); /* - * Expire reflog entries for the specified reference. oid is the old - * value of the reference. flags is a combination of the constants in + * Expire reflog entries for the specified reference. + * flags is a combination of the constants in * enum expire_reflog_flags. The three function pointers are described * above. On success, return zero. */ int refs_reflog_expire(struct ref_store *refs, const char *refname, - const struct object_id *oid, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, void *policy_cb_data); -int reflog_expire(const char *refname, const struct object_id *oid, +int reflog_expire(const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, void *policy_cb_data); -int ref_storage_backend_exists(const char *name); - struct ref_store *get_main_ref_store(struct repository *r); /** diff --git a/refs/debug.c b/refs/debug.c index 1a7a9e11cf..8667c64023 100644 --- a/refs/debug.c +++ b/refs/debug.c @@ -239,15 +239,14 @@ debug_ref_iterator_begin(struct ref_store *ref_store, const char *prefix, static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname, struct object_id *oid, struct strbuf *referent, - unsigned int *type) + unsigned int *type, int *failure_errno) { struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store; int res = 0; oidcpy(oid, null_oid()); - errno = 0; res = drefs->refs->be->read_raw_ref(drefs->refs, refname, oid, referent, - type); + type, failure_errno); if (res == 0) { trace_printf_key(&trace_refs, "read_raw_ref: %s: %s (=> %s) type %x: %d\n", @@ -255,7 +254,7 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname, } else { trace_printf_key(&trace_refs, "read_raw_ref: %s: %d (errno %d)\n", refname, - res, errno); + res, *failure_errno); } return res; } @@ -365,8 +364,8 @@ struct debug_reflog_expiry_should_prune { }; static void debug_reflog_expiry_prepare(const char *refname, - const struct object_id *oid, - void *cb_data) + const struct object_id *oid, + void *cb_data) { struct debug_reflog_expiry_should_prune *prune = cb_data; trace_printf_key(&trace_refs, "reflog_expire_prepare: %s\n", refname); @@ -392,7 +391,7 @@ static void debug_reflog_expiry_cleanup(void *cb_data) } static int debug_reflog_expire(struct ref_store *ref_store, const char *refname, - const struct object_id *oid, unsigned int flags, + unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, @@ -405,7 +404,7 @@ static int debug_reflog_expire(struct ref_store *ref_store, const char *refname, .should_prune = should_prune_fn, .cb_data = policy_cb_data, }; - int res = drefs->refs->be->reflog_expire(drefs->refs, refname, oid, + int res = drefs->refs->be->reflog_expire(drefs->refs, refname, flags, &debug_reflog_expiry_prepare, &debug_reflog_expiry_should_prune_fn, &debug_reflog_expiry_cleanup, diff --git a/refs/files-backend.c b/refs/files-backend.c index 677b7e4cdd..8d627adf99 100644 --- a/refs/files-backend.c +++ b/refs/files-backend.c @@ -227,7 +227,7 @@ static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dir pos = search_ref_dir(dir, prefix, 
prefix_len); if (pos >= 0) continue; - child_entry = create_dir_entry(dir->cache, prefix, prefix_len, 1); + child_entry = create_dir_entry(dir->cache, prefix, prefix_len); add_entry_to_dir(dir, child_entry); } } @@ -278,7 +278,7 @@ static void loose_fill_ref_dir(struct ref_store *ref_store, strbuf_addch(&refname, '/'); add_entry_to_dir(dir, create_dir_entry(dir->cache, refname.buf, - refname.len, 1)); + refname.len)); } else { if (!refs_resolve_ref_unsafe(&refs->base, refname.buf, @@ -336,14 +336,14 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs) * lazily): */ add_entry_to_dir(get_ref_dir(refs->loose->root), - create_dir_entry(refs->loose, "refs/", 5, 1)); + create_dir_entry(refs->loose, "refs/", 5)); } return refs->loose; } -static int files_read_raw_ref(struct ref_store *ref_store, - const char *refname, struct object_id *oid, - struct strbuf *referent, unsigned int *type) +static int files_read_raw_ref(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno) { struct files_ref_store *refs = files_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); @@ -354,7 +354,6 @@ static int files_read_raw_ref(struct ref_store *ref_store, struct stat st; int fd; int ret = -1; - int save_errno; int remaining_retries = 3; *type = 0; @@ -459,10 +458,9 @@ stat_ref: ret = parse_loose_ref_contents(buf, oid, referent, type); out: - save_errno = errno; + *failure_errno = errno; strbuf_release(&sb_path); strbuf_release(&sb_contents); - errno = save_errno; return ret; } @@ -531,7 +529,6 @@ static void unlock_ref(struct ref_lock *lock) static int lock_raw_ref(struct files_ref_store *refs, const char *refname, int mustexist, const struct string_list *extras, - const struct string_list *skip, struct ref_lock **lock_p, struct strbuf *referent, unsigned int *type, @@ -541,6 +538,7 @@ static int lock_raw_ref(struct files_ref_store *refs, struct strbuf ref_file = STRBUF_INIT; int attempts_remaining = 3; int ret = TRANSACTION_GENERIC_ERROR; + int failure_errno; assert(err); files_assert_main_repository(refs, "lock_raw_ref"); @@ -568,7 +566,7 @@ retry: * reason to expect this error to be transitory. */ if (refs_verify_refname_available(&refs->base, refname, - extras, skip, err)) { + extras, NULL, err)) { if (mustexist) { /* * To the user the relevant error is @@ -611,7 +609,9 @@ retry: if (hold_lock_file_for_update_timeout( &lock->lk, ref_file.buf, LOCK_NO_DEREF, get_files_ref_lock_timeout_ms()) < 0) { - if (errno == ENOENT && --attempts_remaining > 0) { + int myerr = errno; + errno = 0; + if (myerr == ENOENT && --attempts_remaining > 0) { /* * Maybe somebody just deleted one of the * directories leading to ref_file. Try @@ -619,7 +619,7 @@ retry: */ goto retry; } else { - unable_to_lock_message(ref_file.buf, errno, err); + unable_to_lock_message(ref_file.buf, myerr, err); goto error_return; } } @@ -629,9 +629,9 @@ retry: * fear that its value will change. */ - if (files_read_raw_ref(&refs->base, refname, - &lock->old_oid, referent, type)) { - if (errno == ENOENT) { + if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent, + type, &failure_errno)) { + if (failure_errno == ENOENT) { if (mustexist) { /* Garden variety missing reference. */ strbuf_addf(err, "unable to resolve reference '%s'", @@ -655,7 +655,7 @@ retry: * reference named "refs/foo/bar/baz". */ } - } else if (errno == EISDIR) { + } else if (failure_errno == EISDIR) { /* * There is a directory in the way. 
It might have * contained references that have been deleted. If @@ -673,7 +673,7 @@ retry: REMOVE_DIR_EMPTY_ONLY)) { if (refs_verify_refname_available( &refs->base, refname, - extras, skip, err)) { + extras, NULL, err)) { /* * The error message set by * verify_refname_available() is OK. @@ -693,13 +693,13 @@ retry: goto error_return; } } - } else if (errno == EINVAL && (*type & REF_ISBROKEN)) { + } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) { strbuf_addf(err, "unable to resolve reference '%s': " "reference broken", refname); goto error_return; } else { strbuf_addf(err, "unable to resolve reference '%s': %s", - refname, strerror(errno)); + refname, strerror(failure_errno)); goto error_return; } @@ -710,7 +710,7 @@ retry: */ if (refs_verify_refname_available( refs->packed_ref_store, refname, - extras, skip, err)) + extras, NULL, err)) goto error_return; } @@ -854,39 +854,112 @@ static struct ref_iterator *files_ref_iterator_begin( } /* - * Verify that the reference locked by lock has the value old_oid - * (unless it is NULL). Fail if the reference doesn't exist and - * mustexist is set. Return 0 on success. On error, write an error - * message to err, set errno, and return a negative value. + * Callback function for raceproof_create_file(). This function is + * expected to do something that makes dirname(path) permanent despite + * the fact that other processes might be cleaning up empty + * directories at the same time. Usually it will create a file named + * path, but alternatively it could create another file in that + * directory, or even chdir() into that directory. The function should + * return 0 if the action was completed successfully. On error, it + * should return a nonzero result and set errno. + * raceproof_create_file() treats two errno values specially: + * + * - ENOENT -- dirname(path) does not exist. In this case, + * raceproof_create_file() tries creating dirname(path) + * (and any parent directories, if necessary) and calls + * the function again. + * + * - EISDIR -- the file already exists and is a directory. In this + * case, raceproof_create_file() removes the directory if + * it is empty (and recursively any empty directories that + * it contains) and calls the function again. + * + * Any other errno causes raceproof_create_file() to fail with the + * callback's return value and errno. + * + * Obviously, this function should be OK with being called again if it + * fails with ENOENT or EISDIR. In other scenarios it will not be + * called again. + */ +typedef int create_file_fn(const char *path, void *cb); + +/* + * Create a file in dirname(path) by calling fn, creating leading + * directories if necessary. Retry a few times in case we are racing + * with another process that is trying to clean up the directory that + * contains path. See the documentation for create_file_fn for more + * details. + * + * Return the value and set the errno that resulted from the most + * recent call of fn. fn is always called at least once, and will be + * called more than once if it returns ENOENT or EISDIR. */ -static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock, - const struct object_id *old_oid, int mustexist, - struct strbuf *err) +static int raceproof_create_file(const char *path, create_file_fn fn, void *cb) { - assert(err); + /* + * The number of times we will try to remove empty directories + * in the way of path. 
This is only 1 because if another + * process is racily creating directories that conflict with + * us, we don't want to fight against them. + */ + int remove_directories_remaining = 1; - if (refs_read_ref_full(ref_store, lock->ref_name, - mustexist ? RESOLVE_REF_READING : 0, - &lock->old_oid, NULL)) { - if (old_oid) { - int save_errno = errno; - strbuf_addf(err, "can't verify ref '%s'", lock->ref_name); - errno = save_errno; - return -1; - } else { - oidclr(&lock->old_oid); - return 0; - } - } - if (old_oid && !oideq(&lock->old_oid, old_oid)) { - strbuf_addf(err, "ref '%s' is at %s but expected %s", - lock->ref_name, - oid_to_hex(&lock->old_oid), - oid_to_hex(old_oid)); - errno = EBUSY; - return -1; + /* + * The number of times that we will try to create the + * directories containing path. We are willing to attempt this + * more than once, because another process could be trying to + * clean up empty directories at the same time as we are + * trying to create them. + */ + int create_directories_remaining = 3; + + /* A scratch copy of path, filled lazily if we need it: */ + struct strbuf path_copy = STRBUF_INIT; + + int ret, save_errno; + + /* Sanity check: */ + assert(*path); + +retry_fn: + ret = fn(path, cb); + save_errno = errno; + if (!ret) + goto out; + + if (errno == EISDIR && remove_directories_remaining-- > 0) { + /* + * A directory is in the way. Maybe it is empty; try + * to remove it: + */ + if (!path_copy.len) + strbuf_addstr(&path_copy, path); + + if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY)) + goto retry_fn; + } else if (errno == ENOENT && create_directories_remaining-- > 0) { + /* + * Maybe the containing directory didn't exist, or + * maybe it was just deleted by a process that is + * racing with us to clean up empty directories. Try + * to create it: + */ + enum scld_error scld_result; + + if (!path_copy.len) + strbuf_addstr(&path_copy, path); + + do { + scld_result = safe_create_leading_directories(path_copy.buf); + if (scld_result == SCLD_OK) + goto retry_fn; + } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0); } - return 0; + +out: + strbuf_release(&path_copy); + errno = save_errno; + return ret; } static int remove_empty_directories(struct strbuf *path) @@ -910,64 +983,27 @@ static int create_reflock(const char *path, void *cb) /* * Locks a ref returning the lock on success and NULL on failure. - * On failure errno is set to something meaningful. */ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, - const char *refname, - const struct object_id *old_oid, - const struct string_list *extras, - const struct string_list *skip, - unsigned int flags, int *type, + const char *refname, int *type, struct strbuf *err) { struct strbuf ref_file = STRBUF_INIT; struct ref_lock *lock; - int last_errno = 0; - int mustexist = (old_oid && !is_null_oid(old_oid)); - int resolve_flags = RESOLVE_REF_NO_RECURSE; - int resolved; files_assert_main_repository(refs, "lock_ref_oid_basic"); assert(err); CALLOC_ARRAY(lock, 1); - if (mustexist) - resolve_flags |= RESOLVE_REF_READING; - if (flags & REF_DELETING) - resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME; - files_ref_path(refs, &ref_file, refname); - resolved = !!refs_resolve_ref_unsafe(&refs->base, - refname, resolve_flags, - &lock->old_oid, type); - if (!resolved && errno == EISDIR) { - /* - * we are trying to lock foo but we used to - * have foo/bar which now does not exist; - * it is normal for the empty directory 'foo' - * to remain. 
- */ - if (remove_empty_directories(&ref_file)) { - last_errno = errno; - if (!refs_verify_refname_available( - &refs->base, - refname, extras, skip, err)) - strbuf_addf(err, "there are still refs under '%s'", - refname); - goto error_return; - } - resolved = !!refs_resolve_ref_unsafe(&refs->base, - refname, resolve_flags, - &lock->old_oid, type); - } - if (!resolved) { - last_errno = errno; - if (last_errno != ENOTDIR || - !refs_verify_refname_available(&refs->base, refname, - extras, skip, err)) + if (!refs_resolve_ref_unsafe(&refs->base, refname, + RESOLVE_REF_NO_RECURSE, + &lock->old_oid, type)) { + if (!refs_verify_refname_available(&refs->base, refname, + NULL, NULL, err)) strbuf_addf(err, "unable to resolve reference '%s': %s", - refname, strerror(last_errno)); + refname, strerror(errno)); goto error_return; } @@ -980,23 +1016,20 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, */ if (is_null_oid(&lock->old_oid) && refs_verify_refname_available(refs->packed_ref_store, refname, - extras, skip, err)) { - last_errno = ENOTDIR; + NULL, NULL, err)) goto error_return; - } lock->ref_name = xstrdup(refname); if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) { - last_errno = errno; unable_to_lock_message(ref_file.buf, errno, err); goto error_return; } - if (verify_lock(&refs->base, lock, old_oid, mustexist, err)) { - last_errno = errno; - goto error_return; - } + if (refs_read_ref_full(&refs->base, lock->ref_name, + 0, + &lock->old_oid, NULL)) + oidclr(&lock->old_oid); goto out; error_return: @@ -1005,7 +1038,6 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, out: strbuf_release(&ref_file); - errno = last_errno; return lock; } @@ -1415,8 +1447,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store, logmoved = log; - lock = lock_ref_oid_basic(refs, newrefname, NULL, NULL, NULL, - REF_NO_DEREF, NULL, &err); + lock = lock_ref_oid_basic(refs, newrefname, NULL, &err); if (!lock) { if (copy) error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf); @@ -1438,8 +1469,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store, goto out; rollback: - lock = lock_ref_oid_basic(refs, oldrefname, NULL, NULL, NULL, - REF_NO_DEREF, NULL, &err); + lock = lock_ref_oid_basic(refs, oldrefname, NULL, &err); if (!lock) { error("unable to lock %s for rollback: %s", oldrefname, err.buf); strbuf_release(&err); @@ -1569,7 +1599,7 @@ static int log_ref_setup(struct files_ref_store *refs, goto error; } } else { - *logfd = open(logfile, O_APPEND | O_WRONLY, 0666); + *logfd = open(logfile, O_APPEND | O_WRONLY); if (*logfd < 0) { if (errno == ENOENT || errno == EISDIR) { /* @@ -1846,9 +1876,7 @@ static int files_create_symref(struct ref_store *ref_store, struct ref_lock *lock; int ret; - lock = lock_ref_oid_basic(refs, refname, NULL, - NULL, NULL, REF_NO_DEREF, NULL, - &err); + lock = lock_ref_oid_basic(refs, refname, NULL, &err); if (!lock) { error("%s", err.buf); strbuf_release(&err); @@ -2416,7 +2444,7 @@ static int lock_ref_for_update(struct files_ref_store *refs, } ret = lock_raw_ref(refs, update->refname, mustexist, - affected_refnames, NULL, + affected_refnames, &lock, &referent, &update->type, err); if (ret) { @@ -3037,7 +3065,7 @@ static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid, } static int files_reflog_expire(struct ref_store *ref_store, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, 
reflog_expiry_should_prune_fn should_prune_fn, @@ -3054,6 +3082,7 @@ static int files_reflog_expire(struct ref_store *ref_store, int status = 0; int type; struct strbuf err = STRBUF_INIT; + const struct object_id *oid; memset(&cb, 0, sizeof(cb)); cb.flags = flags; @@ -3065,14 +3094,26 @@ static int files_reflog_expire(struct ref_store *ref_store, * reference itself, plus we might need to update the * reference if --updateref was specified: */ - lock = lock_ref_oid_basic(refs, refname, oid, - NULL, NULL, REF_NO_DEREF, - &type, &err); + lock = lock_ref_oid_basic(refs, refname, &type, &err); if (!lock) { error("cannot lock ref '%s': %s", refname, err.buf); strbuf_release(&err); return -1; } + oid = &lock->old_oid; + + /* + * When refs are deleted, their reflog is deleted before the + * ref itself is deleted. This is because there is no separate + * lock for reflog; instead we take a lock on the ref with + * lock_ref_oid_basic(). + * + * If a race happens and the reflog doesn't exist after we've + * acquired the lock that's OK. We've got nothing more to do; + * We were asked to delete the reflog, but someone else + * deleted it! The caller doesn't care that we deleted it, + * just that it is deleted. So we can return successfully. + */ if (!refs_reflog_exists(ref_store, refname)) { unlock_ref(lock); return 0; diff --git a/refs/packed-backend.c b/refs/packed-backend.c index f8aa97d799..47247a1491 100644 --- a/refs/packed-backend.c +++ b/refs/packed-backend.c @@ -724,9 +724,9 @@ static struct snapshot *get_snapshot(struct packed_ref_store *refs) return refs->snapshot; } -static int packed_read_raw_ref(struct ref_store *ref_store, - const char *refname, struct object_id *oid, - struct strbuf *referent, unsigned int *type) +static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno) { struct packed_ref_store *refs = packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); @@ -739,7 +739,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store, if (!rec) { /* refname is not a packed reference. 
*/ - errno = ENOENT; + *failure_errno = ENOENT; return -1; } @@ -1600,6 +1600,7 @@ static int packed_for_each_reflog_ent(struct ref_store *ref_store, const char *refname, each_reflog_ent_fn fn, void *cb_data) { + BUG("packed reference store does not support reflogs"); return 0; } @@ -1608,12 +1609,14 @@ static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store, each_reflog_ent_fn fn, void *cb_data) { + BUG("packed reference store does not support reflogs"); return 0; } static int packed_reflog_exists(struct ref_store *ref_store, const char *refname) { + BUG("packed reference store does not support reflogs"); return 0; } @@ -1627,17 +1630,19 @@ static int packed_create_reflog(struct ref_store *ref_store, static int packed_delete_reflog(struct ref_store *ref_store, const char *refname) { + BUG("packed reference store does not support reflogs"); return 0; } static int packed_reflog_expire(struct ref_store *ref_store, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, reflog_expiry_cleanup_fn cleanup_fn, void *policy_cb_data) { + BUG("packed reference store does not support reflogs"); return 0; } diff --git a/refs/ref-cache.c b/refs/ref-cache.c index 49d732f6db..a5ad8a39fb 100644 --- a/refs/ref-cache.c +++ b/refs/ref-cache.c @@ -49,7 +49,7 @@ struct ref_cache *create_ref_cache(struct ref_store *refs, ret->ref_store = refs; ret->fill_ref_dir = fill_ref_dir; - ret->root = create_dir_entry(ret, "", 0, 1); + ret->root = create_dir_entry(ret, "", 0); return ret; } @@ -86,14 +86,13 @@ static void clear_ref_dir(struct ref_dir *dir) } struct ref_entry *create_dir_entry(struct ref_cache *cache, - const char *dirname, size_t len, - int incomplete) + const char *dirname, size_t len) { struct ref_entry *direntry; FLEX_ALLOC_MEM(direntry, name, dirname, len); direntry->u.subdir.cache = cache; - direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0); + direntry->flag = REF_DIR | REF_INCOMPLETE; return direntry; } @@ -144,30 +143,19 @@ int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len) /* * Search for a directory entry directly within dir (without * recursing). Sort dir if necessary. subdirname must be a directory - * name (i.e., end in '/'). If mkdir is set, then create the - * directory if it is missing; otherwise, return NULL if the desired + * name (i.e., end in '/'). Returns NULL if the desired * directory cannot be found. dir must already be complete. */ static struct ref_dir *search_for_subdir(struct ref_dir *dir, - const char *subdirname, size_t len, - int mkdir) + const char *subdirname, size_t len) { int entry_index = search_ref_dir(dir, subdirname, len); struct ref_entry *entry; - if (entry_index == -1) { - if (!mkdir) - return NULL; - /* - * Since dir is complete, the absence of a subdir - * means that the subdir really doesn't exist; - * therefore, create an empty record for it but mark - * the record complete. - */ - entry = create_dir_entry(dir->cache, subdirname, len, 0); - add_entry_to_dir(dir, entry); - } else { - entry = dir->entries[entry_index]; - } + + if (entry_index == -1) + return NULL; + + entry = dir->entries[entry_index]; return get_ref_dir(entry); } @@ -176,18 +164,17 @@ static struct ref_dir *search_for_subdir(struct ref_dir *dir, * tree that should hold refname. If refname is a directory name * (i.e., it ends in '/'), then return that ref_dir itself. 
dir must * represent the top-level directory and must already be complete. - * Sort ref_dirs and recurse into subdirectories as necessary. If - * mkdir is set, then create any missing directories; otherwise, + * Sort ref_dirs and recurse into subdirectories as necessary. Will * return NULL if the desired directory cannot be found. */ static struct ref_dir *find_containing_dir(struct ref_dir *dir, - const char *refname, int mkdir) + const char *refname) { const char *slash; for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) { size_t dirnamelen = slash - refname + 1; struct ref_dir *subdir; - subdir = search_for_subdir(dir, refname, dirnamelen, mkdir); + subdir = search_for_subdir(dir, refname, dirnamelen); if (!subdir) { dir = NULL; break; @@ -202,7 +189,7 @@ struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname) { int entry_index; struct ref_entry *entry; - dir = find_containing_dir(dir, refname, 0); + dir = find_containing_dir(dir, refname); if (!dir) return NULL; entry_index = search_ref_dir(dir, refname, strlen(refname)); @@ -212,50 +199,6 @@ struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname) return (entry->flag & REF_DIR) ? NULL : entry; } -int remove_entry_from_dir(struct ref_dir *dir, const char *refname) -{ - int refname_len = strlen(refname); - int entry_index; - struct ref_entry *entry; - int is_dir = refname[refname_len - 1] == '/'; - if (is_dir) { - /* - * refname represents a reference directory. Remove - * the trailing slash; otherwise we will get the - * directory *representing* refname rather than the - * one *containing* it. - */ - char *dirname = xmemdupz(refname, refname_len - 1); - dir = find_containing_dir(dir, dirname, 0); - free(dirname); - } else { - dir = find_containing_dir(dir, refname, 0); - } - if (!dir) - return -1; - entry_index = search_ref_dir(dir, refname, refname_len); - if (entry_index == -1) - return -1; - entry = dir->entries[entry_index]; - - MOVE_ARRAY(&dir->entries[entry_index], - &dir->entries[entry_index + 1], dir->nr - entry_index - 1); - dir->nr--; - if (dir->sorted > entry_index) - dir->sorted--; - free_ref_entry(entry); - return dir->nr; -} - -int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref) -{ - dir = find_containing_dir(dir, ref->name, 1); - if (!dir) - return -1; - add_entry_to_dir(dir, ref); - return 0; -} - /* * Emit a warning and return true iff ref1 and ref2 have the same name * and the same oid. Die if they have the same name but different @@ -522,7 +465,7 @@ struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache, dir = get_ref_dir(cache->root); if (prefix && *prefix) - dir = find_containing_dir(dir, prefix, 0); + dir = find_containing_dir(dir, prefix); if (!dir) /* There's nothing to iterate over. */ return empty_ref_iterator_begin(); diff --git a/refs/ref-cache.h b/refs/ref-cache.h index 3bfb89d2b3..5c042ae718 100644 --- a/refs/ref-cache.h +++ b/refs/ref-cache.h @@ -169,8 +169,7 @@ struct ref_dir *get_ref_dir(struct ref_entry *entry); * "refs/heads/") or "" for the top-level directory. 
*/ struct ref_entry *create_dir_entry(struct ref_cache *cache, - const char *dirname, size_t len, - int incomplete); + const char *dirname, size_t len); struct ref_entry *create_ref_entry(const char *refname, const struct object_id *oid, int flag); @@ -200,29 +199,6 @@ void free_ref_cache(struct ref_cache *cache); void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry); /* - * Remove the entry with the given name from dir, recursing into - * subdirectories as necessary. If refname is the name of a directory - * (i.e., ends with '/'), then remove the directory and its contents. - * If the removal was successful, return the number of entries - * remaining in the directory entry that contained the deleted entry. - * If the name was not found, return -1. Please note that this - * function only deletes the entry from the cache; it does not delete - * it from the filesystem or ensure that other cache entries (which - * might be symbolic references to the removed entry) are updated. - * Nor does it remove any containing dir entries that might be made - * empty by the removal. dir must represent the top-level directory - * and must already be complete. - */ -int remove_entry_from_dir(struct ref_dir *dir, const char *refname); - -/* - * Add a ref_entry to the ref_dir (unsorted), recursing into - * subdirectories as necessary. dir must represent the top-level - * directory. Return 0 on success. - */ -int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref); - -/* * Find the value entry with the given name in dir, sorting ref_dirs * and recursing into subdirectories as necessary. If the name is not * found or it corresponds to a directory entry, return NULL. diff --git a/refs/refs-internal.h b/refs/refs-internal.h index 3155708345..ddd6d7f8eb 100644 --- a/refs/refs-internal.h +++ b/refs/refs-internal.h @@ -593,7 +593,7 @@ typedef int create_reflog_fn(struct ref_store *ref_store, const char *refname, int force_create, struct strbuf *err); typedef int delete_reflog_fn(struct ref_store *ref_store, const char *refname); typedef int reflog_expire_fn(struct ref_store *ref_store, - const char *refname, const struct object_id *oid, + const char *refname, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, @@ -620,11 +620,15 @@ typedef int reflog_expire_fn(struct ref_store *ref_store, * properly-formatted or even safe reference name. NEITHER INPUT NOR * OUTPUT REFERENCE NAMES ARE VALIDATED WITHIN THIS FUNCTION. * - * Return 0 on success. If the ref doesn't exist, set errno to ENOENT - * and return -1. If the ref exists but is neither a symbolic ref nor - * an object ID, it is broken; set REF_ISBROKEN in type, set errno to - * EINVAL, and return -1. If there is another error reading the ref, - * set errno appropriately and return -1. + * Return 0 on success, or -1 on failure. If the ref exists but is neither a + * symbolic ref nor an object ID, it is broken. In this case set REF_ISBROKEN in + * type, and return -1 (failure_errno should not be ENOENT) + * + * failure_errno provides errno codes that are interpreted beyond error + * reporting. The following error codes have special meaning: + * * ENOENT: the ref doesn't exist + * * EISDIR: ref name is a directory + * * ENOTDIR: ref prefix is not a directory * * Backend-specific flags might be set in type as well, regardless of * outcome. 
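The failure_errno contract documented above mirrors how lock_raw_ref() in refs/files-backend.c consumes the result elsewhere in this patch. A minimal caller sketch (illustrative only; classify_raw_ref() is a hypothetical helper and assumes the declarations from refs-internal.h):

----
/*
 * Sketch: read a raw ref through the backend vtable and classify the
 * failure via the explicit failure_errno out-parameter; errno is no
 * longer meaningful after the call.
 */
static int classify_raw_ref(struct ref_store *refs, const char *refname,
			    struct object_id *oid, struct strbuf *referent,
			    unsigned int *type)
{
	int failure_errno = 0;

	if (!refs->be->read_raw_ref(refs, refname, oid, referent,
				    type, &failure_errno))
		return 0;	/* found: oid or referent has been filled in */
	if (failure_errno == ENOENT)
		return 1;	/* the ref does not exist */
	if (failure_errno == EINVAL && (*type & REF_ISBROKEN))
		return 2;	/* the ref exists but is broken */
	return -1;		/* EISDIR, ENOTDIR, or another error */
}
----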
@@ -638,9 +642,9 @@ typedef int reflog_expire_fn(struct ref_store *ref_store, * - in all other cases, referent will be untouched, and therefore * refname will still be valid and unchanged. */ -typedef int read_raw_ref_fn(struct ref_store *ref_store, - const char *refname, struct object_id *oid, - struct strbuf *referent, unsigned int *type); +typedef int read_raw_ref_fn(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno); struct ref_storage_be { struct ref_storage_be *next; diff --git a/remote-curl.c b/remote-curl.c index 598cff7cde..3f5688e426 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -1478,8 +1478,8 @@ int cmd_main(int argc, const char **argv) options.verbosity = 1; options.progress = !!isatty(2); options.thin = 1; - string_list_init(&options.deepen_not, 1); - string_list_init(&options.push_options, 1); + string_list_init_dup(&options.deepen_not); + string_list_init_dup(&options.push_options); /* * Just report "remote-curl" here (folding all the various aliases @@ -135,7 +135,7 @@ static inline void init_remotes_hash(void) static struct remote *make_remote(const char *name, int len) { - struct remote *ret, *replaced; + struct remote *ret; struct remotes_hash_key lookup; struct hashmap_entry lookup_entry, *e; @@ -162,8 +162,8 @@ static struct remote *make_remote(const char *name, int len) remotes[remotes_nr++] = ret; hashmap_entry_init(&ret->ent, lookup_entry.hash); - replaced = hashmap_put_entry(&remotes_hash, ret, ent); - assert(replaced == NULL); /* no previous entry overwritten */ + if (hashmap_put_entry(&remotes_hash, ret, ent)) + BUG("hashmap_put overwrote entry after hashmap_get returned NULL"); return ret; } @@ -1111,7 +1111,7 @@ static void show_push_unqualified_ref_name_error(const char *dst_value, "Neither worked, so we gave up. 
You must fully qualify the ref."), dst_value, matched_src_name); - if (!advice_push_unqualified_ref_name) + if (!advice_enabled(ADVICE_PUSH_UNQUALIFIED_REF_NAME)) return; if (get_oid(matched_src_name, &oid)) @@ -2118,7 +2118,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, strbuf_addf(sb, _("Your branch is based on '%s', but the upstream is gone.\n"), base); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git branch --unset-upstream\" to fixup)\n")); } else if (!sti) { @@ -2129,7 +2129,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, strbuf_addf(sb, _("Your branch and '%s' refer to different commits.\n"), base); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addf(sb, _(" (use \"%s\" for details)\n"), "git status --ahead-behind"); } else if (!theirs) { @@ -2138,7 +2138,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, "Your branch is ahead of '%s' by %d commits.\n", ours), base, ours); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git push\" to publish your local commits)\n")); } else if (!ours) { @@ -2149,7 +2149,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, "and can be fast-forwarded.\n", theirs), base, theirs); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git pull\" to update your local branch)\n")); } else { @@ -2162,7 +2162,7 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb, "respectively.\n", ours + theirs), base, ours, theirs); - if (advice_status_hints) + if (advice_enabled(ADVICE_STATUS_HINTS)) strbuf_addstr(sb, _(" (use \"git pull\" to merge the remote branch into yours)\n")); } diff --git a/repo-settings.c b/repo-settings.c index 0cfe8b787d..b93e91a212 100644 --- a/repo-settings.c +++ b/repo-settings.c @@ -3,40 +3,77 @@ #include "repository.h" #include "midx.h" -#define UPDATE_DEFAULT_BOOL(s,v) do { if (s == -1) { s = v; } } while(0) +static void repo_cfg_bool(struct repository *r, const char *key, int *dest, + int def) +{ + if (repo_config_get_bool(r, key, dest)) + *dest = def; +} void prepare_repo_settings(struct repository *r) { + int experimental; int value; char *strval; + int manyfiles; - if (r->settings.initialized) + if (r->settings.initialized++) return; /* Defaults */ - memset(&r->settings, -1, sizeof(r->settings)); + r->settings.index_version = -1; + r->settings.core_untracked_cache = UNTRACKED_CACHE_KEEP; + r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_DEFAULT; + + /* Booleans config or default, cascades to other settings */ + repo_cfg_bool(r, "feature.manyfiles", &manyfiles, 0); + repo_cfg_bool(r, "feature.experimental", &experimental, 0); + + /* Defaults modified by feature.* */ + if (experimental) { + r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING; + } + if (manyfiles) { + r->settings.index_version = 4; + r->settings.core_untracked_cache = UNTRACKED_CACHE_WRITE; + } - if (!repo_config_get_bool(r, "core.commitgraph", &value)) - r->settings.core_commit_graph = value; - if (!repo_config_get_bool(r, "commitgraph.readchangedpaths", &value)) - r->settings.commit_graph_read_changed_paths = value; - if (!repo_config_get_bool(r, "gc.writecommitgraph", &value)) - r->settings.gc_write_commit_graph = value; - UPDATE_DEFAULT_BOOL(r->settings.core_commit_graph, 1); - UPDATE_DEFAULT_BOOL(r->settings.commit_graph_read_changed_paths, 1); - 
UPDATE_DEFAULT_BOOL(r->settings.gc_write_commit_graph, 1); + /* Boolean config or default, does not cascade (simple) */ + repo_cfg_bool(r, "core.commitgraph", &r->settings.core_commit_graph, 1); + repo_cfg_bool(r, "commitgraph.readchangedpaths", &r->settings.commit_graph_read_changed_paths, 1); + repo_cfg_bool(r, "gc.writecommitgraph", &r->settings.gc_write_commit_graph, 1); + repo_cfg_bool(r, "fetch.writecommitgraph", &r->settings.fetch_write_commit_graph, 0); + repo_cfg_bool(r, "pack.usesparse", &r->settings.pack_use_sparse, 1); + repo_cfg_bool(r, "core.multipackindex", &r->settings.core_multi_pack_index, 1); + repo_cfg_bool(r, "index.sparse", &r->settings.sparse_index, 0); + /* + * The GIT_TEST_MULTI_PACK_INDEX variable is special in that + * either it *or* the config sets + * r->settings.core_multi_pack_index if true. We don't take + * the environment variable if it exists (even if false) over + * any config, as in most other cases. + */ + if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) + r->settings.core_multi_pack_index = 1; + + /* + * Non-boolean config + */ if (!repo_config_get_int(r, "index.version", &value)) r->settings.index_version = value; - if (!repo_config_get_maybe_bool(r, "core.untrackedcache", &value)) { - if (value == 0) - r->settings.core_untracked_cache = UNTRACKED_CACHE_REMOVE; - else - r->settings.core_untracked_cache = UNTRACKED_CACHE_WRITE; - } else if (!repo_config_get_string(r, "core.untrackedcache", &strval)) { - if (!strcasecmp(strval, "keep")) - r->settings.core_untracked_cache = UNTRACKED_CACHE_KEEP; + if (!repo_config_get_string(r, "core.untrackedcache", &strval)) { + int v = git_parse_maybe_bool(strval); + + /* + * If it's set to "keep", or some other non-boolean + * value then "v < 0". Then we do nothing and keep it + * at the default of UNTRACKED_CACHE_KEEP. + */ + if (v >= 0) + r->settings.core_untracked_cache = v ? 
+ UNTRACKED_CACHE_WRITE : UNTRACKED_CACHE_REMOVE; free(strval); } @@ -45,39 +82,8 @@ void prepare_repo_settings(struct repository *r) r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING; else if (!strcasecmp(strval, "noop")) r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_NOOP; - else - r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_DEFAULT; } - if (!repo_config_get_bool(r, "pack.usesparse", &value)) - r->settings.pack_use_sparse = value; - UPDATE_DEFAULT_BOOL(r->settings.pack_use_sparse, 1); - - value = git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0); - if (value || !repo_config_get_bool(r, "core.multipackindex", &value)) - r->settings.core_multi_pack_index = value; - UPDATE_DEFAULT_BOOL(r->settings.core_multi_pack_index, 1); - - if (!repo_config_get_bool(r, "feature.manyfiles", &value) && value) { - UPDATE_DEFAULT_BOOL(r->settings.index_version, 4); - UPDATE_DEFAULT_BOOL(r->settings.core_untracked_cache, UNTRACKED_CACHE_WRITE); - } - - if (!repo_config_get_bool(r, "fetch.writecommitgraph", &value)) - r->settings.fetch_write_commit_graph = value; - UPDATE_DEFAULT_BOOL(r->settings.fetch_write_commit_graph, 0); - - if (!repo_config_get_bool(r, "feature.experimental", &value) && value) - UPDATE_DEFAULT_BOOL(r->settings.fetch_negotiation_algorithm, FETCH_NEGOTIATION_SKIPPING); - - /* Hack for test programs like test-dump-untracked-cache */ - if (ignore_untracked_cache_config) - r->settings.core_untracked_cache = UNTRACKED_CACHE_KEEP; - else - UPDATE_DEFAULT_BOOL(r->settings.core_untracked_cache, UNTRACKED_CACHE_KEEP); - - UPDATE_DEFAULT_BOOL(r->settings.fetch_negotiation_algorithm, FETCH_NEGOTIATION_DEFAULT); - /* * This setting guards all index reads to require a full index * over a sparse index. After suitable guards are placed in the @@ -85,11 +91,4 @@ void prepare_repo_settings(struct repository *r) * removed. */ r->settings.command_requires_full_index = 1; - - /* - * Initialize this as off. - */ - r->settings.sparse_index = 0; - if (!repo_config_get_bool(r, "index.sparse", &value) && value) - r->settings.sparse_index = 1; } diff --git a/repository.c b/repository.c index b2bf44c6fa..c5b90ba93e 100644 --- a/repository.c +++ b/repository.c @@ -190,19 +190,15 @@ error: int repo_submodule_init(struct repository *subrepo, struct repository *superproject, - const struct submodule *sub) + const char *path, + const struct object_id *treeish_name) { struct strbuf gitdir = STRBUF_INIT; struct strbuf worktree = STRBUF_INIT; int ret = 0; - if (!sub) { - ret = -1; - goto out; - } - - strbuf_repo_worktree_path(&gitdir, superproject, "%s/.git", sub->path); - strbuf_repo_worktree_path(&worktree, superproject, "%s", sub->path); + strbuf_repo_worktree_path(&gitdir, superproject, "%s/.git", path); + strbuf_repo_worktree_path(&worktree, superproject, "%s", path); if (repo_init(subrepo, gitdir.buf, worktree.buf)) { /* @@ -212,9 +208,15 @@ int repo_submodule_init(struct repository *subrepo, * in the superproject's 'modules' directory. In this case the * submodule would not have a worktree. 
*/ + const struct submodule *sub = + submodule_from_path(superproject, treeish_name, path); + if (!sub) { + ret = -1; + goto out; + } + strbuf_reset(&gitdir); - strbuf_repo_git_path(&gitdir, superproject, - "modules/%s", sub->name); + submodule_name_to_gitdir(&gitdir, superproject, sub->name); if (repo_init(subrepo, gitdir.buf, NULL)) { ret = -1; @@ -225,7 +227,7 @@ int repo_submodule_init(struct repository *subrepo, subrepo->submodule_prefix = xstrfmt("%s%s/", superproject->submodule_prefix ? superproject->submodule_prefix : - "", sub->path); + "", path); out: strbuf_release(&gitdir); diff --git a/repository.h b/repository.h index 3740c93bc0..a057653981 100644 --- a/repository.h +++ b/repository.h @@ -13,18 +13,15 @@ struct submodule_cache; struct promisor_remote_config; enum untracked_cache_setting { - UNTRACKED_CACHE_UNSET = -1, - UNTRACKED_CACHE_REMOVE = 0, - UNTRACKED_CACHE_KEEP = 1, - UNTRACKED_CACHE_WRITE = 2 + UNTRACKED_CACHE_KEEP, + UNTRACKED_CACHE_REMOVE, + UNTRACKED_CACHE_WRITE, }; enum fetch_negotiation_setting { - FETCH_NEGOTIATION_UNSET = -1, - FETCH_NEGOTIATION_NONE = 0, - FETCH_NEGOTIATION_DEFAULT = 1, - FETCH_NEGOTIATION_SKIPPING = 2, - FETCH_NEGOTIATION_NOOP = 3, + FETCH_NEGOTIATION_DEFAULT, + FETCH_NEGOTIATION_SKIPPING, + FETCH_NEGOTIATION_NOOP, }; struct repo_settings { @@ -34,6 +31,8 @@ struct repo_settings { int commit_graph_read_changed_paths; int gc_write_commit_graph; int fetch_write_commit_graph; + int command_requires_full_index; + int sparse_index; int index_version; enum untracked_cache_setting core_untracked_cache; @@ -42,9 +41,6 @@ struct repo_settings { enum fetch_negotiation_setting fetch_negotiation_algorithm; int core_multi_pack_index; - - unsigned command_requires_full_index:1, - sparse_index:1; }; struct repository { @@ -172,15 +168,18 @@ void initialize_the_repository(void); int repo_init(struct repository *r, const char *gitdir, const char *worktree); /* - * Initialize the repository 'subrepo' as the submodule given by the - * struct submodule 'sub' in parent repository 'superproject'. - * Return 0 upon success and a non-zero value upon failure, which may happen - * if the submodule is not found, or 'sub' is NULL. + * Initialize the repository 'subrepo' as the submodule at the given path. If + * the submodule's gitdir cannot be found at <path>/.git, this function calls + * submodule_from_path() to try to find it. treeish_name is only used if + * submodule_from_path() needs to be called; see its documentation for more + * information. + * Return 0 upon success and a non-zero value upon failure. 
*/ -struct submodule; +struct object_id; int repo_submodule_init(struct repository *subrepo, struct repository *superproject, - const struct submodule *sub); + const char *path, + const struct object_id *treeish_name); void repo_clear(struct repository *repo); /* diff --git a/revision.c b/revision.c index 0dabb5a0bc..7e7c41fedd 100644 --- a/revision.c +++ b/revision.c @@ -2563,8 +2563,7 @@ static int for_each_good_bisect_ref(struct ref_store *refs, each_ref_fn fn, void return for_each_bisect_ref(refs, fn, cb_data, term_good); } -static int handle_revision_pseudo_opt(const char *submodule, - struct rev_info *revs, +static int handle_revision_pseudo_opt(struct rev_info *revs, const char **argv, int *flags) { const char *arg = argv[0]; @@ -2572,7 +2571,7 @@ static int handle_revision_pseudo_opt(const char *submodule, struct ref_store *refs; int argcount; - if (submodule) { + if (revs->repo != the_repository) { /* * We need some something like get_submodule_worktrees() * before we can go through all worktrees of a submodule, @@ -2581,9 +2580,8 @@ static int handle_revision_pseudo_opt(const char *submodule, */ if (!revs->single_worktree) BUG("--single-worktree cannot be used together with submodule"); - refs = get_submodule_ref_store(submodule); - } else - refs = get_main_ref_store(revs->repo); + } + refs = get_main_ref_store(revs->repo); /* * NOTE! @@ -2707,12 +2705,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s { int i, flags, left, seen_dashdash, revarg_opt; struct strvec prune_data = STRVEC_INIT; - const char *submodule = NULL; int seen_end_of_options = 0; - if (opt) - submodule = opt->submodule; - /* First, search for "--" */ if (opt && opt->assume_dashdash) { seen_dashdash = 1; @@ -2741,7 +2735,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s if (!seen_end_of_options && *arg == '-') { int opts; - opts = handle_revision_pseudo_opt(submodule, + opts = handle_revision_pseudo_opt( revs, argv + i, &flags); if (opts > 0) { diff --git a/revision.h b/revision.h index 0c65a760ee..5578bb4720 100644 --- a/revision.h +++ b/revision.h @@ -336,7 +336,6 @@ extern volatile show_early_output_fn_t show_early_output; struct setup_revision_opt { const char *def; void (*tweak)(struct rev_info *, struct setup_revision_opt *); - const char *submodule; /* TODO: drop this and use rev_info->repo */ unsigned int assume_dashdash:1, allow_exclude_promisor_objects:1; unsigned revarg_opt; diff --git a/run-command.c b/run-command.c index f72e72cce7..04a07e6366 100644 --- a/run-command.c +++ b/run-command.c @@ -8,6 +8,7 @@ #include "string-list.h" #include "quote.h" #include "config.h" +#include "packfile.h" void child_process_init(struct child_process *child) { @@ -210,9 +211,9 @@ static char *locate_in_PATH(const char *file) return NULL; } -static int exists_in_PATH(const char *file) +int exists_in_PATH(const char *command) { - char *r = locate_in_PATH(file); + char *r = locate_in_PATH(command); int found = r != NULL; free(r); return found; @@ -740,6 +741,9 @@ fail_pipe: fflush(NULL); + if (cmd->close_object_store) + close_object_store(the_repository->objects); + #ifndef GIT_WINDOWS_NATIVE { int notify_pipe[2]; @@ -761,9 +765,7 @@ fail_pipe: notify_pipe[0] = notify_pipe[1] = -1; if (cmd->no_stdin || cmd->no_stdout || cmd->no_stderr) { - null_fd = open("/dev/null", O_RDWR | O_CLOEXEC); - if (null_fd < 0) - die_errno(_("open /dev/null failed")); + null_fd = xopen("/dev/null", O_RDWR | O_CLOEXEC); set_cloexec(null_fd); } @@ -1044,6 +1046,7 @@ int 
run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir, cmd.use_shell = opt & RUN_USING_SHELL ? 1 : 0; cmd.clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0; cmd.wait_after_clean = opt & RUN_WAIT_AFTER_CLEAN ? 1 : 0; + cmd.close_object_store = opt & RUN_CLOSE_OBJECT_STORE ? 1 : 0; cmd.dir = dir; cmd.env = env; cmd.trace2_child_class = tr2_class; @@ -1336,7 +1339,7 @@ const char *find_hook(const char *name) err = errno; #endif - if (err == EACCES && advice_ignored_hook) { + if (err == EACCES && advice_enabled(ADVICE_IGNORED_HOOK)) { static struct string_list advise_given = STRING_LIST_INIT_DUP; if (!string_list_lookup(&advise_given, name)) { @@ -1886,6 +1889,7 @@ int run_auto_maintenance(int quiet) return 0; maint.git_cmd = 1; + maint.close_object_store = 1; strvec_pushl(&maint.args, "maintenance", "run", "--auto", NULL); strvec_push(&maint.args, quiet ? "--quiet" : "--no-quiet"); diff --git a/run-command.h b/run-command.h index af1296769f..b9aff74914 100644 --- a/run-command.h +++ b/run-command.h @@ -134,6 +134,14 @@ struct child_process { */ unsigned use_shell:1; + /** + * Release any open file handles to the object store before running + * the command; This is necessary e.g. when the spawned process may + * want to repack because that would delete `.pack` files (and on + * Windows, you cannot delete files that are still in use). + */ + unsigned close_object_store:1; + unsigned stdout_to_stderr:1; unsigned clean_on_exit:1; unsigned wait_after_clean:1; @@ -183,6 +191,18 @@ void child_process_clear(struct child_process *); int is_executable(const char *name); /** + * Check if the command exists on $PATH. This emulates the path search that + * execvp would perform, without actually executing the command so it + * can be used before fork() to prepare to run a command using + * execve() or after execvp() to diagnose why it failed. + * + * The caller should ensure that command contains no directory separators. + * + * Returns 1 if it is found in $PATH or 0 if the command could not be found. + */ +int exists_in_PATH(const char *command); + +/** * Start a sub-process. Takes a pointer to a `struct child_process` * that specifies the details and returns pipe FDs (if requested). * See below for details. 
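The close_object_store bit and exists_in_PATH() documented above are exercised by run_auto_maintenance() later in this patch. A short sketch of the intended usage (kick_off_auto_maintenance() is a hypothetical caller modeled on run_auto_maintenance(); it is not code from the patch):

----
/*
 * Spawn "git maintenance run --auto". With close_object_store set, the
 * spawn path releases our packfile handles first, so a repack in the
 * child can delete .pack files the parent still had open (this matters
 * on Windows, where in-use files cannot be deleted).
 */
static void kick_off_auto_maintenance(void)
{
	struct child_process maint = CHILD_PROCESS_INIT;

	maint.git_cmd = 1;
	maint.close_object_store = 1;
	strvec_pushl(&maint.args, "maintenance", "run", "--auto", NULL);
	if (run_command(&maint))
		warning(_("automatic maintenance failed"));
}
----

Callers of the run_command_v_opt() family can request the same behavior with the new RUN_CLOSE_OBJECT_STORE flag, and exists_in_PATH() can be checked before fork() to report a missing optional command more clearly than a failed execvp() would.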
@@ -233,13 +253,14 @@ int run_hook_ve(const char *const *env, const char *name, va_list args); */ int run_auto_maintenance(int quiet); -#define RUN_COMMAND_NO_STDIN 1 -#define RUN_GIT_CMD 2 /*If this is to be git sub-command */ -#define RUN_COMMAND_STDOUT_TO_STDERR 4 -#define RUN_SILENT_EXEC_FAILURE 8 -#define RUN_USING_SHELL 16 -#define RUN_CLEAN_ON_EXIT 32 -#define RUN_WAIT_AFTER_CLEAN 64 +#define RUN_COMMAND_NO_STDIN (1<<0) +#define RUN_GIT_CMD (1<<1) +#define RUN_COMMAND_STDOUT_TO_STDERR (1<<2) +#define RUN_SILENT_EXEC_FAILURE (1<<3) +#define RUN_USING_SHELL (1<<4) +#define RUN_CLEAN_ON_EXIT (1<<5) +#define RUN_WAIT_AFTER_CLEAN (1<<6) +#define RUN_CLOSE_OBJECT_STORE (1<<7) /** * Convenience functions that encapsulate a sequence of diff --git a/sequencer.c b/sequencer.c index a1100019b0..64b1f2e681 100644 --- a/sequencer.c +++ b/sequencer.c @@ -403,7 +403,7 @@ static void print_advice(struct repository *r, int show_hint, char *msg = getenv("GIT_CHERRY_PICK_HELP"); if (msg) { - fprintf(stderr, "%s\n", msg); + advise("%s\n", msg); /* * A conflict has occurred but the porcelain * (typically rebase --interactive) wants to take care @@ -418,10 +418,22 @@ static void print_advice(struct repository *r, int show_hint, if (opts->no_commit) advise(_("after resolving the conflicts, mark the corrected paths\n" "with 'git add <paths>' or 'git rm <paths>'")); + else if (opts->action == REPLAY_PICK) + advise(_("After resolving the conflicts, mark them with\n" + "\"git add/rm <pathspec>\", then run\n" + "\"git cherry-pick --continue\".\n" + "You can instead skip this commit with \"git cherry-pick --skip\".\n" + "To abort and get back to the state before \"git cherry-pick\",\n" + "run \"git cherry-pick --abort\".")); + else if (opts->action == REPLAY_REVERT) + advise(_("After resolving the conflicts, mark them with\n" + "\"git add/rm <pathspec>\", then run\n" + "\"git revert --continue\".\n" + "You can instead skip this commit with \"git revert --skip\".\n" + "To abort and get back to the state before \"git revert\",\n" + "run \"git revert --abort\".")); else - advise(_("after resolving the conflicts, mark the corrected paths\n" - "with 'git add <paths>' or 'git rm <paths>'\n" - "and commit the result with 'git commit'")); + BUG("unexpected pick action in print_advice()"); } } @@ -486,7 +498,7 @@ static int error_dirty_index(struct repository *repo, struct replay_opts *opts) error(_("your local changes would be overwritten by %s."), _(action_name(opts))); - if (advice_commit_before_merge) + if (advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)) advise(_("commit your changes or stash them to proceed.")); return -1; } @@ -652,6 +664,7 @@ static int do_recursive_merge(struct repository *r, merge_switch_to_result(&o, head_tree, &result, 1, show_output); clean = result.clean; } else { + ensure_full_index(r->index); clean = merge_trees(&o, head_tree, next_tree, base_tree); if (is_rebase_i(opts) && clean <= 0) fputs(o.obuf.buf, stdout); @@ -1294,7 +1307,7 @@ void print_commit_summary(struct repository *r, if (!committer_ident_sufficiently_given()) { strbuf_addstr(&format, "\n Committer: "); strbuf_addbuf_percentquote(&format, &committer_ident); - if (advice_implicit_identity) { + if (advice_enabled(ADVICE_IMPLICIT_IDENTITY)) { strbuf_addch(&format, '\n'); strbuf_addstr(&format, implicit_ident_advice()); } @@ -2347,6 +2360,7 @@ static int read_and_refresh_cache(struct repository *r, _(action_name(opts))); } refresh_index(r->index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL); + if (index_fd >= 0) { if 
(write_locked_index(r->index, &index_lock, COMMIT_LOCK | SKIP_IF_UNCHANGED)) { @@ -2354,6 +2368,13 @@ static int read_and_refresh_cache(struct repository *r, _(action_name(opts))); } } + + /* + * If we are resolving merges in any way other than "ort", then + * expand the sparse index. + */ + if (opts->strategy && strcmp(opts->strategy, "ort")) + ensure_full_index(r->index); return 0; } @@ -2672,7 +2693,6 @@ static int read_populate_todo(struct repository *r, struct todo_list *todo_list, struct replay_opts *opts) { - struct stat st; const char *todo_file = get_todo_path(opts); int res; @@ -2680,11 +2700,6 @@ static int read_populate_todo(struct repository *r, if (strbuf_read_file_or_whine(&todo_list->buf, todo_file) < 0) return -1; - res = stat(todo_file, &st); - if (res) - return error(_("could not stat '%s'"), todo_file); - fill_stat_data(&todo_list->stat, &st); - res = todo_list_parse_insn_buffer(r, todo_list->buf.buf, todo_list); if (res) { if (is_rebase_i(opts)) @@ -3042,7 +3057,7 @@ static int create_seq_dir(struct repository *r) } if (in_progress_error) { error("%s", in_progress_error); - if (advice_sequencer_in_use) + if (advice_enabled(ADVICE_SEQUENCER_IN_USE)) advise(in_progress_advice, advise_skip ? "--skip | " : ""); return -1; @@ -3246,7 +3261,7 @@ int sequencer_skip(struct repository *r, struct replay_opts *opts) give_advice: error(_("there is nothing to skip")); - if (advice_resolve_conflict) { + if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) { advise(_("have you committed already?\n" "try \"git %s --continue\""), action == REPLAY_REVERT ? "revert" : "cherry-pick"); @@ -4263,6 +4278,30 @@ static int stopped_at_head(struct repository *r) } +static int reread_todo_if_changed(struct repository *r, + struct todo_list *todo_list, + struct replay_opts *opts) +{ + int offset; + struct strbuf buf = STRBUF_INIT; + + if (strbuf_read_file_or_whine(&buf, get_todo_path(opts)) < 0) + return -1; + offset = get_item_line_offset(todo_list, todo_list->current + 1); + if (buf.len != todo_list->buf.len - offset || + memcmp(buf.buf, todo_list->buf.buf + offset, buf.len)) { + /* Reread the todo file if it has changed. */ + todo_list_release(todo_list); + if (read_populate_todo(r, todo_list, opts)) + return -1; /* message was printed */ + /* `current` will be incremented on return */ + todo_list->current = -1; + } + strbuf_release(&buf); + + return 0; +} + static const char rescheduled_advice[] = N_("Could not execute the todo command\n" "\n" @@ -4441,20 +4480,9 @@ static int pick_commits(struct repository *r, item->commit, arg, item->arg_len, opts, res, 0); - } else if (is_rebase_i(opts) && check_todo && !res) { - struct stat st; - - if (stat(get_todo_path(opts), &st)) { - res = error_errno(_("could not stat '%s'"), - get_todo_path(opts)); - } else if (match_stat_data(&todo_list->stat, &st)) { - /* Reread the todo file if it has changed. 
*/ - todo_list_release(todo_list); - if (read_populate_todo(r, todo_list, opts)) - res = -1; /* message was printed */ - /* `current` will be incremented below */ - todo_list->current = -1; - } + } else if (is_rebase_i(opts) && check_todo && !res && + reread_todo_if_changed(r, todo_list, opts)) { + return -1; } todo_list->current++; @@ -5110,6 +5138,7 @@ static int make_script_with_merges(struct pretty_print_context *pp, int keep_empty = flags & TODO_LIST_KEEP_EMPTY; int rebase_cousins = flags & TODO_LIST_REBASE_COUSINS; int root_with_onto = flags & TODO_LIST_ROOT_WITH_ONTO; + int skipped_commit = 0; struct strbuf buf = STRBUF_INIT, oneline = STRBUF_INIT; struct strbuf label = STRBUF_INIT; struct commit_list *commits = NULL, **tail = &commits, *iter; @@ -5160,8 +5189,13 @@ static int make_script_with_merges(struct pretty_print_context *pp, oidset_insert(&interesting, &commit->object.oid); is_empty = is_original_commit_empty(commit); - if (!is_empty && (commit->object.flags & PATCHSAME)) + if (!is_empty && (commit->object.flags & PATCHSAME)) { + if (flags & TODO_LIST_WARN_SKIPPED_CHERRY_PICKS) + warning(_("skipped previously applied commit %s"), + short_commit_name(commit)); + skipped_commit = 1; continue; + } if (is_empty && !keep_empty) continue; @@ -5225,6 +5259,9 @@ static int make_script_with_merges(struct pretty_print_context *pp, oidcpy(&entry->entry.oid, &commit->object.oid); oidmap_put(&commit2todo, entry); } + if (skipped_commit) + advise_if_enabled(ADVICE_SKIPPED_CHERRY_PICKS, + _("use --reapply-cherry-picks to include skipped commits")); /* * Second phase: @@ -5345,6 +5382,7 @@ int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, const char *insn = flags & TODO_LIST_ABBREVIATE_CMDS ? "p" : "pick"; int rebase_merges = flags & TODO_LIST_REBASE_MERGES; int reapply_cherry_picks = flags & TODO_LIST_REAPPLY_CHERRY_PICKS; + int skipped_commit = 0; repo_init_revisions(r, &revs, NULL); revs.verbose_header = 1; @@ -5380,8 +5418,13 @@ int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, while ((commit = get_revision(&revs))) { int is_empty = is_original_commit_empty(commit); - if (!is_empty && (commit->object.flags & PATCHSAME)) + if (!is_empty && (commit->object.flags & PATCHSAME)) { + if (flags & TODO_LIST_WARN_SKIPPED_CHERRY_PICKS) + warning(_("skipped previously applied commit %s"), + short_commit_name(commit)); + skipped_commit = 1; continue; + } if (is_empty && !keep_empty) continue; strbuf_addf(out, "%s %s ", insn, @@ -5391,6 +5434,9 @@ int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, strbuf_addf(out, " %c empty", comment_line_char); strbuf_addch(out, '\n'); } + if (skipped_commit) + advise_if_enabled(ADVICE_SKIPPED_CHERRY_PICKS, + _("use --reapply-cherry-picks to include skipped commits")); return 0; } diff --git a/sequencer.h b/sequencer.h index d57d8ea23d..b4d0305606 100644 --- a/sequencer.h +++ b/sequencer.h @@ -116,7 +116,6 @@ struct todo_list { struct todo_item *items; int nr, alloc, current; int done_nr, total_nr; - struct stat_data stat; }; #define TODO_LIST_INIT { STRBUF_INIT } @@ -156,6 +155,7 @@ int sequencer_remove_state(struct replay_opts *opts); */ #define TODO_LIST_ROOT_WITH_ONTO (1U << 6) #define TODO_LIST_REAPPLY_CHERRY_PICKS (1U << 7) +#define TODO_LIST_WARN_SKIPPED_CHERRY_PICKS (1U << 8) int sequencer_make_script(struct repository *r, struct strbuf *out, int argc, const char **argv, unsigned flags); @@ -9,7 +9,8 @@ #include "serve.h" #include "upload-pack.h" -static int advertise_sid; 
+static int advertise_sid = -1; +static int client_hash_algo = GIT_HASH_SHA1; static int always_advertise(struct repository *r, struct strbuf *value) @@ -33,8 +34,22 @@ static int object_format_advertise(struct repository *r, return 1; } +static void object_format_receive(struct repository *r, + const char *algo_name) +{ + if (!algo_name) + die("object-format capability requires an argument"); + + client_hash_algo = hash_algo_by_name(algo_name); + if (client_hash_algo == GIT_HASH_UNKNOWN) + die("unknown object format '%s'", algo_name); +} + static int session_id_advertise(struct repository *r, struct strbuf *value) { + if (advertise_sid == -1 && + git_config_get_bool("transfer.advertisesid", &advertise_sid)) + advertise_sid = 0; if (!advertise_sid) return 0; if (value) @@ -42,6 +57,14 @@ static int session_id_advertise(struct repository *r, struct strbuf *value) return 1; } +static void session_id_receive(struct repository *r, + const char *client_sid) +{ + if (!client_sid) + client_sid = ""; + trace2_data_string("transfer", NULL, "client-sid", client_sid); +} + struct protocol_capability { /* * The name of the capability. The server uses this name when @@ -60,34 +83,70 @@ struct protocol_capability { /* * Function called when a client requests the capability as a command. - * The function will be provided the capabilities requested via 'keys' - * as well as a struct packet_reader 'request' which the command should + * Will be provided a struct packet_reader 'request' which it should * use to read the command specific part of the request. Every command * MUST read until a flush packet is seen before sending a response. * * This field should be NULL for capabilities which are not commands. */ - int (*command)(struct repository *r, - struct strvec *keys, - struct packet_reader *request); + int (*command)(struct repository *r, struct packet_reader *request); + + /* + * Function called when a client requests the capability as a + * non-command. This may be NULL if the capability does nothing. + * + * For a capability of the form "foo=bar", the value string points to + * the content after the "=" (i.e., "bar"). For simple capabilities + * (just "foo"), it is NULL. 
+ */ + void (*receive)(struct repository *r, const char *value); }; static struct protocol_capability capabilities[] = { - { "agent", agent_advertise, NULL }, - { "ls-refs", ls_refs_advertise, ls_refs }, - { "fetch", upload_pack_advertise, upload_pack_v2 }, - { "server-option", always_advertise, NULL }, - { "object-format", object_format_advertise, NULL }, - { "session-id", session_id_advertise, NULL }, - { "object-info", always_advertise, cap_object_info }, + { + .name = "agent", + .advertise = agent_advertise, + }, + { + .name = "ls-refs", + .advertise = ls_refs_advertise, + .command = ls_refs, + }, + { + .name = "fetch", + .advertise = upload_pack_advertise, + .command = upload_pack_v2, + }, + { + .name = "server-option", + .advertise = always_advertise, + }, + { + .name = "object-format", + .advertise = object_format_advertise, + .receive = object_format_receive, + }, + { + .name = "session-id", + .advertise = session_id_advertise, + .receive = session_id_receive, + }, + { + .name = "object-info", + .advertise = always_advertise, + .command = cap_object_info, + }, }; -static void advertise_capabilities(void) +void protocol_v2_advertise_capabilities(void) { struct strbuf capability = STRBUF_INIT; struct strbuf value = STRBUF_INIT; int i; + /* serve by default supports v2 */ + packet_write_fmt(1, "version 2\n"); + for (i = 0; i < ARRAY_SIZE(capabilities); i++) { struct protocol_capability *c = &capabilities[i]; @@ -112,7 +171,7 @@ static void advertise_capabilities(void) strbuf_release(&value); } -static struct protocol_capability *get_capability(const char *key) +static struct protocol_capability *get_capability(const char *key, const char **value) { int i; @@ -122,31 +181,46 @@ static struct protocol_capability *get_capability(const char *key) for (i = 0; i < ARRAY_SIZE(capabilities); i++) { struct protocol_capability *c = &capabilities[i]; const char *out; - if (skip_prefix(key, c->name, &out) && (!*out || *out == '=')) + if (!skip_prefix(key, c->name, &out)) + continue; + if (!*out) { + *value = NULL; return c; + } + if (*out++ == '=') { + *value = out; + return c; + } } return NULL; } -static int is_valid_capability(const char *key) +static int receive_client_capability(const char *key) { - const struct protocol_capability *c = get_capability(key); + const char *value; + const struct protocol_capability *c = get_capability(key, &value); - return c && c->advertise(the_repository, NULL); + if (!c || c->command || !c->advertise(the_repository, NULL)) + return 0; + + if (c->receive) + c->receive(the_repository, value); + return 1; } -static int is_command(const char *key, struct protocol_capability **command) +static int parse_command(const char *key, struct protocol_capability **command) { const char *out; if (skip_prefix(key, "command=", &out)) { - struct protocol_capability *cmd = get_capability(out); + const char *value; + struct protocol_capability *cmd = get_capability(out, &value); if (*command) die("command '%s' requested after already requesting command '%s'", out, (*command)->name); - if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command) + if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command || value) die("invalid command '%s'", out); *command = cmd; @@ -156,42 +230,6 @@ static int is_command(const char *key, struct protocol_capability **command) return 0; } -int has_capability(const struct strvec *keys, const char *capability, - const char **value) -{ - int i; - for (i = 0; i < keys->nr; i++) { - const char *out; - if (skip_prefix(keys->v[i], 
capability, &out) && - (!*out || *out == '=')) { - if (value) { - if (*out == '=') - out++; - *value = out; - } - return 1; - } - } - - return 0; -} - -static void check_algorithm(struct repository *r, struct strvec *keys) -{ - int client = GIT_HASH_SHA1, server = hash_algo_by_ptr(r->hash_algo); - const char *algo_name; - - if (has_capability(keys, "object-format", &algo_name)) { - client = hash_algo_by_name(algo_name); - if (client == GIT_HASH_UNKNOWN) - die("unknown object format '%s'", algo_name); - } - - if (client != server) - die("mismatched object format: server %s; client %s\n", - r->hash_algo->name, hash_algos[client].name); -} - enum request_state { PROCESS_REQUEST_KEYS, PROCESS_REQUEST_DONE, @@ -201,9 +239,8 @@ static int process_request(void) { enum request_state state = PROCESS_REQUEST_KEYS; struct packet_reader reader; - struct strvec keys = STRVEC_INIT; + int seen_capability_or_command = 0; struct protocol_capability *command = NULL; - const char *client_sid; packet_reader_init(&reader, 0, NULL, 0, PACKET_READ_CHOMP_NEWLINE | @@ -223,10 +260,9 @@ static int process_request(void) case PACKET_READ_EOF: BUG("Should have already died when seeing EOF"); case PACKET_READ_NORMAL: - /* collect request; a sequence of keys and values */ - if (is_command(reader.line, &command) || - is_valid_capability(reader.line)) - strvec_push(&keys, reader.line); + if (parse_command(reader.line, &command) || + receive_client_capability(reader.line)) + seen_capability_or_command = 1; else die("unknown capability '%s'", reader.line); @@ -238,7 +274,7 @@ static int process_request(void) * If no command and no keys were given then the client * wanted to terminate the connection. */ - if (!keys.nr) + if (!seen_capability_or_command) return 1; /* @@ -265,40 +301,26 @@ static int process_request(void) if (!command) die("no command requested"); - check_algorithm(the_repository, &keys); - - if (has_capability(&keys, "session-id", &client_sid)) - trace2_data_string("transfer", NULL, "client-sid", client_sid); + if (client_hash_algo != hash_algo_by_ptr(the_repository->hash_algo)) + die("mismatched object format: server %s; client %s\n", + the_repository->hash_algo->name, + hash_algos[client_hash_algo].name); - command->command(the_repository, &keys, &reader); + command->command(the_repository, &reader); - strvec_clear(&keys); return 0; } -/* Main serve loop for protocol version 2 */ -void serve(struct serve_options *options) +void protocol_v2_serve_loop(int stateless_rpc) { - git_config_get_bool("transfer.advertisesid", &advertise_sid); - - if (options->advertise_capabilities || !options->stateless_rpc) { - /* serve by default supports v2 */ - packet_write_fmt(1, "version 2\n"); - - advertise_capabilities(); - /* - * If only the list of capabilities was requested exit - * immediately after advertising capabilities - */ - if (options->advertise_capabilities) - return; - } + if (!stateless_rpc) + protocol_v2_advertise_capabilities(); /* * If stateless-rpc was requested then exit after * a single request/response exchange */ - if (options->stateless_rpc) { + if (stateless_rpc) { process_request(); } else { for (;;) @@ -1,15 +1,7 @@ #ifndef SERVE_H #define SERVE_H -struct strvec; -int has_capability(const struct strvec *keys, const char *capability, - const char **value); - -struct serve_options { - unsigned advertise_capabilities; - unsigned stateless_rpc; -}; -#define SERVE_OPTIONS_INIT { 0 } -void serve(struct serve_options *options); +void protocol_v2_advertise_capabilities(void); +void 
protocol_v2_serve_loop(int stateless_rpc); #endif /* SERVE_H */ @@ -1423,11 +1423,9 @@ const char *resolve_gitdir_gently(const char *suspect, int *return_error_code) /* if any standard file descriptor is missing open it to /dev/null */ void sanitize_stdfds(void) { - int fd = open("/dev/null", O_RDWR, 0); - while (fd != -1 && fd < 2) - fd = dup(fd); - if (fd == -1) - die_errno(_("open /dev/null or dup failed")); + int fd = xopen("/dev/null", O_RDWR); + while (fd < 2) + fd = xdup(fd); if (fd > 2) close(fd); } diff --git a/sparse-index.c b/sparse-index.c index c6b4feec41..7b7ff79e04 100644 --- a/sparse-index.c +++ b/sparse-index.c @@ -33,19 +33,14 @@ static int convert_to_sparse_rec(struct index_state *istate, { int i, can_convert = 1; int start_converted = num_converted; - enum pattern_match_result match; - int dtype = DT_UNKNOWN; struct strbuf child_path = STRBUF_INIT; - struct pattern_list *pl = istate->sparse_checkout_patterns; /* * Is the current path outside of the sparse cone? * Then check if the region can be replaced by a sparse * directory entry (everything is sparse and merged). */ - match = path_matches_pattern_list(ct_path, ct_pathlen, - NULL, &dtype, pl, istate); - if (match != NOT_MATCHED) + if (path_in_sparse_checkout(ct_path, istate)) can_convert = 0; for (i = start; can_convert && i < end; i++) { @@ -127,41 +122,51 @@ static int index_has_unmerged_entries(struct index_state *istate) return 0; } -int convert_to_sparse(struct index_state *istate) +int convert_to_sparse(struct index_state *istate, int flags) { int test_env; - if (istate->split_index || istate->sparse_index || + if (istate->sparse_index || !istate->cache_nr || !core_apply_sparse_checkout || !core_sparse_checkout_cone) return 0; if (!istate->repo) istate->repo = the_repository; - /* - * The GIT_TEST_SPARSE_INDEX environment variable triggers the - * index.sparse config variable to be on. - */ - test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1); - if (test_env >= 0) - set_sparse_index_config(istate->repo, test_env); - - /* - * Only convert to sparse if index.sparse is set. - */ - prepare_repo_settings(istate->repo); - if (!istate->repo->settings.sparse_index) - return 0; + if (!(flags & SPARSE_INDEX_MEMORY_ONLY)) { + /* + * The sparse index is not (yet) integrated with a split index. + */ + if (istate->split_index) + return 0; + /* + * The GIT_TEST_SPARSE_INDEX environment variable triggers the + * index.sparse config variable to be on. + */ + test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1); + if (test_env >= 0) + set_sparse_index_config(istate->repo, test_env); - if (!istate->sparse_checkout_patterns) { - istate->sparse_checkout_patterns = xcalloc(1, sizeof(struct pattern_list)); - if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0) + /* + * Only convert to sparse if index.sparse is set. + */ + prepare_repo_settings(istate->repo); + if (!istate->repo->settings.sparse_index) return 0; } - if (!istate->sparse_checkout_patterns->use_cone_patterns) { - warning(_("attempting to use sparse-index without cone mode")); - return -1; - } + if (init_sparse_checkout_patterns(istate)) + return 0; + + /* + * We need cone-mode patterns to use sparse-index. If a user edits + * their sparse-checkout file manually, then we can detect during + * parsing that they are not actually using cone-mode patterns and + * hence we need to abort this conversion _without error_. Warnings + * already exist in the pattern parsing to inform the user of their + * bad patterns. 
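Requiring cone-mode patterns matters because the conversion only has to answer a directory-prefix question: is this path inside one of the recursively included cone directories? A toy sketch of that check follows; the cone_dirs list and the in_cone() helper are invented for illustration and stand in, only loosely, for path_in_sparse_checkout() and the real pattern machinery.

```
#include <stdio.h>
#include <string.h>

/*
 * Illustrative only: a path is kept if it lies inside a cone directory,
 * or is a leading directory of one; everything else is a candidate for
 * being collapsed into a sparse directory entry.
 */
static const char *cone_dirs[] = { "deep/deeper1/", "folder1/" };

static int in_cone(const char *path)
{
	size_t i;

	for (i = 0; i < sizeof(cone_dirs) / sizeof(cone_dirs[0]); i++) {
		size_t len = strlen(cone_dirs[i]);
		/* inside the cone directory itself */
		if (!strncmp(path, cone_dirs[i], len))
			return 1;
		/* a leading directory of the cone must also stay */
		if (!strncmp(cone_dirs[i], path, strlen(path)))
			return 1;
	}
	return 0;
}

int main(void)
{
	const char *paths[] = { "deep/deeper1/a", "deep/", "folder2/a", "x.txt" };
	size_t i;

	for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++)
		printf("%-16s %s\n", paths[i],
		       in_cone(paths[i]) ? "keep" : "can be sparse");
	return 0;
}
```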
+ */ + if (!istate->sparse_checkout_patterns->use_cone_patterns) + return 0; /* * NEEDSWORK: If we have unmerged entries, then stay full. @@ -172,10 +177,15 @@ int convert_to_sparse(struct index_state *istate) /* Clear and recompute the cache-tree */ cache_tree_free(&istate->cache_tree); - if (cache_tree_update(istate, 0)) { - warning(_("unable to update cache-tree, staying full")); - return -1; - } + /* + * Silently return if there is a problem with the cache tree update, + * which might just be due to a conflict state in some entry. + * + * This might create new tree objects, so be sure to use + * WRITE_TREE_MISSING_OK. + */ + if (cache_tree_update(istate, WRITE_TREE_MISSING_OK)) + return 0; remove_fsmonitor(istate); @@ -283,6 +293,7 @@ void ensure_full_index(struct index_state *istate) /* Copy back into original index. */ memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash)); + memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash)); istate->sparse_index = 0; free(istate->cache); istate->cache = full->cache; diff --git a/sparse-index.h b/sparse-index.h index 1115a0d7dd..9f3d7bc7fa 100644 --- a/sparse-index.h +++ b/sparse-index.h @@ -2,7 +2,8 @@ #define SPARSE_INDEX_H__ struct index_state; -int convert_to_sparse(struct index_state *istate); +#define SPARSE_INDEX_MEMORY_ONLY (1 << 0) +int convert_to_sparse(struct index_state *istate, int flags); /* * Some places in the codebase expect to search for a specific path. diff --git a/string-list.c b/string-list.c index 43576ad126..549fc416d6 100644 --- a/string-list.c +++ b/string-list.c @@ -13,14 +13,6 @@ void string_list_init_dup(struct string_list *list) memcpy(list, &blank, sizeof(*list)); } -void string_list_init(struct string_list *list, int strdup_strings) -{ - if (strdup_strings) - string_list_init_dup(list); - else - string_list_init_nodup(list); -} - /* if there is no exact match, point to the index where the entry could be * inserted */ static int get_entry_index(const struct string_list *list, const char *string, diff --git a/string-list.h b/string-list.h index 0d6b469239..267d6e5769 100644 --- a/string-list.h +++ b/string-list.h @@ -104,11 +104,6 @@ struct string_list { void string_list_init_nodup(struct string_list *list); void string_list_init_dup(struct string_list *list); -/** - * TODO remove: For compatibility with any in-flight older API users - */ -void string_list_init(struct string_list *list, int strdup_strings); - /** Callback function type for for_each_string_list */ typedef int (*string_list_each_func_t)(struct string_list_item *, void *); @@ -29,8 +29,8 @@ extern const char *empty_strvec[]; */ struct strvec { const char **v; - int nr; - int alloc; + size_t nr; + size_t alloc; }; #define STRVEC_INIT { empty_strvec, 0, 0 } diff --git a/submodule-config.c b/submodule-config.c index 2026120fb3..f95344028b 100644 --- a/submodule-config.c +++ b/submodule-config.c @@ -649,9 +649,10 @@ static void config_from_gitmodules(config_fn_t fn, struct repository *repo, void config_source.file = file; } else if (repo_get_oid(repo, GITMODULES_INDEX, &oid) >= 0 || repo_get_oid(repo, GITMODULES_HEAD, &oid) >= 0) { + config_source.repo = repo; config_source.blob = oidstr = xstrdup(oid_to_hex(&oid)); if (repo != the_repository) - add_to_alternates_memory(repo->objects->odb->path); + add_submodule_odb_by_path(repo->objects->odb->path); } else { goto out; } @@ -702,7 +703,7 @@ void gitmodules_config_oid(const struct object_id *commit_oid) if (gitmodule_oid_from_commit(commit_oid, &oid, &rev)) { 
git_config_from_blob_oid(gitmodules_cb, rev.buf, - &oid, the_repository); + the_repository, &oid, the_repository); } strbuf_release(&rev); diff --git a/submodule.c b/submodule.c index 8e611fe1db..62beb8fd5f 100644 --- a/submodule.c +++ b/submodule.c @@ -165,6 +165,8 @@ void stage_updated_gitmodules(struct index_state *istate) die(_("staging updated .gitmodules failed")); } +static struct string_list added_submodule_odb_paths = STRING_LIST_INIT_NODUP; + /* TODO: remove this function, use repo_submodule_init instead. */ int add_submodule_odb(const char *path) { @@ -178,12 +180,33 @@ int add_submodule_odb(const char *path) ret = -1; goto done; } - add_to_alternates_memory(objects_directory.buf); + string_list_insert(&added_submodule_odb_paths, + strbuf_detach(&objects_directory, NULL)); done: strbuf_release(&objects_directory); return ret; } +void add_submodule_odb_by_path(const char *path) +{ + string_list_insert(&added_submodule_odb_paths, xstrdup(path)); +} + +int register_all_submodule_odb_as_alternates(void) +{ + int i; + int ret = added_submodule_odb_paths.nr; + + for (i = 0; i < added_submodule_odb_paths.nr; i++) + add_to_alternates_memory(added_submodule_odb_paths.items[i].string); + if (ret) { + string_list_clear(&added_submodule_odb_paths, 0); + if (git_env_bool("GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB", 0)) + BUG("register_all_submodule_odb_as_alternates() called"); + } + return ret; +} + void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt, const char *path) { @@ -237,6 +260,11 @@ int option_parse_recurse_submodules_worktree_updater(const struct option *opt, /* * Determine if a submodule has been initialized at a given 'path' */ +/* + * NEEDSWORK: Emit a warning if submodule.active exists, but is valueless, + * ie, the config looks like: "[submodule] active\n". + * Since that is an invalid pathspec, we should inform the user. + */ int is_submodule_active(struct repository *repo, const char *path) { int ret = 0; @@ -497,9 +525,6 @@ static void prepare_submodule_repo_env_in_gitdir(struct strvec *out) /* * Initialize a repository struct for a submodule based on the provided 'path'. * - * Unlike repo_submodule_init, this tolerates submodules not present - * in .gitmodules. This function exists only to preserve historical behavior, - * * Returns the repository struct on success, * NULL when the submodule is not present. */ @@ -697,8 +722,20 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path, strvec_push(&cp.args, oid_to_hex(new_oid)); prepare_submodule_repo_env(&cp.env_array); - if (start_command(&cp)) + + if (!is_directory(path)) { + /* fall back to absorbed git dir, if any */ + if (!sub) + goto done; + cp.dir = sub->gitdir; + strvec_push(&cp.env_array, GIT_DIR_ENVIRONMENT "=."); + strvec_push(&cp.env_array, GIT_WORK_TREE_ENVIRONMENT "=."); + } + + if (start_command(&cp)) { diff_emit_submodule_error(o, "(diff failed)\n"); + goto done; + } while (strbuf_getwholeline_fd(&sb, cp.out, '\n') != EOF) diff_emit_submodule_pipethrough(o, sb.buf, sb.len); @@ -1381,24 +1418,13 @@ static void fetch_task_release(struct fetch_task *p) } static struct repository *get_submodule_repo_for(struct repository *r, - const struct submodule *sub) + const char *path) { struct repository *ret = xmalloc(sizeof(*ret)); - if (repo_submodule_init(ret, r, sub)) { - /* - * No entry in .gitmodules? Technically not a submodule, - * but historically we supported repositories that happen to be - * in-place where a gitlink is. Keep supporting them. 
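The helpers added above split submodule object-store registration into two steps: add_submodule_odb() and add_submodule_odb_by_path() only queue paths in a string list, and the alternates are wired up later, in one place, by register_all_submodule_odb_as_alternates(). Here is a small stand-alone sketch of that deferred-registration shape; the queue_odb()/register_queued_odbs() names and the FATAL_REGISTER_ODB variable are invented for illustration and mirror the patch only loosely.

```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: collect now, register later. */
static char *queued[16];
static int nr_queued;

static void queue_odb(const char *path)
{
	if (nr_queued < 16)
		queued[nr_queued++] = strdup(path);
}

static int register_queued_odbs(void)
{
	int i, ret = nr_queued;

	for (i = 0; i < nr_queued; i++) {
		/* here git would call add_to_alternates_memory() */
		printf("registering alternate: %s\n", queued[i]);
		free(queued[i]);
	}
	nr_queued = 0;
	if (ret && getenv("FATAL_REGISTER_ODB")) {
		/* make any remaining reliance on the fallback loud */
		fprintf(stderr, "BUG: register_queued_odbs() called\n");
		exit(1);
	}
	return ret;
}

int main(void)
{
	queue_odb("sub1/.git/objects");
	queue_odb("sub2/.git/objects");
	return register_queued_odbs() ? 0 : 1;
}
```

Funnelling every registration through a single function is what makes the GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB check in the patch possible: any code path that still falls back to the alternates mechanism trips it immediately.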
- */ - struct strbuf gitdir = STRBUF_INIT; - strbuf_repo_worktree_path(&gitdir, r, "%s/.git", sub->path); - if (repo_init(ret, gitdir.buf, NULL)) { - strbuf_release(&gitdir); - free(ret); - return NULL; - } - strbuf_release(&gitdir); + if (repo_submodule_init(ret, r, path, null_oid())) { + free(ret); + return NULL; } return ret; @@ -1440,7 +1466,7 @@ static int get_next_submodule(struct child_process *cp, continue; } - task->repo = get_submodule_repo_for(spf->r, task->sub); + task->repo = get_submodule_repo_for(spf->r, task->sub->path); if (task->repo) { struct strbuf submodule_prefix = STRBUF_INIT; child_process_init(cp); @@ -1819,14 +1845,16 @@ out: void submodule_unset_core_worktree(const struct submodule *sub) { - char *config_path = xstrfmt("%s/modules/%s/config", - get_git_dir(), sub->name); + struct strbuf config_path = STRBUF_INIT; - if (git_config_set_in_file_gently(config_path, "core.worktree", NULL)) + submodule_name_to_gitdir(&config_path, the_repository, sub->name); + strbuf_addstr(&config_path, "/config"); + + if (git_config_set_in_file_gently(config_path.buf, "core.worktree", NULL)) warning(_("Could not unset core.worktree setting in submodule '%s'"), sub->path); - free(config_path); + strbuf_release(&config_path); } static const char *get_super_prefix_or_empty(void) @@ -1922,20 +1950,22 @@ int submodule_move_head(const char *path, absorb_git_dir_into_superproject(path, ABSORB_GITDIR_RECURSE_SUBMODULES); } else { - char *gitdir = xstrfmt("%s/modules/%s", - get_git_dir(), sub->name); - connect_work_tree_and_git_dir(path, gitdir, 0); - free(gitdir); + struct strbuf gitdir = STRBUF_INIT; + submodule_name_to_gitdir(&gitdir, the_repository, + sub->name); + connect_work_tree_and_git_dir(path, gitdir.buf, 0); + strbuf_release(&gitdir); /* make sure the index is clean as well */ submodule_reset_index(path); } if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) { - char *gitdir = xstrfmt("%s/modules/%s", - get_git_dir(), sub->name); - connect_work_tree_and_git_dir(path, gitdir, 1); - free(gitdir); + struct strbuf gitdir = STRBUF_INIT; + submodule_name_to_gitdir(&gitdir, the_repository, + sub->name); + connect_work_tree_and_git_dir(path, gitdir.buf, 1); + strbuf_release(&gitdir); } } @@ -2050,7 +2080,7 @@ int validate_submodule_git_dir(char *git_dir, const char *submodule_name) static void relocate_single_git_dir_into_superproject(const char *path) { char *old_git_dir = NULL, *real_old_git_dir = NULL, *real_new_git_dir = NULL; - char *new_git_dir; + struct strbuf new_gitdir = STRBUF_INIT; const struct submodule *sub; if (submodule_uses_worktrees(path)) @@ -2068,14 +2098,13 @@ static void relocate_single_git_dir_into_superproject(const char *path) if (!sub) die(_("could not lookup name for submodule '%s'"), path); - new_git_dir = git_pathdup("modules/%s", sub->name); - if (validate_submodule_git_dir(new_git_dir, sub->name) < 0) + submodule_name_to_gitdir(&new_gitdir, the_repository, sub->name); + if (validate_submodule_git_dir(new_gitdir.buf, sub->name) < 0) die(_("refusing to move '%s' into an existing git dir"), real_old_git_dir); - if (safe_create_leading_directories_const(new_git_dir) < 0) - die(_("could not create directory '%s'"), new_git_dir); - real_new_git_dir = real_pathdup(new_git_dir, 1); - free(new_git_dir); + if (safe_create_leading_directories_const(new_gitdir.buf) < 0) + die(_("could not create directory '%s'"), new_gitdir.buf); + real_new_git_dir = real_pathdup(new_gitdir.buf, 1); fprintf(stderr, _("Migrating git directory of '%s%s' from\n'%s' to\n'%s'\n"), 
get_super_prefix_or_empty(), path, @@ -2086,6 +2115,7 @@ static void relocate_single_git_dir_into_superproject(const char *path) free(old_git_dir); free(real_old_git_dir); free(real_new_git_dir); + strbuf_release(&new_gitdir); } /* @@ -2105,6 +2135,7 @@ void absorb_git_dir_into_superproject(const char *path, /* Not populated? */ if (!sub_git_dir) { const struct submodule *sub; + struct strbuf sub_gitdir = STRBUF_INIT; if (err_code == READ_GITFILE_ERR_STAT_FAILED) { /* unpopulated as expected */ @@ -2126,8 +2157,9 @@ void absorb_git_dir_into_superproject(const char *path, sub = submodule_from_path(the_repository, null_oid(), path); if (!sub) die(_("could not lookup name for submodule '%s'"), path); - connect_work_tree_and_git_dir(path, - git_path("modules/%s", sub->name), 0); + submodule_name_to_gitdir(&sub_gitdir, the_repository, sub->name); + connect_work_tree_and_git_dir(path, sub_gitdir.buf, 0); + strbuf_release(&sub_gitdir); } else { /* Is it already absorbed into the superprojects git dir? */ char *real_sub_git_dir = real_pathdup(sub_git_dir, 1); @@ -2278,9 +2310,36 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule) goto cleanup; } strbuf_reset(buf); - strbuf_git_path(buf, "%s/%s", "modules", sub->name); + submodule_name_to_gitdir(buf, the_repository, sub->name); } cleanup: return ret; } + +void submodule_name_to_gitdir(struct strbuf *buf, struct repository *r, + const char *submodule_name) +{ + /* + * NEEDSWORK: The current way of mapping a submodule's name to + * its location in .git/modules/ has problems with some naming + * schemes. For example, if a submodule is named "foo" and + * another is named "foo/bar" (whether present in the same + * superproject commit or not - the problem will arise if both + * superproject commits have been checked out at any point in + * time), or if two submodule names only have different cases in + * a case-insensitive filesystem. + * + * There are several solutions, including encoding the path in + * some way, introducing a submodule.<name>.gitdir config in + * .git/config (not .gitmodules) that allows overriding what the + * gitdir of a submodule would be (and teach Git, upon noticing + * a clash, to automatically determine a non-clashing name and + * to write such a config), or introducing a + * submodule.<name>.gitdir config in .gitmodules that repo + * administrators can explicitly set. Nothing has been decided, + * so for now, just append the name at the end of the path. + */ + strbuf_repo_git_path(buf, r, "modules/"); + strbuf_addstr(buf, submodule_name); +} diff --git a/submodule.h b/submodule.h index 84640c49c1..4578e501b8 100644 --- a/submodule.h +++ b/submodule.h @@ -97,7 +97,15 @@ int submodule_uses_gitfile(const char *path); #define SUBMODULE_REMOVAL_IGNORE_IGNORED_UNTRACKED (1<<2) int bad_to_remove_submodule(const char *path, unsigned flags); +/* + * Call add_submodule_odb() to add the submodule at the given path to a list. + * When register_all_submodule_odb_as_alternates() is called, the object stores + * of all submodules in that list will be added as alternates in + * the_repository. + */ int add_submodule_odb(const char *path); +void add_submodule_odb_by_path(const char *path); +int register_all_submodule_odb_as_alternates(void); /* * Checks if there are submodule changes in a..b. 
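The NEEDSWORK comment in submodule_name_to_gitdir() is easiest to see with two concrete names. The naive mapping simply appends the submodule name to "<gitdir>/modules/", so the gitdir chosen for "foo" becomes a parent directory of the one chosen for "foo/bar". The sketch below, using an invented name_to_gitdir() helper, just prints the two colliding paths; it is illustrative only.

```
#include <stdio.h>

/*
 * Illustrative only: the naive name -> gitdir mapping described above,
 * i.e. appending the submodule name to "<gitdir>/modules/".
 */
static void name_to_gitdir(char *out, size_t len,
			   const char *gitdir, const char *name)
{
	snprintf(out, len, "%s/modules/%s", gitdir, name);
}

int main(void)
{
	const char *names[] = { "foo", "foo/bar" };
	char path[256];
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		name_to_gitdir(path, sizeof(path), ".git", names[i]);
		printf("submodule '%s' -> %s\n", names[i], path);
	}
	return 0;
}
```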
If a is the null OID, @@ -125,6 +133,13 @@ int push_unpushed_submodules(struct repository *r, int submodule_to_gitdir(struct strbuf *buf, const char *submodule); /* + * Given a submodule name, create a path to where the submodule's gitdir lives + * inside of the provided repository's 'modules' directory. + */ +void submodule_name_to_gitdir(struct strbuf *buf, struct repository *r, + const char *submodule_name); + +/* * Make sure that no submodule's git dir is nested in a sibling submodule's. */ int validate_submodule_git_dir(char *git_dir, const char *submodule_name); @@ -432,6 +432,10 @@ GIT_TEST_MULTI_PACK_INDEX=<boolean>, when true, forces the multi-pack- index to be written after every 'git repack' command, and overrides the 'core.multiPackIndex' setting to true. +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=<boolean>, when true, sets the +'--bitmap' option on all invocations of 'git multi-pack-index write', +and ignores pack-objects' '--write-bitmap-index'. + GIT_TEST_SIDEBAND_ALL=<boolean>, when true, overrides the 'uploadpack.allowSidebandAll' setting to true, and when false, forces fetch-pack to not request sideband-all (even if the server advertises @@ -455,6 +459,16 @@ GIT_TEST_CHECKOUT_WORKERS=<n> overrides the 'checkout.workers' setting to <n> and 'checkout.thresholdForParallelism' to 0, forcing the execution of the parallel-checkout code. +GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=<boolean>, when true, makes +registering submodule ODBs as alternates a fatal action. Support for +this environment variable can be removed once the migration to +explicitly providing repositories when accessing submodule objects is +complete (in which case we might want to replace this with a trace2 +call so that users can make it visible if accessing submodule objects +without an explicit repository still happens) or needs to be abandoned +for whatever reason (in which case the migrated codepaths still retain +their performance benefits). + Naming Tests ------------ @@ -760,7 +774,8 @@ Test harness library -------------------- There are a handful helper functions defined in the test harness -library for your script to use. +library for your script to use. Some of them are listed below; +see test-lib-functions.sh for the full list and their options. - test_expect_success [<prereq>] <message> <script> @@ -806,10 +821,12 @@ library for your script to use. argument. This is primarily meant for use during the development of a new test script. - - debug <git-command> + - debug [options] <git-command> Run a git command inside a debugger. This is primarily meant for - use when debugging a failing test script. + use when debugging a failing test script. With '-t', use your + original TERM instead of test-lib.sh's "dumb", so that your + debugger interface has colors. - test_done @@ -996,7 +1013,7 @@ library for your script to use. EOF - - test_pause + - test_pause [options] This command is useful for writing and debugging tests and must be removed before submitting. 
It halts the execution of the test and diff --git a/t/helper/test-dump-untracked-cache.c b/t/helper/test-dump-untracked-cache.c index cf0f2c7228..99010614f6 100644 --- a/t/helper/test-dump-untracked-cache.c +++ b/t/helper/test-dump-untracked-cache.c @@ -45,8 +45,10 @@ int cmd__dump_untracked_cache(int ac, const char **av) struct untracked_cache *uc; struct strbuf base = STRBUF_INIT; - /* Hack to avoid modifying the untracked cache when we read it */ - ignore_untracked_cache_config = 1; + /* Set core.untrackedCache=keep before setup_git_directory() */ + xsetenv("GIT_CONFIG_COUNT", "1", 1); + xsetenv("GIT_CONFIG_KEY_0", "core.untrackedCache", 1); + xsetenv("GIT_CONFIG_VALUE_0", "keep", 1); setup_git_directory(); if (read_cache() < 0) diff --git a/t/helper/test-parse-options.c b/t/helper/test-parse-options.c index 2051ce57db..a282b6ff13 100644 --- a/t/helper/test-parse-options.c +++ b/t/helper/test-parse-options.c @@ -134,7 +134,6 @@ int cmd__parse_options(int argc, const char **argv) OPT_NOOP_NOARG(0, "obsolete"), OPT_STRING_LIST(0, "list", &list, "str", "add str to list"), OPT_GROUP("Magic arguments"), - OPT_ARGUMENT("quux", NULL, "means --quux"), OPT_NUMBER_CALLBACK(&integer, "set integer to NUM", number_callback), { OPTION_COUNTUP, '+', NULL, &boolean, NULL, "same as -b", diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c index 7c2eb11a8e..cb0d27049a 100644 --- a/t/helper/test-read-midx.c +++ b/t/helper/test-read-midx.c @@ -60,12 +60,26 @@ static int read_midx_file(const char *object_dir, int show_objects) return 0; } +static int read_midx_checksum(const char *object_dir) +{ + struct multi_pack_index *m; + + setup_git_directory(); + m = load_multi_pack_index(object_dir, 1); + if (!m) + return 1; + printf("%s\n", hash_to_hex(get_midx_checksum(m))); + return 0; +} + int cmd__read_midx(int argc, const char **argv) { if (!(argc == 2 || argc == 3)) - usage("read-midx [--show-objects] <object-dir>"); + usage("read-midx [--show-objects|--checksum] <object-dir>"); if (!strcmp(argv[1], "--show-objects")) return read_midx_file(argv[2], 1); + else if (!strcmp(argv[1], "--checksum")) + return read_midx_checksum(argv[2]); return read_midx_file(argv[1], 0); } diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c index 7ae03dc712..14c57365e7 100644 --- a/t/helper/test-run-command.c +++ b/t/helper/test-run-command.c @@ -61,7 +61,7 @@ struct testsuite { int quiet, immediate, verbose, verbose_log, trace, write_junit_xml; }; #define TESTSUITE_INIT \ - { STRING_LIST_INIT_DUP, STRING_LIST_INIT_DUP, -1, 0, 0, 0, 0, 0, 0 } + { STRING_LIST_INIT_DUP, STRING_LIST_INIT_DUP, 0, 0, 0, 0, 0, 0, 0 } static int next_test(struct child_process *cp, struct strbuf *err, void *cb, void **task_cb) @@ -142,9 +142,6 @@ static int testsuite(int argc, const char **argv) OPT_END() }; - memset(&suite, 0, sizeof(suite)); - suite.tests.strdup_strings = suite.failed.strdup_strings = 1; - argc = parse_options(argc, argv, NULL, options, testsuite_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/t/helper/test-serve-v2.c b/t/helper/test-serve-v2.c index aee35e5aef..28e905afc3 100644 --- a/t/helper/test-serve-v2.c +++ b/t/helper/test-serve-v2.c @@ -10,12 +10,12 @@ static char const * const serve_usage[] = { int cmd__serve_v2(int argc, const char **argv) { - struct serve_options opts = SERVE_OPTIONS_INIT; - + int stateless_rpc = 0; + int advertise_capabilities = 0; struct option options[] = { - OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc, + OPT_BOOL(0, "stateless-rpc", &stateless_rpc, N_("quit after a 
single request/response exchange")), - OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities, + OPT_BOOL(0, "advertise-capabilities", &advertise_capabilities, N_("exit immediately after advertising capabilities")), OPT_END() }; @@ -25,7 +25,11 @@ int cmd__serve_v2(int argc, const char **argv) argc = parse_options(argc, argv, prefix, options, serve_usage, PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN); - serve(&opts); + + if (advertise_capabilities) + protocol_v2_advertise_capabilities(); + else + protocol_v2_serve_loop(stateless_rpc); return 0; } diff --git a/t/helper/test-submodule-nested-repo-config.c b/t/helper/test-submodule-nested-repo-config.c index e3f11ff5a7..dc1c14bde3 100644 --- a/t/helper/test-submodule-nested-repo-config.c +++ b/t/helper/test-submodule-nested-repo-config.c @@ -11,15 +11,13 @@ static void die_usage(const char **argv, const char *msg) int cmd__submodule_nested_repo_config(int argc, const char **argv) { struct repository subrepo; - const struct submodule *sub; if (argc < 3) die_usage(argv, "Wrong number of arguments."); setup_git_directory(); - sub = submodule_from_path(the_repository, null_oid(), argv[1]); - if (repo_submodule_init(&subrepo, the_repository, sub)) { + if (repo_submodule_init(&subrepo, the_repository, argv[1], null_oid())) { die_usage(argv, "Submodule not found."); } diff --git a/t/lib-bitmap.sh b/t/lib-bitmap.sh index fe3f98be24..21d0392dda 100644 --- a/t/lib-bitmap.sh +++ b/t/lib-bitmap.sh @@ -1,3 +1,6 @@ +# Helpers for scripts testing bitmap functionality; see t5310 for +# example usage. + # Compare a file containing rev-list bitmap traversal output to its non-bitmap # counterpart. You can't just use test_cmp for this, because the two produce # subtly different output: @@ -24,3 +27,240 @@ test_bitmap_traversal () { test_cmp "$1.normalized" "$2.normalized" && rm -f "$1.normalized" "$2.normalized" } + +# To ensure the logic for "maximal commits" is exercised, make +# the repository a bit more complicated. +# +# other second +# * * +# (99 commits) (99 commits) +# * * +# |\ /| +# | * octo-other octo-second * | +# |/|\_________ ____________/|\| +# | \ \/ __________/ | +# | | ________/\ / | +# * |/ * merge-right * +# | _|__________/ \____________ | +# |/ | \| +# (l1) * * merge-left * (r1) +# | / \________________________ | +# |/ \| +# (l2) * * (r2) +# \___________________________ | +# \| +# * (base) +# +# We only push bits down the first-parent history, which +# makes some of these commits unimportant! +# +# The important part for the maximal commit algorithm is how +# the bitmasks are extended. Assuming starting bit positions +# for second (bit 0) and other (bit 1), the bitmasks at the +# end should be: +# +# second: 1 (maximal, selected) +# other: 01 (maximal, selected) +# (base): 11 (maximal) +# +# This complicated history was important for a previous +# version of the walk that guarantees never walking a +# commit multiple times. That goal might be important +# again, so preserve this complicated case. For now, this +# test will guarantee that the bitmaps are computed +# correctly, even with the repeat calculations. 
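The bitmask table in the comment above can be reproduced with a very small model: give each selected tip its own bit and OR that bit into every ancestor. A toy sketch follows; the node structure, single-parent chain, and push_mask() helper are simplifications invented for illustration and are not git's pack-bitmap-write code, which also has to cope with merges and repeated visits (exactly what the complicated history here exercises).

```
#include <stdio.h>

/*
 * Illustrative only: with "second" owning bit 0 and "other" owning bit 1,
 * their common ancestor "base" ends up with both bits set, matching the
 * table above (second: 1, other: 01, base: 11).
 */
struct node {
	const char *name;
	unsigned mask;
	struct node *parent;	/* single parent is enough for the sketch */
};

static void push_mask(struct node *tip, unsigned bit)
{
	struct node *n;

	for (n = tip; n; n = n->parent)
		n->mask |= bit;
}

int main(void)
{
	struct node base = { "base", 0, NULL };
	struct node second = { "second", 0, &base };
	struct node other = { "other", 0, &base };
	struct node *all[] = { &second, &other, &base };
	size_t i;

	push_mask(&second, 1u << 0);
	push_mask(&other, 1u << 1);

	for (i = 0; i < sizeof(all) / sizeof(all[0]); i++)
		printf("%-7s mask=%u\n", all[i]->name, all[i]->mask);
	return 0;
}
```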
+setup_bitmap_history() { + test_expect_success 'setup repo with moderate-sized history' ' + test_commit_bulk --id=file 10 && + git branch -M second && + git checkout -b other HEAD~5 && + test_commit_bulk --id=side 10 && + + # add complicated history setup, including merges and + # ambiguous merge-bases + + git checkout -b merge-left other~2 && + git merge second~2 -m "merge-left" && + + git checkout -b merge-right second~1 && + git merge other~1 -m "merge-right" && + + git checkout -b octo-second second && + git merge merge-left merge-right -m "octopus-second" && + + git checkout -b octo-other other && + git merge merge-left merge-right -m "octopus-other" && + + git checkout other && + git merge octo-other -m "pull octopus" && + + git checkout second && + git merge octo-second -m "pull octopus" && + + # Remove these branches so they are not selected + # as bitmap tips + git branch -D merge-left && + git branch -D merge-right && + git branch -D octo-other && + git branch -D octo-second && + + # add padding to make these merges less interesting + # and avoid having them selected for bitmaps + test_commit_bulk --id=file 100 && + git checkout other && + test_commit_bulk --id=side 100 && + git checkout second && + + bitmaptip=$(git rev-parse second) && + blob=$(echo tagged-blob | git hash-object -w --stdin) && + git tag tagged-blob $blob + ' +} + +rev_list_tests_head () { + test_expect_success "counting commits via bitmap ($state, $branch)" ' + git rev-list --count $branch >expect && + git rev-list --use-bitmap-index --count $branch >actual && + test_cmp expect actual + ' + + test_expect_success "counting partial commits via bitmap ($state, $branch)" ' + git rev-list --count $branch~5..$branch >expect && + git rev-list --use-bitmap-index --count $branch~5..$branch >actual && + test_cmp expect actual + ' + + test_expect_success "counting commits with limit ($state, $branch)" ' + git rev-list --count -n 1 $branch >expect && + git rev-list --use-bitmap-index --count -n 1 $branch >actual && + test_cmp expect actual + ' + + test_expect_success "counting non-linear history ($state, $branch)" ' + git rev-list --count other...second >expect && + git rev-list --use-bitmap-index --count other...second >actual && + test_cmp expect actual + ' + + test_expect_success "counting commits with limiting ($state, $branch)" ' + git rev-list --count $branch -- 1.t >expect && + git rev-list --use-bitmap-index --count $branch -- 1.t >actual && + test_cmp expect actual + ' + + test_expect_success "counting objects via bitmap ($state, $branch)" ' + git rev-list --count --objects $branch >expect && + git rev-list --use-bitmap-index --count --objects $branch >actual && + test_cmp expect actual + ' + + test_expect_success "enumerate commits ($state, $branch)" ' + git rev-list --use-bitmap-index $branch >actual && + git rev-list $branch >expect && + test_bitmap_traversal --no-confirm-bitmaps expect actual + ' + + test_expect_success "enumerate --objects ($state, $branch)" ' + git rev-list --objects --use-bitmap-index $branch >actual && + git rev-list --objects $branch >expect && + test_bitmap_traversal expect actual + ' + + test_expect_success "bitmap --objects handles non-commit objects ($state, $branch)" ' + git rev-list --objects --use-bitmap-index $branch tagged-blob >actual && + grep $blob actual + ' +} + +rev_list_tests () { + state=$1 + + for branch in "second" "other" + do + rev_list_tests_head + done +} + +basic_bitmap_tests () { + tip="$1" + test_expect_success 'rev-list --test-bitmap verifies bitmaps' " + git 
rev-list --test-bitmap "${tip:-HEAD}" + " + + rev_list_tests 'full bitmap' + + test_expect_success 'clone from bitmapped repository' ' + rm -fr clone.git && + git clone --no-local --bare . clone.git && + git rev-parse HEAD >expect && + git --git-dir=clone.git rev-parse HEAD >actual && + test_cmp expect actual + ' + + test_expect_success 'partial clone from bitmapped repository' ' + test_config uploadpack.allowfilter true && + rm -fr partial-clone.git && + git clone --no-local --bare --filter=blob:none . partial-clone.git && + ( + cd partial-clone.git && + pack=$(echo objects/pack/*.pack) && + git verify-pack -v "$pack" >have && + awk "/blob/ { print \$1 }" <have >blobs && + # we expect this single blob because of the direct ref + git rev-parse refs/tags/tagged-blob >expect && + test_cmp expect blobs + ) + ' + + test_expect_success 'setup further non-bitmapped commits' ' + test_commit_bulk --id=further 10 + ' + + rev_list_tests 'partial bitmap' + + test_expect_success 'fetch (partial bitmap)' ' + git --git-dir=clone.git fetch origin second:second && + git rev-parse HEAD >expect && + git --git-dir=clone.git rev-parse HEAD >actual && + test_cmp expect actual + ' + + test_expect_success 'enumerating progress counts pack-reused objects' ' + count=$(git rev-list --objects --all --count) && + git repack -adb && + + # check first with only reused objects; confirm that our + # progress showed the right number, and also that we did + # pack-reuse as expected. Check only the final "done" + # line of the meter (there may be an arbitrary number of + # intermediate lines ending with CR). + GIT_PROGRESS_DELAY=0 \ + git pack-objects --all --stdout --progress \ + </dev/null >/dev/null 2>stderr && + grep "Enumerating objects: $count, done" stderr && + grep "pack-reused $count" stderr && + + # now the same but with one non-reused object + git commit --allow-empty -m "an extra commit object" && + GIT_PROGRESS_DELAY=0 \ + git pack-objects --all --stdout --progress \ + </dev/null >/dev/null 2>stderr && + grep "Enumerating objects: $((count+1)), done" stderr && + grep "pack-reused $count" stderr + ' +} + +# have_delta <obj> <expected_base> +# +# Note that because this relies on cat-file, it might find _any_ copy of an +# object in the repository. The caller is responsible for making sure +# there's only one (e.g., via "repack -ad", or having just fetched a copy). 
+have_delta () { + echo $2 >expect && + echo $1 | git cat-file --batch-check="%(deltabase)" >actual && + test_cmp expect actual +} + +midx_checksum () { + test-tool read-midx --checksum "$1" +} diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf index afa91e38b0..180a41fe96 100644 --- a/t/lib-httpd/apache.conf +++ b/t/lib-httpd/apache.conf @@ -81,8 +81,6 @@ PassEnv GIT_TRACE PassEnv GIT_CONFIG_NOSYSTEM PassEnv GIT_TEST_SIDEBAND_ALL -SetEnvIf Git-Protocol ".*" GIT_PROTOCOL=$0 - Alias /dumb/ www/ Alias /auth/dumb/ www/auth/dumb/ @@ -117,6 +115,11 @@ Alias /auth/dumb/ www/auth/dumb/ SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH} SetEnv GIT_HTTP_EXPORT_ALL </LocationMatch> +<LocationMatch /smart_v0/> + SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH} + SetEnv GIT_HTTP_EXPORT_ALL + SetEnv GIT_PROTOCOL +</LocationMatch> ScriptAlias /smart/incomplete_length/git-upload-pack incomplete-length-upload-pack-v2-http.sh/ ScriptAlias /smart/incomplete_body/git-upload-pack incomplete-body-upload-pack-v2-http.sh/ ScriptAliasMatch /error_git_upload_pack/(.*)/git-upload-pack error.sh/ diff --git a/t/perf/lib-bitmap.sh b/t/perf/lib-bitmap.sh new file mode 100644 index 0000000000..63d3bc7cec --- /dev/null +++ b/t/perf/lib-bitmap.sh @@ -0,0 +1,69 @@ +# Helper functions for testing bitmap performance; see p5310. + +test_full_bitmap () { + test_perf 'simulated clone' ' + git pack-objects --stdout --all </dev/null >/dev/null + ' + + test_perf 'simulated fetch' ' + have=$(git rev-list HEAD~100 -1) && + { + echo HEAD && + echo ^$have + } | git pack-objects --revs --stdout >/dev/null + ' + + test_perf 'pack to file (bitmap)' ' + git pack-objects --use-bitmap-index --all pack1b </dev/null >/dev/null + ' + + test_perf 'rev-list (commits)' ' + git rev-list --all --use-bitmap-index >/dev/null + ' + + test_perf 'rev-list (objects)' ' + git rev-list --all --use-bitmap-index --objects >/dev/null + ' + + test_perf 'rev-list with tag negated via --not --all (objects)' ' + git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null + ' + + test_perf 'rev-list with negative tag (objects)' ' + git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null + ' + + test_perf 'rev-list count with blob:none' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=blob:none >/dev/null + ' + + test_perf 'rev-list count with blob:limit=1k' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=blob:limit=1k >/dev/null + ' + + test_perf 'rev-list count with tree:0' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=tree:0 >/dev/null + ' + + test_perf 'simulated partial clone' ' + git pack-objects --stdout --all --filter=blob:none </dev/null >/dev/null + ' +} + +test_partial_bitmap () { + test_perf 'clone (partial bitmap)' ' + git pack-objects --stdout --all </dev/null >/dev/null + ' + + test_perf 'pack to file (partial bitmap)' ' + git pack-objects --use-bitmap-index --all pack2b </dev/null >/dev/null + ' + + test_perf 'rev-list with tree filter (partial bitmap)' ' + git rev-list --use-bitmap-index --count --objects --all \ + --filter=tree:0 >/dev/null + ' +} diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh index 452be01056..7ad4f237bc 100755 --- a/t/perf/p5310-pack-bitmaps.sh +++ b/t/perf/p5310-pack-bitmaps.sh @@ -2,6 +2,7 @@ test_description='Tests pack performance using bitmaps' . ./perf-lib.sh +. 
"${TEST_DIRECTORY}/perf/lib-bitmap.sh" test_perf_large_repo @@ -25,56 +26,7 @@ test_perf 'repack to disk' ' git repack -ad ' -test_perf 'simulated clone' ' - git pack-objects --stdout --all </dev/null >/dev/null -' - -test_perf 'simulated fetch' ' - have=$(git rev-list HEAD~100 -1) && - { - echo HEAD && - echo ^$have - } | git pack-objects --revs --stdout >/dev/null -' - -test_perf 'pack to file (bitmap)' ' - git pack-objects --use-bitmap-index --all pack1b </dev/null >/dev/null -' - -test_perf 'rev-list (commits)' ' - git rev-list --all --use-bitmap-index >/dev/null -' - -test_perf 'rev-list (objects)' ' - git rev-list --all --use-bitmap-index --objects >/dev/null -' - -test_perf 'rev-list with tag negated via --not --all (objects)' ' - git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null -' - -test_perf 'rev-list with negative tag (objects)' ' - git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null -' - -test_perf 'rev-list count with blob:none' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=blob:none >/dev/null -' - -test_perf 'rev-list count with blob:limit=1k' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=blob:limit=1k >/dev/null -' - -test_perf 'rev-list count with tree:0' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=tree:0 >/dev/null -' - -test_perf 'simulated partial clone' ' - git pack-objects --stdout --all --filter=blob:none </dev/null >/dev/null -' +test_full_bitmap test_expect_success 'create partial bitmap state' ' # pick a commit to represent the repo tip in the past @@ -97,17 +49,6 @@ test_expect_success 'create partial bitmap state' ' git update-ref HEAD $orig_tip ' -test_perf 'clone (partial bitmap)' ' - git pack-objects --stdout --all </dev/null >/dev/null -' - -test_perf 'pack to file (partial bitmap)' ' - git pack-objects --use-bitmap-index --all pack2b </dev/null >/dev/null -' - -test_perf 'rev-list with tree filter (partial bitmap)' ' - git rev-list --use-bitmap-index --count --objects --all \ - --filter=tree:0 >/dev/null -' +test_partial_bitmap test_done diff --git a/t/perf/p5326-multi-pack-bitmaps.sh b/t/perf/p5326-multi-pack-bitmaps.sh new file mode 100755 index 0000000000..5845109ac7 --- /dev/null +++ b/t/perf/p5326-multi-pack-bitmaps.sh @@ -0,0 +1,43 @@ +#!/bin/sh + +test_description='Tests performance using midx bitmaps' +. ./perf-lib.sh +. 
"${TEST_DIRECTORY}/perf/lib-bitmap.sh" + +test_perf_large_repo + +test_expect_success 'enable multi-pack index' ' + git config core.multiPackIndex true +' + +test_perf 'setup multi-pack index' ' + git repack -ad && + git multi-pack-index write --bitmap +' + +test_full_bitmap + +test_expect_success 'create partial bitmap state' ' + # pick a commit to represent the repo tip in the past + cutoff=$(git rev-list HEAD~100 -1) && + orig_tip=$(git rev-parse HEAD) && + + # now pretend we have just one tip + rm -rf .git/logs .git/refs/* .git/packed-refs && + git update-ref HEAD $cutoff && + + # and then repack, which will leave us with a nice + # big bitmap pack of the "old" history, and all of + # the new history will be loose, as if it had been pushed + # up incrementally and exploded via unpack-objects + git repack -Ad && + git multi-pack-index write --bitmap && + + # and now restore our original tip, as if the pushes + # had happened + git update-ref HEAD $orig_tip +' + +test_partial_bitmap + +test_done diff --git a/t/perf/run b/t/perf/run index d19dec258a..55219aa405 100755 --- a/t/perf/run +++ b/t/perf/run @@ -74,7 +74,7 @@ set_git_test_installed () { mydir=$1 mydir_abs=$(cd $mydir && pwd) - mydir_abs_wrappers="$mydir_abs_wrappers/bin-wrappers" + mydir_abs_wrappers="$mydir_abs/bin-wrappers" if test -d "$mydir_abs_wrappers" then GIT_TEST_INSTALLED=$mydir_abs_wrappers diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh index cb87768513..5c342de713 100755 --- a/t/t0000-basic.sh +++ b/t/t0000-basic.sh @@ -1271,28 +1271,29 @@ P=$(test_oid root) test_expect_success 'git commit-tree records the correct tree in a commit' ' commit0=$(echo NO | git commit-tree $P) && - tree=$(git show --pretty=raw $commit0 | - sed -n -e "s/^tree //p" -e "/^author /q") && + git show --pretty=raw $commit0 >out && + tree=$(sed -n -e "s/^tree //p" -e "/^author /q" out) && test "z$tree" = "z$P" ' test_expect_success 'git commit-tree records the correct parent in a commit' ' commit1=$(echo NO | git commit-tree $P -p $commit0) && - parent=$(git show --pretty=raw $commit1 | - sed -n -e "s/^parent //p" -e "/^author /q") && + git show --pretty=raw $commit1 >out && + parent=$(sed -n -e "s/^parent //p" -e "/^author /q" out) && test "z$commit0" = "z$parent" ' test_expect_success 'git commit-tree omits duplicated parent in a commit' ' commit2=$(echo NO | git commit-tree $P -p $commit0 -p $commit0) && - parent=$(git show --pretty=raw $commit2 | - sed -n -e "s/^parent //p" -e "/^author /q" | - sort -u) && + git show --pretty=raw $commit2 >out && + cat >match.sed <<-\EOF && + s/^parent //p + /^author /q + EOF + parent=$(sed -n -f match.sed out | sort -u) && test "z$commit0" = "z$parent" && - numparent=$(git show --pretty=raw $commit2 | - sed -n -e "s/^parent //p" -e "/^author /q" | - wc -l) && - test $numparent = 1 + git show --pretty=raw $commit2 >out && + test_stdout_line_count = 1 sed -n -f match.sed out ' test_expect_success 'update-index D/F conflict' ' diff --git a/t/t0012-help.sh b/t/t0012-help.sh index 5679e29c62..913f34c8e9 100755 --- a/t/t0012-help.sh +++ b/t/t0012-help.sh @@ -73,6 +73,22 @@ test_expect_success 'git help -g' ' test_i18ngrep "^ tutorial " help.output ' +test_expect_success 'git help fails for non-existing html pages' ' + configure_help && + mkdir html-empty && + test_must_fail git -c help.htmlpath=html-empty help status && + test_must_be_empty test-browser.log +' + +test_expect_success 'git help succeeds without git.html' ' + configure_help && + mkdir html-with-docs && + touch html-with-docs/git-status.html && + 
git -c help.htmlpath=html-with-docs help status && + echo "html-with-docs/git-status.html" >expect && + test_cmp expect test-browser.log +' + test_expect_success 'generate builtin list' ' git --list-cmds=builtins >builtins ' diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index b5749f327d..33dfc9cd56 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -6,6 +6,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME . ./test-lib.sh +. "$TEST_DIRECTORY"/lib-terminal.sh TEST_ROOT="$PWD" PATH=$TEST_ROOT:$PATH @@ -1061,4 +1062,74 @@ test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \ ) ' +test_expect_success PERL 'setup for progress tests' ' + git init progress && + ( + cd progress && + git config filter.delay.process "rot13-filter.pl delay-progress.log clean smudge delay" && + git config filter.delay.required true && + + echo "*.a filter=delay" >.gitattributes && + touch test-delay10.a && + git add . && + git commit -m files + ) +' + +test_delayed_checkout_progress () { + if test "$1" = "!" + then + local expect_progress=N && + shift + else + local expect_progress= + fi && + + if test $# -lt 1 + then + BUG "no command given to test_delayed_checkout_progress" + fi && + + ( + cd progress && + GIT_PROGRESS_DELAY=0 && + export GIT_PROGRESS_DELAY && + rm -f *.a delay-progress.log && + + "$@" 2>err && + grep "IN: smudge test-delay10.a .* \\[DELAYED\\]" delay-progress.log && + if test "$expect_progress" = N + then + ! grep "Filtering content" err + else + grep "Filtering content" err + fi + ) +} + +for mode in pathspec branch +do + case "$mode" in + pathspec) opt='.' ;; + branch) opt='-f HEAD' ;; + esac + + test_expect_success PERL,TTY "delayed checkout shows progress by default on tty ($mode checkout)" ' + test_delayed_checkout_progress test_terminal git checkout $opt + ' + + test_expect_success PERL "delayed checkout ommits progress on non-tty ($mode checkout)" ' + test_delayed_checkout_progress ! git checkout $opt + ' + + test_expect_success PERL,TTY "delayed checkout ommits progress with --quiet ($mode checkout)" ' + test_delayed_checkout_progress ! test_terminal git checkout --quiet $opt + ' + + test_expect_success PERL,TTY "delayed checkout honors --[no]-progress ($mode checkout)" ' + test_delayed_checkout_progress ! 
test_terminal git checkout --no-progress $opt && + test_delayed_checkout_progress test_terminal git checkout --quiet --progress $opt + ' +done + test_done diff --git a/t/t0040-parse-options.sh b/t/t0040-parse-options.sh index ad4746d899..da310ed29b 100755 --- a/t/t0040-parse-options.sh +++ b/t/t0040-parse-options.sh @@ -37,7 +37,6 @@ String options --list <str> add str to list Magic arguments - --quux means --quux -NUM set integer to NUM + same as -b --ambiguous positive ambiguity @@ -263,10 +262,6 @@ test_expect_success 'detect possible typos' ' test_cmp typo.err output.err ' -test_expect_success 'keep some options as arguments' ' - test-tool parse-options --expect="arg 00: --quux" --quux -' - cat >expect <<\EOF Callback: "four", 0 boolean: 5 diff --git a/t/t0090-cache-tree.sh b/t/t0090-cache-tree.sh index 9bf66c9e68..9067572648 100755 --- a/t/t0090-cache-tree.sh +++ b/t/t0090-cache-tree.sh @@ -195,6 +195,7 @@ test_expect_success 'reset --hard gives cache-tree' ' test_expect_success 'reset --hard without index gives cache-tree' ' rm -f .git/index && + git clean -fd && git reset --hard && test_cache_tree ' diff --git a/t/t0301-credential-cache.sh b/t/t0301-credential-cache.sh index ebd5fa5249..698b7159f0 100755 --- a/t/t0301-credential-cache.sh +++ b/t/t0301-credential-cache.sh @@ -9,6 +9,21 @@ test -z "$NO_UNIX_SOCKETS" || { test_done } +uname_s=$(uname -s) +case $uname_s in +*MINGW*) + test_path_is_socket () { + # `test -S` cannot detect Win10's Unix sockets + test_path_exists "$1" + } + ;; +*) + test_path_is_socket () { + test -S "$1" + } + ;; +esac + # don't leave a stale daemon running test_atexit 'git credential-cache exit' @@ -21,7 +36,7 @@ test_expect_success 'socket defaults to ~/.cache/git/credential/socket' ' rmdir -p .cache/git/credential/ " && test_path_is_missing "$HOME/.git-credential-cache" && - test -S "$HOME/.cache/git/credential/socket" + test_path_is_socket "$HOME/.cache/git/credential/socket" ' XDG_CACHE_HOME="$HOME/xdg" @@ -31,7 +46,7 @@ helper_test cache test_expect_success "use custom XDG_CACHE_HOME if set and default sockets are not created" ' test_when_finished "git credential-cache exit" && - test -S "$XDG_CACHE_HOME/git/credential/socket" && + test_path_is_socket "$XDG_CACHE_HOME/git/credential/socket" && test_path_is_missing "$HOME/.git-credential-cache/socket" && test_path_is_missing "$HOME/.cache/git/credential/socket" ' @@ -48,7 +63,7 @@ test_expect_success 'credential-cache --socket option overrides default location username=store-user password=store-pass EOF - test -S "$HOME/dir/socket" + test_path_is_socket "$HOME/dir/socket" ' test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" ' @@ -62,7 +77,7 @@ test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" ' username=store-user password=store-pass EOF - test -S "$HOME/.cache/git/credential/socket" && + test_path_is_socket "$HOME/.cache/git/credential/socket" && XDG_CACHE_HOME="$HOME/xdg" && export XDG_CACHE_HOME && check approve cache <<-\EOF && @@ -71,7 +86,7 @@ test_expect_success "use custom XDG_CACHE_HOME even if xdg socket exists" ' username=store-user password=store-pass EOF - test -S "$XDG_CACHE_HOME/git/credential/socket" + test_path_is_socket "$XDG_CACHE_HOME/git/credential/socket" ' test_expect_success 'use user socket if user directory exists' ' @@ -79,14 +94,15 @@ test_expect_success 'use user socket if user directory exists' ' git credential-cache exit && rmdir \"\$HOME/.git-credential-cache/\" " && - mkdir -p -m 700 "$HOME/.git-credential-cache/" && + 
mkdir -p "$HOME/.git-credential-cache/" && + chmod 700 "$HOME/.git-credential-cache/" && check approve cache <<-\EOF && protocol=https host=example.com username=store-user password=store-pass EOF - test -S "$HOME/.git-credential-cache/socket" + test_path_is_socket "$HOME/.git-credential-cache/socket" ' test_expect_success SYMLINKS 'use user socket if user directory is a symlink to a directory' ' @@ -103,7 +119,7 @@ test_expect_success SYMLINKS 'use user socket if user directory is a symlink to username=store-user password=store-pass EOF - test -S "$HOME/.git-credential-cache/socket" + test_path_is_socket "$HOME/.git-credential-cache/socket" ' helper_test_timeout cache --timeout=1 diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh index a211a66c67..bba679685f 100755 --- a/t/t0410-partial-clone.sh +++ b/t/t0410-partial-clone.sh @@ -4,6 +4,9 @@ test_description='partial clone' . ./test-lib.sh +# missing promisor objects cause repacks which write bitmaps to fail +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 + delete_object () { rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|') } @@ -536,7 +539,13 @@ test_expect_success 'gc does not repack promisor objects if there are none' ' repack_and_check () { rm -rf repo2 && cp -r repo repo2 && - git -C repo2 repack $1 -d && + if test x"$1" = "x--must-fail" + then + shift + test_must_fail git -C repo2 repack $1 -d + else + git -C repo2 repack $1 -d + fi && git -C repo2 fsck && git -C repo2 cat-file -e $2 && @@ -561,6 +570,7 @@ test_expect_success 'repack -d does not irreversibly delete promisor objects' ' printf "$THREE\n" | pack_as_from_promisor && delete_object repo "$ONE" && + repack_and_check --must-fail -ab "$TWO" "$THREE" && repack_and_check -a "$TWO" "$THREE" && repack_and_check -A "$TWO" "$THREE" && repack_and_check -l "$TWO" "$THREE" diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh index 38fc8340f5..71236981e6 100755 --- a/t/t1091-sparse-checkout-builtin.sh +++ b/t/t1091-sparse-checkout-builtin.sh @@ -642,4 +642,63 @@ test_expect_success MINGW 'cone mode replaces backslashes with slashes' ' check_files repo/deep a deeper1 ' +test_expect_success 'cone mode clears ignored subdirectories' ' + rm repo/.git/info/sparse-checkout && + + git -C repo sparse-checkout init --cone && + git -C repo sparse-checkout set deep/deeper1 && + + cat >repo/.gitignore <<-\EOF && + obj/ + *.o + EOF + + git -C repo add .gitignore && + git -C repo commit -m ".gitignore" && + + mkdir -p repo/obj repo/folder1/obj repo/deep/deeper2/obj && + for file in folder1/obj/a obj/a folder1/file.o folder1.o \ + deep/deeper2/obj/a deep/deeper2/file.o file.o + do + echo ignored >repo/$file || return 1 + done && + + git -C repo status --porcelain=v2 >out && + test_must_be_empty out && + + git -C repo sparse-checkout reapply && + test_path_is_missing repo/folder1 && + test_path_is_missing repo/deep/deeper2 && + test_path_is_dir repo/obj && + test_path_is_file repo/file.o && + + git -C repo status --porcelain=v2 >out && + test_must_be_empty out && + + git -C repo sparse-checkout set deep/deeper2 && + test_path_is_missing repo/deep/deeper1 && + test_path_is_dir repo/deep/deeper2 && + test_path_is_dir repo/obj && + test_path_is_file repo/file.o && + + >repo/deep/deeper2/ignored.o && + >repo/deep/deeper2/untracked && + + # When an untracked file is in the way, all untracked files + # (even ignored files) are preserved. 
+ git -C repo sparse-checkout set folder1 2>err && + grep "contains untracked files" err && + test_path_is_file repo/deep/deeper2/ignored.o && + test_path_is_file repo/deep/deeper2/untracked && + + # The rest of the cone matches expectation + test_path_is_missing repo/deep/deeper1 && + test_path_is_dir repo/obj && + test_path_is_file repo/file.o && + + git -C repo status --porcelain=v2 >out && + echo "? deep/deeper2/untracked" >expect && + test_cmp expect out +' + test_done diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh index ddc86bb415..886e78715f 100755 --- a/t/t1092-sparse-checkout-compatibility.sh +++ b/t/t1092-sparse-checkout-compatibility.sh @@ -47,7 +47,7 @@ test_expect_success 'setup' ' git checkout -b base && for dir in folder1 folder2 deep do - git checkout -b update-$dir && + git checkout -b update-$dir base && echo "updated $dir" >$dir/a && git commit -a -m "update $dir" || return 1 done && @@ -481,14 +481,17 @@ test_expect_success 'checkout and reset (mixed) [sparse]' ' test_sparse_match git reset update-folder2 ' -test_expect_success 'merge' ' +test_expect_success 'merge, cherry-pick, and rebase' ' init_repos && - test_all_match git checkout -b merge update-deep && - test_all_match git merge -m "folder1" update-folder1 && - test_all_match git rev-parse HEAD^{tree} && - test_all_match git merge -m "folder2" update-folder2 && - test_all_match git rev-parse HEAD^{tree} + for OPERATION in "merge -m merge" cherry-pick rebase + do + test_all_match git checkout -B temp update-deep && + test_all_match git $OPERATION update-folder1 && + test_all_match git rev-parse HEAD^{tree} && + test_all_match git $OPERATION update-folder2 && + test_all_match git rev-parse HEAD^{tree} || return 1 + done ' # NEEDSWORK: This test is documenting current behavior, but that @@ -524,6 +527,38 @@ test_expect_success 'merge with conflict outside cone' ' test_all_match git rev-parse HEAD^{tree} ' +test_expect_success 'cherry-pick/rebase with conflict outside cone' ' + init_repos && + + for OPERATION in cherry-pick rebase + do + test_all_match git checkout -B tip && + test_all_match git reset --hard merge-left && + test_all_match git status --porcelain=v2 && + test_all_match test_must_fail git $OPERATION merge-right && + test_all_match git status --porcelain=v2 && + + # Resolve the conflict in different ways: + # 1. Revert to the base + test_all_match git checkout base -- deep/deeper2/a && + test_all_match git status --porcelain=v2 && + + # 2. Add the file with conflict markers + test_all_match git add folder1/a && + test_all_match git status --porcelain=v2 && + + # 3. Rename the file to another sparse filename and + # accept conflict markers as resolved content. + run_on_all mv folder2/a folder2/z && + test_all_match git add folder2 && + test_all_match git status --porcelain=v2 && + + test_all_match git $OPERATION --continue && + test_all_match git status --porcelain=v2 && + test_all_match git rev-parse HEAD^{tree} || return 1 + done +' + test_expect_success 'merge with outside renames' ' init_repos && @@ -617,8 +652,17 @@ test_expect_success 'sparse-index is expanded and converted back' ' ensure_not_expanded () { rm -f trace2.txt && echo >>sparse-index/untracked.txt && - GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ - git -C sparse-index "$@" && + + if test "$1" = "!" 
+ then + shift && + test_must_fail env \ + GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ + git -C sparse-index "$@" || return 1 + else + GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ + git -C sparse-index "$@" || return 1 + fi && test_region ! index ensure_full_index trace2.txt } @@ -647,7 +691,35 @@ test_expect_success 'sparse-index is not expanded' ' echo >>sparse-index/extra.txt && ensure_not_expanded add extra.txt && echo >>sparse-index/untracked.txt && - ensure_not_expanded add . + ensure_not_expanded add . && + + ensure_not_expanded checkout -f update-deep && + test_config -C sparse-index pull.twohead ort && + ( + sane_unset GIT_TEST_MERGE_ALGORITHM && + for OPERATION in "merge -m merge" cherry-pick rebase + do + ensure_not_expanded merge -m merge update-folder1 && + ensure_not_expanded merge -m merge update-folder2 || return 1 + done + ) +' + +test_expect_success 'sparse-index is not expanded: merge conflict in cone' ' + init_repos && + + for side in right left + do + git -C sparse-index checkout -b expand-$side base && + echo $side >sparse-index/deep/a && + git -C sparse-index commit -a -m "$side" || return 1 + done && + + ( + sane_unset GIT_TEST_MERGE_ALGORITHM && + git -C sparse-index config pull.twohead ort && + ensure_not_expanded ! merge -m merged expand-right + ) ' # NEEDSWORK: a sparse-checkout behaves differently from a full checkout diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh index 4506cd435b..0d4f73acaa 100755 --- a/t/t1400-update-ref.sh +++ b/t/t1400-update-ref.sh @@ -1598,6 +1598,40 @@ test_expect_success 'transaction cannot restart ongoing transaction' ' test_must_fail git show-ref --verify refs/heads/restart ' +test_expect_success PIPE 'transaction flushes status updates' ' + mkfifo in out && + (git update-ref --stdin <in >out &) && + + exec 9>in && + exec 8<out && + test_when_finished "exec 9>&-" && + test_when_finished "exec 8<&-" && + + echo "start" >&9 && + echo "start: ok" >expected && + read line <&8 && + echo "$line" >actual && + test_cmp expected actual && + + echo "create refs/heads/flush $A" >&9 && + + echo prepare >&9 && + echo "prepare: ok" >expected && + read line <&8 && + echo "$line" >actual && + test_cmp expected actual && + + # This must now fail given that we have locked the ref. 
+ test_must_fail git update-ref refs/heads/flush $B 2>stderr && + grep "fatal: update_ref failed for ref ${SQ}refs/heads/flush${SQ}: cannot lock ref" stderr && + + echo commit >&9 && + echo "commit: ok" >expected && + read line <&8 && + echo "$line" >actual && + test_cmp expected actual +' + test_expect_success 'directory not created deleting packed ref' ' git branch d1/d2/r1 HEAD && git pack-refs --all && diff --git a/t/t2021-checkout-overwrite.sh b/t/t2021-checkout-overwrite.sh index 70d69263e6..660132ff8d 100755 --- a/t/t2021-checkout-overwrite.sh +++ b/t/t2021-checkout-overwrite.sh @@ -48,6 +48,7 @@ test_expect_success 'checkout commit with dir must not remove untracked a/b' ' test_expect_success SYMLINKS 'the symlink remained' ' + test_when_finished "rm a/b" && test -h a/b ' diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh index cc4b10236e..e575ffb4ff 100755 --- a/t/t3200-branch.sh +++ b/t/t3200-branch.sh @@ -1272,6 +1272,19 @@ test_expect_success 'attempt to delete a branch merged to its base' ' test_must_fail git branch -d my10 ' +test_expect_success 'branch --delete --force removes dangling branch' ' + git checkout main && + test_commit unstable && + hash=$(git rev-parse HEAD) && + objpath=$(echo $hash | sed -e "s|^..|.git/objects/&/|") && + git branch --no-track dangling && + mv $objpath $objpath.x && + test_when_finished "mv $objpath.x $objpath" && + git branch --delete --force dangling && + git for-each-ref refs/heads/dangling >actual && + test_must_be_empty actual +' + test_expect_success 'use --edit-description' ' write_script editor <<-\EOF && echo "New contents" >"$1" diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh index d877872e8f..972ce026bb 100755 --- a/t/t3404-rebase-interactive.sh +++ b/t/t3404-rebase-interactive.sh @@ -297,6 +297,7 @@ test_expect_success 'abort with error when new base cannot be checked out' ' output && test_i18ngrep "file1" output && test_path_is_missing .git/rebase-merge && + rm file1 && git reset --hard HEAD^ ' diff --git a/t/t3407-rebase-abort.sh b/t/t3407-rebase-abort.sh index 7c381fbc89..ebbaed147a 100755 --- a/t/t3407-rebase-abort.sh +++ b/t/t3407-rebase-abort.sh @@ -7,77 +7,77 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME . 
./test-lib.sh -### Test that we handle space characters properly -work_dir="$(pwd)/test dir" - test_expect_success setup ' - mkdir -p "$work_dir" && - cd "$work_dir" && - git init && - echo a > a && - git add a && - git commit -m a && + test_commit a a a && git branch to-rebase && - echo b > a && - git commit -a -m b && - echo c > a && - git commit -a -m c && + test_commit --annotate b a b && + test_commit --annotate c a c && git checkout to-rebase && - echo d > a && - git commit -a -m "merge should fail on this" && - echo e > a && - git commit -a -m "merge should fail on this, too" && - git branch pre-rebase + test_commit "merge should fail on this" a d d && + test_commit --annotate "merge should fail on this, too" a e pre-rebase ' +# Check that HEAD is equal to "pre-rebase" and the current branch is +# "to-rebase" +check_head() { + test_cmp_rev HEAD pre-rebase^{commit} && + test "$(git symbolic-ref HEAD)" = refs/heads/to-rebase +} + testrebase() { type=$1 - dotest=$2 + state_dir=$2 test_expect_success "rebase$type --abort" ' - cd "$work_dir" && # Clean up the state from the previous one git reset --hard pre-rebase && test_must_fail git rebase$type main && - test_path_is_dir "$dotest" && + test_path_is_dir "$state_dir" && git rebase --abort && - test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) && - test ! -d "$dotest" + check_head && + test_path_is_missing "$state_dir" ' test_expect_success "rebase$type --abort after --skip" ' - cd "$work_dir" && # Clean up the state from the previous one git reset --hard pre-rebase && test_must_fail git rebase$type main && - test_path_is_dir "$dotest" && + test_path_is_dir "$state_dir" && test_must_fail git rebase --skip && - test $(git rev-parse HEAD) = $(git rev-parse main) && + test_cmp_rev HEAD main && git rebase --abort && - test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) && - test ! -d "$dotest" + check_head && + test_path_is_missing "$state_dir" ' test_expect_success "rebase$type --abort after --continue" ' - cd "$work_dir" && # Clean up the state from the previous one git reset --hard pre-rebase && test_must_fail git rebase$type main && - test_path_is_dir "$dotest" && + test_path_is_dir "$state_dir" && echo c > a && echo d >> a && git add a && test_must_fail git rebase --continue && - test $(git rev-parse HEAD) != $(git rev-parse main) && + test_cmp_rev ! HEAD main && + git rebase --abort && + check_head && + test_path_is_missing "$state_dir" + ' + + test_expect_success "rebase$type --abort when checking out a tag" ' + test_when_finished "git symbolic-ref HEAD refs/heads/to-rebase" && + git reset --hard a -- && + test_must_fail git rebase$type --onto b c pre-rebase && + test_cmp_rev HEAD b^{commit} && git rebase --abort && - test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) && - test ! -d "$dotest" + test_cmp_rev HEAD pre-rebase^{commit} && + ! 
git symbolic-ref HEAD ' test_expect_success "rebase$type --abort does not update reflog" ' - cd "$work_dir" && # Clean up the state from the previous one git reset --hard pre-rebase && git reflog show to-rebase > reflog_before && @@ -89,7 +89,6 @@ testrebase() { ' test_expect_success 'rebase --abort can not be used with other options' ' - cd "$work_dir" && # Clean up the state from the previous one git reset --hard pre-rebase && test_must_fail git rebase$type main && @@ -97,33 +96,21 @@ testrebase() { test_must_fail git rebase --abort -v && git rebase --abort ' + + test_expect_success "rebase$type --quit" ' + test_when_finished "git symbolic-ref HEAD refs/heads/to-rebase" && + # Clean up the state from the previous one + git reset --hard pre-rebase && + test_must_fail git rebase$type main && + test_path_is_dir $state_dir && + head_before=$(git rev-parse HEAD) && + git rebase --quit && + test_cmp_rev HEAD $head_before && + test_path_is_missing .git/rebase-apply + ' } testrebase " --apply" .git/rebase-apply testrebase " --merge" .git/rebase-merge -test_expect_success 'rebase --apply --quit' ' - cd "$work_dir" && - # Clean up the state from the previous one - git reset --hard pre-rebase && - test_must_fail git rebase --apply main && - test_path_is_dir .git/rebase-apply && - head_before=$(git rev-parse HEAD) && - git rebase --quit && - test $(git rev-parse HEAD) = $head_before && - test ! -d .git/rebase-apply -' - -test_expect_success 'rebase --merge --quit' ' - cd "$work_dir" && - # Clean up the state from the previous one - git reset --hard pre-rebase && - test_must_fail git rebase --merge main && - test_path_is_dir .git/rebase-merge && - head_before=$(git rev-parse HEAD) && - git rebase --quit && - test $(git rev-parse HEAD) = $head_before && - test ! -d .git/rebase-merge -' - test_done diff --git a/t/t3435-rebase-gpg-sign.sh b/t/t3435-rebase-gpg-sign.sh index ec10766858..5f8ba2c739 100755 --- a/t/t3435-rebase-gpg-sign.sh +++ b/t/t3435-rebase-gpg-sign.sh @@ -65,6 +65,7 @@ test_rebase_gpg_sign ! true -i --gpg-sign --no-gpg-sign test_rebase_gpg_sign false -i --no-gpg-sign --gpg-sign test_expect_failure 'rebase -p --no-gpg-sign override commit.gpgsign' ' + test_when_finished "git clean -f" && git reset --hard merged && git config commit.gpgsign true && git rebase -p --no-gpg-sign --onto=one fork-point main && diff --git a/t/t3501-revert-cherry-pick.sh b/t/t3501-revert-cherry-pick.sh index 9d100cd188..4b5b607673 100755 --- a/t/t3501-revert-cherry-pick.sh +++ b/t/t3501-revert-cherry-pick.sh @@ -158,4 +158,20 @@ test_expect_success 'cherry-pick works with dirty renamed file' ' grep -q "^modified$" renamed ' +test_expect_success 'advice from failed revert' ' + test_commit --no-tag "add dream" dream dream && + dream_oid=$(git rev-parse --short HEAD) && + cat <<-EOF >expected && + error: could not revert $dream_oid... add dream + hint: After resolving the conflicts, mark them with + hint: "git add/rm <pathspec>", then run + hint: "git revert --continue". + hint: You can instead skip this commit with "git revert --skip". + hint: To abort and get back to the state before "git revert", + hint: run "git revert --abort". 
+ EOF + test_commit --append --no-tag "double-add dream" dream dream && + test_must_fail git revert HEAD^ 2>actual && + test_cmp expected actual +' test_done diff --git a/t/t3507-cherry-pick-conflict.sh b/t/t3507-cherry-pick-conflict.sh index 014001b8f3..979e843c65 100755 --- a/t/t3507-cherry-pick-conflict.sh +++ b/t/t3507-cherry-pick-conflict.sh @@ -47,20 +47,23 @@ test_expect_success 'failed cherry-pick does not advance HEAD' ' test "$head" = "$newhead" ' -test_expect_success 'advice from failed cherry-pick' " +test_expect_success 'advice from failed cherry-pick' ' pristine_detach initial && - picked=\$(git rev-parse --short picked) && + picked=$(git rev-parse --short picked) && cat <<-EOF >expected && - error: could not apply \$picked... picked - hint: after resolving the conflicts, mark the corrected paths - hint: with 'git add <paths>' or 'git rm <paths>' - hint: and commit the result with 'git commit' + error: could not apply $picked... picked + hint: After resolving the conflicts, mark them with + hint: "git add/rm <pathspec>", then run + hint: "git cherry-pick --continue". + hint: You can instead skip this commit with "git cherry-pick --skip". + hint: To abort and get back to the state before "git cherry-pick", + hint: run "git cherry-pick --abort". EOF test_must_fail git cherry-pick picked 2>actual && test_cmp expected actual -" +' test_expect_success 'advice from failed cherry-pick --no-commit' " pristine_detach initial && diff --git a/t/t3510-cherry-pick-sequence.sh b/t/t3510-cherry-pick-sequence.sh index 49010aa946..3b0fa66c33 100755 --- a/t/t3510-cherry-pick-sequence.sh +++ b/t/t3510-cherry-pick-sequence.sh @@ -238,6 +238,7 @@ test_expect_success 'allow skipping commit but not abort for a new history' ' ' test_expect_success 'allow skipping stopped cherry-pick because of untracked file modifications' ' + test_when_finished "rm unrelated" && pristine_detach initial && git rm --cached unrelated && git commit -m "untrack unrelated" && diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh index 873aa56e35..f0a82be9de 100755 --- a/t/t3903-stash.sh +++ b/t/t3903-stash.sh @@ -1307,4 +1307,62 @@ test_expect_success 'stash -c stash.useBuiltin=false warning ' ' test_must_be_empty err ' +test_expect_success 'git stash succeeds despite directory/file change' ' + test_create_repo directory_file_switch_v1 && + ( + cd directory_file_switch_v1 && + test_commit init && + + test_write_lines this file has some words >filler && + git add filler && + git commit -m filler && + + git rm filler && + mkdir filler && + echo contents >filler/file && + git stash push + ) +' + +test_expect_success 'git stash can pop file -> directory saved changes' ' + test_create_repo directory_file_switch_v2 && + ( + cd directory_file_switch_v2 && + test_commit init && + + test_write_lines this file has some words >filler && + git add filler && + git commit -m filler && + + git rm filler && + mkdir filler && + echo contents >filler/file && + cp filler/file expect && + git stash push --include-untracked && + git stash apply --index && + test_cmp expect filler/file + ) +' + +test_expect_success 'git stash can pop directory -> file saved changes' ' + test_create_repo directory_file_switch_v3 && + ( + cd directory_file_switch_v3 && + test_commit init && + + mkdir filler && + test_write_lines some words >filler/file1 && + test_write_lines and stuff >filler/file2 && + git add filler && + git commit -m filler && + + git rm -rf filler && + echo contents >filler && + cp filler expect && + git stash push --include-untracked && + git 
stash apply --index && + test_cmp expect filler + ) +' + test_done diff --git a/t/t4018/php-enum b/t/t4018/php-enum new file mode 100644 index 0000000000..91a69c1a2b --- /dev/null +++ b/t/t4018/php-enum @@ -0,0 +1,4 @@ +enum RIGHT: string +{ + case Foo = 'ChangeMe'; +} diff --git a/t/t4045-diff-relative.sh b/t/t4045-diff-relative.sh index 61ba5f707f..fab351b48a 100755 --- a/t/t4045-diff-relative.sh +++ b/t/t4045-diff-relative.sh @@ -162,4 +162,57 @@ check_diff_relative_option subdir file2 true --no-relative --relative check_diff_relative_option . file2 false --no-relative --relative=subdir check_diff_relative_option . file2 true --no-relative --relative=subdir +test_expect_success 'setup diff --relative unmerged' ' + test_commit zero file0 && + test_commit base subdir/file0 && + git switch -c br1 && + test_commit one file0 && + test_commit sub1 subdir/file0 && + git switch -c br2 base && + test_commit two file0 && + git switch -c br3 && + test_commit sub3 subdir/file0 +' + +test_expect_success 'diff --relative without change in subdir' ' + git switch br2 && + test_when_finished "git merge --abort" && + test_must_fail git merge one && + git -C subdir diff --relative >out && + test_must_be_empty out && + git -C subdir diff --relative --name-only >out && + test_must_be_empty out +' + +test_expect_success 'diff --relative --name-only with change in subdir' ' + git switch br3 && + test_when_finished "git merge --abort" && + test_must_fail git merge sub1 && + test_write_lines file0 file0 >expected && + git -C subdir diff --relative --name-only >out && + test_cmp expected out +' + +test_expect_failure 'diff --relative with change in subdir' ' + git switch br3 && + br1_blob=$(git rev-parse --short --verify br1:subdir/file0) && + br3_blob=$(git rev-parse --short --verify br3:subdir/file0) && + test_when_finished "git merge --abort" && + test_must_fail git merge br1 && + cat >expected <<-EOF && + diff --cc file0 + index $br3_blob,$br1_blob..0000000 + --- a/file0 + +++ b/file0 + @@@ -1,1 -1,1 +1,5 @@@ + ++<<<<<<< HEAD + +sub3 + ++======= + + sub1 + ++>>>>>>> br1 + EOF + git -C subdir diff --relative >out && + test_cmp expected out +' + test_done diff --git a/t/t4060-diff-submodule-option-diff-format.sh b/t/t4060-diff-submodule-option-diff-format.sh index dc7b242697..d86e38abd8 100755 --- a/t/t4060-diff-submodule-option-diff-format.sh +++ b/t/t4060-diff-submodule-option-diff-format.sh @@ -361,7 +361,6 @@ test_expect_success 'typechanged submodule(submodule->blob)' ' rm -f sm1 && test_create_repo sm1 && head6=$(add_file sm1 foo6 foo7) -fullhead6=$(cd sm1; git rev-parse --verify HEAD) test_expect_success 'nonexistent commit' ' git diff-index -p --submodule=diff HEAD >actual && cat >expected <<-EOF && @@ -704,10 +703,26 @@ test_expect_success 'path filter' ' diff_cmp expected actual ' -commit_file sm2 +cat >.gitmodules <<-EOF +[submodule "sm2"] + path = sm2 + url = bogus_url +EOF +git add .gitmodules +commit_file sm2 .gitmodules + test_expect_success 'given commit' ' git diff-index -p --submodule=diff HEAD^ >actual && cat >expected <<-EOF && + diff --git a/.gitmodules b/.gitmodules + new file mode 100644 + index 1234567..89abcde + --- /dev/null + +++ b/.gitmodules + @@ -0,0 +1,3 @@ + +[submodule "sm2"] + +path = sm2 + +url = bogus_url Submodule sm1 $head7...0000000 (submodule deleted) Submodule sm2 0000000...$head9 (new submodule) diff --git a/sm2/foo8 b/sm2/foo8 @@ -729,15 +744,21 @@ test_expect_success 'given commit' ' ' test_expect_success 'setup .git file for sm2' ' - (cd sm2 && - 
REAL="$(pwd)/../.real" && - mv .git "$REAL" && - echo "gitdir: $REAL" >.git) + git submodule absorbgitdirs sm2 ' test_expect_success 'diff --submodule=diff with .git file' ' git diff --submodule=diff HEAD^ >actual && cat >expected <<-EOF && + diff --git a/.gitmodules b/.gitmodules + new file mode 100644 + index 1234567..89abcde + --- /dev/null + +++ b/.gitmodules + @@ -0,0 +1,3 @@ + +[submodule "sm2"] + +path = sm2 + +url = bogus_url Submodule sm1 $head7...0000000 (submodule deleted) Submodule sm2 0000000...$head9 (new submodule) diff --git a/sm2/foo8 b/sm2/foo8 @@ -758,9 +779,67 @@ test_expect_success 'diff --submodule=diff with .git file' ' diff_cmp expected actual ' +mv sm2 sm2-bak + +test_expect_success 'deleted submodule with .git file' ' + git diff-index -p --submodule=diff HEAD >actual && + cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) + Submodule sm2 $head9...0000000 (submodule deleted) + diff --git a/sm2/foo8 b/sm2/foo8 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/foo9 b/sm2/foo9 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + -foo9 + EOF + diff_cmp expected actual +' + +echo submodule-to-blob>sm2 + +test_expect_success 'typechanged(submodule->blob) submodule with .git file' ' + git diff-index -p --submodule=diff HEAD >actual && + cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) + Submodule sm2 $head9...0000000 (submodule deleted) + diff --git a/sm2/foo8 b/sm2/foo8 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/foo9 b/sm2/foo9 + deleted file mode 100644 + index 1234567..89abcde + --- a/sm2/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + -foo9 + diff --git a/sm2 b/sm2 + new file mode 100644 + index 1234567..89abcde + --- /dev/null + +++ b/sm2 + @@ -0,0 +1 @@ + +submodule-to-blob + EOF + diff_cmp expected actual +' + +rm sm2 +mv sm2-bak sm2 + test_expect_success 'setup nested submodule' ' - git submodule add -f ./sm2 && - git commit -a -m "add sm2" && git -C sm2 submodule add ../sm2 nested && git -C sm2 commit -a -m "nested sub" && head10=$(git -C sm2 rev-parse --short --verify HEAD) @@ -791,6 +870,7 @@ test_expect_success 'diff --submodule=diff with moved nested submodule HEAD' ' test_expect_success 'diff --submodule=diff recurses into nested submodules' ' cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) Submodule sm2 contains modified content Submodule sm2 $head9..$head10: diff --git a/sm2/.gitmodules b/sm2/.gitmodules @@ -830,4 +910,67 @@ test_expect_success 'diff --submodule=diff recurses into nested submodules' ' diff_cmp expected actual ' +(cd sm2; commit_file nested) +commit_file sm2 +head12=$(cd sm2; git rev-parse --short --verify HEAD) + +mv sm2 sm2-bak + +test_expect_success 'diff --submodule=diff recurses into deleted nested submodules' ' + cat >expected <<-EOF && + Submodule sm1 $head7...0000000 (submodule deleted) + Submodule sm2 $head12...0000000 (submodule deleted) + diff --git a/sm2/.gitmodules b/sm2/.gitmodules + deleted file mode 100644 + index 3a816b8..0000000 + --- a/sm2/.gitmodules + +++ /dev/null + @@ -1,3 +0,0 @@ + -[submodule "nested"] + - path = nested + - url = ../sm2 + diff --git a/sm2/foo8 b/sm2/foo8 + deleted file mode 100644 + index db9916b..0000000 + --- a/sm2/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/foo9 b/sm2/foo9 + deleted file mode 
100644 + index 9c3b4f6..0000000 + --- a/sm2/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + -foo9 + Submodule nested $head11...0000000 (submodule deleted) + diff --git a/sm2/nested/file b/sm2/nested/file + deleted file mode 100644 + index ca281f5..0000000 + --- a/sm2/nested/file + +++ /dev/null + @@ -1 +0,0 @@ + -nested content + diff --git a/sm2/nested/foo8 b/sm2/nested/foo8 + deleted file mode 100644 + index db9916b..0000000 + --- a/sm2/nested/foo8 + +++ /dev/null + @@ -1 +0,0 @@ + -foo8 + diff --git a/sm2/nested/foo9 b/sm2/nested/foo9 + deleted file mode 100644 + index 9c3b4f6..0000000 + --- a/sm2/nested/foo9 + +++ /dev/null + @@ -1 +0,0 @@ + -foo9 + EOF + git diff --submodule=diff >actual 2>err && + test_must_be_empty err && + diff_cmp expected actual +' + +mv sm2-bak sm2 + test_done diff --git a/t/t4108-apply-threeway.sh b/t/t4108-apply-threeway.sh index 65147efdea..cc3aa3314a 100755 --- a/t/t4108-apply-threeway.sh +++ b/t/t4108-apply-threeway.sh @@ -230,4 +230,49 @@ test_expect_success 'apply with --3way --cached and conflicts' ' test_cmp expect.diff actual.diff ' +test_expect_success 'apply binary file patch' ' + git reset --hard main && + cp "$TEST_DIRECTORY/test-binary-1.png" bin.png && + git add bin.png && + git commit -m "add binary file" && + + cp "$TEST_DIRECTORY/test-binary-2.png" bin.png && + + git diff --binary >bin.diff && + git reset --hard && + + # Apply must succeed. + git apply bin.diff +' + +test_expect_success 'apply binary file patch with 3way' ' + git reset --hard main && + cp "$TEST_DIRECTORY/test-binary-1.png" bin.png && + git add bin.png && + git commit -m "add binary file" && + + cp "$TEST_DIRECTORY/test-binary-2.png" bin.png && + + git diff --binary >bin.diff && + git reset --hard && + + # Apply must succeed. + git apply --3way --index bin.diff +' + +test_expect_success 'apply full-index patch with 3way' ' + git reset --hard main && + cp "$TEST_DIRECTORY/test-binary-1.png" bin.png && + git add bin.png && + git commit -m "add binary file" && + + cp "$TEST_DIRECTORY/test-binary-2.png" bin.png && + + git diff --full-index >bin.diff && + git reset --hard && + + # Apply must succeed. 
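# (Both --binary and --full-index record complete blob object names on the
# "index ..." lines of the patch, which is what gives "apply --3way" a
# pre-image to fall back on; a quick way to see that in the generated patch,
# sketched here outside the test:
#
#   grep "^index " bin.diff
# )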
+ git apply --3way --index bin.diff +' + test_done diff --git a/t/t4151-am-abort.sh b/t/t4151-am-abort.sh index 9d8d3c72e7..2374151662 100755 --- a/t/t4151-am-abort.sh +++ b/t/t4151-am-abort.sh @@ -23,7 +23,13 @@ test_expect_success setup ' test_tick && git commit -a -m $i || return 1 done && + git branch changes && git format-patch --no-numbered initial && + git checkout -b conflicting initial && + echo different >>file-1 && + echo whatever >new-file && + git add file-1 new-file && + git commit -m different && git checkout -b side initial && echo local change >file-2-expect ' @@ -191,4 +197,37 @@ test_expect_success 'am --abort leaves index stat info alone' ' git diff-files --exit-code --quiet ' +test_expect_success 'git am --abort return failed exit status when it fails' ' + test_when_finished "rm -rf file-2/ && git reset --hard && git am --abort" && + git checkout changes && + git format-patch -1 --stdout conflicting >changes.mbox && + test_must_fail git am --3way changes.mbox && + + git rm file-2 && + mkdir file-2 && + echo precious >file-2/somefile && + test_must_fail git am --abort && + test_path_is_dir file-2/ +' + +test_expect_success 'git am --abort cleans relevant files' ' + git checkout changes && + git format-patch -1 --stdout conflicting >changes.mbox && + test_must_fail git am --3way changes.mbox && + + test_path_is_file new-file && + echo further changes >>file-1 && + echo change other file >>file-2 && + + # Abort, and expect the files touched by am to be reverted + git am --abort && + + test_path_is_missing new-file && + + # Files not involved in am operation are left modified + git diff --name-only changes >actual && + test_write_lines file-2 >expect && + test_cmp expect actual +' + test_done diff --git a/t/t4210-log-i18n.sh b/t/t4210-log-i18n.sh index d2dfcf164e..0141f36e33 100755 --- a/t/t4210-log-i18n.sh +++ b/t/t4210-log-i18n.sh @@ -131,4 +131,11 @@ do fi done +test_expect_success 'log shows warning when conversion fails' ' + enc=this-encoding-does-not-exist && + git log -1 --encoding=$enc 2>err && + echo "warning: unable to reencode commit to ${SQ}${enc}${SQ}" >expect && + test_cmp expect err +' + test_done diff --git a/t/t5304-prune.sh b/t/t5304-prune.sh index 7cabb85ca6..8ae314af58 100755 --- a/t/t5304-prune.sh +++ b/t/t5304-prune.sh @@ -291,6 +291,7 @@ test_expect_success 'prune: handle HEAD reflog in multiple worktrees' ' cat ../expected >blob && git add blob && git commit -m "second commit in third" && + git clean -f && # Remove untracked left behind by deleting index git reset --hard HEAD^ ) && git prune --expire=now && diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh index b02838750e..673baa5c3c 100755 --- a/t/t5310-pack-bitmaps.sh +++ b/t/t5310-pack-bitmaps.sh @@ -8,6 +8,10 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME . "$TEST_DIRECTORY"/lib-bundle.sh . "$TEST_DIRECTORY"/lib-bitmap.sh +# t5310 deals only with single-pack bitmaps, so don't write MIDX bitmaps in +# their place. +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 + objpath () { echo ".git/objects/$(echo "$1" | sed -e 's|\(..\)|\1/|')" } @@ -25,93 +29,10 @@ has_any () { grep -Ff "$1" "$2" } -# To ensure the logic for "maximal commits" is exercised, make -# the repository a bit more complicated. 
-# -# other second -# * * -# (99 commits) (99 commits) -# * * -# |\ /| -# | * octo-other octo-second * | -# |/|\_________ ____________/|\| -# | \ \/ __________/ | -# | | ________/\ / | -# * |/ * merge-right * -# | _|__________/ \____________ | -# |/ | \| -# (l1) * * merge-left * (r1) -# | / \________________________ | -# |/ \| -# (l2) * * (r2) -# \___________________________ | -# \| -# * (base) -# -# We only push bits down the first-parent history, which -# makes some of these commits unimportant! -# -# The important part for the maximal commit algorithm is how -# the bitmasks are extended. Assuming starting bit positions -# for second (bit 0) and other (bit 1), the bitmasks at the -# end should be: -# -# second: 1 (maximal, selected) -# other: 01 (maximal, selected) -# (base): 11 (maximal) -# -# This complicated history was important for a previous -# version of the walk that guarantees never walking a -# commit multiple times. That goal might be important -# again, so preserve this complicated case. For now, this -# test will guarantee that the bitmaps are computed -# correctly, even with the repeat calculations. - -test_expect_success 'setup repo with moderate-sized history' ' - test_commit_bulk --id=file 10 && - git branch -M second && - git checkout -b other HEAD~5 && - test_commit_bulk --id=side 10 && - - # add complicated history setup, including merges and - # ambiguous merge-bases - - git checkout -b merge-left other~2 && - git merge second~2 -m "merge-left" && - - git checkout -b merge-right second~1 && - git merge other~1 -m "merge-right" && - - git checkout -b octo-second second && - git merge merge-left merge-right -m "octopus-second" && - - git checkout -b octo-other other && - git merge merge-left merge-right -m "octopus-other" && - - git checkout other && - git merge octo-other -m "pull octopus" && - - git checkout second && - git merge octo-second -m "pull octopus" && - - # Remove these branches so they are not selected - # as bitmap tips - git branch -D merge-left && - git branch -D merge-right && - git branch -D octo-other && - git branch -D octo-second && - - # add padding to make these merges less interesting - # and avoid having them selected for bitmaps - test_commit_bulk --id=file 100 && - git checkout other && - test_commit_bulk --id=side 100 && - git checkout second && - - bitmaptip=$(git rev-parse second) && - blob=$(echo tagged-blob | git hash-object -w --stdin) && - git tag tagged-blob $blob && - git config repack.writebitmaps true +setup_bitmap_history + +test_expect_success 'setup writing bitmaps during repack' ' + git config repack.writeBitmaps true ' test_expect_success 'full repack creates bitmaps' ' @@ -123,109 +44,7 @@ test_expect_success 'full repack creates bitmaps' ' grep "\"key\":\"num_maximal_commits\",\"value\":\"107\"" trace ' -test_expect_success 'rev-list --test-bitmap verifies bitmaps' ' - git rev-list --test-bitmap HEAD -' - -rev_list_tests_head () { - test_expect_success "counting commits via bitmap ($state, $branch)" ' - git rev-list --count $branch >expect && - git rev-list --use-bitmap-index --count $branch >actual && - test_cmp expect actual - ' - - test_expect_success "counting partial commits via bitmap ($state, $branch)" ' - git rev-list --count $branch~5..$branch >expect && - git rev-list --use-bitmap-index --count $branch~5..$branch >actual && - test_cmp expect actual - ' - - test_expect_success "counting commits with limit ($state, $branch)" ' - git rev-list --count -n 1 $branch >expect && - git rev-list --use-bitmap-index --count 
-n 1 $branch >actual && - test_cmp expect actual - ' - - test_expect_success "counting non-linear history ($state, $branch)" ' - git rev-list --count other...second >expect && - git rev-list --use-bitmap-index --count other...second >actual && - test_cmp expect actual - ' - - test_expect_success "counting commits with limiting ($state, $branch)" ' - git rev-list --count $branch -- 1.t >expect && - git rev-list --use-bitmap-index --count $branch -- 1.t >actual && - test_cmp expect actual - ' - - test_expect_success "counting objects via bitmap ($state, $branch)" ' - git rev-list --count --objects $branch >expect && - git rev-list --use-bitmap-index --count --objects $branch >actual && - test_cmp expect actual - ' - - test_expect_success "enumerate commits ($state, $branch)" ' - git rev-list --use-bitmap-index $branch >actual && - git rev-list $branch >expect && - test_bitmap_traversal --no-confirm-bitmaps expect actual - ' - - test_expect_success "enumerate --objects ($state, $branch)" ' - git rev-list --objects --use-bitmap-index $branch >actual && - git rev-list --objects $branch >expect && - test_bitmap_traversal expect actual - ' - - test_expect_success "bitmap --objects handles non-commit objects ($state, $branch)" ' - git rev-list --objects --use-bitmap-index $branch tagged-blob >actual && - grep $blob actual - ' -} - -rev_list_tests () { - state=$1 - - for branch in "second" "other" - do - rev_list_tests_head - done -} - -rev_list_tests 'full bitmap' - -test_expect_success 'clone from bitmapped repository' ' - git clone --no-local --bare . clone.git && - git rev-parse HEAD >expect && - git --git-dir=clone.git rev-parse HEAD >actual && - test_cmp expect actual -' - -test_expect_success 'partial clone from bitmapped repository' ' - test_config uploadpack.allowfilter true && - git clone --no-local --bare --filter=blob:none . partial-clone.git && - ( - cd partial-clone.git && - pack=$(echo objects/pack/*.pack) && - git verify-pack -v "$pack" >have && - awk "/blob/ { print \$1 }" <have >blobs && - # we expect this single blob because of the direct ref - git rev-parse refs/tags/tagged-blob >expect && - test_cmp expect blobs - ) -' - -test_expect_success 'setup further non-bitmapped commits' ' - test_commit_bulk --id=further 10 -' - -rev_list_tests 'partial bitmap' - -test_expect_success 'fetch (partial bitmap)' ' - git --git-dir=clone.git fetch origin second:second && - git rev-parse HEAD >expect && - git --git-dir=clone.git rev-parse HEAD >actual && - test_cmp expect actual -' +basic_bitmap_tests test_expect_success 'incremental repack fails when bitmaps are requested' ' test_commit more-1 && @@ -461,40 +280,6 @@ test_expect_success 'truncated bitmap fails gracefully (cache)' ' test_i18ngrep corrupted.bitmap.index stderr ' -test_expect_success 'enumerating progress counts pack-reused objects' ' - count=$(git rev-list --objects --all --count) && - git repack -adb && - - # check first with only reused objects; confirm that our progress - # showed the right number, and also that we did pack-reuse as expected. - # Check only the final "done" line of the meter (there may be an - # arbitrary number of intermediate lines ending with CR). 
- GIT_PROGRESS_DELAY=0 \ - git pack-objects --all --stdout --progress \ - </dev/null >/dev/null 2>stderr && - grep "Enumerating objects: $count, done" stderr && - grep "pack-reused $count" stderr && - - # now the same but with one non-reused object - git commit --allow-empty -m "an extra commit object" && - GIT_PROGRESS_DELAY=0 \ - git pack-objects --all --stdout --progress \ - </dev/null >/dev/null 2>stderr && - grep "Enumerating objects: $((count+1)), done" stderr && - grep "pack-reused $count" stderr -' - -# have_delta <obj> <expected_base> -# -# Note that because this relies on cat-file, it might find _any_ copy of an -# object in the repository. The caller is responsible for making sure -# there's only one (e.g., via "repack -ad", or having just fetched a copy). -have_delta () { - echo $2 >expect && - echo $1 | git cat-file --batch-check="%(deltabase)" >actual && - test_cmp expect actual -} - # Create a state of history with these properties: # # - refs that allow a client to fetch some new history, while sharing some old diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh index af88f805aa..295c5bd94d 100755 --- a/t/t5318-commit-graph.sh +++ b/t/t5318-commit-graph.sh @@ -5,6 +5,25 @@ test_description='commit graph' GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0 +test_expect_success 'usage' ' + test_expect_code 129 git commit-graph write blah 2>err && + test_expect_code 129 git commit-graph write verify +' + +test_expect_success 'usage shown without sub-command' ' + test_expect_code 129 git commit-graph 2>err && + ! grep error: err +' + +test_expect_success 'usage shown with an error on unknown sub-command' ' + cat >expect <<-\EOF && + error: unrecognized subcommand: unknown + EOF + test_expect_code 129 git commit-graph unknown 2>stderr && + grep error stderr >actual && + test_cmp expect actual +' + test_expect_success 'setup full repo' ' mkdir full && cd "$TRASH_DIRECTORY/full" && diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh index 3d4d9f10c3..bd17f308b3 100755 --- a/t/t5319-multi-pack-index.sh +++ b/t/t5319-multi-pack-index.sh @@ -174,12 +174,12 @@ test_expect_success 'write progress off for redirected stderr' ' ' test_expect_success 'write force progress on for stderr' ' - GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --progress write 2>err && + GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir write --progress 2>err && test_file_not_empty err ' test_expect_success 'write with the --no-progress option' ' - GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --no-progress write 2>err && + GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir write --no-progress 2>err && test_line_count = 0 err ' @@ -201,6 +201,34 @@ test_expect_success 'write midx with twelve packs' ' compare_results_with_midx "twelve packs" +test_expect_success 'multi-pack-index *.rev cleanup with --object-dir' ' + git init repo && + git clone -s repo alternate && + + test_when_finished "rm -rf repo alternate" && + + ( + cd repo && + test_commit base && + git repack -d + ) && + + ours="alternate/.git/objects/pack/multi-pack-index-123.rev" && + theirs="repo/.git/objects/pack/multi-pack-index-abc.rev" && + touch "$ours" "$theirs" && + + ( + cd alternate && + git multi-pack-index --object-dir ../repo/.git/objects write + ) && + + # writing a midx in "repo" should not remove the .rev file in the + # alternate + test_path_is_file repo/.git/objects/pack/multi-pack-index && + test_path_is_file $ours && + test_path_is_missing $theirs +' + 
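The --object-dir test above leans on the fact that git multi-pack-index
operates on, and cleans up after itself in, whichever object directory it is
pointed at rather than the current repository's own. A minimal sketch of that
invocation, with the two repository paths assumed purely for illustration:

	git -C alternate multi-pack-index --object-dir=../repo/.git/objects write
	ls ../repo/.git/objects/pack/multi-pack-index*

Stale multi-pack-index-*.rev files are expected to be removed only under that
target object directory, which is what the $ours/$theirs assertions check.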
test_expect_success 'warn on improper hash version' ' git init --object-format=sha1 sha1 && ( @@ -277,6 +305,23 @@ test_expect_success 'midx picks objects from preferred pack' ' ) ' +test_expect_success 'preferred packs must be non-empty' ' + test_when_finished rm -rf preferred.git && + git init preferred.git && + ( + cd preferred.git && + + test_commit base && + git repack -ad && + + empty="$(git pack-objects $objdir/pack/pack </dev/null)" && + + test_must_fail git multi-pack-index write \ + --preferred-pack=pack-$empty.pack 2>err && + grep "with no objects" err + ) +' + test_expect_success 'verify multi-pack-index success' ' git multi-pack-index verify --object-dir=$objdir ' @@ -429,12 +474,12 @@ test_expect_success 'repack progress off for redirected stderr' ' ' test_expect_success 'repack force progress on for stderr' ' - GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --progress repack 2>err && + GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir repack --progress 2>err && test_file_not_empty err ' test_expect_success 'repack with the --no-progress option' ' - GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir --no-progress repack 2>err && + GIT_PROGRESS_DELAY=0 git multi-pack-index --object-dir=$objdir repack --no-progress 2>err && test_line_count = 0 err ' @@ -487,7 +532,8 @@ test_expect_success 'repack preserves multi-pack-index when creating packs' ' compare_results_with_midx "after repack" test_expect_success 'multi-pack-index and pack-bitmap' ' - git -c repack.writeBitmaps=true repack -ad && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -c repack.writeBitmaps=true repack -ad && git multi-pack-index write && git rev-list --test-bitmap HEAD ' @@ -537,7 +583,15 @@ test_expect_success 'force some 64-bit offsets with pack-objects' ' idx64=objects64/pack/test-64-$pack64.idx && chmod u+w $idx64 && corrupt_data $idx64 $(test_oid idxoff) "\02" && - midx64=$(git multi-pack-index --object-dir=objects64 write) && + # objects64 is not a real repository, but can serve as an alternate + # anyway so we can write a MIDX into it + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + ( cd ../objects64 && pwd ) >.git/objects/info/alternates && + midx64=$(git multi-pack-index --object-dir=../objects64 write) + ) && midx_read_expect 1 63 5 objects64 " large-offsets" ' @@ -618,7 +672,7 @@ test_expect_success 'expire progress off for redirected stderr' ' test_expect_success 'expire force progress on for stderr' ' ( cd dup && - GIT_PROGRESS_DELAY=0 git multi-pack-index --progress expire 2>err && + GIT_PROGRESS_DELAY=0 git multi-pack-index expire --progress 2>err && test_file_not_empty err ) ' @@ -626,7 +680,7 @@ test_expect_success 'expire force progress on for stderr' ' test_expect_success 'expire with the --no-progress option' ' ( cd dup && - GIT_PROGRESS_DELAY=0 git multi-pack-index --no-progress expire 2>err && + GIT_PROGRESS_DELAY=0 git multi-pack-index expire --no-progress 2>err && test_line_count = 0 err ) ' @@ -842,4 +896,9 @@ test_expect_success 'usage shown without sub-command' ' ! 
test_i18ngrep "unrecognized subcommand" err ' +test_expect_success 'complains when run outside of a repository' ' + nongit test_must_fail git multi-pack-index write 2>err && + grep "not a git repository" err +' + test_done diff --git a/t/t5323-pack-redundant.sh b/t/t5323-pack-redundant.sh index 8b01793845..8dbbcc5e51 100755 --- a/t/t5323-pack-redundant.sh +++ b/t/t5323-pack-redundant.sh @@ -114,9 +114,9 @@ test_expect_success 'setup main repo' ' create_commits_in "$main_repo" A B C D E F G H I J K L M N O P Q R ' -test_expect_success 'master: pack-redundant works with no packfile' ' +test_expect_success 'main: pack-redundant works with no packfile' ' ( - cd "$master_repo" && + cd "$main_repo" && cat >expect <<-EOF && fatal: Zero packs found! EOF diff --git a/t/t5326-multi-pack-bitmaps.sh b/t/t5326-multi-pack-bitmaps.sh new file mode 100755 index 0000000000..4ad7c2c969 --- /dev/null +++ b/t/t5326-multi-pack-bitmaps.sh @@ -0,0 +1,286 @@ +#!/bin/sh + +test_description='exercise basic multi-pack bitmap functionality' +. ./test-lib.sh +. "${TEST_DIRECTORY}/lib-bitmap.sh" + +# We'll be writing our own midx and bitmaps, so avoid getting confused by the +# automatic ones. +GIT_TEST_MULTI_PACK_INDEX=0 +GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 + +objdir=.git/objects +midx=$objdir/pack/multi-pack-index + +# midx_pack_source <obj> +midx_pack_source () { + test-tool read-midx --show-objects .git/objects | grep "^$1 " | cut -f2 +} + +setup_bitmap_history + +test_expect_success 'enable core.multiPackIndex' ' + git config core.multiPackIndex true +' + +test_expect_success 'create single-pack midx with bitmaps' ' + git repack -ad && + git multi-pack-index write --bitmap && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev +' + +basic_bitmap_tests + +test_expect_success 'create new additional packs' ' + for i in $(test_seq 1 16) + do + test_commit "$i" && + git repack -d || return 1 + done && + + git checkout -b other2 HEAD~8 && + for i in $(test_seq 1 8) + do + test_commit "side-$i" && + git repack -d || return 1 + done && + git checkout second +' + +test_expect_success 'create multi-pack midx with bitmaps' ' + git multi-pack-index write --bitmap && + + ls $objdir/pack/pack-*.pack >packs && + test_line_count = 25 packs && + + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev +' + +basic_bitmap_tests + +test_expect_success '--no-bitmap is respected when bitmaps exist' ' + git multi-pack-index write --bitmap && + + test_commit respect--no-bitmap && + git repack -d && + + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev && + + git multi-pack-index write --no-bitmap && + + test_path_is_file $midx && + test_path_is_missing $midx-$(midx_checksum $objdir).bitmap && + test_path_is_missing $midx-$(midx_checksum $objdir).rev +' + +test_expect_success 'setup midx with base from later pack' ' + # Write a and b so that "a" is a delta on top of base "b", since Git + # prefers to delete contents out of a base rather than add to a shorter + # object. + test_seq 1 128 >a && + test_seq 1 130 >b && + + git add a b && + git commit -m "initial commit" && + + a=$(git rev-parse HEAD:a) && + b=$(git rev-parse HEAD:b) && + + # In the first pack, "a" is stored as a delta to "b". 
+ p1=$(git pack-objects .git/objects/pack/pack <<-EOF + $a + $b + EOF + ) && + + # In the second pack, "a" is missing, and "b" is not a delta nor base to + # any other object. + p2=$(git pack-objects .git/objects/pack/pack <<-EOF + $b + $(git rev-parse HEAD) + $(git rev-parse HEAD^{tree}) + EOF + ) && + + git prune-packed && + # Use the second pack as the preferred source, so that "b" occurs + # earlier in the MIDX object order, rendering "a" unusable for pack + # reuse. + git multi-pack-index write --bitmap --preferred-pack=pack-$p2.idx && + + have_delta $a $b && + test $(midx_pack_source $a) != $(midx_pack_source $b) +' + +rev_list_tests 'full bitmap with backwards delta' + +test_expect_success 'clone with bitmaps enabled' ' + git clone --no-local --bare . clone-reverse-delta.git && + test_when_finished "rm -fr clone-reverse-delta.git" && + + git rev-parse HEAD >expect && + git --git-dir=clone-reverse-delta.git rev-parse HEAD >actual && + test_cmp expect actual +' + +bitmap_reuse_tests() { + from=$1 + to=$2 + + test_expect_success "setup pack reuse tests ($from -> $to)" ' + rm -fr repo && + git init repo && + ( + cd repo && + test_commit_bulk 16 && + git tag old-tip && + + git config core.multiPackIndex true && + if test "MIDX" = "$from" + then + git repack -Ad && + git multi-pack-index write --bitmap + else + git repack -Adb + fi + ) + ' + + test_expect_success "build bitmap from existing ($from -> $to)" ' + ( + cd repo && + test_commit_bulk --id=further 16 && + git tag new-tip && + + if test "MIDX" = "$to" + then + git repack -d && + git multi-pack-index write --bitmap + else + git repack -Adb + fi + ) + ' + + test_expect_success "verify resulting bitmaps ($from -> $to)" ' + ( + cd repo && + git for-each-ref && + git rev-list --test-bitmap refs/tags/old-tip && + git rev-list --test-bitmap refs/tags/new-tip + ) + ' +} + +bitmap_reuse_tests 'pack' 'MIDX' +bitmap_reuse_tests 'MIDX' 'pack' +bitmap_reuse_tests 'MIDX' 'MIDX' + +test_expect_success 'missing object closure fails gracefully' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + + test_commit loose && + test_commit packed && + + # Do not pass "--revs"; we want a pack without the "loose" + # commit. + git pack-objects $objdir/pack/pack <<-EOF && + $(git rev-parse packed) + EOF + + test_must_fail git multi-pack-index write --bitmap 2>err && + grep "doesn.t have full closure" err && + test_path_is_missing $midx + ) +' + +test_expect_success 'setup partial bitmaps' ' + test_commit packed && + git repack && + test_commit loose && + git multi-pack-index write --bitmap 2>err && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev +' + +basic_bitmap_tests HEAD~ + +test_expect_success 'removing a MIDX clears stale bitmaps' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + test_commit base && + git repack && + git multi-pack-index write --bitmap && + + # Write a MIDX and bitmap; remove the MIDX but leave the bitmap. + stale_bitmap=$midx-$(midx_checksum $objdir).bitmap && + stale_rev=$midx-$(midx_checksum $objdir).rev && + rm $midx && + + # Then write a new MIDX. 
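# (Bitmap and .rev files are tied to a particular MIDX by its checksum --
# hence the $midx-$(midx_checksum $objdir).bitmap naming used throughout this
# script -- so rewriting the MIDX below produces fresh companions, and the
# files saved in $stale_bitmap and $stale_rev are expected to disappear.
# A quick way to watch that by hand, assuming the layout used in this test:
#
#   ls .git/objects/pack/multi-pack-index*
# )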
+ test_commit new && + git repack && + git multi-pack-index write --bitmap && + + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev && + test_path_is_missing $stale_bitmap && + test_path_is_missing $stale_rev + ) +' + +test_expect_success 'pack.preferBitmapTips' ' + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + + test_commit_bulk --message="%s" 103 && + + git log --format="%H" >commits.raw && + sort <commits.raw >commits && + + git log --format="create refs/tags/%s %H" HEAD >refs && + git update-ref --stdin <refs && + + git multi-pack-index write --bitmap && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_file $midx-$(midx_checksum $objdir).rev && + + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >before && + test_line_count = 1 before && + + perl -ne "printf(\"create refs/tags/include/%d \", $.); print" \ + <before | git update-ref --stdin && + + rm -fr $midx-$(midx_checksum $objdir).bitmap && + rm -fr $midx-$(midx_checksum $objdir).rev && + rm -fr $midx && + + git -c pack.preferBitmapTips=refs/tags/include \ + multi-pack-index write --bitmap && + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >after && + + ! test_cmp before after + ) +' + +test_done diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh index 4f87d90c5b..f92c79c132 100755 --- a/t/t5551-http-fetch-smart.sh +++ b/t/t5551-http-fetch-smart.sh @@ -196,8 +196,8 @@ test_expect_success 'GIT_TRACE_CURL redacts auth details' ' # Ensure that there is no "Basic" followed by a base64 string, but that # the auth details are redacted - ! grep "Authorization: Basic [0-9a-zA-Z+/]" trace && - grep "Authorization: Basic <redacted>" trace + ! grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace && + grep -i "Authorization: Basic <redacted>" trace ' test_expect_success 'GIT_CURL_VERBOSE redacts auth details' ' @@ -208,8 +208,8 @@ test_expect_success 'GIT_CURL_VERBOSE redacts auth details' ' # Ensure that there is no "Basic" followed by a base64 string, but that # the auth details are redacted - ! grep "Authorization: Basic [0-9a-zA-Z+/]" trace && - grep "Authorization: Basic <redacted>" trace + ! grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace && + grep -i "Authorization: Basic <redacted>" trace ' test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_REDACT=0' ' @@ -219,7 +219,7 @@ test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_RE git clone --bare "$HTTPD_URL/auth/smart/repo.git" redact-auth && expect_askpass both user@host && - grep "Authorization: Basic [0-9a-zA-Z+/]" trace + grep -i "Authorization: Basic [0-9a-zA-Z+/]" trace ' test_expect_success 'disable dumb http on server' ' @@ -474,10 +474,10 @@ test_expect_success 'cookies are redacted by default' ' GIT_TRACE_CURL=true \ git -c "http.cookieFile=$(pwd)/cookies" clone \ $HTTPD_URL/smart/repo.git clone 2>err && - grep "Cookie:.*Foo=<redacted>" err && - grep "Cookie:.*Bar=<redacted>" err && - ! grep "Cookie:.*Foo=1" err && - ! grep "Cookie:.*Bar=2" err + grep -i "Cookie:.*Foo=<redacted>" err && + grep -i "Cookie:.*Bar=<redacted>" err && + ! grep -i "Cookie:.*Foo=1" err && + ! 
grep -i "Cookie:.*Bar=2" err ' test_expect_success 'empty values of cookies are also redacted' ' @@ -486,7 +486,7 @@ test_expect_success 'empty values of cookies are also redacted' ' GIT_TRACE_CURL=true \ git -c "http.cookieFile=$(pwd)/cookies" clone \ $HTTPD_URL/smart/repo.git clone 2>err && - grep "Cookie:.*Foo=<redacted>" err + grep -i "Cookie:.*Foo=<redacted>" err ' test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' ' @@ -496,8 +496,8 @@ test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' ' GIT_TRACE_REDACT=0 GIT_TRACE_CURL=true \ git -c "http.cookieFile=$(pwd)/cookies" clone \ $HTTPD_URL/smart/repo.git clone 2>err && - grep "Cookie:.*Foo=1" err && - grep "Cookie:.*Bar=2" err + grep -i "Cookie:.*Foo=1" err && + grep -i "Cookie:.*Bar=2" err ' test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' ' @@ -558,4 +558,13 @@ test_expect_success 'http auth forgets bogus credentials' ' expect_askpass both user@host ' +test_expect_success 'client falls back from v2 to v0 to match server' ' + GIT_TRACE_PACKET=$PWD/trace \ + GIT_TEST_PROTOCOL_VERSION=2 \ + git clone $HTTPD_URL/smart_v0/repo.git repo-v0 && + # check for v0; there the HEAD symref is communicated in the capability + # line; v2 uses a different syntax on each ref advertisement line + grep symref=HEAD:refs/heads/ trace +' + test_done diff --git a/t/t5555-http-smart-common.sh b/t/t5555-http-smart-common.sh new file mode 100755 index 0000000000..49faf5e283 --- /dev/null +++ b/t/t5555-http-smart-common.sh @@ -0,0 +1,161 @@ +#!/bin/sh + +test_description='test functionality common to smart fetch & push' + +. ./test-lib.sh + +test_expect_success 'setup' ' + test_commit --no-tag initial +' + +test_expect_success 'git upload-pack --http-backend-info-refs and --advertise-refs are aliased' ' + git upload-pack --http-backend-info-refs . >expected 2>err.expected && + git upload-pack --advertise-refs . >actual 2>err.actual && + test_cmp err.expected err.actual && + test_cmp expected actual +' + +test_expect_success 'git receive-pack --http-backend-info-refs and --advertise-refs are aliased' ' + git receive-pack --http-backend-info-refs . >expected 2>err.expected && + git receive-pack --advertise-refs . >actual 2>err.actual && + test_cmp err.expected err.actual && + test_cmp expected actual +' + +test_expect_success 'git upload-pack --advertise-refs' ' + cat >expect <<-EOF && + $(git rev-parse HEAD) HEAD + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + # We only care about GIT_PROTOCOL, not GIT_TEST_PROTOCOL_VERSION + sane_unset GIT_PROTOCOL && + GIT_TEST_PROTOCOL_VERSION=2 \ + git upload-pack --advertise-refs . >out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect && + + # The --advertise-refs alias works + git upload-pack --advertise-refs . >out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git upload-pack --advertise-refs: v0' ' + # With no specified protocol + cat >expect <<-EOF && + $(git rev-parse HEAD) HEAD + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + git upload-pack --advertise-refs . >out 2>err && + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect && + + # With explicit v0 + GIT_PROTOCOL=version=0 \ + git upload-pack --advertise-refs . 
>out 2>err && + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect + +' + +test_expect_success 'git receive-pack --advertise-refs: v0' ' + # With no specified protocol + cat >expect <<-EOF && + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + git receive-pack --advertise-refs . >out 2>err && + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect && + + # With explicit v0 + GIT_PROTOCOL=version=0 \ + git receive-pack --advertise-refs . >out 2>err && + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect + +' + +test_expect_success 'git upload-pack --advertise-refs: v1' ' + # With no specified protocol + cat >expect <<-EOF && + version 1 + $(git rev-parse HEAD) HEAD + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + GIT_PROTOCOL=version=1 \ + git upload-pack --advertise-refs . >out && + + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git receive-pack --advertise-refs: v1' ' + # With no specified protocol + cat >expect <<-EOF && + version 1 + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + GIT_PROTOCOL=version=1 \ + git receive-pack --advertise-refs . >out && + + test-tool pkt-line unpack <out >actual 2>err && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git upload-pack --advertise-refs: v2' ' + cat >expect <<-EOF && + version 2 + agent=FAKE + ls-refs=unborn + fetch=shallow wait-for-done + server-option + object-format=$(test_oid algo) + object-info + 0000 + EOF + + GIT_PROTOCOL=version=2 \ + GIT_USER_AGENT=FAKE \ + git upload-pack --advertise-refs . >out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect +' + +test_expect_success 'git receive-pack --advertise-refs: v2' ' + # There is no v2 yet for receive-pack, implicit v0 + cat >expect <<-EOF && + $(git rev-parse HEAD) $(git symbolic-ref HEAD) + 0000 + EOF + + GIT_PROTOCOL=version=2 \ + git receive-pack --advertise-refs . 
>out 2>err && + + test-tool pkt-line unpack <out >actual && + test_must_be_empty err && + test_cmp actual expect +' + +test_done diff --git a/t/t5562/invoke-with-content-length.pl b/t/t5562/invoke-with-content-length.pl index 0943474af2..718dd9b49d 100644 --- a/t/t5562/invoke-with-content-length.pl +++ b/t/t5562/invoke-with-content-length.pl @@ -13,11 +13,6 @@ my $body_data; defined read($body_fh, $body_data, $body_size) or die "Cannot read $body_filename: $!"; close($body_fh); -my $exited = 0; -$SIG{"CHLD"} = sub { - $exited = 1; -}; - # write data my $pid = open(my $out, "|-", @command); { @@ -29,8 +24,13 @@ my $pid = open(my $out, "|-", @command); } print $out $body_data or die "Cannot write data: $!"; -sleep 60; # is interrupted by SIGCHLD -if (!$exited) { - close($out); +$SIG{ALRM} = sub { + kill 'KILL', $pid; die "Command did not exit after reading whole body"; +}; +alarm 60; + +my $ret = waitpid($pid, 0); +if ($ret != $pid) { + die "confusing return from waitpid: $ret"; } diff --git a/t/t5582-fetch-negative-refspec.sh b/t/t5582-fetch-negative-refspec.sh index e5d2e79ad3..7a80e47c2b 100755 --- a/t/t5582-fetch-negative-refspec.sh +++ b/t/t5582-fetch-negative-refspec.sh @@ -105,7 +105,6 @@ test_expect_success "fetch with negative pattern refspec does not expand prefix" ' test_expect_success "fetch with negative refspec avoids duplicate conflict" ' - cd "$D" && ( cd one && git branch dups/a && diff --git a/t/t5606-clone-options.sh b/t/t5606-clone-options.sh index 3a595c0f82..d822153e4d 100755 --- a/t/t5606-clone-options.sh +++ b/t/t5606-clone-options.sh @@ -16,6 +16,18 @@ test_expect_success 'setup' ' ' +test_expect_success 'submodule.stickyRecursiveClone flag manipulates submodule.recurse value' ' + + test_config_global submodule.stickyRecursiveClone true && + git clone --recurse-submodules parent clone_recurse_true && + test_cmp_config -C clone_recurse_true true submodule.recurse && + + test_config_global submodule.stickyRecursiveClone false && + git clone --recurse-submodules parent clone_recurse_false && + test_expect_code 1 git -C clone_recurse_false config --get submodule.recurse + +' + test_expect_success 'clone -o' ' git clone -o foo parent clone-o && diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh index 930721f053..aa1827d841 100755 --- a/t/t5701-git-serve.sh +++ b/t/t5701-git-serve.sh @@ -72,6 +72,37 @@ test_expect_success 'request invalid command' ' test_i18ngrep "invalid command" err ' +test_expect_success 'request capability as command' ' + test-tool pkt-line pack >in <<-EOF && + command=agent + object-format=$(test_oid algo) + 0000 + EOF + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep invalid.command.*agent err +' + +test_expect_success 'request command as capability' ' + test-tool pkt-line pack >in <<-EOF && + command=ls-refs + object-format=$(test_oid algo) + fetch + 0000 + EOF + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep unknown.capability err +' + +test_expect_success 'requested command is command=value' ' + test-tool pkt-line pack >in <<-EOF && + command=ls-refs=whatever + object-format=$(test_oid algo) + 0000 + EOF + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep invalid.command.*ls-refs=whatever err +' + test_expect_success 'wrong object-format' ' test-tool pkt-line pack >in <<-EOF && command=fetch @@ -116,6 +147,19 @@ test_expect_success 'basics of ls-refs' ' test_cmp expect actual ' +test_expect_success 'ls-refs complains about unknown options' ' + test-tool pkt-line pack >in <<-EOF 
&& + command=ls-refs + object-format=$(test_oid algo) + 0001 + no-such-arg + 0000 + EOF + + test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in && + grep unexpected.line.*no-such-arg err +' + test_expect_success 'basic ref-prefixes' ' test-tool pkt-line pack >in <<-EOF && command=ls-refs @@ -158,6 +202,37 @@ test_expect_success 'refs/heads prefix' ' test_cmp expect actual ' +test_expect_success 'ignore very large set of prefixes' ' + # generate a large number of ref-prefixes that we expect + # to match nothing; the value here exceeds TOO_MANY_PREFIXES + # from ls-refs.c. + { + echo command=ls-refs && + echo object-format=$(test_oid algo) && + echo 0001 && + perl -le "print \"ref-prefix refs/heads/\$_\" for (1..65536)" && + echo 0000 + } | + test-tool pkt-line pack >in && + + # and then confirm that we see unmatched prefixes anyway (i.e., + # that the prefix was not applied). + cat >expect <<-EOF && + $(git rev-parse HEAD) HEAD + $(git rev-parse refs/heads/dev) refs/heads/dev + $(git rev-parse refs/heads/main) refs/heads/main + $(git rev-parse refs/heads/release) refs/heads/release + $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag + $(git rev-parse refs/tags/one) refs/tags/one + $(git rev-parse refs/tags/two) refs/tags/two + 0000 + EOF + + test-tool serve-v2 --stateless-rpc <in >out && + test-tool pkt-line unpack <out >actual && + test_cmp expect actual +' + test_expect_success 'peel parameter' ' test-tool pkt-line pack >in <<-EOF && command=ls-refs diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh index 78de1ff2ad..d527cf6c49 100755 --- a/t/t5702-protocol-v2.sh +++ b/t/t5702-protocol-v2.sh @@ -27,9 +27,9 @@ test_expect_success 'list refs with git:// using protocol v2' ' ls-remote --symref "$GIT_DAEMON_URL/parent" >actual && # Client requested to use protocol v2 - grep "git> .*\\\0\\\0version=2\\\0$" log && + grep "ls-remote> .*\\\0\\\0version=2\\\0$" log && # Server responded using protocol v2 - grep "git< version 2" log && + grep "ls-remote< version 2" log && git ls-remote --symref "$GIT_DAEMON_URL/parent" >expect && test_cmp expect actual @@ -151,7 +151,7 @@ test_expect_success 'list refs with file:// using protocol v2' ' ls-remote --symref "file://$(pwd)/file_parent" >actual && # Server responded using protocol v2 - grep "git< version 2" log && + grep "ls-remote< version 2" log && git ls-remote --symref "file://$(pwd)/file_parent" >expect && test_cmp expect actual @@ -237,6 +237,19 @@ test_expect_success '...but not if explicitly forbidden by config' ' ! grep "refs/heads/mydefaultbranch" file_empty_child/.git/HEAD ' +test_expect_success 'bare clone propagates empty default branch' ' + test_when_finished "rm -rf file_empty_parent file_empty_child.git" && + + GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME= \ + git -c init.defaultBranch=mydefaultbranch init file_empty_parent && + + GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME= \ + git -c init.defaultBranch=main -c protocol.version=2 \ + clone --bare \ + "file://$(pwd)/file_empty_parent" file_empty_child.git && + grep "refs/heads/mydefaultbranch" file_empty_child.git/HEAD +' + test_expect_success 'fetch with file:// using protocol v2' ' test_when_finished "rm -f log" && diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh index e9e471621d..220098523a 100755 --- a/t/t5703-upload-pack-ref-in-want.sh +++ b/t/t5703-upload-pack-ref-in-want.sh @@ -40,6 +40,30 @@ write_command () { fi } +# Write a complete fetch command to stdout, suitable for use with `test-tool +# pkt-line`. 
"want-ref", "want", and "have" lines are read from stdin. +# +# Examples: +# +# write_fetch_command <<-EOF +# want-ref refs/heads/main +# have $(git rev-parse a) +# EOF +# +# write_fetch_command <<-EOF +# want $(git rev-parse b) +# have $(git rev-parse a) +# EOF +# +write_fetch_command () { + write_command fetch && + echo "0001" && + echo "no-progress" && + cat && + echo "done" && + echo "0000" +} + # c(o/foo) d(o/bar) # \ / # b e(baz) f(main) @@ -77,15 +101,11 @@ test_expect_success 'config controls ref-in-want advertisement' ' ' test_expect_success 'invalid want-ref line' ' - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/non-existent - done - 0000 EOF + test-tool pkt-line pack <pkt >in && test_must_fail test-tool serve-v2 --stateless-rpc 2>out <in && grep "unknown ref" out ' @@ -97,16 +117,11 @@ test_expect_success 'basic want-ref' ' EOF git rev-parse f >expected_commits && - oid=$(git rev-parse a) && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/main - have $oid - done - 0000 + have $(git rev-parse a) EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -121,17 +136,12 @@ test_expect_success 'multiple want-ref lines' ' EOF git rev-parse c d >expected_commits && - oid=$(git rev-parse b) && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/o/foo want-ref refs/heads/o/bar - have $oid - done - 0000 + have $(git rev-parse b) EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -144,16 +154,12 @@ test_expect_success 'mix want and want-ref' ' EOF git rev-parse e f >expected_commits && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/main want $(git rev-parse e) have $(git rev-parse a) - done - 0000 EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -166,16 +172,11 @@ test_expect_success 'want-ref with ref we already have commit for' ' EOF >expected_commits && - oid=$(git rev-parse c) && - test-tool pkt-line pack >in <<-EOF && - $(write_command fetch) - 0001 - no-progress + write_fetch_command >pkt <<-EOF && want-ref refs/heads/o/foo - have $oid - done - 0000 + have $(git rev-parse c) EOF + test-tool pkt-line pack <pkt >in && test-tool serve-v2 --stateless-rpc >out <in && check_output @@ -298,6 +299,141 @@ test_expect_success 'fetching with wildcard that matches multiple refs' ' grep "want-ref refs/heads/o/bar" log ' +REPO="$(pwd)/repo-ns" + +test_expect_success 'setup namespaced repo' ' + ( + git init -b main "$REPO" && + cd "$REPO" && + test_commit a && + test_commit b && + git checkout a && + test_commit c && + git checkout a && + test_commit d && + git update-ref refs/heads/ns-no b && + git update-ref refs/namespaces/ns/refs/heads/ns-yes c && + git update-ref refs/namespaces/ns/refs/heads/hidden d + ) && + git -C "$REPO" config uploadpack.allowRefInWant true +' + +test_expect_success 'with namespace: want-ref is considered relative to namespace' ' + wanted_ref=refs/heads/ns-yes && + + oid=$(git -C "$REPO" rev-parse "refs/namespaces/ns/$wanted_ref") && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C 
"$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + GIT_NAMESPACE=ns test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + +test_expect_success 'with namespace: want-ref outside namespace is unknown' ' + wanted_ref=refs/heads/ns-no && + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test_must_fail env GIT_NAMESPACE=ns \ + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + grep "unknown ref" out +' + +# Cross-check refs/heads/ns-no indeed exists +test_expect_success 'without namespace: want-ref outside namespace succeeds' ' + wanted_ref=refs/heads/ns-no && + + oid=$(git -C "$REPO" rev-parse $wanted_ref) && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C "$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + +test_expect_success 'with namespace: hideRefs is matched, relative to namespace' ' + wanted_ref=refs/heads/hidden && + git -C "$REPO" config transfer.hideRefs $wanted_ref && + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test_must_fail env GIT_NAMESPACE=ns \ + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + grep "unknown ref" out +' + +# Cross-check refs/heads/hidden indeed exists +test_expect_success 'with namespace: want-ref succeeds if hideRefs is removed' ' + wanted_ref=refs/heads/hidden && + git -C "$REPO" config --unset transfer.hideRefs $wanted_ref && + + oid=$(git -C "$REPO" rev-parse "refs/namespaces/ns/$wanted_ref") && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C "$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + GIT_NAMESPACE=ns test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + +test_expect_success 'without namespace: relative hideRefs does not match' ' + wanted_ref=refs/namespaces/ns/refs/heads/hidden && + git -C "$REPO" config transfer.hideRefs refs/heads/hidden && + + oid=$(git -C "$REPO" rev-parse $wanted_ref) && + cat >expected_refs <<-EOF && + $oid $wanted_ref + EOF + cat >expected_commits <<-EOF && + $oid + $(git -C "$REPO" rev-parse a) + EOF + + write_fetch_command >pkt <<-EOF && + want-ref $wanted_ref + EOF + test-tool pkt-line pack <pkt >in && + + test-tool -C "$REPO" serve-v2 --stateless-rpc >out <in && + check_output +' + + . "$TEST_DIRECTORY"/lib-httpd.sh start_httpd diff --git a/t/t5704-protocol-violations.sh b/t/t5704-protocol-violations.sh index 5c941949b9..bc393d7c31 100755 --- a/t/t5704-protocol-violations.sh +++ b/t/t5704-protocol-violations.sh @@ -32,4 +32,19 @@ test_expect_success 'extra delim packet in v2 fetch args' ' test_i18ngrep "expected flush after fetch arguments" err ' +test_expect_success 'bogus symref in v0 capabilities' ' + test_commit foo && + oid=$(git rev-parse HEAD) && + dst=refs/heads/foo && + { + printf "%s HEAD\0symref object-format=%s symref=HEAD:%s\n" \ + "$oid" "$GIT_DEFAULT_HASH" "$dst" | + test-tool pkt-line pack-raw-stdin && + printf "0000" + } >input && + git ls-remote --symref --upload-pack="cat input; read junk;:" . 
>actual && + printf "ref: %s\tHEAD\n%s\tHEAD\n" "$dst" "$oid" >expect && + test_cmp expect actual +' + test_done diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh index a1baf4e451..1be85d064e 100755 --- a/t/t6030-bisect-porcelain.sh +++ b/t/t6030-bisect-porcelain.sh @@ -962,4 +962,22 @@ test_expect_success 'bisect handles annotated tags' ' grep "$bad is the first bad commit" output ' +test_expect_success 'bisect run fails with exit code equals or greater than 128' ' + write_script test_script.sh <<-\EOF && + exit 128 + EOF + test_must_fail git bisect run ./test_script.sh && + write_script test_script.sh <<-\EOF && + exit 255 + EOF + test_must_fail git bisect run ./test_script.sh +' + +test_expect_success 'bisect visualize with a filename with dash and space' ' + echo "My test line" >>"./-hello 2" && + git add -- "./-hello 2" && + git commit --quiet -m "Add test line" -- "./-hello 2" && + git bisect visualize -p -- "-hello 2" +' + test_done diff --git a/t/t6300-for-each-ref.sh b/t/t6300-for-each-ref.sh index 0d2e062f79..80679d5e12 100755 --- a/t/t6300-for-each-ref.sh +++ b/t/t6300-for-each-ref.sh @@ -59,18 +59,25 @@ test_atom() { # Automatically test "contents:size" atom after testing "contents" if test "$2" = "contents" then - case $(git cat-file -t "$ref") in - tag) - # We cannot use $3 as it expects sanitize_pgp to run - expect=$(git cat-file tag $ref | tail -n +6 | wc -c) ;; - tree | blob) - expect='' ;; - commit) - expect=$(printf '%s' "$3" | wc -c) ;; - esac - # Leave $expect unquoted to lose possible leading whitespaces - echo $expect >expected + # for commit leg, $3 is changed there + expect=$(printf '%s' "$3" | wc -c) test_expect_${4:-success} $PREREQ "basic atom: $1 contents:size" ' + type=$(git cat-file -t "$ref") && + case $type in + tag) + # We cannot use $3 as it expects sanitize_pgp to run + git cat-file tag $ref >out && + expect=$(tail -n +6 out | wc -c) && + rm -f out ;; + tree | blob) + expect="" ;; + commit) + : "use the calculated expect" ;; + *) + BUG "unknown object type" ;; + esac && + # Leave $expect unquoted to lose possible leading whitespaces + echo $expect >expected && git for-each-ref --format="%(contents:size)" "$ref" >actual && test_cmp expected actual ' diff --git a/t/t6415-merge-dir-to-symlink.sh b/t/t6415-merge-dir-to-symlink.sh index 2ce104aca7..2655e295f5 100755 --- a/t/t6415-merge-dir-to-symlink.sh +++ b/t/t6415-merge-dir-to-symlink.sh @@ -25,7 +25,8 @@ test_expect_success 'checkout does not clobber untracked symlink' ' git reset --hard main && git rm --cached a/b && git commit -m "untracked symlink remains" && - test_must_fail git checkout start^0 + test_must_fail git checkout start^0 && + git clean -fd # Do not leave the untracked symlink in the way ' test_expect_success 'a/b-2/c/d is kept when clobbering symlink b' ' @@ -34,7 +35,8 @@ test_expect_success 'a/b-2/c/d is kept when clobbering symlink b' ' git rm --cached a/b && git commit -m "untracked symlink remains" && git checkout -f start^0 && - test_path_is_file a/b-2/c/d + test_path_is_file a/b-2/c/d && + git clean -fd # Do not leave the untracked symlink in the way ' test_expect_success 'checkout should not have deleted a/b-2/c/d' ' diff --git a/t/t6424-merge-unrelated-index-changes.sh b/t/t6424-merge-unrelated-index-changes.sh index 5e3779ebc9..89dd544f38 100755 --- a/t/t6424-merge-unrelated-index-changes.sh +++ b/t/t6424-merge-unrelated-index-changes.sh @@ -132,6 +132,7 @@ test_expect_success 'merge-recursive, when index==head but head!=HEAD' ' # Make index match B git diff 
C B -- | git apply --cached && + test_when_finished "git clean -fd" && # Do not leave untracked around # Merge B & F, with B as "head" git merge-recursive A -- B F > out && test_i18ngrep "Already up to date" out diff --git a/t/t6430-merge-recursive.sh b/t/t6430-merge-recursive.sh index ffcc01fe65..a0efe7cb6d 100755 --- a/t/t6430-merge-recursive.sh +++ b/t/t6430-merge-recursive.sh @@ -718,7 +718,9 @@ test_expect_success 'merge-recursive remembers the names of all base trees' ' # merge-recursive prints in reverse order, but we do not care sort <trees >expect && sed -n "s/^virtual //p" out | sort >actual && - test_cmp expect actual + test_cmp expect actual && + + git clean -fd ' test_expect_success 'merge-recursive internal merge resolves to the sameness' ' diff --git a/t/t6436-merge-overwrite.sh b/t/t6436-merge-overwrite.sh index 84b4aacf49..c0b7bd7c3f 100755 --- a/t/t6436-merge-overwrite.sh +++ b/t/t6436-merge-overwrite.sh @@ -68,7 +68,8 @@ test_expect_success 'will not overwrite removed file' ' git commit -m "rm c1.c" && cp important c1.c && test_must_fail git merge c1a && - test_cmp important c1.c + test_cmp important c1.c && + rm c1.c # Do not leave untracked file in way of future tests ' test_expect_success 'will not overwrite re-added file' ' diff --git a/t/t7201-co.sh b/t/t7201-co.sh index 7f6e23a4bb..b7ba1c3268 100755 --- a/t/t7201-co.sh +++ b/t/t7201-co.sh @@ -585,6 +585,7 @@ test_expect_success 'checkout --conflict=diff3' ' ' test_expect_success 'failing checkout -b should not break working tree' ' + git clean -fd && # Remove untracked files in the way git reset --hard main && git symbolic-ref HEAD refs/heads/main && test_must_fail git checkout -b renamer side^ && diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh index deea88d443..f1463197b9 100755 --- a/t/t7519-status-fsmonitor.sh +++ b/t/t7519-status-fsmonitor.sh @@ -389,43 +389,47 @@ test_expect_success 'status succeeds after staging/unstaging' ' # If "!" is supplied, then we verify that we do not call ensure_full_index # during a call to 'git status'. Otherwise, we verify that we _do_ call it. check_sparse_index_behavior () { - git status --porcelain=v2 >expect && - git sparse-checkout init --cone --sparse-index && - git sparse-checkout set dir1 dir2 && + git -C full status --porcelain=v2 >expect && GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \ - git status --porcelain=v2 >actual && + git -C sparse status --porcelain=v2 >actual && test_region $1 index ensure_full_index trace2.txt && test_region fsm_hook query trace2.txt && test_cmp expect actual && - rm trace2.txt && - git sparse-checkout disable + rm trace2.txt } test_expect_success 'status succeeds with sparse index' ' - git reset --hard && + git clone . full && + git clone --sparse . sparse && + git -C sparse sparse-checkout init --cone --sparse-index && + git -C sparse sparse-checkout set dir1 dir2 && - test_config core.fsmonitor "$TEST_DIRECTORY/t7519/fsmonitor-all" && - check_sparse_index_behavior ! && - - write_script .git/hooks/fsmonitor-test<<-\EOF && + write_script .git/hooks/fsmonitor-test <<-\EOF && printf "last_update_token\0" EOF - git config core.fsmonitor .git/hooks/fsmonitor-test && + git -C full config core.fsmonitor ../.git/hooks/fsmonitor-test && + git -C sparse config core.fsmonitor ../.git/hooks/fsmonitor-test && check_sparse_index_behavior ! 
&& - write_script .git/hooks/fsmonitor-test<<-\EOF && + write_script .git/hooks/fsmonitor-test <<-\EOF && printf "last_update_token\0" printf "dir1/modified\0" EOF check_sparse_index_behavior ! && - cp -r dir1 dir1a && - git add dir1a && - git commit -m "add dir1a" && + git -C sparse sparse-checkout add dir1a && + + for repo in full sparse + do + cp -r $repo/dir1 $repo/dir1a && + git -C $repo add dir1a && + git -C $repo commit -m "add dir1a" || return 1 + done && + git -C sparse sparse-checkout set dir1 dir2 && # This one modifies outside the sparse-checkout definition # and hence we expect to expand the sparse-index. - write_script .git/hooks/fsmonitor-test<<-\EOF && + write_script .git/hooks/fsmonitor-test <<-\EOF && printf "last_update_token\0" printf "dir1a/modified\0" EOF diff --git a/t/t7600-merge.sh b/t/t7600-merge.sh index 2ef39d3088..c773e30b3f 100755 --- a/t/t7600-merge.sh +++ b/t/t7600-merge.sh @@ -717,6 +717,7 @@ test_expect_success 'failed fast-forward merge with --autostash' ' git reset --hard c0 && git merge-file file file.orig file.5 && cp file.5 other && + test_when_finished "rm other" && test_must_fail git merge --autostash c1 2>err && test_i18ngrep "Applied autostash." err && test_cmp file.5 file diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh index 25b235c063..98eda3bfeb 100755 --- a/t/t7700-repack.sh +++ b/t/t7700-repack.sh @@ -63,13 +63,14 @@ test_expect_success 'objects in packs marked .keep are not repacked' ' test_expect_success 'writing bitmaps via command-line can duplicate .keep objects' ' # build on $oid, $packid, and .keep state from previous - git repack -Adbl && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 git repack -Adbl && test_has_duplicate_object true ' test_expect_success 'writing bitmaps via config can duplicate .keep objects' ' # build on $oid, $packid, and .keep state from previous - git -c repack.writebitmaps=true repack -Adl && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -c repack.writebitmaps=true repack -Adl && test_has_duplicate_object true ' @@ -189,7 +190,9 @@ test_expect_success 'repack --keep-pack' ' test_expect_success 'bitmaps are created by default in bare repos' ' git clone --bare .git bare.git && - git -C bare.git repack -ad && + rm -f bare.git/objects/pack/*.bitmap && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -C bare.git repack -ad && bitmap=$(ls bare.git/objects/pack/*.bitmap) && test_path_is_file "$bitmap" ' @@ -200,7 +203,8 @@ test_expect_success 'incremental repack does not complain' ' ' test_expect_success 'bitmaps can be disabled on bare repos' ' - git -c repack.writeBitmaps=false -C bare.git repack -ad && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -c repack.writeBitmaps=false -C bare.git repack -ad && bitmap=$(ls bare.git/objects/pack/*.bitmap || :) && test -z "$bitmap" ' @@ -211,7 +215,8 @@ test_expect_success 'no bitmaps created if .keep files present' ' keep=${pack%.pack}.keep && test_when_finished "rm -f \"\$keep\"" && >"$keep" && - git -C bare.git repack -ad 2>stderr && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -C bare.git repack -ad 2>stderr && test_must_be_empty stderr && find bare.git/objects/pack/ -type f -name "*.bitmap" >actual && test_must_be_empty actual @@ -222,7 +227,8 @@ test_expect_success 'auto-bitmaps do not complain if unavailable' ' blob=$(test-tool genrandom big $((1024*1024)) | git -C bare.git hash-object -w --stdin) && git -C bare.git update-ref refs/tags/big $blob && - git -C bare.git repack -ad 2>stderr && + GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 \ + git -C bare.git 
repack -ad 2>stderr && test_must_be_empty stderr && find bare.git/objects/pack -type f -name "*.bitmap" >actual && test_must_be_empty actual diff --git a/t/t7800-difftool.sh b/t/t7800-difftool.sh index a173f564bc..528e0dabf0 100755 --- a/t/t7800-difftool.sh +++ b/t/t7800-difftool.sh @@ -674,7 +674,6 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' ' rm c && ln -s d c && cat >expect <<-EOF && - b c c @@ -710,7 +709,6 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' ' # Deleted symlinks rm -f c && cat >expect <<-EOF && - b c EOF @@ -723,6 +721,71 @@ test_expect_success SYMLINKS 'difftool --dir-diff handles modified symlinks' ' test_cmp expect actual ' +test_expect_success SYMLINKS 'difftool --dir-diff writes symlinks as raw text' ' + # Start out on a branch called "branch-init". + git init -b branch-init symlink-files && + ( + cd symlink-files && + # This test ensures that symlinks are written as raw text. + # The "cat" tools output link and file contents. + git config difftool.cat-left-link.cmd "cat \"\$LOCAL/link\"" && + git config difftool.cat-left-a.cmd "cat \"\$LOCAL/file-a\"" && + git config difftool.cat-right-link.cmd "cat \"\$REMOTE/link\"" && + git config difftool.cat-right-b.cmd "cat \"\$REMOTE/file-b\"" && + + # Record the empty initial state so that we can come back here + # later and not have to consider any cases where difftool + # will create symlinks back into the worktree. + test_tick && + git commit --allow-empty -m init && + + # Create a file called "file-a" with a symlink pointing to it. + git switch -c branch-a && + echo a >file-a && + ln -s file-a link && + git add file-a link && + test_tick && + git commit -m link-to-file-a && + + # Create a file called "file-b" and point the symlink to it. + git switch -c branch-b && + echo b >file-b && + rm link && + ln -s file-b link && + git add file-b link && + git rm file-a && + test_tick && + git commit -m link-to-file-b && + + # Checkout the initial branch so that the --symlinks behavior is + # not activated. The two directories should be completely + # independent with no symlinks pointing back here. + git switch branch-init && + + # The left link must be "file-a" and "file-a" must contain "a". + echo file-a >expect && + git difftool --symlinks --dir-diff --tool cat-left-link \ + branch-a branch-b >actual && + test_cmp expect actual && + + echo a >expect && + git difftool --symlinks --dir-diff --tool cat-left-a \ + branch-a branch-b >actual && + test_cmp expect actual && + + # The right link must be "file-b" and "file-b" must contain "b". + echo file-b >expect && + git difftool --symlinks --dir-diff --tool cat-right-link \ + branch-a branch-b >actual && + test_cmp expect actual && + + echo b >expect && + git difftool --symlinks --dir-diff --tool cat-right-b \ + branch-a branch-b >actual && + test_cmp expect actual + ) +' + test_expect_success 'add -N and difftool -d' ' test_when_finished git reset --hard && diff --git a/t/t7814-grep-recurse-submodules.sh b/t/t7814-grep-recurse-submodules.sh index 828cb3ba58..3172f5b936 100755 --- a/t/t7814-grep-recurse-submodules.sh +++ b/t/t7814-grep-recurse-submodules.sh @@ -8,6 +8,9 @@ submodules. .
./test-lib.sh +GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=1 +export GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB + test_expect_success 'setup directory structure and submodule' ' echo "(1|2)d(3|4)" >a && mkdir b && diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh index 58f46c77e6..6b4941980c 100755 --- a/t/t7900-maintenance.sh +++ b/t/t7900-maintenance.sh @@ -20,6 +20,17 @@ test_xmllint () { fi } +test_lazy_prereq SYSTEMD_ANALYZE ' + systemd-analyze verify /lib/systemd/system/basic.target +' + +test_systemd_analyze_verify () { + if test_have_prereq SYSTEMD_ANALYZE + then + systemd-analyze verify "$@" + fi +} + test_expect_success 'help text' ' test_expect_code 129 git maintenance -h 2>err && test_i18ngrep "usage: git maintenance <subcommand>" err && @@ -492,8 +503,21 @@ test_expect_success !MINGW 'register and unregister with regex metacharacters' ' maintenance.repo "$(pwd)/$META" ' +test_expect_success 'start --scheduler=<scheduler>' ' + test_expect_code 129 git maintenance start --scheduler=foo 2>err && + test_i18ngrep "unrecognized --scheduler argument" err && + + test_expect_code 129 git maintenance start --no-scheduler 2>err && + test_i18ngrep "unknown option" err && + + test_expect_code 128 \ + env GIT_TEST_MAINT_SCHEDULER="launchctl:true,schtasks:true" \ + git maintenance start --scheduler=crontab 2>err && + test_i18ngrep "fatal: crontab scheduler is not available" err +' + test_expect_success 'start from empty cron table' ' - GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start && + GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start --scheduler=crontab && # start registers the repo git config --get --global --fixed-value maintenance.repo "$(pwd)" && @@ -516,7 +540,7 @@ test_expect_success 'stop from existing schedule' ' test_expect_success 'start preserves existing schedule' ' echo "Important information!" >cron.txt && - GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start && + GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt" git maintenance start --scheduler=crontab && grep "Important information!" 
cron.txt ' @@ -545,7 +569,7 @@ test_expect_success 'start and stop macOS maintenance' ' EOF rm -f args && - GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start && + GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl && # start registers the repo git config --get --global --fixed-value maintenance.repo "$(pwd)" && @@ -582,6 +606,23 @@ test_expect_success 'start and stop macOS maintenance' ' test_line_count = 0 actual ' +test_expect_success 'use launchctl list to prevent extra work' ' + # ensure we are registered + GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl && + + # do it again on a fresh args file + rm -f args && + GIT_TEST_MAINT_SCHEDULER=launchctl:./print-args git maintenance start --scheduler=launchctl && + + ls "$HOME/Library/LaunchAgents" >actual && + cat >expect <<-\EOF && + list org.git-scm.git.hourly + list org.git-scm.git.daily + list org.git-scm.git.weekly + EOF + test_cmp expect args +' + test_expect_success 'start and stop Windows maintenance' ' write_script print-args <<-\EOF && echo $* >>args @@ -596,7 +637,7 @@ test_expect_success 'start and stop Windows maintenance' ' EOF rm -f args && - GIT_TEST_MAINT_SCHEDULER="schtasks:./print-args" git maintenance start && + GIT_TEST_MAINT_SCHEDULER="schtasks:./print-args" git maintenance start --scheduler=schtasks && # start registers the repo git config --get --global --fixed-value maintenance.repo "$(pwd)" && @@ -619,6 +660,83 @@ test_expect_success 'start and stop Windows maintenance' ' test_cmp expect args ' +test_expect_success 'start and stop Linux/systemd maintenance' ' + write_script print-args <<-\EOF && + printf "%s\n" "$*" >>args + EOF + + XDG_CONFIG_HOME="$PWD" && + export XDG_CONFIG_HOME && + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args" git maintenance start --scheduler=systemd-timer && + + # start registers the repo + git config --get --global --fixed-value maintenance.repo "$(pwd)" && + + test_systemd_analyze_verify "systemd/user/git-maintenance@.service" && + + printf -- "--user enable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args" git maintenance stop && + + # stop does not unregister the repo + git config --get --global --fixed-value maintenance.repo "$(pwd)" && + + test_path_is_missing "systemd/user/git-maintenance@.timer" && + test_path_is_missing "systemd/user/git-maintenance@.service" && + + printf -- "--user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + test_cmp expect args +' + +test_expect_success 'start and stop when several schedulers are available' ' + write_script print-args <<-\EOF && + printf "%s\n" "$*" | sed "s:gui/[0-9][0-9]*:gui/[UID]:; s:\(schtasks /create .* /xml\).*:\1:;" >>args + EOF + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=systemd-timer && + printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \ + hourly daily weekly >expect && + printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \ + hourly daily weekly >>expect && + printf -- "systemctl --user enable --now git-maintenance@%s.timer\n" hourly daily weekly >>expect && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args 
launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=launchctl && + printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \ + hourly daily weekly >>expect && + for frequency in hourly daily weekly + do + PLIST="$pfx/Library/LaunchAgents/org.git-scm.git.$frequency.plist" && + echo "launchctl bootout gui/[UID] $PLIST" >>expect && + echo "launchctl bootstrap gui/[UID] $PLIST" >>expect || return 1 + done && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance start --scheduler=schtasks && + printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \ + hourly daily weekly >>expect && + printf "schtasks /create /tn Git Maintenance (%s) /f /xml\n" \ + hourly daily weekly >>expect && + test_cmp expect args && + + rm -f args && + GIT_TEST_MAINT_SCHEDULER="systemctl:./print-args systemctl,launchctl:./print-args launchctl,schtasks:./print-args schtasks" git maintenance stop && + printf -- "systemctl --user disable --now git-maintenance@%s.timer\n" hourly daily weekly >expect && + printf "launchctl bootout gui/[UID] $pfx/Library/LaunchAgents/org.git-scm.git.%s.plist\n" \ + hourly daily weekly >>expect && + printf "schtasks /delete /tn Git Maintenance (%s) /f\n" \ + hourly daily weekly >>expect && + test_cmp expect args +' + test_expect_success 'register preserves existing strategy' ' git config maintenance.strategy none && git maintenance register && diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh index 57fc10e7f8..aa0c20499b 100755 --- a/t/t9001-send-email.sh +++ b/t/t9001-send-email.sh @@ -1533,6 +1533,21 @@ test_expect_success $PREREQ 'sendemail.8bitEncoding works' ' test_cmp content-type-decl actual ' +test_expect_success $PREREQ 'sendemail.8bitEncoding in .git/config overrides --global .gitconfig' ' + clean_fake_sendmail && + git config sendemail.assume8bitEncoding UTF-8 && + test_when_finished "rm -rf home" && + mkdir home && + git config -f home/.gitconfig sendemail.assume8bitEncoding "bogus too" && + echo bogus | + env HOME="$(pwd)/home" DEBUG=1 \ + git send-email --from=author@example.com --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + email-using-8bit >stdout && + egrep "Content|MIME" msgtxt1 >actual && + test_cmp content-type-decl actual +' + test_expect_success $PREREQ '--8bit-encoding overrides sendemail.8bitEncoding' ' clean_fake_sendmail && git config sendemail.assume8bitEncoding "bogus too" && @@ -2198,7 +2213,7 @@ test_expect_success $PREREQ 'leading and trailing whitespaces are removed' ' test_expect_success $PREREQ 'test using command name with --sendmail-cmd' ' clean_fake_sendmail && - PATH="$(pwd):$PATH" \ + PATH="$PWD:$PATH" \ git send-email \ --from="Example <nobody@example.com>" \ --to=nobody@example.com \ @@ -2227,6 +2242,51 @@ test_expect_success $PREREQ 'test shell expression with --sendmail-cmd' ' test_path_is_file commandline1 ' +test_expect_success $PREREQ 'set up in-reply-to/references patches' ' + cat >has-reply.patch <<-\EOF && + From: A U Thor <author@example.com> + Subject: patch with in-reply-to + Message-ID: <patch.with.in.reply.to@example.com> + In-Reply-To: <replied.to@example.com> + References: <replied.to@example.com> + + This is the body. 
+ EOF + cat >no-reply.patch <<-\EOF + From: A U Thor <author@example.com> + Subject: patch without in-reply-to + Message-ID: <patch.without.in.reply.to@example.com> + + This is the body. + EOF +' + +test_expect_success $PREREQ 'patch reply headers correct with --no-thread' ' + clean_fake_sendmail && + git send-email \ + --no-thread \ + --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + has-reply.patch no-reply.patch && + grep "In-Reply-To: <replied.to@example.com>" msgtxt1 && + grep "References: <replied.to@example.com>" msgtxt1 && + ! grep replied.to@example.com msgtxt2 +' + +test_expect_success $PREREQ 'cmdline in-reply-to used with --no-thread' ' + clean_fake_sendmail && + git send-email \ + --no-thread \ + --in-reply-to="<cmdline.reply@example.com>" \ + --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + has-reply.patch no-reply.patch && + grep "In-Reply-To: <cmdline.reply@example.com>" msgtxt1 && + grep "References: <cmdline.reply@example.com>" msgtxt1 && + grep "In-Reply-To: <cmdline.reply@example.com>" msgtxt2 && + grep "References: <cmdline.reply@example.com>" msgtxt2 +' + test_expect_success $PREREQ 'invoke hook' ' mkdir -p .git/hooks && diff --git a/t/t9002-column.sh b/t/t9002-column.sh index 89983527b6..6d3dbde3fe 100755 --- a/t/t9002-column.sh +++ b/t/t9002-column.sh @@ -42,6 +42,24 @@ EOF test_cmp expected actual ' +test_expect_success '--nl' ' + cat >expected <<\EOF && +oneZ +twoZ +threeZ +fourZ +fiveZ +sixZ +sevenZ +eightZ +nineZ +tenZ +elevenZ +EOF + git column --nl="Z$LF" --mode=plain <lista >actual && + test_cmp expected actual +' + test_expect_success '80 columns' ' cat >expected <<\EOF && one two three four five six seven eight nine ten eleven diff --git a/t/t9351-fast-export-anonymize.sh b/t/t9351-fast-export-anonymize.sh index 1c6e6fcdaf..77047e250d 100755 --- a/t/t9351-fast-export-anonymize.sh +++ b/t/t9351-fast-export-anonymize.sh @@ -18,7 +18,8 @@ test_expect_success 'setup simple repo' ' git update-index --add --cacheinfo 160000,$fake_commit,link1 && git update-index --add --cacheinfo 160000,$fake_commit,link2 && git commit -m "add gitlink" && - git tag -m "annotated tag" mytag + git tag -m "annotated tag" mytag && + git tag -m "annotated tag with long message" longtag ' test_expect_success 'export anonymized stream' ' @@ -55,7 +56,8 @@ test_expect_success 'stream retains other as refname' ' test_expect_success 'stream omits other refnames' ' ! grep main stream && - ! grep mytag stream + ! grep mytag stream && + ! grep longtag stream ' test_expect_success 'stream omits identities' ' @@ -118,9 +120,9 @@ test_expect_success 'identical gitlinks got identical oid' ' test_line_count = 1 commits ' -test_expect_success 'tag points to branch tip' ' +test_expect_success 'all tags point to branch tip' ' git rev-parse $other_branch >expect && - git for-each-ref --format="%(*objectname)" | grep . >actual && + git for-each-ref --format="%(*objectname)" | grep . 
| uniq >actual && test_cmp expect actual ' diff --git a/t/t9400-git-cvsserver-server.sh b/t/t9400-git-cvsserver-server.sh index 2d29d486ee..17f988edd2 100755 --- a/t/t9400-git-cvsserver-server.sh +++ b/t/t9400-git-cvsserver-server.sh @@ -36,6 +36,13 @@ CVSWORK="$PWD/cvswork" CVS_SERVER=git-cvsserver export CVSROOT CVS_SERVER +if perl -e 'exit(1) if not defined crypt("", "cv")' +then + PWDHASH='lac2ItudM3.KM' +else + PWDHASH='$2b$10$t8fGvE/a9eLmfOLzsZme2uOa2QtoMYwIxq9wZA6aBKtF1Yb7FJIzi' +fi + rm -rf "$CVSWORK" "$SERVERDIR" test_expect_success 'setup' ' git config push.default matching && @@ -54,7 +61,7 @@ test_expect_success 'setup' ' GIT_DIR="$SERVERDIR" git config --bool gitcvs.enabled true && GIT_DIR="$SERVERDIR" git config gitcvs.logfile "$SERVERDIR/gitcvs.log" && GIT_DIR="$SERVERDIR" git config gitcvs.authdb "$SERVERDIR/auth.db" && - echo cvsuser:cvGVEarMLnhlA > "$SERVERDIR/auth.db" + echo "cvsuser:$PWDHASH" >"$SERVERDIR/auth.db" ' # note that cvs doesn't accept absolute pathnames diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh index e28411bb75..eef2262a36 100644 --- a/t/test-lib-functions.sh +++ b/t/test-lib-functions.sh @@ -137,33 +137,110 @@ test_tick () { # Stop execution and start a shell. This is useful for debugging tests. # # Be sure to remove all invocations of this command before submitting. +# WARNING: the shell invoked by this helper does not have the same environment +# as the one running the tests (shell variables and functions are not +# available, and the options below further modify the environment). As such, +# commands copied from a test script might behave differently than when +# running the test. +# +# Usage: test_pause [options] +# -t +# Use your original TERM instead of test-lib.sh's "dumb". +# This usually restores color output in the invoked shell. +# -s +# Invoke $SHELL instead of $TEST_SHELL_PATH. +# -h +# Use your original HOME instead of test-lib.sh's "$TRASH_DIRECTORY". +# This allows you to use your regular shell environment and Git aliases. +# CAUTION: running commands copied from a test script into the paused shell +# might result in files in your HOME being overwritten. +# -a +# Shortcut for -t -s -h test_pause () { - "$SHELL_PATH" <&6 >&5 2>&7 + PAUSE_TERM=$TERM && + PAUSE_SHELL=$TEST_SHELL_PATH && + PAUSE_HOME=$HOME && + while test $# != 0 + do + case "$1" in + -t) + PAUSE_TERM="$USER_TERM" + ;; + -s) + PAUSE_SHELL="$SHELL" + ;; + -h) + PAUSE_HOME="$USER_HOME" + ;; + -a) + PAUSE_TERM="$USER_TERM" + PAUSE_SHELL="$SHELL" + PAUSE_HOME="$USER_HOME" + ;; + *) + break + ;; + esac + shift + done && + TERM="$PAUSE_TERM" HOME="$PAUSE_HOME" "$PAUSE_SHELL" <&6 >&5 2>&7 } # Wrap git with a debugger. Adding this to a command can make it easier # to understand what is going on in a failing test. # +# Usage: debug [options] <git command> +# -d <debugger> +# --debugger=<debugger> +# Use <debugger> instead of GDB +# -t +# Use your original TERM instead of test-lib.sh's "dumb". +# This usually restores color output in the debugger. +# WARNING: the command being debugged might behave differently than when +# running the test. 
+# # Examples: # debug git checkout master # debug --debugger=nemiver git $ARGS # debug -d "valgrind --tool=memcheck --track-origins=yes" git $ARGS debug () { - case "$1" in - -d) - GIT_DEBUGGER="$2" && - shift 2 - ;; - --debugger=*) - GIT_DEBUGGER="${1#*=}" && - shift 1 - ;; - *) - GIT_DEBUGGER=1 - ;; - esac && - GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7 + GIT_DEBUGGER=1 && + DEBUG_TERM=$TERM && + while test $# != 0 + do + case "$1" in + -t) + DEBUG_TERM="$USER_TERM" + ;; + -d) + GIT_DEBUGGER="$2" && + shift + ;; + --debugger=*) + GIT_DEBUGGER="${1#*=}" + ;; + *) + break + ;; + esac + shift + done && + + dotfiles=".gdbinit .lldbinit" + + for dotfile in $dotfiles + do + dotfile="$USER_HOME/$dotfile" && + test -f "$dotfile" && cp "$dotfile" "$HOME" || : + done && + + TERM="$DEBUG_TERM" GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7 && + + for dotfile in $dotfiles + do + rm -f "$HOME/$dotfile" + done } # Usage: test_commit [options] <message> [<file> [<contents> [<tag>]]] diff --git a/t/test-lib.sh b/t/test-lib.sh index 3b7acfec23..8361b5c1c5 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -534,7 +534,7 @@ SQ=\' # when case-folding filenames u200c=$(printf '\342\200\214') -export _x05 _x35 _x40 _z40 LF u200c EMPTY_TREE EMPTY_BLOB ZERO_OID OID_REGEX +export _x05 _x35 LF u200c EMPTY_TREE EMPTY_BLOB ZERO_OID OID_REGEX # Each test should start with something like this, after copyright notices: # @@ -585,8 +585,9 @@ else } fi +USER_TERM="$TERM" TERM=dumb -export TERM +export TERM USER_TERM error () { say_color error "error: $*" @@ -1343,7 +1344,8 @@ fi GIT_TEMPLATE_DIR="$GIT_BUILD_DIR"/templates/blt GIT_CONFIG_NOSYSTEM=1 GIT_ATTR_NOSYSTEM=1 -export PATH GIT_EXEC_PATH GIT_TEMPLATE_DIR GIT_CONFIG_NOSYSTEM GIT_ATTR_NOSYSTEM +GIT_CEILING_DIRECTORIES="$TRASH_DIRECTORY/.." +export PATH GIT_EXEC_PATH GIT_TEMPLATE_DIR GIT_CONFIG_NOSYSTEM GIT_ATTR_NOSYSTEM GIT_CEILING_DIRECTORIES if test -z "$GIT_TEST_CMP" then @@ -1400,9 +1402,10 @@ then fi # Last-minute variable setup +USER_HOME="$HOME" HOME="$TRASH_DIRECTORY" GNUPGHOME="$HOME/gnupg-home-not-used" -export HOME GNUPGHOME +export HOME GNUPGHOME USER_HOME # Test repository rm -fr "$TRASH_DIRECTORY" || { @@ -1442,10 +1445,9 @@ then fi # Convenience -# A regexp to match 5, 35 and 40 hexdigits +# A regexp to match 5 and 35 hexdigits _x05='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]' _x35="$_x05$_x05$_x05$_x05$_x05$_x05$_x05" -_x40="$_x35$_x05" test_oid_init @@ -1454,7 +1456,6 @@ OID_REGEX=$(echo $ZERO_OID | sed -e 's/0/[0-9a-f]/g') OIDPATH_REGEX=$(test_oid_to_path $ZERO_OID | sed -e 's/0/[0-9a-f]/g') EMPTY_TREE=$(test_oid empty_tree) EMPTY_BLOB=$(test_oid empty_blob) -_z40=$ZERO_OID # Provide an implementation of the 'yes' utility; the upper bound # limit is there to help Windows that cannot stop this loop from @@ -350,7 +350,7 @@ void trace2_def_repo_fl(const char *file, int line, struct repository *repo); * being started, such as "read_recursive" or "do_read_index". * * The `repo` field, if set, will be used to get the "repo-id", so that - * recursive oerations can be attributed to the correct repository. + * recursive operations can be attributed to the correct repository. 
*/ void trace2_region_enter_fl(const char *file, int line, const char *category, const char *label, const struct repository *repo, ...); diff --git a/trace2/tr2_tls.c b/trace2/tr2_tls.c index 067c23755f..7da94aba52 100644 --- a/trace2/tr2_tls.c +++ b/trace2/tr2_tls.c @@ -95,6 +95,7 @@ void tr2tls_unset_self(void) pthread_setspecific(tr2tls_key, NULL); + strbuf_release(&ctx->thread_name); free(ctx->array_us_start); free(ctx); } diff --git a/transport-helper.c b/transport-helper.c index 4be035edb8..e8dbdd1153 100644 --- a/transport-helper.c +++ b/transport-helper.c @@ -671,8 +671,8 @@ static int connect_helper(struct transport *transport, const char *name, static struct ref *get_refs_list_using_list(struct transport *transport, int for_push); -static int fetch(struct transport *transport, - int nr_heads, struct ref **to_fetch) +static int fetch_refs(struct transport *transport, + int nr_heads, struct ref **to_fetch) { struct helper_data *data = transport->data; int i, count; @@ -681,7 +681,7 @@ static int fetch(struct transport *transport, if (process_connect(transport, 0)) { do_take_over(transport); - return transport->vtable->fetch(transport, nr_heads, to_fetch); + return transport->vtable->fetch_refs(transport, nr_heads, to_fetch); } /* @@ -1261,12 +1261,12 @@ static struct ref *get_refs_list_using_list(struct transport *transport, } static struct transport_vtable vtable = { - set_helper_option, - get_refs_list, - fetch, - push_refs, - connect_helper, - release_helper + .set_option = set_helper_option, + .get_refs_list = get_refs_list, + .fetch_refs = fetch_refs, + .push_refs = push_refs, + .connect = connect_helper, + .disconnect = release_helper }; int transport_helper_init(struct transport *transport, const char *name) diff --git a/transport-internal.h b/transport-internal.h index b60f1ba907..c4ca0b733a 100644 --- a/transport-internal.h +++ b/transport-internal.h @@ -34,7 +34,7 @@ struct transport_vtable { * get_refs_list(), it should set the old_sha1 fields in the * provided refs now. **/ - int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs); + int (*fetch_refs)(struct transport *transport, int refs_nr, struct ref **refs); /** * Push the objects and refs. Send the necessary objects, and diff --git a/transport.c b/transport.c index 17e9629710..b37664ba87 100644 --- a/transport.c +++ b/transport.c @@ -162,12 +162,16 @@ static int fetch_refs_from_bundle(struct transport *transport, int nr_heads, struct ref **to_fetch) { struct bundle_transport_data *data = transport->data; + struct strvec extra_index_pack_args = STRVEC_INIT; int ret; + if (transport->progress) + strvec_push(&extra_index_pack_args, "-v"); + if (!data->get_refs_from_bundle_called) get_refs_from_bundle(transport, 0, NULL); ret = unbundle(the_repository, &data->header, data->fd, - transport->progress ? 
BUNDLE_VERBOSE : 0); + &extra_index_pack_args); transport->hash_algo = data->header.hash_algo; return ret; } @@ -883,12 +887,10 @@ static int disconnect_git(struct transport *transport) } static struct transport_vtable taken_over_vtable = { - NULL, - get_refs_via_connect, - fetch_refs_via_pack, - git_transport_push, - NULL, - disconnect_git + .get_refs_list = get_refs_via_connect, + .fetch_refs = fetch_refs_via_pack, + .push_refs = git_transport_push, + .disconnect = disconnect_git }; void transport_take_over(struct transport *transport, @@ -1032,21 +1034,17 @@ void transport_check_allowed(const char *type) } static struct transport_vtable bundle_vtable = { - NULL, - get_refs_from_bundle, - fetch_refs_from_bundle, - NULL, - NULL, - close_bundle + .get_refs_list = get_refs_from_bundle, + .fetch_refs = fetch_refs_from_bundle, + .disconnect = close_bundle }; static struct transport_vtable builtin_smart_vtable = { - NULL, - get_refs_via_connect, - fetch_refs_via_pack, - git_transport_push, - connect_git, - disconnect_git + .get_refs_list = get_refs_via_connect, + .fetch_refs = fetch_refs_via_pack, + .push_refs = git_transport_push, + .connect = connect_git, + .disconnect = disconnect_git }; struct transport *transport_get(struct remote *remote, const char *url) @@ -1453,7 +1451,7 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs) heads[nr_heads++] = rm; } - rc = transport->vtable->fetch(transport, nr_heads, heads); + rc = transport->vtable->fetch_refs(transport, nr_heads, heads); free(heads); return rc; diff --git a/tree-diff.c b/tree-diff.c index 1572615bd9..437c98a70e 100644 --- a/tree-diff.c +++ b/tree-diff.c @@ -21,7 +21,9 @@ ALLOC_ARRAY((x), nr); \ } while(0) #define FAST_ARRAY_FREE(x, nr) do { \ - if ((nr) > 2) \ + if ((nr) <= 2) \ + xalloca_free((x)); \ + else \ free((x)); \ } while(0) diff --git a/unicode-width.h b/unicode-width.h index b50e686bae..97c851b27d 100644 --- a/unicode-width.h +++ b/unicode-width.h @@ -26,7 +26,9 @@ static const struct interval zero_width[] = { { 0x0825, 0x0827 }, { 0x0829, 0x082D }, { 0x0859, 0x085B }, -{ 0x08D3, 0x0902 }, +{ 0x0890, 0x0891 }, +{ 0x0898, 0x089F }, +{ 0x08CA, 0x0902 }, { 0x093A, 0x093A }, { 0x093C, 0x093C }, { 0x0941, 0x0948 }, @@ -66,6 +68,7 @@ static const struct interval zero_width[] = { { 0x0BCD, 0x0BCD }, { 0x0C00, 0x0C00 }, { 0x0C04, 0x0C04 }, +{ 0x0C3C, 0x0C3C }, { 0x0C3E, 0x0C40 }, { 0x0C46, 0x0C48 }, { 0x0C4A, 0x0C4D }, @@ -116,7 +119,7 @@ static const struct interval zero_width[] = { { 0x1160, 0x11FF }, { 0x135D, 0x135F }, { 0x1712, 0x1714 }, -{ 0x1732, 0x1734 }, +{ 0x1732, 0x1733 }, { 0x1752, 0x1753 }, { 0x1772, 0x1773 }, { 0x17B4, 0x17B5 }, @@ -124,7 +127,7 @@ static const struct interval zero_width[] = { { 0x17C6, 0x17C6 }, { 0x17C9, 0x17D3 }, { 0x17DD, 0x17DD }, -{ 0x180B, 0x180E }, +{ 0x180B, 0x180F }, { 0x1885, 0x1886 }, { 0x18A9, 0x18A9 }, { 0x1920, 0x1922 }, @@ -140,7 +143,7 @@ static const struct interval zero_width[] = { { 0x1A65, 0x1A6C }, { 0x1A73, 0x1A7C }, { 0x1A7F, 0x1A7F }, -{ 0x1AB0, 0x1AC0 }, +{ 0x1AB0, 0x1ACE }, { 0x1B00, 0x1B03 }, { 0x1B34, 0x1B34 }, { 0x1B36, 0x1B3A }, @@ -163,8 +166,7 @@ static const struct interval zero_width[] = { { 0x1CED, 0x1CED }, { 0x1CF4, 0x1CF4 }, { 0x1CF8, 0x1CF9 }, -{ 0x1DC0, 0x1DF9 }, -{ 0x1DFB, 0x1DFF }, +{ 0x1DC0, 0x1DFF }, { 0x200B, 0x200F }, { 0x202A, 0x202E }, { 0x2060, 0x2064 }, @@ -227,12 +229,16 @@ static const struct interval zero_width[] = { { 0x10D24, 0x10D27 }, { 0x10EAB, 0x10EAC }, { 0x10F46, 0x10F50 }, +{ 0x10F82, 0x10F85 }, { 0x11001, 0x11001 
}, { 0x11038, 0x11046 }, +{ 0x11070, 0x11070 }, +{ 0x11073, 0x11074 }, { 0x1107F, 0x11081 }, { 0x110B3, 0x110B6 }, { 0x110B9, 0x110BA }, { 0x110BD, 0x110BD }, +{ 0x110C2, 0x110C2 }, { 0x110CD, 0x110CD }, { 0x11100, 0x11102 }, { 0x11127, 0x1112B }, @@ -315,6 +321,8 @@ static const struct interval zero_width[] = { { 0x16FE4, 0x16FE4 }, { 0x1BC9D, 0x1BC9E }, { 0x1BCA0, 0x1BCA3 }, +{ 0x1CF00, 0x1CF2D }, +{ 0x1CF30, 0x1CF46 }, { 0x1D167, 0x1D169 }, { 0x1D173, 0x1D182 }, { 0x1D185, 0x1D18B }, @@ -332,6 +340,7 @@ static const struct interval zero_width[] = { { 0x1E023, 0x1E024 }, { 0x1E026, 0x1E02A }, { 0x1E130, 0x1E136 }, +{ 0x1E2AE, 0x1E2AE }, { 0x1E2EC, 0x1E2EF }, { 0x1E8D0, 0x1E8D6 }, { 0x1E944, 0x1E94A }, @@ -404,7 +413,10 @@ static const struct interval double_width[] = { { 0x17000, 0x187F7 }, { 0x18800, 0x18CD5 }, { 0x18D00, 0x18D08 }, -{ 0x1B000, 0x1B11E }, +{ 0x1AFF0, 0x1AFF3 }, +{ 0x1AFF5, 0x1AFFB }, +{ 0x1AFFD, 0x1AFFE }, +{ 0x1B000, 0x1B122 }, { 0x1B150, 0x1B152 }, { 0x1B164, 0x1B167 }, { 0x1B170, 0x1B2FB }, @@ -439,21 +451,23 @@ static const struct interval double_width[] = { { 0x1F6CC, 0x1F6CC }, { 0x1F6D0, 0x1F6D2 }, { 0x1F6D5, 0x1F6D7 }, +{ 0x1F6DD, 0x1F6DF }, { 0x1F6EB, 0x1F6EC }, { 0x1F6F4, 0x1F6FC }, { 0x1F7E0, 0x1F7EB }, +{ 0x1F7F0, 0x1F7F0 }, { 0x1F90C, 0x1F93A }, { 0x1F93C, 0x1F945 }, -{ 0x1F947, 0x1F978 }, -{ 0x1F97A, 0x1F9CB }, -{ 0x1F9CD, 0x1F9FF }, +{ 0x1F947, 0x1F9FF }, { 0x1FA70, 0x1FA74 }, -{ 0x1FA78, 0x1FA7A }, +{ 0x1FA78, 0x1FA7C }, { 0x1FA80, 0x1FA86 }, -{ 0x1FA90, 0x1FAA8 }, -{ 0x1FAB0, 0x1FAB6 }, -{ 0x1FAC0, 0x1FAC2 }, -{ 0x1FAD0, 0x1FAD6 }, +{ 0x1FA90, 0x1FAAC }, +{ 0x1FAB0, 0x1FABA }, +{ 0x1FAC0, 0x1FAC5 }, +{ 0x1FAD0, 0x1FAD9 }, +{ 0x1FAE0, 0x1FAE7 }, +{ 0x1FAF0, 0x1FAF6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD } }; diff --git a/unpack-trees.c b/unpack-trees.c index 5786645f31..8ea0a542da 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -111,17 +111,17 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, strvec_init(&opts->msgs_to_free); if (!strcmp(cmd, "checkout")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("Your local changes to the following files would be overwritten by checkout:\n%%s" "Please commit your changes or stash them before you switch branches.") : _("Your local changes to the following files would be overwritten by checkout:\n%%s"); else if (!strcmp(cmd, "merge")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("Your local changes to the following files would be overwritten by merge:\n%%s" "Please commit your changes or stash them before you merge.") : _("Your local changes to the following files would be overwritten by merge:\n%%s"); else - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("Your local changes to the following files would be overwritten by %s:\n%%s" "Please commit your changes or stash them before you %s.") : _("Your local changes to the following files would be overwritten by %s:\n%%s"); @@ -132,17 +132,17 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, _("Updating the following directories would lose untracked files in them:\n%s"); if (!strcmp(cmd, "checkout")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? 
_("The following untracked working tree files would be removed by checkout:\n%%s" "Please move or remove them before you switch branches.") : _("The following untracked working tree files would be removed by checkout:\n%%s"); else if (!strcmp(cmd, "merge")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be removed by merge:\n%%s" "Please move or remove them before you merge.") : _("The following untracked working tree files would be removed by merge:\n%%s"); else - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be removed by %s:\n%%s" "Please move or remove them before you %s.") : _("The following untracked working tree files would be removed by %s:\n%%s"); @@ -150,17 +150,17 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd); if (!strcmp(cmd, "checkout")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be overwritten by checkout:\n%%s" "Please move or remove them before you switch branches.") : _("The following untracked working tree files would be overwritten by checkout:\n%%s"); else if (!strcmp(cmd, "merge")) - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be overwritten by merge:\n%%s" "Please move or remove them before you merge.") : _("The following untracked working tree files would be overwritten by merge:\n%%s"); else - msg = advice_commit_before_merge + msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE) ? _("The following untracked working tree files would be overwritten by %s:\n%%s" "Please move or remove them before you %s.") : _("The following untracked working tree files would be overwritten by %s:\n%%s"); @@ -479,7 +479,7 @@ static int check_updates(struct unpack_trees_options *o, errs |= run_parallel_checkout(&state, pc_workers, pc_threshold, progress, &cnt); stop_progress(&progress); - errs |= finish_delayed_checkout(&state, NULL); + errs |= finish_delayed_checkout(&state, NULL, o->verbose_update); git_attr_set_direction(GIT_ATTR_CHECKIN); if (o->clone) @@ -1255,7 +1255,7 @@ static int sparse_dir_matches_path(const struct cache_entry *ce, static struct cache_entry *find_cache_entry(struct traverse_info *info, const struct name_entry *p) { - struct cache_entry *ce; + const char *path; int pos = find_cache_pos(info, p->path, p->pathlen); struct unpack_trees_options *o = info->data; @@ -1281,9 +1281,11 @@ static struct cache_entry *find_cache_entry(struct traverse_info *info, * paths (e.g. "subdir-"). 
*/ while (pos >= 0) { - ce = o->src_index->cache[pos]; + struct cache_entry *ce = o->src_index->cache[pos]; - if (strncmp(ce->name, p->path, p->pathlen)) + if (!skip_prefix(ce->name, info->traverse_path, &path) || + strncmp(path, p->path, p->pathlen) || + path[p->pathlen] != '/') return NULL; if (S_ISSPARSEDIR(ce->ce_mode) && diff --git a/upload-pack.c b/upload-pack.c index 297b76fcb4..c78d55bc67 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -1207,14 +1207,14 @@ static int send_ref(const char *refname, const struct object_id *oid, format_symref_info(&symref_info, &data->symref); format_session_id(&session_id, data); - packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s%s object-format=%s agent=%s\n", + packet_fwrite_fmt(stdout, "%s %s%c%s%s%s%s%s%s%s object-format=%s agent=%s\n", oid_to_hex(oid), refname_nons, 0, capabilities, (data->allow_uor & ALLOW_TIP_SHA1) ? " allow-tip-sha1-in-want" : "", (data->allow_uor & ALLOW_REACHABLE_SHA1) ? " allow-reachable-sha1-in-want" : "", - data->stateless_rpc ? " no-done" : "", + data->no_done ? " no-done" : "", symref_info.buf, data->allow_filter ? " filter" : "", session_id.buf, @@ -1223,11 +1223,11 @@ static int send_ref(const char *refname, const struct object_id *oid, strbuf_release(&symref_info); strbuf_release(&session_id); } else { - packet_write_fmt(1, "%s %s\n", oid_to_hex(oid), refname_nons); + packet_fwrite_fmt(stdout, "%s %s\n", oid_to_hex(oid), refname_nons); } capabilities = NULL; if (!peel_iterated_oid(oid, &peeled)) - packet_write_fmt(1, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons); + packet_fwrite_fmt(stdout, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons); return 0; } @@ -1329,7 +1329,8 @@ static int upload_pack_config(const char *var, const char *value, void *cb_data) return parse_hide_refs_config(var, value, "uploadpack"); } -void upload_pack(struct upload_pack_options *options) +void upload_pack(const int advertise_refs, const int stateless_rpc, + const int timeout) { struct packet_reader reader; struct upload_pack_data data; @@ -1338,16 +1339,24 @@ void upload_pack(struct upload_pack_options *options) git_config(upload_pack_config, &data); - data.stateless_rpc = options->stateless_rpc; - data.daemon_mode = options->daemon_mode; - data.timeout = options->timeout; + data.stateless_rpc = stateless_rpc; + data.timeout = timeout; + if (data.timeout) + data.daemon_mode = 1; head_ref_namespaced(find_symref, &data.symref); - if (options->advertise_refs || !data.stateless_rpc) { + if (advertise_refs || !data.stateless_rpc) { reset_timeout(data.timeout); + if (advertise_refs) + data.no_done = 1; head_ref_namespaced(send_ref, &data); for_each_namespaced_ref(send_ref, &data); + /* + * fflush stdout before calling advertise_shallow_grafts because send_ref + * uses stdio. 
+         */
+        fflush_or_die(stdout);
         advertise_shallow_grafts(1);
         packet_flush(1);
     } else {
@@ -1355,7 +1364,7 @@ void upload_pack(struct upload_pack_options *options)
         for_each_namespaced_ref(check_ref, NULL);
     }
 
-    if (!options->advertise_refs) {
+    if (!advertise_refs) {
         packet_reader_init(&reader, 0, NULL, 0,
                            PACKET_READ_CHOMP_NEWLINE |
                            PACKET_READ_DIE_ON_ERR_PACKET);
@@ -1417,21 +1426,25 @@ static int parse_want_ref(struct packet_writer *writer, const char *line,
                           struct string_list *wanted_refs,
                           struct object_array *want_obj)
 {
-    const char *arg;
-    if (skip_prefix(line, "want-ref ", &arg)) {
+    const char *refname_nons;
+    if (skip_prefix(line, "want-ref ", &refname_nons)) {
         struct object_id oid;
         struct string_list_item *item;
         struct object *o;
+        struct strbuf refname = STRBUF_INIT;
 
-        if (read_ref(arg, &oid)) {
-            packet_writer_error(writer, "unknown ref %s", arg);
-            die("unknown ref %s", arg);
+        strbuf_addf(&refname, "%s%s", get_git_namespace(), refname_nons);
+        if (ref_is_hidden(refname_nons, refname.buf) ||
+            read_ref(refname.buf, &oid)) {
+            packet_writer_error(writer, "unknown ref %s", refname_nons);
+            die("unknown ref %s", refname_nons);
         }
+        strbuf_release(&refname);
 
-        item = string_list_append(wanted_refs, arg);
+        item = string_list_append(wanted_refs, refname_nons);
         item->util = oiddup(&oid);
 
-        o = parse_object_or_die(&oid, arg);
+        o = parse_object_or_die(&oid, refname_nons);
         if (!(o->flags & WANTED)) {
             o->flags |= WANTED;
             add_object_array(o, NULL, want_obj);
@@ -1655,8 +1668,7 @@ enum fetch_state {
     FETCH_DONE,
 };
 
-int upload_pack_v2(struct repository *r, struct strvec *keys,
-                   struct packet_reader *request)
+int upload_pack_v2(struct repository *r, struct packet_reader *request)
 {
     enum fetch_state state = FETCH_PROCESS_ARGS;
     struct upload_pack_data data;
diff --git a/upload-pack.h b/upload-pack.h
index 27ddcdc6cb..d6ee25ea98 100644
--- a/upload-pack.h
+++ b/upload-pack.h
@@ -1,20 +1,12 @@
 #ifndef UPLOAD_PACK_H
 #define UPLOAD_PACK_H
 
-struct upload_pack_options {
-    int stateless_rpc;
-    int advertise_refs;
-    unsigned int timeout;
-    int daemon_mode;
-};
-
-void upload_pack(struct upload_pack_options *options);
+void upload_pack(const int advertise_refs, const int stateless_rpc,
+                 const int timeout);
 
 struct repository;
-struct strvec;
 struct packet_reader;
-int upload_pack_v2(struct repository *r, struct strvec *keys,
-                   struct packet_reader *request);
+int upload_pack_v2(struct repository *r, struct packet_reader *request);
 
 struct strbuf;
 int upload_pack_advertise(struct repository *r,
diff --git a/userdiff.c b/userdiff.c
index b073e5563b..af02b1878c 100644
--- a/userdiff.c
+++ b/userdiff.c
@@ -228,7 +228,7 @@ PATTERNS("perl",
      "|<<|<>|<=>|>>"),
 PATTERNS("php",
      "^[\t ]*(((public|protected|private|static|abstract|final)[\t ]+)*function.*)$\n"
-     "^[\t ]*((((final|abstract)[\t ]+)?class|interface|trait).*)$",
+     "^[\t ]*((((final|abstract)[\t ]+)?class|enum|interface|trait).*)$",
      /* -- */
      "[a-zA-Z_][a-zA-Z0-9_]*"
      "|[-+0-9.e]+|0[xXbB]?[0-9a-fA-F]+"
diff --git a/wrapper.c b/wrapper.c
--- a/wrapper.c
+++ b/wrapper.c
@@ -145,6 +145,18 @@ void *xcalloc(size_t nmemb, size_t size)
     return ret;
 }
 
+void xsetenv(const char *name, const char *value, int overwrite)
+{
+    if (setenv(name, value, overwrite))
+        die_errno(_("could not setenv '%s'"), name ? name : "(null)");
+}
+
+void xunsetenv(const char *name)
+{
+    if (unsetenv(name))
+        die_errno(_("could not unsetenv '%s'"), name ? name : "(null)");
+}
+
 /*
  * Limit size of IO chunks, because huge chunks only cause pain.  OS X
 * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
@@ -193,7 +205,9 @@ int xopen(const char *path, int oflag, ...)
         if (errno == EINTR)
             continue;
 
-        if ((oflag & O_RDWR) == O_RDWR)
+        if ((oflag & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+            die_errno(_("unable to create '%s'"), path);
+        else if ((oflag & O_RDWR) == O_RDWR)
             die_errno(_("could not open '%s' for reading and writing"), path);
         else if ((oflag & O_WRONLY) == O_WRONLY)
             die_errno(_("could not open '%s' for writing"), path);
diff --git a/write-or-die.c b/write-or-die.c
index d33e68f6ab..0b1ec8190b 100644
--- a/write-or-die.c
+++ b/write-or-die.c
@@ -70,3 +70,15 @@ void write_or_die(int fd, const void *buf, size_t count)
         die_errno("write error");
     }
 }
+
+void fwrite_or_die(FILE *f, const void *buf, size_t count)
+{
+    if (fwrite(buf, 1, count, f) != count)
+        die_errno("fwrite error");
+}
+
+void fflush_or_die(FILE *f)
+{
+    if (fflush(f))
+        die_errno("fflush error");
+}
diff --git a/wt-status.c b/wt-status.c
index eaed30eafb..e4f29b2b4c 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -787,7 +787,7 @@ static void wt_status_collect_untracked(struct wt_status *s)
 
     dir_clear(&dir);
 
-    if (advice_status_u_option)
+    if (advice_enabled(ADVICE_STATUS_U_OPTION))
         s->untracked_in_ms = (getnanotime() - t_begin) / 1000000;
 }
 
@@ -1158,7 +1158,7 @@ static void wt_longstatus_print_tracking(struct wt_status *s)
     if (!format_tracking_info(branch, &sb, s->ahead_behind_flags))
         return;
 
-    if (advice_status_ahead_behind_warning &&
+    if (advice_enabled(ADVICE_STATUS_AHEAD_BEHIND_WARNING) &&
         s->ahead_behind_flags == AHEAD_BEHIND_FULL) {
         uint64_t t_delta_in_ms = (getnanotime() - t_begin) / 1000000;
         if (t_delta_in_ms > AB_DELAY_WARNING_IN_MS) {
@@ -1845,7 +1845,7 @@ static void wt_longstatus_print(struct wt_status *s)
         wt_longstatus_print_other(s, &s->untracked, _("Untracked files"), "add");
         if (s->show_ignored_mode)
             wt_longstatus_print_other(s, &s->ignored, _("Ignored files"), "add -f");
-        if (advice_status_u_option && 2000 < s->untracked_in_ms) {
+        if (advice_enabled(ADVICE_STATUS_U_OPTION) && 2000 < s->untracked_in_ms) {
             status_printf_ln(s, GIT_COLOR_NORMAL, "%s", "");
             status_printf_ln(s, GIT_COLOR_NORMAL,
                              _("It took %.2f seconds to enumerate untracked files. 'status -uno'\n"