241 files changed, 6395 insertions, 2490 deletions
diff --git a/.clang-format b/.clang-format index 611ab4750b..12a89f95f9 100644 --- a/.clang-format +++ b/.clang-format @@ -163,7 +163,7 @@ PenaltyBreakComment: 10 PenaltyBreakFirstLessLess: 0 PenaltyBreakString: 10 PenaltyExcessCharacter: 100 -PenaltyReturnTypeOnItsOwnLine: 5 +PenaltyReturnTypeOnItsOwnLine: 60 # Don't sort #include's SortIncludes: false diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines index c4cb5ff0d4..48aa4edfbd 100644 --- a/Documentation/CodingGuidelines +++ b/Documentation/CodingGuidelines @@ -386,6 +386,11 @@ For C programs: - Use Git's gettext wrappers to make the user interface translatable. See "Marking strings for translation" in po/README. + - Variables and functions local to a given source file should be marked + with "static". Variables that are visible to other source files + must be declared with "extern" in header files. However, function + declarations should not use "extern", as that is already the default. + For Perl programs: - Most of the C guidelines above apply. diff --git a/Documentation/Makefile b/Documentation/Makefile index 4ae9ba5c86..6232143cb9 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -72,6 +72,7 @@ TECH_DOCS += SubmittingPatches TECH_DOCS += technical/hash-function-transition TECH_DOCS += technical/http-protocol TECH_DOCS += technical/index-format +TECH_DOCS += technical/long-running-process-protocol TECH_DOCS += technical/pack-format TECH_DOCS += technical/pack-heuristics TECH_DOCS += technical/pack-protocol diff --git a/Documentation/RelNotes/2.16.2.txt b/Documentation/RelNotes/2.16.2.txt new file mode 100644 index 0000000000..a216466d3d --- /dev/null +++ b/Documentation/RelNotes/2.16.2.txt @@ -0,0 +1,30 @@ +Git v2.16.2 Release Notes +========================= + +Fixes since v2.16.1 +------------------- + + * An old regression in "git describe --all $annotated_tag^0" has been + fixed. + + * "git svn dcommit" did not take into account the fact that a + svn+ssh:// URL with a username@ (typically used for pushing) refers + to the same SVN repository without the username@ and failed when + svn.pushmergeinfo option is set. + + * "git merge -Xours/-Xtheirs" learned to use our/their version when + resolving a conflicting updates to a symbolic link. + + * "git clone $there $here" is allowed even when here directory exists + as long as it is an empty directory, but the command incorrectly + removed it upon a failure of the operation. + + * "git stash -- <pathspec>" incorrectly blew away untracked files in + the directory that matched the pathspec, which has been corrected. + + * "git add -p" was taught to ignore local changes to submodules as + they do not interfere with the partial addition of regular changes + anyway. + + +Also contains various documentation updates and code clean-ups. diff --git a/Documentation/RelNotes/2.17.0.txt b/Documentation/RelNotes/2.17.0.txt index 759e75fbde..ae7c708574 100644 --- a/Documentation/RelNotes/2.17.0.txt +++ b/Documentation/RelNotes/2.17.0.txt @@ -8,17 +8,86 @@ UI, Workflows & Features * "diff" family of commands learned "--find-object=<object-id>" option to limit the findings to changes that involve the named object. - (merge 4d8c51aa19 sb/diff-blobfind-pickaxe later to maint). + + * "git format-patch" learned to give 72-cols to diffstat, which is + consistent with other line length limits the subcommand uses for + its output meant for e-mails. 
+ + * The log from "git daemon" can be redirected with a new option; one + relevant use case is to send the log to standard error (instead of + syslog) when running it from inetd. + + * "git rebase" learned to take "--allow-empty-message" option. + + * "git am" has learned the "--quit" option, in addition to the + existing "--abort" option; having the pair mirrors a few other + commands like "rebase" and "cherry-pick". + + * "git worktree add" learned to run the post-checkout hook, just like + "git clone" runs it upon the initial checkout. Performance, Internal Implementation, Development Support etc. * More perf tests for threaded grep - (merge 7b31b55db1 ab/perf-grep-threads later to maint). * "perf" test output can be sent to codespeed server. (merge 19cf57a92e cc/codespeed later to maint). + * The build procedure for perl/ part has been greatly simplified by + weaning ourselves off of MakeMaker. + + * In preparation for implementing narrow/partial clone, the machinery + for checking object connectivity used by gc and fsck has been + taught that a missing object is OK when it is referenced by a + packfile specially marked as coming from trusted repository that + promises to make them available on-demand and lazily. + + * The machinery to clone & fetch, which in turn involves packing and + unpacking objects, has been told how to omit certain objects using + the filtering mechanism introduced by another topic. It now knows + to mark the resulting pack as a promisor pack to tolerate missing + objects, laying foundation for "narrow" clones. + + * The first step to getting rid of mru API and using the + doubly-linked list API directly instead. + + * Retire mru API as it does not give enough abstraction over + underlying list API to be worth it. + + * Rewrite two more "git submodule" subcommands in C. + + * The tracing machinery learned to report tweaking of environment + variables as well. + (merge 090a09272a nd/trace-with-env later to maint). + + * Update Coccinelle rules to catch and optimize strbuf_addf(&buf, "%s", str) + (merge cd9a4b6d93 rs/strbuf-cocci-workaround later to maint). + + * Prevent "clang-format" from breaking line after function return type. + (merge a3715d43e8 po/clang-format-functype-weight later to maint). + + * The sequencer infrastructure is shared across "git cherry-pick", + "git rebase -i", etc., and has always spawned "git commit" when it + needs to create a commit. It has been taught to do so internally, + when able, by reusing the codepath "git commit" itself uses, which + gives performance boost for a few tens of percents in some sample + scenarios. + + * Push the submodule version of collision-detecting SHA-1 hash + implementation a bit harder on builders. + + * Avoid mmapping small files while using packed refs (especially ones + with zero size, which would cause later munmap() to fail). + (merge ba41a8b600 kg/packed-ref-cache-fix later to maint). + + * Conversion from uchar[20] to struct object_id continues. + + * More tests for wildmatch functions. + + * The code to binary search starting from a fan-out table (which is + how the packfile is indexed with object names) has been refactored + into a reusable helper. Also contains various documentation updates and code clean-ups. @@ -28,44 +97,35 @@ Fixes since v2.16 * An old regression in "git describe --all $annotated_tag^0" has been fixed. - (merge 1bba00130a dk/describe-all-output-fix later to maint). 
* "git status" after moving a path in the working tree (hence making it appear "removed") and then adding with the -N option (hence making that appear "added") detected it as a rename, but did not report the old and new pathnames correctly. - (merge 176ea74793 nd/ita-wt-renames-in-status later to maint). * "git svn dcommit" did not take into account the fact that a svn+ssh:// URL with a username@ (typically used for pushing) refers to the same SVN repository without the username@ and failed when svn.pushmergeinfo option is set. - (merge 8aaed892fd jm/svn-pushmergeinfo-fix later to maint). * API clean-up around revision traversal. - (merge 6fcec2f9ae rs/lose-leak-pending later to maint). * "git merge -Xours/-Xtheirs" learned to use our/their version when resolving a conflicting updates to a symbolic link. - (merge fd48b46474 jc/merge-symlink-ours-theirs later to maint). * "git clone $there $here" is allowed even when here directory exists as long as it is an empty directory, but the command incorrectly removed it upon a failure of the operation. - (merge d45420c1c8 jk/abort-clone-with-existing-dest later to maint). * "git commit --fixup" did not allow "-m<message>" option to be used at the same time; allow it to annotate resulting commit with more text. - (merge 30884c9afc ab/commit-m-with-fixup later to maint). * When resetting the working tree files recursively, the working tree of submodules are now also reset to match. - (merge 7dcc1f4df8 sb/submodule-update-reset-fix later to maint). * "git stash -- <pathspec>" incorrectly blew away untracked files in the directory that matched the pathspec, which has been corrected. - (merge bba067d2fa tg/stash-with-pathspec-fix later to maint). * Instead of maintaining home-grown email address parsing code, ship a copy of reasonably recent Mail::Address to be used as a fallback @@ -75,10 +135,123 @@ Fixes since v2.16 * "git add -p" was taught to ignore local changes to submodules as they do not interfere with the partial addition of regular changes anyway. - (merge 12434efc1d nd/add-i-ignore-submodules later to maint). + + * Avoid showing a warning message in the middle of a line of "git + diff" output. + (merge 4e056c989f nd/diff-flush-before-warning later to maint). + + * The http tracing code, often used to debug connection issues, + learned to redact potentially sensitive information from its output + so that it can be more safely sharable. + (merge 8ba18e6fa4 jt/http-redact-cookies later to maint). + + * Crash fix for a corner case where an error codepath tried to unlock + what it did not acquire lock on. + (merge 81fcb698e0 mr/packed-ref-store-fix later to maint). + + * The split-index mode had a few corner case bugs fixed. + (merge ae59a4e44f tg/split-index-fixes later to maint). + + * Assorted fixes to "git daemon". + (merge ed15e58efe jk/daemon-fixes later to maint). + + * Completion of "git merge -s<strategy>" (in contrib/) did not work + well in non-C locale. + (merge 7cc763aaa3 nd/list-merge-strategy later to maint). + + * Workaround for segfault with more recent versions of SVN. + (merge 7f6f75e97a ew/svn-branch-segfault-fix later to maint). + + * Plug recently introduced leaks in fsck. + (merge ba3a08ca0e jt/fsck-code-cleanup later to maint). + + * "git pull --rebase" did not pass verbosity setting down when + recursing into a submodule. + (merge a56771a668 sb/pull-rebase-submodule later to maint). 
+ + * The way "git reset --hard" reports the commit the updated HEAD + points at is made consistent with the way how the commit title is + generated by the other parts of the system. This matters when the + title is spread across physically multiple lines. + (merge 1cf823fb68 tg/reset-hard-show-head-with-pretty later to maint). + + * Test fixes. + (merge 63b1a175ee sg/test-i18ngrep later to maint). + + * Some bugs around "untracked cache" feature have been fixed. This + will notice corrupt data in the untracked cache left by old and + buggy code and issue a warning---the index can be fixed by clearing + the untracked cache from it. + (merge 0cacebf099 nd/fix-untracked-cache-invalidation later to maint). + (merge 7bf0be7501 ab/untracked-cache-invalidation-docs later to maint). + + * "git blame HEAD COPYING" in a bare repository failed to run, while + "git blame HEAD -- COPYING" run just fine. This has been corrected. + + * "git add" files in the same directory, but spelling the directory + path in different cases on case insensitive filesystem, corrupted + the name hash data structure and led to unexpected results. This + has been corrected. + (merge c95525e90d bp/name-hash-dirname-fix later to maint). + + * "git rebase -p" mangled log messages of a merge commit, which is + now fixed. + (merge ed5144d7eb js/fix-merge-arg-quoting-in-rebase-p later to maint). + + * Some low level protocol codepath could crash when they get an + unexpected flush packet, which is now fixed. + (merge bb1356dc64 js/packet-read-line-check-null later to maint). + + * "git check-ignore" with multiple paths got confused when one is a + file and the other is a directory, which has been fixed. + (merge d60771e930 rs/check-ignore-multi later to maint). + + * "git describe $garbage" stopped giving any errors when the garbage + happens to be a string with 40 hexadecimal letters. + (merge a8e7a2bf0f sb/describe-blob later to maint). + + * Code to unquote single-quoted string (used in the parser for + configuration files, etc.) did not diagnose bogus input correctly + and produced bogus results instead. + (merge ddbbf8eb25 jk/sq-dequote-on-bogus-input later to maint). + + * Many places in "git apply" knew that "/dev/null" that signals + "there is no such file on this side of the diff" can be followed by + whitespace and garbage when parsing a patch, except for one, which + made an otherwise valid patch (e.g. ones from subversion) rejected. + (merge e454ad4bec tk/apply-dev-null-verify-name-fix later to maint). + + * We no longer create any *.spec file, so "make clean" should not + remove it. + (merge 4321bdcabb tz/do-not-clean-spec-file later to maint). + + * "git push" over http transport did not unquote the push-options + correctly. + (merge 90dce21eb0 jk/push-options-via-transport-fix later to maint). * Other minor doc, test and build updates and code cleanups. (merge e2a5a028c7 bw/oidmap-autoinit later to maint). - (merge f0a6068a9f ys/bisect-object-id-missing-conversion-fix later to maint). - (merge 30221a3389 as/read-tree-prefix-doc-fix later to maint). - (merge 9bd2ce5432 ab/doc-cat-file-e-still-shows-errors later to maint). + (merge ec3b4b06f8 cl/t9001-cleanup later to maint). + (merge e1b3f3dd38 ks/submodule-doc-updates later to maint). + (merge fbac558a9b rs/describe-unique-abbrev later to maint). + (merge 8462ff43e4 tb/crlf-conv-flags later to maint). + (merge 7d68bb0766 rb/hashmap-h-compilation-fix later to maint). + (merge 3449847168 cc/sha1-file-name later to maint). 
+ (merge ad622a256f ds/use-get-be64 later to maint). + (merge f919ffebed sg/cocci-move-array later to maint). + (merge 4e801463c7 jc/mailinfo-cleanup-fix later to maint). + (merge ef5b3a6c5e nd/shared-index-fix later to maint). + (merge 9f5258cbb8 tz/doc-show-defaults-to-head later to maint). + (merge b780e4407d jc/worktree-add-short-help later to maint). + (merge ae239fc8e5 rs/cocci-strbuf-addf-to-addstr later to maint). + (merge 2e22a85e5c nd/ignore-glob-doc-update later to maint). + (merge 3738031581 jk/gettext-poison later to maint). + (merge 54360a1956 rj/sparse-updates later to maint). + (merge 12e31a6b12 sg/doc-test-must-fail-args later to maint). + (merge 760f1ad101 bc/doc-interpret-trailers-grammofix later to maint). + (merge 4ccf461f56 bp/fsmonitor later to maint). + (merge a6119f82b1 jk/test-hashmap-updates later to maint). + (merge 5aea9fe6cc rd/typofix later to maint). + (merge e4e5da2796 sb/status-doc-fix later to maint). + (merge 7976e901c8 gs/test-unset-xdg-cache-home later to maint). + (merge d023df1ee6 tg/worktree-create-tracking later to maint). diff --git a/Documentation/config.txt b/Documentation/config.txt index 0e25b2c92b..bbd66f5b98 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -1398,7 +1398,16 @@ fetch.unpackLimit:: fetch.prune:: If true, fetch will automatically behave as if the `--prune` - option was given on the command line. See also `remote.<name>.prune`. + option was given on the command line. See also `remote.<name>.prune` + and the PRUNING section of linkgit:git-fetch[1]. + +fetch.pruneTags:: + If true, fetch will automatically behave as if the + `refs/tags/*:refs/tags/*` refspec was provided when pruning, + if not set already. This allows for setting both this option + and `fetch.prune` to maintain a 1=1 mapping to upstream + refs. See also `remote.<name>.pruneTags` and the PRUNING + section of linkgit:git-fetch[1]. fetch.output:: Control how ref update status is printed. Valid values are @@ -2945,6 +2954,15 @@ remote.<name>.prune:: remote (as if the `--prune` option was given on the command line). Overrides `fetch.prune` settings, if any. +remote.<name>.pruneTags:: + When set to true, fetching from this remote by default will also + remove any local tags that no longer exist on the remote if pruning + is activated in general via `remote.<name>.prune`, `fetch.prune` or + `--prune`. Overrides `fetch.pruneTags` settings, if any. ++ +See also `remote.<name>.prune` and the PRUNING section of +linkgit:git-fetch[1]. + remotes.<group>:: The list of remotes which are fetched by "git remote update <group>". See linkgit:git-remote[1]. @@ -3343,6 +3361,10 @@ uploadpack.packObjectsHook:: was run. I.e., `upload-pack` will feed input intended for `pack-objects` to the hook, and expects a completed packfile on stdout. + +uploadpack.allowFilter:: + If this option is set, `upload-pack` will advertise partial + clone and partial fetch object filtering. + Note that this configuration variable is ignored if it is seen in the repository-level config (this is a safety measure against fetching from diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt index fb6bebbc61..8631e365f4 100644 --- a/Documentation/fetch-options.txt +++ b/Documentation/fetch-options.txt @@ -73,7 +73,22 @@ ifndef::git-pull[] are fetched due to an explicit refspec (either on the command line or in the remote configuration, for example if the remote was cloned with the --mirror option), then they are also - subject to pruning. + subject to pruning. 
Supplying `--prune-tags` is a shorthand for + providing the tag refspec. ++ +See the PRUNING section below for more details. + +-P:: +--prune-tags:: + Before fetching, remove any local tags that no longer exist on + the remote if `--prune` is enabled. This option should be used + more carefully, unlike `--prune` it will remove any local + references (local tags) that have been created. This option is + a shorthand for providing the explicit tag refspec along with + `--prune`, see the discussion about that in its documentation. ++ +See the PRUNING section below for more details. + endif::git-pull[] ifndef::git-pull[] diff --git a/Documentation/git-am.txt b/Documentation/git-am.txt index 0f426ae874..6f6c34b0f4 100644 --- a/Documentation/git-am.txt +++ b/Documentation/git-am.txt @@ -16,7 +16,7 @@ SYNOPSIS [--exclude=<path>] [--include=<path>] [--reject] [-q | --quiet] [--[no-]scissors] [-S[<keyid>]] [--patch-format=<format>] [(<mbox> | <Maildir>)...] -'git am' (--continue | --skip | --abort | --show-current-patch) +'git am' (--continue | --skip | --abort | --quit | --show-current-patch) DESCRIPTION ----------- @@ -167,6 +167,10 @@ default. You can use `--no-utf8` to override this. --abort:: Restore the original branch and abort the patching operation. +--quit:: + Abort the patching operation but keep HEAD and the index + untouched. + --show-current-patch:: Show the patch being applied when "git am" is stopped because of conflicts. diff --git a/Documentation/git-daemon.txt b/Documentation/git-daemon.txt index 3c91db7bed..56d54a4898 100644 --- a/Documentation/git-daemon.txt +++ b/Documentation/git-daemon.txt @@ -20,6 +20,7 @@ SYNOPSIS [--inetd | [--listen=<host_or_ipaddr>] [--port=<n>] [--user=<user> [--group=<group>]]] + [--log-destination=(stderr|syslog|none)] [<directory>...] DESCRIPTION @@ -80,7 +81,8 @@ OPTIONS do not have the 'git-daemon-export-ok' file. --inetd:: - Have the server run as an inetd service. Implies --syslog. + Have the server run as an inetd service. Implies --syslog (may be + overridden with `--log-destination=`). Incompatible with --detach, --port, --listen, --user and --group options. @@ -110,8 +112,28 @@ OPTIONS zero for no limit. --syslog:: - Log to syslog instead of stderr. Note that this option does not imply - --verbose, thus by default only error conditions will be logged. + Short for `--log-destination=syslog`. + +--log-destination=<destination>:: + Send log messages to the specified destination. + Note that this option does not imply --verbose, + thus by default only error conditions will be logged. + The <destination> must be one of: ++ +-- +stderr:: + Write to standard error. + Note that if `--detach` is specified, + the process disconnects from the real standard error, + making this destination effectively equivalent to `none`. +syslog:: + Write to syslog, using the `git-daemon` identifier. +none:: + Disable all logging. +-- ++ +The default destination is `syslog` if `--inetd` or `--detach` is specified, +otherwise `stderr`. --user-path:: --user-path=<path>:: diff --git a/Documentation/git-fetch.txt b/Documentation/git-fetch.txt index b153aefa68..e319935597 100644 --- a/Documentation/git-fetch.txt +++ b/Documentation/git-fetch.txt @@ -99,6 +99,93 @@ The latter use of the `remote.<repository>.fetch` values can be overridden by giving the `--refmap=<refspec>` parameter(s) on the command line. 
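As an aside on the `fetch.pruneTags`, `remote.<name>.pruneTags` and `--prune-tags` knobs introduced above, a minimal sketch of enabling them from the command line might look like this (the remote name `origin` is only an example):

------------------------------------------------
# prune stale remote-tracking branches on every fetch
$ git config --global fetch.prune true

# additionally prune local tags that no longer exist upstream
$ git config --global fetch.pruneTags true

# or scope both settings to a single remote instead
$ git config remote.origin.prune true
$ git config remote.origin.pruneTags true
------------------------------------------------

The one-off command-line equivalent is `git fetch --prune --prune-tags origin`, matching the examples in the PRUNING section that follows.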
+PRUNING +------- + +Git has a default disposition of keeping data unless it's explicitly +thrown away; this extends to holding onto local references to branches +on remotes that have themselves deleted those branches. + +If left to accumulate, these stale references might make performance +worse on big and busy repos that have a lot of branch churn, and +e.g. make the output of commands like `git branch -a --contains +<commit>` needlessly verbose, as well as impacting anything else +that'll work with the complete set of known references. + +These remote-tracking references can be deleted as a one-off with +either of: + +------------------------------------------------ +# While fetching +$ git fetch --prune <name> + +# Only prune, don't fetch +$ git remote prune <name> +------------------------------------------------ + +To prune references as part of your normal workflow without needing to +remember to run that, set `fetch.prune` globally, or +`remote.<name>.prune` per-remote in the config. See +linkgit:git-config[1]. + +Here's where things get tricky and more specific. The pruning feature +doesn't actually care about branches, instead it'll prune local <-> +remote-references as a function of the refspec of the remote (see +`<refspec>` and <<CRTB,CONFIGURED REMOTE-TRACKING BRANCHES>> above). + +Therefore if the refspec for the remote includes +e.g. `refs/tags/*:refs/tags/*`, or you manually run e.g. `git fetch +--prune <name> "refs/tags/*:refs/tags/*"` it won't be stale remote +tracking branches that are deleted, but any local tag that doesn't +exist on the remote. + +This might not be what you expect, i.e. you want to prune remote +`<name>`, but also explicitly fetch tags from it, so when you fetch +from it you delete all your local tags, most of which may not have +come from the `<name>` remote in the first place. + +So be careful when using this with a refspec like +`refs/tags/*:refs/tags/*`, or any other refspec which might map +references from multiple remotes to the same local namespace. + +Since keeping up-to-date with both branches and tags on the remote is +a common use-case the `--prune-tags` option can be supplied along with +`--prune` to prune local tags that don't exist on the remote, and +force-update those tags that differ. Tag pruning can also be enabled +with `fetch.pruneTags` or `remote.<name>.pruneTags` in the config. See +linkgit:git-config[1]. + +The `--prune-tags` option is equivalent to having +`refs/tags/*:refs/tags/*` declared in the refspecs of the remote. This +can lead to some seemingly strange interactions: + +------------------------------------------------ +# These both fetch tags +$ git fetch --no-tags origin 'refs/tags/*:refs/tags/*' +$ git fetch --no-tags --prune-tags origin +------------------------------------------------ + +The reason it doesn't error out when provided without `--prune` or its +config versions is for flexibility of the configured versions, and to +maintain a 1=1 mapping between what the command line flags do, and +what the configuration versions do. + +It's reasonable to e.g. configure `fetch.pruneTags=true` in +`~/.gitconfig` to have tags pruned whenever `git fetch --prune` is +run, without making every invocation of `git fetch` without `--prune` +an error. + +Pruning tags with `--prune-tags` also works when fetching a URL +instead of a named remote. 
These will all prune tags not found on +origin: + +------------------------------------------------ +$ git fetch origin --prune --prune-tags +$ git fetch origin --prune 'refs/tags/*:refs/tags/*' +$ git fetch <url of origin> --prune --prune-tags +$ git fetch <url of origin> --prune 'refs/tags/*:refs/tags/*' +------------------------------------------------ + OUTPUT ------ diff --git a/Documentation/git-interpret-trailers.txt b/Documentation/git-interpret-trailers.txt index 9dd19a1dd9..ff446f15f7 100644 --- a/Documentation/git-interpret-trailers.txt +++ b/Documentation/git-interpret-trailers.txt @@ -51,7 +51,7 @@ with only spaces at the end of the commit message part, one blank line will be added before the new trailer. Existing trailers are extracted from the input message by looking for -a group of one or more lines that (i) are all trailers, or (ii) contains at +a group of one or more lines that (i) is all trailers, or (ii) contains at least one Git-generated or user-configured trailer and consists of at least 25% trailers. The group must be preceded by one or more empty (or whitespace-only) lines. diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt index aa403d02f3..81bc490ac5 100644 --- a/Documentation/git-pack-objects.txt +++ b/Documentation/git-pack-objects.txt @@ -255,6 +255,17 @@ a missing object is encountered. This is the default action. The form '--missing=allow-any' will allow object traversal to continue if a missing object is encountered. Missing objects will silently be omitted from the results. ++ +The form '--missing=allow-promisor' is like 'allow-any', but will only +allow object traversal to continue for EXPECTED promisor missing objects. +Unexpected missing object will raise an error. + +--exclude-promisor-objects:: + Omit objects that are known to be in the promisor remote. (This + option has the purpose of operating only on locally created objects, + so that when we repack, we still maintain a distinction between + locally created objects [without .promisor] and objects from the + promisor remote [with .promisor].) This is used with partial clone. SEE ALSO -------- diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index 0b29e48221..3277ca1432 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -244,6 +244,11 @@ leave out at most one of A and B, in which case it defaults to HEAD. Keep the commits that do not change anything from its parents in the result. +--allow-empty-message:: + By default, rebasing commits with an empty message will fail. + This option overrides that behavior, allowing commits with empty + messages to be rebased. + --skip:: Restart the rebasing process by skipping the current patch. diff --git a/Documentation/git-remote.txt b/Documentation/git-remote.txt index 577b969c1b..4feddc0293 100644 --- a/Documentation/git-remote.txt +++ b/Documentation/git-remote.txt @@ -172,10 +172,14 @@ With `-n` option, the remote heads are not queried first with 'prune':: -Deletes all stale remote-tracking branches under <name>. -These stale branches have already been removed from the remote repository -referenced by <name>, but are still locally available in -"remotes/<name>". +Deletes stale references associated with <name>. By default, stale +remote-tracking branches under <name> are deleted, but depending on +global configuration and the configuration of the remote we might even +prune local tags that haven't been pushed there. 
Equivalent to `git +fetch --prune <name>`, except that no new references will be fetched. ++ +See the PRUNING section of linkgit:git-fetch[1] for what it'll prune +depending on various configuration. + With `--dry-run` option, report what branches will be pruned, but do not actually prune them. @@ -189,7 +193,7 @@ remotes.default is not defined, all remotes which do not have the configuration parameter remote.<name>.skipDefaultUpdate set to true will be updated. (See linkgit:git-config[1]). + -With `--prune` option, prune all the remotes that are updated. +With `--prune` option, run pruning against all the remotes that are updated. DISCUSSION diff --git a/Documentation/git-show.txt b/Documentation/git-show.txt index 82a4125a2d..e73ef54017 100644 --- a/Documentation/git-show.txt +++ b/Documentation/git-show.txt @@ -9,7 +9,7 @@ git-show - Show various types of objects SYNOPSIS -------- [verse] -'git show' [options] <object>... +'git show' [options] [<object>...] DESCRIPTION ----------- @@ -35,7 +35,7 @@ This manual page describes only the most frequently used options. OPTIONS ------- <object>...:: - The names of objects to show. + The names of objects to show (defaults to 'HEAD'). For a more complete list of ways to spell object names, see "SPECIFYING REVISIONS" section in linkgit:gitrevisions[7]. diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt index 72bfb87f66..f9c91c721e 100644 --- a/Documentation/git-status.txt +++ b/Documentation/git-status.txt @@ -184,10 +184,10 @@ in which case `XY` are `!!`. X Y Meaning ------------------------------------------------- - [MD] not updated + [AMD] not updated M [ MD] updated in index A [ MD] added to index - D [ M] deleted from index + D deleted from index R [ MD] renamed in index C [ MD] copied in index [MARC] index and work tree matches diff --git a/Documentation/git-submodule.txt b/Documentation/git-submodule.txt index ff612001d2..71c5618e82 100644 --- a/Documentation/git-submodule.txt +++ b/Documentation/git-submodule.txt @@ -70,8 +70,8 @@ status [--cached] [--recursive] [--] [<path>...]:: Show the status of the submodules. This will print the SHA-1 of the currently checked out commit for each submodule, along with the submodule path and the output of 'git describe' for the - SHA-1. Each SHA-1 will be prefixed with `-` if the submodule is not - initialized, `+` if the currently checked out submodule commit + SHA-1. Each SHA-1 will possibly be prefixed with `-` if the submodule is + not initialized, `+` if the currently checked out submodule commit does not match the SHA-1 found in the index of the containing repository and `U` if the submodule has merge conflicts. + @@ -132,15 +132,15 @@ expects by cloning missing submodules and updating the working tree of the submodules. The "updating" can be done in several ways depending on command line options and the value of `submodule.<name>.update` configuration variable. The command line option takes precedence over -the configuration variable. if neither is given, a checkout is performed. -update procedures supported both from the command line as well as setting -`submodule.<name>.update`: +the configuration variable. If neither is given, a 'checkout' is performed. +The 'update' procedures supported both from the command line as well as +through the `submodule.<name>.update` configuration are: checkout;; the commit recorded in the superproject will be checked out in the submodule on a detached HEAD. 
+ If `--force` is specified, the submodule will be checked out (using -`git checkout --force` if appropriate), even if the commit specified +`git checkout --force`), even if the commit specified in the index of the containing repository already matches the commit checked out in the submodule. @@ -150,8 +150,8 @@ checked out in the submodule. merge;; the commit recorded in the superproject will be merged into the current branch in the submodule. -The following procedures are only available via the `submodule.<name>.update` -configuration variable: +The following 'update' procedures are only available via the +`submodule.<name>.update` configuration variable: custom command;; arbitrary shell command that takes a single argument (the sha1 of the commit recorded in the diff --git a/Documentation/git-tag.txt b/Documentation/git-tag.txt index 956fc019f9..1d17101bac 100644 --- a/Documentation/git-tag.txt +++ b/Documentation/git-tag.txt @@ -9,7 +9,7 @@ git-tag - Create, list, delete or verify a tag object signed with GPG SYNOPSIS -------- [verse] -'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] +'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e] <tagname> [<commit> | <object>] 'git tag' -d <tagname>... 'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>] @@ -167,6 +167,12 @@ This option is only applicable when listing tags without annotation lines. Implies `-a` if none of `-a`, `-s`, or `-u <keyid>` is given. +-e:: +--edit:: + The message taken from file with `-F` and command line with + `-m` are usually used as the tag message unmodified. + This option lets you further edit the message taken from these sources. + --cleanup=<mode>:: This option sets how the tag message is cleaned up. The '<mode>' can be one of 'verbatim', 'whitespace' and 'strip'. The diff --git a/Documentation/git-update-index.txt b/Documentation/git-update-index.txt index bdb0342593..3897a59ee9 100644 --- a/Documentation/git-update-index.txt +++ b/Documentation/git-update-index.txt @@ -464,6 +464,32 @@ command reads the index; while when `--[no-|force-]untracked-cache` are used, the untracked cache is immediately added to or removed from the index. +Before 2.17, the untracked cache had a bug where replacing a directory +with a symlink to another directory could cause it to incorrectly show +files tracked by git as untracked. See the "status: add a failing test +showing a core.untrackedCache bug" commit to git.git. A workaround for +that is (and this might work for other undiscovered bugs in the +future): + +---------------- +$ git -c core.untrackedCache=false status +---------------- + +This bug has also been shown to affect non-symlink cases of replacing +a directory with a file when it comes to the internal structures of +the untracked cache, but no case has been reported where this resulted in +wrong "git status" output. + +There are also cases where existing indexes written by git versions +before 2.17 will reference directories that don't exist anymore, +potentially causing many "could not open directory" warnings to be +printed on "git status". These are new warnings for existing issues +that were previously silently discarded. + +As with the bug described above the solution is to one-off do a "git +status" run with `core.untrackedCache=false` to flush out the leftover +bad data. 
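The untracked-cache hunk above recommends a one-off `git -c core.untrackedCache=false status` run to flush leftover bad data. As a hedged aside, the same manual page's `--[no-]untracked-cache` options can also be used to drop and rebuild the cache explicitly; a minimal sketch:

------------------------------------------------
# remove the (possibly corrupt) untracked cache extension from the index
$ git update-index --no-untracked-cache

# re-enable it and let the next status run repopulate it
$ git update-index --untracked-cache
$ git status
------------------------------------------------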
+ File System Monitor ------------------- @@ -484,8 +510,8 @@ the `core.fsmonitor` configuration variable (see linkgit:git-config[1]) than using the `--fsmonitor` option to `git update-index` in each repository, especially if you want to do so across all repositories you use, because you can set the configuration -variable to `true` (or `false`) in your `$HOME/.gitconfig` just once -and have it affect all repositories you touch. +variable in your `$HOME/.gitconfig` just once and have it affect all +repositories you touch. When the `core.fsmonitor` configuration variable is changed, the file system monitor is added to or removed from the index the next time diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt index 41585f535d..5ac3f68ab5 100644 --- a/Documentation/git-worktree.txt +++ b/Documentation/git-worktree.txt @@ -52,10 +52,11 @@ is linked to the current repository, sharing everything except working directory specific files such as HEAD, index, etc. `-` may also be specified as `<commit-ish>`; it is synonymous with `@{-1}`. + -If <commit-ish> is a branch name (call it `<branch>` and is not found, +If <commit-ish> is a branch name (call it `<branch>`) and is not found, and neither `-b` nor `-B` nor `--detach` are used, but there does exist a tracking branch in exactly one remote (call it `<remote>`) -with a matching name, treat as equivalent to +with a matching name, treat as equivalent to: ++ ------------ $ git worktree add --track -b <branch> <path> <remote>/<branch> ------------ diff --git a/Documentation/git.txt b/Documentation/git.txt index 3f4161a799..8163b5796b 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -646,6 +646,16 @@ of clones and fetches. variable. See `GIT_TRACE` for available trace output options. +`GIT_TRACE_CURL_NO_DATA`:: + When a curl trace is enabled (see `GIT_TRACE_CURL` above), do not dump + data (that is, only dump info lines and headers). + +`GIT_REDACT_COOKIES`:: + This can be set to a comma-separated list of strings. When a curl trace + is enabled (see `GIT_TRACE_CURL` above), whenever a "Cookies:" header + sent by the client is dumped, values of cookies whose key is in that + list (case-sensitive) are redacted. + `GIT_LITERAL_PATHSPECS`:: Setting this variable to `1` will cause Git to treat all pathspecs literally, rather than as glob patterns. For example, diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt index 30687de81a..c21f5ca109 100644 --- a/Documentation/gitattributes.txt +++ b/Documentation/gitattributes.txt @@ -392,46 +392,14 @@ Long Running Filter Process If the filter command (a string value) is defined via `filter.<driver>.process` then Git can process all blobs with a single filter invocation for the entire life of a single Git -command. This is achieved by using a packet format (pkt-line, -see technical/protocol-common.txt) based protocol over standard -input and standard output as follows. All packets, except for the -"*CONTENT" packets and the "0000" flush packet, are considered -text and therefore are terminated by a LF. - -Git starts the filter when it encounters the first file -that needs to be cleaned or smudged. After the filter started -Git sends a welcome message ("git-filter-client"), a list of supported -protocol version numbers, and a flush packet. Git expects to read a welcome -response message ("git-filter-server"), exactly one protocol version number -from the previously sent list, and a flush packet. 
All further -communication will be based on the selected version. The remaining -protocol description below documents "version=2". Please note that -"version=42" in the example below does not exist and is only there -to illustrate how the protocol would look like with more than one -version. - -After the version negotiation Git sends a list of all capabilities that -it supports and a flush packet. Git expects to read a list of desired -capabilities, which must be a subset of the supported capabilities list, -and a flush packet as response: ------------------------- -packet: git> git-filter-client -packet: git> version=2 -packet: git> version=42 -packet: git> 0000 -packet: git< git-filter-server -packet: git< version=2 -packet: git< 0000 -packet: git> capability=clean -packet: git> capability=smudge -packet: git> capability=not-yet-invented -packet: git> 0000 -packet: git< capability=clean -packet: git< capability=smudge -packet: git< 0000 ------------------------- -Supported filter capabilities in version 2 are "clean", "smudge", -and "delay". +command. This is achieved by using the long-running process protocol +(described in technical/long-running-process-protocol.txt). + +When Git encounters the first file that needs to be cleaned or smudged, +it starts the filter and performs the handshake. In the handshake, the +welcome message sent by Git is "git-filter-client", only version 2 is +suppported, and the supported capabilities are "clean", "smudge", and +"delay". Afterwards Git sends a list of "key=value" pairs terminated with a flush packet. The list will contain at least the filter command @@ -517,12 +485,6 @@ the protocol then Git will stop the filter process and restart it with the next file that needs to be processed. Depending on the `filter.<driver>.required` flag Git will interpret that as error. -After the filter has processed a command it is expected to wait for -a "key=value" list containing the next command. Git will close -the command pipe on exit. The filter is expected to detect EOF -and exit gracefully on its own. Git will wait until the filter -process has stopped. - Delay ^^^^^ diff --git a/Documentation/gitignore.txt b/Documentation/gitignore.txt index 63260f0056..ff5d7f9ed6 100644 --- a/Documentation/gitignore.txt +++ b/Documentation/gitignore.txt @@ -102,12 +102,11 @@ PATTERN FORMAT (relative to the toplevel of the work tree if not from a `.gitignore` file). - - Otherwise, Git treats the pattern as a shell glob suitable - for consumption by fnmatch(3) with the FNM_PATHNAME flag: - wildcards in the pattern will not match a / in the pathname. - For example, "Documentation/{asterisk}.html" matches - "Documentation/git.html" but not "Documentation/ppc/ppc.html" - or "tools/perf/Documentation/perf.html". + - Otherwise, Git treats the pattern as a shell glob: "`*`" matches + anything except "`/`", "`?`" matches any one character except "`/`" + and "`[]`" matches one character in a selected range. See + fnmatch(3) and the FNM_PATHNAME flag for a more detailed + description. - A leading slash matches the beginning of the pathname. For example, "/{asterisk}.c" matches "cat-file.c" but not diff --git a/Documentation/gitremote-helpers.txt b/Documentation/gitremote-helpers.txt index 4a584f3c5d..4b8c93ec59 100644 --- a/Documentation/gitremote-helpers.txt +++ b/Documentation/gitremote-helpers.txt @@ -466,6 +466,13 @@ set by Git if the remote helper has the 'option' capability. Transmit <string> as a push option. 
As the push option must not contain LF or NUL characters, the string is not encoded. +'option from-promisor' {'true'|'false'}:: + Indicate that these objects are being fetched from a promisor. + +'option no-dependents' {'true'|'false'}:: + Indicate that only the objects wanted need to be fetched, not + their dependents. + SEE ALSO -------- linkgit:git-remote[1] diff --git a/Documentation/gitsubmodules.txt b/Documentation/gitsubmodules.txt index 46cf120f66..4d6c17782f 100644 --- a/Documentation/gitsubmodules.txt +++ b/Documentation/gitsubmodules.txt @@ -36,8 +36,8 @@ The `gitlink` entry contains the object name of the commit that the superproject expects the submodule’s working directory to be at. The section `submodule.foo.*` in the `.gitmodules` file gives additional -hints to Gits porcelain layer such as where to obtain the submodule via -the `submodule.foo.url` setting. +hints to Git's porcelain layer. For example, the `submodule.foo.url` +setting specifies where to obtain the submodule. Submodules can be used for at least two different use cases: @@ -51,18 +51,21 @@ Submodules can be used for at least two different use cases: 2. Splitting a (logically single) project into multiple repositories and tying them back together. This can be used to - overcome current limitations of Gits implementation to have + overcome current limitations of Git's implementation to have finer grained access: - * Size of the git repository: + * Size of the Git repository: In its current form Git scales up poorly for large repositories containing content that is not compressed by delta computation between trees. - However you can also use submodules to e.g. hold large binary assets - and these repositories are then shallowly cloned such that you do not + For example, you can use submodules to hold large binary assets + and these repositories can be shallowly cloned such that you do not have a large history locally. * Transfer size: In its current form Git requires the whole working tree present. It does not allow partial trees to be transferred in fetch or clone. + If the project you work on consists of multiple repositories tied + together as submodules in a superproject, you can avoid fetching the + working trees of the repositories you are not interested in. * Access control: By restricting user access to submodules, this can be used to implement read/write policies for different users. @@ -73,9 +76,10 @@ The configuration of submodules Submodule operations can be configured using the following mechanisms (from highest to lowest precedence): - * The command line for those commands that support taking submodule specs. - Most commands have a boolean flag '--recurse-submodules' whether to - recurse into submodules. Examples are `ls-files` or `checkout`. + * The command line for those commands that support taking submodules + as part of their pathspecs. Most commands have a boolean flag + `--recurse-submodules` which specify whether to recurse into submodules. + Examples are `grep` and `checkout`. Some commands take enums, such as `fetch` and `push`, where you can specify how submodules are affected. @@ -87,8 +91,8 @@ Submodule operations can be configured using the following mechanisms For example an effect from the submodule's `.gitignore` file would be observed when you run `git status --ignore-submodules=none` in the superproject. This collects information from the submodule's working -directory by running `status` in the submodule, which does pay attention -to its `.gitignore` file. 
+directory by running `status` in the submodule while paying attention +to the `.gitignore` file of the submodule. + The submodule's `$GIT_DIR/config` file would come into play when running `git push --recurse-submodules=check` in the superproject, as this would @@ -97,20 +101,20 @@ remotes are configured in the submodule as usual in the `$GIT_DIR/config` file. * The configuration file `$GIT_DIR/config` in the superproject. - Typical configuration at this place is controlling if a submodule - is recursed into at all via the `active` flag for example. + Git only recurses into active submodules (see "ACTIVE SUBMODULES" + section below). + If the submodule is not yet initialized, then the configuration -inside the submodule does not exist yet, so configuration where to +inside the submodule does not exist yet, so where to obtain the submodule from is configured here for example. - * the `.gitmodules` file inside the superproject. Additionally to the - required mapping between submodule's name and path, a project usually + * The `.gitmodules` file inside the superproject. A project usually uses this file to suggest defaults for the upstream collection - of repositories. + of repositories for the mapping that is required between a + submodule's name and its path. + -This file mainly serves as the mapping between name and path in -the superproject, such that the submodule's git directory can be +This file mainly serves as the mapping between the name and path of submodules +in the superproject, such that the submodule's Git directory can be located. + If the submodule has never been initialized, this is the only place @@ -137,8 +141,8 @@ directory is automatically moved to `$GIT_DIR/modules/<name>/` of the superproject. * Deinitialized submodule: A `gitlink`, and a `.gitmodules` entry, -but no submodule working directory. The submodule’s git directory -may be there as after deinitializing the git directory is kept around. +but no submodule working directory. The submodule’s Git directory +may be there as after deinitializing the Git directory is kept around. The directory which is supposed to be the working directory is empty instead. + A submodule can be deinitialized by running `git submodule deinit`. @@ -160,6 +164,60 @@ from another repository. To completely remove a submodule, manually delete `$GIT_DIR/modules/<name>/`. +ACTIVE SUBMODULES +----------------- + +A submodule is considered active, + + (a) if `submodule.<name>.active` is set to `true` + or + (b) if the submodule's path matches the pathspec in `submodule.active` + or + (c) if `submodule.<name>.url` is set. + +and these are evaluated in this order. + +For example: + + [submodule "foo"] + active = false + url = https://example.org/foo + [submodule "bar"] + active = true + url = https://example.org/bar + [submodule "baz"] + url = https://example.org/baz + +In the above config only the submodule 'bar' and 'baz' are active, +'bar' due to (a) and 'baz' due to (c). 'foo' is inactive because +(a) takes precedence over (c) + +Note that (c) is a historical artefact and will be ignored if the +(a) and (b) specify that the submodule is not active. In other words, +if we have an `submodule.<name>.active` set to `false` or if the +submodule's path is excluded in the pathspec in `submodule.active`, the +url doesn't matter whether it is present or not. This is illustrated in +the example that follows. 
+ + [submodule "foo"] + active = true + url = https://example.org/foo + [submodule "bar"] + url = https://example.org/bar + [submodule "baz"] + url = https://example.org/baz + [submodule "bob"] + ignore = true + [submodule] + active = b* + active = :(exclude) baz + +In here all submodules except 'baz' (foo, bar, bob) are active. +'foo' due to its own active flag and all the others due to the +submodule active pathspec, which specifies that any submodule +starting with 'b' except 'baz' are also active, regardless of the +presence of the .url field. + Workflow for a third party library ---------------------------------- diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt index 22f5c9b43d..7b273635de 100644 --- a/Documentation/rev-list-options.txt +++ b/Documentation/rev-list-options.txt @@ -750,10 +750,21 @@ The form '--missing=allow-any' will allow object traversal to continue if a missing object is encountered. Missing objects will silently be omitted from the results. + +The form '--missing=allow-promisor' is like 'allow-any', but will only +allow object traversal to continue for EXPECTED promisor missing objects. +Unexpected missing objects will raise an error. ++ The form '--missing=print' is like 'allow-any', but will also print a list of the missing objects. Object IDs are prefixed with a ``?'' character. endif::git-rev-list[] +--exclude-promisor-objects:: + (For internal use only.) Prefilter object traversal at + promisor boundary. This is used with partial clone. This is + stronger than `--missing=allow-promisor` because it limits the + traversal, rather than just silencing errors about missing + objects. + --no-walk[=(sorted|unsorted)]:: Only show the given commits, but do not traverse their ancestors. This has no effect if a range is specified. If the argument diff --git a/Documentation/technical/api-object-access.txt b/Documentation/technical/api-object-access.txt index 03bb0e950d..a1162e5bcd 100644 --- a/Documentation/technical/api-object-access.txt +++ b/Documentation/technical/api-object-access.txt @@ -7,7 +7,7 @@ Talk about <sha1_file.c> and <object.h> family, things like * read_object_with_reference() * has_sha1_file() * write_sha1_file() -* pretend_sha1_file() +* pretend_object_file() * lookup_{object,commit,tag,blob,tree} * parse_{object,commit,tag,blob,tree} * Use of object flags diff --git a/Documentation/technical/api-submodule-config.txt b/Documentation/technical/api-submodule-config.txt index 3dce003fda..ee907c4a82 100644 --- a/Documentation/technical/api-submodule-config.txt +++ b/Documentation/technical/api-submodule-config.txt @@ -4,7 +4,7 @@ submodule config cache API The submodule config cache API allows to read submodule configurations/information from specified revisions. Internally information is lazily read into a cache that is used to avoid -unnecessary parsing of the same .gitmodule files. Lookups can be done by +unnecessary parsing of the same .gitmodules files. Lookups can be done by submodule path or name. Usage diff --git a/Documentation/technical/long-running-process-protocol.txt b/Documentation/technical/long-running-process-protocol.txt new file mode 100644 index 0000000000..aa0aa9af1c --- /dev/null +++ b/Documentation/technical/long-running-process-protocol.txt @@ -0,0 +1,50 @@ +Long-running process protocol +============================= + +This protocol is used when Git needs to communicate with an external +process throughout the entire life of a single Git command. 
All +communication is in pkt-line format (see technical/protocol-common.txt) +over standard input and standard output. + +Handshake +--------- + +Git starts by sending a welcome message (for example, +"git-filter-client"), a list of supported protocol version numbers, and +a flush packet. Git expects to read the welcome message with "server" +instead of "client" (for example, "git-filter-server"), exactly one +protocol version number from the previously sent list, and a flush +packet. All further communication will be based on the selected version. +The remaining protocol description below documents "version=2". Please +note that "version=42" in the example below does not exist and is only +there to illustrate how the protocol would look like with more than one +version. + +After the version negotiation Git sends a list of all capabilities that +it supports and a flush packet. Git expects to read a list of desired +capabilities, which must be a subset of the supported capabilities list, +and a flush packet as response: +------------------------ +packet: git> git-filter-client +packet: git> version=2 +packet: git> version=42 +packet: git> 0000 +packet: git< git-filter-server +packet: git< version=2 +packet: git< 0000 +packet: git> capability=clean +packet: git> capability=smudge +packet: git> capability=not-yet-invented +packet: git> 0000 +packet: git< capability=clean +packet: git< capability=smudge +packet: git< 0000 +------------------------ + +Shutdown +-------- + +Git will close +the command pipe on exit. The filter is expected to detect EOF +and exit gracefully on its own. Git will wait until the filter +process has stopped. diff --git a/Documentation/technical/pack-protocol.txt b/Documentation/technical/pack-protocol.txt index cd31edc91e..7fee6b780a 100644 --- a/Documentation/technical/pack-protocol.txt +++ b/Documentation/technical/pack-protocol.txt @@ -241,6 +241,7 @@ out of what the server said it could do with the first 'want' line. upload-request = want-list *shallow-line *1depth-request + [filter-request] flush-pkt want-list = first-want @@ -256,6 +257,8 @@ out of what the server said it could do with the first 'want' line. additional-want = PKT-LINE("want" SP obj-id) depth = 1*DIGIT + + filter-request = PKT-LINE("filter" SP filter-spec) ---- Clients MUST send all the obj-ids it wants from the reference @@ -278,6 +281,11 @@ complete those commits. Commits whose parents are not received as a result are defined as shallow and marked as such in the server. This information is sent back to the client in the next step. +The client can optionally request that pack-objects omit various +objects from the packfile using one of several filtering techniques. +These are intended for use with partial clone and partial fetch +operations. See `rev-list` for possible "filter-spec" values. + Once all the 'want's and 'shallow's (and optional 'deepen') are transferred, clients MUST send a flush-pkt, to tell the server side that it is done sending the list. diff --git a/Documentation/technical/protocol-capabilities.txt b/Documentation/technical/protocol-capabilities.txt index 26dcc6f502..332d209b58 100644 --- a/Documentation/technical/protocol-capabilities.txt +++ b/Documentation/technical/protocol-capabilities.txt @@ -309,3 +309,11 @@ to accept a signed push certificate, and asks the <nonce> to be included in the push certificate. A send-pack client MUST NOT send a push-cert packet unless the receive-pack server advertises this capability. 
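The series above wires up `uploadpack.allowFilter`, the "filter" request in the pack protocol, and the `extensions.partialclone` repository extension. As an editorial sketch only (the client-side `--filter` option and the `blob:none` filter-spec are assumed from the same partial-clone topic and are still experimental at this point), end-to-end usage might look like:

------------------------------------------------
# server side: allow clients to request object filtering
$ git -C /srv/git/project.git config uploadpack.allowFilter true

# client side (assumed --filter support from the same series):
# omit all blobs at clone time and fetch them lazily on demand
$ git clone --filter=blob:none https://git.example.org/project.git
------------------------------------------------

Such a clone records the promisor remote under `extensions.partialclone`, as described in the repository-version change that follows.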
+ +filter +------ + +If the upload-pack server advertises the 'filter' capability, +fetch-pack may send "filter" commands to request a partial clone +or partial fetch and request that the server omit various objects +from the packfile. diff --git a/Documentation/technical/repository-version.txt b/Documentation/technical/repository-version.txt index 00ad37986e..e03eaccebc 100644 --- a/Documentation/technical/repository-version.txt +++ b/Documentation/technical/repository-version.txt @@ -86,3 +86,15 @@ for testing format-1 compatibility. When the config key `extensions.preciousObjects` is set to `true`, objects in the repository MUST NOT be deleted (e.g., by `git-prune` or `git repack -d`). + +`partialclone` +~~~~~~~~~~~~~~ + +When the config key `extensions.partialclone` is set, it indicates +that the repo was created with a partial clone (or later performed +a partial fetch) and that the remote may have omitted sending +certain unwanted objects. Such a remote is called a "promisor remote" +and it promises that all such omitted objects can be fetched from it +in the future. + +The value of this key is the name of the promisor remote. @@ -84,9 +84,24 @@ Issues of note: GIT_EXEC_PATH=`pwd` PATH=`pwd`:$PATH - GITPERLLIB=`pwd`/perl/blib/lib + GITPERLLIB=`pwd`/perl/build/lib export GIT_EXEC_PATH PATH GITPERLLIB + - By default (unless NO_PERL is provided) Git will ship various perl + scripts & libraries it needs. However, for simplicity it doesn't + use the ExtUtils::MakeMaker toolchain to decide where to place the + perl libraries. Depending on the system this can result in the perl + libraries not being where you'd like them if they're expected to be + used by things other than Git itself. + + Manually supplying a perllibdir prefix should fix this, if this is + a problem you care about, e.g.: + + prefix=/usr perllibdir=/usr/$(/usr/bin/perl -MConfig -wle 'print substr $Config{installsitelib}, 1 + length $Config{siteprefixexp}') + + Will result in e.g. perllibdir=/usr/share/perl/5.26.1 on Debian, + perllibdir=/usr/share/perl5 (which we'd use by default) on CentOS. + - Git is reasonably self-sufficient, but does depend on a few external programs and libraries. Git can be used without most of them by adding the approriate "NO_<LIBRARY>=YesPlease" to the make command line or @@ -294,9 +294,6 @@ all:: # # Define PERL_PATH to the path of your Perl binary (usually /usr/bin/perl). # -# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's -# MakeMaker (e.g. using ActiveState under Cygwin). -# # Define NO_PERL if you do not want Perl scripts or libraries at all. 
# # Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python @@ -479,6 +476,7 @@ gitexecdir = libexec/git-core mergetoolsdir = $(gitexecdir)/mergetools sharedir = $(prefix)/share gitwebdir = $(sharedir)/gitweb +perllibdir = $(sharedir)/perl5 localedir = $(sharedir)/locale template_dir = share/git-core/templates htmldir = $(prefix)/share/doc/git-doc @@ -492,7 +490,7 @@ mandir_relative = $(patsubst $(prefix)/%,%,$(mandir)) infodir_relative = $(patsubst $(prefix)/%,%,$(infodir)) htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir)) -export prefix bindir sharedir sysconfdir gitwebdir localedir +export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir CC = cc AR = ar @@ -804,6 +802,7 @@ LIB_OBJS += ewah/ewah_bitmap.o LIB_OBJS += ewah/ewah_io.o LIB_OBJS += ewah/ewah_rlw.o LIB_OBJS += exec_cmd.o +LIB_OBJS += fetch-object.o LIB_OBJS += fetch-pack.o LIB_OBJS += fsck.o LIB_OBJS += fsmonitor.o @@ -832,7 +831,6 @@ LIB_OBJS += merge.o LIB_OBJS += merge-blobs.o LIB_OBJS += merge-recursive.o LIB_OBJS += mergesort.o -LIB_OBJS += mru.o LIB_OBJS += name-hash.o LIB_OBJS += notes.o LIB_OBJS += notes-cache.o @@ -1515,7 +1513,9 @@ else LIB_OBJS += sha1dc_git.o ifdef DC_SHA1_EXTERNAL ifdef DC_SHA1_SUBMODULE + ifneq ($(DC_SHA1_SUBMODULE),auto) $(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both) + endif endif BASIC_CFLAGS += -DDC_SHA1_EXTERNAL EXTLIBS += -lsha1detectcoll @@ -1543,9 +1543,6 @@ ifdef SHA1_MAX_BLOCK_SIZE LIB_OBJS += compat/sha1-chunked.o BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)" endif -ifdef NO_PERL_MAKEMAKER - export NO_PERL_MAKEMAKER -endif ifdef NO_HSTRERROR COMPAT_CFLAGS += -DNO_HSTRERROR COMPAT_OBJS += compat/hstrerror.o @@ -1732,8 +1729,10 @@ ETC_GITATTRIBUTES_SQ = $(subst ','\'',$(ETC_GITATTRIBUTES)) DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) bindir_SQ = $(subst ','\'',$(bindir)) bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) +mandir_SQ = $(subst ','\'',$(mandir)) mandir_relative_SQ = $(subst ','\'',$(mandir_relative)) infodir_relative_SQ = $(subst ','\'',$(infodir_relative)) +perllibdir_SQ = $(subst ','\'',$(perllibdir)) localedir_SQ = $(subst ','\'',$(localedir)) gitexecdir_SQ = $(subst ','\'',$(gitexecdir)) template_dir_SQ = $(subst ','\'',$(template_dir)) @@ -1844,9 +1843,6 @@ ifndef NO_TCLTK $(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all $(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all endif -ifndef NO_PERL - $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' localedir='$(localedir_SQ)' all -endif $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)' please_set_SHELL_PATH_to_a_more_modern_shell: @@ -1928,7 +1924,8 @@ common-cmds.h: $(wildcard Documentation/git-*.txt) SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\ $(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\ - $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV) + $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV):\ + $(perllibdir_SQ) define cmd_munge_script $(RM) $@ $@+ && \ sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ @@ -1972,23 +1969,12 @@ git.res: git.rc GIT-VERSION-FILE $(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS ifndef NO_PERL -$(SCRIPT_PERL_GEN): perl/perl.mak +$(SCRIPT_PERL_GEN): -perl/perl.mak: perl/PM.stamp - -perl/PM.stamp: FORCE - @$(FIND) perl -type f -name '*.pm' | sort >$@+ && \ - $(PERL_PATH) -V >>$@+ && \ - { cmp $@+ $@ >/dev/null 2>/dev/null || mv $@+ $@; } && \ - $(RM) $@+ - 
-perl/perl.mak: GIT-CFLAGS GIT-PREFIX perl/Makefile perl/Makefile.PL - $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F) - -PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ) -$(SCRIPT_PERL_GEN): % : %.perl perl/perl.mak GIT-PERL-DEFINES GIT-VERSION-FILE +PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ) +$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-VERSION-FILE $(QUIET_GEN)$(RM) $@ $@+ && \ - INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \ + INSTLIBDIR='$(perllibdir_SQ)' && \ INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \ INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \ sed -e '1{' \ @@ -2176,6 +2162,8 @@ gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \ http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SPARSE_FLAGS += \ -DCURL_DISABLE_TYPECHECK +pack-revindex.sp: SPARSE_FLAGS += -Wno-memcpy-max-count + ifdef NO_EXPAT http-walker.sp http-walker.s http-walker.o: EXTRA_CPPFLAGS = -DNO_EXPAT endif @@ -2312,6 +2300,21 @@ endif po/build/locale/%/LC_MESSAGES/git.mo: po/%.po $(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $< +LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm) +LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL)) + +ifndef NO_PERL +all:: $(LIB_PERL_GEN) +endif + +perl/build/lib/%.pm: perl/%.pm + $(QUIET_GEN)mkdir -p $(dir $@) && \ + sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' < $< > $@ + +perl/build/man/man3/Git.3pm: perl/Git.pm + $(QUIET_GEN)mkdir -p $(dir $@) && \ + pod2man $< $@ + FIND_SOURCE_FILES = ( \ git ls-files \ '*.[hcS]' \ @@ -2572,7 +2575,9 @@ ifndef NO_GETTEXT (cd '$(DESTDIR_SQ)$(localedir_SQ)' && umask 022 && $(TAR) xof -) endif ifndef NO_PERL - $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)' + (cd perl/build/lib && $(TAR) cf - .) | \ + (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -) $(MAKE) -C gitweb install endif ifndef NO_TCLTK @@ -2622,12 +2627,17 @@ endif install-gitweb: $(MAKE) -C gitweb install -install-doc: +install-doc: install-man-perl $(MAKE) -C Documentation install -install-man: +install-man: install-man-perl $(MAKE) -C Documentation install-man +install-man-perl: perl/build/man/man3/Git.3pm + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3' + (cd perl/build/man/man3 && $(TAR) cf - .) 
| \ + (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -) + install-html: $(MAKE) -C Documentation install-html @@ -2662,6 +2672,21 @@ dist: git-archive$(X) configure $(GIT_TARNAME)/configure \ $(GIT_TARNAME)/version \ $(GIT_TARNAME)/git-gui/version +ifdef DC_SHA1_SUBMODULE + @mkdir -p $(GIT_TARNAME)/sha1collisiondetection/lib + @cp sha1collisiondetection/LICENSE.txt \ + $(GIT_TARNAME)/sha1collisiondetection/ + @cp sha1collisiondetection/LICENSE.txt \ + $(GIT_TARNAME)/sha1collisiondetection/ + @cp sha1collisiondetection/lib/sha1.[ch] \ + $(GIT_TARNAME)/sha1collisiondetection/lib/ + @cp sha1collisiondetection/lib/ubc_check.[ch] \ + $(GIT_TARNAME)/sha1collisiondetection/lib/ + $(TAR) rf $(GIT_TARNAME).tar \ + $(GIT_TARNAME)/sha1collisiondetection/LICENSE.txt \ + $(GIT_TARNAME)/sha1collisiondetection/lib/sha1.[ch] \ + $(GIT_TARNAME)/sha1collisiondetection/lib/ubc_check.[ch] +endif @$(RM) -r $(GIT_TARNAME) gzip -f -9 $(GIT_TARNAME).tar @@ -2711,7 +2736,7 @@ clean: profile-clean coverage-clean $(RM) $(TEST_PROGRAMS) $(NO_INSTALL) $(RM) -r bin-wrappers $(dep_dirs) $(RM) -r po/build/ - $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope* + $(RM) *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope* $(RM) -r $(GIT_TARNAME) .doc-tmp-dir $(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz $(RM) $(htmldocs).tar.gz $(manpages).tar.gz @@ -2719,7 +2744,7 @@ clean: profile-clean coverage-clean $(MAKE) -C Documentation/ clean ifndef NO_PERL $(MAKE) -C gitweb clean - $(MAKE) -C perl clean + $(RM) -r perl/build/ endif $(MAKE) -C templates/ clean $(MAKE) -C t/ clean @@ -950,7 +950,7 @@ static int gitdiff_verify_name(struct apply_state *state, } free(another); } else { - if (!starts_with(line, "/dev/null\n")) + if (!is_dev_null(line)) return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr); } @@ -2263,8 +2263,8 @@ static void show_stats(struct apply_state *state, struct patch *patch) static int read_old_data(struct stat *st, struct patch *patch, const char *path, struct strbuf *buf) { - enum safe_crlf safe_crlf = patch->crlf_in_old ? - SAFE_CRLF_KEEP_CRLF : SAFE_CRLF_RENORMALIZE; + int conv_flags = patch->crlf_in_old ? + CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE; switch (st->st_mode & S_IFMT) { case S_IFLNK: if (strbuf_readlink(buf, path, st->st_size) < 0) @@ -2281,7 +2281,7 @@ static int read_old_data(struct stat *st, struct patch *patch, * should never look at the index when explicit crlf option * is given. */ - convert_to_git(NULL, path, buf->buf, buf->len, buf, safe_crlf); + convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags); return 0; default: return -1; @@ -3154,7 +3154,7 @@ static int apply_binary(struct apply_state *state, * See if the old one matches what the patch * applies to. 
*/ - hash_sha1_file(img->buf, img->len, blob_type, oid.hash); + hash_object_file(img->buf, img->len, blob_type, &oid); if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix)) return error(_("the patch applies to '%s' (%s), " "which does not match the " @@ -3199,7 +3199,7 @@ static int apply_binary(struct apply_state *state, name); /* verify that the result matches */ - hash_sha1_file(img->buf, img->len, blob_type, oid.hash); + hash_object_file(img->buf, img->len, blob_type, &oid); if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix)) return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"), name, patch->new_sha1_prefix, oid_to_hex(&oid)); @@ -3554,7 +3554,7 @@ static int try_threeway(struct apply_state *state, /* Preimage the patch was prepared for */ if (patch->is_new) - write_sha1_file("", 0, blob_type, pre_oid.hash); + write_object_file("", 0, blob_type, &pre_oid); else if (get_oid(patch->old_sha1_prefix, &pre_oid) || read_blob_object(&buf, &pre_oid, patch->old_mode)) return error(_("repository lacks the necessary blob to fall back on 3-way merge.")); @@ -3570,7 +3570,7 @@ static int try_threeway(struct apply_state *state, return -1; } /* post_oid is theirs */ - write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash); + write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid); clear_image(&tmp_image); /* our_oid is ours */ @@ -3583,7 +3583,7 @@ static int try_threeway(struct apply_state *state, return error(_("cannot read the current contents of '%s'"), patch->old_name); } - write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash); + write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid); clear_image(&tmp_image); /* in-core three-way merge between post and our using pre as base */ @@ -4291,7 +4291,7 @@ static int add_index_file(struct apply_state *state, } fill_stat_cache_info(ce, &st); } - if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) { + if (write_object_file(buf, size, blob_type, &ce->oid) < 0) { free(ce); return error(_("unable to create backing store " "for newly created file %s"), path); @@ -232,7 +232,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt, convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0); origin->file.ptr = buf.buf; origin->file.size = buf.len; - pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash); + pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid); /* * Read the current index, replace the path entry with diff --git a/builtin/am.c b/builtin/am.c index 21aedec41f..1151b5c73a 100644 --- a/builtin/am.c +++ b/builtin/am.c @@ -1062,7 +1062,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format, } write_state_text(state, "scissors", str); - sq_quote_argv(&sb, state->git_apply_opts.argv, 0); + sq_quote_argv(&sb, state->git_apply_opts.argv); write_state_text(state, "apply-opt", sb.buf); if (state->rebasing) @@ -1645,8 +1645,8 @@ static void do_commit(const struct am_state *state) setenv("GIT_COMMITTER_DATE", state->ignore_date ? 
"" : state->author_date, 1); - if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash, - author, state->sign_commit)) + if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit, + author, state->sign_commit)) die(_("failed to write commit object")); reflog_msg = getenv("GIT_REFLOG_ACTION"); @@ -2181,6 +2181,7 @@ enum resume_mode { RESUME_RESOLVED, RESUME_SKIP, RESUME_ABORT, + RESUME_QUIT, RESUME_SHOW_PATCH }; @@ -2282,6 +2283,9 @@ int cmd_am(int argc, const char **argv, const char *prefix) OPT_CMDMODE(0, "abort", &resume, N_("restore the original branch and abort the patching operation."), RESUME_ABORT), + OPT_CMDMODE(0, "quit", &resume, + N_("abort the patching operation but keep HEAD where it is."), + RESUME_QUIT), OPT_CMDMODE(0, "show-current-patch", &resume, N_("show the patch being applied."), RESUME_SHOW_PATCH), @@ -2353,7 +2357,7 @@ int cmd_am(int argc, const char **argv, const char *prefix) * stray directories. */ if (file_exists(state.dir) && !state.rebasing) { - if (resume == RESUME_ABORT) { + if (resume == RESUME_ABORT || resume == RESUME_QUIT) { am_destroy(&state); am_state_release(&state); return 0; @@ -2395,6 +2399,10 @@ int cmd_am(int argc, const char **argv, const char *prefix) case RESUME_ABORT: am_abort(&state); break; + case RESUME_QUIT: + am_rerere_clear(); + am_destroy(&state); + break; case RESUME_SHOW_PATCH: ret = show_patch(&state); break; diff --git a/builtin/archive.c b/builtin/archive.c index f863465a0f..73971d0dd2 100644 --- a/builtin/archive.c +++ b/builtin/archive.c @@ -55,7 +55,7 @@ static int run_remote_archiver(int argc, const char **argv, buf = packet_read_line(fd[0], NULL); if (!buf) - die(_("git archive: expected ACK/NAK, got EOF")); + die(_("git archive: expected ACK/NAK, got a flush packet")); if (strcmp(buf, "ACK")) { if (starts_with(buf, "NACK ")) die(_("git archive: NACK %s"), buf + 5); diff --git a/builtin/blame.c b/builtin/blame.c index 005f55aaa2..9dcb367b90 100644 --- a/builtin/blame.c +++ b/builtin/blame.c @@ -649,6 +649,15 @@ static int blame_move_callback(const struct option *option, const char *arg, int return 0; } +static int is_a_rev(const char *name) +{ + struct object_id oid; + + if (get_oid(name, &oid)) + return 0; + return OBJ_NONE < sha1_object_info(oid.hash, NULL); +} + int cmd_blame(int argc, const char **argv, const char *prefix) { struct rev_info revs; @@ -845,16 +854,15 @@ parse_done: } else { if (argc < 2) usage_with_options(blame_opt_usage, options); - path = add_prefix(prefix, argv[argc - 1]); - if (argc == 3 && !file_exists(path)) { /* (2b) */ + if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */ path = add_prefix(prefix, argv[1]); argv[1] = argv[2]; + } else { /* (2a) */ + if (argc == 2 && is_a_rev(argv[1]) && !get_git_work_tree()) + die("missing <path> to blame"); + path = add_prefix(prefix, argv[argc - 1]); } argv[argc - 1] = "--"; - - setup_work_tree(); - if (!file_exists(path)) - die_errno("cannot stat path '%s'", path); } revs.disable_stdin = 1; diff --git a/builtin/cat-file.c b/builtin/cat-file.c index f5fa4fd75a..cf9ea5c796 100644 --- a/builtin/cat-file.c +++ b/builtin/cat-file.c @@ -475,6 +475,8 @@ static int batch_objects(struct batch_options *opt) for_each_loose_object(batch_loose_object, &sa, 0); for_each_packed_object(batch_packed_object, &sa, 0); + if (repository_format_partial_clone) + warning("This repository has extensions.partialClone set. 
Some objects may not be loaded."); cb.opt = opt; cb.expand = &data; diff --git a/builtin/check-ignore.c b/builtin/check-ignore.c index 3e280b9c7a..ec9a959e08 100644 --- a/builtin/check-ignore.c +++ b/builtin/check-ignore.c @@ -72,7 +72,7 @@ static int check_ignore(struct dir_struct *dir, { const char *full_path; char *seen; - int num_ignored = 0, dtype = DT_UNKNOWN, i; + int num_ignored = 0, i; struct exclude *exclude; struct pathspec pathspec; @@ -104,6 +104,7 @@ static int check_ignore(struct dir_struct *dir, full_path = pathspec.items[i].match; exclude = NULL; if (!seen[i]) { + int dtype = DT_UNKNOWN; exclude = last_exclude_matching(dir, &the_index, full_path, &dtype); } diff --git a/builtin/checkout.c b/builtin/checkout.c index c54c78df54..191b96c49c 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -227,8 +227,7 @@ static int checkout_merged(int pos, const struct checkout *state) * (it also writes the merge result to the object database even * when it may contain conflicts). */ - if (write_sha1_file(result_buf.ptr, result_buf.size, - blob_type, oid.hash)) + if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid)) die(_("Unable to add merge result for '%s'"), path); free(result_buf.ptr); ce = make_cache_entry(mode, oid.hash, path, 2, 0); diff --git a/builtin/clone.c b/builtin/clone.c index 284651797e..101c27a593 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -26,6 +26,7 @@ #include "run-command.h" #include "connected.h" #include "packfile.h" +#include "list-objects-filter-options.h" /* * Overall FIXMEs: @@ -60,6 +61,7 @@ static struct string_list option_optional_reference = STRING_LIST_INIT_NODUP; static int option_dissociate; static int max_jobs = -1; static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP; +static struct list_objects_filter_options filter_options; static int recurse_submodules_cb(const struct option *opt, const char *arg, int unset) @@ -135,6 +137,7 @@ static struct option builtin_clone_options[] = { TRANSPORT_FAMILY_IPV4), OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"), TRANSPORT_FAMILY_IPV6), + OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), OPT_END() }; @@ -893,6 +896,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix) struct refspec *refspec; const char *fetch_pattern; + fetch_if_missing = 0; + packet_trace_identity("clone"); argc = parse_options(argc, argv, prefix, builtin_clone_options, builtin_clone_usage, 0); @@ -1090,6 +1095,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix) warning(_("--shallow-since is ignored in local clones; use file:// instead.")); if (option_not.nr) warning(_("--shallow-exclude is ignored in local clones; use file:// instead.")); + if (filter_options.choice) + warning(_("--filter is ignored in local clones; use file:// instead.")); if (!access(mkpath("%s/shallow", path), F_OK)) { if (option_local > 0) warning(_("source repository is shallow, ignoring --local")); @@ -1118,7 +1125,13 @@ int cmd_clone(int argc, const char **argv, const char *prefix) transport_set_option(transport, TRANS_OPT_UPLOADPACK, option_upload_pack); - if (transport->smart_options && !deepen) + if (filter_options.choice) { + transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, + filter_options.filter_spec); + transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + } + + if (transport->smart_options && !deepen && !filter_options.choice) transport->smart_options->check_self_contained_and_connected = 1; refs = transport_get_remote_refs(transport); 
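The cmd_clone() changes above turn the global fetch_if_missing off before any transfer starts and (further below) back on just before checkout. Here is a condensed sketch of that pattern; fetch_if_missing is the global used throughout these changes, everything else is a placeholder for the real clone logic.

------------------------
extern int fetch_if_missing;	/* global lazy-fetch switch used by this series */

static void partial_clone_flow(void)
{
	/*
	 * While the object store is still being populated, missing
	 * objects are expected and must not trigger a lazy fetch from
	 * the promisor remote.
	 */
	fetch_if_missing = 0;

	/* ... negotiate, download and index the (possibly filtered) pack ... */

	/*
	 * From here on, objects omitted by the filter may genuinely be
	 * needed (for example during checkout), so lazy fetching is
	 * turned back on.
	 */
	fetch_if_missing = 1;

	/* ... check out the work tree ... */
}
------------------------

The same switch is flipped off in fetch-pack, fetch, fsck and index-pack later in this patch, each of which either never needs a lazy fetch or knows how to tolerate the missing promisor objects.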
@@ -1178,13 +1191,17 @@ int cmd_clone(int argc, const char **argv, const char *prefix) write_refspec_config(src_ref_prefix, our_head_points_at, remote_head_points_at, &branch_top); + if (filter_options.choice) + partial_clone_register("origin", &filter_options); + if (is_local) clone_local(path, git_dir); else if (refs && complete_refs_before_fetch) transport_fetch_refs(transport, mapped_refs); update_remote_refs(refs, mapped_refs, remote_head_points_at, - branch_top.buf, reflog_msg.buf, transport, !is_local); + branch_top.buf, reflog_msg.buf, transport, + !is_local && !filter_options.choice); update_head(our_head_points_at, remote_head, reflog_msg.buf); @@ -1205,6 +1222,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix) } junk_mode = JUNK_LEAVE_REPO; + fetch_if_missing = 1; err = checkout(submodule_progress); strbuf_release(&reflog_msg); diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c index 2177251e24..e5bdf57b1e 100644 --- a/builtin/commit-tree.c +++ b/builtin/commit-tree.c @@ -117,8 +117,8 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix) die_errno("git commit-tree: failed to read"); } - if (commit_tree(buffer.buf, buffer.len, tree_oid.hash, parents, - commit_oid.hash, NULL, sign_commit)) { + if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid, + NULL, sign_commit)) { strbuf_release(&buffer); return 1; } diff --git a/builtin/commit.c b/builtin/commit.c index 4610e3d8e3..e8e8d13be4 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -31,9 +31,7 @@ #include "gpg-interface.h" #include "column.h" #include "sequencer.h" -#include "notes-utils.h" #include "mailmap.h" -#include "sigchain.h" static const char * const builtin_commit_usage[] = { N_("git commit [<options>] [--] <pathspec>..."), @@ -45,31 +43,6 @@ static const char * const builtin_status_usage[] = { NULL }; -static const char implicit_ident_advice_noconfig[] = -N_("Your name and email address were configured automatically based\n" -"on your username and hostname. Please check that they are accurate.\n" -"You can suppress this message by setting them explicitly. Run the\n" -"following command and follow the instructions in your editor to edit\n" -"your configuration file:\n" -"\n" -" git config --global --edit\n" -"\n" -"After doing this, you may fix the identity used for this commit with:\n" -"\n" -" git commit --amend --reset-author\n"); - -static const char implicit_ident_advice_config[] = -N_("Your name and email address were configured automatically based\n" -"on your username and hostname. Please check that they are accurate.\n" -"You can suppress this message by setting them explicitly:\n" -"\n" -" git config --global user.name \"Your Name\"\n" -" git config --global user.email you@example.com\n" -"\n" -"After doing this, you may fix the identity used for this commit with:\n" -"\n" -" git commit --amend --reset-author\n"); - static const char empty_amend_advice[] = N_("You asked to amend the most recent commit, but doing so would make\n" "it empty. 
You can repeat your command with --allow-empty, or you can\n" @@ -93,8 +66,6 @@ N_("If you wish to skip this commit, use:\n" "Then \"git cherry-pick --continue\" will resume cherry-picking\n" "the remaining commits.\n"); -static GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG") - static const char *use_message_buffer; static struct lock_file index_lock; /* real index */ static struct lock_file false_lock; /* used only for partial commits */ @@ -128,12 +99,7 @@ static char *sign_commit; * if editor is used, and only the whitespaces if the message * is specified explicitly. */ -static enum { - CLEANUP_SPACE, - CLEANUP_NONE, - CLEANUP_SCISSORS, - CLEANUP_ALL -} cleanup_mode; +static enum commit_msg_cleanup_mode cleanup_mode; static const char *cleanup_arg; static enum commit_whence whence; @@ -673,7 +639,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, struct strbuf sb = STRBUF_INIT; const char *hook_arg1 = NULL; const char *hook_arg2 = NULL; - int clean_message_contents = (cleanup_mode != CLEANUP_NONE); + int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE); int old_display_comment_prefix; /* This checks and barfs if author is badly specified */ @@ -814,7 +780,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, struct ident_split ci, ai; if (whence != FROM_COMMIT) { - if (cleanup_mode == CLEANUP_SCISSORS) + if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) wt_status_add_cut_line(s->fp); status_printf_ln(s, GIT_COLOR_NORMAL, whence == FROM_MERGE @@ -834,14 +800,15 @@ static int prepare_to_commit(const char *index_file, const char *prefix, } fprintf(s->fp, "\n"); - if (cleanup_mode == CLEANUP_ALL) + if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL) status_printf(s, GIT_COLOR_NORMAL, _("Please enter the commit message for your changes." " Lines starting\nwith '%c' will be ignored, and an empty" " message aborts the commit.\n"), comment_line_char); - else if (cleanup_mode == CLEANUP_SCISSORS && whence == FROM_COMMIT) + else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS && + whence == FROM_COMMIT) wt_status_add_cut_line(s->fp); - else /* CLEANUP_SPACE, that is. */ + else /* COMMIT_MSG_CLEANUP_SPACE, that is. */ status_printf(s, GIT_COLOR_NORMAL, _("Please enter the commit message for your changes." " Lines starting\n" @@ -986,65 +953,6 @@ static int prepare_to_commit(const char *index_file, const char *prefix, return 1; } -static int rest_is_empty(struct strbuf *sb, int start) -{ - int i, eol; - const char *nl; - - /* Check if the rest is just whitespace and Signed-off-by's. */ - for (i = start; i < sb->len; i++) { - nl = memchr(sb->buf + i, '\n', sb->len - i); - if (nl) - eol = nl - sb->buf; - else - eol = sb->len; - - if (strlen(sign_off_header) <= eol - i && - starts_with(sb->buf + i, sign_off_header)) { - i = eol; - continue; - } - while (i < eol) - if (!isspace(sb->buf[i++])) - return 0; - } - - return 1; -} - -/* - * Find out if the message in the strbuf contains only whitespace and - * Signed-off-by lines. 
- */ -static int message_is_empty(struct strbuf *sb) -{ - if (cleanup_mode == CLEANUP_NONE && sb->len) - return 0; - return rest_is_empty(sb, 0); -} - -/* - * See if the user edited the message in the editor or left what - * was in the template intact - */ -static int template_untouched(struct strbuf *sb) -{ - struct strbuf tmpl = STRBUF_INIT; - const char *start; - - if (cleanup_mode == CLEANUP_NONE && sb->len) - return 0; - - if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0) - return 0; - - strbuf_stripspace(&tmpl, cleanup_mode == CLEANUP_ALL); - if (!skip_prefix(sb->buf, tmpl.buf, &start)) - start = sb->buf; - strbuf_release(&tmpl); - return rest_is_empty(sb, start - sb->buf); -} - static const char *find_author_by_nickname(const char *name) { struct rev_info revs; @@ -1229,15 +1137,17 @@ static int parse_and_validate_options(int argc, const char *argv[], if (argc == 0 && (also || (only && !amend && !allow_empty))) die(_("No paths with --include/--only does not make sense.")); if (!cleanup_arg || !strcmp(cleanup_arg, "default")) - cleanup_mode = use_editor ? CLEANUP_ALL : CLEANUP_SPACE; + cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_ALL : + COMMIT_MSG_CLEANUP_SPACE; else if (!strcmp(cleanup_arg, "verbatim")) - cleanup_mode = CLEANUP_NONE; + cleanup_mode = COMMIT_MSG_CLEANUP_NONE; else if (!strcmp(cleanup_arg, "whitespace")) - cleanup_mode = CLEANUP_SPACE; + cleanup_mode = COMMIT_MSG_CLEANUP_SPACE; else if (!strcmp(cleanup_arg, "strip")) - cleanup_mode = CLEANUP_ALL; + cleanup_mode = COMMIT_MSG_CLEANUP_ALL; else if (!strcmp(cleanup_arg, "scissors")) - cleanup_mode = use_editor ? CLEANUP_SCISSORS : CLEANUP_SPACE; + cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_SCISSORS : + COMMIT_MSG_CLEANUP_SPACE; else die(_("Invalid cleanup mode %s"), cleanup_arg); @@ -1439,98 +1349,6 @@ int cmd_status(int argc, const char **argv, const char *prefix) return 0; } -static const char *implicit_ident_advice(void) -{ - char *user_config = expand_user_path("~/.gitconfig", 0); - char *xdg_config = xdg_config_home("config"); - int config_exists = file_exists(user_config) || file_exists(xdg_config); - - free(user_config); - free(xdg_config); - - if (config_exists) - return _(implicit_ident_advice_config); - else - return _(implicit_ident_advice_noconfig); - -} - -static void print_summary(const char *prefix, const struct object_id *oid, - int initial_commit) -{ - struct rev_info rev; - struct commit *commit; - struct strbuf format = STRBUF_INIT; - const char *head; - struct pretty_print_context pctx = {0}; - struct strbuf author_ident = STRBUF_INIT; - struct strbuf committer_ident = STRBUF_INIT; - - commit = lookup_commit(oid); - if (!commit) - die(_("couldn't look up newly created commit")); - if (parse_commit(commit)) - die(_("could not parse newly created commit")); - - strbuf_addstr(&format, "format:%h] %s"); - - format_commit_message(commit, "%an <%ae>", &author_ident, &pctx); - format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx); - if (strbuf_cmp(&author_ident, &committer_ident)) { - strbuf_addstr(&format, "\n Author: "); - strbuf_addbuf_percentquote(&format, &author_ident); - } - if (author_date_is_interesting()) { - struct strbuf date = STRBUF_INIT; - format_commit_message(commit, "%ad", &date, &pctx); - strbuf_addstr(&format, "\n Date: "); - strbuf_addbuf_percentquote(&format, &date); - strbuf_release(&date); - } - if (!committer_ident_sufficiently_given()) { - strbuf_addstr(&format, "\n Committer: "); - strbuf_addbuf_percentquote(&format, &committer_ident); - if 
(advice_implicit_identity) { - strbuf_addch(&format, '\n'); - strbuf_addstr(&format, implicit_ident_advice()); - } - } - strbuf_release(&author_ident); - strbuf_release(&committer_ident); - - init_revisions(&rev, prefix); - setup_revisions(0, NULL, &rev, NULL); - - rev.diff = 1; - rev.diffopt.output_format = - DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY; - - rev.verbose_header = 1; - rev.show_root_diff = 1; - get_commit_format(format.buf, &rev); - rev.always_show_header = 0; - rev.diffopt.detect_rename = DIFF_DETECT_RENAME; - rev.diffopt.break_opt = 0; - diff_setup_done(&rev.diffopt); - - head = resolve_ref_unsafe("HEAD", 0, NULL, NULL); - if (!head) - die_errno(_("unable to resolve HEAD after creating commit")); - if (!strcmp(head, "HEAD")) - head = _("detached HEAD"); - else - skip_prefix(head, "refs/heads/", &head); - printf("[%s%s ", head, initial_commit ? _(" (root-commit)") : ""); - - if (!log_tree_commit(&rev, commit)) { - rev.always_show_header = 1; - rev.use_terminator = 1; - log_tree_commit(&rev, commit); - } - - strbuf_release(&format); -} - static int git_commit_config(const char *k, const char *v, void *cb) { struct wt_status *s = cb; @@ -1560,37 +1378,6 @@ static int git_commit_config(const char *k, const char *v, void *cb) return git_status_config(k, v, s); } -static int run_rewrite_hook(const struct object_id *oldoid, - const struct object_id *newoid) -{ - struct child_process proc = CHILD_PROCESS_INIT; - const char *argv[3]; - int code; - struct strbuf sb = STRBUF_INIT; - - argv[0] = find_hook("post-rewrite"); - if (!argv[0]) - return 0; - - argv[1] = "amend"; - argv[2] = NULL; - - proc.argv = argv; - proc.in = -1; - proc.stdout_to_stderr = 1; - - code = start_command(&proc); - if (code) - return code; - strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid)); - sigchain_push(SIGPIPE, SIG_IGN); - write_in_full(proc.in, sb.buf, sb.len); - close(proc.in); - strbuf_release(&sb); - sigchain_pop(SIGPIPE); - return finish_command(&proc); -} - int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...) { struct argv_array hook_env = ARGV_ARRAY_INIT; @@ -1673,13 +1460,11 @@ int cmd_commit(int argc, const char **argv, const char *prefix) struct strbuf sb = STRBUF_INIT; struct strbuf author_ident = STRBUF_INIT; const char *index_file, *reflog_msg; - char *nl; struct object_id oid; struct commit_list *parents = NULL; struct stat statbuf; struct commit *current_head = NULL; struct commit_extra_header *extra = NULL; - struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; if (argc == 2 && !strcmp(argv[1], "-h")) @@ -1770,17 +1555,17 @@ int cmd_commit(int argc, const char **argv, const char *prefix) } if (verbose || /* Truncate the message just before the diff, if any. 
*/ - cleanup_mode == CLEANUP_SCISSORS) + cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) strbuf_setlen(&sb, wt_status_locate_end(sb.buf, sb.len)); - if (cleanup_mode != CLEANUP_NONE) - strbuf_stripspace(&sb, cleanup_mode == CLEANUP_ALL); + if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE) + strbuf_stripspace(&sb, cleanup_mode == COMMIT_MSG_CLEANUP_ALL); - if (message_is_empty(&sb) && !allow_empty_message) { + if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) { rollback_index_files(); fprintf(stderr, _("Aborting commit due to empty commit message.\n")); exit(1); } - if (template_untouched(&sb) && !allow_empty_message) { + if (template_untouched(&sb, template_file, cleanup_mode) && !allow_empty_message) { rollback_index_files(); fprintf(stderr, _("Aborting commit; you did not edit the message.\n")); exit(1); @@ -1794,33 +1579,20 @@ int cmd_commit(int argc, const char **argv, const char *prefix) append_merge_tag_headers(parents, &tail); } - if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->oid.hash, - parents, oid.hash, author_ident.buf, sign_commit, extra)) { + if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid, + parents, &oid, author_ident.buf, sign_commit, + extra)) { rollback_index_files(); die(_("failed to write commit object")); } strbuf_release(&author_ident); free_commit_extra_headers(extra); - nl = strchr(sb.buf, '\n'); - if (nl) - strbuf_setlen(&sb, nl + 1 - sb.buf); - else - strbuf_addch(&sb, '\n'); - strbuf_insert(&sb, 0, reflog_msg, strlen(reflog_msg)); - strbuf_insert(&sb, strlen(reflog_msg), ": ", 2); - - transaction = ref_transaction_begin(&err); - if (!transaction || - ref_transaction_update(transaction, "HEAD", &oid, - current_head - ? ¤t_head->object.oid : &null_oid, - 0, sb.buf, &err) || - ref_transaction_commit(transaction, &err)) { + if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb, + &err)) { rollback_index_files(); die("%s", err.buf); } - ref_transaction_free(transaction); unlink(git_path_cherry_pick_head()); unlink(git_path_revert_head()); @@ -1837,17 +1609,17 @@ int cmd_commit(int argc, const char **argv, const char *prefix) rerere(0); run_commit_hook(use_editor, get_index_file(), "post-commit", NULL); if (amend && !no_post_rewrite) { - struct notes_rewrite_cfg *cfg; - cfg = init_copy_notes_for_rewrite("amend"); - if (cfg) { - /* we are amending, so current_head is not NULL */ - copy_note_for_rewrite(cfg, ¤t_head->object.oid, &oid); - finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'"); - } - run_rewrite_hook(¤t_head->object.oid, &oid); + commit_post_rewrite(current_head, &oid); + } + if (!quiet) { + unsigned int flags = 0; + + if (!current_head) + flags |= SUMMARY_INITIAL_COMMIT; + if (author_date_is_interesting()) + flags |= SUMMARY_SHOW_AUTHOR_DATE; + print_commit_summary(prefix, &oid, flags); } - if (!quiet) - print_summary(prefix, &oid, !current_head); UNLEAK(err); UNLEAK(sb); diff --git a/builtin/describe.c b/builtin/describe.c index 6fe1c51281..e4869df7b4 100644 --- a/builtin/describe.c +++ b/builtin/describe.c @@ -383,7 +383,7 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst) if (!match_cnt) { struct object_id *cmit_oid = &cmit->object.oid; if (always) { - strbuf_addstr(dst, find_unique_abbrev(cmit_oid->hash, abbrev)); + strbuf_add_unique_abbrev(dst, cmit_oid->hash, abbrev); if (suffix) strbuf_addstr(dst, suffix); return; @@ -502,7 +502,7 @@ static void describe(const char *arg, int last_one) if (cmit) describe_commit(&oid, &sb); - else if (lookup_blob(&oid)) + else 
if (sha1_object_info(oid.hash, NULL) == OBJ_BLOB) describe_blob(oid, &sb); else die(_("%s is neither a commit nor blob"), arg); diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c index 366b9d13f9..a7bc1366ab 100644 --- a/builtin/fetch-pack.c +++ b/builtin/fetch-pack.c @@ -53,6 +53,8 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) struct oid_array shallow = OID_ARRAY_INIT; struct string_list deepen_not = STRING_LIST_INIT_DUP; + fetch_if_missing = 0; + packet_trace_identity("fetch-pack"); memset(&args, 0, sizeof(args)); @@ -143,6 +145,22 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) args.update_shallow = 1; continue; } + if (!strcmp("--from-promisor", arg)) { + args.from_promisor = 1; + continue; + } + if (!strcmp("--no-dependents", arg)) { + args.no_dependents = 1; + continue; + } + if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) { + parse_list_objects_filter(&args.filter_options, arg); + continue; + } + if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) { + list_objects_filter_set_no_filter(&args.filter_options); + continue; + } usage(fetch_pack_usage); } if (deepen_not.nr) diff --git a/builtin/fetch.c b/builtin/fetch.c index 7bbcd26faf..d32d94692c 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -19,6 +19,7 @@ #include "argv-array.h" #include "utf8.h" #include "packfile.h" +#include "list-objects-filter-options.h" static const char * const builtin_fetch_usage[] = { N_("git fetch [<options>] [<repository> [<refspec>...]]"), @@ -38,6 +39,10 @@ static int fetch_prune_config = -1; /* unspecified */ static int prune = -1; /* unspecified */ #define PRUNE_BY_DEFAULT 0 /* do we prune by default? */ +static int fetch_prune_tags_config = -1; /* unspecified */ +static int prune_tags = -1; /* unspecified */ +#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */ + static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative; static int progress = -1; static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen; @@ -56,6 +61,7 @@ static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND; static int shown_url = 0; static int refmap_alloc, refmap_nr; static const char **refmap_array; +static struct list_objects_filter_options filter_options; static int git_fetch_config(const char *k, const char *v, void *cb) { @@ -64,6 +70,11 @@ static int git_fetch_config(const char *k, const char *v, void *cb) return 0; } + if (!strcmp(k, "fetch.prunetags")) { + fetch_prune_tags_config = git_config_bool(k, v); + return 0; + } + if (!strcmp(k, "submodule.recurse")) { int r = git_config_bool(k, v) ? 
RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF; @@ -126,6 +137,8 @@ static struct option builtin_fetch_options[] = { N_("number of submodules fetched in parallel")), OPT_BOOL('p', "prune", &prune, N_("prune remote-tracking branches no longer on remote")), + OPT_BOOL('P', "prune-tags", &prune_tags, + N_("prune local tags no longer on remote and clobber changed tags")), { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, N_("on-demand"), N_("control recursive fetching of submodules"), PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules }, @@ -161,6 +174,7 @@ static struct option builtin_fetch_options[] = { TRANSPORT_FAMILY_IPV4), OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"), TRANSPORT_FAMILY_IPV6), + OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), OPT_END() }; @@ -1045,6 +1059,11 @@ static struct transport *prepare_transport(struct remote *remote, int deepen) set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes"); if (update_shallow) set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes"); + if (filter_options.choice) { + set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, + filter_options.filter_spec); + set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + } return transport; } @@ -1212,6 +1231,8 @@ static void add_options_to_argv(struct argv_array *argv) argv_array_push(argv, "--dry-run"); if (prune != -1) argv_array_push(argv, prune ? "--prune" : "--no-prune"); + if (prune_tags != -1) + argv_array_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags"); if (update_head_ok) argv_array_push(argv, "--update-head-ok"); if (force) @@ -1265,12 +1286,65 @@ static int fetch_multiple(struct string_list *list) return result; } -static int fetch_one(struct remote *remote, int argc, const char **argv) +/* + * Fetching from the promisor remote should use the given filter-spec + * or inherit the default filter-spec from the config. + */ +static inline void fetch_one_setup_partial(struct remote *remote) +{ + /* + * Explicit --no-filter argument overrides everything, regardless + * of any prior partial clones and fetches. + */ + if (filter_options.no_filter) + return; + + /* + * If no prior partial clone/fetch and the current fetch DID NOT + * request a partial-fetch, do a normal fetch. + */ + if (!repository_format_partial_clone && !filter_options.choice) + return; + + /* + * If this is the FIRST partial-fetch request, we enable partial + * on this repo and remember the given filter-spec as the default + * for subsequent fetches to this remote. + */ + if (!repository_format_partial_clone && filter_options.choice) { + partial_clone_register(remote->name, &filter_options); + return; + } + + /* + * We are currently limited to only ONE promisor remote and only + * allow partial-fetches from the promisor remote. + */ + if (strcmp(remote->name, repository_format_partial_clone)) { + if (filter_options.choice) + die(_("--filter can only be used with the remote configured in core.partialClone")); + return; + } + + /* + * Do a partial-fetch from the promisor remote using either the + * explicitly given filter-spec or inherit the filter-spec from + * the config. 
+ */ + if (!filter_options.choice) + partial_clone_get_default_filter_spec(&filter_options); + return; +} + +static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok) { static const char **refs = NULL; struct refspec *refspec; int ref_nr = 0; + int j = 0; int exit_code; + int maybe_prune_tags; + int remote_via_config = remote_is_configured(remote, 0); if (!remote) die(_("No remote repository specified. Please, specify either a URL or a\n" @@ -1280,18 +1354,39 @@ static int fetch_one(struct remote *remote, int argc, const char **argv) if (prune < 0) { /* no command line request */ - if (0 <= gtransport->remote->prune) - prune = gtransport->remote->prune; + if (0 <= remote->prune) + prune = remote->prune; else if (0 <= fetch_prune_config) prune = fetch_prune_config; else prune = PRUNE_BY_DEFAULT; } + if (prune_tags < 0) { + /* no command line request */ + if (0 <= remote->prune_tags) + prune_tags = remote->prune_tags; + else if (0 <= fetch_prune_tags_config) + prune_tags = fetch_prune_tags_config; + else + prune_tags = PRUNE_TAGS_BY_DEFAULT; + } + + maybe_prune_tags = prune_tags_ok && prune_tags; + if (maybe_prune_tags && remote_via_config) + add_prune_tags_to_fetch_refspec(remote); + + if (argc > 0 || (maybe_prune_tags && !remote_via_config)) { + size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1); + refs = xcalloc(nr_alloc, sizeof(const char *)); + if (maybe_prune_tags) { + refs[j++] = xstrdup("refs/tags/*:refs/tags/*"); + ref_nr++; + } + } + if (argc > 0) { - int j = 0; int i; - refs = xcalloc(st_add(argc, 1), sizeof(const char *)); for (i = 0; i < argc; i++) { if (!strcmp(argv[i], "tag")) { i++; @@ -1301,9 +1396,8 @@ static int fetch_one(struct remote *remote, int argc, const char **argv) argv[i], argv[i]); } else refs[j++] = argv[i]; + ref_nr++; } - refs[j] = NULL; - ref_nr = j; } sigchain_push_common(unlock_pack_on_signal); @@ -1320,12 +1414,15 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) { int i; struct string_list list = STRING_LIST_INIT_DUP; - struct remote *remote; + struct remote *remote = NULL; int result = 0; + int prune_tags_ok = 1; struct argv_array argv_gc_auto = ARGV_ARRAY_INIT; packet_trace_identity("fetch"); + fetch_if_missing = 0; + /* Record the command line for the reflog */ strbuf_addstr(&default_rla, "fetch"); for (i = 1; i < argc; i++) @@ -1359,23 +1456,23 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) if (depth || deepen_since || deepen_not.nr) deepen = 1; + if (filter_options.choice && !repository_format_partial_clone) + die("--filter can only be used when extensions.partialClone is set"); + if (all) { if (argc == 1) die(_("fetch --all does not take a repository argument")); else if (argc > 1) die(_("fetch --all does not make sense with refspecs")); (void) for_each_remote(get_one_remote_for_fetch, &list); - result = fetch_multiple(&list); } else if (argc == 0) { /* No arguments -- use default remote */ remote = remote_get(NULL); - result = fetch_one(remote, argc, argv); } else if (multiple) { /* All arguments are assumed to be remotes or groups */ for (i = 0; i < argc; i++) if (!add_remote_or_group(argv[i], &list)) die(_("No such remote or remote group: %s"), argv[i]); - result = fetch_multiple(&list); } else { /* Single remote or group */ (void) add_remote_or_group(argv[0], &list); @@ -1383,14 +1480,26 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) /* More than one remote */ if (argc > 1) die(_("Fetching a group and specifying refspecs does not make sense")); - result = 
fetch_multiple(&list); } else { /* Zero or one remotes */ remote = remote_get(argv[0]); - result = fetch_one(remote, argc-1, argv+1); + prune_tags_ok = (argc == 1); + argc--; + argv++; } } + if (remote) { + if (filter_options.choice || repository_format_partial_clone) + fetch_one_setup_partial(remote); + result = fetch_one(remote, argc, argv, prune_tags_ok); + } else { + if (filter_options.choice) + die(_("--filter can only be used with the remote configured in core.partialClone")); + /* TODO should this also die if we have a previous partial-clone? */ + result = fetch_multiple(&list); + } + if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) { struct argv_array options = ARGV_ARRAY_INIT; diff --git a/builtin/fsck.c b/builtin/fsck.c index 04846d46f9..9981db2263 100644 --- a/builtin/fsck.c +++ b/builtin/fsck.c @@ -149,6 +149,15 @@ static int mark_object(struct object *obj, int type, void *data, struct fsck_opt if (obj->flags & REACHABLE) return 0; obj->flags |= REACHABLE; + + if (is_promisor_object(&obj->oid)) + /* + * Further recursion does not need to be performed on this + * object since it is a promisor object (so it does not need to + * be added to "pending"). + */ + return 0; + if (!(obj->flags & HAS_OBJ)) { if (parent && !has_object_file(&obj->oid)) { printf("broken link from %7s %s\n", @@ -171,7 +180,13 @@ static void mark_object_reachable(struct object *obj) static int traverse_one_object(struct object *obj) { - return fsck_walk(obj, obj, &fsck_walk_options); + int result = fsck_walk(obj, obj, &fsck_walk_options); + + if (obj->type == OBJ_TREE) { + struct tree *tree = (struct tree *)obj; + free_tree_buffer(tree); + } + return result; } static int traverse_reachable(void) @@ -208,6 +223,8 @@ static void check_reachable_object(struct object *obj) * do a full fsck */ if (!(obj->flags & HAS_OBJ)) { + if (is_promisor_object(&obj->oid)) + return; if (has_sha1_pack(obj->oid.hash)) return; /* it is in pack - forget about it */ printf("missing %s %s\n", printable_type(obj), @@ -398,7 +415,7 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid, xstrfmt("%s@{%"PRItime"}", refname, timestamp)); obj->flags |= USED; mark_object_reachable(obj); - } else { + } else if (!is_promisor_object(oid)) { error("%s: invalid reflog entry %s", refname, oid_to_hex(oid)); errors_found |= ERROR_REACHABLE; } @@ -434,6 +451,14 @@ static int fsck_handle_ref(const char *refname, const struct object_id *oid, obj = parse_object(oid); if (!obj) { + if (is_promisor_object(oid)) { + /* + * Increment default_refs anyway, because this is a + * valid ref. + */ + default_refs++; + return 0; + } error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid)); errors_found |= ERROR_REACHABLE; /* We'll continue with the rest despite the error.. 
*/ @@ -659,6 +684,9 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) int i; struct alternate_object_database *alt; + /* fsck knows how to handle missing promisor objects */ + fetch_if_missing = 0; + errors_found = 0; check_replace_refs = 0; @@ -731,6 +759,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) struct object *obj = lookup_object(oid.hash); if (!obj || !(obj->flags & HAS_OBJ)) { + if (is_promisor_object(&oid)) + continue; error("%s: object missing", oid_to_hex(&oid)); errors_found |= ERROR_OBJECT; continue; diff --git a/builtin/gc.c b/builtin/gc.c index 3c5eae0edf..77fa720bd0 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -458,6 +458,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix) argv_array_push(&prune, prune_expire); if (quiet) argv_array_push(&prune, "--no-progress"); + if (repository_format_partial_clone) + argv_array_push(&prune, + "--exclude-promisor-objects"); if (run_command_v_opt(prune.argv, RUN_GIT_CMD)) return error(FAILED_RUN, prune.argv[0]); } diff --git a/builtin/hash-object.c b/builtin/hash-object.c index c532ff9320..526da5c185 100644 --- a/builtin/hash-object.c +++ b/builtin/hash-object.c @@ -24,7 +24,8 @@ static int hash_literally(struct object_id *oid, int fd, const char *type, unsig if (strbuf_read(&buf, fd, 4096) < 0) ret = -1; else - ret = hash_sha1_file_literally(buf.buf, buf.len, type, oid, flags); + ret = hash_object_file_literally(buf.buf, buf.len, type, oid, + flags); strbuf_release(&buf); return ret; } diff --git a/builtin/index-pack.c b/builtin/index-pack.c index 4c51aec81f..7e3e1a461c 100644 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -91,7 +91,7 @@ static unsigned int input_offset, input_len; static off_t consumed_bytes; static off_t max_input_size; static unsigned deepest_delta; -static git_SHA_CTX input_ctx; +static git_hash_ctx input_ctx; static uint32_t input_crc32; static int input_fd, output_fd; static const char *curr_pack; @@ -253,7 +253,7 @@ static void flush(void) if (input_offset) { if (output_fd >= 0) write_or_die(output_fd, input_buffer, input_offset); - git_SHA1_Update(&input_ctx, input_buffer, input_offset); + the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset); memmove(input_buffer, input_buffer + input_offset, input_len); input_offset = 0; } @@ -326,7 +326,7 @@ static const char *open_pack_file(const char *pack_name) output_fd = -1; nothread_data.pack_fd = input_fd; } - git_SHA1_Init(&input_ctx); + the_hash_algo->init_fn(&input_ctx); return pack_name; } @@ -437,22 +437,22 @@ static int is_delta_type(enum object_type type) } static void *unpack_entry_data(off_t offset, unsigned long size, - enum object_type type, unsigned char *sha1) + enum object_type type, struct object_id *oid) { static char fixed_buf[8192]; int status; git_zstream stream; void *buf; - git_SHA_CTX c; + git_hash_ctx c; char hdr[32]; int hdrlen; if (!is_delta_type(type)) { hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); } else - sha1 = NULL; + oid = NULL; if (type == OBJ_BLOB && size > big_file_threshold) buf = fixed_buf; else @@ -469,8 +469,8 @@ static void *unpack_entry_data(off_t offset, unsigned long size, stream.avail_in = input_len; status = git_inflate(&stream, 0); use(input_len - stream.avail_in); - if (sha1) - git_SHA1_Update(&c, last_out, stream.next_out - last_out); + if (oid) + the_hash_algo->update_fn(&c, last_out, 
stream.next_out - last_out); if (buf == fixed_buf) { stream.next_out = buf; stream.avail_out = sizeof(fixed_buf); @@ -479,15 +479,15 @@ static void *unpack_entry_data(off_t offset, unsigned long size, if (stream.total_out != size || status != Z_STREAM_END) bad_object(offset, _("inflate returned %d"), status); git_inflate_end(&stream); - if (sha1) - git_SHA1_Final(sha1, &c); + if (oid) + the_hash_algo->final_fn(oid->hash, &c); return buf == fixed_buf ? NULL : buf; } static void *unpack_raw_entry(struct object_entry *obj, off_t *ofs_offset, - unsigned char *ref_sha1, - unsigned char *sha1) + struct object_id *ref_oid, + struct object_id *oid) { unsigned char *p; unsigned long size, c; @@ -515,8 +515,8 @@ static void *unpack_raw_entry(struct object_entry *obj, switch (obj->type) { case OBJ_REF_DELTA: - hashcpy(ref_sha1, fill(20)); - use(20); + hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz)); + use(the_hash_algo->rawsz); break; case OBJ_OFS_DELTA: p = fill(1); @@ -546,7 +546,7 @@ static void *unpack_raw_entry(struct object_entry *obj, } obj->hdr_size = consumed_bytes - obj->idx.offset; - data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1); + data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid); obj->idx.crc32 = input_crc32; return data; } @@ -958,9 +958,8 @@ static void resolve_delta(struct object_entry *delta_obj, free(delta_data); if (!result->data) bad_object(delta_obj->idx.offset, _("failed to apply delta")); - hash_sha1_file(result->data, result->size, - typename(delta_obj->real_type), - delta_obj->idx.oid.hash); + hash_object_file(result->data, result->size, + typename(delta_obj->real_type), &delta_obj->idx.oid); sha1_object(result->data, NULL, result->size, delta_obj->real_type, &delta_obj->idx.oid); counter_lock(); @@ -1119,11 +1118,11 @@ static void *threaded_second_pass(void *data) * - calculate SHA1 of all non-delta objects; * - remember base (SHA1 or offset) for all deltas. */ -static void parse_pack_objects(unsigned char *sha1) +static void parse_pack_objects(unsigned char *hash) { int i, nr_delays = 0; struct ofs_delta_entry *ofs_delta = ofs_deltas; - unsigned char ref_delta_sha1[20]; + struct object_id ref_delta_oid; struct stat st; if (verbose) @@ -1133,8 +1132,8 @@ static void parse_pack_objects(unsigned char *sha1) for (i = 0; i < nr_objects; i++) { struct object_entry *obj = &objects[i]; void *data = unpack_raw_entry(obj, &ofs_delta->offset, - ref_delta_sha1, - obj->idx.oid.hash); + &ref_delta_oid, + &obj->idx.oid); obj->real_type = obj->type; if (obj->type == OBJ_OFS_DELTA) { nr_ofs_deltas++; @@ -1142,7 +1141,7 @@ static void parse_pack_objects(unsigned char *sha1) ofs_delta++; } else if (obj->type == OBJ_REF_DELTA) { ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc); - hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1); + hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_oid.hash); ref_deltas[nr_ref_deltas].obj_no = i; nr_ref_deltas++; } else if (!data) { @@ -1160,10 +1159,10 @@ static void parse_pack_objects(unsigned char *sha1) /* Check pack integrity */ flush(); - git_SHA1_Final(sha1, &input_ctx); - if (hashcmp(fill(20), sha1)) + the_hash_algo->final_fn(hash, &input_ctx); + if (hashcmp(fill(the_hash_algo->rawsz), hash)) die(_("pack is corrupted (SHA1 mismatch)")); - use(20); + use(the_hash_algo->rawsz); /* If input_fd is a file, we should have reached its end now. 
*/ if (fstat(input_fd, &st)) @@ -1239,21 +1238,21 @@ static void resolve_deltas(void) /* * Third pass: * - append objects to convert thin pack to full pack if required - * - write the final 20-byte SHA-1 + * - write the final pack hash */ -static void fix_unresolved_deltas(struct sha1file *f); -static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1) +static void fix_unresolved_deltas(struct hashfile *f); +static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash) { if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) { stop_progress(&progress); - /* Flush remaining pack final 20-byte SHA1. */ + /* Flush remaining pack final hash. */ flush(); return; } if (fix_thin_pack) { - struct sha1file *f; - unsigned char read_sha1[20], tail_sha1[20]; + struct hashfile *f; + unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ]; struct strbuf msg = STRBUF_INIT; int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas; int nr_objects_initial = nr_objects; @@ -1262,7 +1261,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1); memset(objects + nr_objects + 1, 0, nr_unresolved * sizeof(*objects)); - f = sha1fd(output_fd, curr_pack); + f = hashfd(output_fd, curr_pack); fix_unresolved_deltas(f); strbuf_addf(&msg, Q_("completed with %d local object", "completed with %d local objects", @@ -1270,12 +1269,12 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha nr_objects - nr_objects_initial); stop_progress_msg(&progress, msg.buf); strbuf_release(&msg); - sha1close(f, tail_sha1, 0); - hashcpy(read_sha1, pack_sha1); - fixup_pack_header_footer(output_fd, pack_sha1, + hashclose(f, tail_hash, 0); + hashcpy(read_hash, pack_hash); + fixup_pack_header_footer(output_fd, pack_hash, curr_pack, nr_objects, - read_sha1, consumed_bytes-20); - if (hashcmp(read_sha1, tail_sha1) != 0) + read_hash, consumed_bytes-the_hash_algo->rawsz); + if (hashcmp(read_hash, tail_hash) != 0) die(_("Unexpected tail checksum for %s " "(disk corruption?)"), curr_pack); } @@ -1286,7 +1285,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas); } -static int write_compressed(struct sha1file *f, void *in, unsigned int size) +static int write_compressed(struct hashfile *f, void *in, unsigned int size) { git_zstream stream; int status; @@ -1300,7 +1299,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size) stream.next_out = outbuf; stream.avail_out = sizeof(outbuf); status = git_deflate(&stream, Z_FINISH); - sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out); + hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out); } while (status == Z_OK); if (status != Z_STREAM_END) @@ -1310,7 +1309,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size) return size; } -static struct object_entry *append_obj_to_pack(struct sha1file *f, +static struct object_entry *append_obj_to_pack(struct hashfile *f, const unsigned char *sha1, void *buf, unsigned long size, enum object_type type) { @@ -1327,7 +1326,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f, } header[n++] = c; crc32_begin(f); - sha1write(f, header, n); + hashwrite(f, header, n); obj[0].size = size; obj[0].hdr_size = n; obj[0].type = type; @@ -1335,7 +1334,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f, 
obj[1].idx.offset = obj[0].idx.offset + n; obj[1].idx.offset += write_compressed(f, buf, size); obj[0].idx.crc32 = crc32_end(f); - sha1flush(f); + hashflush(f); hashcpy(obj->idx.oid.hash, sha1); return obj; } @@ -1347,7 +1346,7 @@ static int delta_pos_compare(const void *_a, const void *_b) return a->obj_no - b->obj_no; } -static void fix_unresolved_deltas(struct sha1file *f) +static void fix_unresolved_deltas(struct hashfile *f) { struct ref_delta_entry **sorted_by_pos; int i; @@ -1389,15 +1388,60 @@ static void fix_unresolved_deltas(struct sha1file *f) free(sorted_by_pos); } +static const char *derive_filename(const char *pack_name, const char *suffix, + struct strbuf *buf) +{ + size_t len; + if (!strip_suffix(pack_name, ".pack", &len)) + die(_("packfile name '%s' does not end with '.pack'"), + pack_name); + strbuf_add(buf, pack_name, len); + strbuf_addch(buf, '.'); + strbuf_addstr(buf, suffix); + return buf->buf; +} + +static void write_special_file(const char *suffix, const char *msg, + const char *pack_name, const unsigned char *hash, + const char **report) +{ + struct strbuf name_buf = STRBUF_INIT; + const char *filename; + int fd; + int msg_len = strlen(msg); + + if (pack_name) + filename = derive_filename(pack_name, suffix, &name_buf); + else + filename = odb_pack_name(&name_buf, hash, suffix); + + fd = odb_pack_keep(filename); + if (fd < 0) { + if (errno != EEXIST) + die_errno(_("cannot write %s file '%s'"), + suffix, filename); + } else { + if (msg_len > 0) { + write_or_die(fd, msg, msg_len); + write_or_die(fd, "\n", 1); + } + if (close(fd) != 0) + die_errno(_("cannot close written %s file '%s'"), + suffix, filename); + if (report) + *report = suffix; + } + strbuf_release(&name_buf); +} + static void final(const char *final_pack_name, const char *curr_pack_name, const char *final_index_name, const char *curr_index_name, - const char *keep_name, const char *keep_msg, - unsigned char *sha1) + const char *keep_msg, const char *promisor_msg, + unsigned char *hash) { const char *report = "pack"; struct strbuf pack_name = STRBUF_INIT; struct strbuf index_name = STRBUF_INIT; - struct strbuf keep_name_buf = STRBUF_INIT; int err; if (!from_stdin) { @@ -1409,32 +1453,16 @@ static void final(const char *final_pack_name, const char *curr_pack_name, die_errno(_("error while closing pack file")); } - if (keep_msg) { - int keep_fd, keep_msg_len = strlen(keep_msg); - - if (!keep_name) - keep_name = odb_pack_name(&keep_name_buf, sha1, "keep"); - - keep_fd = odb_pack_keep(keep_name); - if (keep_fd < 0) { - if (errno != EEXIST) - die_errno(_("cannot write keep file '%s'"), - keep_name); - } else { - if (keep_msg_len > 0) { - write_or_die(keep_fd, keep_msg, keep_msg_len); - write_or_die(keep_fd, "\n", 1); - } - if (close(keep_fd) != 0) - die_errno(_("cannot close written keep file '%s'"), - keep_name); - report = "keep"; - } - } + if (keep_msg) + write_special_file("keep", keep_msg, final_pack_name, hash, + &report); + if (promisor_msg) + write_special_file("promisor", promisor_msg, final_pack_name, + hash, NULL); if (final_pack_name != curr_pack_name) { if (!final_pack_name) - final_pack_name = odb_pack_name(&pack_name, sha1, "pack"); + final_pack_name = odb_pack_name(&pack_name, hash, "pack"); if (finalize_object_file(curr_pack_name, final_pack_name)) die(_("cannot store pack file")); } else if (from_stdin) @@ -1442,18 +1470,18 @@ static void final(const char *final_pack_name, const char *curr_pack_name, if (final_index_name != curr_index_name) { if (!final_index_name) - final_index_name = 
odb_pack_name(&index_name, sha1, "idx"); + final_index_name = odb_pack_name(&index_name, hash, "idx"); if (finalize_object_file(curr_index_name, final_index_name)) die(_("cannot store index file")); } else chmod(final_index_name, 0444); if (!from_stdin) { - printf("%s\n", sha1_to_hex(sha1)); + printf("%s\n", sha1_to_hex(hash)); } else { struct strbuf buf = STRBUF_INIT; - strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1)); + strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash)); write_or_die(1, buf.buf, buf.len); strbuf_release(&buf); @@ -1472,7 +1500,6 @@ static void final(const char *final_pack_name, const char *curr_pack_name, strbuf_release(&index_name); strbuf_release(&pack_name); - strbuf_release(&keep_name_buf); } static int git_index_pack_config(const char *k, const char *v, void *cb) @@ -1615,32 +1642,26 @@ static void show_pack_info(int stat_only) } } -static const char *derive_filename(const char *pack_name, const char *suffix, - struct strbuf *buf) -{ - size_t len; - if (!strip_suffix(pack_name, ".pack", &len)) - die(_("packfile name '%s' does not end with '.pack'"), - pack_name); - strbuf_add(buf, pack_name, len); - strbuf_addstr(buf, suffix); - return buf->buf; -} - int cmd_index_pack(int argc, const char **argv, const char *prefix) { int i, fix_thin_pack = 0, verify = 0, stat_only = 0; const char *curr_index; const char *index_name = NULL, *pack_name = NULL; - const char *keep_name = NULL, *keep_msg = NULL; - struct strbuf index_name_buf = STRBUF_INIT, - keep_name_buf = STRBUF_INIT; + const char *keep_msg = NULL; + const char *promisor_msg = NULL; + struct strbuf index_name_buf = STRBUF_INIT; struct pack_idx_entry **idx_objects; struct pack_idx_option opts; - unsigned char pack_sha1[20]; + unsigned char pack_hash[GIT_MAX_RAWSZ]; unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */ int report_end_of_input = 0; + /* + * index-pack never needs to fetch missing objects, since it only + * accesses the repo to do hash collision checks + */ + fetch_if_missing = 0; + if (argc == 2 && !strcmp(argv[1], "-h")) usage(index_pack_usage); @@ -1678,6 +1699,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) stat_only = 1; } else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) { ; /* nothing to do */ + } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) { + ; /* already parsed */ } else if (starts_with(arg, "--threads=")) { char *end; nr_threads = strtoul(arg+10, &end, 0); @@ -1740,9 +1763,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) if (from_stdin && !startup_info->have_repository) die(_("--stdin requires a git repository")); if (!index_name && pack_name) - index_name = derive_filename(pack_name, ".idx", &index_name_buf); - if (keep_msg && !keep_name && pack_name) - keep_name = derive_filename(pack_name, ".keep", &keep_name_buf); + index_name = derive_filename(pack_name, "idx", &index_name_buf); if (verify) { if (!index_name) @@ -1768,11 +1789,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) if (show_stat) obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat)); ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry)); - parse_pack_objects(pack_sha1); + parse_pack_objects(pack_hash); if (report_end_of_input) write_in_full(2, "\0", 1); resolve_deltas(); - conclude_pack(fix_thin_pack, curr_pack, pack_sha1); + conclude_pack(fix_thin_pack, curr_pack, pack_hash); free(ofs_deltas); free(ref_deltas); if (strict) @@ -1784,19 +1805,18 @@ int cmd_index_pack(int argc, 
const char **argv, const char *prefix) ALLOC_ARRAY(idx_objects, nr_objects); for (i = 0; i < nr_objects; i++) idx_objects[i] = &objects[i].idx; - curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1); + curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash); free(idx_objects); if (!verify) final(pack_name, curr_pack, index_name, curr_index, - keep_name, keep_msg, - pack_sha1); + keep_msg, promisor_msg, + pack_hash); else close(input_fd); free(objects); strbuf_release(&index_name_buf); - strbuf_release(&keep_name_buf); if (pack_name == NULL) free((void *) curr_pack); if (index_name == NULL) diff --git a/builtin/log.c b/builtin/log.c index 46b4ca13e5..94ee177d56 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -29,6 +29,8 @@ #include "gpg-interface.h" #include "progress.h" +#define MAIL_DEFAULT_WRAP 72 + /* Set a default date-time format for git log ("log.date" config variable) */ static const char *default_date_mode = NULL; @@ -1044,7 +1046,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout, shortlog_init(&log); log.wrap_lines = 1; - log.wrap = 72; + log.wrap = MAIL_DEFAULT_WRAP; log.in1 = 2; log.in2 = 4; log.file = rev->diffopt.file; @@ -1061,6 +1063,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout, memcpy(&opts, &rev->diffopt, sizeof(opts)); opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT; + opts.stat_width = MAIL_DEFAULT_WRAP; diff_setup_done(&opts); @@ -1614,6 +1617,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) (!rev.diffopt.output_format || rev.diffopt.output_format == DIFF_FORMAT_PATCH)) rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT | DIFF_FORMAT_SUMMARY; + if (!rev.diffopt.stat_width) + rev.diffopt.stat_width = MAIL_DEFAULT_WRAP; /* Always generate a patch */ rev.diffopt.output_format |= DIFF_FORMAT_PATCH; diff --git a/builtin/merge.c b/builtin/merge.c index 30264cfd7c..92ba99a1a5 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -820,8 +820,8 @@ static int merge_trivial(struct commit *head, struct commit_list *remoteheads) pptr = commit_list_append(head, pptr); pptr = commit_list_append(remoteheads->item, pptr); prepare_to_commit(remoteheads); - if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents, - result_commit.hash, NULL, sign_commit)) + if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents, + &result_commit, NULL, sign_commit)) die(_("failed to write commit object")); finish(head, remoteheads, &result_commit, "In-index merge"); drop_save(); @@ -845,8 +845,8 @@ static int finish_automerge(struct commit *head, commit_list_insert(head, &parents); strbuf_addch(&merge_msg, '\n'); prepare_to_commit(remoteheads); - if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents, - result_commit.hash, NULL, sign_commit)) + if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents, + &result_commit, NULL, sign_commit)) die(_("failed to write commit object")); strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy); finish(head, remoteheads, &result_commit, buf.buf); diff --git a/builtin/mktag.c b/builtin/mktag.c index 031b750f06..beb552847b 100644 --- a/builtin/mktag.c +++ b/builtin/mktag.c @@ -151,7 +151,7 @@ static int verify_tag(char *buffer, unsigned long size) int cmd_mktag(int argc, const char **argv, const char *prefix) { struct strbuf buf = STRBUF_INIT; - unsigned char result_sha1[20]; + struct object_id result; if (argc != 1) usage("git mktag"); @@ -165,10 +165,10 
@@ int cmd_mktag(int argc, const char **argv, const char *prefix) if (verify_tag(buf.buf, buf.len) < 0) die("invalid tag signature file"); - if (write_sha1_file(buf.buf, buf.len, tag_type, result_sha1) < 0) + if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0) die("unable to write tag file"); strbuf_release(&buf); - printf("%s\n", sha1_to_hex(result_sha1)); + printf("%s\n", oid_to_hex(&result)); return 0; } diff --git a/builtin/mktree.c b/builtin/mktree.c index da0fd8cd70..8dd9f52f77 100644 --- a/builtin/mktree.c +++ b/builtin/mktree.c @@ -40,7 +40,7 @@ static int ent_compare(const void *a_, const void *b_) b->name, b->len, b->mode); } -static void write_tree(unsigned char *sha1) +static void write_tree(struct object_id *oid) { struct strbuf buf; size_t size; @@ -57,7 +57,7 @@ static void write_tree(unsigned char *sha1) strbuf_add(&buf, ent->sha1, 20); } - write_sha1_file(buf.buf, buf.len, tree_type, sha1); + write_object_file(buf.buf, buf.len, tree_type, oid); strbuf_release(&buf); } @@ -142,7 +142,7 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss int cmd_mktree(int ac, const char **av, const char *prefix) { struct strbuf sb = STRBUF_INIT; - unsigned char sha1[20]; + struct object_id oid; int nul_term_line = 0; int allow_missing = 0; int is_batch_mode = 0; @@ -181,8 +181,8 @@ int cmd_mktree(int ac, const char **av, const char *prefix) */ ; /* skip creating an empty tree */ } else { - write_tree(sha1); - puts(sha1_to_hex(sha1)); + write_tree(&oid); + puts(oid_to_hex(&oid)); fflush(stdout); } used=0; /* reset tree entry buffer for re-use in batch mode */ diff --git a/builtin/mv.c b/builtin/mv.c index cf3684d907..8ce6a2ddd4 100644 --- a/builtin/mv.c +++ b/builtin/mv.c @@ -286,8 +286,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix) pos = cache_name_pos(src, strlen(src)); assert(pos >= 0); - if (!show_only) - rename_cache_entry_at(pos, dst); + rename_cache_entry_at(pos, dst); } if (gitmodules_modified) diff --git a/builtin/notes.c b/builtin/notes.c index 7c81761645..39304ba743 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -198,9 +198,9 @@ static void prepare_note_data(const struct object_id *object, struct note_data * } } -static void write_note_data(struct note_data *d, unsigned char *sha1) +static void write_note_data(struct note_data *d, struct object_id *oid) { - if (write_sha1_file(d->buf.buf, d->buf.len, blob_type, sha1)) { + if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) { error(_("unable to write note object")); if (d->edit_path) error(_("the note contents have been left in %s"), @@ -459,7 +459,7 @@ static int add(int argc, const char **argv, const char *prefix) prepare_note_data(&object, &d, note ? 
note->hash : NULL); if (d.buf.len || allow_empty) { - write_note_data(&d, new_note.hash); + write_note_data(&d, &new_note); if (add_note(t, &object, &new_note, combine_notes_overwrite)) die("BUG: combine_notes_overwrite failed"); commit_notes(t, "Notes added by 'git notes add'"); @@ -619,7 +619,7 @@ static int append_edit(int argc, const char **argv, const char *prefix) } if (d.buf.len || allow_empty) { - write_note_data(&d, new_note.hash); + write_note_data(&d, &new_note); if (add_note(t, &object, &new_note, combine_notes_overwrite)) die("BUG: combine_notes_overwrite failed"); logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]); diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 6b9cfc289d..5c674b2843 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -26,7 +26,7 @@ #include "reachable.h" #include "sha1-array.h" #include "argv-array.h" -#include "mru.h" +#include "list.h" #include "packfile.h" static const char *pack_usage[] = { @@ -75,6 +75,8 @@ static int use_bitmap_index = -1; static int write_bitmap_index; static uint16_t write_bitmap_options; +static int exclude_promisor_objects; + static unsigned long delta_cache_size = 0; static unsigned long max_delta_cache_size = 256 * 1024 * 1024; static unsigned long cache_max_small_delta_size = 1000; @@ -84,8 +86,9 @@ static unsigned long window_memory_limit = 0; static struct list_objects_filter_options filter_options; enum missing_action { - MA_ERROR = 0, /* fail if any missing objects are encountered */ - MA_ALLOW_ANY, /* silently allow ALL missing objects */ + MA_ERROR = 0, /* fail if any missing objects are encountered */ + MA_ALLOW_ANY, /* silently allow ALL missing objects */ + MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ }; static enum missing_action arg_missing_action; static show_object_fn fn_show_object; @@ -161,7 +164,7 @@ static unsigned long do_compress(void **pptr, unsigned long size) return stream.total_out; } -static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f, +static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f, const struct object_id *oid) { git_zstream stream; @@ -185,7 +188,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi stream.next_out = obuf; stream.avail_out = sizeof(obuf); zret = git_deflate(&stream, readlen ? 0 : Z_FINISH); - sha1write(f, obuf, stream.next_out - obuf); + hashwrite(f, obuf, stream.next_out - obuf); olen += stream.next_out - obuf; } if (stream.avail_in) @@ -230,7 +233,7 @@ static int check_pack_inflate(struct packed_git *p, stream.total_in == len) ? 
0 : -1; } -static void copy_pack_data(struct sha1file *f, +static void copy_pack_data(struct hashfile *f, struct packed_git *p, struct pack_window **w_curs, off_t offset, @@ -243,14 +246,14 @@ static void copy_pack_data(struct sha1file *f, in = use_pack(p, w_curs, offset, &avail); if (avail > len) avail = (unsigned long)len; - sha1write(f, in, avail); + hashwrite(f, in, avail); offset += avail; len -= avail; } } /* Return 0 if we will bust the pack-size limit */ -static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry, +static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { unsigned long size, datalen; @@ -323,8 +326,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; } else if (type == OBJ_REF_DELTA) { /* @@ -337,8 +340,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, entry->delta->idx.oid.hash, 20); + hashwrite(f, header, hdrlen); + hashwrite(f, entry->delta->idx.oid.hash, 20); hdrlen += 20; } else { if (limit && hdrlen + datalen + 20 >= limit) { @@ -347,13 +350,13 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent free(buf); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } if (st) { datalen = write_large_blob_data(st, f, &entry->idx.oid); close_istream(st); } else { - sha1write(f, buf, datalen); + hashwrite(f, buf, datalen); free(buf); } @@ -361,7 +364,7 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent } /* Return 0 if we will bust the pack-size limit */ -static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, +static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { struct packed_git *p = entry->in_pack; @@ -412,8 +415,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; reused_delta++; } else if (type == OBJ_REF_DELTA) { @@ -421,8 +424,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, entry->delta->idx.oid.hash, 20); + hashwrite(f, header, hdrlen); + hashwrite(f, entry->delta->idx.oid.hash, 20); hdrlen += 20; reused_delta++; } else { @@ -430,7 +433,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } copy_pack_data(f, p, &w_curs, offset, datalen); unuse_pack(&w_curs); @@ -439,7 +442,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, } /* Return 0 if we will bust the pack-size limit */ -static off_t write_object(struct sha1file *f, +static off_t write_object(struct hashfile *f, struct object_entry *entry, off_t write_offset) { @@ -512,7 +515,7 @@ enum write_one_status { WRITE_ONE_RECURSIVE = 2 
/* already scheduled to be written */ }; -static enum write_one_status write_one(struct sha1file *f, +static enum write_one_status write_one(struct hashfile *f, struct object_entry *e, off_t *offset) { @@ -731,7 +734,7 @@ static struct object_entry **compute_write_order(void) return wo; } -static off_t write_reused_pack(struct sha1file *f) +static off_t write_reused_pack(struct hashfile *f) { unsigned char buffer[8192]; off_t to_write, total; @@ -762,7 +765,7 @@ static off_t write_reused_pack(struct sha1file *f) if (read_pack > to_write) read_pack = to_write; - sha1write(f, buffer, read_pack); + hashwrite(f, buffer, read_pack); to_write -= read_pack; /* @@ -791,7 +794,7 @@ static const char no_split_warning[] = N_( static void write_pack_file(void) { uint32_t i = 0, j; - struct sha1file *f; + struct hashfile *f; off_t offset; uint32_t nr_remaining = nr_result; time_t last_mtime = 0; @@ -807,7 +810,7 @@ static void write_pack_file(void) char *pack_tmp_name = NULL; if (pack_to_stdout) - f = sha1fd_throughput(1, "<stdout>", progress_state); + f = hashfd_throughput(1, "<stdout>", progress_state); else f = create_tmp_packfile(&pack_tmp_name); @@ -834,11 +837,11 @@ static void write_pack_file(void) * If so, rewrite it like in fast-import */ if (pack_to_stdout) { - sha1close(f, oid.hash, CSUM_CLOSE); + hashclose(f, oid.hash, CSUM_CLOSE); } else if (nr_written == nr_remaining) { - sha1close(f, oid.hash, CSUM_FSYNC); + hashclose(f, oid.hash, CSUM_FSYNC); } else { - int fd = sha1close(f, oid.hash, 0); + int fd = hashclose(f, oid.hash, 0); fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, nr_written, oid.hash, offset); close(fd); @@ -1006,8 +1009,8 @@ static int want_object_in_pack(const struct object_id *oid, struct packed_git **found_pack, off_t *found_offset) { - struct mru_entry *entry; int want; + struct list_head *pos; if (!exclude && local && has_loose_object_nonlocal(oid->hash)) return 0; @@ -1023,8 +1026,8 @@ static int want_object_in_pack(const struct object_id *oid, return want; } - for (entry = packed_git_mru.head; entry; entry = entry->next) { - struct packed_git *p = entry->item; + list_for_each(pos, &packed_git_mru) { + struct packed_git *p = list_entry(pos, struct packed_git, mru); off_t offset; if (p == *found_pack) @@ -1041,7 +1044,7 @@ static int want_object_in_pack(const struct object_id *oid, } want = want_found_object(exclude, p); if (!exclude && want > 0) - mru_mark(&packed_git_mru, entry); + list_move(&p->mru, &packed_git_mru); if (want != -1) return want; } @@ -2578,6 +2581,20 @@ static void show_object__ma_allow_any(struct object *obj, const char *name, void show_object(obj, name, data); } +static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data) +{ + assert(arg_missing_action == MA_ALLOW_PROMISOR); + + /* + * Quietly ignore EXPECTED missing objects. This avoids problems with + * staging them now and getting an odd error later. 
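want_object_in_pack() above now walks packed_git_mru with the plain doubly-linked-list API instead of the retired mru wrapper: iterate with list_for_each()/list_entry(), and on a hit promote the pack with list_move(). A standalone sketch of that pattern; the helpers below are simplified stand-ins for Git's list.h, not the real header:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }
    static void list_add(struct list_head *e, struct list_head *head)
    {
        e->next = head->next;
        e->prev = head;
        head->next->prev = e;
        head->next = e;
    }
    static void list_move(struct list_head *e, struct list_head *head)
    {
        list_del(e);
        list_add(e, head);
    }

    struct packed_git {
        const char *name;
        struct list_head mru;   /* node embedded in the pack itself */
    };

    static struct list_head packed_git_mru = LIST_HEAD_INIT(packed_git_mru);

    int main(void)
    {
        struct packed_git a = { "pack-a" }, b = { "pack-b" };
        struct list_head *pos;

        list_add(&a.mru, &packed_git_mru);
        list_add(&b.mru, &packed_git_mru);      /* order: b, a */

        list_for_each(pos, &packed_git_mru) {
            struct packed_git *p = list_entry(pos, struct packed_git, mru);
            if (!strcmp(p->name, "pack-a")) {   /* pretend the object was found here */
                list_move(&p->mru, &packed_git_mru);
                break;
            }
        }

        list_for_each(pos, &packed_git_mru)     /* now: a, b */
            puts(list_entry(pos, struct packed_git, mru)->name);
        return 0;
    }

Promoting on each hit keeps recently used packs at the front, so long runs of objects coming from the same pack stay cheap without a separate MRU structure.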
+ */ + if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid)) + return; + + show_object(obj, name, data); +} + static int option_parse_missing_action(const struct option *opt, const char *arg, int unset) { @@ -2592,10 +2609,18 @@ static int option_parse_missing_action(const struct option *opt, if (!strcmp(arg, "allow-any")) { arg_missing_action = MA_ALLOW_ANY; + fetch_if_missing = 0; fn_show_object = show_object__ma_allow_any; return 0; } + if (!strcmp(arg, "allow-promisor")) { + arg_missing_action = MA_ALLOW_PROMISOR; + fetch_if_missing = 0; + fn_show_object = show_object__ma_allow_promisor; + return 0; + } + die(_("invalid value for --missing")); return 0; } @@ -2768,7 +2793,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs) if (!packlist_find(&to_pack, oid.hash, NULL) && !has_sha1_pack_kept_or_nonlocal(&oid) && !loosened_object_can_be_discarded(&oid, p->mtime)) - if (force_object_loose(oid.hash, p->mtime)) + if (force_object_loose(&oid, p->mtime)) die("unable to force loose object"); } } @@ -3009,6 +3034,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) { OPTION_CALLBACK, 0, "missing", NULL, N_("action"), N_("handling for missing objects"), PARSE_OPT_NONEG, option_parse_missing_action }, + OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects, + N_("do not pack objects in promisor packfiles")), OPT_END(), }; @@ -3054,6 +3081,12 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) argv_array_push(&rp, "--unpacked"); } + if (exclude_promisor_objects) { + use_internal_rev_list = 1; + fetch_if_missing = 0; + argv_array_push(&rp, "--exclude-promisor-objects"); + } + if (!reuse_object) reuse_delta = 0; if (pack_compression_level == -1) diff --git a/builtin/prune.c b/builtin/prune.c index d2fdae680a..4cfec82f40 100644 --- a/builtin/prune.c +++ b/builtin/prune.c @@ -101,12 +101,15 @@ int cmd_prune(int argc, const char **argv, const char *prefix) { struct rev_info revs; struct progress *progress = NULL; + int exclude_promisor_objects = 0; const struct option options[] = { OPT__DRY_RUN(&show_only, N_("do not remove, show only")), OPT__VERBOSE(&verbose, N_("report pruned objects")), OPT_BOOL(0, "progress", &show_progress, N_("show progress")), OPT_EXPIRY_DATE(0, "expire", &expire, N_("expire objects older than <time>")), + OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects, + N_("limit traversal to objects outside promisor packfiles")), OPT_END() }; char *s; @@ -139,6 +142,10 @@ int cmd_prune(int argc, const char **argv, const char *prefix) show_progress = isatty(2); if (show_progress) progress = start_delayed_progress(_("Checking connectivity"), 0); + if (exclude_promisor_objects) { + fetch_if_missing = 0; + revs.exclude_promisor_objects = 1; + } mark_reachable_objects(&revs, 1, expire, progress); stop_progress(&progress); diff --git a/builtin/pull.c b/builtin/pull.c index 511dbbe0f6..1876271af9 100644 --- a/builtin/pull.c +++ b/builtin/pull.c @@ -574,6 +574,7 @@ static int rebase_submodules(void) cp.no_stdin = 1; argv_array_pushl(&cp.args, "submodule", "update", "--recursive", "--rebase", NULL); + argv_push_verbosity(&cp.args); return run_command(&cp); } @@ -586,6 +587,7 @@ static int update_submodules(void) cp.no_stdin = 1; argv_array_pushl(&cp.args, "submodule", "update", "--recursive", "--checkout", NULL); + argv_push_verbosity(&cp.args); return run_command(&cp); } diff --git a/builtin/rebase--helper.c b/builtin/rebase--helper.c index 7daee544b7..ad074705bb 100644 --- 
a/builtin/rebase--helper.c +++ b/builtin/rebase--helper.c @@ -22,6 +22,8 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix) struct option options[] = { OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")), OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")), + OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message, + N_("allow commits with empty messages")), OPT_CMDMODE(0, "continue", &command, N_("continue rebase"), CONTINUE), OPT_CMDMODE(0, "abort", &command, N_("abort rebase"), @@ -43,7 +45,7 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix) OPT_END() }; - git_config(git_default_config, NULL); + sequencer_init_config(&opts); git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands); opts.action = REPLAY_INTERACTIVE_REBASE; diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c index b7ce7c7f52..75e7f18ace 100644 --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@ -69,7 +69,7 @@ static int sent_capabilities; static int shallow_update; static const char *alt_shallow_file; static struct strbuf push_cert = STRBUF_INIT; -static unsigned char push_cert_sha1[20]; +static struct object_id push_cert_oid; static struct signature_check sigcheck; static const char *push_cert_nonce; static const char *cert_nonce_seed; @@ -633,8 +633,9 @@ static void prepare_push_cert_sha1(struct child_process *proc) int bogs /* beginning_of_gpg_sig */; already_done = 1; - if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1)) - hashclr(push_cert_sha1); + if (write_object_file(push_cert.buf, push_cert.len, "blob", + &push_cert_oid)) + oidclr(&push_cert_oid); memset(&sigcheck, '\0', sizeof(sigcheck)); sigcheck.result = 'N'; @@ -655,9 +656,9 @@ static void prepare_push_cert_sha1(struct child_process *proc) strbuf_release(&gpg_status); nonce_status = check_nonce(push_cert.buf, bogs); } - if (!is_null_sha1(push_cert_sha1)) { + if (!is_null_oid(&push_cert_oid)) { argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s", - sha1_to_hex(push_cert_sha1)); + oid_to_hex(&push_cert_oid)); argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s", sigcheck.signer ? sigcheck.signer : ""); argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s", diff --git a/builtin/repack.c b/builtin/repack.c index f17a68a17d..7bdb40142f 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -83,7 +83,8 @@ static void remove_pack_on_signal(int signo) /* * Adds all packs hex strings to the fname list, which do not - * have a corresponding .keep file. + * have a corresponding .keep or .promisor file. These packs are not to + * be kept if we are going to pack everything into one file. 
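The repack.c comment here documents the widened test: a pack is a candidate for consolidation only if it has neither a .keep nor a .promisor file next to it (the code follows below). A minimal standalone illustration of such a check, using plain access(2) and hypothetical pack names rather than Git's file_exists()/mkpath() helpers:

    #include <stdio.h>
    #include <unistd.h>

    static int has_marker(const char *packdir, const char *base, const char *suffix)
    {
        char path[4096];
        snprintf(path, sizeof(path), "%s/%s.%s", packdir, base, suffix);
        return access(path, F_OK) == 0;
    }

    static int keep_out_of_repack(const char *packdir, const char *base)
    {
        return has_marker(packdir, base, "keep") ||
               has_marker(packdir, base, "promisor");
    }

    int main(void)
    {
        const char *packdir = ".git/objects/pack";  /* illustrative location */
        const char *base = "pack-1234";             /* hypothetical pack name */

        if (keep_out_of_repack(packdir, base))
            printf("%s: left alone (.keep or .promisor present)\n", base);
        else
            printf("%s: folded into the new pack\n", base);
        return 0;
    }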
*/ static void get_non_kept_pack_filenames(struct string_list *fname_list) { @@ -101,7 +102,8 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list) fname = xmemdupz(e->d_name, len); - if (!file_exists(mkpath("%s/%s.keep", packdir, fname))) + if (!file_exists(mkpath("%s/%s.keep", packdir, fname)) && + !file_exists(mkpath("%s/%s.promisor", packdir, fname))) string_list_append_nodup(fname_list, fname); else free(fname); @@ -232,6 +234,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix) argv_array_push(&cmd.args, "--all"); argv_array_push(&cmd.args, "--reflog"); argv_array_push(&cmd.args, "--indexed-objects"); + if (repository_format_partial_clone) + argv_array_push(&cmd.args, "--exclude-promisor-objects"); if (window) argv_array_pushf(&cmd.args, "--window=%s", window); if (window_memory) diff --git a/builtin/replace.c b/builtin/replace.c index 10078ae371..83d3235721 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -355,7 +355,7 @@ static void check_one_mergetag(struct commit *commit, struct tag *tag; int i; - hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), tag_oid.hash); + hash_object_file(extra->value, extra->len, typename(OBJ_TAG), &tag_oid); tag = lookup_tag(&tag_oid); if (!tag) die(_("bad mergetag in commit '%s'"), ref); @@ -410,7 +410,7 @@ static int create_graft(int argc, const char **argv, int force) check_mergetags(commit, argc, argv); - if (write_sha1_file(buf.buf, buf.len, commit_type, new.hash)) + if (write_object_file(buf.buf, buf.len, commit_type, &new)) die(_("could not write replacement commit for: '%s'"), old_ref); strbuf_release(&buf); diff --git a/builtin/reset.c b/builtin/reset.c index e15f595799..5da0f75de9 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -106,24 +106,16 @@ out: static void print_new_head_line(struct commit *commit) { - const char *hex, *body; - const char *msg; - - hex = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV); - printf(_("HEAD is now at %s"), hex); - msg = logmsg_reencode(commit, NULL, get_log_output_encoding()); - body = strstr(msg, "\n\n"); - if (body) { - const char *eol; - size_t len; - body = skip_blank_lines(body + 2); - eol = strchr(body, '\n'); - len = eol ? eol - body : strlen(body); - printf(" %.*s\n", (int) len, body); - } - else - printf("\n"); - unuse_commit_buffer(commit, msg); + struct strbuf buf = STRBUF_INIT; + + printf(_("HEAD is now at %s"), + find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV)); + + pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf); + if (buf.len > 0) + printf(" %s", buf.buf); + putchar('\n'); + strbuf_release(&buf); } static void update_index_from_diff(struct diff_queue_struct *q, diff --git a/builtin/rev-list.c b/builtin/rev-list.c index d5345b6a2e..48300d9e11 100644 --- a/builtin/rev-list.c +++ b/builtin/rev-list.c @@ -15,6 +15,7 @@ #include "progress.h" #include "reflog-walk.h" #include "oidset.h" +#include "packfile.h" static const char rev_list_usage[] = "git rev-list [OPTION] <commit-id>... [ -- paths... 
]\n" @@ -67,6 +68,7 @@ enum missing_action { MA_ERROR = 0, /* fail if any missing objects are encountered */ MA_ALLOW_ANY, /* silently allow ALL missing objects */ MA_PRINT, /* print ALL missing objects in special section */ + MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ }; static enum missing_action arg_missing_action; @@ -197,6 +199,12 @@ static void finish_commit(struct commit *commit, void *data) static inline void finish_object__ma(struct object *obj) { + /* + * Whether or not we try to dynamically fetch missing objects + * from the server, we currently DO NOT have the object. We + * can either print, allow (ignore), or conditionally allow + * (ignore) them. + */ switch (arg_missing_action) { case MA_ERROR: die("missing blob object '%s'", oid_to_hex(&obj->oid)); @@ -209,25 +217,36 @@ static inline void finish_object__ma(struct object *obj) oidset_insert(&missing_objects, &obj->oid); return; + case MA_ALLOW_PROMISOR: + if (is_promisor_object(&obj->oid)) + return; + die("unexpected missing blob object '%s'", + oid_to_hex(&obj->oid)); + return; + default: BUG("unhandled missing_action"); return; } } -static void finish_object(struct object *obj, const char *name, void *cb_data) +static int finish_object(struct object *obj, const char *name, void *cb_data) { struct rev_list_info *info = cb_data; - if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) + if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) { finish_object__ma(obj); + return 1; + } if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT) parse_object(&obj->oid); + return 0; } static void show_object(struct object *obj, const char *name, void *cb_data) { struct rev_list_info *info = cb_data; - finish_object(obj, name, cb_data); + if (finish_object(obj, name, cb_data)) + return; display_progress(progress, ++progress_counter); if (info->flags & REV_LIST_QUIET) return; @@ -315,11 +334,19 @@ static inline int parse_missing_action_value(const char *value) if (!strcmp(value, "allow-any")) { arg_missing_action = MA_ALLOW_ANY; + fetch_if_missing = 0; return 1; } if (!strcmp(value, "print")) { arg_missing_action = MA_PRINT; + fetch_if_missing = 0; + return 1; + } + + if (!strcmp(value, "allow-promisor")) { + arg_missing_action = MA_ALLOW_PROMISOR; + fetch_if_missing = 0; return 1; } @@ -344,6 +371,35 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) init_revisions(&revs, prefix); revs.abbrev = DEFAULT_ABBREV; revs.commit_format = CMIT_FMT_UNSPECIFIED; + + /* + * Scan the argument list before invoking setup_revisions(), so that we + * know if fetch_if_missing needs to be set to 0. + * + * "--exclude-promisor-objects" acts as a pre-filter on missing objects + * by not crossing the boundary from realized objects to promisor + * objects. + * + * Let "--missing" to conditionally set fetch_if_missing. 
+ */ + for (i = 1; i < argc; i++) { + const char *arg = argv[i]; + if (!strcmp(arg, "--exclude-promisor-objects")) { + fetch_if_missing = 0; + revs.exclude_promisor_objects = 1; + break; + } + } + for (i = 1; i < argc; i++) { + const char *arg = argv[i]; + if (skip_prefix(arg, "--missing=", &arg)) { + if (revs.exclude_promisor_objects) + die(_("cannot combine --exclude-promisor-objects and --missing")); + if (parse_missing_action_value(arg)) + break; + } + } + argc = setup_revisions(argc, argv, &revs, NULL); memset(&info, 0, sizeof(info)); @@ -404,7 +460,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) continue; } if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) { - list_objects_filter_release(&filter_options); + list_objects_filter_set_no_filter(&filter_options); continue; } if (!strcmp(arg, "--filter-print-omitted")) { @@ -412,9 +468,10 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) continue; } - if (skip_prefix(arg, "--missing=", &arg) && - parse_missing_action_value(arg)) - continue; + if (!strcmp(arg, "--exclude-promisor-objects")) + continue; /* already handled above */ + if (skip_prefix(arg, "--missing=", &arg)) + continue; /* already handled above */ usage(rev_list_usage); diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c index 74aa644cbb..96d06a5d01 100644 --- a/builtin/rev-parse.c +++ b/builtin/rev-parse.c @@ -516,7 +516,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix) PARSE_OPT_SHELL_EVAL); strbuf_addstr(&parsed, " --"); - sq_quote_argv(&parsed, argv, 0); + sq_quote_argv(&parsed, argv); puts(parsed.buf); return 0; } @@ -526,7 +526,7 @@ static int cmd_sq_quote(int argc, const char **argv) struct strbuf buf = STRBUF_INIT; if (argc) - sq_quote_argv(&buf, argv, 0); + sq_quote_argv(&buf, argv); printf("%s\n", buf.buf); strbuf_release(&buf); diff --git a/builtin/revert.c b/builtin/revert.c index b9d927eb09..76f0a35b07 100644 --- a/builtin/revert.c +++ b/builtin/revert.c @@ -208,7 +208,7 @@ int cmd_revert(int argc, const char **argv, const char *prefix) if (isatty(0)) opts.edit = 1; opts.action = REPLAY_REVERT; - git_config(git_default_config, NULL); + sequencer_init_config(&opts); res = run_sequencer(argc, argv, &opts); if (res < 0) die(_("revert failed")); @@ -221,7 +221,7 @@ int cmd_cherry_pick(int argc, const char **argv, const char *prefix) int res; opts.action = REPLAY_PICK; - git_config(git_default_config, NULL); + sequencer_init_config(&opts); res = run_sequencer(argc, argv, &opts); if (res < 0) die(_("cherry-pick failed")); diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index a5c4a8a694..b1daca995f 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -20,6 +20,7 @@ #define OPT_QUIET (1 << 0) #define OPT_CACHED (1 << 1) #define OPT_RECURSIVE (1 << 2) +#define OPT_FORCE (1 << 3) typedef void (*each_submodule_fn)(const struct cache_entry *list_item, void *cb_data); @@ -50,6 +51,20 @@ static char *get_default_remote(void) return ret; } +static int print_default_remote(int argc, const char **argv, const char *prefix) +{ + const char *remote; + + if (argc != 1) + die(_("submodule--helper print-default-remote takes no arguments")); + + remote = get_default_remote(); + if (remote) + printf("%s\n", remote); + + return 0; +} + static int starts_with_dot_slash(const char *str) { return str[0] == '.' 
&& is_dir_sep(str[1]); @@ -358,6 +373,25 @@ static void module_list_active(struct module_list *list) *list = active_modules; } +static char *get_up_path(const char *path) +{ + int i; + struct strbuf sb = STRBUF_INIT; + + for (i = count_slashes(path); i; i--) + strbuf_addstr(&sb, "../"); + + /* + * Check if 'path' ends with slash or not + * for having the same output for dir/sub_dir + * and dir/sub_dir/ + */ + if (!is_dir_sep(path[strlen(path) - 1])) + strbuf_addstr(&sb, "../"); + + return strbuf_detach(&sb, NULL); +} + static int module_list(int argc, const char **argv, const char *prefix) { int i; @@ -718,6 +752,309 @@ static int module_name(int argc, const char **argv, const char *prefix) return 0; } +struct sync_cb { + const char *prefix; + unsigned int flags; +}; + +#define SYNC_CB_INIT { NULL, 0 } + +static void sync_submodule(const char *path, const char *prefix, + unsigned int flags) +{ + const struct submodule *sub; + char *remote_key = NULL; + char *sub_origin_url, *super_config_url, *displaypath; + struct strbuf sb = STRBUF_INIT; + struct child_process cp = CHILD_PROCESS_INIT; + char *sub_config_path = NULL; + + if (!is_submodule_active(the_repository, path)) + return; + + sub = submodule_from_path(&null_oid, path); + + if (sub && sub->url) { + if (starts_with_dot_dot_slash(sub->url) || + starts_with_dot_slash(sub->url)) { + char *remote_url, *up_path; + char *remote = get_default_remote(); + strbuf_addf(&sb, "remote.%s.url", remote); + + if (git_config_get_string(sb.buf, &remote_url)) + remote_url = xgetcwd(); + + up_path = get_up_path(path); + sub_origin_url = relative_url(remote_url, sub->url, up_path); + super_config_url = relative_url(remote_url, sub->url, NULL); + + free(remote); + free(up_path); + free(remote_url); + } else { + sub_origin_url = xstrdup(sub->url); + super_config_url = xstrdup(sub->url); + } + } else { + sub_origin_url = xstrdup(""); + super_config_url = xstrdup(""); + } + + displaypath = get_submodule_displaypath(path, prefix); + + if (!(flags & OPT_QUIET)) + printf(_("Synchronizing submodule url for '%s'\n"), + displaypath); + + strbuf_reset(&sb); + strbuf_addf(&sb, "submodule.%s.url", sub->name); + if (git_config_set_gently(sb.buf, super_config_url)) + die(_("failed to register url for submodule path '%s'"), + displaypath); + + if (!is_submodule_populated_gently(path, NULL)) + goto cleanup; + + prepare_submodule_repo_env(&cp.env_array); + cp.git_cmd = 1; + cp.dir = path; + argv_array_pushl(&cp.args, "submodule--helper", + "print-default-remote", NULL); + + strbuf_reset(&sb); + if (capture_command(&cp, &sb, 0)) + die(_("failed to get the default remote for submodule '%s'"), + path); + + strbuf_strip_suffix(&sb, "\n"); + remote_key = xstrfmt("remote.%s.url", sb.buf); + + strbuf_reset(&sb); + submodule_to_gitdir(&sb, path); + strbuf_addstr(&sb, "/config"); + + if (git_config_set_in_file_gently(sb.buf, remote_key, sub_origin_url)) + die(_("failed to update remote for submodule '%s'"), + path); + + if (flags & OPT_RECURSIVE) { + struct child_process cpr = CHILD_PROCESS_INIT; + + cpr.git_cmd = 1; + cpr.dir = path; + prepare_submodule_repo_env(&cpr.env_array); + + argv_array_push(&cpr.args, "--super-prefix"); + argv_array_pushf(&cpr.args, "%s/", displaypath); + argv_array_pushl(&cpr.args, "submodule--helper", "sync", + "--recursive", NULL); + + if (flags & OPT_QUIET) + argv_array_push(&cpr.args, "--quiet"); + + if (run_command(&cpr)) + die(_("failed to recurse into submodule '%s'"), + path); + } + +cleanup: + free(super_config_url); + free(sub_origin_url); + 
strbuf_release(&sb); + free(remote_key); + free(displaypath); + free(sub_config_path); +} + +static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data) +{ + struct sync_cb *info = cb_data; + sync_submodule(list_item->name, info->prefix, info->flags); + +} + +static int module_sync(int argc, const char **argv, const char *prefix) +{ + struct sync_cb info = SYNC_CB_INIT; + struct pathspec pathspec; + struct module_list list = MODULE_LIST_INIT; + int quiet = 0; + int recursive = 0; + + struct option module_sync_options[] = { + OPT__QUIET(&quiet, N_("Suppress output of synchronizing submodule url")), + OPT_BOOL(0, "recursive", &recursive, + N_("Recurse into nested submodules")), + OPT_END() + }; + + const char *const git_submodule_helper_usage[] = { + N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"), + NULL + }; + + argc = parse_options(argc, argv, prefix, module_sync_options, + git_submodule_helper_usage, 0); + + if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0) + return 1; + + info.prefix = prefix; + if (quiet) + info.flags |= OPT_QUIET; + if (recursive) + info.flags |= OPT_RECURSIVE; + + for_each_listed_submodule(&list, sync_submodule_cb, &info); + + return 0; +} + +struct deinit_cb { + const char *prefix; + unsigned int flags; +}; +#define DEINIT_CB_INIT { NULL, 0 } + +static void deinit_submodule(const char *path, const char *prefix, + unsigned int flags) +{ + const struct submodule *sub; + char *displaypath = NULL; + struct child_process cp_config = CHILD_PROCESS_INIT; + struct strbuf sb_config = STRBUF_INIT; + char *sub_git_dir = xstrfmt("%s/.git", path); + + sub = submodule_from_path(&null_oid, path); + + if (!sub || !sub->name) + goto cleanup; + + displaypath = get_submodule_displaypath(path, prefix); + + /* remove the submodule work tree (unless the user already did it) */ + if (is_directory(path)) { + struct strbuf sb_rm = STRBUF_INIT; + const char *format; + + /* + * protect submodules containing a .git directory + * NEEDSWORK: instead of dying, automatically call + * absorbgitdirs and (possibly) warn. 
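get_up_path() in the new sync code above emits one "../" per path component so a URL that is relative to the superproject can be re-expressed relative to the submodule. The same idea as a standalone program, using plain malloc'd strings instead of strbuf and '/' instead of is_dir_sep():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int count_slashes(const char *s)
    {
        int n = 0;
        for (; *s; s++)
            if (*s == '/')
                n++;
        return n;
    }

    static char *get_up_path(const char *path)
    {
        size_t len = strlen(path);
        int ups = count_slashes(path);
        char *buf, *p;

        /* "dir/sub" and "dir/sub/" should give the same answer */
        if (len && path[len - 1] != '/')
            ups++;

        buf = p = malloc(3 * ups + 1);
        if (!buf)
            return NULL;
        for (; ups; ups--) {
            memcpy(p, "../", 3);
            p += 3;
        }
        *p = '\0';
        return buf;
    }

    int main(void)
    {
        char *a = get_up_path("foo/bar");
        char *b = get_up_path("foo/bar/");
        printf("%s %s\n", a, b);    /* both print ../../ */
        free(a);
        free(b);
        return 0;
    }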
+ */ + if (is_directory(sub_git_dir)) + die(_("Submodule work tree '%s' contains a .git " + "directory (use 'rm -rf' if you really want " + "to remove it including all of its history)"), + displaypath); + + if (!(flags & OPT_FORCE)) { + struct child_process cp_rm = CHILD_PROCESS_INIT; + cp_rm.git_cmd = 1; + argv_array_pushl(&cp_rm.args, "rm", "-qn", + path, NULL); + + if (run_command(&cp_rm)) + die(_("Submodule work tree '%s' contains local " + "modifications; use '-f' to discard them"), + displaypath); + } + + strbuf_addstr(&sb_rm, path); + + if (!remove_dir_recursively(&sb_rm, 0)) + format = _("Cleared directory '%s'\n"); + else + format = _("Could not remove submodule work tree '%s'\n"); + + if (!(flags & OPT_QUIET)) + printf(format, displaypath); + + strbuf_release(&sb_rm); + } + + if (mkdir(path, 0777)) + printf(_("could not create empty submodule directory %s"), + displaypath); + + cp_config.git_cmd = 1; + argv_array_pushl(&cp_config.args, "config", "--get-regexp", NULL); + argv_array_pushf(&cp_config.args, "submodule.%s\\.", sub->name); + + /* remove the .git/config entries (unless the user already did it) */ + if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) { + char *sub_key = xstrfmt("submodule.%s", sub->name); + /* + * remove the whole section so we have a clean state when + * the user later decides to init this submodule again + */ + git_config_rename_section_in_file(NULL, sub_key, NULL); + if (!(flags & OPT_QUIET)) + printf(_("Submodule '%s' (%s) unregistered for path '%s'\n"), + sub->name, sub->url, displaypath); + free(sub_key); + } + +cleanup: + free(displaypath); + free(sub_git_dir); + strbuf_release(&sb_config); +} + +static void deinit_submodule_cb(const struct cache_entry *list_item, + void *cb_data) +{ + struct deinit_cb *info = cb_data; + deinit_submodule(list_item->name, info->prefix, info->flags); +} + +static int module_deinit(int argc, const char **argv, const char *prefix) +{ + struct deinit_cb info = DEINIT_CB_INIT; + struct pathspec pathspec; + struct module_list list = MODULE_LIST_INIT; + int quiet = 0; + int force = 0; + int all = 0; + + struct option module_deinit_options[] = { + OPT__QUIET(&quiet, N_("Suppress submodule status output")), + OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes")), + OPT_BOOL(0, "all", &all, N_("Unregister all submodules")), + OPT_END() + }; + + const char *const git_submodule_helper_usage[] = { + N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"), + NULL + }; + + argc = parse_options(argc, argv, prefix, module_deinit_options, + git_submodule_helper_usage, 0); + + if (all && argc) { + error("pathspec and --all are incompatible"); + usage_with_options(git_submodule_helper_usage, + module_deinit_options); + } + + if (!argc && !all) + die(_("Use '--all' if you really want to deinitialize all submodules")); + + if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0) + BUG("module_list_compute should not choke on empty pathspec"); + + info.prefix = prefix; + if (quiet) + info.flags |= OPT_QUIET; + if (force) + info.flags |= OPT_FORCE; + + for_each_listed_submodule(&list, deinit_submodule_cb, &info); + + return 0; +} + static int clone_submodule(const char *path, const char *gitdir, const char *url, const char *depth, struct string_list *reference, int quiet, int progress) @@ -1498,6 +1835,9 @@ static struct cmd_struct commands[] = { {"resolve-relative-url-test", resolve_relative_url_test, 0}, {"init", module_init, SUPPORT_SUPER_PREFIX}, 
{"status", module_status, SUPPORT_SUPER_PREFIX}, + {"print-default-remote", print_default_remote, 0}, + {"sync", module_sync, SUPPORT_SUPER_PREFIX}, + {"deinit", module_deinit, 0}, {"remote-branch", resolve_remote_submodule_branch, 0}, {"push-check", push_check, 0}, {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX}, diff --git a/builtin/tag.c b/builtin/tag.c index a7e6a5b0f2..26f8d51365 100644 --- a/builtin/tag.c +++ b/builtin/tag.c @@ -187,13 +187,14 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu { if (sign && do_sign(buf) < 0) return error(_("unable to sign the tag")); - if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0) + if (write_object_file(buf->buf, buf->len, tag_type, result) < 0) return error(_("unable to write tag file")); return 0; } struct create_tag_options { unsigned int message_given:1; + unsigned int use_editor:1; unsigned int sign; enum { CLEANUP_NONE, @@ -224,7 +225,7 @@ static void create_tag(const struct object_id *object, const char *tag, tag, git_committer_info(IDENT_STRICT)); - if (!opt->message_given) { + if (!opt->message_given || opt->use_editor) { int fd; /* write the template message before editing: */ @@ -233,7 +234,10 @@ static void create_tag(const struct object_id *object, const char *tag, if (fd < 0) die_errno(_("could not create file '%s'"), path); - if (!is_null_oid(prev)) { + if (opt->message_given) { + write_or_die(fd, buf->buf, buf->len); + strbuf_reset(buf); + } else if (!is_null_oid(prev)) { write_tag_body(fd, prev); } else { struct strbuf buf = STRBUF_INIT; @@ -372,6 +376,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix) static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting; struct ref_format format = REF_FORMAT_INIT; int icase = 0; + int edit_flag = 0; struct option options[] = { OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'), { OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"), @@ -386,6 +391,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix) OPT_CALLBACK('m', "message", &msg, N_("message"), N_("tag message"), parse_msg_arg), OPT_FILENAME('F', "file", &msgfile, N_("read message from file")), + OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")), OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")), OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"), N_("how to strip spaces and #comments from message")), @@ -524,6 +530,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix) die(_("tag '%s' already exists"), tag); opt.message_given = msg.given || msgfile; + opt.use_editor = edit_flag; if (!cleanup_arg || !strcmp(cleanup_arg, "strip")) opt.cleanup_mode = CLEANUP_ALL; diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c index 62ea264c46..7235d2ffbf 100644 --- a/builtin/unpack-objects.c +++ b/builtin/unpack-objects.c @@ -21,7 +21,7 @@ static unsigned char buffer[4096]; static unsigned int offset, len; static off_t consumed_bytes; static off_t max_input_size; -static git_SHA_CTX ctx; +static git_hash_ctx ctx; static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT; /* @@ -62,7 +62,7 @@ static void *fill(int min) if (min > sizeof(buffer)) die("cannot fill %d bytes", min); if (offset) { - git_SHA1_Update(&ctx, buffer, offset); + the_hash_algo->update_fn(&ctx, buffer, offset); memmove(buffer, buffer + offset, len); offset = 0; } @@ -172,7 +172,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf) { struct object_id oid; - if 
(write_sha1_file(obj_buf->buffer, obj_buf->size, typename(obj->type), oid.hash) < 0) + if (write_object_file(obj_buf->buffer, obj_buf->size, + typename(obj->type), &oid) < 0) die("failed to write object %s", oid_to_hex(&obj->oid)); obj->flags |= FLAG_WRITTEN; } @@ -237,14 +238,16 @@ static void write_object(unsigned nr, enum object_type type, void *buf, unsigned long size) { if (!strict) { - if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0) + if (write_object_file(buf, size, typename(type), + &obj_list[nr].oid) < 0) die("failed to write object"); added_object(nr, type, buf, size); free(buf); obj_list[nr].obj = NULL; } else if (type == OBJ_BLOB) { struct blob *blob; - if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0) + if (write_object_file(buf, size, typename(type), + &obj_list[nr].oid) < 0) die("failed to write object"); added_object(nr, type, buf, size); free(buf); @@ -258,7 +261,7 @@ static void write_object(unsigned nr, enum object_type type, } else { struct object *obj; int eaten; - hash_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash); + hash_object_file(buf, size, typename(type), &obj_list[nr].oid); added_object(nr, type, buf, size); obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf, &eaten); @@ -345,8 +348,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size, struct object_id base_oid; if (type == OBJ_REF_DELTA) { - hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ)); - use(GIT_SHA1_RAWSZ); + hashcpy(base_oid.hash, fill(the_hash_algo->rawsz)); + use(the_hash_algo->rawsz); delta_data = get_data(delta_size); if (dry_run || !delta_data) { free(delta_data); @@ -564,15 +567,15 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix) /* We don't take any non-flag arguments now.. Maybe some day */ usage(unpack_usage); } - git_SHA1_Init(&ctx); + the_hash_algo->init_fn(&ctx); unpack_all(); - git_SHA1_Update(&ctx, buffer, offset); - git_SHA1_Final(oid.hash, &ctx); + the_hash_algo->update_fn(&ctx, buffer, offset); + the_hash_algo->final_fn(oid.hash, &ctx); if (strict) write_rest(); - if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash)) + if (hashcmp(fill(the_hash_algo->rawsz), oid.hash)) die("final sha1 did not match"); - use(GIT_SHA1_RAWSZ); + use(the_hash_algo->rawsz); /* Write the last part of the buffer to stdout */ while (len) { diff --git a/builtin/worktree.c b/builtin/worktree.c index 7cef5b120b..4e7c98758f 100644 --- a/builtin/worktree.c +++ b/builtin/worktree.c @@ -14,7 +14,7 @@ #include "worktree.h" static const char * const worktree_usage[] = { - N_("git worktree add [<options>] <path> [<branch>]"), + N_("git worktree add [<options>] <path> [<commit-ish>]"), N_("git worktree list [<options>]"), N_("git worktree lock [<options>] <path>"), N_("git worktree prune [<options>]"), @@ -345,9 +345,23 @@ done: * Hook failure does not warrant worktree deletion, so run hook after * is_junk is cleared, but do return appropriate code when hook fails. 
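The worktree hunk above stops going through run_hook_le() and instead assembles the child process by hand: working directory set to the freshly created worktree, GIT_DIR and GIT_WORK_TREE kept out of the hook's environment, and post-checkout invoked with a null old OID, the new commit, and "1". A rough POSIX-only sketch of that shape; the hook path, directory and OIDs are placeholders, and this is not Git's run-command API:

    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static int run_hook_in_dir(const char *dir, const char *hook,
                               const char *old_oid, const char *new_oid)
    {
        pid_t pid = fork();
        int status;

        if (pid < 0)
            return -1;
        if (!pid) {
            char *const child_argv[] = { (char *)hook, (char *)old_oid,
                                         (char *)new_oid, "1", NULL };
            /* minimal environment: no GIT_DIR / GIT_WORK_TREE leaking in */
            char *const child_envp[] = { "PATH=/usr/bin:/bin", NULL };

            if (chdir(dir))
                _exit(127);
            execve(hook, child_argv, child_envp);
            _exit(127);     /* exec failed */
        }
        if (waitpid(pid, &status, 0) < 0)
            return -1;
        return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
    }

    int main(void)
    {
        int ret = run_hook_in_dir("/tmp/new-worktree",                  /* hypothetical */
                                  "/tmp/repo/.git/hooks/post-checkout", /* hypothetical */
                                  "0000000000000000000000000000000000000000",
                                  "1234567890123456789012345678901234567890");
        printf("post-checkout exited with %d\n", ret);
        return 0;
    }

Running the hook with the new worktree as its cwd and without an inherited GIT_DIR means the hook operates on the worktree that was just created, not on whatever repository the command happened to be started from.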
*/ - if (!ret && opts->checkout) - ret = run_hook_le(NULL, "post-checkout", oid_to_hex(&null_oid), - oid_to_hex(&commit->object.oid), "1", NULL); + if (!ret && opts->checkout) { + const char *hook = find_hook("post-checkout"); + if (hook) { + const char *env[] = { "GIT_DIR", "GIT_WORK_TREE", NULL }; + cp.git_cmd = 0; + cp.no_stdin = 1; + cp.stdout_to_stderr = 1; + cp.dir = path; + cp.env = env; + cp.argv = NULL; + argv_array_pushl(&cp.args, absolute_path(hook), + oid_to_hex(&null_oid), + oid_to_hex(&commit->object.oid), + "1", NULL); + ret = run_command(&cp); + } + } argv_array_clear(&child_env); strbuf_release(&sb); diff --git a/bulk-checkin.c b/bulk-checkin.c index 3310fd210a..8bcd1c8665 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -12,7 +12,7 @@ static struct bulk_checkin_state { unsigned plugged:1; char *pack_tmp_name; - struct sha1file *f; + struct hashfile *f; off_t offset; struct pack_idx_option pack_idx_opts; @@ -35,9 +35,9 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state) unlink(state->pack_tmp_name); goto clear_exit; } else if (state->nr_written == 1) { - sha1close(state->f, oid.hash, CSUM_FSYNC); + hashclose(state->f, oid.hash, CSUM_FSYNC); } else { - int fd = sha1close(state->f, oid.hash, 0); + int fd = hashclose(state->f, oid.hash, 0); fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name, state->nr_written, oid.hash, state->offset); @@ -93,7 +93,7 @@ static int already_written(struct bulk_checkin_state *state, unsigned char sha1[ * with a new pack. */ static int stream_to_pack(struct bulk_checkin_state *state, - git_SHA_CTX *ctx, off_t *already_hashed_to, + git_hash_ctx *ctx, off_t *already_hashed_to, int fd, size_t size, enum object_type type, const char *path, unsigned flags) { @@ -127,7 +127,7 @@ static int stream_to_pack(struct bulk_checkin_state *state, if (rsize < hsize) hsize = rsize; if (hsize) - git_SHA1_Update(ctx, ibuf, hsize); + the_hash_algo->update_fn(ctx, ibuf, hsize); *already_hashed_to = offset; } s.next_in = ibuf; @@ -149,7 +149,7 @@ static int stream_to_pack(struct bulk_checkin_state *state, return -1; } - sha1write(state->f, obuf, written); + hashwrite(state->f, obuf, written); state->offset += written; } s.next_out = obuf; @@ -192,10 +192,10 @@ static int deflate_to_pack(struct bulk_checkin_state *state, unsigned flags) { off_t seekback, already_hashed_to; - git_SHA_CTX ctx; + git_hash_ctx ctx; unsigned char obuf[16384]; unsigned header_len; - struct sha1file_checkpoint checkpoint; + struct hashfile_checkpoint checkpoint; struct pack_idx_entry *idx = NULL; seekback = lseek(fd, 0, SEEK_CUR); @@ -204,8 +204,8 @@ static int deflate_to_pack(struct bulk_checkin_state *state, header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX, typename(type), (uintmax_t)size) + 1; - git_SHA1_Init(&ctx); - git_SHA1_Update(&ctx, obuf, header_len); + the_hash_algo->init_fn(&ctx); + the_hash_algo->update_fn(&ctx, obuf, header_len); /* Note: idx is non-NULL when we are writing */ if ((flags & HASH_WRITE_OBJECT) != 0) @@ -216,7 +216,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state, while (1) { prepare_to_stream(state, flags); if (idx) { - sha1file_checkpoint(state->f, &checkpoint); + hashfile_checkpoint(state->f, &checkpoint); idx->offset = state->offset; crc32_begin(state->f); } @@ -230,19 +230,19 @@ static int deflate_to_pack(struct bulk_checkin_state *state, */ if (!idx) die("BUG: should not happen"); - sha1file_truncate(state->f, &checkpoint); + hashfile_truncate(state->f, &checkpoint); state->offset = 
checkpoint.offset; finish_bulk_checkin(state); if (lseek(fd, seekback, SEEK_SET) == (off_t) -1) return error("cannot seek back"); } - git_SHA1_Final(result_sha1, &ctx); + the_hash_algo->final_fn(result_sha1, &ctx); if (!idx) return 0; idx->crc32 = crc32_end(state->f); if (already_written(state, result_sha1)) { - sha1file_truncate(state->f, &checkpoint); + hashfile_truncate(state->f, &checkpoint); state->offset = checkpoint.offset; free(idx); } else { diff --git a/cache-tree.c b/cache-tree.c index e03e72c34a..c52e4303df 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -84,9 +84,8 @@ static struct cache_tree_sub *find_subtree(struct cache_tree *it, down->namelen = pathlen; if (pos < it->subtree_nr) - memmove(it->down + pos + 1, - it->down + pos, - sizeof(down) * (it->subtree_nr - pos - 1)); + MOVE_ARRAY(it->down + pos + 1, it->down + pos, + it->subtree_nr - pos - 1); it->down[pos] = down; return down; } @@ -400,16 +399,16 @@ static int update_one(struct cache_tree *it, } if (repair) { - unsigned char sha1[20]; - hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1); - if (has_sha1_file(sha1)) - hashcpy(it->oid.hash, sha1); + struct object_id oid; + hash_object_file(buffer.buf, buffer.len, tree_type, &oid); + if (has_sha1_file(oid.hash)) + oidcpy(&it->oid, &oid); else to_invalidate = 1; - } else if (dryrun) - hash_sha1_file(buffer.buf, buffer.len, tree_type, - it->oid.hash); - else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) { + } else if (dryrun) { + hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid); + } else if (write_object_file(buffer.buf, buffer.len, tree_type, + &it->oid)) { strbuf_release(&buffer); return -1; } @@ -608,7 +607,7 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR); - entries = read_index_from(index_state, index_path); + entries = read_index_from(index_state, index_path, get_git_dir()); if (entries < 0) { ret = WRITE_TREE_UNREADABLE_INDEX; goto out; @@ -4,7 +4,7 @@ #include "git-compat-util.h" #include "strbuf.h" #include "hashmap.h" -#include "mru.h" +#include "list.h" #include "advice.h" #include "gettext.h" #include "convert.h" @@ -16,31 +16,6 @@ #include "sha1-array.h" #include "repository.h" -#ifndef platform_SHA_CTX -/* - * platform's underlying implementation of SHA-1; could be OpenSSL, - * blk_SHA, Apple CommonCrypto, etc... Note that including - * SHA1_HEADER may have already defined platform_SHA_CTX for our - * own implementations like block-sha1 and ppc-sha1, so we list - * the default for OpenSSL compatible SHA-1 implementations here. 
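The comment and git_SHA1_* wrapper macros being deleted from cache.h here (the #define removals follow just below) are superseded by the_hash_algo, a table of function pointers plus metadata such as rawsz, which the converted call sites earlier in this patch already use. A rough sketch of the pattern, assuming the git_hash_ctx type and the_hash_algo table from the hash abstraction already in the tree:

    git_hash_ctx ctx;
    unsigned char hash[GIT_MAX_RAWSZ];

    the_hash_algo->init_fn(&ctx);
    the_hash_algo->update_fn(&ctx, buf, len);
    the_hash_algo->final_fn(hash, &ctx);    /* writes the_hash_algo->rawsz bytes */
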
- */ -#define platform_SHA_CTX SHA_CTX -#define platform_SHA1_Init SHA1_Init -#define platform_SHA1_Update SHA1_Update -#define platform_SHA1_Final SHA1_Final -#endif - -#define git_SHA_CTX platform_SHA_CTX -#define git_SHA1_Init platform_SHA1_Init -#define git_SHA1_Update platform_SHA1_Update -#define git_SHA1_Final platform_SHA1_Final - -#ifdef SHA1_MAX_BLOCK_SIZE -#include "compat/sha1-chunked.h" -#undef git_SHA1_Update -#define git_SHA1_Update git_SHA1_Update_Chunked -#endif - #include <zlib.h> typedef struct git_zstream { z_stream z; @@ -345,7 +320,8 @@ struct index_state { struct split_index *split_index; struct cache_time timestamp; unsigned name_hash_initialized : 1, - initialized : 1; + initialized : 1, + drop_cache_tree : 1; struct hashmap name_hash; struct hashmap dir_hash; unsigned char sha1[20]; @@ -371,7 +347,7 @@ extern void free_name_hash(struct index_state *istate); #define active_cache_tree (the_index.cache_tree) #define read_cache() read_index(&the_index) -#define read_cache_from(path) read_index_from(&the_index, (path)) +#define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir())) #define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec)) #define is_cache_unborn() is_index_unborn(&the_index) #define read_cache_unmerged() read_index_unmerged(&the_index) @@ -616,7 +592,8 @@ extern int read_index(struct index_state *); extern int read_index_preload(struct index_state *, const struct pathspec *pathspec); extern int do_read_index(struct index_state *istate, const char *path, int must_exist); /* for testting only! */ -extern int read_index_from(struct index_state *, const char *path); +extern int read_index_from(struct index_state *, const char *path, + const char *gitdir); extern int is_index_unborn(struct index_state *); extern int read_index_unmerged(struct index_state *); @@ -914,10 +891,13 @@ extern int grafts_replace_parents; #define GIT_REPO_VERSION 0 #define GIT_REPO_VERSION_READ 1 extern int repository_format_precious_objects; +extern char *repository_format_partial_clone; +extern const char *core_partial_clone_filter_default; struct repository_format { int version; int precious_objects; + char *partial_clone; /* value of extensions.partialclone */ int is_bare; int hash_algo; char *work_tree; @@ -957,12 +937,10 @@ extern void check_repository_format(void); #define TYPE_CHANGED 0x0040 /* - * Return the name of the file in the local object database that would - * be used to store a loose object with the specified sha1. The - * return value is a pointer to a statically allocated buffer that is - * overwritten each time the function is called. + * Put in `buf` the name of the file in the local object database that + * would be used to store a loose object with the specified sha1. */ -extern const char *sha1_file_name(const unsigned char *sha1); +extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1); /* * Return an abbreviated sha1 unique within this repository's object database. 
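Note the changed contract of sha1_file_name() in the hunk above: instead of returning a pointer to a static buffer that the next call overwrites, it now fills a caller-supplied strbuf, removing that reentrancy hazard. A hedged sketch of the new usage (oid is assumed to be a struct object_id the caller already has):

    struct strbuf path = STRBUF_INIT;

    sha1_file_name(&path, oid.hash);    /* loose-object path in this repository */
    /* ... open or stat path.buf ... */
    strbuf_release(&path);
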
@@ -1029,7 +1007,7 @@ static inline void hashclr(unsigned char *hash) static inline void oidclr(struct object_id *oid) { - hashclr(oid->hash); + memset(oid->hash, 0, GIT_MAX_RAWSZ); } @@ -1047,8 +1025,6 @@ extern const struct object_id empty_tree_oid; "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \ "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91" extern const struct object_id empty_blob_oid; -#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash) - static inline int is_empty_blob_sha1(const unsigned char *sha1) { @@ -1238,11 +1214,22 @@ static inline const unsigned char *lookup_replace_object(const unsigned char *sh /* Read and unpack a sha1 file into memory, write memory to a sha1 file */ extern int sha1_object_info(const unsigned char *, unsigned long *); -extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1); -extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1); -extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags); -extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *); -extern int force_object_loose(const unsigned char *sha1, time_t mtime); + +extern int hash_object_file(const void *buf, unsigned long len, + const char *type, struct object_id *oid); + +extern int write_object_file(const void *buf, unsigned long len, + const char *type, struct object_id *oid); + +extern int hash_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags); + +extern int pretend_object_file(void *, unsigned long, enum object_type, + struct object_id *oid); + +extern int force_object_loose(const struct object_id *oid, time_t mtime); + extern int git_open_cloexec(const char *name, int flags); #define git_open(name) git_open_cloexec(name, O_RDONLY) extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size); @@ -1635,6 +1622,7 @@ struct pack_window { extern struct packed_git { struct packed_git *next; + struct list_head mru; struct pack_window *windows; off_t pack_size; const void *index_data; @@ -1648,7 +1636,8 @@ extern struct packed_git { unsigned pack_local:1, pack_keep:1, freshened:1, - do_not_close:1; + do_not_close:1, + pack_promisor:1; unsigned char sha1[20]; struct revindex_entry *revindex; /* something like ".git/objects/pack/xxxxx.pack" */ @@ -1656,10 +1645,9 @@ extern struct packed_git { } *packed_git; /* - * A most-recently-used ordered version of the packed_git list, which can - * be iterated instead of packed_git (and marked via mru_mark). + * A most-recently-used ordered version of the packed_git list. */ -extern struct mru packed_git_mru; +extern struct list_head packed_git_mru; struct pack_entry { off_t offset; @@ -1787,6 +1775,14 @@ struct object_info { #define OBJECT_INFO_QUICK 8 extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags); +/* + * Set this to 0 to prevent sha1_object_info_extended() from fetching missing + * blobs. This has a difference only if extensions.partialClone is set. + * + * Its default value is 1. 
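The fetch_if_missing flag documented just above is the global opt-out for the lazy fetching that partial clone introduces: code that wants a missing object to stay missing (rather than be fetched on demand from the promisor remote) clears it before looking objects up. A hypothetical sketch, not taken from this patch:

    extern int fetch_if_missing;    /* declared in cache.h, defaults to 1 */

    fetch_if_missing = 0;           /* do not fetch missing objects on demand */
    /* ... object lookups now report missing objects as missing ... */
    fetch_if_missing = 1;
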
+ */ +extern int fetch_if_missing; + /* Dumb servers support */ extern int update_server_info(int); diff --git a/ci/lib-travisci.sh b/ci/lib-travisci.sh index 07f27c7270..1efee55ef7 100755 --- a/ci/lib-travisci.sh +++ b/ci/lib-travisci.sh @@ -21,8 +21,6 @@ skip_branch_tip_with_tag () { fi } -good_trees_file="$HOME/travis-cache/good-trees" - # Save some info about the current commit's tree, so we can skip the build # job if we encounter the same tree again and can provide a useful info # message. @@ -83,7 +81,10 @@ check_unignored_build_artifacts () # and installing dependencies. set -ex -mkdir -p "$HOME/travis-cache" +cache_dir="$HOME/travis-cache" +good_trees_file="$cache_dir/good-trees" + +mkdir -p "$cache_dir" skip_branch_tip_with_tag skip_good_tree diff --git a/ci/run-linux32-build.sh b/ci/run-linux32-build.sh index c19c50c1c9..2c60d2e70a 100755 --- a/ci/run-linux32-build.sh +++ b/ci/run-linux32-build.sh @@ -3,31 +3,58 @@ # Build and test Git in a 32-bit environment # # Usage: -# run-linux32-build.sh [host-user-id] +# run-linux32-build.sh <host-user-id> # -set -x +set -ex + +if test $# -ne 1 || test -z "$1" +then + echo >&2 "usage: run-linux32-build.sh <host-user-id>" + exit 1 +fi # Update packages to the latest available versions linux32 --32bit i386 sh -c ' apt update >/dev/null && apt install -y build-essential libcurl4-openssl-dev libssl-dev \ libexpat-dev gettext python >/dev/null -' && +' # If this script runs inside a docker container, then all commands are # usually executed as root. Consequently, the host user might not be # able to access the test output files. -# If a host user id is given, then create a user "ci" with the host user -# id to make everything accessible to the host user. -HOST_UID=$1 && -CI_USER=$USER && -test -z $HOST_UID || (CI_USER="ci" && useradd -u $HOST_UID $CI_USER) && +# If a non 0 host user id is given, then create a user "ci" with that +# user id to make everything accessible to the host user. +HOST_UID=$1 +if test $HOST_UID -eq 0 +then + # Just in case someone does want to run the test suite as root. + CI_USER=root +else + CI_USER=ci + if test "$(id -u $CI_USER 2>/dev/null)" = $HOST_UID + then + echo "user '$CI_USER' already exists with the requested ID $HOST_UID" + else + useradd -u $HOST_UID $CI_USER + fi + + # Due to a bug the test suite was run as root in the past, so + # a prove state file created back then is only accessible by + # root. Now that bug is fixed, the test suite is run as a + # regular user, but the prove state file coming from Travis + # CI's cache might still be owned by root. + # Make sure that this user has rights to any cached files, + # including an existing prove state file. 
+ test -n "$cache_dir" && chown -R $HOST_UID:$HOST_UID "$cache_dir" +fi # Build and test linux32 --32bit i386 su -m -l $CI_USER -c ' - cd /usr/src/git && - ln -s /tmp/travis-cache/.prove t/.prove && - make --jobs=2 && - make --quiet test + set -ex + cd /usr/src/git + test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove + make --jobs=2 + make --quiet test ' diff --git a/ci/run-linux32-docker.sh b/ci/run-linux32-docker.sh index 4f191c5bb1..21637903ce 100755 --- a/ci/run-linux32-docker.sh +++ b/ci/run-linux32-docker.sh @@ -9,7 +9,9 @@ docker pull daald/ubuntu32:xenial # Use the following command to debug the docker build locally: # $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial -# root@container:/# /usr/src/git/ci/run-linux32-build.sh +# root@container:/# /usr/src/git/ci/run-linux32-build.sh <host-user-id> + +container_cache_dir=/tmp/travis-cache docker run \ --interactive \ @@ -18,8 +20,9 @@ docker run \ --env GIT_PROVE_OPTS \ --env GIT_TEST_OPTS \ --env GIT_TEST_CLONE_2GB \ + --env cache_dir="$container_cache_dir" \ --volume "${PWD}:/usr/src/git" \ - --volume "${HOME}/travis-cache:/tmp/travis-cache" \ + --volume "$cache_dir:$container_cache_dir" \ daald/ubuntu32:xenial \ /usr/src/git/ci/run-linux32-build.sh $(id -u $USER) diff --git a/ci/run-tests.sh b/ci/run-tests.sh index 22355f0091..73e273fac7 100755 --- a/ci/run-tests.sh +++ b/ci/run-tests.sh @@ -5,8 +5,13 @@ . ${0%/*}/lib-travisci.sh -ln -s $HOME/travis-cache/.prove t/.prove +ln -s "$cache_dir/.prove" t/.prove + make --quiet test +if test "$jobname" = "linux-gcc" +then + GIT_TEST_SPLIT_INDEX=YesPlease make --quiet test +fi check_unignored_build_artifacts diff --git a/combine-diff.c b/combine-diff.c index bc08c4c5b1..18c74dad51 100644 --- a/combine-diff.c +++ b/combine-diff.c @@ -1053,7 +1053,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, if (is_file) { struct strbuf buf = STRBUF_INIT; - if (convert_to_git(&the_index, elem->path, result, len, &buf, safe_crlf)) { + if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) { free(result); result = strbuf_detach(&buf, &len); result_size = len; @@ -126,10 +126,8 @@ int register_commit_graft(struct commit_graft *graft, int ignore_dups) ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc); commit_graft_nr++; if (pos < commit_graft_nr) - memmove(commit_graft + pos + 1, - commit_graft + pos, - (commit_graft_nr - pos - 1) * - sizeof(*commit_graft)); + MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos, + commit_graft_nr - pos - 1); commit_graft[pos] = graft; return 0; } @@ -1380,9 +1378,8 @@ void free_commit_extra_headers(struct commit_extra_header *extra) } } -int commit_tree(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, +int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree, + struct commit_list *parents, struct object_id *ret, const char *author, const char *sign_commit) { struct commit_extra_header *extra = NULL, **tail = &extra; @@ -1511,8 +1508,8 @@ N_("Warning: commit message did not conform to UTF-8.\n" "variable i18n.commitencoding to the encoding your project uses.\n"); int commit_tree_extended(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, + const struct object_id *tree, + struct commit_list *parents, struct object_id *ret, const char *author, const char *sign_commit, struct commit_extra_header *extra) { @@ 
-1520,7 +1517,7 @@ int commit_tree_extended(const char *msg, size_t msg_len, int encoding_is_utf8; struct strbuf buffer; - assert_sha1_type(tree, OBJ_TREE); + assert_sha1_type(tree->hash, OBJ_TREE); if (memchr(msg, '\0', msg_len)) return error("a NUL byte in commit log message not allowed."); @@ -1529,7 +1526,7 @@ int commit_tree_extended(const char *msg, size_t msg_len, encoding_is_utf8 = is_encoding_utf8(git_commit_encoding); strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */ - strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree)); + strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree)); /* * NOTE! This ordering means that the same exact tree merged with a @@ -1568,7 +1565,7 @@ int commit_tree_extended(const char *msg, size_t msg_len, goto out; } - result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret); + result = write_object_file(buffer.buf, buffer.len, commit_type, ret); out: strbuf_release(&buffer); return result; @@ -262,14 +262,15 @@ extern void append_merge_tag_headers(struct commit_list *parents, struct commit_extra_header ***tail); extern int commit_tree(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, + const struct object_id *tree, + struct commit_list *parents, struct object_id *ret, const char *author, const char *sign_commit); extern int commit_tree_extended(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, - const char *author, const char *sign_commit, + const struct object_id *tree, + struct commit_list *parents, + struct object_id *ret, const char *author, + const char *sign_commit, struct commit_extra_header *); extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **); @@ -1149,11 +1149,14 @@ static int git_default_core_config(const char *var, const char *value) } if (!strcmp(var, "core.safecrlf")) { + int eol_rndtrp_die; if (value && !strcasecmp(value, "warn")) { - safe_crlf = SAFE_CRLF_WARN; + global_conv_flags_eol = CONV_EOL_RNDTRP_WARN; return 0; } - safe_crlf = git_config_bool(var, value); + eol_rndtrp_die = git_config_bool(var, value); + global_conv_flags_eol = eol_rndtrp_die ? + CONV_EOL_RNDTRP_DIE : CONV_EOL_RNDTRP_WARN; return 0; } @@ -1251,6 +1254,11 @@ static int git_default_core_config(const char *var, const char *value) return 0; } + if (!strcmp(var, "core.partialclonefilter")) { + return git_config_string(&core_partial_clone_filter_default, + var, value); + } + /* Add other config variables here and to Documentation/config.txt. 
*/ return 0; } diff --git a/config.mak.uname b/config.mak.uname index 685a80d138..6a1d0de0cc 100644 --- a/config.mak.uname +++ b/config.mak.uname @@ -182,7 +182,6 @@ ifeq ($(uname_O),Cygwin) NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease X = .exe UNRELIABLE_FSTAT = UnfortunatelyYes - SPARSE_FLAGS = -isystem /usr/include/w32api -Wno-one-bit-signed-bitfield OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo MMAP_PREVENTS_DELETE = UnfortunatelyYes COMPAT_OBJS += compat/cygwin.o diff --git a/connected.c b/connected.c index 4a47f33270..91feb78815 100644 --- a/connected.c +++ b/connected.c @@ -56,6 +56,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data, argv_array_push(&rev_list.args,"rev-list"); argv_array_push(&rev_list.args, "--objects"); argv_array_push(&rev_list.args, "--stdin"); + if (repository_format_partial_clone) + argv_array_push(&rev_list.args, "--exclude-promisor-objects"); argv_array_push(&rev_list.args, "--not"); argv_array_push(&rev_list.args, "--all"); argv_array_push(&rev_list.args, "--quiet"); diff --git a/contrib/coccinelle/strbuf.cocci b/contrib/coccinelle/strbuf.cocci index 1d580e49b0..e34eada1ad 100644 --- a/contrib/coccinelle/strbuf.cocci +++ b/contrib/coccinelle/strbuf.cocci @@ -1,21 +1,6 @@ @ strbuf_addf_with_format_only @ expression E; -constant fmt; -@@ - strbuf_addf(E, -( - fmt -| - _(fmt) -) - ); - -@ script:python @ -fmt << strbuf_addf_with_format_only.fmt; -@@ -cocci.include_match("%" not in fmt) - -@ extends strbuf_addf_with_format_only @ +constant fmt !~ "%"; @@ - strbuf_addf + strbuf_addstr @@ -29,8 +14,9 @@ cocci.include_match("%" not in fmt) @@ expression E1, E2; +format F =~ "s"; @@ -- strbuf_addf(E1, "%s", E2); +- strbuf_addf(E1, "%@F@", E2); + strbuf_addstr(E1, E2); @@ diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash index 8777805c9f..91536d831c 100644 --- a/contrib/completion/git-completion.bash +++ b/contrib/completion/git-completion.bash @@ -594,7 +594,7 @@ __git_is_configured_remote () __git_list_merge_strategies () { - git merge -s help 2>&1 | + LANG=C LC_ALL=C git merge -s help 2>&1 | sed -n -e '/[Aa]vailable strategies are: /,/^$/{ s/\.$// s/.*:// @@ -1077,7 +1077,7 @@ _git_am () { __git_find_repo_path if [ -d "$__git_repo_path"/rebase-apply ]; then - __gitcomp "--skip --continue --resolved --abort --show-current-patch" + __gitcomp "--skip --continue --resolved --abort --quit --show-current-patch" return fi case "$cur" in @@ -1468,7 +1468,7 @@ __git_fetch_recurse_submodules="yes on-demand no" __git_fetch_options=" --quiet --verbose --append --upload-pack --force --keep --depth= --tags --no-tags --all --prune --dry-run --recurse-submodules= - --unshallow --update-shallow + --unshallow --update-shallow --prune-tags " _git_fetch () diff --git a/contrib/examples/git-difftool.perl b/contrib/examples/git-difftool.perl index df59bdfe97..fb0fd0b84b 100755 --- a/contrib/examples/git-difftool.perl +++ b/contrib/examples/git-difftool.perl @@ -13,7 +13,7 @@ use 5.008; use strict; use warnings; -use Error qw(:try); +use Git::Error qw(:try); use File::Basename qw(dirname); use File::Copy; use File::Find; diff --git a/contrib/subtree/git-subtree.txt b/contrib/subtree/git-subtree.txt index 60d76cdddf..352deda69d 100644 --- a/contrib/subtree/git-subtree.txt +++ b/contrib/subtree/git-subtree.txt @@ -28,7 +28,7 @@ as a subdirectory of your application. Subtrees are not to be confused with submodules, which are meant for the same task. 
Unlike submodules, subtrees do not need any special -constructions (like .gitmodule files or gitlinks) be present in +constructions (like .gitmodules files or gitlinks) be present in your repository, and do not force end-users of your repository to do anything special or to understand how subtrees work. A subtree is just a subdirectory that can be @@ -193,30 +193,30 @@ static enum eol output_eol(enum crlf_action crlf_action) return core_eol; } -static void check_safe_crlf(const char *path, enum crlf_action crlf_action, +static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_action, struct text_stat *old_stats, struct text_stat *new_stats, - enum safe_crlf checksafe) + int conv_flags) { if (old_stats->crlf && !new_stats->crlf ) { /* * CRLFs would not be restored by checkout */ - if (checksafe == SAFE_CRLF_WARN) + if (conv_flags & CONV_EOL_RNDTRP_DIE) + die(_("CRLF would be replaced by LF in %s."), path); + else if (conv_flags & CONV_EOL_RNDTRP_WARN) warning(_("CRLF will be replaced by LF in %s.\n" "The file will have its original line" " endings in your working directory."), path); - else /* i.e. SAFE_CRLF_FAIL */ - die(_("CRLF would be replaced by LF in %s."), path); } else if (old_stats->lonelf && !new_stats->lonelf ) { /* * CRLFs would be added by checkout */ - if (checksafe == SAFE_CRLF_WARN) + if (conv_flags & CONV_EOL_RNDTRP_DIE) + die(_("LF would be replaced by CRLF in %s"), path); + else if (conv_flags & CONV_EOL_RNDTRP_WARN) warning(_("LF will be replaced by CRLF in %s.\n" "The file will have its original line" " endings in your working directory."), path); - else /* i.e. SAFE_CRLF_FAIL */ - die(_("LF would be replaced by CRLF in %s"), path); } } @@ -268,7 +268,7 @@ static int will_convert_lf_to_crlf(size_t len, struct text_stat *stats, static int crlf_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *buf, - enum crlf_action crlf_action, enum safe_crlf checksafe) + enum crlf_action crlf_action, int conv_flags) { struct text_stat stats; char *dst; @@ -298,12 +298,12 @@ static int crlf_to_git(const struct index_state *istate, * unless we want to renormalize in a merge or * cherry-pick. */ - if ((checksafe != SAFE_CRLF_RENORMALIZE) && + if ((!(conv_flags & CONV_EOL_RENORMALIZE)) && has_crlf_in_index(istate, path)) convert_crlf_into_lf = 0; } - if ((checksafe == SAFE_CRLF_WARN || - (checksafe == SAFE_CRLF_FAIL)) && len) { + if (((conv_flags & CONV_EOL_RNDTRP_WARN) || + ((conv_flags & CONV_EOL_RNDTRP_DIE) && len))) { struct text_stat new_stats; memcpy(&new_stats, &stats, sizeof(new_stats)); /* simulate "git add" */ @@ -316,7 +316,7 @@ static int crlf_to_git(const struct index_state *istate, new_stats.crlf += new_stats.lonelf; new_stats.lonelf = 0; } - check_safe_crlf(path, crlf_action, &stats, &new_stats, checksafe); + check_global_conv_flags_eol(path, crlf_action, &stats, &new_stats, conv_flags); } if (!convert_crlf_into_lf) return 0; @@ -898,7 +898,7 @@ static int ident_to_git(const char *path, const char *src, size_t len, static int ident_to_worktree(const char *path, const char *src, size_t len, struct strbuf *buf, int ident) { - unsigned char sha1[20]; + struct object_id oid; char *to_free = NULL, *dollar, *spc; int cnt; @@ -912,7 +912,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len, /* are we "faking" in place editing ? 
*/ if (src == buf->buf) to_free = strbuf_detach(buf, NULL); - hash_sha1_file(src, len, "blob", sha1); + hash_object_file(src, len, "blob", &oid); strbuf_grow(buf, len + cnt * 43); for (;;) { @@ -969,7 +969,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len, /* step 4: substitute */ strbuf_addstr(buf, "Id: "); - strbuf_add(buf, sha1_to_hex(sha1), 40); + strbuf_addstr(buf, oid_to_hex(&oid)); strbuf_addstr(buf, " $"); } strbuf_add(buf, src, len); @@ -1129,7 +1129,7 @@ const char *get_convert_attr_ascii(const char *path) int convert_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, - struct strbuf *dst, enum safe_crlf checksafe) + struct strbuf *dst, int conv_flags) { int ret = 0; struct conv_attrs ca; @@ -1144,8 +1144,8 @@ int convert_to_git(const struct index_state *istate, src = dst->buf; len = dst->len; } - if (checksafe != SAFE_CRLF_KEEP_CRLF) { - ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, checksafe); + if (!(conv_flags & CONV_EOL_KEEP_CRLF)) { + ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags); if (ret && dst) { src = dst->buf; len = dst->len; @@ -1156,7 +1156,7 @@ int convert_to_git(const struct index_state *istate, void convert_to_git_filter_fd(const struct index_state *istate, const char *path, int fd, struct strbuf *dst, - enum safe_crlf checksafe) + int conv_flags) { struct conv_attrs ca; convert_attrs(&ca, path); @@ -1167,7 +1167,7 @@ void convert_to_git_filter_fd(const struct index_state *istate, if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL)) die("%s: clean filter '%s' failed", path, ca.drv->name); - crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, checksafe); + crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags); ident_to_git(path, dst->buf, dst->len, dst, ca.ident); } @@ -1226,7 +1226,7 @@ int renormalize_buffer(const struct index_state *istate, const char *path, src = dst->buf; len = dst->len; } - return ret | convert_to_git(istate, path, src, len, dst, SAFE_CRLF_RENORMALIZE); + return ret | convert_to_git(istate, path, src, len, dst, CONV_EOL_RENORMALIZE); } /***************************************************************** @@ -8,15 +8,12 @@ struct index_state; -enum safe_crlf { - SAFE_CRLF_FALSE = 0, - SAFE_CRLF_FAIL = 1, - SAFE_CRLF_WARN = 2, - SAFE_CRLF_RENORMALIZE = 3, - SAFE_CRLF_KEEP_CRLF = 4 -}; +#define CONV_EOL_RNDTRP_DIE (1<<0) /* Die if CRLF to LF to CRLF is different */ +#define CONV_EOL_RNDTRP_WARN (1<<1) /* Warn if CRLF to LF to CRLF is different */ +#define CONV_EOL_RENORMALIZE (1<<2) /* Convert CRLF to LF */ +#define CONV_EOL_KEEP_CRLF (1<<3) /* Keep CRLF line endings as is */ -extern enum safe_crlf safe_crlf; +extern int global_conv_flags_eol; enum auto_crlf { AUTO_CRLF_FALSE = 0, @@ -66,7 +63,7 @@ extern const char *get_convert_attr_ascii(const char *path); /* returns 1 if *dst was used */ extern int convert_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, - struct strbuf *dst, enum safe_crlf checksafe); + struct strbuf *dst, int conv_flags); extern int convert_to_working_tree(const char *path, const char *src, size_t len, struct strbuf *dst); extern int async_convert_to_working_tree(const char *path, const char *src, @@ -85,7 +82,7 @@ static inline int would_convert_to_git(const struct index_state *istate, extern void convert_to_git_filter_fd(const struct index_state *istate, const char *path, int fd, struct strbuf *dst, - enum safe_crlf 
checksafe); + int conv_flags); extern int would_convert_to_git_filter_fd(const char *path); /***************************************************************** diff --git a/csum-file.c b/csum-file.c index 2adae04073..5eda7fb6af 100644 --- a/csum-file.c +++ b/csum-file.c @@ -11,7 +11,7 @@ #include "progress.h" #include "csum-file.h" -static void flush(struct sha1file *f, const void *buf, unsigned int count) +static void flush(struct hashfile *f, const void *buf, unsigned int count) { if (0 <= f->check_fd && count) { unsigned char check_buffer[8192]; @@ -42,28 +42,28 @@ static void flush(struct sha1file *f, const void *buf, unsigned int count) } } -void sha1flush(struct sha1file *f) +void hashflush(struct hashfile *f) { unsigned offset = f->offset; if (offset) { - git_SHA1_Update(&f->ctx, f->buffer, offset); + the_hash_algo->update_fn(&f->ctx, f->buffer, offset); flush(f, f->buffer, offset); f->offset = 0; } } -int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags) +int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags) { int fd; - sha1flush(f); - git_SHA1_Final(f->buffer, &f->ctx); + hashflush(f); + the_hash_algo->final_fn(f->buffer, &f->ctx); if (result) hashcpy(result, f->buffer); if (flags & (CSUM_CLOSE | CSUM_FSYNC)) { /* write checksum and close fd */ - flush(f, f->buffer, 20); + flush(f, f->buffer, the_hash_algo->rawsz); if (flags & CSUM_FSYNC) fsync_or_die(f->fd, f->name); if (close(f->fd)) @@ -86,7 +86,7 @@ int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags) return fd; } -void sha1write(struct sha1file *f, const void *buf, unsigned int count) +void hashwrite(struct hashfile *f, const void *buf, unsigned int count) { while (count) { unsigned offset = f->offset; @@ -110,7 +110,7 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count) buf = (char *) buf + nr; left -= nr; if (!left) { - git_SHA1_Update(&f->ctx, data, offset); + the_hash_algo->update_fn(&f->ctx, data, offset); flush(f, data, offset); offset = 0; } @@ -118,15 +118,15 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count) } } -struct sha1file *sha1fd(int fd, const char *name) +struct hashfile *hashfd(int fd, const char *name) { - return sha1fd_throughput(fd, name, NULL); + return hashfd_throughput(fd, name, NULL); } -struct sha1file *sha1fd_check(const char *name) +struct hashfile *hashfd_check(const char *name) { int sink, check; - struct sha1file *f; + struct hashfile *f; sink = open("/dev/null", O_WRONLY); if (sink < 0) @@ -134,14 +134,14 @@ struct sha1file *sha1fd_check(const char *name) check = open(name, O_RDONLY); if (check < 0) die_errno("unable to open '%s'", name); - f = sha1fd(sink, name); + f = hashfd(sink, name); f->check_fd = check; return f; } -struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp) +struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp) { - struct sha1file *f = xmalloc(sizeof(*f)); + struct hashfile *f = xmalloc(sizeof(*f)); f->fd = fd; f->check_fd = -1; f->offset = 0; @@ -149,18 +149,18 @@ struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp f->tp = tp; f->name = name; f->do_crc = 0; - git_SHA1_Init(&f->ctx); + the_hash_algo->init_fn(&f->ctx); return f; } -void sha1file_checkpoint(struct sha1file *f, struct sha1file_checkpoint *checkpoint) +void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint) { - sha1flush(f); + hashflush(f); checkpoint->offset = f->total; checkpoint->ctx = 
f->ctx; } -int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint) +int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint) { off_t offset = checkpoint->offset; @@ -169,17 +169,17 @@ int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint return -1; f->total = offset; f->ctx = checkpoint->ctx; - f->offset = 0; /* sha1flush() was called in checkpoint */ + f->offset = 0; /* hashflush() was called in checkpoint */ return 0; } -void crc32_begin(struct sha1file *f) +void crc32_begin(struct hashfile *f) { f->crc32 = crc32(0, NULL, 0); f->do_crc = 1; } -uint32_t crc32_end(struct sha1file *f) +uint32_t crc32_end(struct hashfile *f) { f->do_crc = 0; return f->crc32; diff --git a/csum-file.h b/csum-file.h index 7530927d77..992e5c0141 100644 --- a/csum-file.h +++ b/csum-file.h @@ -4,11 +4,11 @@ struct progress; /* A SHA1-protected file */ -struct sha1file { +struct hashfile { int fd; int check_fd; unsigned int offset; - git_SHA_CTX ctx; + git_hash_ctx ctx; off_t total; struct progress *tp; const char *name; @@ -18,36 +18,36 @@ struct sha1file { }; /* Checkpoint */ -struct sha1file_checkpoint { +struct hashfile_checkpoint { off_t offset; - git_SHA_CTX ctx; + git_hash_ctx ctx; }; -extern void sha1file_checkpoint(struct sha1file *, struct sha1file_checkpoint *); -extern int sha1file_truncate(struct sha1file *, struct sha1file_checkpoint *); +extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *); +extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *); -/* sha1close flags */ +/* hashclose flags */ #define CSUM_CLOSE 1 #define CSUM_FSYNC 2 -extern struct sha1file *sha1fd(int fd, const char *name); -extern struct sha1file *sha1fd_check(const char *name); -extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp); -extern int sha1close(struct sha1file *, unsigned char *, unsigned int); -extern void sha1write(struct sha1file *, const void *, unsigned int); -extern void sha1flush(struct sha1file *f); -extern void crc32_begin(struct sha1file *); -extern uint32_t crc32_end(struct sha1file *); +extern struct hashfile *hashfd(int fd, const char *name); +extern struct hashfile *hashfd_check(const char *name); +extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp); +extern int hashclose(struct hashfile *, unsigned char *, unsigned int); +extern void hashwrite(struct hashfile *, const void *, unsigned int); +extern void hashflush(struct hashfile *f); +extern void crc32_begin(struct hashfile *); +extern uint32_t crc32_end(struct hashfile *); -static inline void sha1write_u8(struct sha1file *f, uint8_t data) +static inline void hashwrite_u8(struct hashfile *f, uint8_t data) { - sha1write(f, &data, sizeof(data)); + hashwrite(f, &data, sizeof(data)); } -static inline void sha1write_be32(struct sha1file *f, uint32_t data) +static inline void hashwrite_be32(struct hashfile *f, uint32_t data) { data = htonl(data); - sha1write(f, &data, sizeof(data)); + hashwrite(f, &data, sizeof(data)); } #endif @@ -9,7 +9,12 @@ #define initgroups(x, y) (0) /* nothing */ #endif -static int log_syslog; +static enum log_destination { + LOG_DESTINATION_UNSET = -1, + LOG_DESTINATION_NONE = 0, + LOG_DESTINATION_STDERR = 1, + LOG_DESTINATION_SYSLOG = 2, +} log_destination = LOG_DESTINATION_UNSET; static int verbose; static int reuseaddr; static int informative_errors; @@ -25,6 +30,7 @@ static const char daemon_usage[] = " [--access-hook=<path>]\n" " 
[--inetd | [--listen=<host_or_ipaddr>] [--port=<n>]\n" " [--detach] [--user=<user> [--group=<group>]]\n" +" [--log-destination=(stderr|syslog|none)]\n" " [<directory>...]"; /* List of acceptable pathname prefixes */ @@ -74,11 +80,14 @@ static const char *get_ip_address(struct hostinfo *hi) static void logreport(int priority, const char *err, va_list params) { - if (log_syslog) { + switch (log_destination) { + case LOG_DESTINATION_SYSLOG: { char buf[1024]; vsnprintf(buf, sizeof(buf), err, params); syslog(priority, "%s", buf); - } else { + break; + } + case LOG_DESTINATION_STDERR: /* * Since stderr is set to buffered mode, the * logging of different processes will not overlap @@ -88,6 +97,11 @@ static void logreport(int priority, const char *err, va_list params) vfprintf(stderr, err, params); fputc('\n', stderr); fflush(stderr); + break; + case LOG_DESTINATION_NONE: + break; + case LOG_DESTINATION_UNSET: + BUG("log destination not initialized correctly"); } } @@ -597,6 +611,7 @@ static char *parse_host_arg(struct hostinfo *hi, char *extra_args, int buflen) if (strncasecmp("host=", extra_args, 5) == 0) { val = extra_args + 5; vallen = strlen(val) + 1; + loginfo("Extended attribute \"host\": %s", val); if (*val) { /* Split <host>:<port> at colon. */ char *host; @@ -647,9 +662,11 @@ static void parse_extra_args(struct hostinfo *hi, struct argv_array *env, } } - if (git_protocol.len > 0) + if (git_protocol.len > 0) { + loginfo("Extended attribute \"protocol\": %s", git_protocol.buf); argv_array_pushf(env, GIT_PROTOCOL_ENVIRONMENT "=%s", git_protocol.buf); + } strbuf_release(&git_protocol); } @@ -757,14 +774,8 @@ static int execute(void) alarm(0); len = strlen(line); - if (pktlen != len) - loginfo("Extended attributes (%d bytes) exist <%.*s>", - (int) pktlen - len, - (int) pktlen - len, line + len + 1); - if (len && line[len-1] == '\n') { - line[--len] = 0; - pktlen--; - } + if (len && line[len-1] == '\n') + line[len-1] = 0; /* parse additional args hidden behind a NUL byte */ if (len != pktlen) @@ -1289,7 +1300,6 @@ int cmd_main(int argc, const char **argv) } if (!strcmp(arg, "--inetd")) { inetd_mode = 1; - log_syslog = 1; continue; } if (!strcmp(arg, "--verbose")) { @@ -1297,9 +1307,22 @@ int cmd_main(int argc, const char **argv) continue; } if (!strcmp(arg, "--syslog")) { - log_syslog = 1; + log_destination = LOG_DESTINATION_SYSLOG; continue; } + if (skip_prefix(arg, "--log-destination=", &v)) { + if (!strcmp(v, "syslog")) { + log_destination = LOG_DESTINATION_SYSLOG; + continue; + } else if (!strcmp(v, "stderr")) { + log_destination = LOG_DESTINATION_STDERR; + continue; + } else if (!strcmp(v, "none")) { + log_destination = LOG_DESTINATION_NONE; + continue; + } else + die("unknown log destination '%s'", v); + } if (!strcmp(arg, "--export-all")) { export_all_trees = 1; continue; @@ -1356,7 +1379,6 @@ int cmd_main(int argc, const char **argv) } if (!strcmp(arg, "--detach")) { detach = 1; - log_syslog = 1; continue; } if (skip_prefix(arg, "--user=", &v)) { @@ -1402,7 +1424,14 @@ int cmd_main(int argc, const char **argv) usage(daemon_usage); } - if (log_syslog) { + if (log_destination == LOG_DESTINATION_UNSET) { + if (inetd_mode || detach) + log_destination = LOG_DESTINATION_SYSLOG; + else + log_destination = LOG_DESTINATION_STDERR; + } + + if (log_destination == LOG_DESTINATION_SYSLOG) { openlog("git-daemon", LOG_PID, LOG_DAEMON); set_die_routine(daemon_die); } else diff --git a/diff-lib.c b/diff-lib.c index 8104603a3b..a228e1a219 100644 --- a/diff-lib.c +++ b/diff-lib.c @@ -92,6 +92,7 @@ int 
run_diff_files(struct rev_info *revs, unsigned int option) int diff_unmerged_stage = revs->max_count; unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED) ? CE_MATCH_RACY_IS_DIRTY : 0); + uint64_t start = getnanotime(); diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/"); @@ -246,6 +247,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option) } diffcore_std(&revs->diffopt); diff_flush(&revs->diffopt); + trace_performance_since(start, "diff-files"); return 0; } @@ -512,6 +514,7 @@ static int diff_cache(struct rev_info *revs, int run_diff_index(struct rev_info *revs, int cached) { struct object_array_entry *ent; + uint64_t start = getnanotime(); ent = revs->pending.objects; if (diff_cache(revs, &ent->item->oid, ent->name, cached)) @@ -521,6 +524,7 @@ int run_diff_index(struct rev_info *revs, int cached) diffcore_fix_diff_index(&revs->diffopt); diffcore_std(&revs->diffopt); diff_flush(&revs->diffopt); + trace_performance_since(start, "diff-index"); return 0; } @@ -3520,13 +3520,13 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) { int size_only = flags & CHECK_SIZE_ONLY; int err = 0; + int conv_flags = global_conv_flags_eol; /* * demote FAIL to WARN to allow inspecting the situation * instead of refusing. */ - enum safe_crlf crlf_warn = (safe_crlf == SAFE_CRLF_FAIL - ? SAFE_CRLF_WARN - : safe_crlf); + if (conv_flags & CONV_EOL_RNDTRP_DIE) + conv_flags = CONV_EOL_RNDTRP_WARN; if (!DIFF_FILE_VALID(s)) die("internal error: asking to populate invalid file."); @@ -3603,7 +3603,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) /* * Convert from working tree format to canonical git format */ - if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, crlf_warn)) { + if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) { size_t size = 0; munmap(s->data, s->size); s->should_munmap = 0; @@ -5471,6 +5471,7 @@ N_("you may want to set your %s variable to at least " void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc) { + fflush(stdout); if (degraded_cc) warning(_(degrade_cc_to_c_warning)); else if (needed) diff --git a/diffcore-rename.c b/diffcore-rename.c index 245e999fe5..0b7e4989a8 100644 --- a/diffcore-rename.c +++ b/diffcore-rename.c @@ -57,8 +57,8 @@ static int add_rename_dst(struct diff_filespec *two) ALLOC_GROW(rename_dst, rename_dst_nr + 1, rename_dst_alloc); rename_dst_nr++; if (first < rename_dst_nr) - memmove(rename_dst + first + 1, rename_dst + first, - (rename_dst_nr - first - 1) * sizeof(*rename_dst)); + MOVE_ARRAY(rename_dst + first + 1, rename_dst + first, + rename_dst_nr - first - 1); rename_dst[first].two = alloc_filespec(two->path); fill_filespec(rename_dst[first].two, &two->oid, two->oid_valid, two->mode); @@ -98,8 +98,8 @@ static struct diff_rename_src *register_rename_src(struct diff_filepair *p) ALLOC_GROW(rename_src, rename_src_nr + 1, rename_src_alloc); rename_src_nr++; if (first < rename_src_nr) - memmove(rename_src + first + 1, rename_src + first, - (rename_src_nr - first - 1) * sizeof(*rename_src)); + MOVE_ARRAY(rename_src + first + 1, rename_src + first, + rename_src_nr - first - 1); rename_src[first].p = p; rename_src[first].score = score; return &(rename_src[first]); @@ -260,8 +260,8 @@ static unsigned int hash_filespec(struct diff_filespec *filespec) if (!filespec->oid_valid) { if (diff_populate_filespec(filespec, 0)) return 0; - hash_sha1_file(filespec->data, filespec->size, "blob", - filespec->oid.hash); + hash_object_file(filespec->data, 
filespec->size, "blob", + &filespec->oid); } return sha1hash(filespec->oid.hash); } @@ -231,12 +231,10 @@ int within_depth(const char *name, int namelen, * 1 along with { data, size } of the (possibly augmented) buffer * when successful. * - * Optionally updates the given sha1_stat with the given OID (when valid). + * Optionally updates the given oid_stat with the given OID (when valid). */ -static int do_read_blob(const struct object_id *oid, - struct sha1_stat *sha1_stat, - size_t *size_out, - char **data_out) +static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat, + size_t *size_out, char **data_out) { enum object_type type; unsigned long sz; @@ -251,9 +249,9 @@ static int do_read_blob(const struct object_id *oid, return -1; } - if (sha1_stat) { - memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat)); - hashcpy(sha1_stat->sha1, oid->hash); + if (oid_stat) { + memset(&oid_stat->stat, 0, sizeof(oid_stat->stat)); + oidcpy(&oid_stat->oid, oid); } if (sz == 0) { @@ -654,9 +652,8 @@ void add_exclude(const char *string, const char *base, static int read_skip_worktree_file_from_index(const struct index_state *istate, const char *path, - size_t *size_out, - char **data_out, - struct sha1_stat *sha1_stat) + size_t *size_out, char **data_out, + struct oid_stat *oid_stat) { int pos, len; @@ -667,7 +664,7 @@ static int read_skip_worktree_file_from_index(const struct index_state *istate, if (!ce_skip_worktree(istate->cache[pos])) return -1; - return do_read_blob(&istate->cache[pos]->oid, sha1_stat, size_out, data_out); + return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out); } /* @@ -747,8 +744,8 @@ static struct untracked_cache_dir *lookup_untracked(struct untracked_cache *uc, FLEX_ALLOC_MEM(d, name, name, len); ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc); - memmove(dir->dirs + first + 1, dir->dirs + first, - (dir->dirs_nr - first) * sizeof(*dir->dirs)); + MOVE_ARRAY(dir->dirs + first + 1, dir->dirs + first, + dir->dirs_nr - first); dir->dirs_nr++; dir->dirs[first] = d; return d; @@ -774,7 +771,16 @@ static void invalidate_directory(struct untracked_cache *uc, struct untracked_cache_dir *dir) { int i; - uc->dir_invalidated++; + + /* + * Invalidation increment here is just roughly correct. If + * untracked_nr or any of dirs[].recurse is non-zero, we + * should increment dir_invalidated too. But that's more + * expensive to do. + */ + if (dir->valid) + uc->dir_invalidated++; + dir->valid = 0; dir->untracked_nr = 0; for (i = 0; i < dir->dirs_nr; i++) @@ -795,9 +801,8 @@ static int add_excludes_from_buffer(char *buf, size_t size, * ss_valid is non-zero, "ss" must contain good value as input. 
*/ static int add_excludes(const char *fname, const char *base, int baselen, - struct exclude_list *el, - struct index_state *istate, - struct sha1_stat *sha1_stat) + struct exclude_list *el, struct index_state *istate, + struct oid_stat *oid_stat) { struct stat st; int r; @@ -815,16 +820,16 @@ static int add_excludes(const char *fname, const char *base, int baselen, return -1; r = read_skip_worktree_file_from_index(istate, fname, &size, &buf, - sha1_stat); + oid_stat); if (r != 1) return r; } else { size = xsize_t(st.st_size); if (size == 0) { - if (sha1_stat) { - fill_stat_data(&sha1_stat->stat, &st); - hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN); - sha1_stat->valid = 1; + if (oid_stat) { + fill_stat_data(&oid_stat->stat, &st); + oidcpy(&oid_stat->oid, &empty_blob_oid); + oid_stat->valid = 1; } close(fd); return 0; @@ -837,22 +842,23 @@ static int add_excludes(const char *fname, const char *base, int baselen, } buf[size++] = '\n'; close(fd); - if (sha1_stat) { + if (oid_stat) { int pos; - if (sha1_stat->valid && - !match_stat_data_racy(istate, &sha1_stat->stat, &st)) + if (oid_stat->valid && + !match_stat_data_racy(istate, &oid_stat->stat, &st)) ; /* no content change, ss->sha1 still good */ else if (istate && (pos = index_name_pos(istate, fname, strlen(fname))) >= 0 && !ce_stage(istate->cache[pos]) && ce_uptodate(istate->cache[pos]) && !would_convert_to_git(istate, fname)) - hashcpy(sha1_stat->sha1, - istate->cache[pos]->oid.hash); + oidcpy(&oid_stat->oid, + &istate->cache[pos]->oid); else - hash_sha1_file(buf, size, "blob", sha1_stat->sha1); - fill_stat_data(&sha1_stat->stat, &st); - sha1_stat->valid = 1; + hash_object_file(buf, size, "blob", + &oid_stat->oid); + fill_stat_data(&oid_stat->stat, &st); + oid_stat->valid = 1; } } @@ -930,7 +936,7 @@ struct exclude_list *add_exclude_list(struct dir_struct *dir, * Used to set up core.excludesfile and .git/info/exclude lists. */ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname, - struct sha1_stat *sha1_stat) + struct oid_stat *oid_stat) { struct exclude_list *el; /* @@ -941,7 +947,7 @@ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname, if (!dir->untracked) dir->unmanaged_exclude_files++; el = add_exclude_list(dir, EXC_FILE, fname); - if (add_excludes(fname, "", 0, el, NULL, sha1_stat) < 0) + if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0) die("cannot use %s as an exclude file", fname); } @@ -1180,7 +1186,7 @@ static void prep_exclude(struct dir_struct *dir, while (current < baselen) { const char *cp; - struct sha1_stat sha1_stat; + struct oid_stat oid_stat; stk = xcalloc(1, sizeof(*stk)); if (current < 0) { @@ -1223,8 +1229,8 @@ static void prep_exclude(struct dir_struct *dir, } /* Try to read per-directory file */ - hashclr(sha1_stat.sha1); - sha1_stat.valid = 0; + oidclr(&oid_stat.oid); + oid_stat.valid = 0; if (dir->exclude_per_dir && /* * If we know that no files have been added in @@ -1252,7 +1258,7 @@ static void prep_exclude(struct dir_struct *dir, strbuf_addstr(&sb, dir->exclude_per_dir); el->src = strbuf_detach(&sb, NULL); add_excludes(el->src, el->src, stk->baselen, el, istate, - untracked ? &sha1_stat : NULL); + untracked ? &oid_stat : NULL); } /* * NEEDSWORK: when untracked cache is enabled, prep_exclude() @@ -1269,9 +1275,9 @@ static void prep_exclude(struct dir_struct *dir, * order, though, if you do that. 
*/ if (untracked && - hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) { + hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) { invalidate_gitignore(dir->untracked, untracked); - hashcpy(untracked->exclude_sha1, sha1_stat.sha1); + hashcpy(untracked->exclude_sha1, oid_stat.oid.hash); } dir->exclude_stack = stk; current = stk->baselen; @@ -1773,7 +1779,7 @@ static enum path_treatment treat_path(struct dir_struct *dir, if (!de) return treat_path_fast(dir, untracked, cdir, istate, path, baselen, pathspec); - if (is_dot_or_dotdot(de->d_name) || !strcmp(de->d_name, ".git")) + if (is_dot_or_dotdot(de->d_name) || !fspathcmp(de->d_name, ".git")) return path_none; strbuf_setlen(path, baselen); strbuf_addstr(path, de->d_name); @@ -1809,24 +1815,19 @@ static int valid_cached_dir(struct dir_struct *dir, */ refresh_fsmonitor(istate); if (!(dir->untracked->use_fsmonitor && untracked->valid)) { - if (stat(path->len ? path->buf : ".", &st)) { - invalidate_directory(dir->untracked, untracked); + if (lstat(path->len ? path->buf : ".", &st)) { memset(&untracked->stat_data, 0, sizeof(untracked->stat_data)); return 0; } if (!untracked->valid || match_stat_data_racy(istate, &untracked->stat_data, &st)) { - if (untracked->valid) - invalidate_directory(dir->untracked, untracked); fill_stat_data(&untracked->stat_data, &st); return 0; } } - if (untracked->check_only != !!check_only) { - invalidate_directory(dir->untracked, untracked); + if (untracked->check_only != !!check_only) return 0; - } /* * prep_exclude will be called eventually on this directory, @@ -1853,13 +1854,20 @@ static int open_cached_dir(struct cached_dir *cdir, struct strbuf *path, int check_only) { + const char *c_path; + memset(cdir, 0, sizeof(*cdir)); cdir->untracked = untracked; if (valid_cached_dir(dir, untracked, istate, path, check_only)) return 0; - cdir->fdir = opendir(path->len ? path->buf : "."); - if (dir->untracked) + c_path = path->len ? 
path->buf : "."; + cdir->fdir = opendir(c_path); + if (!cdir->fdir) + warning_errno(_("could not open directory '%s'"), c_path); + if (dir->untracked) { + invalidate_directory(dir->untracked, untracked); dir->untracked->dir_opened++; + } if (!cdir->fdir) return -1; return 0; @@ -2228,13 +2236,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d /* Validate $GIT_DIR/info/exclude and core.excludesfile */ root = dir->untracked->root; - if (hashcmp(dir->ss_info_exclude.sha1, - dir->untracked->ss_info_exclude.sha1)) { + if (oidcmp(&dir->ss_info_exclude.oid, + &dir->untracked->ss_info_exclude.oid)) { invalidate_gitignore(dir->untracked, root); dir->untracked->ss_info_exclude = dir->ss_info_exclude; } - if (hashcmp(dir->ss_excludes_file.sha1, - dir->untracked->ss_excludes_file.sha1)) { + if (oidcmp(&dir->ss_excludes_file.oid, + &dir->untracked->ss_excludes_file.oid)) { invalidate_gitignore(dir->untracked, root); dir->untracked->ss_excludes_file = dir->ss_excludes_file; } @@ -2248,6 +2256,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, const char *path, int len, const struct pathspec *pathspec) { struct untracked_cache_dir *untracked; + uint64_t start = getnanotime(); if (has_symlink_leading_path(path, len)) return dir->nr; @@ -2286,6 +2295,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, dir->nr = i; } + trace_performance_since(start, "read directory %.*s", len, path); if (dir->untracked) { static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS); trace_printf_key(&trace_untracked_stats, @@ -2638,8 +2648,8 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len); stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat); stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat); - hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1); - hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1); + hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash); + hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash); ouc->dir_flags = htonl(untracked->dir_flags); varint_len = encode_varint(untracked->ident.len, varbuf); @@ -2816,13 +2826,12 @@ static void read_sha1(size_t pos, void *cb) rd->data += 20; } -static void load_sha1_stat(struct sha1_stat *sha1_stat, - const unsigned char *data, - const unsigned char *sha1) +static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data, + const unsigned char *sha1) { - stat_data_from_disk(&sha1_stat->stat, data); - hashcpy(sha1_stat->sha1, sha1); - sha1_stat->valid = 1; + stat_data_from_disk(&oid_stat->stat, data); + hashcpy(oid_stat->oid.hash, sha1); + oid_stat->valid = 1; } struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz) @@ -2850,12 +2859,12 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long uc = xcalloc(1, sizeof(*uc)); strbuf_init(&uc->ident, ident_len); strbuf_add(&uc->ident, ident, ident_len); - load_sha1_stat(&uc->ss_info_exclude, - next + ouc_offset(info_exclude_stat), - next + ouc_offset(info_exclude_sha1)); - load_sha1_stat(&uc->ss_excludes_file, - next + ouc_offset(excludes_file_stat), - next + ouc_offset(excludes_file_sha1)); + load_oid_stat(&uc->ss_info_exclude, + next + ouc_offset(info_exclude_stat), + next + ouc_offset(info_exclude_sha1)); + 
load_oid_stat(&uc->ss_excludes_file, + next + ouc_offset(excludes_file_stat), + next + ouc_offset(excludes_file_sha1)); uc->dir_flags = get_be32(next + ouc_offset(dir_flags)); exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir); uc->exclude_per_dir = xstrdup(exclude_per_dir); @@ -2968,10 +2977,12 @@ static int invalidate_one_component(struct untracked_cache *uc, } void untracked_cache_invalidate_path(struct index_state *istate, - const char *path) + const char *path, int safe_path) { if (!istate->untracked || !istate->untracked->root) return; + if (!safe_path && !verify_path(path)) + return; invalidate_one_component(istate->untracked, istate->untracked->root, path, strlen(path)); } @@ -2979,13 +2990,13 @@ void untracked_cache_invalidate_path(struct index_state *istate, void untracked_cache_remove_from_index(struct index_state *istate, const char *path) { - untracked_cache_invalidate_path(istate, path); + untracked_cache_invalidate_path(istate, path, 1); } void untracked_cache_add_to_index(struct index_state *istate, const char *path) { - untracked_cache_invalidate_path(istate, path); + untracked_cache_invalidate_path(istate, path, 1); } /* Update gitfile and core.worktree setting to connect work tree and git dir */ @@ -74,9 +74,9 @@ struct exclude_list_group { struct exclude_list *el; }; -struct sha1_stat { +struct oid_stat { struct stat_data stat; - unsigned char sha1[20]; + struct object_id oid; int valid; }; @@ -124,8 +124,8 @@ struct untracked_cache_dir { }; struct untracked_cache { - struct sha1_stat ss_info_exclude; - struct sha1_stat ss_excludes_file; + struct oid_stat ss_info_exclude; + struct oid_stat ss_excludes_file; const char *exclude_per_dir; struct strbuf ident; /* @@ -195,8 +195,8 @@ struct dir_struct { /* Enable untracked file cache if set */ struct untracked_cache *untracked; - struct sha1_stat ss_info_exclude; - struct sha1_stat ss_excludes_file; + struct oid_stat ss_info_exclude; + struct oid_stat ss_excludes_file; unsigned unmanaged_exclude_files; }; @@ -350,7 +350,7 @@ static inline int dir_path_match(const struct dir_entry *ent, int cmp_dir_entry(const void *p1, const void *p2); int check_dir_entry_contains(const struct dir_entry *out, const struct dir_entry *in); -void untracked_cache_invalidate_path(struct index_state *, const char *); +void untracked_cache_invalidate_path(struct index_state *, const char *, int safe_path); void untracked_cache_remove_from_index(struct index_state *, const char *); void untracked_cache_add_to_index(struct index_state *, const char *); diff --git a/environment.c b/environment.c index 63ac38a46f..de8431e01e 100644 --- a/environment.c +++ b/environment.c @@ -27,6 +27,8 @@ int warn_ambiguous_refs = 1; int warn_on_object_refname_ambiguity = 1; int ref_paranoia = -1; int repository_format_precious_objects; +char *repository_format_partial_clone; +const char *core_partial_clone_filter_default; const char *git_commit_encoding; const char *git_log_output_encoding; const char *apply_default_whitespace; @@ -49,7 +51,7 @@ enum auto_crlf auto_crlf = AUTO_CRLF_FALSE; int check_replace_refs = 1; char *git_replace_ref_base; enum eol core_eol = EOL_UNSET; -enum safe_crlf safe_crlf = SAFE_CRLF_WARN; +int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN; unsigned whitespace_rule_cfg = WS_DEFAULT_RULE; enum branch_track git_branch_track = BRANCH_TRACK_REMOTE; enum rebase_setup_type autorebase = AUTOREBASE_NEVER; diff --git a/fast-import.c b/fast-import.c index b70ac025e0..0f818cd932 100644 --- a/fast-import.c +++ b/fast-import.c @@ -316,7 
+316,7 @@ static struct atom_str **atom_table; /* The .pack file being generated */ static struct pack_idx_option pack_idx_opts; static unsigned int pack_id; -static struct sha1file *pack_file; +static struct hashfile *pack_file; static struct packed_git *pack_data; static struct packed_git **all_packs; static off_t pack_size; @@ -905,12 +905,12 @@ static void start_packfile(void) p->pack_fd = pack_fd; p->do_not_close = 1; - pack_file = sha1fd(pack_fd, p->pack_name); + pack_file = hashfd(pack_fd, p->pack_name); hdr.hdr_signature = htonl(PACK_SIGNATURE); hdr.hdr_version = htonl(2); hdr.hdr_entries = 0; - sha1write(pack_file, &hdr, sizeof(hdr)); + hashwrite(pack_file, &hdr, sizeof(hdr)); pack_data = p; pack_size = sizeof(hdr); @@ -1016,7 +1016,7 @@ static void end_packfile(void) struct tag *t; close_pack_windows(pack_data); - sha1close(pack_file, cur_pack_oid.hash, 0); + hashclose(pack_file, cur_pack_oid.hash, 0); fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1, pack_data->pack_name, object_count, cur_pack_oid.hash, pack_size); @@ -1092,15 +1092,15 @@ static int store_object( unsigned char hdr[96]; struct object_id oid; unsigned long hdrlen, deltalen; - git_SHA_CTX c; + git_hash_ctx c; git_zstream s; hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu", typename(type), (unsigned long)dat->len) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); - git_SHA1_Update(&c, dat->buf, dat->len); - git_SHA1_Final(oid.hash, &c); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); + the_hash_algo->update_fn(&c, dat->buf, dat->len); + the_hash_algo->final_fn(oid.hash, &c); if (oidout) oidcpy(oidout, &oid); @@ -1118,11 +1118,13 @@ static int store_object( return 1; } - if (last && last->data.buf && last->depth < max_depth && dat->len > 20) { + if (last && last->data.buf && last->depth < max_depth + && dat->len > the_hash_algo->rawsz) { + delta_count_attempts_by_type[type]++; delta = diff_delta(last->data.buf, last->data.len, dat->buf, dat->len, - &deltalen, dat->len - 20); + &deltalen, dat->len - the_hash_algo->rawsz); } else delta = NULL; @@ -1180,23 +1182,23 @@ static int store_object( hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr), OBJ_OFS_DELTA, deltalen); - sha1write(pack_file, hdr, hdrlen); + hashwrite(pack_file, hdr, hdrlen); pack_size += hdrlen; hdr[pos] = ofs & 127; while (ofs >>= 7) hdr[--pos] = 128 | (--ofs & 127); - sha1write(pack_file, hdr + pos, sizeof(hdr) - pos); + hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos); pack_size += sizeof(hdr) - pos; } else { e->depth = 0; hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr), type, dat->len); - sha1write(pack_file, hdr, hdrlen); + hashwrite(pack_file, hdr, hdrlen); pack_size += hdrlen; } - sha1write(pack_file, out, s.total_out); + hashwrite(pack_file, out, s.total_out); pack_size += s.total_out; e->idx.crc32 = crc32_end(pack_file); @@ -1215,9 +1217,9 @@ static int store_object( return 0; } -static void truncate_pack(struct sha1file_checkpoint *checkpoint) +static void truncate_pack(struct hashfile_checkpoint *checkpoint) { - if (sha1file_truncate(pack_file, checkpoint)) + if (hashfile_truncate(pack_file, checkpoint)) die_errno("cannot truncate pack to skip duplicate"); pack_size = checkpoint->offset; } @@ -1231,9 +1233,9 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) struct object_id oid; unsigned long hdrlen; off_t offset; - git_SHA_CTX c; + git_hash_ctx c; git_zstream s; - struct sha1file_checkpoint checkpoint; + struct hashfile_checkpoint 
checkpoint; int status = Z_OK; /* Determine if we should auto-checkpoint. */ @@ -1241,13 +1243,13 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) || (pack_size + 60 + len) < pack_size) cycle_packfile(); - sha1file_checkpoint(pack_file, &checkpoint); + hashfile_checkpoint(pack_file, &checkpoint); offset = checkpoint.offset; hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, out_buf, hdrlen); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, out_buf, hdrlen); crc32_begin(pack_file); @@ -1265,7 +1267,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) if (!n && feof(stdin)) die("EOF in data (%" PRIuMAX " bytes remaining)", len); - git_SHA1_Update(&c, in_buf, n); + the_hash_algo->update_fn(&c, in_buf, n); s.next_in = in_buf; s.avail_in = n; len -= n; @@ -1275,7 +1277,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) if (!s.avail_out || status == Z_STREAM_END) { size_t n = s.next_out - out_buf; - sha1write(pack_file, out_buf, n); + hashwrite(pack_file, out_buf, n); pack_size += n; s.next_out = out_buf; s.avail_out = out_sz; @@ -1291,7 +1293,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) } } git_deflate_end(&s); - git_SHA1_Final(oid.hash, &c); + the_hash_algo->final_fn(oid.hash, &c); if (oidout) oidcpy(oidout, &oid); @@ -1350,25 +1352,25 @@ static void *gfi_unpack_entry( { enum object_type type; struct packed_git *p = all_packs[oe->pack_id]; - if (p == pack_data && p->pack_size < (pack_size + 20)) { + if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) { /* The object is stored in the packfile we are writing to * and we have modified it since the last time we scanned * back to read a previously written object. If an old - * window covered [p->pack_size, p->pack_size + 20) its + * window covered [p->pack_size, p->pack_size + rawsz) its * data is stale and is not valid. Closing all windows * and updating the packfile length ensures we can read * the newly written data. */ close_pack_windows(p); - sha1flush(pack_file); + hashflush(pack_file); - /* We have to offer 20 bytes additional on the end of + /* We have to offer rawsz bytes additional on the end of * the packfile as the core unpacker code assumes the * footer is present at the file end and must promise - * at least 20 bytes within any window it maps. But + * at least rawsz bytes within any window it maps. But * we don't actually create the footer here. 
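The fast-import.c conversion above replaces the fixed git_SHA1_* calls and hard-coded 20s with the_hash_algo, so the same code can later serve hashes of a different size. A sketch of the resulting hashing idiom, assuming git's "cache.h" (which provides the_hash_algo, git_hash_ctx and xsnprintf); the function name is invented for the example:

#include "cache.h"

static void sketch_hash_blob(const void *data, size_t len,
			     struct object_id *oid)
{
	git_hash_ctx c;
	char hdr[32];
	int hdrlen = xsnprintf(hdr, sizeof(hdr), "blob %lu",
			       (unsigned long)len) + 1;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, hdrlen);	/* header including NUL */
	the_hash_algo->update_fn(&c, data, len);
	the_hash_algo->final_fn(oid->hash, &c);		/* rawsz bytes, not 20 */
}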
*/ - p->pack_size = pack_size + 20; + p->pack_size = pack_size + the_hash_algo->rawsz; } return unpack_entry(p, oe->idx.offset, &type, sizep); } @@ -2204,7 +2206,7 @@ static void construct_path_with_fanout(const char *hex_sha1, unsigned char fanout, char *path) { unsigned int i = 0, j = 0; - if (fanout >= 20) + if (fanout >= the_hash_algo->rawsz) die("Too large fanout (%u)", fanout); while (fanout) { path[i++] = hex_sha1[j++]; @@ -2212,8 +2214,8 @@ static void construct_path_with_fanout(const char *hex_sha1, path[i++] = '/'; fanout--; } - memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j); - path[i + GIT_SHA1_HEXSZ - j] = '\0'; + memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j); + path[i + the_hash_algo->hexsz - j] = '\0'; } static uintmax_t do_change_note_fanout( diff --git a/fetch-object.c b/fetch-object.c new file mode 100644 index 0000000000..853624f811 --- /dev/null +++ b/fetch-object.c @@ -0,0 +1,45 @@ +#include "cache.h" +#include "packfile.h" +#include "pkt-line.h" +#include "strbuf.h" +#include "transport.h" +#include "fetch-object.h" + +static void fetch_refs(const char *remote_name, struct ref *ref) +{ + struct remote *remote; + struct transport *transport; + int original_fetch_if_missing = fetch_if_missing; + + fetch_if_missing = 0; + remote = remote_get(remote_name); + if (!remote->url[0]) + die(_("Remote with no URL")); + transport = transport_get(remote, remote->url[0]); + + transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1"); + transport_fetch_refs(transport, ref); + fetch_if_missing = original_fetch_if_missing; +} + +void fetch_object(const char *remote_name, const unsigned char *sha1) +{ + struct ref *ref = alloc_ref(sha1_to_hex(sha1)); + hashcpy(ref->old_oid.hash, sha1); + fetch_refs(remote_name, ref); +} + +void fetch_objects(const char *remote_name, const struct oid_array *to_fetch) +{ + struct ref *ref = NULL; + int i; + + for (i = 0; i < to_fetch->nr; i++) { + struct ref *new_ref = alloc_ref(oid_to_hex(&to_fetch->oid[i])); + oidcpy(&new_ref->old_oid, &to_fetch->oid[i]); + new_ref->next = ref; + ref = new_ref; + } + fetch_refs(remote_name, ref); +} diff --git a/fetch-object.h b/fetch-object.h new file mode 100644 index 0000000000..4b269d07ed --- /dev/null +++ b/fetch-object.h @@ -0,0 +1,11 @@ +#ifndef FETCH_OBJECT_H +#define FETCH_OBJECT_H + +#include "sha1-array.h" + +extern void fetch_object(const char *remote_name, const unsigned char *sha1); + +extern void fetch_objects(const char *remote_name, + const struct oid_array *to_fetch); + +#endif diff --git a/fetch-pack.c b/fetch-pack.c index 9f6b07ad91..d97461296d 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -29,6 +29,7 @@ static int deepen_not_ok; static int fetch_fsck_objects = -1; static int transfer_fsck_objects = -1; static int agent_supported; +static int server_supports_filtering; static struct lock_file shallow_lock; static const char *alternate_shallow_file; @@ -260,8 +261,8 @@ static enum ack_type get_ack(int fd, struct object_id *result_oid) char *line = packet_read_line(fd, &len); const char *arg; - if (!len) - die(_("git fetch-pack: expected ACK/NAK, got EOF")); + if (!line) + die(_("git fetch-pack: expected ACK/NAK, got a flush packet")); if (!strcmp(line, "NAK")) return NAK; if (skip_prefix(line, "ACK ", &arg)) { @@ -379,6 +380,8 @@ static int find_common(struct fetch_pack_args *args, if (deepen_not_ok) strbuf_addstr(&c, " deepen-not"); if (agent_supported) strbuf_addf(&c, " agent=%s", git_user_agent_sanitized()); + if 
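fetch-object.c above is new: it wraps transport_fetch_refs() so a missing object can be demand-loaded from the promisor remote with fetch_if_missing temporarily disabled. A hypothetical caller, for illustration only, assuming the fetch-object.h declarations above and the repository_format_partial_clone global added in environment.c (assumed to be declared in cache.h):

#include "cache.h"
#include "sha1-array.h"
#include "fetch-object.h"

static void demand_load_missing(const struct oid_array *missing)
{
	/* only meaningful in a partial clone with a promisor remote */
	if (repository_format_partial_clone)
		fetch_objects(repository_format_partial_clone, missing);
}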
(args->filter_options.choice) + strbuf_addstr(&c, " filter"); packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf); strbuf_release(&c); } else @@ -407,6 +410,9 @@ static int find_common(struct fetch_pack_args *args, packet_buf_write(&req_buf, "deepen-not %s", s->string); } } + if (server_supports_filtering && args->filter_options.choice) + packet_buf_write(&req_buf, "filter %s", + args->filter_options.filter_spec); packet_buf_flush(&req_buf); state_len = req_buf.len; @@ -450,6 +456,8 @@ static int find_common(struct fetch_pack_args *args, flushes = 0; retval = -1; + if (args->no_dependents) + goto done; while ((oid = get_rev())) { packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid)); print_verbose(args, "have %s", oid_to_hex(oid)); @@ -709,6 +717,7 @@ static int everything_local(struct fetch_pack_args *args, { struct ref *ref; int retval; + int old_save_commit_buffer = save_commit_buffer; timestamp_t cutoff = 0; save_commit_buffer = 0; @@ -735,29 +744,31 @@ static int everything_local(struct fetch_pack_args *args, } } - if (!args->deepen) { - for_each_ref(mark_complete_oid, NULL); - for_each_cached_alternate(mark_alternate_complete); - commit_list_sort_by_date(&complete); - if (cutoff) - mark_recent_complete_commits(args, cutoff); - } + if (!args->no_dependents) { + if (!args->deepen) { + for_each_ref(mark_complete_oid, NULL); + for_each_cached_alternate(mark_alternate_complete); + commit_list_sort_by_date(&complete); + if (cutoff) + mark_recent_complete_commits(args, cutoff); + } - /* - * Mark all complete remote refs as common refs. - * Don't mark them common yet; the server has to be told so first. - */ - for (ref = *refs; ref; ref = ref->next) { - struct object *o = deref_tag(lookup_object(ref->old_oid.hash), - NULL, 0); + /* + * Mark all complete remote refs as common refs. + * Don't mark them common yet; the server has to be told so first. 
+ */ + for (ref = *refs; ref; ref = ref->next) { + struct object *o = deref_tag(lookup_object(ref->old_oid.hash), + NULL, 0); - if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE)) - continue; + if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE)) + continue; - if (!(o->flags & SEEN)) { - rev_list_push((struct commit *)o, COMMON_REF | SEEN); + if (!(o->flags & SEEN)) { + rev_list_push((struct commit *)o, COMMON_REF | SEEN); - mark_common((struct commit *)o, 1, 1); + mark_common((struct commit *)o, 1, 1); + } } } @@ -777,6 +788,9 @@ static int everything_local(struct fetch_pack_args *args, print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote), ref->name); } + + save_commit_buffer = old_save_commit_buffer; + return retval; } @@ -833,7 +847,7 @@ static int get_pack(struct fetch_pack_args *args, argv_array_push(&cmd.args, alternate_shallow_file); } - if (do_keep) { + if (do_keep || args->from_promisor) { if (pack_lockfile) cmd.out = -1; cmd_name = "index-pack"; @@ -843,7 +857,7 @@ static int get_pack(struct fetch_pack_args *args, argv_array_push(&cmd.args, "-v"); if (args->use_thin_pack) argv_array_push(&cmd.args, "--fix-thin"); - if (args->lock_pack || unpack_limit) { + if (do_keep && (args->lock_pack || unpack_limit)) { char hostname[HOST_NAME_MAX + 1]; if (xgethostname(hostname, sizeof(hostname))) xsnprintf(hostname, sizeof(hostname), "localhost"); @@ -853,6 +867,8 @@ static int get_pack(struct fetch_pack_args *args, } if (args->check_self_contained_and_connected) argv_array_push(&cmd.args, "--check-self-contained-and-connected"); + if (args->from_promisor) + argv_array_push(&cmd.args, "--promisor"); } else { cmd_name = "unpack-objects"; @@ -964,6 +980,13 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args, else prefer_ofs_delta = 0; + if (server_supports("filter")) { + server_supports_filtering = 1; + print_verbose(args, _("Server supports filter")); + } else if (args->filter_options.choice) { + warning("filtering not recognized by server, ignoring"); + } + if ((agent_feature = server_feature_value("agent", &agent_len))) { agent_supported = 1; if (agent_len) diff --git a/fetch-pack.h b/fetch-pack.h index b6aeb43a8e..3e224a1822 100644 --- a/fetch-pack.h +++ b/fetch-pack.h @@ -3,6 +3,7 @@ #include "string-list.h" #include "run-command.h" +#include "list-objects-filter-options.h" struct oid_array; @@ -12,6 +13,7 @@ struct fetch_pack_args { int depth; const char *deepen_since; const struct string_list *deepen_not; + struct list_objects_filter_options filter_options; unsigned deepen_relative:1; unsigned quiet:1; unsigned keep_pack:1; @@ -29,6 +31,14 @@ struct fetch_pack_args { unsigned cloning:1; unsigned update_shallow:1; unsigned deepen:1; + unsigned from_promisor:1; + + /* + * If 1, fetch_pack() will also not modify any object flags. + * This allows fetch_pack() to safely be called by any function, + * regardless of which object flags it uses (if any). + */ + unsigned no_dependents:1; }; /* diff --git a/fsmonitor.c b/fsmonitor.c index 0af7c4edba..6d7bcd5d0e 100644 --- a/fsmonitor.c +++ b/fsmonitor.c @@ -130,7 +130,7 @@ static void fsmonitor_refresh_callback(struct index_state *istate, const char *n * as it could be a new untracked file. 
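The fetch-pack changes above add three knobs to struct fetch_pack_args: filter_options (forwarded to the server as the "filter" request), from_promisor (which makes get_pack() pass --promisor to index-pack), and no_dependents (which skips the local-completeness walk and leaves object flags untouched). A sketch of how a caller might set them up; the values shown are examples, not taken from the patch:

#include "cache.h"
#include "fetch-pack.h"

static void configure_promisor_fetch(struct fetch_pack_args *args)
{
	memset(args, 0, sizeof(*args));
	/* parse_list_objects_filter() dies on an invalid filter-spec */
	parse_list_objects_filter(&args->filter_options, "blob:none");
	args->from_promisor = 1;	/* index-pack gets --promisor */
	args->no_dependents = 1;	/* do not walk or flag local objects */
}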
*/ trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name); - untracked_cache_invalidate_path(istate, name); + untracked_cache_invalidate_path(istate, name, 0); } void refresh_fsmonitor(struct index_state *istate) diff --git a/fsmonitor.h b/fsmonitor.h index cd3cc0ccf2..65f3743636 100644 --- a/fsmonitor.h +++ b/fsmonitor.h @@ -65,7 +65,7 @@ static inline void mark_fsmonitor_invalid(struct index_state *istate, struct cac { if (core_fsmonitor) { ce->ce_flags &= ~CE_FSMONITOR_VALID; - untracked_cache_invalidate_path(istate, ce->name); + untracked_cache_invalidate_path(istate, ce->name, 1); trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name); } } diff --git a/git-rebase--am.sh b/git-rebase--am.sh index c931891cbc..be3f068922 100644 --- a/git-rebase--am.sh +++ b/git-rebase--am.sh @@ -49,6 +49,7 @@ then # makes this easy git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \ $allow_rerere_autoupdate --right-only "$revisions" \ + $allow_empty_message \ ${restrict_revision+^$restrict_revision} ret=$? else diff --git a/git-rebase--interactive.sh b/git-rebase--interactive.sh index a613156bcb..331c8dfeac 100644 --- a/git-rebase--interactive.sh +++ b/git-rebase--interactive.sh @@ -283,7 +283,7 @@ pick_one () { test -d "$rewritten" && pick_one_preserving_merges "$@" && return - output eval git cherry-pick $allow_rerere_autoupdate \ + output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \ ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \ "$strategy_args" $empty_args $ff "$@" @@ -398,7 +398,7 @@ pick_one_preserving_merges () { --sq-quote "$gpg_sign_opt")} \ $allow_rerere_autoupdate "$merge_args" \ "$strategy_args" \ - -m $(git rev-parse --sq-quote "$msg_content") \ + -m "$(git rev-parse --sq-quote "$msg_content")" \ "$new_parents" then printf "%s\n" "$msg_content" > "$GIT_DIR"/MERGE_MSG @@ -408,6 +408,7 @@ pick_one_preserving_merges () { ;; *) output eval git cherry-pick $allow_rerere_autoupdate \ + $allow_empty_message \ ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \ "$strategy_args" "$@" || die_with_patch $sha1 "$(eval_gettext "Could not pick \$sha1")" @@ -561,7 +562,8 @@ do_next () { mark_action_done do_pick $sha1 "$rest" - git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} || { + git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} \ + $allow_empty_message || { warn "$(eval_gettext "\ Could not amend commit after successfully picking \$sha1... \$rest This is most likely due to an empty commit message, or the pre-commit hook @@ -609,7 +611,7 @@ you are able to reword the commit.")" # This is an intermediate commit; its message will only be # used in case of trouble. 
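untracked_cache_invalidate_path() now takes a safe_path flag: call sites whose paths already come from the index (fsmonitor.h, the read-cache helpers) pass 1, while fsmonitor.c passes 0 so verify_path() screens the externally supplied name first. A minimal sketch of the new calling convention, with an invented wrapper name:

#include "cache.h"
#include "dir.h"

static void invalidate_untrusted_path(struct index_state *istate,
				      const char *path)
{
	/* 0 => verify_path() runs before the untracked cache is touched */
	untracked_cache_invalidate_path(istate, path, 0);
}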
So use the long version: do_with_author output git commit --amend --no-verify -F "$squash_msg" \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die_failed_squash $sha1 "$rest" ;; *) @@ -617,13 +619,13 @@ you are able to reword the commit.")" if test -f "$fixup_msg" then do_with_author git commit --amend --no-verify -F "$fixup_msg" \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die_failed_squash $sha1 "$rest" else cp "$squash_msg" "$GIT_DIR"/SQUASH_MSG || exit rm -f "$GIT_DIR"/MERGE_MSG do_with_author git commit --amend --no-verify -F "$GIT_DIR"/SQUASH_MSG -e \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die_failed_squash $sha1 "$rest" fi rm -f "$squash_msg" "$fixup_msg" @@ -756,7 +758,8 @@ case "$action" in continue) if test ! -d "$rewritten" then - exec git rebase--helper ${force_rebase:+--no-ff} --continue + exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ + --continue fi # do we have anything to commit? if git diff-index --cached --quiet HEAD -- @@ -796,11 +799,11 @@ In both cases, once you're done, continue with: You have uncommitted changes in your working tree. Please commit them first and then run 'git rebase --continue' again.")" do_with_author git commit --amend --no-verify -F "$msg" -e \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die "$(gettext "Could not commit staged changes.")" else do_with_author git commit --no-verify -F "$msg" -e \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die "$(gettext "Could not commit staged changes.")" fi fi @@ -819,7 +822,8 @@ skip) if test ! -d "$rewritten" then - exec git rebase--helper ${force_rebase:+--no-ff} --continue + exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ + --continue fi do_rest return 0 @@ -1022,7 +1026,8 @@ checkout_onto if test -z "$rebase_root" && test ! -d "$rewritten" then require_clean_work_tree "rebase" - exec git rebase--helper ${force_rebase:+--no-ff} --continue + exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ + --continue fi do_rest diff --git a/git-rebase--merge.sh b/git-rebase--merge.sh index 957688f236..ceb715453c 100644 --- a/git-rebase--merge.sh +++ b/git-rebase--merge.sh @@ -27,7 +27,8 @@ continue_merge () { cmt=$(cat "$state_dir/current") if ! git diff-index --quiet --ignore-submodules HEAD -- then - if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} --no-verify -C "$cmt" + if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \ + --no-verify -C "$cmt" then echo "Commit failed, please do not call \"git commit\"" echo "directly, but instead do one of the following: " diff --git a/git-rebase.sh b/git-rebase.sh index a13a581fe6..a1f6e5de6a 100755 --- a/git-rebase.sh +++ b/git-rebase.sh @@ -24,6 +24,7 @@ m,merge! use merging strategies to rebase i,interactive! let the user edit the list of commits to rebase x,exec=! add exec lines after each commit of the editable list k,keep-empty preserve empty commits during rebase +allow-empty-message allow rebasing commits with empty messages f,force-rebase! force rebase even if branch is up to date X,strategy-option=! pass the argument through to the merge strategy stat! 
display a diffstat of what changed upstream @@ -90,6 +91,7 @@ action= preserve_merges= autosquash= keep_empty= +allow_empty_message= test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t case "$(git config --bool commit.gpgsign)" in true) gpg_sign_opt=-S ;; @@ -264,6 +266,9 @@ do --keep-empty) keep_empty=yes ;; + --allow-empty-message) + allow_empty_message=--allow-empty-message + ;; --preserve-merges) preserve_merges=t test -z "$interactive_rebase" && interactive_rebase=implied diff --git a/git-send-email.perl b/git-send-email.perl index 340b5c8482..435c7c9e26 100755 --- a/git-send-email.perl +++ b/git-send-email.perl @@ -26,7 +26,7 @@ use Text::ParseWords; use Term::ANSIColor; use File::Temp qw/ tempdir tempfile /; use File::Spec::Functions qw(catdir catfile); -use Error qw(:try); +use Git::Error qw(:try); use Cwd qw(abs_path cwd); use Git; use Git::I18N; @@ -379,6 +379,10 @@ unless ($rc) { die __("Cannot run git format-patch from outside a repository\n") if $format_patch and not $repo; +die __("`batch-size` and `relogin` must be specified together " . + "(via command-line or configuration option)\n") + if defined $relogin_delay and not defined $batch_size; + # Now, let's fill any that aren't set in with defaults: sub read_config { diff --git a/git-sh-i18n.sh b/git-sh-i18n.sh index 1ef1889dbd..9d065fb4bf 100644 --- a/git-sh-i18n.sh +++ b/git-sh-i18n.sh @@ -17,15 +17,15 @@ export TEXTDOMAINDIR # First decide what scheme to use... GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough -if test -n "@@USE_GETTEXT_SCHEME@@" +if test -n "$GIT_GETTEXT_POISON" +then + GIT_INTERNAL_GETTEXT_SH_SCHEME=poison +elif test -n "@@USE_GETTEXT_SCHEME@@" then GIT_INTERNAL_GETTEXT_SH_SCHEME="@@USE_GETTEXT_SCHEME@@" elif test -n "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS" then : no probing necessary -elif test -n "$GIT_GETTEXT_POISON" -then - GIT_INTERNAL_GETTEXT_SH_SCHEME=poison elif type gettext.sh >/dev/null 2>&1 then # GNU libintl's gettext.sh diff --git a/git-submodule.sh b/git-submodule.sh index 156255a9e5..24914963ca 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -428,60 +428,7 @@ cmd_deinit() shift done - if test -n "$deinit_all" && test "$#" -ne 0 - then - echo >&2 "$(eval_gettext "pathspec and --all are incompatible")" - usage - fi - if test $# = 0 && test -z "$deinit_all" - then - die "$(eval_gettext "Use '--all' if you really want to deinitialize all submodules")" - fi - - { - git submodule--helper list --prefix "$wt_prefix" "$@" || - echo "#unmatched" $? 
- } | - while read -r mode sha1 stage sm_path - do - die_if_unmatched "$mode" "$sha1" - name=$(git submodule--helper name "$sm_path") || exit - - displaypath=$(git submodule--helper relative-path "$sm_path" "$wt_prefix") - - # Remove the submodule work tree (unless the user already did it) - if test -d "$sm_path" - then - # Protect submodules containing a .git directory - if test -d "$sm_path/.git" - then - die "$(eval_gettext "\ -Submodule work tree '\$displaypath' contains a .git directory -(use 'rm -rf' if you really want to remove it including all of its history)")" - fi - - if test -z "$force" - then - git rm -qn "$sm_path" || - die "$(eval_gettext "Submodule work tree '\$displaypath' contains local modifications; use '-f' to discard them")" - fi - rm -rf "$sm_path" && - say "$(eval_gettext "Cleared directory '\$displaypath'")" || - say "$(eval_gettext "Could not remove submodule work tree '\$displaypath'")" - fi - - mkdir "$sm_path" || say "$(eval_gettext "Could not create empty submodule directory '\$displaypath'")" - - # Remove the .git/config entries (unless the user already did it) - if test -n "$(git config --get-regexp submodule."$name\.")" - then - # Remove the whole section so we have a clean state when - # the user later decides to init this submodule again - url=$(git config submodule."$name".url) - git config --remove-section submodule."$name" 2>/dev/null && - say "$(eval_gettext "Submodule '\$name' (\$url) unregistered for path '\$displaypath'")" - fi - done + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} "$@" } is_tip_reachable () ( @@ -1036,63 +983,8 @@ cmd_sync() ;; esac done - cd_to_toplevel - { - git submodule--helper list --prefix "$wt_prefix" "$@" || - echo "#unmatched" $? - } | - while read -r mode sha1 stage sm_path - do - die_if_unmatched "$mode" "$sha1" - - # skip inactive submodules - if ! git submodule--helper is-active "$sm_path" - then - continue - fi - - name=$(git submodule--helper name "$sm_path") - url=$(git config -f .gitmodules --get submodule."$name".url) - - # Possibly a url relative to parent - case "$url" in - ./*|../*) - # rewrite foo/bar as ../.. 
to find path from - # submodule work tree to superproject work tree - up_path="$(printf '%s\n' "$sm_path" | sed "s/[^/][^/]*/../g")" && - # guarantee a trailing / - up_path=${up_path%/}/ && - # path from submodule work tree to submodule origin repo - sub_origin_url=$(git submodule--helper resolve-relative-url "$url" "$up_path") && - # path from superproject work tree to submodule origin repo - super_config_url=$(git submodule--helper resolve-relative-url "$url") || exit - ;; - *) - sub_origin_url="$url" - super_config_url="$url" - ;; - esac - - displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix") - say "$(eval_gettext "Synchronizing submodule url for '\$displaypath'")" - git config submodule."$name".url "$super_config_url" - - if test -e "$sm_path"/.git - then - ( - sanitize_submodule_env - cd "$sm_path" - remote=$(get_default_remote) - git config remote."$remote".url "$sub_origin_url" - if test -n "$recursive" - then - prefix="$prefix$sm_path/" - eval cmd_sync - fi - ) - fi - done + git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@" } cmd_absorbgitdirs() diff --git a/git-svn.perl b/git-svn.perl index 76a75d0b3d..a6b6c3e40c 100755 --- a/git-svn.perl +++ b/git-svn.perl @@ -1200,6 +1200,11 @@ sub cmd_branch { $ctx->copy($src, $rev, $dst) unless $_dry_run; + # Release resources held by ctx before creating another SVN::Ra + # so destruction is orderly. This seems necessary with SVN 1.9.5 + # to avoid segfaults. + $ctx = undef; + $gs->fetch_all; } @@ -5,11 +5,11 @@ #include "run-command.h" const char git_usage_string[] = - "git [--version] [--help] [-C <path>] [-c name=value]\n" - " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n" - " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n" - " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n" - " <command> [<args>]"; + N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n" + " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n" + " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n" + " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n" + " <command> [<args>]"); const char git_more_info_string[] = N_("'git help -a' and 'git help -g' list available subcommands and some\n" @@ -92,7 +92,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--git-dir")) { if (*argc < 2) { - fprintf(stderr, "No directory given for --git-dir.\n" ); + fprintf(stderr, _("no directory given for --git-dir\n" )); usage(git_usage_string); } setenv(GIT_DIR_ENVIRONMENT, (*argv)[1], 1); @@ -106,7 +106,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--namespace")) { if (*argc < 2) { - fprintf(stderr, "No namespace given for --namespace.\n" ); + fprintf(stderr, _("no namespace given for --namespace\n" )); usage(git_usage_string); } setenv(GIT_NAMESPACE_ENVIRONMENT, (*argv)[1], 1); @@ -120,7 +120,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--work-tree")) { if (*argc < 2) { - fprintf(stderr, "No directory given for --work-tree.\n" ); + fprintf(stderr, _("no directory given for --work-tree\n" )); usage(git_usage_string); } setenv(GIT_WORK_TREE_ENVIRONMENT, (*argv)[1], 1); @@ -134,7 +134,7 @@ static int handle_options(const char ***argv, int *argc, int 
*envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--super-prefix")) { if (*argc < 2) { - fprintf(stderr, "No prefix given for --super-prefix.\n" ); + fprintf(stderr, _("no prefix given for --super-prefix\n" )); usage(git_usage_string); } setenv(GIT_SUPER_PREFIX_ENVIRONMENT, (*argv)[1], 1); @@ -156,7 +156,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "-c")) { if (*argc < 2) { - fprintf(stderr, "-c expects a configuration string\n" ); + fprintf(stderr, _("-c expects a configuration string\n" )); usage(git_usage_string); } git_config_push_parameter((*argv)[1]); @@ -194,12 +194,12 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "-C")) { if (*argc < 2) { - fprintf(stderr, "No directory given for -C.\n" ); + fprintf(stderr, _("no directory given for -C\n" )); usage(git_usage_string); } if ((*argv)[1][0]) { if (chdir((*argv)[1])) - die_errno("Cannot change to '%s'", (*argv)[1]); + die_errno("cannot change to '%s'", (*argv)[1]); if (envchanged) *envchanged = 1; } @@ -209,7 +209,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) list_builtins(); exit(0); } else { - fprintf(stderr, "Unknown option: %s\n", cmd); + fprintf(stderr, _("unknown option: %s\n"), cmd); usage(git_usage_string); } @@ -247,7 +247,7 @@ static int handle_alias(int *argcp, const char ***argv) if (ret >= 0) /* normal exit */ exit(ret); - die_errno("While expanding alias '%s': '%s'", + die_errno("while expanding alias '%s': '%s'", alias_command, alias_string + 1); } count = split_cmdline(alias_string, &new_argv); @@ -256,8 +256,8 @@ static int handle_alias(int *argcp, const char ***argv) split_cmdline_strerror(count)); option_count = handle_options(&new_argv, &count, &envchanged); if (envchanged) - die("alias '%s' changes environment variables\n" - "You can use '!git' in the alias to do this.", + die("alias '%s' changes environment variables.\n" + "You can use '!git' in the alias to do this", alias_command); memmove(new_argv - option_count, new_argv, count * sizeof(char *)); @@ -684,8 +684,8 @@ int cmd_main(int argc, const char **argv) if (errno != ENOENT) break; if (was_alias) { - fprintf(stderr, "Expansion of alias '%s' failed; " - "'%s' is not a git command\n", + fprintf(stderr, _("expansion of alias '%s' failed; " + "'%s' is not a git command\n"), cmd, argv[0]); exit(1); } @@ -696,7 +696,7 @@ int cmd_main(int argc, const char **argv) break; } - fprintf(stderr, "Failed to run command '%s': %s\n", + fprintf(stderr, _("failed to run command '%s': %s\n"), cmd, strerror(errno)); return 1; @@ -15,6 +15,31 @@ #include "block-sha1/sha1.h" #endif +#ifndef platform_SHA_CTX +/* + * platform's underlying implementation of SHA-1; could be OpenSSL, + * blk_SHA, Apple CommonCrypto, etc... Note that the relevant + * SHA-1 header may have already defined platform_SHA_CTX for our + * own implementations like block-sha1 and ppc-sha1, so we list + * the default for OpenSSL compatible SHA-1 implementations here. 
+ */ +#define platform_SHA_CTX SHA_CTX +#define platform_SHA1_Init SHA1_Init +#define platform_SHA1_Update SHA1_Update +#define platform_SHA1_Final SHA1_Final +#endif + +#define git_SHA_CTX platform_SHA_CTX +#define git_SHA1_Init platform_SHA1_Init +#define git_SHA1_Update platform_SHA1_Update +#define git_SHA1_Final platform_SHA1_Final + +#ifdef SHA1_MAX_BLOCK_SIZE +#include "compat/sha1-chunked.h" +#undef git_SHA1_Update +#define git_SHA1_Update git_SHA1_Update_Chunked +#endif + /* * Note that these constants are suitable for indexing the hash_algos array and * comparing against each other, but are otherwise arbitrary, so they should not @@ -30,9 +55,15 @@ /* Number of algorithms supported (including unknown). */ #define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1) -typedef void (*git_hash_init_fn)(void *ctx); -typedef void (*git_hash_update_fn)(void *ctx, const void *in, size_t len); -typedef void (*git_hash_final_fn)(unsigned char *hash, void *ctx); +/* A suitably aligned type for stack allocations of hash contexts. */ +union git_hash_ctx { + git_SHA_CTX sha1; +}; +typedef union git_hash_ctx git_hash_ctx; + +typedef void (*git_hash_init_fn)(git_hash_ctx *ctx); +typedef void (*git_hash_update_fn)(git_hash_ctx *ctx, const void *in, size_t len); +typedef void (*git_hash_final_fn)(unsigned char *hash, git_hash_ctx *ctx); struct git_hash_algo { /* @@ -44,9 +75,6 @@ struct git_hash_algo { /* A four-byte version identifier, used in pack indices. */ uint32_t format_id; - /* The size of a hash context (e.g. git_SHA_CTX). */ - size_t ctxsz; - /* The length of the hash in binary. */ size_t rawsz; @@ -400,7 +400,6 @@ static inline void hashmap_disable_item_counting(struct hashmap *map) */ static inline void hashmap_enable_item_counting(struct hashmap *map) { - void *item; unsigned int n = 0; struct hashmap_iter iter; @@ -408,7 +407,7 @@ static inline void hashmap_enable_item_counting(struct hashmap *map) return; hashmap_iter_init(map, &iter); - while ((item = hashmap_iter_next(&iter))) + while (hashmap_iter_next(&iter)) n++; map->do_count_items = 1; diff --git a/http-push.c b/http-push.c index 14435ab65d..0913f8ab86 100644 --- a/http-push.c +++ b/http-push.c @@ -915,6 +915,10 @@ static struct remote_lock *lock_remote(const char *path, long timeout) lock->timeout = -1; } XML_ParserFree(parser); + } else { + fprintf(stderr, + "error: curl result=%d, HTTP code=%ld\n", + results.curl_result, results.http_code); } } else { fprintf(stderr, "Unable to start LOCK request\n"); diff --git a/http-walker.c b/http-walker.c index 1ae8363de2..07c2b1af82 100644 --- a/http-walker.c +++ b/http-walker.c @@ -544,8 +544,10 @@ static int fetch_object(struct walker *walker, unsigned char *sha1) } else if (hashcmp(obj_req->sha1, req->real_sha1)) { ret = error("File %s has bad hash", hex); } else if (req->rename < 0) { - ret = error("unable to write sha1 filename %s", - sha1_file_name(req->sha1)); + struct strbuf buf = STRBUF_INIT; + sha1_file_name(&buf, req->sha1); + ret = error("unable to write sha1 filename %s", buf.buf); + strbuf_release(&buf); } release_http_object_request(req); @@ -13,8 +13,11 @@ #include "transport.h" #include "packfile.h" #include "protocol.h" +#include "string-list.h" static struct trace_key trace_curl = TRACE_KEY_INIT(CURL); +static int trace_curl_data = 1; +static struct string_list cookies_to_redact = STRING_LIST_INIT_DUP; #if LIBCURL_VERSION_NUM >= 0x070a08 long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER; #else @@ -575,6 +578,54 @@ static void redact_sensitive_header(struct strbuf *header) /* 
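hash.h now defines the git_hash_*_fn callbacks over a git_hash_ctx union instead of a void pointer, which is why the ctxsz field can be dropped: the union is already large and aligned enough for any supported algorithm. Roughly what a SHA-1 vtable entry looks like under the new typedefs (the real wrappers live elsewhere in the tree; these names are for illustration only):

#include "cache.h"

static void sketch_sha1_init(git_hash_ctx *ctx)
{
	git_SHA1_Init(&ctx->sha1);
}

static void sketch_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
{
	git_SHA1_Update(&ctx->sha1, data, len);
}

static void sketch_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
{
	git_SHA1_Final(hash, &ctx->sha1);
}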
Everything else is opaque and possibly sensitive */ strbuf_setlen(header, sensitive_header - header->buf); strbuf_addstr(header, " <redacted>"); + } else if (cookies_to_redact.nr && + skip_prefix(header->buf, "Cookie:", &sensitive_header)) { + struct strbuf redacted_header = STRBUF_INIT; + char *cookie; + + while (isspace(*sensitive_header)) + sensitive_header++; + + /* + * The contents of header starting from sensitive_header will + * subsequently be overridden, so it is fine to mutate this + * string (hence the assignment to "char *"). + */ + cookie = (char *) sensitive_header; + + while (cookie) { + char *equals; + char *semicolon = strstr(cookie, "; "); + if (semicolon) + *semicolon = 0; + equals = strchrnul(cookie, '='); + if (!equals) { + /* invalid cookie, just append and continue */ + strbuf_addstr(&redacted_header, cookie); + continue; + } + *equals = 0; /* temporarily set to NUL for lookup */ + if (string_list_lookup(&cookies_to_redact, cookie)) { + strbuf_addstr(&redacted_header, cookie); + strbuf_addstr(&redacted_header, "=<redacted>"); + } else { + *equals = '='; + strbuf_addstr(&redacted_header, cookie); + } + if (semicolon) { + /* + * There are more cookies. (Or, for some + * reason, the input string ends in "; ".) + */ + strbuf_addstr(&redacted_header, "; "); + cookie = semicolon + strlen("; "); + } else { + cookie = NULL; + } + } + + strbuf_setlen(header, sensitive_header - header->buf); + strbuf_addbuf(header, &redacted_header); } } @@ -645,24 +696,32 @@ static int curl_trace(CURL *handle, curl_infotype type, char *data, size_t size, curl_dump_header(text, (unsigned char *)data, size, DO_FILTER); break; case CURLINFO_DATA_OUT: - text = "=> Send data"; - curl_dump_data(text, (unsigned char *)data, size); + if (trace_curl_data) { + text = "=> Send data"; + curl_dump_data(text, (unsigned char *)data, size); + } break; case CURLINFO_SSL_DATA_OUT: - text = "=> Send SSL data"; - curl_dump_data(text, (unsigned char *)data, size); + if (trace_curl_data) { + text = "=> Send SSL data"; + curl_dump_data(text, (unsigned char *)data, size); + } break; case CURLINFO_HEADER_IN: text = "<= Recv header"; curl_dump_header(text, (unsigned char *)data, size, NO_FILTER); break; case CURLINFO_DATA_IN: - text = "<= Recv data"; - curl_dump_data(text, (unsigned char *)data, size); + if (trace_curl_data) { + text = "<= Recv data"; + curl_dump_data(text, (unsigned char *)data, size); + } break; case CURLINFO_SSL_DATA_IN: - text = "<= Recv SSL data"; - curl_dump_data(text, (unsigned char *)data, size); + if (trace_curl_data) { + text = "<= Recv SSL data"; + curl_dump_data(text, (unsigned char *)data, size); + } break; default: /* we ignore unknown types by default */ @@ -807,6 +866,13 @@ static CURL *get_curl_handle(void) if (getenv("GIT_CURL_VERBOSE")) curl_easy_setopt(result, CURLOPT_VERBOSE, 1L); setup_curl_trace(result); + if (getenv("GIT_TRACE_CURL_NO_DATA")) + trace_curl_data = 0; + if (getenv("GIT_REDACT_COOKIES")) { + string_list_split(&cookies_to_redact, + getenv("GIT_REDACT_COOKIES"), ',', -1); + string_list_sort(&cookies_to_redact); + } curl_easy_setopt(result, CURLOPT_USERAGENT, user_agent ? 
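The new GIT_REDACT_COOKIES handling splits the environment variable into a sorted string_list so each cookie name seen in the curl trace can be checked with string_list_lookup(). A stand-alone sketch of that split/sort/lookup pattern; the cookie names here are invented:

#include "cache.h"
#include "string-list.h"

static int cookie_is_redacted(const char *name)
{
	static struct string_list redact = STRING_LIST_INIT_DUP;

	if (!redact.nr) {
		/* e.g. GIT_REDACT_COOKIES=sid,auth_token */
		string_list_split(&redact, "sid,auth_token", ',', -1);
		string_list_sort(&redact);	/* required for lookup() */
	}
	return string_list_lookup(&redact, name) != NULL;
}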
user_agent : git_user_agent()); @@ -2168,7 +2234,7 @@ struct http_object_request *new_http_object_request(const char *base_url, unsigned char *sha1) { char *hex = sha1_to_hex(sha1); - const char *filename; + struct strbuf filename = STRBUF_INIT; char prevfile[PATH_MAX]; int prevlocal; char prev_buf[PREV_BUF_SIZE]; @@ -2180,14 +2246,15 @@ struct http_object_request *new_http_object_request(const char *base_url, hashcpy(freq->sha1, sha1); freq->localfile = -1; - filename = sha1_file_name(sha1); + sha1_file_name(&filename, sha1); snprintf(freq->tmpfile, sizeof(freq->tmpfile), - "%s.temp", filename); + "%s.temp", filename.buf); - snprintf(prevfile, sizeof(prevfile), "%s.prev", filename); + snprintf(prevfile, sizeof(prevfile), "%s.prev", filename.buf); unlink_or_warn(prevfile); rename(freq->tmpfile, prevfile); unlink_or_warn(freq->tmpfile); + strbuf_release(&filename); if (freq->localfile != -1) error("fd leakage in start: %d", freq->localfile); @@ -2302,6 +2369,7 @@ void process_http_object_request(struct http_object_request *freq) int finish_http_object_request(struct http_object_request *freq) { struct stat st; + struct strbuf filename = STRBUF_INIT; close(freq->localfile); freq->localfile = -1; @@ -2327,8 +2395,10 @@ int finish_http_object_request(struct http_object_request *freq) unlink_or_warn(freq->tmpfile); return -1; } - freq->rename = - finalize_object_file(freq->tmpfile, sha1_file_name(freq->sha1)); + + sha1_file_name(&filename, freq->sha1); + freq->rename = finalize_object_file(freq->tmpfile, filename.buf); + strbuf_release(&filename); return freq->rename; } diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c index 4c5b34e949..6a3cc985c4 100644 --- a/list-objects-filter-options.c +++ b/list-objects-filter-options.c @@ -21,29 +21,36 @@ * subordinate commands when necessary. We also "intern" the arg for * the convenience of the current command. 
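sha1_file_name() now appends into a caller-provided strbuf rather than returning a pointer to a shared static buffer, so http.c and http-walker.c own and release the buffer themselves. A minimal sketch of the new convention (the wrapper name is invented):

#include "cache.h"

static void report_loose_object_path(const unsigned char *sha1)
{
	struct strbuf path = STRBUF_INIT;

	sha1_file_name(&path, sha1);	/* appends the loose-object path */
	fprintf(stderr, "would be stored at %s\n", path.buf);
	strbuf_release(&path);
}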
*/ -int parse_list_objects_filter(struct list_objects_filter_options *filter_options, - const char *arg) +static int gently_parse_list_objects_filter( + struct list_objects_filter_options *filter_options, + const char *arg, + struct strbuf *errbuf) { const char *v0; - if (filter_options->choice) - die(_("multiple object filter types cannot be combined")); + if (filter_options->choice) { + if (errbuf) { + strbuf_init(errbuf, 0); + strbuf_addstr( + errbuf, + _("multiple filter-specs cannot be combined")); + } + return 1; + } filter_options->filter_spec = strdup(arg); if (!strcmp(arg, "blob:none")) { filter_options->choice = LOFC_BLOB_NONE; return 0; - } - if (skip_prefix(arg, "blob:limit=", &v0)) { - if (!git_parse_ulong(v0, &filter_options->blob_limit_value)) - die(_("invalid filter-spec expression '%s'"), arg); - filter_options->choice = LOFC_BLOB_LIMIT; - return 0; - } + } else if (skip_prefix(arg, "blob:limit=", &v0)) { + if (git_parse_ulong(v0, &filter_options->blob_limit_value)) { + filter_options->choice = LOFC_BLOB_LIMIT; + return 0; + } - if (skip_prefix(arg, "sparse:oid=", &v0)) { + } else if (skip_prefix(arg, "sparse:oid=", &v0)) { struct object_context oc; struct object_id sparse_oid; @@ -57,15 +64,27 @@ int parse_list_objects_filter(struct list_objects_filter_options *filter_options filter_options->sparse_oid_value = oiddup(&sparse_oid); filter_options->choice = LOFC_SPARSE_OID; return 0; - } - if (skip_prefix(arg, "sparse:path=", &v0)) { + } else if (skip_prefix(arg, "sparse:path=", &v0)) { filter_options->choice = LOFC_SPARSE_PATH; filter_options->sparse_path_value = strdup(v0); return 0; } - die(_("invalid filter-spec expression '%s'"), arg); + if (errbuf) { + strbuf_init(errbuf, 0); + strbuf_addf(errbuf, "invalid filter-spec '%s'", arg); + } + memset(filter_options, 0, sizeof(*filter_options)); + return 1; +} + +int parse_list_objects_filter(struct list_objects_filter_options *filter_options, + const char *arg) +{ + struct strbuf buf = STRBUF_INIT; + if (gently_parse_list_objects_filter(filter_options, arg, &buf)) + die("%s", buf.buf); return 0; } @@ -75,7 +94,7 @@ int opt_parse_list_objects_filter(const struct option *opt, struct list_objects_filter_options *filter_options = opt->value; if (unset || !arg) { - list_objects_filter_release(filter_options); + list_objects_filter_set_no_filter(filter_options); return 0; } @@ -90,3 +109,44 @@ void list_objects_filter_release( free(filter_options->sparse_path_value); memset(filter_options, 0, sizeof(*filter_options)); } + +void partial_clone_register( + const char *remote, + const struct list_objects_filter_options *filter_options) +{ + /* + * Record the name of the partial clone remote in the + * config and in the global variable -- the latter is + * used throughout to indicate that partial clone is + * enabled and to expect missing objects. + */ + if (repository_format_partial_clone && + *repository_format_partial_clone && + strcmp(remote, repository_format_partial_clone)) + die(_("cannot change partial clone promisor remote")); + + git_config_set("core.repositoryformatversion", "1"); + git_config_set("extensions.partialclone", remote); + + repository_format_partial_clone = xstrdup(remote); + + /* + * Record the initial filter-spec in the config as + * the default for subsequent fetches from this remote. 
+ */ + core_partial_clone_filter_default = + xstrdup(filter_options->filter_spec); + git_config_set("core.partialclonefilter", + core_partial_clone_filter_default); +} + +void partial_clone_get_default_filter_spec( + struct list_objects_filter_options *filter_options) +{ + /* + * Parse default value, but silently ignore it if it is invalid. + */ + gently_parse_list_objects_filter(filter_options, + core_partial_clone_filter_default, + NULL); +} diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h index eea44a1a51..0000a61f82 100644 --- a/list-objects-filter-options.h +++ b/list-objects-filter-options.h @@ -31,6 +31,11 @@ struct list_objects_filter_options { enum list_objects_filter_choice choice; /* + * Choice is LOFC_DISABLED because "--no-filter" was requested. + */ + unsigned int no_filter : 1; + + /* * Parsed values (fields) from within the filter-spec. These are * choice-specific; not all values will be defined for any given * choice. @@ -58,4 +63,17 @@ int opt_parse_list_objects_filter(const struct option *opt, void list_objects_filter_release( struct list_objects_filter_options *filter_options); +static inline void list_objects_filter_set_no_filter( + struct list_objects_filter_options *filter_options) +{ + list_objects_filter_release(filter_options); + filter_options->no_filter = 1; +} + +void partial_clone_register( + const char *remote, + const struct list_objects_filter_options *filter_options); +void partial_clone_get_default_filter_spec( + struct list_objects_filter_options *filter_options); + #endif /* LIST_OBJECTS_FILTER_OPTIONS_H */ diff --git a/list-objects.c b/list-objects.c index 0966cdc9fa..168bef688a 100644 --- a/list-objects.c +++ b/list-objects.c @@ -9,6 +9,7 @@ #include "list-objects.h" #include "list-objects-filter.h" #include "list-objects-filter-options.h" +#include "packfile.h" static void process_blob(struct rev_info *revs, struct blob *blob, @@ -30,6 +31,20 @@ static void process_blob(struct rev_info *revs, if (obj->flags & (UNINTERESTING | SEEN)) return; + /* + * Pre-filter known-missing objects when explicitly requested. + * Otherwise, a missing object error message may be reported + * later (depending on other filtering criteria). + * + * Note that this "--exclude-promisor-objects" pre-filtering + * may cause the actual filter to report an incomplete list + * of missing objects. + */ + if (revs->exclude_promisor_objects && + !has_object_file(&obj->oid) && + is_promisor_object(&obj->oid)) + return; + pathlen = path->len; strbuf_addstr(path, name); if (filter_fn) @@ -91,6 +106,8 @@ static void process_tree(struct rev_info *revs, all_entries_interesting: entry_not_interesting; int baselen = base->len; enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW; + int gently = revs->ignore_missing_links || + revs->exclude_promisor_objects; if (!revs->tree_objects) return; @@ -98,9 +115,19 @@ static void process_tree(struct rev_info *revs, die("bad tree object"); if (obj->flags & (UNINTERESTING | SEEN)) return; - if (parse_tree_gently(tree, revs->ignore_missing_links) < 0) { + if (parse_tree_gently(tree, gently) < 0) { if (revs->ignore_missing_links) return; + + /* + * Pre-filter known-missing tree objects when explicitly + * requested. This may cause the actual filter to report + * an incomplete list of missing objects. 
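partial_clone_register() records the promisor remote and the default filter-spec in the repository config, bumping core.repositoryformatversion to 1 and setting extensions.partialclone. A sketch of the intended call after a filtered clone; the remote name "origin" is only an example:

#include "cache.h"
#include "list-objects-filter-options.h"

static void remember_partial_clone(struct list_objects_filter_options *filter)
{
	if (filter->choice)
		/* writes extensions.partialclone and core.partialclonefilter */
		partial_clone_register("origin", filter);
}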
+ */ + if (revs->exclude_promisor_objects && + is_promisor_object(&obj->oid)) + return; + die("bad tree object %s", oid_to_hex(&obj->oid)); } diff --git a/log-tree.c b/log-tree.c index fca29d4799..fc0cc0d6d1 100644 --- a/log-tree.c +++ b/log-tree.c @@ -499,7 +499,7 @@ static void show_one_mergetag(struct commit *commit, int status, nth; size_t payload_size, gpg_message_offset; - hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), oid.hash); + hash_object_file(extra->value, extra->len, typename(OBJ_TAG), &oid); tag = lookup_tag(&oid); if (!tag) return; /* error message already given */ diff --git a/mailinfo.c b/mailinfo.c index a89db22ab0..d04142ccc7 100644 --- a/mailinfo.c +++ b/mailinfo.c @@ -1167,11 +1167,13 @@ void clear_mailinfo(struct mailinfo *mi) strbuf_release(&mi->inbody_header_accum); free(mi->message_id); - for (i = 0; mi->p_hdr_data[i]; i++) - strbuf_release(mi->p_hdr_data[i]); + if (mi->p_hdr_data) + for (i = 0; mi->p_hdr_data[i]; i++) + strbuf_release(mi->p_hdr_data[i]); free(mi->p_hdr_data); - for (i = 0; mi->s_hdr_data[i]; i++) - strbuf_release(mi->s_hdr_data[i]); + if (mi->s_hdr_data) + for (i = 0; mi->s_hdr_data[i]; i++) + strbuf_release(mi->s_hdr_data[i]); free(mi->s_hdr_data); while (mi->content < mi->content_top) { diff --git a/match-trees.c b/match-trees.c index 396b7338df..0ca99d5162 100644 --- a/match-trees.c +++ b/match-trees.c @@ -158,22 +158,20 @@ static void match_trees(const struct object_id *hash1, } /* - * A tree "hash1" has a subdirectory at "prefix". Come up with a - * tree object by replacing it with another tree "hash2". + * A tree "oid1" has a subdirectory at "prefix". Come up with a tree object by + * replacing it with another tree "oid2". */ -static int splice_tree(const unsigned char *hash1, - const char *prefix, - const unsigned char *hash2, - unsigned char *result) +static int splice_tree(const struct object_id *oid1, const char *prefix, + const struct object_id *oid2, struct object_id *result) { char *subpath; int toplen; char *buf; unsigned long sz; struct tree_desc desc; - unsigned char *rewrite_here; - const unsigned char *rewrite_with; - unsigned char subtree[20]; + struct object_id *rewrite_here; + const struct object_id *rewrite_with; + struct object_id subtree; enum object_type type; int status; @@ -182,9 +180,9 @@ static int splice_tree(const unsigned char *hash1, if (*subpath) subpath++; - buf = read_sha1_file(hash1, &type, &sz); + buf = read_sha1_file(oid1->hash, &type, &sz); if (!buf) - die("cannot read tree %s", sha1_to_hex(hash1)); + die("cannot read tree %s", oid_to_hex(oid1)); init_tree_desc(&desc, buf, sz); rewrite_here = NULL; @@ -197,26 +195,26 @@ static int splice_tree(const unsigned char *hash1, if (strlen(name) == toplen && !memcmp(name, prefix, toplen)) { if (!S_ISDIR(mode)) - die("entry %s in tree %s is not a tree", - name, sha1_to_hex(hash1)); - rewrite_here = (unsigned char *) oid->hash; + die("entry %s in tree %s is not a tree", name, + oid_to_hex(oid1)); + rewrite_here = (struct object_id *)oid; break; } update_tree_entry(&desc); } if (!rewrite_here) - die("entry %.*s not found in tree %s", - toplen, prefix, sha1_to_hex(hash1)); + die("entry %.*s not found in tree %s", toplen, prefix, + oid_to_hex(oid1)); if (*subpath) { - status = splice_tree(rewrite_here, subpath, hash2, subtree); + status = splice_tree(rewrite_here, subpath, oid2, &subtree); if (status) return status; - rewrite_with = subtree; + rewrite_with = &subtree; + } else { + rewrite_with = oid2; } - else - rewrite_with = hash2; - hashcpy(rewrite_here, 
rewrite_with); - status = write_sha1_file(buf, sz, tree_type, result); + oidcpy(rewrite_here, rewrite_with); + status = write_object_file(buf, sz, tree_type, result); free(buf); return status; } @@ -280,7 +278,7 @@ void shift_tree(const struct object_id *hash1, if (!*add_prefix) return; - splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash); + splice_tree(hash1, add_prefix, hash2, shifted); } /* @@ -334,7 +332,7 @@ void shift_tree_by(const struct object_id *hash1, * shift tree2 down by adding shift_prefix above it * to match tree1. */ - splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash); + splice_tree(hash1, shift_prefix, hash2, shifted); else /* * shift tree2 up by removing shift_prefix from it diff --git a/merge-recursive.c b/merge-recursive.c index cc5fa0a949..6ff971f9a2 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -513,6 +513,25 @@ static void record_df_conflict_files(struct merge_options *o, struct rename { struct diff_filepair *pair; + /* + * Purpose of src_entry and dst_entry: + * + * If 'before' is renamed to 'after' then src_entry will contain + * the versions of 'before' from the merge_base, HEAD, and MERGE in + * stages 1, 2, and 3; dst_entry will contain the respective + * versions of 'after' in corresponding locations. Thus, we have a + * total of six modes and oids, though some will be null. (Stage 0 + * is ignored; we're interested in handling conflicts.) + * + * Since we don't turn on break-rewrites by default, neither + * src_entry nor dst_entry can have all three of their stages have + * non-null oids, meaning at most four of the six will be non-null. + * Also, since this is a rename, both src_entry and dst_entry will + * have at least one non-null oid, meaning at least two will be + * non-null. Of the six oids, a typical rename will have three be + * non-null. Only two implies a rename/delete, and four implies a + * rename/add. + */ struct stage_data *src_entry; struct stage_data *dst_entry; unsigned processed:1; @@ -1009,8 +1028,9 @@ static int merge_file_1(struct merge_options *o, if ((merge_status < 0) || !result_buf.ptr) ret = err(o, _("Failed to execute internal merge")); - if (!ret && write_sha1_file(result_buf.ptr, result_buf.size, - blob_type, result->oid.hash)) + if (!ret && + write_object_file(result_buf.ptr, result_buf.size, + blob_type, &result->oid)) ret = err(o, _("Unable to add %s to database"), a->path); @@ -1998,10 +2018,10 @@ int merge_trees(struct merge_options *o, get_files_dirs(o, merge); entries = get_unmerged(); - record_df_conflict_files(o, entries); re_head = get_renames(o, head, common, head, merge, entries); re_merge = get_renames(o, merge, common, head, merge, entries); clean = process_renames(o, re_head, re_merge); + record_df_conflict_files(o, entries); if (clean < 0) goto cleanup; for (i = entries->nr-1; 0 <= i; i--) { diff --git a/mru.c b/mru.c deleted file mode 100644 index 9dedae0287..0000000000 --- a/mru.c +++ /dev/null @@ -1,50 +0,0 @@ -#include "cache.h" -#include "mru.h" - -void mru_append(struct mru *mru, void *item) -{ - struct mru_entry *cur = xmalloc(sizeof(*cur)); - cur->item = item; - cur->prev = mru->tail; - cur->next = NULL; - - if (mru->tail) - mru->tail->next = cur; - else - mru->head = cur; - mru->tail = cur; -} - -void mru_mark(struct mru *mru, struct mru_entry *entry) -{ - /* If we're already at the front of the list, nothing to do */ - if (mru->head == entry) - return; - - /* Otherwise, remove us from our current slot... 
*/ - if (entry->prev) - entry->prev->next = entry->next; - if (entry->next) - entry->next->prev = entry->prev; - else - mru->tail = entry->prev; - - /* And insert us at the beginning. */ - entry->prev = NULL; - entry->next = mru->head; - if (mru->head) - mru->head->prev = entry; - mru->head = entry; -} - -void mru_clear(struct mru *mru) -{ - struct mru_entry *p = mru->head; - - while (p) { - struct mru_entry *to_free = p; - p = p->next; - free(to_free); - } - mru->head = mru->tail = NULL; -} diff --git a/mru.h b/mru.h deleted file mode 100644 index 42e4aeaa10..0000000000 --- a/mru.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef MRU_H -#define MRU_H - -/** - * A simple most-recently-used cache, backed by a doubly-linked list. - * - * Usage is roughly: - * - * // Create a list. Zero-initialization is required. - * static struct mru cache; - * mru_append(&cache, item); - * ... - * - * // Iterate in MRU order. - * struct mru_entry *p; - * for (p = cache.head; p; p = p->next) { - * if (matches(p->item)) - * break; - * } - * - * // Mark an item as used, moving it to the front of the list. - * mru_mark(&cache, p); - * - * // Reset the list to empty, cleaning up all resources. - * mru_clear(&cache); - * - * Note that you SHOULD NOT call mru_mark() and then continue traversing the - * list; it reorders the marked item to the front of the list, and therefore - * you will begin traversing the whole list again. - */ - -struct mru_entry { - void *item; - struct mru_entry *prev, *next; -}; - -struct mru { - struct mru_entry *head, *tail; -}; - -void mru_append(struct mru *mru, void *item); -void mru_mark(struct mru *mru, struct mru_entry *entry); -void mru_clear(struct mru *mru); - -#endif /* MRU_H */ diff --git a/name-hash.c b/name-hash.c index 45c98db0a0..163849831c 100644 --- a/name-hash.c +++ b/name-hash.c @@ -578,6 +578,8 @@ static void threaded_lazy_init_name_hash( static void lazy_init_name_hash(struct index_state *istate) { + uint64_t start = getnanotime(); + if (istate->name_hash_initialized) return; hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr); @@ -600,6 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate) } istate->name_hash_initialized = 1; + trace_performance_since(start, "initialize name hash"); } /* @@ -696,12 +699,12 @@ void adjust_dirname_case(struct index_state *istate, char *name) if (*ptr == '/') { struct dir_entry *dir; - ptr++; - dir = find_dir_entry(istate, name, ptr - name + 1); + dir = find_dir_entry(istate, name, ptr - name); if (dir) { memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr); - startPtr = ptr; + startPtr = ptr + 1; } + ptr++; } } } diff --git a/notes-cache.c b/notes-cache.c index 17ee8602b3..398e61d5e9 100644 --- a/notes-cache.c +++ b/notes-cache.c @@ -54,10 +54,10 @@ int notes_cache_write(struct notes_cache *c) if (!c->tree.dirty) return 0; - if (write_notes_tree(&c->tree, tree_oid.hash)) + if (write_notes_tree(&c->tree, &tree_oid)) return -1; - if (commit_tree(c->validity, strlen(c->validity), tree_oid.hash, NULL, - commit_oid.hash, NULL, NULL) < 0) + if (commit_tree(c->validity, strlen(c->validity), &tree_oid, NULL, + &commit_oid, NULL, NULL) < 0) return -1; if (update_ref("update notes cache", c->tree.update_ref, &commit_oid, NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0) @@ -88,7 +88,7 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid, { struct object_id value_oid; - if (write_sha1_file(data, size, "blob", value_oid.hash) < 0) + if (write_object_file(data, size, "blob", &value_oid) < 0) 
return -1; return add_note(&c->tree, key_oid, &value_oid, NULL); } diff --git a/notes-merge.c b/notes-merge.c index 0f6573cb17..c09c5e0e47 100644 --- a/notes-merge.c +++ b/notes-merge.c @@ -642,9 +642,8 @@ int notes_merge(struct notes_merge_options *o, struct commit_list *parents = NULL; commit_list_insert(remote, &parents); /* LIFO order */ commit_list_insert(local, &parents); - create_notes_commit(local_tree, parents, - o->commit_msg.buf, o->commit_msg.len, - result_oid->hash); + create_notes_commit(local_tree, parents, o->commit_msg.buf, + o->commit_msg.len, result_oid); } found_result: @@ -718,8 +717,8 @@ int notes_merge_commit(struct notes_merge_options *o, strbuf_setlen(&path, baselen); } - create_notes_commit(partial_tree, partial_commit->parents, - msg, strlen(msg), result_oid->hash); + create_notes_commit(partial_tree, partial_commit->parents, msg, + strlen(msg), result_oid); unuse_commit_buffer(partial_commit, buffer); if (o->verbosity >= 4) printf("Finalized notes merge commit: %s\n", diff --git a/notes-utils.c b/notes-utils.c index 5c8e70c98f..02407fe2a7 100644 --- a/notes-utils.c +++ b/notes-utils.c @@ -6,13 +6,13 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, const char *msg, size_t msg_len, - unsigned char *result_sha1) + struct object_id *result_oid) { struct object_id tree_oid; assert(t->initialized); - if (write_notes_tree(t, tree_oid.hash)) + if (write_notes_tree(t, &tree_oid)) die("Failed to write notes tree to database"); if (!parents) { @@ -27,7 +27,8 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, /* else: t->ref points to nothing, assume root/orphan commit */ } - if (commit_tree(msg, msg_len, tree_oid.hash, parents, result_sha1, NULL, NULL)) + if (commit_tree(msg, msg_len, &tree_oid, parents, result_oid, NULL, + NULL)) die("Failed to commit notes tree to database"); } @@ -47,7 +48,7 @@ void commit_notes(struct notes_tree *t, const char *msg) strbuf_addstr(&buf, msg); strbuf_complete_line(&buf); - create_notes_commit(t, NULL, buf.buf, buf.len, commit_oid.hash); + create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid); strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */ update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0, UPDATE_REFS_DIE_ON_ERR); diff --git a/notes-utils.h b/notes-utils.h index 1190578398..5d79cbef51 100644 --- a/notes-utils.h +++ b/notes-utils.h @@ -15,7 +15,8 @@ * The resulting commit SHA1 is stored in result_sha1. 
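The notes-merge.c and notes-utils.c hunks above follow the conversion pattern used throughout this section: bare unsigned char[20] buffers handled with hashcpy()/is_null_sha1() give way to struct object_id handled with oidcpy()/is_null_oid(). A before-and-after sketch of the idiom, using hypothetical helper names and assuming the object_id helpers from cache.h:

    #include "cache.h"

    /* Before: raw 20-byte buffers and the sha1 helpers (hypothetical). */
    static void record_result_sha1(unsigned char *result_sha1,
                                   const unsigned char *src_sha1)
    {
        if (!is_null_sha1(src_sha1))
            hashcpy(result_sha1, src_sha1);
    }

    /* After: the same logic in terms of struct object_id. */
    static void record_result_oid(struct object_id *result_oid,
                                  const struct object_id *src_oid)
    {
        if (!is_null_oid(src_oid))
            oidcpy(result_oid, src_oid);
    }
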
*/ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, - const char *msg, size_t msg_len, unsigned char *result_sha1); + const char *msg, size_t msg_len, + struct object_id *result_oid); void commit_notes(struct notes_tree *t, const char *msg); @@ -270,8 +270,8 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree, if (!oidcmp(&l->val_oid, &entry->val_oid)) return 0; - ret = combine_notes(l->val_oid.hash, - entry->val_oid.hash); + ret = combine_notes(&l->val_oid, + &entry->val_oid); if (!ret && is_null_oid(&l->val_oid)) note_tree_remove(t, tree, n, entry); free(entry); @@ -667,7 +667,7 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws) ret = tree_write_stack_finish_subtree(n); if (ret) return ret; - ret = write_sha1_file(n->buf.buf, n->buf.len, tree_type, s.hash); + ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s); if (ret) return ret; strbuf_release(&n->buf); @@ -786,8 +786,8 @@ static int prune_notes_helper(const struct object_id *object_oid, return 0; } -int combine_notes_concatenate(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_concatenate(struct object_id *cur_oid, + const struct object_id *new_oid) { char *cur_msg = NULL, *new_msg = NULL, *buf; unsigned long cur_len, new_len, buf_len; @@ -795,18 +795,18 @@ int combine_notes_concatenate(unsigned char *cur_sha1, int ret; /* read in both note blob objects */ - if (!is_null_sha1(new_sha1)) - new_msg = read_sha1_file(new_sha1, &new_type, &new_len); + if (!is_null_oid(new_oid)) + new_msg = read_sha1_file(new_oid->hash, &new_type, &new_len); if (!new_msg || !new_len || new_type != OBJ_BLOB) { free(new_msg); return 0; } - if (!is_null_sha1(cur_sha1)) - cur_msg = read_sha1_file(cur_sha1, &cur_type, &cur_len); + if (!is_null_oid(cur_oid)) + cur_msg = read_sha1_file(cur_oid->hash, &cur_type, &cur_len); if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) { free(cur_msg); free(new_msg); - hashcpy(cur_sha1, new_sha1); + oidcpy(cur_oid, new_oid); return 0; } @@ -825,20 +825,20 @@ int combine_notes_concatenate(unsigned char *cur_sha1, free(new_msg); /* create a new blob object from buf */ - ret = write_sha1_file(buf, buf_len, blob_type, cur_sha1); + ret = write_object_file(buf, buf_len, blob_type, cur_oid); free(buf); return ret; } -int combine_notes_overwrite(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_overwrite(struct object_id *cur_oid, + const struct object_id *new_oid) { - hashcpy(cur_sha1, new_sha1); + oidcpy(cur_oid, new_oid); return 0; } -int combine_notes_ignore(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_ignore(struct object_id *cur_oid, + const struct object_id *new_oid) { return 0; } @@ -848,17 +848,17 @@ int combine_notes_ignore(unsigned char *cur_sha1, * newlines removed. 
*/ static int string_list_add_note_lines(struct string_list *list, - const unsigned char *sha1) + const struct object_id *oid) { char *data; unsigned long len; enum object_type t; - if (is_null_sha1(sha1)) + if (is_null_oid(oid)) return 0; /* read_sha1_file NUL-terminates */ - data = read_sha1_file(sha1, &t, &len); + data = read_sha1_file(oid->hash, &t, &len); if (t != OBJ_BLOB || !data || !len) { free(data); return t != OBJ_BLOB || !data; @@ -884,17 +884,17 @@ static int string_list_join_lines_helper(struct string_list_item *item, return 0; } -int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_cat_sort_uniq(struct object_id *cur_oid, + const struct object_id *new_oid) { struct string_list sort_uniq_list = STRING_LIST_INIT_DUP; struct strbuf buf = STRBUF_INIT; int ret = 1; /* read both note blob objects into unique_lines */ - if (string_list_add_note_lines(&sort_uniq_list, cur_sha1)) + if (string_list_add_note_lines(&sort_uniq_list, cur_oid)) goto out; - if (string_list_add_note_lines(&sort_uniq_list, new_sha1)) + if (string_list_add_note_lines(&sort_uniq_list, new_oid)) goto out; string_list_remove_empty_items(&sort_uniq_list, 0); string_list_sort(&sort_uniq_list); @@ -905,7 +905,7 @@ int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, string_list_join_lines_helper, &buf)) goto out; - ret = write_sha1_file(buf.buf, buf.len, blob_type, cur_sha1); + ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid); out: strbuf_release(&buf); @@ -1123,11 +1123,12 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn, return for_each_note_helper(t, t->root, 0, 0, flags, fn, cb_data); } -int write_notes_tree(struct notes_tree *t, unsigned char *result) +int write_notes_tree(struct notes_tree *t, struct object_id *result) { struct tree_write_stack root; struct write_each_note_data cb_data; int ret; + int flags; if (!t) t = &default_notes_tree; @@ -1141,12 +1142,12 @@ int write_notes_tree(struct notes_tree *t, unsigned char *result) cb_data.next_non_note = t->first_non_note; /* Write tree objects representing current notes tree */ - ret = for_each_note(t, FOR_EACH_NOTE_DONT_UNPACK_SUBTREES | - FOR_EACH_NOTE_YIELD_SUBTREES, - write_each_note, &cb_data) || - write_each_non_note_until(NULL, &cb_data) || - tree_write_stack_finish_subtree(&root) || - write_sha1_file(root.buf.buf, root.buf.len, tree_type, result); + flags = FOR_EACH_NOTE_DONT_UNPACK_SUBTREES | + FOR_EACH_NOTE_YIELD_SUBTREES; + ret = for_each_note(t, flags, write_each_note, &cb_data) || + write_each_non_note_until(NULL, &cb_data) || + tree_write_stack_finish_subtree(&root) || + write_object_file(root.buf.buf, root.buf.len, tree_type, result); strbuf_release(&root.buf); return ret; } @@ -9,27 +9,32 @@ * When adding a new note annotating the same object as an existing note, it is * up to the caller to decide how to combine the two notes. The decision is * made by passing in a function of the following form. The function accepts - * two SHA1s -- of the existing note and the new note, respectively. The + * two object_ids -- of the existing note and the new note, respectively. The * function then combines the notes in whatever way it sees fit, and writes the - * resulting SHA1 into the first SHA1 argument (cur_sha1). A non-zero return + * resulting oid into the first argument (cur_oid). A non-zero return * value indicates failure. * - * The two given SHA1s shall both be non-NULL and different from each other. 
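combine_notes_cat_sort_uniq() above keeps its existing approach and only changes types: read each note blob into a string_list, drop empty items, sort, de-duplicate, and join the lines back into one buffer. A reduced sketch of that string-list pipeline, assuming string-list.h; the fixed input strings stand in for the note contents:

    #include "cache.h"
    #include "string-list.h"

    static int append_line(struct string_list_item *item, void *cb_data)
    {
        struct strbuf *buf = cb_data;

        strbuf_addstr(buf, item->string);
        strbuf_addch(buf, '\n');
        return 0;
    }

    /* Hypothetical demo of the sort/uniq/join steps used above. */
    static void cat_sort_uniq_demo(struct strbuf *out)
    {
        struct string_list lines = STRING_LIST_INIT_DUP;

        string_list_append(&lines, "b");
        string_list_append(&lines, "a");
        string_list_append(&lines, "b");        /* duplicate */

        string_list_remove_empty_items(&lines, 0);
        string_list_sort(&lines);
        string_list_remove_duplicates(&lines, 0);
        for_each_string_list(&lines, append_line, out);  /* out: "a\nb\n" */

        string_list_clear(&lines, 0);
    }
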
- * Either of them (but not both) may be == null_sha1, which indicates an - * empty/non-existent note. If the resulting SHA1 (cur_sha1) is == null_sha1, + * The two given object_ids shall both be non-NULL and different from each + * other. Either of them (but not both) may be == null_oid, which indicates an + * empty/non-existent note. If the resulting oid (cur_oid) is == null_oid, * the note will be removed from the notes tree. * * The default combine_notes function (you get this when passing NULL) is * combine_notes_concatenate(), which appends the contents of the new note to * the contents of the existing note. */ -typedef int (*combine_notes_fn)(unsigned char *cur_sha1, const unsigned char *new_sha1); +typedef int (*combine_notes_fn)(struct object_id *cur_oid, + const struct object_id *new_oid); /* Common notes combinators */ -int combine_notes_concatenate(unsigned char *cur_sha1, const unsigned char *new_sha1); -int combine_notes_overwrite(unsigned char *cur_sha1, const unsigned char *new_sha1); -int combine_notes_ignore(unsigned char *cur_sha1, const unsigned char *new_sha1); -int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, const unsigned char *new_sha1); +int combine_notes_concatenate(struct object_id *cur_oid, + const struct object_id *new_oid); +int combine_notes_overwrite(struct object_id *cur_oid, + const struct object_id *new_oid); +int combine_notes_ignore(struct object_id *cur_oid, + const struct object_id *new_oid); +int combine_notes_cat_sort_uniq(struct object_id *cur_oid, + const struct object_id *new_oid); /* * Notes tree object @@ -212,7 +217,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn, * Write the given notes_tree structure to the object database * * Creates a new tree object encapsulating the current state of the given - * notes_tree, and stores its SHA1 into the 'result' argument. + * notes_tree, and stores its object id into the 'result' argument. * * Returns zero on success, non-zero on failure. * @@ -220,7 +225,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn, * this function has returned zero. Please also remember to create a * corresponding commit object, and update the appropriate notes ref. 
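With the typedef above, note combiners now take and return struct object_id pointers. A minimal sketch of a caller-supplied combiner under the new signature; this keep-the-existing-note variant is hypothetical and is not one of the combinators declared here:

    #include "cache.h"
    #include "notes.h"

    /* Hypothetical combiner: keep the current note unless it is empty. */
    static int combine_notes_keep_existing(struct object_id *cur_oid,
                                           const struct object_id *new_oid)
    {
        if (is_null_oid(cur_oid))
            oidcpy(cur_oid, new_oid);
        return 0;       /* non-zero would report failure to the notes code */
    }

    /* A caller would pass it to init_notes(), e.g.:
     *     init_notes(NULL, NULL, combine_notes_keep_existing, 0);
     */
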
*/ -int write_notes_tree(struct notes_tree *t, unsigned char *result); +int write_notes_tree(struct notes_tree *t, struct object_id *result); /* Flags controlling the operation of prune */ #define NOTES_PRUNE_VERBOSE 1 @@ -252,7 +252,7 @@ struct object *parse_object(const struct object_id *oid) if (obj && obj->parsed) return obj; - if ((obj && obj->type == OBJ_BLOB) || + if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) || (!obj && has_object_file(oid) && sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) { if (check_sha1_signature(repl, NULL, 0, NULL) < 0) { diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index a8df5ce2ab..e01f992884 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -440,19 +440,19 @@ void bitmap_writer_select_commits(struct commit **indexed_commits, } -static int sha1write_ewah_helper(void *f, const void *buf, size_t len) +static int hashwrite_ewah_helper(void *f, const void *buf, size_t len) { - /* sha1write will die on error */ - sha1write(f, buf, len); + /* hashwrite will die on error */ + hashwrite(f, buf, len); return len; } /** * Write the bitmap index to disk */ -static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap) +static inline void dump_bitmap(struct hashfile *f, struct ewah_bitmap *bitmap) { - if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0) + if (ewah_serialize_to(bitmap, hashwrite_ewah_helper, f) < 0) die("Failed to write bitmap index"); } @@ -462,7 +462,7 @@ static const unsigned char *sha1_access(size_t pos, void *table) return index[pos]->oid.hash; } -static void write_selected_commits_v1(struct sha1file *f, +static void write_selected_commits_v1(struct hashfile *f, struct pack_idx_entry **index, uint32_t index_nr) { @@ -477,15 +477,15 @@ static void write_selected_commits_v1(struct sha1file *f, if (commit_pos < 0) die("BUG: trying to write commit not in index"); - sha1write_be32(f, commit_pos); - sha1write_u8(f, stored->xor_offset); - sha1write_u8(f, stored->flags); + hashwrite_be32(f, commit_pos); + hashwrite_u8(f, stored->xor_offset); + hashwrite_u8(f, stored->flags); dump_bitmap(f, stored->write_as); } } -static void write_hash_cache(struct sha1file *f, +static void write_hash_cache(struct hashfile *f, struct pack_idx_entry **index, uint32_t index_nr) { @@ -494,7 +494,7 @@ static void write_hash_cache(struct sha1file *f, for (i = 0; i < index_nr; ++i) { struct object_entry *entry = (struct object_entry *)index[i]; uint32_t hash_value = htonl(entry->hash); - sha1write(f, &hash_value, sizeof(hash_value)); + hashwrite(f, &hash_value, sizeof(hash_value)); } } @@ -511,13 +511,13 @@ void bitmap_writer_finish(struct pack_idx_entry **index, static uint16_t default_version = 1; static uint16_t flags = BITMAP_OPT_FULL_DAG; struct strbuf tmp_file = STRBUF_INIT; - struct sha1file *f; + struct hashfile *f; struct bitmap_disk_header header; int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX"); - f = sha1fd(fd, tmp_file.buf); + f = hashfd(fd, tmp_file.buf); memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)); header.version = htons(default_version); @@ -525,7 +525,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index, header.entry_count = htonl(writer.selected_nr); hashcpy(header.checksum, writer.pack_checksum); - sha1write(f, &header, sizeof(header)); + hashwrite(f, &header, sizeof(header)); dump_bitmap(f, writer.commits); dump_bitmap(f, writer.trees); dump_bitmap(f, writer.blobs); @@ -535,7 +535,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index, if (options & 
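The pack-bitmap-write.c hunks above are a mechanical rename from the old sha1file/sha1write/sha1close writer to the hashfile API from csum-file.h. A minimal sketch of the write-then-checksum pattern after the rename, assuming an already-open descriptor; the file name and payload are hypothetical:

    #include "cache.h"
    #include "csum-file.h"

    /* Hypothetical: write a little data to an already-open fd, with the
     * trailing checksum handled by the hashfile machinery. */
    static void write_checksummed(int fd)
    {
        struct hashfile *f = hashfd(fd, "some-file");

        hashwrite_be32(f, 42);              /* big-endian integer helper */
        hashwrite(f, "data", 4);            /* raw bytes */
        hashclose(f, NULL, CSUM_FSYNC);     /* flush, append hash, fsync */
    }
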
BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); - sha1close(f, NULL, CSUM_FSYNC); + hashclose(f, NULL, CSUM_FSYNC); if (adjust_shared_perm(tmp_file.buf)) die_errno("unable to make temporary bitmap file readable"); diff --git a/pack-check.c b/pack-check.c index 073c1fbd46..403a572567 100644 --- a/pack-check.c +++ b/pack-check.c @@ -41,7 +41,7 @@ int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, } while (len); index_crc = p->index_data; - index_crc += 2 + 256 + p->num_objects * (20/4) + nr; + index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr; return data_crc != ntohl(*index_crc); } @@ -54,7 +54,7 @@ static int verify_packfile(struct packed_git *p, { off_t index_size = p->index_size; const unsigned char *index_base = p->index_data; - git_SHA_CTX ctx; + git_hash_ctx ctx; unsigned char hash[GIT_MAX_RAWSZ], *pack_sig; off_t offset = 0, pack_sig_ofs = 0; uint32_t nr_objects, i; @@ -64,24 +64,24 @@ static int verify_packfile(struct packed_git *p, if (!is_pack_valid(p)) return error("packfile %s cannot be accessed", p->pack_name); - git_SHA1_Init(&ctx); + the_hash_algo->init_fn(&ctx); do { unsigned long remaining; unsigned char *in = use_pack(p, w_curs, offset, &remaining); offset += remaining; if (!pack_sig_ofs) - pack_sig_ofs = p->pack_size - 20; + pack_sig_ofs = p->pack_size - the_hash_algo->rawsz; if (offset > pack_sig_ofs) remaining -= (unsigned int)(offset - pack_sig_ofs); - git_SHA1_Update(&ctx, in, remaining); + the_hash_algo->update_fn(&ctx, in, remaining); } while (offset < pack_sig_ofs); - git_SHA1_Final(hash, &ctx); + the_hash_algo->final_fn(hash, &ctx); pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL); if (hashcmp(hash, pack_sig)) - err = error("%s SHA1 checksum mismatch", + err = error("%s pack checksum mismatch", p->pack_name); - if (hashcmp(index_base + index_size - 40, pack_sig)) - err = error("%s SHA1 does not match its index", + if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig)) + err = error("%s pack checksum does not match its index", p->pack_name); unuse_pack(w_curs); @@ -165,8 +165,8 @@ int verify_pack_index(struct packed_git *p) { off_t index_size; const unsigned char *index_base; - git_SHA_CTX ctx; - unsigned char sha1[20]; + git_hash_ctx ctx; + unsigned char hash[GIT_MAX_RAWSZ]; int err = 0; if (open_pack_index(p)) @@ -175,11 +175,11 @@ int verify_pack_index(struct packed_git *p) index_base = p->index_data; /* Verify SHA1 sum of the index file */ - git_SHA1_Init(&ctx); - git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20)); - git_SHA1_Final(sha1, &ctx); - if (hashcmp(sha1, index_base + index_size - 20)) - err = error("Packfile index for %s SHA1 mismatch", + the_hash_algo->init_fn(&ctx); + the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz)); + the_hash_algo->final_fn(hash, &ctx); + if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz)) + err = error("Packfile index for %s hash mismatch", p->pack_name); return err; } diff --git a/pack-revindex.c b/pack-revindex.c index 1b7ebd8d7e..ff5f62c033 100644 --- a/pack-revindex.c +++ b/pack-revindex.c @@ -134,10 +134,8 @@ static void create_pack_revindex(struct packed_git *p) if (!(off & 0x80000000)) { p->revindex[i].offset = off; } else { - p->revindex[i].offset = - ((uint64_t)ntohl(*off_64++)) << 32; - p->revindex[i].offset |= - ntohl(*off_64++); + p->revindex[i].offset = get_be64(off_64); + off_64 += 2; } p->revindex[i].nr = i; } diff --git a/pack-write.c b/pack-write.c index 
fea6284192..d775c7406d 100644 --- a/pack-write.c +++ b/pack-write.c @@ -46,7 +46,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec int nr_objects, const struct pack_idx_option *opts, const unsigned char *sha1) { - struct sha1file *f; + struct hashfile *f; struct pack_idx_entry **sorted_by_sha, **list, **last; off_t last_obj_offset = 0; uint32_t array[256]; @@ -68,7 +68,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec if (opts->flags & WRITE_IDX_VERIFY) { assert(index_name); - f = sha1fd_check(index_name); + f = hashfd_check(index_name); } else { if (!index_name) { struct strbuf tmp_file = STRBUF_INIT; @@ -80,7 +80,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec if (fd < 0) die_errno("unable to create '%s'", index_name); } - f = sha1fd(fd, index_name); + f = hashfd(fd, index_name); } /* if last object's offset is >= 2^31 we should use index V2 */ @@ -91,7 +91,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec struct pack_idx_header hdr; hdr.idx_signature = htonl(PACK_IDX_SIGNATURE); hdr.idx_version = htonl(index_version); - sha1write(f, &hdr, sizeof(hdr)); + hashwrite(f, &hdr, sizeof(hdr)); } /* @@ -110,7 +110,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec array[i] = htonl(next - sorted_by_sha); list = next; } - sha1write(f, array, 256 * 4); + hashwrite(f, array, 256 * 4); /* * Write the actual SHA1 entries.. @@ -120,9 +120,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec struct pack_idx_entry *obj = *list++; if (index_version < 2) { uint32_t offset = htonl(obj->offset); - sha1write(f, &offset, 4); + hashwrite(f, &offset, 4); } - sha1write(f, obj->oid.hash, 20); + hashwrite(f, obj->oid.hash, the_hash_algo->rawsz); if ((opts->flags & WRITE_IDX_STRICT) && (i && !oidcmp(&list[-2]->oid, &obj->oid))) die("The same object %s appears twice in the pack", @@ -137,7 +137,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec for (i = 0; i < nr_objects; i++) { struct pack_idx_entry *obj = *list++; uint32_t crc32_val = htonl(obj->crc32); - sha1write(f, &crc32_val, 4); + hashwrite(f, &crc32_val, 4); } /* write the 32-bit offset table */ @@ -150,7 +150,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec ? (0x80000000 | nr_large_offset++) : obj->offset); offset = htonl(offset); - sha1write(f, &offset, 4); + hashwrite(f, &offset, 4); } /* write the large offset table */ @@ -164,25 +164,25 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec continue; split[0] = htonl(offset >> 32); split[1] = htonl(offset & 0xffffffff); - sha1write(f, split, 8); + hashwrite(f, split, 8); nr_large_offset--; } } - sha1write(f, sha1, 20); - sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY) + hashwrite(f, sha1, the_hash_algo->rawsz); + hashclose(f, NULL, ((opts->flags & WRITE_IDX_VERIFY) ? 
CSUM_CLOSE : CSUM_FSYNC)); return index_name; } -off_t write_pack_header(struct sha1file *f, uint32_t nr_entries) +off_t write_pack_header(struct hashfile *f, uint32_t nr_entries) { struct pack_header hdr; hdr.hdr_signature = htonl(PACK_SIGNATURE); hdr.hdr_version = htonl(PACK_VERSION); hdr.hdr_entries = htonl(nr_entries); - sha1write(f, &hdr, sizeof(hdr)); + hashwrite(f, &hdr, sizeof(hdr)); return sizeof(hdr); } @@ -203,20 +203,20 @@ off_t write_pack_header(struct sha1file *f, uint32_t nr_entries) * interested in the resulting SHA1 of pack data above partial_pack_offset. */ void fixup_pack_header_footer(int pack_fd, - unsigned char *new_pack_sha1, + unsigned char *new_pack_hash, const char *pack_name, uint32_t object_count, - unsigned char *partial_pack_sha1, + unsigned char *partial_pack_hash, off_t partial_pack_offset) { int aligned_sz, buf_sz = 8 * 1024; - git_SHA_CTX old_sha1_ctx, new_sha1_ctx; + git_hash_ctx old_hash_ctx, new_hash_ctx; struct pack_header hdr; char *buf; ssize_t read_result; - git_SHA1_Init(&old_sha1_ctx); - git_SHA1_Init(&new_sha1_ctx); + the_hash_algo->init_fn(&old_hash_ctx); + the_hash_algo->init_fn(&new_hash_ctx); if (lseek(pack_fd, 0, SEEK_SET) != 0) die_errno("Failed seeking to start of '%s'", pack_name); @@ -228,9 +228,9 @@ void fixup_pack_header_footer(int pack_fd, pack_name); if (lseek(pack_fd, 0, SEEK_SET) != 0) die_errno("Failed seeking to start of '%s'", pack_name); - git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr)); + the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr)); hdr.hdr_entries = htonl(object_count); - git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr)); + the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr)); write_or_die(pack_fd, &hdr, sizeof(hdr)); partial_pack_offset -= sizeof(hdr); @@ -238,28 +238,28 @@ void fixup_pack_header_footer(int pack_fd, aligned_sz = buf_sz - sizeof(hdr); for (;;) { ssize_t m, n; - m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ? + m = (partial_pack_hash && partial_pack_offset < aligned_sz) ? partial_pack_offset : aligned_sz; n = xread(pack_fd, buf, m); if (!n) break; if (n < 0) die_errno("Failed to checksum '%s'", pack_name); - git_SHA1_Update(&new_sha1_ctx, buf, n); + the_hash_algo->update_fn(&new_hash_ctx, buf, n); aligned_sz -= n; if (!aligned_sz) aligned_sz = buf_sz; - if (!partial_pack_sha1) + if (!partial_pack_hash) continue; - git_SHA1_Update(&old_sha1_ctx, buf, n); + the_hash_algo->update_fn(&old_hash_ctx, buf, n); partial_pack_offset -= n; if (partial_pack_offset == 0) { - unsigned char sha1[20]; - git_SHA1_Final(sha1, &old_sha1_ctx); - if (hashcmp(sha1, partial_pack_sha1) != 0) + unsigned char hash[GIT_MAX_RAWSZ]; + the_hash_algo->final_fn(hash, &old_hash_ctx); + if (hashcmp(hash, partial_pack_hash) != 0) die("Unexpected checksum for %s " "(disk corruption?)", pack_name); /* @@ -267,23 +267,24 @@ void fixup_pack_header_footer(int pack_fd, * pack, which also means making partial_pack_offset * big enough not to matter anymore. 
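The fixup_pack_header_footer() conversion here is typical of the hash-abstraction work in this series: direct git_SHA1_Init/Update/Final calls and hard-coded 20-byte lengths become calls through the_hash_algo and its rawsz field. A small sketch of the streaming pattern, assuming the git_hash_algo interface from cache.h; the helper name is hypothetical:

    #include "cache.h"

    /* Hash an in-memory buffer through the configured algorithm, the same
     * way the converted hunks above do for pack data. */
    static void hash_buffer(const void *buf, size_t len,
                            unsigned char *out /* GIT_MAX_RAWSZ bytes */)
    {
        git_hash_ctx ctx;

        the_hash_algo->init_fn(&ctx);
        the_hash_algo->update_fn(&ctx, buf, len);
        the_hash_algo->final_fn(out, &ctx);
        /* out now holds the_hash_algo->rawsz bytes. */
    }
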
*/ - git_SHA1_Init(&old_sha1_ctx); + the_hash_algo->init_fn(&old_hash_ctx); partial_pack_offset = ~partial_pack_offset; partial_pack_offset -= MSB(partial_pack_offset, 1); } } free(buf); - if (partial_pack_sha1) - git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx); - git_SHA1_Final(new_pack_sha1, &new_sha1_ctx); - write_or_die(pack_fd, new_pack_sha1, 20); + if (partial_pack_hash) + the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx); + the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx); + write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz); fsync_or_die(pack_fd, pack_name); } char *index_pack_lockfile(int ip_out) { - char packname[46]; + char packname[GIT_MAX_HEXSZ + 6]; + const int len = the_hash_algo->hexsz + 6; /* * The first thing we expect from index-pack's output @@ -292,9 +293,9 @@ char *index_pack_lockfile(int ip_out) * case, we need it to remove the corresponding .keep file * later on. If we don't get that then tough luck with it. */ - if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n') { + if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') { const char *name; - packname[45] = 0; + packname[len-1] = 0; if (skip_prefix(packname, "keep\t", &name)) return xstrfmt("%s/pack/pack-%s.keep", get_object_directory(), name); @@ -332,14 +333,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len, return n; } -struct sha1file *create_tmp_packfile(char **pack_tmp_name) +struct hashfile *create_tmp_packfile(char **pack_tmp_name) { struct strbuf tmpname = STRBUF_INIT; int fd; fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX"); *pack_tmp_name = strbuf_detach(&tmpname, NULL); - return sha1fd(fd, *pack_tmp_name); + return hashfd(fd, *pack_tmp_name); } void finish_tmp_packfile(struct strbuf *name_buffer, @@ -81,7 +81,7 @@ extern const char *write_idx_file(const char *index_name, struct pack_idx_entry extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr); extern int verify_pack_index(struct packed_git *); extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t); -extern off_t write_pack_header(struct sha1file *f, uint32_t); +extern off_t write_pack_header(struct hashfile *f, uint32_t); extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t); extern char *index_pack_lockfile(int fd); @@ -98,7 +98,7 @@ extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len, #define PH_ERROR_PROTOCOL (-3) extern int read_pack_header(int fd, struct pack_header *); -extern struct sha1file *create_tmp_packfile(char **pack_tmp_name); +extern struct hashfile *create_tmp_packfile(char **pack_tmp_name); extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]); #endif diff --git a/packfile.c b/packfile.c index 4a5fe7ab18..5d07f330c8 100644 --- a/packfile.c +++ b/packfile.c @@ -1,5 +1,5 @@ #include "cache.h" -#include "mru.h" +#include "list.h" #include "pack.h" #include "dir.h" #include "mergesort.h" @@ -8,6 +8,11 @@ #include "list.h" #include "streaming.h" #include "sha1-lookup.h" +#include "commit.h" +#include "object.h" +#include "tag.h" +#include "tree-walk.h" +#include "tree.h" char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, @@ -40,7 +45,7 @@ static unsigned int pack_max_fds; static size_t peak_pack_mapped; static size_t 
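The packfile.c changes beginning above drop the dedicated mru wrapper (removed at the start of this section) in favor of the generic doubly-linked list from list.h: LIST_HEAD() declares the list, list_add_tail() appends, list_for_each() walks it, and list_move() re-inserts a node at the front to mark it most recently used. A reduced sketch of the same pattern on a hypothetical item type, assuming list.h:

    #include "cache.h"
    #include "list.h"

    /* Hypothetical cache item; the embedded node mirrors packed_git.mru. */
    struct item {
        int value;
        struct list_head lru;
    };

    static LIST_HEAD(items);            /* like packed_git_mru above */

    static void add_item(struct item *it)
    {
        list_add_tail(&it->lru, &items);
    }

    static struct item *find_item(int value)
    {
        struct list_head *pos;

        list_for_each(pos, &items) {
            struct item *it = list_entry(pos, struct item, lru);
            if (it->value == value) {
                /* Move to the front: most recently used. */
                list_move(&it->lru, &items);
                return it;
            }
        }
        return NULL;
    }
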
pack_mapped; struct packed_git *packed_git; -struct mru packed_git_mru; +LIST_HEAD(packed_git_mru); #define SZ_FMT PRIuMAX static inline uintmax_t sz_fmt(size_t s) { return s; } @@ -643,10 +648,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local) return NULL; /* - * ".pack" is long enough to hold any suffix we're adding (and + * ".promisor" is long enough to hold any suffix we're adding (and * the use xsnprintf double-checks that) */ - alloc = st_add3(path_len, strlen(".pack"), 1); + alloc = st_add3(path_len, strlen(".promisor"), 1); p = alloc_packed_git(alloc); memcpy(p->pack_name, path, path_len); @@ -654,6 +659,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local) if (!access(p->pack_name, F_OK)) p->pack_keep = 1; + xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor"); + if (!access(p->pack_name, F_OK)) + p->pack_promisor = 1; + xsnprintf(p->pack_name + path_len, alloc - path_len, ".pack"); if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) { free(p); @@ -781,7 +790,8 @@ static void prepare_packed_git_one(char *objdir, int local) if (ends_with(de->d_name, ".idx") || ends_with(de->d_name, ".pack") || ends_with(de->d_name, ".bitmap") || - ends_with(de->d_name, ".keep")) + ends_with(de->d_name, ".keep") || + ends_with(de->d_name, ".promisor")) string_list_append(&garbage, path.buf); else report_garbage(PACKDIR_FILE_GARBAGE, path.buf); @@ -866,9 +876,10 @@ static void prepare_packed_git_mru(void) { struct packed_git *p; - mru_clear(&packed_git_mru); + INIT_LIST_HEAD(&packed_git_mru); + for (p = packed_git; p; p = p->next) - mru_append(&packed_git_mru, p); + list_add_tail(&p->mru, &packed_git_mru); } static int prepare_packed_git_run_once = 0; @@ -1702,8 +1713,7 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n) return off; index += p->num_objects * 4 + (off & 0x7fffffff) * 8; check_pack_index_ptr(p, index); - return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) | - ntohl(*((uint32_t *)(index + 4))); + return get_be64(index); } } @@ -1712,11 +1722,8 @@ off_t find_pack_entry_one(const unsigned char *sha1, { const uint32_t *level1_ofs = p->index_data; const unsigned char *index = p->index_data; - unsigned hi, lo, stride; - static int debug_lookup = -1; - - if (debug_lookup < 0) - debug_lookup = !!getenv("GIT_DEBUG_LOOKUP"); + unsigned stride; + uint32_t result; if (!index) { if (open_pack_index(p)) @@ -1729,8 +1736,6 @@ off_t find_pack_entry_one(const unsigned char *sha1, index += 8; } index += 4 * 256; - hi = ntohl(level1_ofs[*sha1]); - lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1])); if (p->index_version > 1) { stride = 20; } else { @@ -1738,24 +1743,8 @@ off_t find_pack_entry_one(const unsigned char *sha1, index += 4; } - if (debug_lookup) - printf("%02x%02x%02x... 
lo %u hi %u nr %"PRIu32"\n", - sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects); - - while (lo < hi) { - unsigned mi = lo + (hi - lo) / 2; - int cmp = hashcmp(index + mi * stride, sha1); - - if (debug_lookup) - printf("lo %u hi %u rg %u mi %u\n", - lo, hi, hi - lo, mi); - if (!cmp) - return nth_packed_object_offset(p, mi); - if (cmp > 0) - hi = mi; - else - lo = mi+1; - } + if (bsearch_hash(sha1, level1_ofs, index, stride, &result)) + return nth_packed_object_offset(p, result); return 0; } @@ -1831,15 +1820,16 @@ static int fill_pack_entry(const unsigned char *sha1, */ int find_pack_entry(const unsigned char *sha1, struct pack_entry *e) { - struct mru_entry *p; + struct list_head *pos; prepare_packed_git(); if (!packed_git) return 0; - for (p = packed_git_mru.head; p; p = p->next) { - if (fill_pack_entry(sha1, e, p->item)) { - mru_mark(&packed_git_mru, p); + list_for_each(pos, &packed_git_mru) { + struct packed_git *p = list_entry(pos, struct packed_git, mru); + if (fill_pack_entry(sha1, e, p)) { + list_move(&p->mru, &packed_git_mru); return 1; } } @@ -1889,6 +1879,9 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags) for (p = packed_git; p; p = p->next) { if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local) continue; + if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) && + !p->pack_promisor) + continue; if (open_pack_index(p)) { pack_errors = 1; continue; @@ -1899,3 +1892,61 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags) } return r ? r : pack_errors; } + +static int add_promisor_object(const struct object_id *oid, + struct packed_git *pack, + uint32_t pos, + void *set_) +{ + struct oidset *set = set_; + struct object *obj = parse_object(oid); + if (!obj) + return 1; + + oidset_insert(set, oid); + + /* + * If this is a tree, commit, or tag, the objects it refers + * to are also promisor objects. (Blobs refer to no objects.) + */ + if (obj->type == OBJ_TREE) { + struct tree *tree = (struct tree *)obj; + struct tree_desc desc; + struct name_entry entry; + if (init_tree_desc_gently(&desc, tree->buffer, tree->size)) + /* + * Error messages are given when packs are + * verified, so do not print any here. + */ + return 0; + while (tree_entry_gently(&desc, &entry)) + oidset_insert(set, entry.oid); + } else if (obj->type == OBJ_COMMIT) { + struct commit *commit = (struct commit *) obj; + struct commit_list *parents = commit->parents; + + oidset_insert(set, &commit->tree->object.oid); + for (; parents; parents = parents->next) + oidset_insert(set, &parents->item->object.oid); + } else if (obj->type == OBJ_TAG) { + struct tag *tag = (struct tag *) obj; + oidset_insert(set, &tag->tagged->oid); + } + return 0; +} + +int is_promisor_object(const struct object_id *oid) +{ + static struct oidset promisor_objects; + static int promisor_objects_prepared; + + if (!promisor_objects_prepared) { + if (repository_format_partial_clone) { + for_each_packed_object(add_promisor_object, + &promisor_objects, + FOR_EACH_OBJECT_PROMISOR_ONLY); + } + promisor_objects_prepared = 1; + } + return oidset_contains(&promisor_objects, oid); +} diff --git a/packfile.h b/packfile.h index 0cdeb54dcd..a7fca598d6 100644 --- a/packfile.h +++ b/packfile.h @@ -1,6 +1,8 @@ #ifndef PACKFILE_H #define PACKFILE_H +#include "oidset.h" + /* * Generate the filename to be used for a pack file with checksum "sha1" and * extension "ext". 
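The new packfile.c code above collects "promisor objects": everything contained in, or referenced by, a pack that has a matching .promisor file. A minimal sketch of the two entry points as a caller might use them, assuming packfile.h; the counting callback and wrapper are hypothetical:

    #include "cache.h"
    #include "packfile.h"

    static int count_one(const struct object_id *oid,
                         struct packed_git *pack,
                         uint32_t pos, void *data)
    {
        unsigned *count = data;

        (*count)++;
        return 0;               /* non-zero would stop the iteration */
    }

    /* Hypothetical: how many objects do promisor packs contain? */
    static unsigned count_promisor_pack_objects(void)
    {
        unsigned count = 0;

        for_each_packed_object(count_one, &count,
                               FOR_EACH_OBJECT_PROMISOR_ONLY);
        return count;
    }

    /* Elsewhere, a missing object can be excused if a promisor pack
     * vouches for it, directly or indirectly:
     *     if (is_promisor_object(&oid))
     *             ...;
     */
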
The result is written into the strbuf "buf", overwriting @@ -125,6 +127,11 @@ extern int has_sha1_pack(const unsigned char *sha1); extern int has_pack_index(const unsigned char *sha1); /* + * Only iterate over packs obtained from the promisor remote. + */ +#define FOR_EACH_OBJECT_PROMISOR_ONLY 2 + +/* * Iterate over packed objects in both the local * repository and any alternates repositories (unless the * FOR_EACH_OBJECT_LOCAL_ONLY flag, defined in cache.h, is set). @@ -135,4 +142,10 @@ typedef int each_packed_object_fn(const struct object_id *oid, void *data); extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags); +/* + * Return 1 if an object in a promisor packfile is or refers to the given + * object, 0 otherwise. + */ +extern int is_promisor_object(const struct object_id *oid); + #endif diff --git a/parse-options.c b/parse-options.c index fca7159646..d02eb8b015 100644 --- a/parse-options.c +++ b/parse-options.c @@ -525,7 +525,7 @@ unknown: int parse_options_end(struct parse_opt_ctx_t *ctx) { - memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); + MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc); ctx->out[ctx->cpidx + ctx->argc] = NULL; return ctx->cpidx + ctx->argc; } diff --git a/perl/.gitignore b/perl/.gitignore index 0f1fc27f86..84c048a73c 100644 --- a/perl/.gitignore +++ b/perl/.gitignore @@ -1,8 +1 @@ -perl.mak -perl.mak.old -MYMETA.json -MYMETA.yml -blib -blibdirs -pm_to_blib -PM.stamp +/build/ diff --git a/perl/Git.pm b/perl/Git.pm index 65e6b32a0f..9d60d7948b 100644 --- a/perl/Git.pm +++ b/perl/Git.pm @@ -101,7 +101,7 @@ increase notwithstanding). use Carp qw(carp croak); # but croak is bad - throw instead -use Error qw(:try); +use Git::Error qw(:try); use Cwd qw(abs_path cwd); use IPC::Open2 qw(open2); use Fcntl qw(SEEK_SET SEEK_CUR); diff --git a/perl/Git/Error.pm b/perl/Git/Error.pm new file mode 100644 index 0000000000..09bbc97390 --- /dev/null +++ b/perl/Git/Error.pm @@ -0,0 +1,46 @@ +package Git::Error; +use 5.008; +use strict; +use warnings; + +=head1 NAME + +Git::Error - Wrapper for the L<Error> module, in case it's not installed + +=head1 DESCRIPTION + +Wraps the import function for the L<Error> module. + +This module is only intended to be used for code shipping in the +C<git.git> repository. Use it for anything else at your peril! + +=cut + +sub import { + shift; + my $caller = caller; + + eval { + require Error; + 1; + } or do { + my $error = $@ || "Zombie Error"; + + my $Git_Error_pm_path = $INC{"Git/Error.pm"} || die "BUG: Should have our own path from %INC!"; + + require File::Basename; + my $Git_Error_pm_root = File::Basename::dirname($Git_Error_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_Error_pm_path'!"; + + require File::Spec; + my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_Error_pm_root, 'FromCPAN'); + die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" 
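parse_options_end() above swaps a hand-written memmove() for the MOVE_ARRAY() helper, which derives the element size from its arguments and so avoids the classic sizeof slip (the read-cache.c hunk further down makes the same change). A tiny sketch of the macro, assuming git-compat-util.h; the surrounding function is hypothetical:

    #include "git-compat-util.h"

    /* Hypothetical: shift entries [pos, nr) one slot to the right; the
     * caller must have room for nr + 1 elements. */
    static void open_gap(int *array, size_t nr, size_t pos)
    {
        /*
         * Equivalent to
         *     memmove(array + pos + 1, array + pos,
         *             (nr - pos) * sizeof(*array));
         * but MOVE_ARRAY derives the element size itself and checks that
         * source and destination element sizes match.
         */
        MOVE_ARRAY(array + pos + 1, array + pos, nr - pos);
    }
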
unless -d $Git_pm_FromCPAN_root; + + local @INC = ($Git_pm_FromCPAN_root, @INC); + require Error; + }; + + unshift @_, $caller; + goto &Error::import; +} + +1; diff --git a/perl/private-Error.pm b/perl/Git/FromCPAN/Error.pm index 6098135ae2..6098135ae2 100644 --- a/perl/private-Error.pm +++ b/perl/Git/FromCPAN/Error.pm diff --git a/perl/Git/I18N.pm b/perl/Git/I18N.pm index 836a5c2382..dba96fff0a 100644 --- a/perl/Git/I18N.pm +++ b/perl/Git/I18N.pm @@ -18,7 +18,7 @@ our @EXPORT_OK = @EXPORT; sub __bootstrap_locale_messages { our $TEXTDOMAIN = 'git'; - our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '++LOCALEDIR++'; + our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@'; require POSIX; POSIX->import(qw(setlocale)); diff --git a/perl/Makefile b/perl/Makefile deleted file mode 100644 index f657de20e3..0000000000 --- a/perl/Makefile +++ /dev/null @@ -1,90 +0,0 @@ -# -# Makefile for perl support modules and routine -# -makfile:=perl.mak -modules = - -PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) -prefix_SQ = $(subst ','\'',$(prefix)) -localedir_SQ = $(subst ','\'',$(localedir)) - -ifndef V - QUIET = @ -endif - -all install instlibdir: $(makfile) - $(QUIET)$(MAKE) -f $(makfile) $@ - -clean: - $(QUIET)test -f $(makfile) && $(MAKE) -f $(makfile) $@ || exit 0 - $(RM) ppport.h - $(RM) $(makfile) - $(RM) $(makfile).old - $(RM) PM.stamp - -$(makfile): PM.stamp - -ifdef NO_PERL_MAKEMAKER -instdir_SQ = $(subst ','\'',$(prefix)/lib) - -modules += Git -modules += Git/I18N -modules += Git/IndexInfo -modules += Git/Packet -modules += Git/SVN -modules += Git/SVN/Memoize/YAML -modules += Git/SVN/Fetcher -modules += Git/SVN/Editor -modules += Git/SVN/GlobSpec -modules += Git/SVN/Log -modules += Git/SVN/Migration -modules += Git/SVN/Prompt -modules += Git/SVN/Ra -modules += Git/SVN/Utils - -$(makfile): ../GIT-CFLAGS Makefile - echo all: private-Error.pm Git.pm Git/I18N.pm > $@ - set -e; \ - for i in $(modules); \ - do \ - if test $$i = $${i%/*}; \ - then \ - subdir=; \ - else \ - subdir=/$${i%/*}; \ - fi; \ - echo ' $(RM) blib/lib/'$$i'.pm' >> $@; \ - echo ' mkdir -p blib/lib'$$subdir >> $@; \ - echo ' cp '$$i'.pm blib/lib/'$$i'.pm' >> $@; \ - done - echo ' $(RM) blib/lib/Error.pm' >> $@ - '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \ - echo ' cp private-Error.pm blib/lib/Error.pm' >> $@ - echo install: >> $@ - set -e; \ - for i in $(modules); \ - do \ - if test $$i = $${i%/*}; \ - then \ - subdir=; \ - else \ - subdir=/$${i%/*}; \ - fi; \ - echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \ - echo ' mkdir -p "$$(DESTDIR)$(instdir_SQ)'$$subdir'"' >> $@; \ - echo ' cp '$$i'.pm "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \ - done - echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@ - '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \ - echo ' cp private-Error.pm "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@ - echo instlibdir: >> $@ - echo ' echo $(instdir_SQ)' >> $@ -else -$(makfile): Makefile.PL ../GIT-CFLAGS - $(PERL_PATH) $< PREFIX='$(prefix_SQ)' INSTALL_BASE='' --localedir='$(localedir_SQ)' -endif - -# this is just added comfort for calling make directly in perl dir -# (even though GIT-CFLAGS aren't used yet. If ever) -../GIT-CFLAGS: - $(MAKE) -C .. 
GIT-CFLAGS diff --git a/perl/Makefile.PL b/perl/Makefile.PL deleted file mode 100644 index 3f29ba98a6..0000000000 --- a/perl/Makefile.PL +++ /dev/null @@ -1,62 +0,0 @@ -use strict; -use warnings; -use ExtUtils::MakeMaker; -use Getopt::Long; -use File::Find; - -# Don't forget to update the perl/Makefile, too. -# Don't forget to test with NO_PERL_MAKEMAKER=YesPlease - -# Sanity: die at first unknown option -Getopt::Long::Configure qw/ pass_through /; - -my $localedir = ''; -GetOptions("localedir=s" => \$localedir); - -sub MY::postamble { - return <<'MAKE_FRAG'; -instlibdir: - @echo '$(INSTALLSITELIB)' - -ifneq (,$(DESTDIR)) -ifeq (0,$(shell expr '$(MM_VERSION)' '>' 6.10)) -$(error ExtUtils::MakeMaker version "$(MM_VERSION)" is older than 6.11 and so \ - is likely incompatible with the DESTDIR mechanism. Try setting \ - NO_PERL_MAKEMAKER=1 instead) -endif -endif - -MAKE_FRAG -} - -# Find all the .pm files in "Git/" and Git.pm -my %pm; -find sub { - return unless /\.pm$/; - - # sometimes File::Find prepends a ./ Strip it. - my $pm_path = $File::Find::name; - $pm_path =~ s{^\./}{}; - - $pm{$pm_path} = '$(INST_LIBDIR)/'.$pm_path; -}, "Git", "Git.pm"; - - -# We come with our own bundled Error.pm. It's not in the set of default -# Perl modules so install it if it's not available on the system yet. -if ( !eval { require Error } || $Error::VERSION < 0.15009) { - $pm{'private-Error.pm'} = '$(INST_LIBDIR)/Error.pm'; -} - -# redirect stdout, otherwise the message "Writing perl.mak for Git" -# disrupts the output for the target 'instlibdir' -open STDOUT, ">&STDERR"; - -WriteMakefile( - NAME => 'Git', - VERSION_FROM => 'Git.pm', - PM => \%pm, - PM_FILTER => qq[\$(PERL) -pe "s<\\Q++LOCALEDIR++\\E><$localedir>"], - MAKEFILE => 'perl.mak', - INSTALLSITEMAN3DIR => '$(SITEPREFIX)/share/man/man3' -); diff --git a/preload-index.c b/preload-index.c index 2a83255e4e..4d08d44874 100644 --- a/preload-index.c +++ b/preload-index.c @@ -78,6 +78,7 @@ static void preload_index(struct index_state *index, { int threads, i, work, offset; struct thread_data data[MAX_PARALLEL]; + uint64_t start = getnanotime(); if (!core_preload_index) return; @@ -108,6 +109,7 @@ static void preload_index(struct index_state *index, if (pthread_join(p->pthread, NULL)) die("unable to join threaded lstat"); } + trace_performance_since(start, "preload index"); } #endif @@ -43,6 +43,22 @@ void sq_quote_buf(struct strbuf *dst, const char *src) free(to_free); } +void sq_quote_buf_pretty(struct strbuf *dst, const char *src) +{ + static const char ok_punct[] = "+,-./:=@_^"; + const char *p; + + for (p = src; *p; p++) { + if (!isalpha(*p) && !isdigit(*p) && !strchr(ok_punct, *p)) { + sq_quote_buf(dst, src); + return; + } + } + + /* if we get here, we did not need quoting */ + strbuf_addstr(dst, src); +} + void sq_quotef(struct strbuf *dst, const char *fmt, ...) { struct strbuf src = STRBUF_INIT; @@ -56,7 +72,7 @@ void sq_quotef(struct strbuf *dst, const char *fmt, ...) 
strbuf_release(&src); } -void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) +void sq_quote_argv(struct strbuf *dst, const char **argv) { int i; @@ -65,8 +81,16 @@ void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) for (i = 0; argv[i]; ++i) { strbuf_addch(dst, ' '); sq_quote_buf(dst, argv[i]); - if (maxlen && dst->len > maxlen) - die("Too many or long arguments"); + } +} + +void sq_quote_argv_pretty(struct strbuf *dst, const char **argv) +{ + int i; + + for (i = 0; argv[i]; i++) { + strbuf_addch(dst, ' '); + sq_quote_buf_pretty(dst, argv[i]); } } @@ -94,9 +118,15 @@ static char *sq_dequote_step(char *arg, char **next) *next = NULL; return arg; case '\\': - c = *++src; - if (need_bs_quote(c) && *++src == '\'') { - *dst++ = c; + /* + * Allow backslashed characters outside of + * single-quotes only if they need escaping, + * and only if we resume the single-quoted part + * afterward. + */ + if (need_bs_quote(src[1]) && src[2] == '\'') { + *dst++ = src[1]; + src += 2; continue; } /* Fallthrough */ @@ -30,9 +30,17 @@ struct strbuf; */ extern void sq_quote_buf(struct strbuf *, const char *src); -extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); +extern void sq_quote_argv(struct strbuf *, const char **argv); extern void sq_quotef(struct strbuf *, const char *fmt, ...); +/* + * These match their non-pretty variants, except that they avoid + * quoting when there are no exotic characters. These should only be used for + * human-readable output, as sq_dequote() is not smart enough to dequote it. + */ +void sq_quote_buf_pretty(struct strbuf *, const char *src); +void sq_quote_argv_pretty(struct strbuf *, const char **argv); + /* This unwraps what sq_quote() produces in place, but returns * NULL if the input does not look like what sq_quote would have * produced. diff --git a/read-cache.c b/read-cache.c index 2eb81a66b9..28bf0db9d9 100644 --- a/read-cache.c +++ b/read-cache.c @@ -631,10 +631,10 @@ static struct cache_entry *create_alias_ce(struct index_state *istate, void set_object_name_for_intent_to_add_entry(struct cache_entry *ce) { - unsigned char sha1[20]; - if (write_sha1_file("", 0, blob_type, sha1)) + struct object_id oid; + if (write_object_file("", 0, blob_type, &oid)) die("cannot create an empty blob in the object database"); - hashcpy(ce->oid.hash, sha1); + oidcpy(&ce->oid, &oid); } int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags) @@ -1217,9 +1217,8 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti /* Add it in.. */ istate->cache_nr++; if (istate->cache_nr > pos + 1) - memmove(istate->cache + pos + 1, - istate->cache + pos, - (istate->cache_nr - pos - 1) * sizeof(ce)); + MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos, + istate->cache_nr - pos - 1); set_index_entry(istate, pos, ce); istate->cache_changed |= CE_ENTRY_ADDED; return 0; @@ -1372,6 +1371,7 @@ int refresh_index(struct index_state *istate, unsigned int flags, const char *typechange_fmt; const char *added_fmt; const char *unmerged_fmt; + uint64_t start = getnanotime(); modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n"); deleted_fmt = (in_porcelain ? 
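The "pretty" quoting helpers above quote an argument only when it contains characters outside a small safe set, keeping echoed command lines readable; as the header comment notes, the output is meant for human eyes and sq_dequote() may not round-trip it. A short usage sketch, assuming quote.h and strbuf.h; the sample strings are arbitrary:

    #include "cache.h"
    #include "quote.h"

    static void show_pretty_quoting(void)
    {
        struct strbuf buf = STRBUF_INIT;

        sq_quote_buf_pretty(&buf, "v2.17.0-rc0");   /* safe chars: left as-is */
        strbuf_addch(&buf, ' ');
        sq_quote_buf_pretty(&buf, "hello world");   /* space: gets 'quoted' */

        /* buf.buf now reads: v2.17.0-rc0 'hello world' */
        strbuf_release(&buf);
    }
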
"D\t%s\n" : "%s: needs update\n"); @@ -1442,6 +1442,7 @@ int refresh_index(struct index_state *istate, unsigned int flags, replace_index_entry(istate, i, new); } + trace_performance_since(start, "refresh index"); return has_errors; } @@ -1545,8 +1546,8 @@ int verify_ce_order; static int verify_hdr(struct cache_header *hdr, unsigned long size) { - git_SHA_CTX c; - unsigned char sha1[20]; + git_hash_ctx c; + unsigned char hash[GIT_MAX_RAWSZ]; int hdr_version; if (hdr->hdr_signature != htonl(CACHE_SIGNATURE)) @@ -1558,10 +1559,10 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size) if (!verify_index_checksum) return 0; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, size - 20); - git_SHA1_Final(sha1, &c); - if (hashcmp(sha1, (unsigned char *)hdr + size - 20)) + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz); + the_hash_algo->final_fn(hash, &c); + if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz)) return error("bad index file sha1 signature"); return 0; } @@ -1603,7 +1604,7 @@ int hold_locked_index(struct lock_file *lk, int lock_flags) int read_index(struct index_state *istate) { - return read_index_from(istate, get_index_file()); + return read_index_from(istate, get_index_file(), get_git_dir()); } static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk, @@ -1791,7 +1792,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) die_errno("cannot stat the open index"); mmap_size = xsize_t(st.st_size); - if (mmap_size < sizeof(struct cache_header) + 20) + if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz) die("index file smaller than expected"); mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0); @@ -1803,7 +1804,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) if (verify_hdr(hdr, mmap_size) < 0) goto unmap; - hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20); + hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz); istate->version = ntohl(hdr->hdr_version); istate->cache_nr = ntohl(hdr->hdr_entries); istate->cache_alloc = alloc_nr(istate->cache_nr); @@ -1831,7 +1832,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) istate->timestamp.sec = st.st_mtime; istate->timestamp.nsec = ST_MTIME_NSEC(st); - while (src_offset <= mmap_size - 20 - 8) { + while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) { /* After an array of active_nr index entries, * there can be arbitrary number of extended * sections, each of which is prefixed with @@ -1863,26 +1864,27 @@ unmap: * This way, shared index can be removed if they have not been used * for some time. 
*/ -static void freshen_shared_index(char *base_sha1_hex, int warn) +static void freshen_shared_index(const char *shared_index, int warn) { - char *shared_index = git_pathdup("sharedindex.%s", base_sha1_hex); if (!check_and_freshen_file(shared_index, 1) && warn) warning("could not freshen shared index '%s'", shared_index); - free(shared_index); } -int read_index_from(struct index_state *istate, const char *path) +int read_index_from(struct index_state *istate, const char *path, + const char *gitdir) { + uint64_t start = getnanotime(); struct split_index *split_index; int ret; char *base_sha1_hex; - const char *base_path; + char *base_path; /* istate->initialized covers both .git/index and .git/sharedindex.xxx */ if (istate->initialized) return istate->cache_nr; ret = do_read_index(istate, path, 0); + trace_performance_since(start, "read cache %s", path); split_index = istate->split_index; if (!split_index || is_null_sha1(split_index->base_sha1)) { @@ -1896,16 +1898,18 @@ int read_index_from(struct index_state *istate, const char *path) split_index->base = xcalloc(1, sizeof(*split_index->base)); base_sha1_hex = sha1_to_hex(split_index->base_sha1); - base_path = git_path("sharedindex.%s", base_sha1_hex); + base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex); ret = do_read_index(split_index->base, base_path, 1); if (hashcmp(split_index->base_sha1, split_index->base->sha1)) die("broken index, expect %s in %s, got %s", base_sha1_hex, base_path, sha1_to_hex(split_index->base->sha1)); - freshen_shared_index(base_sha1_hex, 0); + freshen_shared_index(base_path, 0); merge_base_index(istate); post_read_index_from(istate); + trace_performance_since(start, "read cache %s", base_path); + free(base_path); return ret; } @@ -1957,11 +1961,11 @@ int unmerged_index(const struct index_state *istate) static unsigned char write_buffer[WRITE_BUFFER_SIZE]; static unsigned long write_buffer_len; -static int ce_write_flush(git_SHA_CTX *context, int fd) +static int ce_write_flush(git_hash_ctx *context, int fd) { unsigned int buffered = write_buffer_len; if (buffered) { - git_SHA1_Update(context, write_buffer, buffered); + the_hash_algo->update_fn(context, write_buffer, buffered); if (write_in_full(fd, write_buffer, buffered) < 0) return -1; write_buffer_len = 0; @@ -1969,7 +1973,7 @@ static int ce_write_flush(git_SHA_CTX *context, int fd) return 0; } -static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len) +static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len) { while (len) { unsigned int buffered = write_buffer_len; @@ -1991,7 +1995,7 @@ static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len) return 0; } -static int write_index_ext_header(git_SHA_CTX *context, int fd, +static int write_index_ext_header(git_hash_ctx *context, int fd, unsigned int ext, unsigned int sz) { ext = htonl(ext); @@ -2000,26 +2004,26 @@ static int write_index_ext_header(git_SHA_CTX *context, int fd, (ce_write(context, fd, &sz, 4) < 0)) ? 
-1 : 0; } -static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1) +static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash) { unsigned int left = write_buffer_len; if (left) { write_buffer_len = 0; - git_SHA1_Update(context, write_buffer, left); + the_hash_algo->update_fn(context, write_buffer, left); } - /* Flush first if not enough space for SHA1 signature */ - if (left + 20 > WRITE_BUFFER_SIZE) { + /* Flush first if not enough space for hash signature */ + if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) { if (write_in_full(fd, write_buffer, left) < 0) return -1; left = 0; } - /* Append the SHA1 signature at the end */ - git_SHA1_Final(write_buffer + left, context); - hashcpy(sha1, write_buffer + left); - left += 20; + /* Append the hash signature at the end */ + the_hash_algo->final_fn(write_buffer + left, context); + hashcpy(hash, write_buffer + left); + left += the_hash_algo->rawsz; return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0; } @@ -2100,7 +2104,7 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk, } } -static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce, +static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce, struct strbuf *previous_name, struct ondisk_cache_entry *ondisk) { int size; @@ -2167,7 +2171,7 @@ static int verify_index_from(const struct index_state *istate, const char *path) int fd; ssize_t n; struct stat st; - unsigned char sha1[20]; + unsigned char hash[GIT_MAX_RAWSZ]; if (!istate->initialized) return 0; @@ -2179,14 +2183,14 @@ static int verify_index_from(const struct index_state *istate, const char *path) if (fstat(fd, &st)) goto out; - if (st.st_size < sizeof(struct cache_header) + 20) + if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz) goto out; - n = pread_in_full(fd, sha1, 20, st.st_size - 20); - if (n != 20) + n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz); + if (n != the_hash_algo->rawsz) goto out; - if (hashcmp(istate->sha1, sha1)) + if (hashcmp(istate->sha1, hash)) goto out; close(fd); @@ -2234,8 +2238,9 @@ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile static int do_write_index(struct index_state *istate, struct tempfile *tempfile, int strip_extensions) { + uint64_t start = getnanotime(); int newfd = tempfile->fd; - git_SHA_CTX c; + git_hash_ctx c; struct cache_header hdr; int i, err = 0, removed, extended, hdr_version; struct cache_entry **cache = istate->cache; @@ -2243,7 +2248,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, struct stat st; struct ondisk_cache_entry_extended ondisk; struct strbuf previous_name_buf = STRBUF_INIT, *previous_name; - int drop_cache_tree = 0; + int drop_cache_tree = istate->drop_cache_tree; for (i = removed = extended = 0; i < entries; i++) { if (cache[i]->ce_flags & CE_REMOVE) @@ -2273,7 +2278,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, hdr.hdr_version = htonl(hdr_version); hdr.hdr_entries = htonl(entries - removed); - git_SHA1_Init(&c); + the_hash_algo->init_fn(&c); if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0) return -1; @@ -2374,6 +2379,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, return -1; istate->timestamp.sec = (unsigned int)st.st_mtime; istate->timestamp.nsec = ST_MTIME_NSEC(st); + trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed); return 0; } @@ -2472,32 
+2478,21 @@ static int clean_shared_index_files(const char *current_hex) } static int write_shared_index(struct index_state *istate, - struct lock_file *lock, unsigned flags) + struct tempfile **temp) { - struct tempfile *temp; struct split_index *si = istate->split_index; int ret; - temp = mks_tempfile(git_path("sharedindex_XXXXXX")); - if (!temp) { - hashclr(si->base_sha1); - return do_write_locked_index(istate, lock, flags); - } move_cache_to_base_index(istate); - ret = do_write_index(si->base, temp, 1); - if (ret) { - delete_tempfile(&temp); + ret = do_write_index(si->base, *temp, 1); + if (ret) return ret; - } - ret = adjust_shared_perm(get_tempfile_path(temp)); + ret = adjust_shared_perm(get_tempfile_path(*temp)); if (ret) { - int save_errno = errno; - error("cannot fix permission bits on %s", get_tempfile_path(temp)); - delete_tempfile(&temp); - errno = save_errno; + error("cannot fix permission bits on %s", get_tempfile_path(*temp)); return ret; } - ret = rename_tempfile(&temp, + ret = rename_tempfile(temp, git_path("sharedindex.%s", sha1_to_hex(si->base->sha1))); if (!ret) { hashcpy(si->base_sha1, si->base->sha1); @@ -2565,7 +2560,22 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock, new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED; if (new_shared_index) { - ret = write_shared_index(istate, lock, flags); + struct tempfile *temp; + int saved_errno; + + temp = mks_tempfile(git_path("sharedindex_XXXXXX")); + if (!temp) { + hashclr(si->base_sha1); + ret = do_write_locked_index(istate, lock, flags); + goto out; + } + ret = write_shared_index(istate, &temp); + + saved_errno = errno; + if (is_tempfile_active(temp)) + delete_tempfile(&temp); + errno = saved_errno; + if (ret) goto out; } @@ -2573,8 +2583,11 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock, ret = write_split_index(istate, lock, flags); /* Freshen the shared index only if the split-index was written */ - if (!ret && !new_shared_index) - freshen_shared_index(sha1_to_hex(si->base_sha1), 1); + if (!ret && !new_shared_index) { + const char *shared_index = git_path("sharedindex.%s", + sha1_to_hex(si->base_sha1)); + freshen_shared_index(shared_index, 1); + } out: if (flags & COMMIT_LOCK) diff --git a/refs/files-backend.c b/refs/files-backend.c index f75d960e19..bec8e30e9e 100644 --- a/refs/files-backend.c +++ b/refs/files-backend.c @@ -2931,13 +2931,12 @@ static int files_initial_transaction_commit(struct ref_store *ref_store, if (initial_ref_transaction_commit(packed_transaction, err)) { ret = TRANSACTION_GENERIC_ERROR; - goto cleanup; } + packed_refs_unlock(refs->packed_ref_store); cleanup: if (packed_transaction) ref_transaction_free(packed_transaction); - packed_refs_unlock(refs->packed_ref_store); transaction->state = REF_TRANSACTION_CLOSED; string_list_clear(&affected_refnames, 0); return ret; diff --git a/refs/packed-backend.c b/refs/packed-backend.c index 023243fd5f..65288c6472 100644 --- a/refs/packed-backend.c +++ b/refs/packed-backend.c @@ -68,17 +68,21 @@ struct snapshot { int mmapped; /* - * The contents of the `packed-refs` file. If the file was - * already sorted, this points at the mmapped contents of the - * file. If not, this points at heap-allocated memory - * containing the contents, sorted. If there were no contents - * (e.g., because the file didn't exist), `buf` and `eof` are - * both NULL. 
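The write_locked_index()/write_shared_index() reshuffle above concentrates tempfile ownership in one caller, using the tempfile API: mks_tempfile() creates the file, rename_tempfile() publishes it under its final name, and delete_tempfile() discards it, with is_tempfile_active() distinguishing the two outcomes after an error. A reduced sketch of that ownership pattern, assuming tempfile.h; the destination name is hypothetical:

    #include "cache.h"
    #include "tempfile.h"

    /* Hypothetical: create a temporary file, try to publish it, and
     * clean up if anything left it behind. */
    static int write_then_publish(void)
    {
        struct tempfile *temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
        int ret;

        if (!temp)
            return error_errno("cannot create temporary file");

        /* ... write content through get_tempfile_fd(temp) ... */

        ret = rename_tempfile(&temp, git_path("sharedindex.demo"));

        /* Mirrors the write_locked_index() hunk above: discard the
         * tempfile if it is still active after a failure. */
        if (is_tempfile_active(temp))
            delete_tempfile(&temp);
        return ret;
    }
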
+ * The contents of the `packed-refs` file: + * + * - buf -- a pointer to the start of the memory + * - start -- a pointer to the first byte of actual references + * (i.e., after the header line, if one is present) + * - eof -- a pointer just past the end of the reference + * contents + * + * If the `packed-refs` file was already sorted, `buf` points + * at the mmapped contents of the file. If not, it points at + * heap-allocated memory containing the contents, sorted. If + * there were no contents (e.g., because the file didn't + * exist), `buf`, `start`, and `eof` are all NULL. */ - char *buf, *eof; - - /* The size of the header line, if any; otherwise, 0: */ - size_t header_len; + char *buf, *start, *eof; /* * What is the peeled state of the `packed-refs` file that @@ -169,8 +173,7 @@ static void clear_snapshot_buffer(struct snapshot *snapshot) } else { free(snapshot->buf); } - snapshot->buf = snapshot->eof = NULL; - snapshot->header_len = 0; + snapshot->buf = snapshot->start = snapshot->eof = NULL; } /* @@ -319,13 +322,14 @@ static void sort_snapshot(struct snapshot *snapshot) size_t len, i; char *new_buffer, *dst; - pos = snapshot->buf + snapshot->header_len; + pos = snapshot->start; eof = snapshot->eof; - len = eof - pos; - if (!len) + if (pos == eof) return; + len = eof - pos; + /* * Initialize records based on a crude estimate of the number * of references in the file (we'll grow it below if needed): @@ -391,9 +395,8 @@ static void sort_snapshot(struct snapshot *snapshot) * place: */ clear_snapshot_buffer(snapshot); - snapshot->buf = new_buffer; + snapshot->buf = snapshot->start = new_buffer; snapshot->eof = new_buffer + len; - snapshot->header_len = 0; cleanup: free(records); @@ -442,23 +445,26 @@ static const char *find_end_of_record(const char *p, const char *end) */ static void verify_buffer_safe(struct snapshot *snapshot) { - const char *buf = snapshot->buf + snapshot->header_len; + const char *start = snapshot->start; const char *eof = snapshot->eof; const char *last_line; - if (buf == eof) + if (start == eof) return; - last_line = find_start_of_record(buf, eof - 1); + last_line = find_start_of_record(start, eof - 1); if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2) die_invalid_line(snapshot->refs->path, last_line, eof - last_line); } +#define SMALL_FILE_SIZE (32*1024) + /* * Depending on `mmap_strategy`, either mmap or read the contents of * the `packed-refs` file into the snapshot. Return 1 if the file - * existed and was read, or 0 if the file was absent. Die on errors. + * existed and was read, or 0 if the file was absent or empty. Die on + * errors. 
*/ static int load_contents(struct snapshot *snapshot) { @@ -489,24 +495,23 @@ static int load_contents(struct snapshot *snapshot) die_errno("couldn't stat %s", snapshot->refs->path); size = xsize_t(st.st_size); - switch (mmap_strategy) { - case MMAP_NONE: + if (!size) { + return 0; + } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) { snapshot->buf = xmalloc(size); bytes_read = read_in_full(fd, snapshot->buf, size); if (bytes_read < 0 || bytes_read != size) die_errno("couldn't read %s", snapshot->refs->path); - snapshot->eof = snapshot->buf + size; snapshot->mmapped = 0; - break; - case MMAP_TEMPORARY: - case MMAP_OK: + } else { snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); - snapshot->eof = snapshot->buf + size; snapshot->mmapped = 1; - break; } close(fd); + snapshot->start = snapshot->buf; + snapshot->eof = snapshot->buf + size; + return 1; } @@ -515,9 +520,11 @@ static int load_contents(struct snapshot *snapshot) * `refname` starts. If `mustexist` is true and the reference doesn't * exist, then return NULL. If `mustexist` is false and the reference * doesn't exist, then return the point where that reference would be - * inserted. In the latter mode, `refname` doesn't have to be a proper - * reference name; for example, one could search for "refs/replace/" - * to find the start of any replace references. + * inserted, or `snapshot->eof` (which might be NULL) if it would be + * inserted at the end of the file. In the latter mode, `refname` + * doesn't have to be a proper reference name; for example, one could + * search for "refs/replace/" to find the start of any replace + * references. * * The record is sought using a binary search, so `snapshot->buf` must * be sorted. @@ -539,7 +546,7 @@ static const char *find_reference_location(struct snapshot *snapshot, * preceding records all have reference names that come * *before* `refname`. */ - const char *lo = snapshot->buf + snapshot->header_len; + const char *lo = snapshot->start; /* * A pointer to a the first character of a record whose @@ -547,7 +554,7 @@ static const char *find_reference_location(struct snapshot *snapshot, */ const char *hi = snapshot->eof; - while (lo < hi) { + while (lo != hi) { const char *mid, *rec; int cmp; @@ -616,9 +623,7 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) /* If the file has a header line, process it: */ if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') { - struct strbuf tmp = STRBUF_INIT; - char *p; - const char *eol; + char *tmp, *p, *eol; struct string_list traits = STRING_LIST_INIT_NODUP; eol = memchr(snapshot->buf, '\n', @@ -628,9 +633,9 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) snapshot->buf, snapshot->eof - snapshot->buf); - strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf); + tmp = xmemdupz(snapshot->buf, eol - snapshot->buf); - if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p)) + if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p)) die_invalid_line(refs->path, snapshot->buf, snapshot->eof - snapshot->buf); @@ -647,10 +652,10 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) /* perhaps other traits later as well */ /* The "+ 1" is for the LF character. 
*/ - snapshot->header_len = eol + 1 - snapshot->buf; + snapshot->start = eol + 1; string_list_clear(&traits, 0); - strbuf_release(&tmp); + free(tmp); } verify_buffer_safe(snapshot); @@ -671,13 +676,12 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) * We don't want to leave the file mmapped, so we are * forced to make a copy now: */ - size_t size = snapshot->eof - - (snapshot->buf + snapshot->header_len); + size_t size = snapshot->eof - snapshot->start; char *buf_copy = xmalloc(size); - memcpy(buf_copy, snapshot->buf + snapshot->header_len, size); + memcpy(buf_copy, snapshot->start, size); clear_snapshot_buffer(snapshot); - snapshot->buf = buf_copy; + snapshot->buf = snapshot->start = buf_copy; snapshot->eof = buf_copy + size; } @@ -924,7 +928,12 @@ static struct ref_iterator *packed_ref_iterator_begin( */ snapshot = get_snapshot(refs); - if (!snapshot->buf) + if (prefix && *prefix) + start = find_reference_location(snapshot, prefix, 0); + else + start = snapshot->start; + + if (start == snapshot->eof) return empty_ref_iterator_begin(); iter = xcalloc(1, sizeof(*iter)); @@ -934,11 +943,6 @@ static struct ref_iterator *packed_ref_iterator_begin( iter->snapshot = snapshot; acquire_snapshot(snapshot); - if (prefix && *prefix) - start = find_reference_location(snapshot, prefix, 0); - else - start = snapshot->buf + snapshot->header_len; - iter->pos = start; iter->eof = snapshot->eof; strbuf_init(&iter->refname_buf, 0); diff --git a/refs/ref-cache.c b/refs/ref-cache.c index 82c1cf90a7..e90bd3e727 100644 --- a/refs/ref-cache.c +++ b/refs/ref-cache.c @@ -238,10 +238,8 @@ int remove_entry_from_dir(struct ref_dir *dir, const char *refname) return -1; entry = dir->entries[entry_index]; - memmove(&dir->entries[entry_index], - &dir->entries[entry_index + 1], - (dir->nr - entry_index - 1) * sizeof(*dir->entries) - ); + MOVE_ARRAY(&dir->entries[entry_index], + &dir->entries[entry_index + 1], dir->nr - entry_index - 1); dir->nr--; if (dir->sorted > entry_index) dir->sorted--; diff --git a/remote-curl.c b/remote-curl.c index 0053b09549..a7c4c9b5ff 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -13,6 +13,7 @@ #include "credential.h" #include "sha1-array.h" #include "send-pack.h" +#include "quote.h" static struct remote *remote; /* always ends with a trailing slash */ @@ -24,6 +25,7 @@ struct options { char *deepen_since; struct string_list deepen_not; struct string_list push_options; + char *filter; unsigned progress : 1, check_self_contained_and_connected : 1, cloning : 1, @@ -33,7 +35,9 @@ struct options { thin : 1, /* One of the SEND_PACK_PUSH_CERT_* constants. 
*/ push_cert : 2, - deepen_relative : 1; + deepen_relative : 1, + from_promisor : 1, + no_dependents : 1; }; static struct options options; static struct string_list cas_options = STRING_LIST_INIT_DUP; @@ -142,7 +146,15 @@ static int set_option(const char *name, const char *value) return -1; return 0; } else if (!strcmp(name, "push-option")) { - string_list_append(&options.push_options, value); + if (*value != '"') + string_list_append(&options.push_options, value); + else { + struct strbuf unquoted = STRBUF_INIT; + if (unquote_c_style(&unquoted, value, NULL) < 0) + die("invalid quoting in push-option value"); + string_list_append_nodup(&options.push_options, + strbuf_detach(&unquoted, NULL)); + } return 0; #if LIBCURL_VERSION_NUM >= 0x070a08 @@ -157,6 +169,15 @@ static int set_option(const char *name, const char *value) return -1; return 0; #endif /* LIBCURL_VERSION_NUM >= 0x070a08 */ + } else if (!strcmp(name, "from-promisor")) { + options.from_promisor = 1; + return 0; + } else if (!strcmp(name, "no-dependents")) { + options.no_dependents = 1; + return 0; + } else if (!strcmp(name, "filter")) { + options.filter = xstrdup(value); + return 0; } else { return 1 /* unsupported */; } @@ -339,6 +360,8 @@ static struct discovery *discover_refs(const char *service, int for_push) * pkt-line matches our request. */ line = packet_read_line_buf(&last->buf, &last->len, NULL); + if (!line) + die("invalid server response; expected service, got flush packet"); strbuf_reset(&exp); strbuf_addf(&exp, "# service=%s", service); @@ -822,6 +845,12 @@ static int fetch_git(struct discovery *heads, options.deepen_not.items[i].string); if (options.deepen_relative && options.depth) argv_array_push(&args, "--deepen-relative"); + if (options.from_promisor) + argv_array_push(&args, "--from-promisor"); + if (options.no_dependents) + argv_array_push(&args, "--no-dependents"); + if (options.filter) + argv_array_pushf(&args, "--filter=%s", options.filter); argv_array_push(&args, url.buf); for (i = 0; i < nr_heads; i++) { @@ -22,6 +22,7 @@ static struct refspec s_tag_refspec = { "refs/tags/*" }; +/* See TAG_REFSPEC for the string version */ const struct refspec *tag_refspec = &s_tag_refspec; struct counted_string { @@ -103,6 +104,17 @@ static void add_fetch_refspec(struct remote *remote, const char *ref) remote->fetch_refspec[remote->fetch_refspec_nr++] = ref; } +void add_prune_tags_to_fetch_refspec(struct remote *remote) +{ + int nr = remote->fetch_refspec_nr; + int bufsize = nr + 1; + int size = sizeof(struct refspec); + + remote->fetch = xrealloc(remote->fetch, size * bufsize); + memcpy(&remote->fetch[nr], tag_refspec, size); + add_fetch_refspec(remote, xstrdup(TAG_REFSPEC)); +} + static void add_url(struct remote *remote, const char *url) { ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc); @@ -173,6 +185,7 @@ static struct remote *make_remote(const char *name, int len) ret = xcalloc(1, sizeof(struct remote)); ret->prune = -1; /* unspecified */ + ret->prune_tags = -1; /* unspecified */ ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc); remotes[remotes_nr++] = ret; ret->name = xstrndup(name, len); @@ -391,6 +404,8 @@ static int handle_config(const char *key, const char *value, void *cb) remote->skip_default_update = git_config_bool(key, value); else if (!strcmp(subkey, "prune")) remote->prune = git_config_bool(key, value); + else if (!strcmp(subkey, "prunetags")) + remote->prune_tags = git_config_bool(key, value); else if (!strcmp(subkey, "url")) { const char *v; if (git_config_string(&v, key, value)) @@ 
-47,6 +47,7 @@ struct remote { int skip_default_update; int mirror; int prune; + int prune_tags; const char *receivepack; const char *uploadpack; @@ -297,4 +298,8 @@ extern int parseopt_push_cas_option(const struct option *, const char *arg, int extern int is_empty_cas(const struct push_cas_option *); void apply_push_cas(struct push_cas_option *, struct remote *, struct ref *); +#define TAG_REFSPEC "refs/tags/*:refs/tags/*" + +void add_prune_tags_to_fetch_refspec(struct remote *remote); + #endif diff --git a/replace_object.c b/replace_object.c index f0b39f06d5..3e49965d05 100644 --- a/replace_object.c +++ b/replace_object.c @@ -44,10 +44,8 @@ static int register_replace_object(struct replace_object *replace, ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc); replace_object_nr++; if (pos < replace_object_nr) - memmove(replace_object + pos + 1, - replace_object + pos, - (replace_object_nr - pos - 1) * - sizeof(*replace_object)); + MOVE_ARRAY(replace_object + pos + 1, replace_object + pos, + replace_object_nr - pos - 1); replace_object[pos] = replace; return 0; } diff --git a/repository.c b/repository.c index f66fcb1342..4ffbe9bc94 100644 --- a/repository.c +++ b/repository.c @@ -236,5 +236,5 @@ int repo_read_index(struct repository *repo) if (!repo->index) repo->index = xcalloc(1, sizeof(*repo->index)); - return read_index_from(repo->index, repo->index_file); + return read_index_from(repo->index, repo->index_file, repo->gitdir); } @@ -159,8 +159,8 @@ static struct rerere_dir *find_rerere_dir(const char *hex) ALLOC_GROW(rerere_dir, rerere_dir_nr + 1, rerere_dir_alloc); /* ... and add it in. */ rerere_dir_nr++; - memmove(rerere_dir + pos + 1, rerere_dir + pos, - (rerere_dir_nr - pos - 1) * sizeof(*rerere_dir)); + MOVE_ARRAY(rerere_dir + pos + 1, rerere_dir + pos, + rerere_dir_nr - pos - 1); rerere_dir[pos] = rr_dir; scan_rerere_dir(rr_dir); } diff --git a/revision.c b/revision.c index a60628fbff..5ce9b93baa 100644 --- a/revision.c +++ b/revision.c @@ -198,6 +198,8 @@ static struct object *get_reference(struct rev_info *revs, const char *name, if (!object) { if (revs->ignore_missing) return object; + if (revs->exclude_promisor_objects && is_promisor_object(oid)) + return NULL; die("bad object %s", name); } object->flags |= flags; @@ -799,9 +801,17 @@ static int add_parents_to_list(struct rev_info *revs, struct commit *commit, for (parent = commit->parents; parent; parent = parent->next) { struct commit *p = parent->item; - - if (parse_commit_gently(p, revs->ignore_missing_links) < 0) + int gently = revs->ignore_missing_links || + revs->exclude_promisor_objects; + if (parse_commit_gently(p, gently) < 0) { + if (revs->exclude_promisor_objects && + is_promisor_object(&p->object.oid)) { + if (revs->first_parent_only) + break; + continue; + } return -1; + } if (revs->show_source && !p->util) p->util = commit->util; p->object.flags |= left_flag; @@ -1352,7 +1362,8 @@ void add_index_objects_to_pending(struct rev_info *revs, unsigned int flags) continue; /* current index already taken care of */ if (read_index_from(&istate, - worktree_git_path(wt, "index")) > 0) + worktree_git_path(wt, "index"), + get_worktree_git_dir(wt)) > 0) do_add_index_objects_to_pending(revs, &istate); discard_index(&istate); } @@ -2100,6 +2111,10 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg revs->limited = 1; } else if (!strcmp(arg, "--ignore-missing")) { revs->ignore_missing = 1; + } else if (!strcmp(arg, "--exclude-promisor-objects")) { + if (fetch_if_missing) + 
die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0"); + revs->exclude_promisor_objects = 1; } else { int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix); if (!opts) @@ -2845,6 +2860,16 @@ void reset_revision_walk(void) clear_object_flags(SEEN | ADDED | SHOWN); } +static int mark_uninteresting(const struct object_id *oid, + struct packed_git *pack, + uint32_t pos, + void *unused) +{ + struct object *o = parse_object(oid); + o->flags |= UNINTERESTING | SEEN; + return 0; +} + int prepare_revision_walk(struct rev_info *revs) { int i; @@ -2872,6 +2897,11 @@ int prepare_revision_walk(struct rev_info *revs) (revs->limited && limiting_can_increase_treesame(revs))) revs->treesame.name = "treesame"; + if (revs->exclude_promisor_objects) { + for_each_packed_object(mark_uninteresting, NULL, + FOR_EACH_OBJECT_PROMISOR_ONLY); + } + if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED) commit_list_sort_by_date(&revs->commits); if (revs->no_walk) diff --git a/revision.h b/revision.h index d7a35c8c9e..3dee97bfb9 100644 --- a/revision.h +++ b/revision.h @@ -122,7 +122,10 @@ struct rev_info { ancestry_path:1, first_parent_only:1, line_level_traverse:1, - tree_blobs_in_commit_order:1; + tree_blobs_in_commit_order:1, + + /* for internal use only */ + exclude_promisor_objects:1; /* Diff flags */ unsigned int diff:1, diff --git a/run-command.c b/run-command.c index 31fc5ea86e..a483d5904a 100644 --- a/run-command.c +++ b/run-command.c @@ -6,6 +6,7 @@ #include "thread-utils.h" #include "strbuf.h" #include "string-list.h" +#include "quote.h" void child_process_init(struct child_process *child) { @@ -556,6 +557,90 @@ static int wait_or_whine(pid_t pid, const char *argv0, int in_signal) return code; } +static void trace_add_env(struct strbuf *dst, const char *const *deltaenv) +{ + struct string_list envs = STRING_LIST_INIT_DUP; + const char *const *e; + int i; + int printed_unset = 0; + + /* Last one wins, see run-command.c:prep_childenv() for context */ + for (e = deltaenv; e && *e; e++) { + struct strbuf key = STRBUF_INIT; + char *equals = strchr(*e, '='); + + if (equals) { + strbuf_add(&key, *e, equals - *e); + string_list_insert(&envs, key.buf)->util = equals + 1; + } else { + string_list_insert(&envs, *e)->util = NULL; + } + strbuf_release(&key); + } + + /* "unset X Y...;" */ + for (i = 0; i < envs.nr; i++) { + const char *var = envs.items[i].string; + const char *val = envs.items[i].util; + + if (val || !getenv(var)) + continue; + + if (!printed_unset) { + strbuf_addstr(dst, " unset"); + printed_unset = 1; + } + strbuf_addf(dst, " %s", var); + } + if (printed_unset) + strbuf_addch(dst, ';'); + + /* ... followed by "A=B C=D ..." */ + for (i = 0; i < envs.nr; i++) { + const char *var = envs.items[i].string; + const char *val = envs.items[i].util; + const char *oldval; + + if (!val) + continue; + + oldval = getenv(var); + if (oldval && !strcmp(val, oldval)) + continue; + + strbuf_addf(dst, " %s=", var); + sq_quote_buf_pretty(dst, val); + } + string_list_clear(&envs, 0); +} + +static void trace_run_command(const struct child_process *cp) +{ + struct strbuf buf = STRBUF_INIT; + + if (!trace_want(&trace_default_key)) + return; + + strbuf_addf(&buf, "trace: run_command:"); + if (cp->dir) { + strbuf_addstr(&buf, " cd "); + sq_quote_buf_pretty(&buf, cp->dir); + strbuf_addch(&buf, ';'); + } + /* + * The caller is responsible for initializing cp->env from + * cp->env_array if needed. We only check one place. 
+ */ + if (cp->env) + trace_add_env(&buf, cp->env); + if (cp->git_cmd) + strbuf_addstr(&buf, " git"); + sq_quote_argv_pretty(&buf, cp->argv); + + trace_printf("%s", buf.buf); + strbuf_release(&buf); +} + int start_command(struct child_process *cmd) { int need_in, need_out, need_err; @@ -624,7 +709,8 @@ fail_pipe: cmd->err = fderr[0]; } - trace_argv_printf(cmd->argv, "trace: run_command:"); + trace_run_command(cmd); + fflush(NULL); #ifndef GIT_WINDOWS_NATIVE diff --git a/send-pack.c b/send-pack.c index 2112d3b27a..8d9190f5e7 100644 --- a/send-pack.c +++ b/send-pack.c @@ -137,6 +137,8 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc static int receive_unpack_status(int in) { const char *line = packet_read_line(in, NULL); + if (!line) + return error(_("unexpected flush packet while reading remote unpack status")); if (!skip_prefix(line, "unpack ", &line)) return error(_("unable to parse remote unpack status: %s"), line); if (strcmp(line, "ok")) diff --git a/sequencer.c b/sequencer.c index f692221999..c24ecdb21f 100644 --- a/sequencer.c +++ b/sequencer.c @@ -1,10 +1,10 @@ #include "cache.h" #include "config.h" #include "lockfile.h" -#include "sequencer.h" #include "dir.h" #include "object.h" #include "commit.h" +#include "sequencer.h" #include "tag.h" #include "run-command.h" #include "exec_cmd.h" @@ -21,12 +21,16 @@ #include "log-tree.h" #include "wt-status.h" #include "hashmap.h" +#include "notes-utils.h" +#include "sigchain.h" #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION" const char sign_off_header[] = "Signed-off-by: "; static const char cherry_picked_prefix[] = "(cherry picked from commit "; +GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG") + GIT_PATH_FUNC(git_path_seq_dir, "sequencer") static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo") @@ -130,6 +134,51 @@ static GIT_PATH_FUNC(rebase_path_strategy, "rebase-merge/strategy") static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts") static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate") +static int git_sequencer_config(const char *k, const char *v, void *cb) +{ + struct replay_opts *opts = cb; + int status; + + if (!strcmp(k, "commit.cleanup")) { + const char *s; + + status = git_config_string(&s, k, v); + if (status) + return status; + + if (!strcmp(s, "verbatim")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE; + else if (!strcmp(s, "whitespace")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE; + else if (!strcmp(s, "strip")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL; + else if (!strcmp(s, "scissors")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE; + else + warning(_("invalid commit message cleanup mode '%s'"), + s); + + return status; + } + + if (!strcmp(k, "commit.gpgsign")) { + opts->gpg_sign = git_config_bool(k, v) ? 
xstrdup("") : NULL; + return 0; + } + + status = git_gpg_config(k, v, NULL); + if (status) + return status; + + return git_diff_basic_config(k, v, NULL); +} + +void sequencer_init_config(struct replay_opts *opts) +{ + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE; + git_config(git_sequencer_config, opts); +} + static inline int is_rebase_i(const struct replay_opts *opts) { return opts->action == REPLAY_INTERACTIVE_REBASE; @@ -478,9 +527,6 @@ static int do_recursive_merge(struct commit *base, struct commit *next, _(action_name(opts))); rollback_lock_file(&index_lock); - if (opts->signoff) - append_signoff(msgbuf, 0, 0); - if (!clean) append_conflicts_hint(msgbuf); @@ -596,6 +642,18 @@ static int read_env_script(struct argv_array *env) return 0; } +static char *get_author(const char *message) +{ + size_t len; + const char *a; + + a = find_commit_header(message, "author", &len); + if (a) + return xmemdupz(a, len); + + return NULL; +} + static const char staged_changes_advice[] = N_("you have staged changes in your working tree\n" "If these changes are meant to be squashed into the previous commit, run:\n" @@ -658,8 +716,6 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts, argv_array_push(&cmd.args, "--amend"); if (opts->gpg_sign) argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign); - if (opts->signoff) - argv_array_push(&cmd.args, "-s"); if (defmsg) argv_array_pushl(&cmd.args, "-F", defmsg, NULL); if ((flags & CLEANUP_MSG)) @@ -694,6 +750,461 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts, return run_command(&cmd); } +static int rest_is_empty(const struct strbuf *sb, int start) +{ + int i, eol; + const char *nl; + + /* Check if the rest is just whitespace and Signed-off-by's. */ + for (i = start; i < sb->len; i++) { + nl = memchr(sb->buf + i, '\n', sb->len - i); + if (nl) + eol = nl - sb->buf; + else + eol = sb->len; + + if (strlen(sign_off_header) <= eol - i && + starts_with(sb->buf + i, sign_off_header)) { + i = eol; + continue; + } + while (i < eol) + if (!isspace(sb->buf[i++])) + return 0; + } + + return 1; +} + +/* + * Find out if the message in the strbuf contains only whitespace and + * Signed-off-by lines. 
+ */ +int message_is_empty(const struct strbuf *sb, + enum commit_msg_cleanup_mode cleanup_mode) +{ + if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len) + return 0; + return rest_is_empty(sb, 0); +} + +/* + * See if the user edited the message in the editor or left what + * was in the template intact + */ +int template_untouched(const struct strbuf *sb, const char *template_file, + enum commit_msg_cleanup_mode cleanup_mode) +{ + struct strbuf tmpl = STRBUF_INIT; + const char *start; + + if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len) + return 0; + + if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0) + return 0; + + strbuf_stripspace(&tmpl, cleanup_mode == COMMIT_MSG_CLEANUP_ALL); + if (!skip_prefix(sb->buf, tmpl.buf, &start)) + start = sb->buf; + strbuf_release(&tmpl); + return rest_is_empty(sb, start - sb->buf); +} + +int update_head_with_reflog(const struct commit *old_head, + const struct object_id *new_head, + const char *action, const struct strbuf *msg, + struct strbuf *err) +{ + struct ref_transaction *transaction; + struct strbuf sb = STRBUF_INIT; + const char *nl; + int ret = 0; + + if (action) { + strbuf_addstr(&sb, action); + strbuf_addstr(&sb, ": "); + } + + nl = strchr(msg->buf, '\n'); + if (nl) { + strbuf_add(&sb, msg->buf, nl + 1 - msg->buf); + } else { + strbuf_addbuf(&sb, msg); + strbuf_addch(&sb, '\n'); + } + + transaction = ref_transaction_begin(err); + if (!transaction || + ref_transaction_update(transaction, "HEAD", new_head, + old_head ? &old_head->object.oid : &null_oid, + 0, sb.buf, err) || + ref_transaction_commit(transaction, err)) { + ret = -1; + } + ref_transaction_free(transaction); + strbuf_release(&sb); + + return ret; +} + +static int run_rewrite_hook(const struct object_id *oldoid, + const struct object_id *newoid) +{ + struct child_process proc = CHILD_PROCESS_INIT; + const char *argv[3]; + int code; + struct strbuf sb = STRBUF_INIT; + + argv[0] = find_hook("post-rewrite"); + if (!argv[0]) + return 0; + + argv[1] = "amend"; + argv[2] = NULL; + + proc.argv = argv; + proc.in = -1; + proc.stdout_to_stderr = 1; + + code = start_command(&proc); + if (code) + return code; + strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid)); + sigchain_push(SIGPIPE, SIG_IGN); + write_in_full(proc.in, sb.buf, sb.len); + close(proc.in); + strbuf_release(&sb); + sigchain_pop(SIGPIPE); + return finish_command(&proc); +} + +void commit_post_rewrite(const struct commit *old_head, + const struct object_id *new_head) +{ + struct notes_rewrite_cfg *cfg; + + cfg = init_copy_notes_for_rewrite("amend"); + if (cfg) { + /* we are amending, so old_head is not NULL */ + copy_note_for_rewrite(cfg, &old_head->object.oid, new_head); + finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'"); + } + run_rewrite_hook(&old_head->object.oid, new_head); +} + +static int run_prepare_commit_msg_hook(struct strbuf *msg, const char *commit) +{ + struct argv_array hook_env = ARGV_ARRAY_INIT; + int ret; + const char *name; + + name = git_path_commit_editmsg(); + if (write_message(msg->buf, msg->len, name, 0)) + return -1; + + argv_array_pushf(&hook_env, "GIT_INDEX_FILE=%s", get_index_file()); + argv_array_push(&hook_env, "GIT_EDITOR=:"); + if (commit) + ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name, + "commit", commit, NULL); + else + ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name, + "message", NULL); + if (ret) + ret = error(_("'prepare-commit-msg' hook failed")); + argv_array_clear(&hook_env); + + return ret; +} + 
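/*
 * [Editor's sketch -- not part of the patch above, shown for illustration
 * only.] The new sequencer plumbing maps commit.cleanup onto the
 * commit_msg_cleanup_mode enum and feeds it to message_is_empty(). A
 * minimal caller, as it might appear inside sequencer.c, assuming only the
 * functions introduced in this diff (sequencer_init_config(),
 * message_is_empty()) plus the existing strbuf_stripspace() helper; the
 * function name example_check_message is hypothetical.
 */
static void example_check_message(struct replay_opts *opts, struct strbuf *msg)
{
	/* Read commit.cleanup, commit.gpgsign, etc. into opts. */
	sequencer_init_config(opts);

	/* Apply the configured cleanup mode before deciding emptiness. */
	if (opts->default_msg_cleanup != COMMIT_MSG_CLEANUP_NONE)
		strbuf_stripspace(msg,
				  opts->default_msg_cleanup == COMMIT_MSG_CLEANUP_ALL);

	/* A whitespace/Signed-off-by-only message counts as empty. */
	if (message_is_empty(msg, opts->default_msg_cleanup))
		die("aborting commit due to empty commit message");
}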
+static const char implicit_ident_advice_noconfig[] = +N_("Your name and email address were configured automatically based\n" +"on your username and hostname. Please check that they are accurate.\n" +"You can suppress this message by setting them explicitly. Run the\n" +"following command and follow the instructions in your editor to edit\n" +"your configuration file:\n" +"\n" +" git config --global --edit\n" +"\n" +"After doing this, you may fix the identity used for this commit with:\n" +"\n" +" git commit --amend --reset-author\n"); + +static const char implicit_ident_advice_config[] = +N_("Your name and email address were configured automatically based\n" +"on your username and hostname. Please check that they are accurate.\n" +"You can suppress this message by setting them explicitly:\n" +"\n" +" git config --global user.name \"Your Name\"\n" +" git config --global user.email you@example.com\n" +"\n" +"After doing this, you may fix the identity used for this commit with:\n" +"\n" +" git commit --amend --reset-author\n"); + +static const char *implicit_ident_advice(void) +{ + char *user_config = expand_user_path("~/.gitconfig", 0); + char *xdg_config = xdg_config_home("config"); + int config_exists = file_exists(user_config) || file_exists(xdg_config); + + free(user_config); + free(xdg_config); + + if (config_exists) + return _(implicit_ident_advice_config); + else + return _(implicit_ident_advice_noconfig); + +} + +void print_commit_summary(const char *prefix, const struct object_id *oid, + unsigned int flags) +{ + struct rev_info rev; + struct commit *commit; + struct strbuf format = STRBUF_INIT; + const char *head; + struct pretty_print_context pctx = {0}; + struct strbuf author_ident = STRBUF_INIT; + struct strbuf committer_ident = STRBUF_INIT; + + commit = lookup_commit(oid); + if (!commit) + die(_("couldn't look up newly created commit")); + if (parse_commit(commit)) + die(_("could not parse newly created commit")); + + strbuf_addstr(&format, "format:%h] %s"); + + format_commit_message(commit, "%an <%ae>", &author_ident, &pctx); + format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx); + if (strbuf_cmp(&author_ident, &committer_ident)) { + strbuf_addstr(&format, "\n Author: "); + strbuf_addbuf_percentquote(&format, &author_ident); + } + if (flags & SUMMARY_SHOW_AUTHOR_DATE) { + struct strbuf date = STRBUF_INIT; + + format_commit_message(commit, "%ad", &date, &pctx); + strbuf_addstr(&format, "\n Date: "); + strbuf_addbuf_percentquote(&format, &date); + strbuf_release(&date); + } + if (!committer_ident_sufficiently_given()) { + strbuf_addstr(&format, "\n Committer: "); + strbuf_addbuf_percentquote(&format, &committer_ident); + if (advice_implicit_identity) { + strbuf_addch(&format, '\n'); + strbuf_addstr(&format, implicit_ident_advice()); + } + } + strbuf_release(&author_ident); + strbuf_release(&committer_ident); + + init_revisions(&rev, prefix); + setup_revisions(0, NULL, &rev, NULL); + + rev.diff = 1; + rev.diffopt.output_format = + DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY; + + rev.verbose_header = 1; + rev.show_root_diff = 1; + get_commit_format(format.buf, &rev); + rev.always_show_header = 0; + rev.diffopt.detect_rename = DIFF_DETECT_RENAME; + rev.diffopt.break_opt = 0; + diff_setup_done(&rev.diffopt); + + head = resolve_ref_unsafe("HEAD", 0, NULL, NULL); + if (!head) + die_errno(_("unable to resolve HEAD after creating commit")); + if (!strcmp(head, "HEAD")) + head = _("detached HEAD"); + else + skip_prefix(head, "refs/heads/", &head); + printf("[%s%s ", head, 
(flags & SUMMARY_INITIAL_COMMIT) ? + _(" (root-commit)") : ""); + + if (!log_tree_commit(&rev, commit)) { + rev.always_show_header = 1; + rev.use_terminator = 1; + log_tree_commit(&rev, commit); + } + + strbuf_release(&format); +} + +static int parse_head(struct commit **head) +{ + struct commit *current_head; + struct object_id oid; + + if (get_oid("HEAD", &oid)) { + current_head = NULL; + } else { + current_head = lookup_commit_reference(&oid); + if (!current_head) + return error(_("could not parse HEAD")); + if (oidcmp(&oid, &current_head->object.oid)) { + warning(_("HEAD %s is not a commit!"), + oid_to_hex(&oid)); + } + if (parse_commit(current_head)) + return error(_("could not parse HEAD commit")); + } + *head = current_head; + + return 0; +} + +/* + * Try to commit without forking 'git commit'. In some cases we need + * to run 'git commit' to display an error message + * + * Returns: + * -1 - error unable to commit + * 0 - success + * 1 - run 'git commit' + */ +static int try_to_commit(struct strbuf *msg, const char *author, + struct replay_opts *opts, unsigned int flags, + struct object_id *oid) +{ + struct object_id tree; + struct commit *current_head; + struct commit_list *parents = NULL; + struct commit_extra_header *extra = NULL; + struct strbuf err = STRBUF_INIT; + struct strbuf commit_msg = STRBUF_INIT; + char *amend_author = NULL; + const char *hook_commit = NULL; + enum commit_msg_cleanup_mode cleanup; + int res = 0; + + if (parse_head(&current_head)) + return -1; + + if (flags & AMEND_MSG) { + const char *exclude_gpgsig[] = { "gpgsig", NULL }; + const char *out_enc = get_commit_output_encoding(); + const char *message = logmsg_reencode(current_head, NULL, + out_enc); + + if (!msg) { + const char *orig_message = NULL; + + find_commit_subject(message, &orig_message); + msg = &commit_msg; + strbuf_addstr(msg, orig_message); + hook_commit = "HEAD"; + } + author = amend_author = get_author(message); + unuse_commit_buffer(current_head, message); + if (!author) { + res = error(_("unable to parse commit author")); + goto out; + } + parents = copy_commit_list(current_head->parents); + extra = read_commit_extra_headers(current_head, exclude_gpgsig); + } else if (current_head) { + commit_list_insert(current_head, &parents); + } + + if (write_cache_as_tree(tree.hash, 0, NULL)) { + res = error(_("git write-tree failed to write a tree")); + goto out; + } + + if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ? + &current_head->tree->object.oid : + &empty_tree_oid, &tree)) { + res = 1; /* run 'git commit' to display error message */ + goto out; + } + + if (find_hook("prepare-commit-msg")) { + res = run_prepare_commit_msg_hook(msg, hook_commit); + if (res) + goto out; + if (strbuf_read_file(&commit_msg, git_path_commit_editmsg(), + 2048) < 0) { + res = error_errno(_("unable to read commit message " + "from '%s'"), + git_path_commit_editmsg()); + goto out; + } + msg = &commit_msg; + } + + cleanup = (flags & CLEANUP_MSG) ? 
COMMIT_MSG_CLEANUP_ALL : + opts->default_msg_cleanup; + + if (cleanup != COMMIT_MSG_CLEANUP_NONE) + strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL); + if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) { + res = 1; /* run 'git commit' to display error message */ + goto out; + } + + if (commit_tree_extended(msg->buf, msg->len, &tree, parents, + oid, author, opts->gpg_sign, extra)) { + res = error(_("failed to write commit object")); + goto out; + } + + if (update_head_with_reflog(current_head, oid, + getenv("GIT_REFLOG_ACTION"), msg, &err)) { + res = error("%s", err.buf); + goto out; + } + + if (flags & AMEND_MSG) + commit_post_rewrite(current_head, oid); + +out: + free_commit_extra_headers(extra); + strbuf_release(&err); + strbuf_release(&commit_msg); + free(amend_author); + + return res; +} + +static int do_commit(const char *msg_file, const char *author, + struct replay_opts *opts, unsigned int flags) +{ + int res = 1; + + if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) { + struct object_id oid; + struct strbuf sb = STRBUF_INIT; + + if (msg_file && strbuf_read_file(&sb, msg_file, 2048) < 0) + return error_errno(_("unable to read commit message " + "from '%s'"), + msg_file); + + res = try_to_commit(msg_file ? &sb : NULL, author, opts, flags, + &oid); + strbuf_release(&sb); + if (!res) { + unlink(git_path_cherry_pick_head()); + unlink(git_path_merge_msg()); + if (!is_rebase_i(opts)) + print_commit_summary(NULL, &oid, + SUMMARY_SHOW_AUTHOR_DATE); + return res; + } + } + if (res == 1) + return run_git_commit(msg_file, opts, flags); + + return res; +} + static int is_original_commit_empty(struct commit *commit) { const struct object_id *ptree_oid; @@ -952,6 +1463,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, struct object_id head; struct commit *base, *next, *parent; const char *base_label, *next_label; + char *author = NULL; struct commit_message msg = { NULL, NULL, NULL, NULL }; struct strbuf msgbuf = STRBUF_INIT; int res, unborn = 0, allow; @@ -1066,6 +1578,8 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, strbuf_addstr(&msgbuf, oid_to_hex(&commit->object.oid)); strbuf_addstr(&msgbuf, ")\n"); } + if (!is_fixup(command)) + author = get_author(msg.message); } if (command == TODO_REWORD) @@ -1091,6 +1605,9 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, } } + if (opts->signoff) + append_signoff(&msgbuf, 0, 0); + if (is_rebase_i(opts) && write_author_script(msg.message) < 0) res = -1; else if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) { @@ -1148,9 +1665,13 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, goto leave; } else if (allow) flags |= ALLOW_EMPTY; - if (!opts->no_commit) + if (!opts->no_commit) { fast_forward_edit: - res = run_git_commit(msg_file, opts, flags); + if (author || command == TODO_REVERT || (flags & AMEND_MSG)) + res = do_commit(msg_file, author, opts, flags); + else + res = error(_("unable to parse commit author")); + } if (!res && final_fixup) { unlink(rebase_path_fixup_msg()); @@ -1159,6 +1680,7 @@ fast_forward_edit: leave: free_message(commit, &msg); + free(author); update_abort_safety_file(); return res; diff --git a/sequencer.h b/sequencer.h index 81f6d7d393..e45b178dfc 100644 --- a/sequencer.h +++ b/sequencer.h @@ -1,6 +1,7 @@ #ifndef SEQUENCER_H #define SEQUENCER_H +const char *git_path_commit_editmsg(void); const char *git_path_seq_dir(void); #define APPEND_SIGNOFF_DEDUP 
(1u << 0) @@ -11,6 +12,13 @@ enum replay_action { REPLAY_INTERACTIVE_REBASE }; +enum commit_msg_cleanup_mode { + COMMIT_MSG_CLEANUP_SPACE, + COMMIT_MSG_CLEANUP_NONE, + COMMIT_MSG_CLEANUP_SCISSORS, + COMMIT_MSG_CLEANUP_ALL +}; + struct replay_opts { enum replay_action action; @@ -29,6 +37,7 @@ struct replay_opts { int mainline; char *gpg_sign; + enum commit_msg_cleanup_mode default_msg_cleanup; /* Merge strategy */ char *strategy; @@ -40,6 +49,8 @@ struct replay_opts { }; #define REPLAY_OPTS_INIT { -1 } +/* Call this to setup defaults before parsing command line options */ +void sequencer_init_config(struct replay_opts *opts); int sequencer_pick_revisions(struct replay_opts *opts); int sequencer_continue(struct replay_opts *opts); int sequencer_rollback(struct replay_opts *opts); @@ -61,5 +72,19 @@ extern const char sign_off_header[]; void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag); void append_conflicts_hint(struct strbuf *msgbuf); +int message_is_empty(const struct strbuf *sb, + enum commit_msg_cleanup_mode cleanup_mode); +int template_untouched(const struct strbuf *sb, const char *template_file, + enum commit_msg_cleanup_mode cleanup_mode); +int update_head_with_reflog(const struct commit *old_head, + const struct object_id *new_head, + const char *action, const struct strbuf *msg, + struct strbuf *err); +void commit_post_rewrite(const struct commit *current_head, + const struct object_id *new_head); +#define SUMMARY_INITIAL_COMMIT (1 << 0) +#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1) +void print_commit_summary(const char *prefix, const struct object_id *oid, + unsigned int flags); #endif @@ -119,7 +119,7 @@ char *prefix_path(const char *prefix, int len, const char *path) { char *r = prefix_path_gently(prefix, len, NULL, path); if (!r) - die("'%s' is outside repository", path); + die(_("'%s' is outside repository"), path); return r; } @@ -160,7 +160,7 @@ int check_filename(const char *prefix, const char *arg) free(to_free); return 0; /* file does not exist */ } - die_errno("failed to stat '%s'", arg); + die_errno(_("failed to stat '%s'"), arg); } static void NORETURN die_verify_filename(const char *prefix, @@ -230,7 +230,7 @@ void verify_filename(const char *prefix, int diagnose_misspelt_rev) { if (*arg == '-') - die("option '%s' must come before non-option arguments", arg); + die(_("option '%s' must come before non-option arguments"), arg); if (looks_like_pathspec(arg) || check_filename(prefix, arg)) return; die_verify_filename(prefix, arg, diagnose_misspelt_rev); @@ -385,14 +385,14 @@ void setup_work_tree(void) return; if (work_tree_config_is_bogus) - die("unable to set up work tree using invalid config"); + die(_("unable to set up work tree using invalid config")); work_tree = get_git_work_tree(); git_dir = get_git_dir(); if (!is_absolute_path(git_dir)) git_dir = real_path(get_git_dir()); if (!work_tree || chdir(work_tree)) - die("This operation must be run in a work tree"); + die(_("this operation must be run in a work tree")); /* * Make sure subsequent git processes find correct worktree @@ -422,7 +422,11 @@ static int check_repo_format(const char *var, const char *value, void *vdata) ; else if (!strcmp(ext, "preciousobjects")) data->precious_objects = git_config_bool(var, value); - else + else if (!strcmp(ext, "partialclone")) { + if (!value) + return config_error_nonbool(var); + data->partial_clone = xstrdup(value); + } else string_list_append(&data->unknown_extensions, ext); } else if (strcmp(var, "core.bare") == 0) { data->is_bare = 
git_config_bool(var, value); @@ -464,6 +468,7 @@ static int check_repository_format_gently(const char *gitdir, struct repository_ } repository_format_precious_objects = candidate->precious_objects; + repository_format_partial_clone = candidate->partial_clone; string_list_clear(&candidate->unknown_extensions, 0); if (!has_common) { if (candidate->is_bare != -1) { @@ -525,17 +530,17 @@ void read_gitfile_error_die(int error_code, const char *path, const char *dir) /* non-fatal; follow return path */ break; case READ_GITFILE_ERR_OPEN_FAILED: - die_errno("Error opening '%s'", path); + die_errno(_("error opening '%s'"), path); case READ_GITFILE_ERR_TOO_LARGE: - die("Too large to be a .git file: '%s'", path); + die(_("too large to be a .git file: '%s'"), path); case READ_GITFILE_ERR_READ_FAILED: - die("Error reading %s", path); + die(_("error reading %s"), path); case READ_GITFILE_ERR_INVALID_FORMAT: - die("Invalid gitfile format: %s", path); + die(_("invalid gitfile format: %s"), path); case READ_GITFILE_ERR_NO_PATH: - die("No path in gitfile: %s", path); + die(_("no path in gitfile: %s"), path); case READ_GITFILE_ERR_NOT_A_REPO: - die("Not a git repository: %s", dir); + die(_("not a git repository: %s"), dir); default: die("BUG: unknown error code"); } @@ -634,7 +639,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, int offset; if (PATH_MAX - 40 < strlen(gitdirenv)) - die("'$%s' too big", GIT_DIR_ENVIRONMENT); + die(_("'$%s' too big"), GIT_DIR_ENVIRONMENT); gitfile = (char*)read_gitfile(gitdirenv); if (gitfile) { @@ -648,7 +653,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, free(gitfile); return NULL; } - die("Not a git repository: '%s'", gitdirenv); + die(_("not a git repository: '%s'"), gitdirenv); } if (check_repository_format_gently(gitdirenv, repo_fmt, nongit_ok)) { @@ -677,12 +682,12 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, else { char *core_worktree; if (chdir(gitdirenv)) - die_errno("Could not chdir to '%s'", gitdirenv); + die_errno(_("cannot chdir to '%s'"), gitdirenv); if (chdir(git_work_tree_cfg)) - die_errno("Could not chdir to '%s'", git_work_tree_cfg); + die_errno(_("cannot chdir to '%s'"), git_work_tree_cfg); core_worktree = xgetcwd(); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); set_git_work_tree(core_worktree); free(core_worktree); } @@ -710,7 +715,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, if (offset >= 0) { /* cwd inside worktree? */ set_git_dir(real_path(gitdirenv)); if (chdir(worktree)) - die_errno("Could not chdir to '%s'", worktree); + die_errno(_("cannot chdir to '%s'"), worktree); strbuf_addch(cwd, '/'); free(gitfile); return cwd->buf + offset; @@ -738,7 +743,7 @@ static const char *setup_discovered_git_dir(const char *gitdir, if (offset != cwd->len && !is_absolute_path(gitdir)) gitdir = to_free = real_pathdup(gitdir, 1); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); ret = setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok); free(to_free); return ret; @@ -748,7 +753,7 @@ static const char *setup_discovered_git_dir(const char *gitdir, if (is_bare_repository_cfg > 0) { set_git_dir(offset == cwd->len ? 
gitdir : real_path(gitdir)); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); return NULL; } @@ -787,7 +792,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset, gitdir = offset == cwd->len ? "." : xmemdupz(cwd->buf, offset); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); return setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok); } @@ -795,7 +800,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset, inside_work_tree = 0; if (offset != cwd->len) { if (chdir(cwd->buf)) - die_errno("Cannot come back to cwd"); + die_errno(_("cannot come back to cwd")); root_len = offset_1st_component(cwd->buf); strbuf_setlen(cwd, offset > root_len ? offset : root_len); set_git_dir(cwd->buf); @@ -808,9 +813,9 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset, static const char *setup_nongit(const char *cwd, int *nongit_ok) { if (!nongit_ok) - die(_("Not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT); + die(_("not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT); if (chdir(cwd)) - die_errno(_("Cannot come back to cwd")); + die_errno(_("cannot come back to cwd")); *nongit_ok = 1; return NULL; } @@ -819,7 +824,7 @@ static dev_t get_device_or_die(const char *path, const char *prefix, int prefix_ { struct stat buf; if (stat(path, &buf)) { - die_errno("failed to stat '%*s%s%s'", + die_errno(_("failed to stat '%*s%s%s'"), prefix_len, prefix ? prefix : "", prefix ? "/" : "", path); @@ -1061,13 +1066,13 @@ const char *setup_git_directory_gently(int *nongit_ok) break; case GIT_DIR_DISCOVERED: if (dir.len < cwd.len && chdir(dir.buf)) - die(_("Cannot change to '%s'"), dir.buf); + die(_("cannot change to '%s'"), dir.buf); prefix = setup_discovered_git_dir(gitdir.buf, &cwd, dir.len, &repo_fmt, nongit_ok); break; case GIT_DIR_BARE: if (dir.len < cwd.len && chdir(dir.buf)) - die(_("Cannot change to '%s'"), dir.buf); + die(_("cannot change to '%s'"), dir.buf); prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok); break; case GIT_DIR_HIT_CEILING: @@ -1080,7 +1085,7 @@ const char *setup_git_directory_gently(int *nongit_ok) strbuf_release(&dir); return NULL; } - die(_("Not a git repository (or any parent up to mount point %s)\n" + die(_("not a git repository (or any parent up to mount point %s)\n" "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."), dir.buf); default: @@ -1164,7 +1169,7 @@ int git_config_perm(const char *var, const char *value) /* A filemode value was given: 0xxx */ if ((i & 0600) != 0600) - die(_("Problem with core.sharedRepository filemode value " + die(_("problem with core.sharedRepository filemode value " "(0%.3o).\nThe owner of files must always have " "read and write permissions."), i); @@ -1207,7 +1212,7 @@ void sanitize_stdfds(void) while (fd != -1 && fd < 2) fd = dup(fd); if (fd == -1) - die_errno("open /dev/null or dup failed"); + die_errno(_("open /dev/null or dup failed")); if (fd > 2) close(fd); } @@ -1222,12 +1227,12 @@ int daemonize(void) case 0: break; case -1: - die_errno("fork failed"); + die_errno(_("fork failed")); default: exit(0); } if (setsid() == -1) - die_errno("setsid failed"); + die_errno(_("setsid failed")); close(0); close(1); close(2); diff --git a/sha1-lookup.c b/sha1-lookup.c index 4cf3ebd921..8d0b1db3e2 100644 --- a/sha1-lookup.c +++ b/sha1-lookup.c @@ -99,3 +99,31 @@ int 
sha1_pos(const unsigned char *sha1, void *table, size_t nr, } while (lo < hi); return -lo-1; } + +int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo, + const unsigned char *table, size_t stride, uint32_t *result) +{ + uint32_t hi, lo; + + hi = ntohl(fanout_nbo[*sha1]); + lo = ((*sha1 == 0x0) ? 0 : ntohl(fanout_nbo[*sha1 - 1])); + + while (lo < hi) { + unsigned mi = lo + (hi - lo) / 2; + int cmp = hashcmp(table + mi * stride, sha1); + + if (!cmp) { + if (result) + *result = mi; + return 1; + } + if (cmp > 0) + hi = mi; + else + lo = mi + 1; + } + + if (result) + *result = lo; + return 0; +} diff --git a/sha1-lookup.h b/sha1-lookup.h index cf5314f402..7678b23b36 100644 --- a/sha1-lookup.h +++ b/sha1-lookup.h @@ -7,4 +7,26 @@ extern int sha1_pos(const unsigned char *sha1, void *table, size_t nr, sha1_access_fn fn); + +/* + * Searches for sha1 in table, using the given fanout table to determine the + * interval to search, then using binary search. Returns 1 if found, 0 if not. + * + * Takes the following parameters: + * + * - sha1: the hash to search for + * - fanout_nbo: a 256-element array of NETWORK-order 32-bit integers; the + * integer at position i represents the number of elements in table whose + * first byte is less than or equal to i + * - table: a sorted list of hashes with optional extra information in between + * - stride: distance between two consecutive elements in table (should be + * GIT_MAX_RAWSZ or greater) + * - result: if not NULL, this function stores the element index of the + * position found (if the search is successful) or the index of the least + * element that is greater than sha1 (if the search is not successful) + * + * This function does not verify the validity of the fanout table. + */ +int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo, + const unsigned char *table, size_t stride, uint32_t *result); #endif diff --git a/sha1_file.c b/sha1_file.c index 3da70ac650..826d7a0ae3 100644 --- a/sha1_file.c +++ b/sha1_file.c @@ -24,11 +24,11 @@ #include "bulk-checkin.h" #include "streaming.h" #include "dir.h" -#include "mru.h" #include "list.h" #include "mergesort.h" #include "quote.h" #include "packfile.h" +#include "fetch-object.h" const unsigned char null_sha1[GIT_MAX_RAWSZ]; const struct object_id null_oid; @@ -39,32 +39,32 @@ const struct object_id empty_blob_oid = { EMPTY_BLOB_SHA1_BIN_LITERAL }; -static void git_hash_sha1_init(void *ctx) +static void git_hash_sha1_init(git_hash_ctx *ctx) { - git_SHA1_Init((git_SHA_CTX *)ctx); + git_SHA1_Init(&ctx->sha1); } -static void git_hash_sha1_update(void *ctx, const void *data, size_t len) +static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len) { - git_SHA1_Update((git_SHA_CTX *)ctx, data, len); + git_SHA1_Update(&ctx->sha1, data, len); } -static void git_hash_sha1_final(unsigned char *hash, void *ctx) +static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx) { - git_SHA1_Final(hash, (git_SHA_CTX *)ctx); + git_SHA1_Final(hash, &ctx->sha1); } -static void git_hash_unknown_init(void *ctx) +static void git_hash_unknown_init(git_hash_ctx *ctx) { die("trying to init unknown hash"); } -static void git_hash_unknown_update(void *ctx, const void *data, size_t len) +static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len) { die("trying to update unknown hash"); } -static void git_hash_unknown_final(unsigned char *hash, void *ctx) +static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx) { die("trying to 
finalize unknown hash"); } @@ -75,7 +75,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = { 0x00000000, 0, 0, - 0, git_hash_unknown_init, git_hash_unknown_update, git_hash_unknown_final, @@ -86,7 +85,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = { "sha-1", /* "sha1", big-endian */ 0x73686131, - sizeof(git_SHA_CTX), GIT_SHA1_RAWSZ, GIT_SHA1_HEXSZ, git_hash_sha1_init, @@ -133,14 +131,14 @@ static struct cached_object *find_cached_object(const unsigned char *sha1) } -static enum safe_crlf get_safe_crlf(unsigned flags) +static int get_conv_flags(unsigned flags) { if (flags & HASH_RENORMALIZE) - return SAFE_CRLF_RENORMALIZE; + return CONV_EOL_RENORMALIZE; else if (flags & HASH_WRITE_OBJECT) - return safe_crlf; + return global_conv_flags_eol; else - return SAFE_CRLF_FALSE; + return 0; } @@ -321,15 +319,11 @@ static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1) } } -const char *sha1_file_name(const unsigned char *sha1) +void sha1_file_name(struct strbuf *buf, const unsigned char *sha1) { - static struct strbuf buf = STRBUF_INIT; - - strbuf_reset(&buf); - strbuf_addf(&buf, "%s/", get_object_directory()); - - fill_sha1_path(&buf, sha1); - return buf.buf; + strbuf_addstr(buf, get_object_directory()); + strbuf_addch(buf, '/'); + fill_sha1_path(buf, sha1); } struct strbuf *alt_scratch_buf(struct alternate_object_database *alt) @@ -710,7 +704,12 @@ int check_and_freshen_file(const char *fn, int freshen) static int check_and_freshen_local(const unsigned char *sha1, int freshen) { - return check_and_freshen_file(sha1_file_name(sha1), freshen); + static struct strbuf buf = STRBUF_INIT; + + strbuf_reset(&buf); + sha1_file_name(&buf, sha1); + + return check_and_freshen_file(buf.buf, freshen); } static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen) @@ -788,16 +787,16 @@ void *xmmap(void *start, size_t length, int check_sha1_signature(const unsigned char *sha1, void *map, unsigned long size, const char *type) { - unsigned char real_sha1[20]; + struct object_id real_oid; enum object_type obj_type; struct git_istream *st; - git_SHA_CTX c; + git_hash_ctx c; char hdr[32]; int hdrlen; if (map) { - hash_sha1_file(map, size, type, real_sha1); - return hashcmp(sha1, real_sha1) ? -1 : 0; + hash_object_file(map, size, type, &real_oid); + return hashcmp(sha1, real_oid.hash) ? -1 : 0; } st = open_istream(sha1, &obj_type, &size, NULL); @@ -808,8 +807,8 @@ int check_sha1_signature(const unsigned char *sha1, void *map, hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1; /* Sha1.. */ - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); for (;;) { char buf[1024 * 16]; ssize_t readlen = read_istream(st, buf, sizeof(buf)); @@ -820,11 +819,11 @@ int check_sha1_signature(const unsigned char *sha1, void *map, } if (!readlen) break; - git_SHA1_Update(&c, buf, readlen); + the_hash_algo->update_fn(&c, buf, readlen); } - git_SHA1_Final(real_sha1, &c); + the_hash_algo->final_fn(real_oid.hash, &c); close_istream(st); - return hashcmp(sha1, real_sha1) ? -1 : 0; + return hashcmp(sha1, real_oid.hash) ? 
-1 : 0; } int git_open_cloexec(const char *name, int flags) @@ -866,8 +865,12 @@ static int stat_sha1_file(const unsigned char *sha1, struct stat *st, const char **path) { struct alternate_object_database *alt; + static struct strbuf buf = STRBUF_INIT; + + strbuf_reset(&buf); + sha1_file_name(&buf, sha1); + *path = buf.buf; - *path = sha1_file_name(sha1); if (!lstat(*path, st)) return 0; @@ -891,8 +894,12 @@ static int open_sha1_file(const unsigned char *sha1, const char **path) int fd; struct alternate_object_database *alt; int most_interesting_errno; + static struct strbuf buf = STRBUF_INIT; + + strbuf_reset(&buf); + sha1_file_name(&buf, sha1); + *path = buf.buf; - *path = sha1_file_name(sha1); fd = git_open(*path); if (fd >= 0) return fd; @@ -1213,6 +1220,8 @@ static int sha1_loose_object_info(const unsigned char *sha1, return (status < 0) ? status : 0; } +int fetch_if_missing = 1; + int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags) { static struct object_info blank_oi = OBJECT_INFO_INIT; @@ -1221,6 +1230,7 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ? lookup_replace_object(sha1) : sha1; + int already_retried = 0; if (is_null_sha1(real)) return -1; @@ -1248,19 +1258,32 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, } } - if (!find_pack_entry(real, &e)) { + while (1) { + if (find_pack_entry(real, &e)) + break; + /* Most likely it's a loose object. */ if (!sha1_loose_object_info(real, oi, flags)) return 0; /* Not a loose object; someone else may have just packed it. */ - if (flags & OBJECT_INFO_QUICK) { - return -1; - } else { - reprepare_packed_git(); - if (!find_pack_entry(real, &e)) - return -1; + reprepare_packed_git(); + if (find_pack_entry(real, &e)) + break; + + /* Check if it is a missing object */ + if (fetch_if_missing && repository_format_partial_clone && + !already_retried) { + /* + * TODO Investigate having fetch_object() return + * TODO error/success and stopping the music here. + */ + fetch_object(repository_format_partial_clone, real); + already_retried = 1; + continue; } + + return -1; } if (oi == &blank_oi) @@ -1269,7 +1292,6 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, * information below, so return early.
*/ return 0; - rtype = packed_object_info(e.p, e.offset, oi); if (rtype < 0) { mark_bad_packed_object(e.p, real); @@ -1312,13 +1334,13 @@ static void *read_object(const unsigned char *sha1, enum object_type *type, return content; } -int pretend_sha1_file(void *buf, unsigned long len, enum object_type type, - unsigned char *sha1) +int pretend_object_file(void *buf, unsigned long len, enum object_type type, + struct object_id *oid) { struct cached_object *co; - hash_sha1_file(buf, len, typename(type), sha1); - if (has_sha1_file(sha1) || find_cached_object(sha1)) + hash_object_file(buf, len, typename(type), oid); + if (has_sha1_file(oid->hash) || find_cached_object(oid->hash)) return 0; ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc); co = &cached_objects[cached_object_nr++]; @@ -1326,7 +1348,7 @@ int pretend_sha1_file(void *buf, unsigned long len, enum object_type type, co->type = type; co->buf = xmalloc(len); memcpy(co->buf, buf, len); - hashcpy(co->sha1, sha1); + hashcpy(co->sha1, oid->hash); return 0; } @@ -1419,20 +1441,20 @@ void *read_object_with_reference(const unsigned char *sha1, } } -static void write_sha1_file_prepare(const void *buf, unsigned long len, - const char *type, unsigned char *sha1, - char *hdr, int *hdrlen) +static void write_object_file_prepare(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + char *hdr, int *hdrlen) { - git_SHA_CTX c; + git_hash_ctx c; /* Generate the header */ *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1; /* Sha1.. */ - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, *hdrlen); - git_SHA1_Update(&c, buf, len); - git_SHA1_Final(sha1, &c); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, *hdrlen); + the_hash_algo->update_fn(&c, buf, len); + the_hash_algo->final_fn(oid->hash, &c); } /* @@ -1485,12 +1507,12 @@ static int write_buffer(int fd, const void *buf, size_t len) return 0; } -int hash_sha1_file(const void *buf, unsigned long len, const char *type, - unsigned char *sha1) +int hash_object_file(const void *buf, unsigned long len, const char *type, + struct object_id *oid) { char hdr[32]; int hdrlen = sizeof(hdr); - write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen); + write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen); return 0; } @@ -1548,18 +1570,22 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename) return fd; } -static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, - const void *buf, unsigned long len, time_t mtime) +static int write_loose_object(const struct object_id *oid, char *hdr, + int hdrlen, const void *buf, unsigned long len, + time_t mtime) { int fd, ret; unsigned char compressed[4096]; git_zstream stream; - git_SHA_CTX c; - unsigned char parano_sha1[20]; + git_hash_ctx c; + struct object_id parano_oid; static struct strbuf tmp_file = STRBUF_INIT; - const char *filename = sha1_file_name(sha1); + static struct strbuf filename = STRBUF_INIT; + + strbuf_reset(&filename); + sha1_file_name(&filename, oid->hash); - fd = create_tmpfile(&tmp_file, filename); + fd = create_tmpfile(&tmp_file, filename.buf); if (fd < 0) { if (errno == EACCES) return error("insufficient permission for adding an object to repository database %s", get_object_directory()); @@ -1571,14 +1597,14 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, git_deflate_init(&stream, zlib_compression_level); stream.next_out = compressed; stream.avail_out = sizeof(compressed); - git_SHA1_Init(&c); + 
the_hash_algo->init_fn(&c); /* First header.. */ stream.next_in = (unsigned char *)hdr; stream.avail_in = hdrlen; while (git_deflate(&stream, 0) == Z_OK) ; /* nothing */ - git_SHA1_Update(&c, hdr, hdrlen); + the_hash_algo->update_fn(&c, hdr, hdrlen); /* Then the data itself.. */ stream.next_in = (void *)buf; @@ -1586,7 +1612,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, do { unsigned char *in0 = stream.next_in; ret = git_deflate(&stream, Z_FINISH); - git_SHA1_Update(&c, in0, stream.next_in - in0); + the_hash_algo->update_fn(&c, in0, stream.next_in - in0); if (write_buffer(fd, compressed, stream.next_out - compressed) < 0) die("unable to write sha1 file"); stream.next_out = compressed; @@ -1594,13 +1620,16 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, } while (ret == Z_OK); if (ret != Z_STREAM_END) - die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret); + die("unable to deflate new object %s (%d)", oid_to_hex(oid), + ret); ret = git_deflate_end_gently(&stream); if (ret != Z_OK) - die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret); - git_SHA1_Final(parano_sha1, &c); - if (hashcmp(sha1, parano_sha1) != 0) - die("confused by unstable object source data for %s", sha1_to_hex(sha1)); + die("deflateEnd on object %s failed (%d)", oid_to_hex(oid), + ret); + the_hash_algo->final_fn(parano_oid.hash, &c); + if (oidcmp(oid, &parano_oid) != 0) + die("confused by unstable object source data for %s", + oid_to_hex(oid)); close_sha1_file(fd); @@ -1612,7 +1641,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, warning_errno("failed utime() on %s", tmp_file.buf); } - return finalize_object_file(tmp_file.buf, filename); + return finalize_object_file(tmp_file.buf, filename.buf); } static int freshen_loose_object(const unsigned char *sha1) @@ -1633,7 +1662,8 @@ static int freshen_packed_object(const unsigned char *sha1) return 1; } -int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1) +int write_object_file(const void *buf, unsigned long len, const char *type, + struct object_id *oid) { char hdr[32]; int hdrlen = sizeof(hdr); @@ -1641,14 +1671,15 @@ int write_sha1_file(const void *buf, unsigned long len, const char *type, unsign /* Normally if we have it in the pack then we do not bother writing * it out into .git/objects/??/?{38} file.
*/ - write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen); - if (freshen_packed_object(sha1) || freshen_loose_object(sha1)) + write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen); + if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash)) return 0; - return write_loose_object(sha1, hdr, hdrlen, buf, len, 0); + return write_loose_object(oid, hdr, hdrlen, buf, len, 0); } -int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, - struct object_id *oid, unsigned flags) +int hash_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags) { char *header; int hdrlen, status = 0; @@ -1656,20 +1687,20 @@ int hash_sha1_file_literally(const void *buf, unsigned long len, const char *typ /* type string, SP, %lu of the length plus NUL must fit this */ hdrlen = strlen(type) + 32; header = xmalloc(hdrlen); - write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen); + write_object_file_prepare(buf, len, type, oid, header, &hdrlen); if (!(flags & HASH_WRITE_OBJECT)) goto cleanup; if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash)) goto cleanup; - status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0); + status = write_loose_object(oid, header, hdrlen, buf, len, 0); cleanup: free(header); return status; } -int force_object_loose(const unsigned char *sha1, time_t mtime) +int force_object_loose(const struct object_id *oid, time_t mtime) { void *buf; unsigned long len; @@ -1678,13 +1709,13 @@ int force_object_loose(const unsigned char *sha1, time_t mtime) int hdrlen; int ret; - if (has_loose_object(sha1)) + if (has_loose_object(oid->hash)) return 0; - buf = read_object(sha1, &type, &len); + buf = read_object(oid->hash, &type, &len); if (!buf) - return error("cannot read sha1_file for %s", sha1_to_hex(sha1)); + return error("cannot read sha1_file for %s", oid_to_hex(oid)); hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1; - ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime); + ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime); free(buf); return ret; @@ -1752,7 +1783,7 @@ static int index_mem(struct object_id *oid, void *buf, size_t size, if ((type == OBJ_BLOB) && path) { struct strbuf nbuf = STRBUF_INIT; if (convert_to_git(&the_index, path, buf, size, &nbuf, - get_safe_crlf(flags))) { + get_conv_flags(flags))) { buf = strbuf_detach(&nbuf, &size); re_allocated = 1; } @@ -1767,9 +1798,9 @@ static int index_mem(struct object_id *oid, void *buf, size_t size, } if (write_object) - ret = write_sha1_file(buf, size, typename(type), oid->hash); + ret = write_object_file(buf, size, typename(type), oid); else - ret = hash_sha1_file(buf, size, typename(type), oid->hash); + ret = hash_object_file(buf, size, typename(type), oid); if (re_allocated) free(buf); return ret; @@ -1786,14 +1817,14 @@ static int index_stream_convert_blob(struct object_id *oid, int fd, assert(would_convert_to_git_filter_fd(path)); convert_to_git_filter_fd(&the_index, path, fd, &sbuf, - get_safe_crlf(flags)); + get_conv_flags(flags)); if (write_object) - ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), - oid->hash); + ret = write_object_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), + oid); else - ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), - oid->hash); + ret = hash_object_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), + oid); strbuf_release(&sbuf); return ret; } @@ -1907,8 +1938,8 @@ int index_path(struct object_id *oid, 
const char *path, struct stat *st, unsigne if (strbuf_readlink(&sb, path, st->st_size)) return error_errno("readlink(\"%s\")", path); if (!(flags & HASH_WRITE_OBJECT)) - hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash); - else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash)) + hash_object_file(sb.buf, sb.len, blob_type, oid); + else if (write_object_file(sb.buf, sb.len, blob_type, oid)) rc = error("%s: failed to insert into database", path); strbuf_release(&sb); break; @@ -2093,14 +2124,14 @@ static int check_stream_sha1(git_zstream *stream, const char *path, const unsigned char *expected_sha1) { - git_SHA_CTX c; + git_hash_ctx c; unsigned char real_sha1[GIT_MAX_RAWSZ]; unsigned char buf[4096]; unsigned long total_read; int status = Z_OK; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, stream->total_out); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, stream->total_out); /* * We already read some bytes into hdr, but the ones up to the NUL @@ -2119,7 +2150,7 @@ static int check_stream_sha1(git_zstream *stream, if (size - total_read < stream->avail_out) stream->avail_out = size - total_read; status = git_inflate(stream, Z_FINISH); - git_SHA1_Update(&c, buf, stream->next_out - buf); + the_hash_algo->update_fn(&c, buf, stream->next_out - buf); total_read += stream->next_out - buf; } git_inflate_end(stream); @@ -2134,7 +2165,7 @@ static int check_stream_sha1(git_zstream *stream, return -1; } - git_SHA1_Final(real_sha1, &c); + the_hash_algo->final_fn(real_sha1, &c); if (hashcmp(expected_sha1, real_sha1)) { error("sha1 mismatch for %s (expected %s)", path, sha1_to_hex(expected_sha1)); diff --git a/sha1dc_git.h b/sha1dc_git.h index a8c2729278..41e1c3fd3f 100644 --- a/sha1dc_git.h +++ b/sha1dc_git.h @@ -1,9 +1,9 @@ /* Plumbing with collition-detecting SHA1 code */ -#ifdef DC_SHA1_SUBMODULE -#include "sha1collisiondetection/lib/sha1.h" -#elif defined(DC_SHA1_EXTERNAL) +#ifdef DC_SHA1_EXTERNAL #include <sha1dc/sha1.h> +#elif defined(DC_SHA1_SUBMODULE) +#include "sha1collisiondetection/lib/sha1.h" #else #include "sha1dc/sha1.h" #endif diff --git a/split-index.c b/split-index.c index 83e39ec8d7..284d04d67f 100644 --- a/split-index.c +++ b/split-index.c @@ -238,6 +238,8 @@ void prepare_to_write_split_index(struct index_state *istate) ALLOC_GROW(entries, nr_entries+1, nr_alloc); entries[nr_entries++] = ce; } + if (is_null_oid(&ce->oid)) + istate->drop_cache_tree = 1; } } diff --git a/sub-process.h b/sub-process.h index 49701998c9..71b18ad5af 100644 --- a/sub-process.h +++ b/sub-process.h @@ -73,8 +73,8 @@ static inline struct child_process *subprocess_get_child_process( } /* - * Perform the version and capability negotiation as described in the "Long - * Running Filter Process" section of the gitattributes documentation using the + * Perform the version and capability negotiation as described in the + * "Handshake" section of long-running-process-protocol.txt using the * given requested versions and capabilities. The "versions" and "capabilities" * parameters are arrays terminated by a 0 or blank struct. * diff --git a/submodule-config.c b/submodule-config.c index 2aa8a1747f..602ba8ca8b 100644 --- a/submodule-config.c +++ b/submodule-config.c @@ -9,7 +9,7 @@ /* * submodule cache lookup structure * There is one shared set of 'struct submodule' entries which can be - * looked up by their sha1 blob id of the .gitmodule file and either + * looked up by their sha1 blob id of the .gitmodules file and either * using path or name as key. 
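
Across the sha1_file.c hunks above, direct git_SHA1_Init()/Update()/Final() calls give way to the pluggable the_hash_algo function pointers operating on a git_hash_ctx. Below is a minimal sketch of the resulting calling pattern, assuming the in-tree declarations from cache.h that the patch itself relies on; hash_buffer_sketch() is a hypothetical helper, not part of the patch.

#include "cache.h"

/*
 * Hypothetical helper: one init/update/final cycle through the pluggable
 * hash, writing the raw digest into an object_id.
 */
static void hash_buffer_sketch(const void *buf, size_t len,
                               struct object_id *oid)
{
        git_hash_ctx c;

        the_hash_algo->init_fn(&c);
        the_hash_algo->update_fn(&c, buf, len);
        the_hash_algo->final_fn(oid->hash, &c);
}

write_object_file_prepare() and check_stream_sha1() in the hunks above follow exactly this init/update/final sequence, which is what lets the hash_algos[] table introduced earlier in the patch swap in a different algorithm without touching the callers.
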
* for_path stores submodule entries with path as key * for_name stores submodule entries with name as key @@ -91,7 +91,7 @@ static void submodule_cache_clear(struct submodule_cache *cache) /* * We iterate over the name hash here to be symmetric with the * allocation of struct submodule entries. Each is allocated by - * their .gitmodule blob sha1 and submodule name. + * their .gitmodules blob sha1 and submodule name. */ hashmap_iter_init(&cache->for_name, &iter); while ((entry = hashmap_iter_next(&iter))) @@ -655,7 +655,7 @@ library for your script to use. test_expect_code 1 git merge "merge msg" B master ' - - test_must_fail <git-command> + - test_must_fail [<options>] <git-command> Run a git command and ensure it fails in a controlled way. Use this instead of "! <git-command>". When git-command dies due to a @@ -663,11 +663,21 @@ library for your script to use. treats it as just another expected failure, which would let such a bug go unnoticed. - - test_might_fail <git-command> + Accepts the following options: + + ok=<signal-name>[,<...>]: + Don't treat an exit caused by the given signal as error. + Multiple signals can be specified as a comma separated list. + Currently recognized signal names are: sigpipe, success. + (Don't use 'success', use 'test_might_fail' instead.) + + - test_might_fail [<options>] <git-command> Similar to test_must_fail, but tolerate success, too. Use this instead of "<git-command> || :" to catch failures due to segv. + Accepts the same options as test_must_fail. + - test_cmp <expected> <actual> Check whether the content of the <actual> file matches the diff --git a/t/helper/test-dump-untracked-cache.c b/t/helper/test-dump-untracked-cache.c index f752532ffb..d7c55c2355 100644 --- a/t/helper/test-dump-untracked-cache.c +++ b/t/helper/test-dump-untracked-cache.c @@ -54,8 +54,8 @@ int cmd_main(int ac, const char **av) printf("no untracked cache\n"); return 0; } - printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1)); - printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1)); + printf("info/exclude %s\n", oid_to_hex(&uc->ss_info_exclude.oid)); + printf("core.excludesfile %s\n", oid_to_hex(&uc->ss_excludes_file.oid)); printf("exclude_per_dir %s\n", uc->exclude_per_dir); printf("flags %08x\n", uc->dir_flags); if (uc->root) diff --git a/t/helper/test-hashmap.c b/t/helper/test-hashmap.c index 1145d51671..9ae9281c07 100644 --- a/t/helper/test-hashmap.c +++ b/t/helper/test-hashmap.c @@ -1,5 +1,6 @@ #include "git-compat-util.h" #include "hashmap.h" +#include "strbuf.h" struct test_entry { @@ -29,11 +30,12 @@ static int test_entry_cmp(const void *cmp_data, return strcmp(e1->key, key ? 
key : e2->key); } -static struct test_entry *alloc_test_entry(int hash, char *key, int klen, - char *value, int vlen) +static struct test_entry *alloc_test_entry(unsigned int hash, + char *key, char *value) { - struct test_entry *entry = malloc(sizeof(struct test_entry) + klen - + vlen + 2); + size_t klen = strlen(key); + size_t vlen = strlen(value); + struct test_entry *entry = xmalloc(st_add4(sizeof(*entry), klen, vlen, 2)); hashmap_entry_init(entry, hash); memcpy(entry->key, key, klen + 1); memcpy(entry->key + klen + 1, value, vlen + 1); @@ -85,11 +87,11 @@ static void perf_hashmap(unsigned int method, unsigned int rounds) unsigned int *hashes; unsigned int i, j; - entries = malloc(TEST_SIZE * sizeof(struct test_entry *)); - hashes = malloc(TEST_SIZE * sizeof(int)); + ALLOC_ARRAY(entries, TEST_SIZE); + ALLOC_ARRAY(hashes, TEST_SIZE); for (i = 0; i < TEST_SIZE; i++) { - snprintf(buf, sizeof(buf), "%i", i); - entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0); + xsnprintf(buf, sizeof(buf), "%i", i); + entries[i] = alloc_test_entry(0, buf, ""); hashes[i] = hash(method, i, entries[i]->key); } @@ -144,7 +146,7 @@ static void perf_hashmap(unsigned int method, unsigned int rounds) */ int cmd_main(int argc, const char **argv) { - char line[1024]; + struct strbuf line = STRBUF_INIT; struct hashmap map; int icase; @@ -153,44 +155,42 @@ int cmd_main(int argc, const char **argv) hashmap_init(&map, test_entry_cmp, &icase, 0); /* process commands from stdin */ - while (fgets(line, sizeof(line), stdin)) { + while (strbuf_getline(&line, stdin) != EOF) { char *cmd, *p1 = NULL, *p2 = NULL; - int l1 = 0, l2 = 0, hash = 0; + unsigned int hash = 0; struct test_entry *entry; /* break line into command and up to two parameters */ - cmd = strtok(line, DELIM); + cmd = strtok(line.buf, DELIM); /* ignore empty lines */ if (!cmd || *cmd == '#') continue; p1 = strtok(NULL, DELIM); if (p1) { - l1 = strlen(p1); hash = icase ? strihash(p1) : strhash(p1); p2 = strtok(NULL, DELIM); - if (p2) - l2 = strlen(p2); } - if (!strcmp("hash", cmd) && l1) { + if (!strcmp("hash", cmd) && p1) { /* print results of different hash functions */ - printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1), - strihash(p1), memihash(p1, l1)); + printf("%u %u %u %u\n", + strhash(p1), memhash(p1, strlen(p1)), + strihash(p1), memihash(p1, strlen(p1))); - } else if (!strcmp("add", cmd) && l1 && l2) { + } else if (!strcmp("add", cmd) && p1 && p2) { /* create entry with key = p1, value = p2 */ - entry = alloc_test_entry(hash, p1, l1, p2, l2); + entry = alloc_test_entry(hash, p1, p2); /* add to hashmap */ hashmap_add(&map, entry); - } else if (!strcmp("put", cmd) && l1 && l2) { + } else if (!strcmp("put", cmd) && p1 && p2) { /* create entry with key = p1, value = p2 */ - entry = alloc_test_entry(hash, p1, l1, p2, l2); + entry = alloc_test_entry(hash, p1, p2); /* add / replace entry */ entry = hashmap_put(&map, entry); @@ -199,7 +199,7 @@ int cmd_main(int argc, const char **argv) puts(entry ? 
get_value(entry) : "NULL"); free(entry); - } else if (!strcmp("get", cmd) && l1) { + } else if (!strcmp("get", cmd) && p1) { /* lookup entry in hashmap */ entry = hashmap_get_from_hash(&map, hash, p1); @@ -212,7 +212,7 @@ int cmd_main(int argc, const char **argv) entry = hashmap_get_next(&map, entry); } - } else if (!strcmp("remove", cmd) && l1) { + } else if (!strcmp("remove", cmd) && p1) { /* setup static key */ struct hashmap_entry key; @@ -238,7 +238,7 @@ int cmd_main(int argc, const char **argv) printf("%u %u\n", map.tablesize, hashmap_get_size(&map)); - } else if (!strcmp("intern", cmd) && l1) { + } else if (!strcmp("intern", cmd) && p1) { /* test that strintern works */ const char *i1 = strintern(p1); @@ -252,7 +252,7 @@ int cmd_main(int argc, const char **argv) else printf("%s\n", i1); - } else if (!strcmp("perfhashmap", cmd) && l1 && l2) { + } else if (!strcmp("perfhashmap", cmd) && p1 && p2) { perf_hashmap(atoi(p1), atoi(p2)); @@ -263,6 +263,7 @@ int cmd_main(int argc, const char **argv) } } + strbuf_release(&line); hashmap_free(&map, 1); return 0; } diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c index d24d157379..153342e44d 100644 --- a/t/helper/test-run-command.c +++ b/t/helper/test-run-command.c @@ -56,6 +56,15 @@ int cmd_main(int argc, const char **argv) if (argc < 3) return 1; + while (!strcmp(argv[1], "env")) { + if (!argv[2]) + die("env specifier without a value"); + argv_array_push(&proc.env_array, argv[2]); + argv += 2; + argc -= 2; + } + if (argc < 3) + return 1; proc.argv = (const char **)argv + 2; if (!strcmp(argv[1], "start-command-ENOENT")) { diff --git a/t/helper/test-wildmatch.c b/t/helper/test-wildmatch.c index 921d7b3e7e..66d33dfcfd 100644 --- a/t/helper/test-wildmatch.c +++ b/t/helper/test-wildmatch.c @@ -16,6 +16,8 @@ int cmd_main(int argc, const char **argv) return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD); else if (!strcmp(argv[1], "pathmatch")) return !!wildmatch(argv[3], argv[2], 0); + else if (!strcmp(argv[1], "ipathmatch")) + return !!wildmatch(argv[3], argv[2], WM_CASEFOLD); else return 1; } diff --git a/t/lib-git-daemon.sh b/t/lib-git-daemon.sh index 987d40680b..edbea2d986 100644 --- a/t/lib-git-daemon.sh +++ b/t/lib-git-daemon.sh @@ -32,7 +32,8 @@ LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-${this_test#t}} GIT_DAEMON_PID= GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo -GIT_DAEMON_URL=git://127.0.0.1:$LIB_GIT_DAEMON_PORT +GIT_DAEMON_HOST_PORT=127.0.0.1:$LIB_GIT_DAEMON_PORT +GIT_DAEMON_URL=git://$GIT_DAEMON_HOST_PORT start_git_daemon() { if test -n "$GIT_DAEMON_PID" @@ -53,11 +54,19 @@ start_git_daemon() { "$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \ >&3 2>git_daemon_output & GIT_DAEMON_PID=$! + >daemon.log { - read line <&7 - echo >&4 "$line" - cat <&7 >&4 & - } 7<git_daemon_output && + read -r line <&7 + printf "%s\n" "$line" + printf >&4 "%s\n" "$line" + ( + while read -r line <&7 + do + printf "%s\n" "$line" + printf >&4 "%s\n" "$line" + done + ) & + } 7<git_daemon_output >>"$TRASH_DIRECTORY/daemon.log" && # Check expected output if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble" @@ -90,3 +99,25 @@ stop_git_daemon() { GIT_DAEMON_PID= rm -f git_daemon_output } + +# A stripped-down version of a netcat client, that connects to a "host:port" +# given in $1, sends its stdin followed by EOF, then dumps the response (until +# EOF) to stdout. +fake_nc() { + if ! 
test_declared_prereq FAKENC + then + echo >&4 "fake_nc: need to declare FAKENC prerequisite" + return 127 + fi + perl -Mstrict -MIO::Socket::INET -e ' + my $s = IO::Socket::INET->new(shift) + or die "unable to open socket: $!"; + print $s <STDIN>; + $s->shutdown(1); + print <$s>; + ' "$@" +} + +test_lazy_prereq FAKENC ' + perl -MIO::Socket::INET -e "exit 0" +' diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl index 5c439f6bc2..821cf1498b 100755 --- a/t/perf/aggregate.perl +++ b/t/perf/aggregate.perl @@ -1,6 +1,6 @@ #!/usr/bin/perl -use lib '../../perl/blib/lib'; +use lib '../../perl/build/lib'; use strict; use warnings; use JSON; @@ -36,7 +36,8 @@ sub format_times { return $out; } -my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests, $codespeed); +my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests, + $codespeed, $subsection, $reponame); while (scalar @ARGV) { my $arg = $ARGV[0]; my $dir; @@ -45,6 +46,24 @@ while (scalar @ARGV) { shift @ARGV; next; } + if ($arg eq "--subsection") { + shift @ARGV; + $subsection = $ARGV[0]; + shift @ARGV; + if (! $subsection) { + die "empty subsection"; + } + next; + } + if ($arg eq "--reponame") { + shift @ARGV; + $reponame = $ARGV[0]; + shift @ARGV; + if (! $reponame) { + die "empty reponame"; + } + next; + } last if -f $arg or $arg eq "--"; if (! -d $arg) { my $rev = Git::command_oneline(qw(rev-parse --verify), $arg); @@ -76,10 +95,15 @@ if (not @tests) { } my $resultsdir = "test-results"; -my $results_section = ""; -if (exists $ENV{GIT_PERF_SUBSECTION} and $ENV{GIT_PERF_SUBSECTION} ne "") { - $resultsdir .= "/" . $ENV{GIT_PERF_SUBSECTION}; - $results_section = $ENV{GIT_PERF_SUBSECTION}; + +if (! $subsection and + exists $ENV{GIT_PERF_SUBSECTION} and + $ENV{GIT_PERF_SUBSECTION} ne "") { + $subsection = $ENV{GIT_PERF_SUBSECTION}; +} + +if ($subsection) { + $resultsdir .= "/" . $subsection; } my @subtests; @@ -183,19 +207,21 @@ sub print_default_results { } sub print_codespeed_results { - my ($results_section) = @_; + my ($subsection) = @_; my $project = "Git"; my $executable = `uname -s -m`; chomp $executable; - if ($results_section ne "") { - $executable .= ", " . $results_section; + if ($subsection) { + $executable .= ", " . $subsection; } my $environment; - if (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") { + if ($reponame) { + $environment = $reponame; + } elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") { $environment = $ENV{GIT_PERF_REPO_NAME}; } elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") { $environment = $ENV{GIT_TEST_INSTALLED}; @@ -227,13 +253,13 @@ sub print_codespeed_results { } } - print to_json(\@data, {utf8 => 1, pretty => 1}), "\n"; + print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n"; } binmode STDOUT, ":utf8" or die "PANIC on binmode: $!"; if ($codespeed) { - print_codespeed_results($results_section); + print_codespeed_results($subsection); } else { print_default_results(); } diff --git a/t/t0002-gitfile.sh b/t/t0002-gitfile.sh index 9670e8cbe6..3691023d51 100755 --- a/t/t0002-gitfile.sh +++ b/t/t0002-gitfile.sh @@ -10,15 +10,6 @@ objpath() { echo "$1" | sed -e 's|\(..\)|\1/|' } -objck() { - p=$(objpath "$1") - if test ! 
-f "$REAL/objects/$p" - then - echo "Object not found: $REAL/objects/$p" - false - fi -} - test_expect_success 'initial setup' ' REAL="$(pwd)/.real" && mv .git "$REAL" @@ -26,30 +17,14 @@ test_expect_success 'initial setup' ' test_expect_success 'bad setup: invalid .git file format' ' echo "gitdir $REAL" >.git && - if git rev-parse 2>.err - then - echo "git rev-parse accepted an invalid .git file" - false - fi && - if ! grep "Invalid gitfile format" .err - then - echo "git rev-parse returned wrong error" - false - fi + test_must_fail git rev-parse 2>.err && + test_i18ngrep "invalid gitfile format" .err ' test_expect_success 'bad setup: invalid .git file path' ' echo "gitdir: $REAL.not" >.git && - if git rev-parse 2>.err - then - echo "git rev-parse accepted an invalid .git file path" - false - fi && - if ! grep "Not a git repository" .err - then - echo "git rev-parse returned wrong error" - false - fi + test_must_fail git rev-parse 2>.err && + test_i18ngrep "not a git repository" .err ' test_expect_success 'final setup + check rev-parse --git-dir' ' @@ -60,7 +35,7 @@ test_expect_success 'final setup + check rev-parse --git-dir' ' test_expect_success 'check hash-object' ' echo "foo" >bar && SHA=$(cat bar | git hash-object -w --stdin) && - objck $SHA + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check cat-file' ' @@ -69,29 +44,21 @@ test_expect_success 'check cat-file' ' ' test_expect_success 'check update-index' ' - if test -f "$REAL/index" - then - echo "Hmm, $REAL/index exists?" - false - fi && + test_path_is_missing "$REAL/index" && rm -f "$REAL/objects/$(objpath $SHA)" && git update-index --add bar && - if ! test -f "$REAL/index" - then - echo "$REAL/index not found" - false - fi && - objck $SHA + test_path_is_file "$REAL/index" && + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check write-tree' ' SHA=$(git write-tree) && - objck $SHA + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check commit-tree' ' SHA=$(echo "commit bar" | git commit-tree $SHA) && - objck $SHA + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check rev-list' ' diff --git a/t/t0008-ignores.sh b/t/t0008-ignores.sh index d27f438bf4..c03f155a35 100755 --- a/t/t0008-ignores.sh +++ b/t/t0008-ignores.sh @@ -307,7 +307,7 @@ test_expect_success_multi 'needs work tree' '' ' cd .git && test_check_ignore "foo" 128 ) && - stderr_contains "fatal: This operation must be run in a work tree" + stderr_contains "fatal: this operation must be run in a work tree" ' ############################################################################ @@ -775,6 +775,26 @@ test_expect_success PIPE 'streaming support for --stdin' ' echo "$response" | grep "^:: two" ' +test_expect_success 'existing file and directory' ' + test_when_finished "rm one" && + test_when_finished "rmdir top-level-dir" && + >one && + mkdir top-level-dir && + git check-ignore one top-level-dir >actual && + grep one actual && + grep top-level-dir actual +' + +test_expect_success 'existing directory and file' ' + test_when_finished "rm one" && + test_when_finished "rmdir top-level-dir" && + >one && + mkdir top-level-dir && + git check-ignore top-level-dir one >actual && + grep one actual && + grep top-level-dir actual +' + ############################################################################ # # test whitespace handling diff --git a/t/t0050-filesystem.sh b/t/t0050-filesystem.sh index b29d749bb7..192c94eccd 100755 --- a/t/t0050-filesystem.sh +++ 
b/t/t0050-filesystem.sh @@ -80,7 +80,21 @@ test_expect_success 'merge (case change)' ' git merge topic ' - +test_expect_success CASE_INSENSITIVE_FS 'add directory (with different case)' ' + git reset --hard initial && + mkdir -p dir1/dir2 && + echo >dir1/dir2/a && + echo >dir1/dir2/b && + git add dir1/dir2/a && + git add dir1/DIR2/b && + git ls-files >actual && + cat >expected <<-\EOF && + camelcase + dir1/dir2/a + dir1/dir2/b + EOF + test_cmp expected actual +' test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' ' git reset --hard initial && diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh index e4739170aa..24c92b6cd7 100755 --- a/t/t0061-run-command.sh +++ b/t/t0061-run-command.sh @@ -141,4 +141,41 @@ test_expect_success 'run_command outputs ' ' test_cmp expect actual ' +test_trace () { + expect="$1" + shift + GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \ + sed 's/.* run_command: //' >actual && + echo "$expect true" >expect && + test_cmp expect actual +} + +test_expect_success 'GIT_TRACE with environment variables' ' + test_trace "abc=1 def=2" env abc=1 env def=2 && + test_trace "abc=2" env abc env abc=1 env abc=2 && + test_trace "abc=2" env abc env abc=2 && + ( + abc=1 && export abc && + test_trace "def=1" env abc=1 env def=1 + ) && + ( + abc=1 && export abc && + test_trace "def=1" env abc env abc=1 env def=1 + ) && + test_trace "def=1" env non-exist env def=1 && + test_trace "abc=2" env abc=1 env abc env abc=2 && + ( + abc=1 def=2 && export abc def && + test_trace "unset abc def;" env abc env def + ) && + ( + abc=1 def=2 && export abc def && + test_trace "unset def; abc=3" env abc env def env abc=3 + ) && + ( + abc=1 && export abc && + test_trace "unset abc;" env abc=2 env abc + ) +' + test_done diff --git a/t/t0205-gettext-poison.sh b/t/t0205-gettext-poison.sh index 2361590d54..438e778d6a 100755 --- a/t/t0205-gettext-poison.sh +++ b/t/t0205-gettext-poison.sh @@ -7,10 +7,6 @@ test_description='Gettext Shell poison' . ./lib-gettext.sh -test_expect_success GETTEXT_POISON "sanity: \$GIT_INTERNAL_GETTEXT_SH_SCHEME is set (to $GIT_INTERNAL_GETTEXT_SH_SCHEME)" ' - test -n "$GIT_INTERNAL_GETTEXT_SH_SCHEME" -' - test_expect_success GETTEXT_POISON 'sanity: $GIT_INTERNAL_GETTEXT_SH_SCHEME" is poison' ' test "$GIT_INTERNAL_GETTEXT_SH_SCHEME" = "poison" ' diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh new file mode 100755 index 0000000000..cc18b75c03 --- /dev/null +++ b/t/t0410-partial-clone.sh @@ -0,0 +1,343 @@ +#!/bin/sh + +test_description='partial clone' + +. 
./test-lib.sh + +delete_object () { + rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|') +} + +pack_as_from_promisor () { + HASH=$(git -C repo pack-objects .git/objects/pack/pack) && + >repo/.git/objects/pack/pack-$HASH.promisor && + echo $HASH +} + +promise_and_delete () { + HASH=$(git -C repo rev-parse "$1") && + git -C repo tag -a -m message my_annotated_tag "$HASH" && + git -C repo rev-parse my_annotated_tag | pack_as_from_promisor && + # tag -d prints a message to stdout, so redirect it + git -C repo tag -d my_annotated_tag >/dev/null && + delete_object repo "$HASH" +} + +test_expect_success 'missing reflog object, but promised by a commit, passes fsck' ' + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + C=$(git -C repo commit-tree -m c -p $A HEAD^{tree}) && + + # Reference $A only from reflog, and delete it + git -C repo branch my_branch "$A" && + git -C repo branch -f my_branch my_commit && + delete_object repo "$A" && + + # State that we got $C, which refers to $A, from promisor + printf "$C\n" | pack_as_from_promisor && + + # Normally, it fails + test_must_fail git -C repo fsck && + + # But with the extension, it succeeds + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing reflog object, but promised by a tag, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + git -C repo tag -a -m d my_tag_name $A && + T=$(git -C repo rev-parse my_tag_name) && + git -C repo tag -d my_tag_name && + + # Reference $A only from reflog, and delete it + git -C repo branch my_branch "$A" && + git -C repo branch -f my_branch my_commit && + delete_object repo "$A" && + + # State that we got $T, which refers to $A, from promisor + printf "$T\n" | pack_as_from_promisor && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing reflog object alone fails fsck, even with extension set' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + B=$(git -C repo commit-tree -m b HEAD^{tree}) && + + # Reference $A only from reflog, and delete it + git -C repo branch my_branch "$A" && + git -C repo branch -f my_branch my_commit && + delete_object repo "$A" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + test_must_fail git -C repo fsck +' + +test_expect_success 'missing ref object, but promised, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + + # Reference $A only from ref + git -C repo branch my_branch "$A" && + promise_and_delete "$A" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing object, but promised, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo 1 && + test_commit -C repo 2 && + test_commit -C repo 3 && + git -C repo tag -a annotated_tag -m "annotated tag" && + + C=$(git -C repo rev-parse 1) && + T=$(git -C repo rev-parse 2^{tree}) && + B=$(git hash-object repo/3.t) && + AT=$(git -C repo 
rev-parse annotated_tag) && + + promise_and_delete "$C" && + promise_and_delete "$T" && + promise_and_delete "$B" && + promise_and_delete "$AT" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing CLI object, but promised, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + promise_and_delete "$A" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck "$A" +' + +test_expect_success 'fetching of missing objects' ' + rm -rf repo && + test_create_repo server && + test_commit -C server foo && + git -C server repack -a -d --write-bitmap-index && + + git clone "file://$(pwd)/server" repo && + HASH=$(git -C repo rev-parse foo) && + rm -rf repo/.git/objects/* && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "origin" && + git -C repo cat-file -p "$HASH" && + + # Ensure that the .promisor file is written, and check that its + # associated packfile contains the object + ls repo/.git/objects/pack/pack-*.promisor >promisorlist && + test_line_count = 1 promisorlist && + IDX=$(cat promisorlist | sed "s/promisor$/idx/") && + git verify-pack --verbose "$IDX" | grep "$HASH" +' + +test_expect_success 'rev-list stops traversal at missing and promised commit' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + test_commit -C repo bar && + + FOO=$(git -C repo rev-parse foo) && + promise_and_delete "$FOO" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects bar >out && + grep $(git -C repo rev-parse bar) out && + ! grep $FOO out +' + +test_expect_success 'rev-list stops traversal at missing and promised tree' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + mkdir repo/a_dir && + echo something >repo/a_dir/something && + git -C repo add a_dir/something && + git -C repo commit -m bar && + + # foo^{tree} (tree referenced from commit) + TREE=$(git -C repo rev-parse foo^{tree}) && + + # a tree referenced by HEAD^{tree} (tree referenced from tree) + TREE2=$(git -C repo ls-tree HEAD^{tree} | grep " tree " | head -1 | cut -b13-52) && + + promise_and_delete "$TREE" && + promise_and_delete "$TREE2" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects HEAD >out && + grep $(git -C repo rev-parse foo) out && + ! grep $TREE out && + grep $(git -C repo rev-parse HEAD) out && + ! grep $TREE2 out +' + +test_expect_success 'rev-list stops traversal at missing and promised blob' ' + rm -rf repo && + test_create_repo repo && + echo something >repo/something && + git -C repo add something && + git -C repo commit -m foo && + + BLOB=$(git -C repo hash-object -w something) && + promise_and_delete "$BLOB" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects HEAD >out && + grep $(git -C repo rev-parse HEAD) out && + ! 
grep $BLOB out +' + +test_expect_success 'rev-list stops traversal at promisor commit, tree, and blob' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + test_commit -C repo bar && + test_commit -C repo baz && + + COMMIT=$(git -C repo rev-parse foo) && + TREE=$(git -C repo rev-parse bar^{tree}) && + BLOB=$(git hash-object repo/baz.t) && + printf "%s\n%s\n%s\n" $COMMIT $TREE $BLOB | pack_as_from_promisor && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects HEAD >out && + ! grep $COMMIT out && + ! grep $TREE out && + ! grep $BLOB out && + grep $(git -C repo rev-parse bar) out # sanity check that some walking was done +' + +test_expect_success 'rev-list accepts missing and promised objects on command line' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + test_commit -C repo bar && + test_commit -C repo baz && + + COMMIT=$(git -C repo rev-parse foo) && + TREE=$(git -C repo rev-parse bar^{tree}) && + BLOB=$(git hash-object repo/baz.t) && + + promise_and_delete $COMMIT && + promise_and_delete $TREE && + promise_and_delete $BLOB && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects "$COMMIT" "$TREE" "$BLOB" +' + +test_expect_success 'gc does not repack promisor objects' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) && + HASH=$(printf "$TREE_HASH\n" | pack_as_from_promisor) && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo gc && + + # Ensure that the promisor packfile still exists, and remove it + test -e repo/.git/objects/pack/pack-$HASH.pack && + rm repo/.git/objects/pack/pack-$HASH.* && + + # Ensure that the single other pack contains the commit, but not the tree + ls repo/.git/objects/pack/pack-*.pack >packlist && + test_line_count = 1 packlist && + git verify-pack repo/.git/objects/pack/pack-*.pack -v >out && + grep "$(git -C repo rev-parse HEAD)" out && + ! grep "$TREE_HASH" out +' + +test_expect_success 'gc stops traversal when a missing but promised object is reached' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) && + HASH=$(promise_and_delete $TREE_HASH) && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo gc && + + # Ensure that the promisor packfile still exists, and remove it + test -e repo/.git/objects/pack/pack-$HASH.pack && + rm repo/.git/objects/pack/pack-$HASH.* && + + # Ensure that the single other pack contains the commit, but not the tree + ls repo/.git/objects/pack/pack-*.pack >packlist && + test_line_count = 1 packlist && + git verify-pack repo/.git/objects/pack/pack-*.pack -v >out && + grep "$(git -C repo rev-parse HEAD)" out && + ! grep "$TREE_HASH" out +' + +LIB_HTTPD_PORT=12345 # default port, 410, cannot be used as non-root +. 
"$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'fetching of missing objects from an HTTP server' ' + rm -rf repo && + SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" && + test_create_repo "$SERVER" && + test_commit -C "$SERVER" foo && + git -C "$SERVER" repack -a -d --write-bitmap-index && + + git clone $HTTPD_URL/smart/server repo && + HASH=$(git -C repo rev-parse foo) && + rm -rf repo/.git/objects/* && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "origin" && + git -C repo cat-file -p "$HASH" && + + # Ensure that the .promisor file is written, and check that its + # associated packfile contains the object + ls repo/.git/objects/pack/pack-*.promisor >promisorlist && + test_line_count = 1 promisorlist && + IDX=$(cat promisorlist | sed "s/promisor$/idx/") && + git verify-pack --verbose "$IDX" | grep "$HASH" +' + +stop_httpd + +test_done diff --git a/t/t1300-repo-config.sh b/t/t1300-repo-config.sh index cbeb9bebee..4f8e6f5fde 100755 --- a/t/t1300-repo-config.sh +++ b/t/t1300-repo-config.sh @@ -1206,6 +1206,29 @@ test_expect_success 'git -c is not confused by empty environment' ' GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list ' +sq="'" +test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' ' + cat >expect <<-\EOF && + env.one one + env.two two + EOF + GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \ + git config --get-regexp "env.*" >actual && + test_cmp expect actual && + + cat >expect <<-EOF && + env.one one${sq} + env.two two + EOF + GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \ + git config --get-regexp "env.*" >actual && + test_cmp expect actual && + + test_must_fail env \ + GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \ + git config --get-regexp "env.*" +' + test_expect_success 'git config --edit works' ' git config -f tmp test.value no && echo test.value=yes >expect && diff --git a/t/t1506-rev-parse-diagnosis.sh b/t/t1506-rev-parse-diagnosis.sh index 79a0251efa..4ee009da66 100755 --- a/t/t1506-rev-parse-diagnosis.sh +++ b/t/t1506-rev-parse-diagnosis.sh @@ -157,7 +157,7 @@ test_expect_success 'relative path not found' ' test_expect_success 'relative path outside worktree' ' test_must_fail git rev-parse HEAD:../file.txt >output 2>error && test -z "$(cat output)" && - grep "outside repository" error + test_i18ngrep "outside repository" error ' test_expect_success 'relative path when cwd is outside worktree' ' diff --git a/t/t1700-split-index.sh b/t/t1700-split-index.sh index af9b847761..a66936fe9b 100755 --- a/t/t1700-split-index.sh +++ b/t/t1700-split-index.sh @@ -401,4 +401,42 @@ done <<\EOF 0642 -rw-r---w- EOF +test_expect_success POSIXPERM,SANITY 'graceful handling when splitting index is not allowed' ' + test_create_repo ro && + ( + cd ro && + test_commit initial && + git update-index --split-index && + test -f .git/sharedindex.* + ) && + cp ro/.git/index new-index && + test_when_finished "chmod u+w ro/.git" && + chmod u-w ro/.git && + GIT_INDEX_FILE="$(pwd)/new-index" git -C ro update-index --split-index && + chmod u+w ro/.git && + rm ro/.git/sharedindex.* && + GIT_INDEX_FILE=new-index git ls-files >actual && + echo initial.t >expected && + test_cmp expected actual +' + +test_expect_success 'writing split index with null sha1 does not write cache tree' ' + git config core.splitIndex true && + git config splitIndex.maxPercentChange 0 && + git commit -m "commit" && + { + git ls-tree HEAD && + printf "160000 commit 
$_z40\\tbroken\\n" + } >broken-tree && + echo "add broken entry" >msg && + + tree=$(git mktree <broken-tree) && + test_tick && + commit=$(git commit-tree $tree -p HEAD <msg) && + git update-ref HEAD "$commit" && + GIT_ALLOW_NULL_SHA1=1 git reset --hard && + (test-dump-cache-tree >cache-tree.out || true) && + test_line_count = 0 cache-tree.out +' + test_done diff --git a/t/t2025-worktree-add.sh b/t/t2025-worktree-add.sh index 2b95944973..d0d2e4f7ec 100755 --- a/t/t2025-worktree-add.sh +++ b/t/t2025-worktree-add.sh @@ -451,32 +451,68 @@ test_expect_success 'git worktree --no-guess-remote option overrides config' ' ' post_checkout_hook () { - test_when_finished "rm -f .git/hooks/post-checkout" && - mkdir -p .git/hooks && - write_script .git/hooks/post-checkout <<-\EOF - echo $* >hook.actual + gitdir=${1:-.git} + test_when_finished "rm -f $gitdir/hooks/post-checkout" && + mkdir -p $gitdir/hooks && + write_script $gitdir/hooks/post-checkout <<-\EOF + { + echo $* + git rev-parse --git-dir --show-toplevel + } >hook.actual EOF } test_expect_success '"add" invokes post-checkout hook (branch)' ' post_checkout_hook && - printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect && + { + echo $_z40 $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/gumby && + echo $(pwd)/gumby + } >hook.expect && git worktree add gumby && - test_cmp hook.expect hook.actual + test_cmp hook.expect gumby/hook.actual ' test_expect_success '"add" invokes post-checkout hook (detached)' ' post_checkout_hook && - printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect && + { + echo $_z40 $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/grumpy && + echo $(pwd)/grumpy + } >hook.expect && git worktree add --detach grumpy && - test_cmp hook.expect hook.actual + test_cmp hook.expect grumpy/hook.actual ' test_expect_success '"add --no-checkout" suppresses post-checkout hook' ' post_checkout_hook && rm -f hook.actual && git worktree add --no-checkout gloopy && - test_path_is_missing hook.actual + test_path_is_missing gloopy/hook.actual +' + +test_expect_success '"add" in other worktree invokes post-checkout hook' ' + post_checkout_hook && + { + echo $_z40 $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/guppy && + echo $(pwd)/guppy + } >hook.expect && + git -C gloopy worktree add --detach ../guppy && + test_cmp hook.expect guppy/hook.actual +' + +test_expect_success '"add" in bare repo invokes post-checkout hook' ' + rm -rf bare && + git clone --bare . bare && + { + echo $_z40 $(git --git-dir=bare rev-parse HEAD) 1 && + echo $(pwd)/bare/worktrees/goozy && + echo $(pwd)/goozy + } >hook.expect && + post_checkout_hook bare && + git -C bare worktree add --detach ../goozy && + test_cmp hook.expect goozy/hook.actual ' test_done diff --git a/t/t3070-wildmatch.sh b/t/t3070-wildmatch.sh index 163a14a1c2..c1fc6ca730 100755 --- a/t/t3070-wildmatch.sh +++ b/t/t3070-wildmatch.sh @@ -4,266 +4,431 @@ test_description='wildmatch tests' . ./test-lib.sh -match() { - if [ $1 = 1 ]; then - test_expect_success "wildmatch: match '$3' '$4'" " - test-wildmatch wildmatch '$3' '$4' - " - else - test_expect_success "wildmatch: no match '$3' '$4'" " - ! test-wildmatch wildmatch '$3' '$4' - " - fi +should_create_test_file() { + file=$1 + + case $file in + # `touch .` will succeed but obviously not do what we intend + # here. + ".") + return 1 + ;; + # We cannot create a file with an empty filename. + "") + return 1 + ;; + # The tests that are testing that e.g. 
foo//bar is matched by + # foo/*/bar can't be tested on filesystems since there's no + # way we're getting a double slash. + *//*) + return 1 + ;; + # When testing the difference between foo/bar and foo/bar/ we + # can't test the latter. + */) + return 1 + ;; + # On Windows, \ in paths is silently converted to /, which + # would result in the "touch" below working, but the test + # itself failing. See 6fd1106aa4 ("t3700: Skip a test with + # backslashes in pathspec", 2009-03-13) for prior art and + # details. + *\\*) + if ! test_have_prereq BSLASHPSPEC + then + return 1 + fi + # NOTE: The ;;& bash extension is not portable, so + # this test needs to be at the end of the pattern + # list. + # + # If we want to add more conditional returns we either + # need a new case statement, or turn this whole thing + # into a series of "if" tests. + ;; + esac + + + # On Windows proper (i.e. not Cygwin) many file names which + # under Cygwin would be emulated don't work. + if test_have_prereq MINGW + then + case $file in + " ") + # Files called " " are forbidden on Windows + return 1 + ;; + *\<*|*\>*|*:*|*\"*|*\|*|*\?*|*\**) + # Files with various special characters aren't + # allowed on Windows. Sourced from + # https://stackoverflow.com/a/31976060 + return 1 + ;; + esac + fi + + return 0 } -imatch() { - if [ $1 = 1 ]; then - test_expect_success "iwildmatch: match '$2' '$3'" " - test-wildmatch iwildmatch '$2' '$3' - " - else - test_expect_success "iwildmatch: no match '$2' '$3'" " - ! test-wildmatch iwildmatch '$2' '$3' - " - fi +match_with_function() { + text=$1 + pattern=$2 + match_expect=$3 + match_function=$4 + + if test "$match_expect" = 1 + then + test_expect_success "$match_function: match '$text' '$pattern'" " + test-wildmatch $match_function '$text' '$pattern' + " + elif test "$match_expect" = 0 + then + test_expect_success "$match_function: no match '$text' '$pattern'" " + test_must_fail test-wildmatch $match_function '$text' '$pattern' + " + else + test_expect_success "PANIC: Test framework error. 
Unknown matches value $match_expect" 'false' + fi + +} + +match_with_ls_files() { + text=$1 + pattern=$2 + match_expect=$3 + match_function=$4 + ls_files_args=$5 + + match_stdout_stderr_cmp=" + tr -d '\0' <actual.raw >actual && + >expect.err && + test_cmp expect.err actual.err && + test_cmp expect actual" + + if test "$match_expect" = 'E' + then + if test -e .git/created_test_file + then + test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match dies on '$pattern' '$text'" " + printf '%s' '$text' >expect && + test_must_fail git$ls_files_args ls-files -z -- '$pattern' + " + else + test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false' + fi + elif test "$match_expect" = 1 + then + if test -e .git/created_test_file + then + test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match '$pattern' '$text'" " + printf '%s' '$text' >expect && + git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err && + $match_stdout_stderr_cmp + " + else + test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false' + fi + elif test "$match_expect" = 0 + then + if test -e .git/created_test_file + then + test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match '$pattern' '$text'" " + >expect && + git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err && + $match_stdout_stderr_cmp + " + else + test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match skip '$pattern' '$text'" 'false' + fi + else + test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false' + fi } -pathmatch() { - if [ $1 = 1 ]; then - test_expect_success "pathmatch: match '$2' '$3'" " - test-wildmatch pathmatch '$2' '$3' - " - else - test_expect_success "pathmatch: no match '$2' '$3'" " - ! test-wildmatch pathmatch '$2' '$3' - " - fi +match() { + if test "$#" = 6 + then + # When test-wildmatch and git ls-files produce the same + # result. 
+ match_glob=$1 + match_file_glob=$match_glob + match_iglob=$2 + match_file_iglob=$match_iglob + match_pathmatch=$3 + match_file_pathmatch=$match_pathmatch + match_pathmatchi=$4 + match_file_pathmatchi=$match_pathmatchi + text=$5 + pattern=$6 + elif test "$#" = 10 + then + match_glob=$1 + match_iglob=$2 + match_pathmatch=$3 + match_pathmatchi=$4 + match_file_glob=$5 + match_file_iglob=$6 + match_file_pathmatch=$7 + match_file_pathmatchi=$8 + text=$9 + pattern=${10} + fi + + test_expect_success EXPENSIVE_ON_WINDOWS 'cleanup after previous file test' ' + if test -e .git/created_test_file + then + git reset && + git clean -df + fi + ' + + printf '%s' "$text" >.git/expected_test_file + + test_expect_success EXPENSIVE_ON_WINDOWS "setup match file test for $text" ' + file=$(cat .git/expected_test_file) && + if should_create_test_file "$file" + then + dirs=${file%/*} + if test "$file" != "$dirs" + then + mkdir -p -- "$dirs" && + touch -- "./$text" + else + touch -- "./$file" + fi && + git add -A && + printf "%s" "$file" >.git/created_test_file + elif test -e .git/created_test_file + then + rm .git/created_test_file + fi + ' + + # $1: Case sensitive glob match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_glob "wildmatch" + match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs" + + # $2: Case insensitive glob match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_iglob "iwildmatch" + match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs" + + # $3: Case sensitive path match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_pathmatch "pathmatch" + match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" "" + + # $4: Case insensitive path match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch" + match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs" } -# Basic wildmat features -match 1 1 foo foo -match 0 0 foo bar -match 1 1 '' "" -match 1 1 foo '???' -match 0 0 foo '??' -match 1 1 foo '*' -match 1 1 foo 'f*' -match 0 0 foo '*f' -match 1 1 foo '*foo*' -match 1 1 foobar '*ob*a*r*' -match 1 1 aaaaaaabababab '*ab' -match 1 1 'foo*' 'foo\*' -match 0 0 foobar 'foo\*bar' -match 1 1 'f\oo' 'f\\oo' -match 1 1 ball '*[al]?' -match 0 0 ten '[ten]' -match 0 1 ten '**[!te]' -match 0 0 ten '**[!ten]' -match 1 1 ten 't[a-g]n' -match 0 0 ten 't[!a-g]n' -match 1 1 ton 't[!a-g]n' -match 1 1 ton 't[^a-g]n' -match 1 x 'a]b' 'a[]]b' -match 1 x a-b 'a[]-]b' -match 1 x 'a]b' 'a[]-]b' -match 0 x aab 'a[]-]b' -match 1 x aab 'a[]a-]b' -match 1 1 ']' ']' +# Basic wildmatch features +match 1 1 1 1 foo foo +match 0 0 0 0 foo bar +match 1 1 1 1 '' "" +match 1 1 1 1 foo '???' +match 0 0 0 0 foo '??' +match 1 1 1 1 foo '*' +match 1 1 1 1 foo 'f*' +match 0 0 0 0 foo '*f' +match 1 1 1 1 foo '*foo*' +match 1 1 1 1 foobar '*ob*a*r*' +match 1 1 1 1 aaaaaaabababab '*ab' +match 1 1 1 1 'foo*' 'foo\*' +match 0 0 0 0 foobar 'foo\*bar' +match 1 1 1 1 'f\oo' 'f\\oo' +match 1 1 1 1 ball '*[al]?' 
+match 0 0 0 0 ten '[ten]' +match 0 0 1 1 ten '**[!te]' +match 0 0 0 0 ten '**[!ten]' +match 1 1 1 1 ten 't[a-g]n' +match 0 0 0 0 ten 't[!a-g]n' +match 1 1 1 1 ton 't[!a-g]n' +match 1 1 1 1 ton 't[^a-g]n' +match 1 1 1 1 'a]b' 'a[]]b' +match 1 1 1 1 a-b 'a[]-]b' +match 1 1 1 1 'a]b' 'a[]-]b' +match 0 0 0 0 aab 'a[]-]b' +match 1 1 1 1 aab 'a[]a-]b' +match 1 1 1 1 ']' ']' # Extended slash-matching features -match 0 0 'foo/baz/bar' 'foo*bar' -match 0 0 'foo/baz/bar' 'foo**bar' -match 0 1 'foobazbar' 'foo**bar' -match 1 1 'foo/baz/bar' 'foo/**/bar' -match 1 0 'foo/baz/bar' 'foo/**/**/bar' -match 1 0 'foo/b/a/z/bar' 'foo/**/bar' -match 1 0 'foo/b/a/z/bar' 'foo/**/**/bar' -match 1 0 'foo/bar' 'foo/**/bar' -match 1 0 'foo/bar' 'foo/**/**/bar' -match 0 0 'foo/bar' 'foo?bar' -match 0 0 'foo/bar' 'foo[/]bar' -match 0 0 'foo/bar' 'foo[^a-z]bar' -match 0 0 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' -match 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' -match 1 0 'foo' '**/foo' -match 1 x 'XXX/foo' '**/foo' -match 1 0 'bar/baz/foo' '**/foo' -match 0 0 'bar/baz/foo' '*/foo' -match 0 0 'foo/bar/baz' '**/bar*' -match 1 0 'deep/foo/bar/baz' '**/bar/*' -match 0 0 'deep/foo/bar/baz/' '**/bar/*' -match 1 0 'deep/foo/bar/baz/' '**/bar/**' -match 0 0 'deep/foo/bar' '**/bar/*' -match 1 0 'deep/foo/bar/' '**/bar/**' -match 0 0 'foo/bar/baz' '**/bar**' -match 1 0 'foo/bar/baz/x' '*/bar/**' -match 0 0 'deep/foo/bar/baz/x' '*/bar/**' -match 1 0 'deep/foo/bar/baz/x' '**/bar/*/*' +match 0 0 1 1 'foo/baz/bar' 'foo*bar' +match 0 0 1 1 'foo/baz/bar' 'foo**bar' +match 0 0 1 1 'foobazbar' 'foo**bar' +match 1 1 1 1 'foo/baz/bar' 'foo/**/bar' +match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar' +match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar' +match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/**/bar' +match 1 1 0 0 'foo/bar' 'foo/**/bar' +match 1 1 0 0 'foo/bar' 'foo/**/**/bar' +match 0 0 1 1 'foo/bar' 'foo?bar' +match 0 0 1 1 'foo/bar' 'foo[/]bar' +match 0 0 1 1 'foo/bar' 'foo[^a-z]bar' +match 0 0 1 1 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' +match 1 1 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' +match 1 1 0 0 'foo' '**/foo' +match 1 1 1 1 'XXX/foo' '**/foo' +match 1 1 1 1 'bar/baz/foo' '**/foo' +match 0 0 1 1 'bar/baz/foo' '*/foo' +match 0 0 1 1 'foo/bar/baz' '**/bar*' +match 1 1 1 1 'deep/foo/bar/baz' '**/bar/*' +match 0 0 1 1 'deep/foo/bar/baz/' '**/bar/*' +match 1 1 1 1 'deep/foo/bar/baz/' '**/bar/**' +match 0 0 0 0 'deep/foo/bar' '**/bar/*' +match 1 1 1 1 'deep/foo/bar/' '**/bar/**' +match 0 0 1 1 'foo/bar/baz' '**/bar**' +match 1 1 1 1 'foo/bar/baz/x' '*/bar/**' +match 0 0 1 1 'deep/foo/bar/baz/x' '*/bar/**' +match 1 1 1 1 'deep/foo/bar/baz/x' '**/bar/*/*' # Various additional tests -match 0 0 'acrt' 'a[c-c]st' -match 1 1 'acrt' 'a[c-c]rt' -match 0 0 ']' '[!]-]' -match 1 x 'a' '[!]-]' -match 0 0 '' '\' -match 0 x '\' '\' -match 0 x 'XXX/\' '*/\' -match 1 x 'XXX/\' '*/\\' -match 1 1 'foo' 'foo' -match 1 1 '@foo' '@foo' -match 0 0 'foo' '@foo' -match 1 1 '[ab]' '\[ab]' -match 1 1 '[ab]' '[[]ab]' -match 1 x '[ab]' '[[:]ab]' -match 0 x '[ab]' '[[::]ab]' -match 1 x '[ab]' '[[:digit]ab]' -match 1 x '[ab]' '[\[:]ab]' -match 1 1 '?a?b' '\??\?b' -match 1 1 'abc' '\a\b\c' -match 0 0 'foo' '' -match 1 0 'foo/bar/baz/to' '**/t[o]' +match 0 0 0 0 'acrt' 'a[c-c]st' +match 1 1 1 1 'acrt' 'a[c-c]rt' +match 0 0 0 0 ']' '[!]-]' +match 1 1 1 1 'a' '[!]-]' +match 0 0 0 0 '' '\' +match 0 0 0 0 \ + 1 1 1 1 '\' '\' +match 0 0 0 0 'XXX/\' '*/\' +match 1 1 1 1 'XXX/\' '*/\\' +match 1 1 1 1 'foo' 'foo' +match 1 1 1 1 '@foo' '@foo' +match 0 0 0 0 'foo' 
'@foo' +match 1 1 1 1 '[ab]' '\[ab]' +match 1 1 1 1 '[ab]' '[[]ab]' +match 1 1 1 1 '[ab]' '[[:]ab]' +match 0 0 0 0 '[ab]' '[[::]ab]' +match 1 1 1 1 '[ab]' '[[:digit]ab]' +match 1 1 1 1 '[ab]' '[\[:]ab]' +match 1 1 1 1 '?a?b' '\??\?b' +match 1 1 1 1 'abc' '\a\b\c' +match 0 0 0 0 \ + E E E E 'foo' '' +match 1 1 1 1 'foo/bar/baz/to' '**/t[o]' # Character class tests -match 1 x 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]' -match 0 x 'a' '[[:digit:][:upper:][:space:]]' -match 1 x 'A' '[[:digit:][:upper:][:space:]]' -match 1 x '1' '[[:digit:][:upper:][:space:]]' -match 0 x '1' '[[:digit:][:upper:][:spaci:]]' -match 1 x ' ' '[[:digit:][:upper:][:space:]]' -match 0 x '.' '[[:digit:][:upper:][:space:]]' -match 1 x '.' '[[:digit:][:punct:][:space:]]' -match 1 x '5' '[[:xdigit:]]' -match 1 x 'f' '[[:xdigit:]]' -match 1 x 'D' '[[:xdigit:]]' -match 1 x '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]' -match 1 x '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]' -match 1 x '5' '[a-c[:digit:]x-z]' -match 1 x 'b' '[a-c[:digit:]x-z]' -match 1 x 'y' '[a-c[:digit:]x-z]' -match 0 x 'q' '[a-c[:digit:]x-z]' - -# Additional tests, including some malformed wildmats -match 1 x ']' '[\\-^]' -match 0 0 '[' '[\\-^]' -match 1 x '-' '[\-_]' -match 1 x ']' '[\]]' -match 0 0 '\]' '[\]]' -match 0 0 '\' '[\]]' -match 0 0 'ab' 'a[]b' -match 0 x 'a[]b' 'a[]b' -match 0 x 'ab[' 'ab[' -match 0 0 'ab' '[!' -match 0 0 'ab' '[-' -match 1 1 '-' '[-]' -match 0 0 '-' '[a-' -match 0 0 '-' '[!a-' -match 1 x '-' '[--A]' -match 1 x '5' '[--A]' -match 1 1 ' ' '[ --]' -match 1 1 '$' '[ --]' -match 1 1 '-' '[ --]' -match 0 0 '0' '[ --]' -match 1 x '-' '[---]' -match 1 x '-' '[------]' -match 0 0 'j' '[a-e-n]' -match 1 x '-' '[a-e-n]' -match 1 x 'a' '[!------]' -match 0 0 '[' '[]-a]' -match 1 x '^' '[]-a]' -match 0 0 '^' '[!]-a]' -match 1 x '[' '[!]-a]' -match 1 1 '^' '[a^bc]' -match 1 x '-b]' '[a-]b]' -match 0 0 '\' '[\]' -match 1 1 '\' '[\\]' -match 0 0 '\' '[!\\]' -match 1 1 'G' '[A-\\]' -match 0 0 'aaabbb' 'b*a' -match 0 0 'aabcaa' '*ba*' -match 1 1 ',' '[,]' -match 1 1 ',' '[\\,]' -match 1 1 '\' '[\\,]' -match 1 1 '-' '[,-.]' -match 0 0 '+' '[,-.]' -match 0 0 '-.]' '[,-.]' -match 1 1 '2' '[\1-\3]' -match 1 1 '3' '[\1-\3]' -match 0 0 '4' '[\1-\3]' -match 1 1 '\' '[[-\]]' -match 1 1 '[' '[[-\]]' -match 1 1 ']' '[[-\]]' -match 0 0 '-' '[[-\]]' +match 1 1 1 1 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]' +match 0 1 0 1 'a' '[[:digit:][:upper:][:space:]]' +match 1 1 1 1 'A' '[[:digit:][:upper:][:space:]]' +match 1 1 1 1 '1' '[[:digit:][:upper:][:space:]]' +match 0 0 0 0 '1' '[[:digit:][:upper:][:spaci:]]' +match 1 1 1 1 ' ' '[[:digit:][:upper:][:space:]]' +match 0 0 0 0 '.' '[[:digit:][:upper:][:space:]]' +match 1 1 1 1 '.' '[[:digit:][:punct:][:space:]]' +match 1 1 1 1 '5' '[[:xdigit:]]' +match 1 1 1 1 'f' '[[:xdigit:]]' +match 1 1 1 1 'D' '[[:xdigit:]]' +match 1 1 1 1 '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]' +match 1 1 1 1 '.' 
'[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]' +match 1 1 1 1 '5' '[a-c[:digit:]x-z]' +match 1 1 1 1 'b' '[a-c[:digit:]x-z]' +match 1 1 1 1 'y' '[a-c[:digit:]x-z]' +match 0 0 0 0 'q' '[a-c[:digit:]x-z]' -# Test recursion and the abort code (use "wildtest -i" to see iteration counts) -match 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' -match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' -match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' -match 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' -match 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' -match 1 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t' -match 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t' -match 0 x foo '*/*/*' -match 0 x foo/bar '*/*/*' -match 1 x foo/bba/arr '*/*/*' -match 0 x foo/bb/aa/rr '*/*/*' -match 1 x foo/bb/aa/rr '**/**/**' -match 1 x abcXdefXghi '*X*i' -match 0 x ab/cXd/efXg/hi '*X*i' -match 1 x ab/cXd/efXg/hi '*/*X*/*/*i' -match 1 x ab/cXd/efXg/hi '**/*X*/**/*i' +# Additional tests, including some malformed wildmatch patterns +match 1 1 1 1 ']' '[\\-^]' +match 0 0 0 0 '[' '[\\-^]' +match 1 1 1 1 '-' '[\-_]' +match 1 1 1 1 ']' '[\]]' +match 0 0 0 0 '\]' '[\]]' +match 0 0 0 0 '\' '[\]]' +match 0 0 0 0 'ab' 'a[]b' +match 0 0 0 0 \ + 1 1 1 1 'a[]b' 'a[]b' +match 0 0 0 0 \ + 1 1 1 1 'ab[' 'ab[' +match 0 0 0 0 'ab' '[!' +match 0 0 0 0 'ab' '[-' +match 1 1 1 1 '-' '[-]' +match 0 0 0 0 '-' '[a-' +match 0 0 0 0 '-' '[!a-' +match 1 1 1 1 '-' '[--A]' +match 1 1 1 1 '5' '[--A]' +match 1 1 1 1 ' ' '[ --]' +match 1 1 1 1 '$' '[ --]' +match 1 1 1 1 '-' '[ --]' +match 0 0 0 0 '0' '[ --]' +match 1 1 1 1 '-' '[---]' +match 1 1 1 1 '-' '[------]' +match 0 0 0 0 'j' '[a-e-n]' +match 1 1 1 1 '-' '[a-e-n]' +match 1 1 1 1 'a' '[!------]' +match 0 0 0 0 '[' '[]-a]' +match 1 1 1 1 '^' '[]-a]' +match 0 0 0 0 '^' '[!]-a]' +match 1 1 1 1 '[' '[!]-a]' +match 1 1 1 1 '^' '[a^bc]' +match 1 1 1 1 '-b]' '[a-]b]' +match 0 0 0 0 '\' '[\]' +match 1 1 1 1 '\' '[\\]' +match 0 0 0 0 '\' '[!\\]' +match 1 1 1 1 'G' '[A-\\]' +match 0 0 0 0 'aaabbb' 'b*a' +match 0 0 0 0 'aabcaa' '*ba*' +match 1 1 1 1 ',' '[,]' +match 1 1 1 1 ',' '[\\,]' +match 1 1 1 1 '\' '[\\,]' +match 1 1 1 1 '-' '[,-.]' +match 0 0 0 0 '+' '[,-.]' +match 0 0 0 0 '-.]' '[,-.]' +match 1 1 1 1 '2' '[\1-\3]' +match 1 1 1 1 '3' '[\1-\3]' +match 0 0 0 0 '4' '[\1-\3]' +match 1 1 1 1 '\' '[[-\]]' +match 1 1 1 1 '[' '[[-\]]' +match 1 1 1 1 ']' '[[-\]]' +match 0 0 0 0 '-' '[[-\]]' -pathmatch 1 foo foo -pathmatch 0 foo fo -pathmatch 1 foo/bar foo/bar -pathmatch 1 foo/bar 'foo/*' -pathmatch 1 foo/bba/arr 'foo/*' -pathmatch 1 foo/bba/arr 'foo/**' -pathmatch 1 foo/bba/arr 'foo*' -pathmatch 1 foo/bba/arr 'foo**' -pathmatch 1 foo/bba/arr 'foo/*arr' -pathmatch 1 foo/bba/arr 'foo/**arr' -pathmatch 0 foo/bba/arr 'foo/*z' -pathmatch 0 foo/bba/arr 'foo/**z' -pathmatch 1 foo/bar 'foo?bar' -pathmatch 1 foo/bar 'foo[/]bar' -pathmatch 1 foo/bar 'foo[^a-z]bar' -pathmatch 0 foo '*/*/*' -pathmatch 0 foo/bar '*/*/*' -pathmatch 1 foo/bba/arr '*/*/*' -pathmatch 1 foo/bb/aa/rr '*/*/*' -pathmatch 1 abcXdefXghi '*X*i' -pathmatch 1 ab/cXd/efXg/hi '*/*X*/*/*i' -pathmatch 1 ab/cXd/efXg/hi '*Xg*i' +# Test recursion +match 1 1 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' 
'-*-*-*-*-*-*-12-*-*-*-m-*-*-*' +match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' +match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' +match 1 1 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' +match 0 0 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' +match 1 1 1 1 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t' +match 0 0 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t' +match 0 0 0 0 foo '*/*/*' +match 0 0 0 0 foo/bar '*/*/*' +match 1 1 1 1 foo/bba/arr '*/*/*' +match 0 0 1 1 foo/bb/aa/rr '*/*/*' +match 1 1 1 1 foo/bb/aa/rr '**/**/**' +match 1 1 1 1 abcXdefXghi '*X*i' +match 0 0 1 1 ab/cXd/efXg/hi '*X*i' +match 1 1 1 1 ab/cXd/efXg/hi '*/*X*/*/*i' +match 1 1 1 1 ab/cXd/efXg/hi '**/*X*/**/*i' -# Case-sensitivity features -match 0 x 'a' '[A-Z]' -match 1 x 'A' '[A-Z]' -match 0 x 'A' '[a-z]' -match 1 x 'a' '[a-z]' -match 0 x 'a' '[[:upper:]]' -match 1 x 'A' '[[:upper:]]' -match 0 x 'A' '[[:lower:]]' -match 1 x 'a' '[[:lower:]]' -match 0 x 'A' '[B-Za]' -match 1 x 'a' '[B-Za]' -match 0 x 'A' '[B-a]' -match 1 x 'a' '[B-a]' -match 0 x 'z' '[Z-y]' -match 1 x 'Z' '[Z-y]' +# Extra pathmatch tests +match 0 0 0 0 foo fo +match 1 1 1 1 foo/bar foo/bar +match 1 1 1 1 foo/bar 'foo/*' +match 0 0 1 1 foo/bba/arr 'foo/*' +match 1 1 1 1 foo/bba/arr 'foo/**' +match 0 0 1 1 foo/bba/arr 'foo*' +match 0 0 1 1 \ + 1 1 1 1 foo/bba/arr 'foo**' +match 0 0 1 1 foo/bba/arr 'foo/*arr' +match 0 0 1 1 foo/bba/arr 'foo/**arr' +match 0 0 0 0 foo/bba/arr 'foo/*z' +match 0 0 0 0 foo/bba/arr 'foo/**z' +match 0 0 1 1 foo/bar 'foo?bar' +match 0 0 1 1 foo/bar 'foo[/]bar' +match 0 0 1 1 foo/bar 'foo[^a-z]bar' +match 0 0 1 1 ab/cXd/efXg/hi '*Xg*i' -imatch 1 'a' '[A-Z]' -imatch 1 'A' '[A-Z]' -imatch 1 'A' '[a-z]' -imatch 1 'a' '[a-z]' -imatch 1 'a' '[[:upper:]]' -imatch 1 'A' '[[:upper:]]' -imatch 1 'A' '[[:lower:]]' -imatch 1 'a' '[[:lower:]]' -imatch 1 'A' '[B-Za]' -imatch 1 'a' '[B-Za]' -imatch 1 'A' '[B-a]' -imatch 1 'a' '[B-a]' -imatch 1 'z' '[Z-y]' -imatch 1 'Z' '[Z-y]' +# Extra case-sensitivity tests +match 0 1 0 1 'a' '[A-Z]' +match 1 1 1 1 'A' '[A-Z]' +match 0 1 0 1 'A' '[a-z]' +match 1 1 1 1 'a' '[a-z]' +match 0 1 0 1 'a' '[[:upper:]]' +match 1 1 1 1 'A' '[[:upper:]]' +match 0 1 0 1 'A' '[[:lower:]]' +match 1 1 1 1 'a' '[[:lower:]]' +match 0 1 0 1 'A' '[B-Za]' +match 1 1 1 1 'a' '[B-Za]' +match 0 1 0 1 'A' '[B-a]' +match 1 1 1 1 'a' '[B-a]' +match 0 1 0 1 'z' '[Z-y]' +match 1 1 1 1 'Z' '[Z-y]' test_done diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh index 23a54a4c49..3b905406df 100755 --- a/t/t3404-rebase-interactive.sh +++ b/t/t3404-rebase-interactive.sh @@ -461,6 +461,10 @@ test_expect_success C_LOCALE_OUTPUT 'squash and fixup generate correct log messa git rebase -i $base && git cat-file commit HEAD | sed -e 1,/^\$/d > actual-squash-fixup && test_cmp expect-squash-fixup actual-squash-fixup && + git cat-file commit HEAD@{2} | + grep "^# This is a combination of 3 commits\." && + git cat-file commit HEAD@{3} | + grep "^# This is a combination of 2 commits\." 
&& git checkout to-be-rebased && git branch -D squash-fixup ' @@ -1344,6 +1348,16 @@ test_expect_success 'editor saves as CR/LF' ' SQ="'" test_expect_success 'rebase -i --gpg-sign=<key-id>' ' + test_when_finished "test_might_fail git rebase --abort" && + set_fake_editor && + FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \ + >out 2>err && + test_i18ngrep "$SQ-S\"S I Gner\"$SQ" err +' + +test_expect_success 'rebase -i --gpg-sign=<key-id> overrides commit.gpgSign' ' + test_when_finished "test_might_fail git rebase --abort" && + test_config commit.gpgsign true && set_fake_editor && FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \ >out 2>err && diff --git a/t/t3405-rebase-malformed.sh b/t/t3405-rebase-malformed.sh index ff8c360cd5..cb7c6de84a 100755 --- a/t/t3405-rebase-malformed.sh +++ b/t/t3405-rebase-malformed.sh @@ -3,6 +3,7 @@ test_description='rebase should handle arbitrary git message' . ./test-lib.sh +. "$TEST_DIRECTORY"/lib-rebase.sh cat >F <<\EOF This is an example of a commit log message @@ -25,6 +26,7 @@ test_expect_success setup ' test_tick && git commit -m "Initial commit" && git branch diff-in-message && + git branch empty-message-merge && git checkout -b multi-line-subject && cat F >file2 && @@ -45,6 +47,11 @@ test_expect_success setup ' git cat-file commit HEAD | sed -e "1,/^\$/d" >G0 && + git checkout empty-message-merge && + echo file3 >file3 && + git add file3 && + git commit --allow-empty-message -m "" && + git checkout master && echo One >file1 && @@ -69,4 +76,20 @@ test_expect_success 'rebase commit with diff in message' ' test_cmp G G0 ' +test_expect_success 'rebase -m commit with empty message' ' + test_must_fail git rebase -m master empty-message-merge && + git rebase --abort && + git rebase -m --allow-empty-message master empty-message-merge +' + +test_expect_success 'rebase -i commit with empty message' ' + git checkout diff-in-message && + set_fake_editor && + test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \ + git rebase -i HEAD^ && + git rebase --abort && + FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \ + git rebase -i --allow-empty-message HEAD^ +' + test_done diff --git a/t/t3408-rebase-multi-line.sh b/t/t3408-rebase-multi-line.sh index 6b84e6042a..e7292f5b9b 100755 --- a/t/t3408-rebase-multi-line.sh +++ b/t/t3408-rebase-multi-line.sh @@ -24,8 +24,23 @@ But otherwise with a sane description." && >elif && git add elif && test_tick && - git commit -m second + git commit -m second && + git checkout -b side2 && + >afile && + git add afile && + test_tick && + git commit -m third && + echo hello >afile && + test_tick && + git commit -a -m fourth && + git checkout -b side-merge && + git reset --hard HEAD^^ && + git merge --no-ff -m "A merge commit log message that has a long +summary that spills over multiple lines. + +But otherwise with a sane description." 
side2 && + git branch side-merge-original ' test_expect_success rebase ' @@ -37,5 +52,14 @@ test_expect_success rebase ' test_cmp expect actual ' +test_expect_success rebasep ' + + git checkout side-merge && + git rebase -p side && + git cat-file commit HEAD | sed -e "1,/^\$/d" >actual && + git cat-file commit side-merge-original | sed -e "1,/^\$/d" >expect && + test_cmp expect actual + +' test_done diff --git a/t/t3501-revert-cherry-pick.sh b/t/t3501-revert-cherry-pick.sh index 4f2a263b63..783bdbf59d 100755 --- a/t/t3501-revert-cherry-pick.sh +++ b/t/t3501-revert-cherry-pick.sh @@ -141,7 +141,7 @@ test_expect_success 'cherry-pick "-" works with arguments' ' test_cmp expect actual ' -test_expect_success 'cherry-pick works with dirty renamed file' ' +test_expect_failure 'cherry-pick works with dirty renamed file' ' test_commit to-rename && git checkout -b unrelated && test_commit unrelated && @@ -150,7 +150,10 @@ test_expect_success 'cherry-pick works with dirty renamed file' ' test_tick && git commit -m renamed && echo modified >renamed && - git cherry-pick refs/heads/unrelated + test_must_fail git cherry-pick refs/heads/unrelated >out && + test_i18ngrep "Refusing to lose dirty file at renamed" out && + test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) && + grep -q "^modified$" renamed ' test_done diff --git a/t/t3512-cherry-pick-submodule.sh b/t/t3512-cherry-pick-submodule.sh index ce48c4fcca..bd78287841 100755 --- a/t/t3512-cherry-pick-submodule.sh +++ b/t/t3512-cherry-pick-submodule.sh @@ -5,7 +5,6 @@ test_description='cherry-pick can handle submodules' . ./test-lib.sh . "$TEST_DIRECTORY"/lib-submodule-update.sh -KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1 KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1 KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1 test_submodule_switch "git cherry-pick" diff --git a/t/t3513-revert-submodule.sh b/t/t3513-revert-submodule.sh index db9378142a..5e39fcdb66 100755 --- a/t/t3513-revert-submodule.sh +++ b/t/t3513-revert-submodule.sh @@ -25,7 +25,6 @@ git_revert () { git revert HEAD } -KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1 KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1 test_submodule_switch "git_revert" diff --git a/t/t4001-diff-rename.sh b/t/t4001-diff-rename.sh index eadf4f6244..a07816d560 100755 --- a/t/t4001-diff-rename.sh +++ b/t/t4001-diff-rename.sh @@ -134,11 +134,15 @@ test_expect_success 'favour same basenames over different ones' ' git rm path1 && mkdir subdir && git mv another-path subdir/path1 && - git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"' + git status >out && + test_i18ngrep "renamed: .*path1 -> subdir/path1" out +' test_expect_success 'favour same basenames even with minor differences' ' git show HEAD:path1 | sed "s/15/16/" > subdir/path1 && - git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"' + git status >out && + test_i18ngrep "renamed: .*path1 -> subdir/path1" out +' test_expect_success 'two files with same basename and same content' ' git reset --hard && @@ -148,7 +152,8 @@ test_expect_success 'two files with same basename and same content' ' git add dir && git commit -m 2 && git mv dir other-dir && - git status | test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file" + git status >out && + test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file" out ' test_expect_success 'setup for many rename source candidates' ' diff --git a/t/t4052-stat-output.sh b/t/t4052-stat-output.sh index 9f563db20a..6e2cf933f7 100755 --- 
a/t/t4052-stat-output.sh +++ b/t/t4052-stat-output.sh @@ -19,17 +19,33 @@ test_expect_success 'preparation' ' git commit -m message "$name" ' +cat >expect72 <<-'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + +EOF +test_expect_success "format-patch: small change with long name gives more space to the name" ' + git format-patch -1 --stdout >output && + grep " | " output >actual && + test_cmp expect72 actual +' + while read cmd args do - cat >expect <<-'EOF' + cat >expect80 <<-'EOF' ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + EOF test_expect_success "$cmd: small change with long name gives more space to the name" ' git $cmd $args >output && grep " | " output >actual && - test_cmp expect actual + test_cmp expect80 actual ' +done <<\EOF +diff HEAD^ HEAD --stat +show --stat +log -1 --stat +EOF +while read cmd args +do cat >expect <<-'EOF' ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + EOF @@ -79,11 +95,11 @@ test_expect_success 'preparation for big change tests' ' git commit -m message abcd ' -cat >expect80 <<'EOF' - abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +cat >expect72 <<'EOF' + abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ EOF -cat >expect80-graph <<'EOF' -| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +cat >expect72-graph <<'EOF' +| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ EOF cat >expect200 <<'EOF' abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -107,7 +123,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect200 diff HEAD^ HEAD --stat respects expect200 show --stat respects expect200 log -1 --stat @@ -135,7 +151,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect40 diff HEAD^ HEAD --stat respects expect40 show --stat respects expect40 log -1 --stat @@ -163,7 +179,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect40 diff HEAD^ HEAD --stat respects expect40 show --stat respects expect40 log -1 --stat @@ -250,11 +266,11 @@ show --stat log -1 --stat EOF -cat >expect80 <<'EOF' - ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++ +cat >expect72 <<'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++ EOF -cat >expect80-graph <<'EOF' -| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++ +cat >expect72-graph <<'EOF' +| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++ EOF cat >expect200 <<'EOF' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -278,7 +294,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect200 diff HEAD^ HEAD --stat respects expect200 show --stat respects expect200 log -1 --stat @@ -308,7 +324,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout 
+ignores expect72 format-patch -1 --stdout respects expect1 diff HEAD^ HEAD --stat respects expect1 show --stat respects expect1 log -1 --stat diff --git a/t/t4135-apply-weird-filenames.sh b/t/t4135-apply-weird-filenames.sh index 27cb0009fb..c7c688fcc4 100755 --- a/t/t4135-apply-weird-filenames.sh +++ b/t/t4135-apply-weird-filenames.sh @@ -89,4 +89,21 @@ test_expect_success 'traditional, whitespace-damaged, colon in timezone' ' test_cmp expected "post image.txt" ' +cat >diff-from-svn <<\EOF +Index: Makefile +=================================================================== +diff --git a/branches/Makefile +deleted file mode 100644 +--- a/branches/Makefile (revision 13) ++++ /dev/null (nonexistent) +@@ +1 0,0 @@ +- +EOF + +test_expect_success 'apply handles a diff generated by Subversion' ' + >Makefile && + git apply -p2 diff-from-svn && + test_path_is_missing Makefile +' + test_done diff --git a/t/t4150-am.sh b/t/t4150-am.sh index 23abf42abc..1eccfb71d0 100755 --- a/t/t4150-am.sh +++ b/t/t4150-am.sh @@ -1050,4 +1050,16 @@ test_expect_success 'am works with multi-line in-body headers' ' git cat-file commit HEAD | grep "^$LONG$" ' +test_expect_success 'am --quit keeps HEAD where it is' ' + mkdir .git/rebase-apply && + >.git/rebase-apply/last && + >.git/rebase-apply/next && + git rev-parse HEAD^ >.git/ORIG_HEAD && + git rev-parse HEAD >expected && + git am --quit && + test_path_is_missing .git/rebase-apply && + git rev-parse HEAD >actual && + test_cmp expected actual +' + test_done diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh index 80a1a3239a..ec9ba9bf6e 100755 --- a/t/t5500-fetch-pack.sh +++ b/t/t5500-fetch-pack.sh @@ -755,4 +755,67 @@ test_expect_success 'fetching deepen' ' ) ' +test_expect_success 'filtering by size' ' + rm -rf server client && + test_create_repo server && + test_commit -C server one && + test_config -C server uploadpack.allowfilter 1 && + + test_create_repo client && + git -C client fetch-pack --filter=blob:limit=0 ../server HEAD && + + # Ensure that object is not inadvertently fetched + test_must_fail git -C client cat-file -e $(git hash-object server/one.t) +' + +test_expect_success 'filtering by size has no effect if support for it is not advertised' ' + rm -rf server client && + test_create_repo server && + test_commit -C server one && + + test_create_repo client && + git -C client fetch-pack --filter=blob:limit=0 ../server HEAD 2> err && + + # Ensure that object is fetched + git -C client cat-file -e $(git hash-object server/one.t) && + + test_i18ngrep "filtering not recognized by server" err +' + +fetch_filter_blob_limit_zero () { + SERVER="$1" + URL="$2" + + rm -rf "$SERVER" client && + test_create_repo "$SERVER" && + test_commit -C "$SERVER" one && + test_config -C "$SERVER" uploadpack.allowfilter 1 && + + git clone "$URL" client && + test_config -C client extensions.partialclone origin && + + test_commit -C "$SERVER" two && + + git -C client fetch --filter=blob:limit=0 origin HEAD:somewhere && + + # Ensure that commit is fetched, but blob is not + test_config -C client extensions.partialclone "arbitrary string" && + git -C client cat-file -e $(git -C "$SERVER" rev-parse two) && + test_must_fail git -C client cat-file -e $(git hash-object "$SERVER/two.t") +} + +test_expect_success 'fetch with --filter=blob:limit=0' ' + fetch_filter_blob_limit_zero server server +' + +. 
"$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'fetch with --filter=blob:limit=0 and HTTP' ' + fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server" +' + +stop_httpd + + test_done diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh index 668c54be41..da9ac00557 100755 --- a/t/t5510-fetch.sh +++ b/t/t5510-fetch.sh @@ -222,12 +222,9 @@ test_expect_success 'fetch uses remote ref names to describe new refs' ' ( cd descriptive && git fetch o 2>actual && - grep " -> refs/crazyheads/descriptive-branch$" actual | - test_i18ngrep "new branch" && - grep " -> descriptive-tag$" actual | - test_i18ngrep "new tag" && - grep " -> crazy$" actual | - test_i18ngrep "new ref" + test_i18ngrep "new branch.* -> refs/crazyheads/descriptive-branch$" actual && + test_i18ngrep "new tag.* -> descriptive-tag$" actual && + test_i18ngrep "new ref.* -> crazy$" actual ) && git checkout master ' @@ -543,82 +540,232 @@ test_expect_success "should be able to fetch with duplicate refspecs" ' set_config_tristate () { # var=$1 val=$2 case "$2" in - unset) test_unconfig "$1" ;; - *) git config "$1" "$2" ;; + unset) + test_unconfig "$1" + ;; + *) + git config "$1" "$2" + key=$(echo $1 | sed -e 's/^remote\.origin/fetch/') + git_fetch_c="$git_fetch_c -c $key=$2" + ;; esac } test_configured_prune () { - fetch_prune=$1 remote_origin_prune=$2 cmdline=$3 expected=$4 + test_configured_prune_type "$@" "name" + test_configured_prune_type "$@" "link" +} - test_expect_success "prune fetch.prune=$1 remote.origin.prune=$2${3:+ $3}; $4" ' +test_configured_prune_type () { + fetch_prune=$1 + remote_origin_prune=$2 + fetch_prune_tags=$3 + remote_origin_prune_tags=$4 + expected_branch=$5 + expected_tag=$6 + cmdline=$7 + mode=$8 + + if test -z "$cmdline_setup" + then + test_expect_success 'setup cmdline_setup variable for subsequent test' ' + remote_url="file://$(git -C one config remote.origin.url)" && + remote_fetch="$(git -C one config remote.origin.fetch)" && + cmdline_setup="\"$remote_url\" \"$remote_fetch\"" + ' + fi + + if test "$mode" = 'link' + then + new_cmdline="" + + if test "$cmdline" = "" + then + new_cmdline=$cmdline_setup + else + new_cmdline=$(printf "%s" "$cmdline" | perl -pe 's[origin(?!/)]["'"$remote_url"'"]g') + fi + + if test "$fetch_prune_tags" = 'true' || + test "$remote_origin_prune_tags" = 'true' + then + if ! printf '%s' "$cmdline\n" | grep -q refs/remotes/origin/ + then + new_cmdline="$new_cmdline refs/tags/*:refs/tags/*" + fi + fi + + cmdline="$new_cmdline" + fi + + test_expect_success "$mode prune fetch.prune=$1 remote.origin.prune=$2 fetch.pruneTags=$3 remote.origin.pruneTags=$4${7:+ $7}; branch:$5 tag:$6" ' # make sure a newbranch is there in . 
and also in one git branch -f newbranch && + git tag -f newtag && ( cd one && test_unconfig fetch.prune && + test_unconfig fetch.pruneTags && test_unconfig remote.origin.prune && - git fetch && - git rev-parse --verify refs/remotes/origin/newbranch + test_unconfig remote.origin.pruneTags && + git fetch '"$cmdline_setup"' && + git rev-parse --verify refs/remotes/origin/newbranch && + git rev-parse --verify refs/tags/newtag ) && # now remove it git branch -d newbranch && + git tag -d newtag && # then test ( cd one && + git_fetch_c="" && set_config_tristate fetch.prune $fetch_prune && + set_config_tristate fetch.pruneTags $fetch_prune_tags && set_config_tristate remote.origin.prune $remote_origin_prune && - - git fetch $cmdline && - case "$expected" in + set_config_tristate remote.origin.pruneTags $remote_origin_prune_tags && + + if test "$mode" != "link" + then + git_fetch_c="" + fi && + git$git_fetch_c fetch '"$cmdline"' && + case "$expected_branch" in pruned) test_must_fail git rev-parse --verify refs/remotes/origin/newbranch ;; kept) git rev-parse --verify refs/remotes/origin/newbranch ;; + esac && + case "$expected_tag" in + pruned) + test_must_fail git rev-parse --verify refs/tags/newtag + ;; + kept) + git rev-parse --verify refs/tags/newtag + ;; esac ) ' } -test_configured_prune unset unset "" kept -test_configured_prune unset unset "--no-prune" kept -test_configured_prune unset unset "--prune" pruned - -test_configured_prune false unset "" kept -test_configured_prune false unset "--no-prune" kept -test_configured_prune false unset "--prune" pruned - -test_configured_prune true unset "" pruned -test_configured_prune true unset "--prune" pruned -test_configured_prune true unset "--no-prune" kept - -test_configured_prune unset false "" kept -test_configured_prune unset false "--no-prune" kept -test_configured_prune unset false "--prune" pruned - -test_configured_prune false false "" kept -test_configured_prune false false "--no-prune" kept -test_configured_prune false false "--prune" pruned - -test_configured_prune true false "" kept -test_configured_prune true false "--prune" pruned -test_configured_prune true false "--no-prune" kept - -test_configured_prune unset true "" pruned -test_configured_prune unset true "--no-prune" kept -test_configured_prune unset true "--prune" pruned - -test_configured_prune false true "" pruned -test_configured_prune false true "--no-prune" kept -test_configured_prune false true "--prune" pruned - -test_configured_prune true true "" pruned -test_configured_prune true true "--prune" pruned -test_configured_prune true true "--no-prune" kept +# $1 config: fetch.prune +# $2 config: remote.<name>.prune +# $3 config: fetch.pruneTags +# $4 config: remote.<name>.pruneTags +# $5 expect: branch to be pruned? +# $6 expect: tag to be pruned? 
+# $7 git-fetch $cmdline: +# +# $1 $2 $3 $4 $5 $6 $7 +test_configured_prune unset unset unset unset kept kept "" +test_configured_prune unset unset unset unset kept kept "--no-prune" +test_configured_prune unset unset unset unset pruned kept "--prune" +test_configured_prune unset unset unset unset kept pruned \ + "--prune origin refs/tags/*:refs/tags/*" +test_configured_prune unset unset unset unset pruned pruned \ + "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*" + +test_configured_prune false unset unset unset kept kept "" +test_configured_prune false unset unset unset kept kept "--no-prune" +test_configured_prune false unset unset unset pruned kept "--prune" + +test_configured_prune true unset unset unset pruned kept "" +test_configured_prune true unset unset unset pruned kept "--prune" +test_configured_prune true unset unset unset kept kept "--no-prune" + +test_configured_prune unset false unset unset kept kept "" +test_configured_prune unset false unset unset kept kept "--no-prune" +test_configured_prune unset false unset unset pruned kept "--prune" + +test_configured_prune false false unset unset kept kept "" +test_configured_prune false false unset unset kept kept "--no-prune" +test_configured_prune false false unset unset pruned kept "--prune" +test_configured_prune false false unset unset kept pruned \ + "--prune origin refs/tags/*:refs/tags/*" +test_configured_prune false false unset unset pruned pruned \ + "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*" + +test_configured_prune true false unset unset kept kept "" +test_configured_prune true false unset unset pruned kept "--prune" +test_configured_prune true false unset unset kept kept "--no-prune" + +test_configured_prune unset true unset unset pruned kept "" +test_configured_prune unset true unset unset kept kept "--no-prune" +test_configured_prune unset true unset unset pruned kept "--prune" + +test_configured_prune false true unset unset pruned kept "" +test_configured_prune false true unset unset kept kept "--no-prune" +test_configured_prune false true unset unset pruned kept "--prune" + +test_configured_prune true true unset unset pruned kept "" +test_configured_prune true true unset unset pruned kept "--prune" +test_configured_prune true true unset unset kept kept "--no-prune" +test_configured_prune true true unset unset kept pruned \ + "--prune origin refs/tags/*:refs/tags/*" +test_configured_prune true true unset unset pruned pruned \ + "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*" + +# --prune-tags on its own does nothing, needs --prune as well, same +# for fetch.pruneTags without fetch.prune +test_configured_prune unset unset unset unset kept kept "--prune-tags" +test_configured_prune unset unset true unset kept kept "" +test_configured_prune unset unset unset true kept kept "" + +# These will prune the tags +test_configured_prune unset unset unset unset pruned pruned "--prune --prune-tags" +test_configured_prune true unset true unset pruned pruned "" +test_configured_prune unset true unset true pruned pruned "" + +# remote.<name>.pruneTags overrides fetch.pruneTags, just like +# remote.<name>.prune overrides fetch.prune if set. +test_configured_prune true unset true unset pruned pruned "" +test_configured_prune false true false true pruned pruned "" +test_configured_prune true false true false kept kept "" + +# When --prune-tags is supplied it's ignored if an explicit refspec is +# given, same for the configuration options.
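For readers skimming the matrix, the behaviour it encodes can be recapped as plain commands. This is a minimal sketch; the clone directory name "clone" is hypothetical and "origin" stands for whatever remote is being fetched.

    # Prune stale remote-tracking branches and stale local tags in one go:
    git -C clone fetch --prune --prune-tags origin

    # The same effect via configuration instead of command-line options:
    git -C clone config fetch.prune true
    git -C clone config fetch.pruneTags true
    git -C clone fetch origin

    # --prune-tags alone is a no-op: it merely adds the refs/tags/*:refs/tags/*
    # refspec to a prune, which is also why an explicit refspec on the command
    # line causes it to be ignored.
    git -C clone fetch --prune-tags origin

The last command prunes nothing, which is what the "kept kept" rows for a bare --prune-tags below expect.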
+test_configured_prune unset unset unset unset pruned kept \ + "--prune --prune-tags origin +refs/heads/*:refs/remotes/origin/*" +test_configured_prune unset unset true unset pruned kept \ + "--prune origin +refs/heads/*:refs/remotes/origin/*" +test_configured_prune unset unset unset true pruned kept \ + "--prune origin +refs/heads/*:refs/remotes/origin/*" + +# Pruning also takes place if a file:// url replaces a named +# remote. However, because there's no implicit +# +refs/heads/*:refs/remotes/origin/* refspec and supplying it on the +# command-line negates --prune-tags, the branches will not be pruned. +test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "name" +test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "link" +test_configured_prune_type unset unset unset unset pruned pruned "origin --prune --prune-tags" "name" +test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link" +test_configured_prune_type unset unset unset unset pruned pruned "--prune --prune-tags origin" "name" +test_configured_prune_type unset unset unset unset kept pruned "--prune --prune-tags origin" "link" +test_configured_prune_type unset unset true unset pruned pruned "--prune origin" "name" +test_configured_prune_type unset unset true unset kept pruned "--prune origin" "link" +test_configured_prune_type unset unset unset true pruned pruned "--prune origin" "name" +test_configured_prune_type unset unset unset true kept pruned "--prune origin" "link" +test_configured_prune_type true unset true unset pruned pruned "origin" "name" +test_configured_prune_type true unset true unset kept pruned "origin" "link" +test_configured_prune_type unset true true unset pruned pruned "origin" "name" +test_configured_prune_type unset true true unset kept pruned "origin" "link" +test_configured_prune_type unset true unset true pruned pruned "origin" "name" +test_configured_prune_type unset true unset true kept pruned "origin" "link" + +# When all remote.origin.fetch settings are deleted, a --prune +# --prune-tags fetch still implicitly supplies refs/tags/*:refs/tags/* so +# tags, but not tracking branches, will be deleted.
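As a sketch of the case the next test constructs (again with a hypothetical clone of a remote named "origin"): once the configured fetch refspec is removed, --prune has no branch refspec left to act on, while --prune-tags still implies the tag refspec.

    # Drop the configured refspec, then fetch with pruning:
    git config --unset-all remote.origin.fetch
    git fetch --prune --prune-tags origin
    # Stale tags are deleted; remote-tracking branches are left untouched.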
+test_expect_success 'remove remote.origin.fetch "one"' ' + ( + cd one && + git config --unset-all remote.origin.fetch + ) +' +test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "name" +test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link" test_expect_success 'all boundary commits are excluded' ' test_commit base && diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh index a552ad4ead..74486c73b0 100755 --- a/t/t5526-fetch-submodules.sh +++ b/t/t5526-fetch-submodules.sh @@ -485,7 +485,7 @@ test_expect_success "don't fetch submodule when newly recorded commits are alrea ) ' -test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodule entry" ' +test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodules entry" ' ( cd downstream && git fetch --recurse-submodules diff --git a/t/t5536-fetch-conflicts.sh b/t/t5536-fetch-conflicts.sh index 2e42cf3316..644736b8a3 100755 --- a/t/t5536-fetch-conflicts.sh +++ b/t/t5536-fetch-conflicts.sh @@ -22,7 +22,7 @@ verify_stderr () { cat >expected && # We're not interested in the error # "fatal: The remote end hung up unexpectedly": - test_i18ngrep -E '^(fatal|warning):' <error | grep -v 'hung up' >actual | sort && + test_i18ngrep -E '^(fatal|warning):' error | grep -v 'hung up' >actual | sort && test_i18ncmp expected actual } diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh index d38bf32470..21340e89c9 100755 --- a/t/t5541-http-push-smart.sh +++ b/t/t5541-http-push-smart.sh @@ -234,7 +234,7 @@ test_expect_success TTY 'push --no-progress silences progress but not status' ' test_commit no-progress && test_terminal git push --no-progress >output 2>&1 && test_i18ngrep "^To http" output && - test_i18ngrep ! "^Writing objects" + test_i18ngrep ! "^Writing objects" output ' test_expect_success 'push --progress shows progress to non-tty' ' diff --git a/t/t5545-push-options.sh b/t/t5545-push-options.sh index 463783789c..b47a95871c 100755 --- a/t/t5545-push-options.sh +++ b/t/t5545-push-options.sh @@ -217,17 +217,32 @@ test_expect_success 'invalid push option in config' ' test_refs master HEAD@{1} ' +test_expect_success 'push options keep quoted characters intact (direct)' ' + mk_repo_pair && + git -C upstream config receive.advertisePushOptions true && + test_commit -C workbench one && + git -C workbench push --push-option="\"embedded quotes\"" up master && + echo "\"embedded quotes\"" >expect && + test_cmp expect upstream/.git/hooks/pre-receive.push_options +' + . 
"$TEST_DIRECTORY"/lib-httpd.sh start_httpd -test_expect_success 'push option denied properly by http server' ' +# set up http repository for fetching/pushing, with push options config +# bool set to $1 +mk_http_pair () { test_when_finished "rm -rf test_http_clone" && - test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" && + test_when_finished 'rm -rf "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git' && mk_repo_pair && - git -C upstream config receive.advertisePushOptions false && + git -C upstream config receive.advertisePushOptions "$1" && git -C upstream config http.receivepack true && cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git && - git clone "$HTTPD_URL"/smart/upstream test_http_clone && + git clone "$HTTPD_URL"/smart/upstream test_http_clone +} + +test_expect_success 'push option denied properly by http server' ' + mk_http_pair false && test_commit -C test_http_clone one && test_must_fail git -C test_http_clone push --push-option=asdf origin master 2>actual && test_i18ngrep "the receiving end does not support push options" actual && @@ -235,13 +250,7 @@ test_expect_success 'push option denied properly by http server' ' ' test_expect_success 'push options work properly across http' ' - test_when_finished "rm -rf test_http_clone" && - test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" && - mk_repo_pair && - git -C upstream config receive.advertisePushOptions true && - git -C upstream config http.receivepack true && - cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git && - git clone "$HTTPD_URL"/smart/upstream test_http_clone && + mk_http_pair true && test_commit -C test_http_clone one && git -C test_http_clone push origin master && @@ -260,6 +269,15 @@ test_expect_success 'push options work properly across http' ' test_cmp expect actual ' +test_expect_success 'push options keep quoted characters intact (http)' ' + mk_http_pair true && + + test_commit -C test_http_clone one && + git -C test_http_clone push --push-option="\"embedded quotes\"" origin master && + echo "\"embedded quotes\"" >expect && + test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options +' + stop_httpd test_done diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh index a51b7e20d3..f5721b4a59 100755 --- a/t/t5551-http-fetch-smart.sh +++ b/t/t5551-http-fetch-smart.sh @@ -364,5 +364,38 @@ test_expect_success 'custom http headers' ' submodule update sub ' +test_expect_success 'GIT_REDACT_COOKIES redacts cookies' ' + rm -rf clone && + echo "Set-Cookie: Foo=1" >cookies && + echo "Set-Cookie: Bar=2" >>cookies && + GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Bar,Baz \ + git -c "http.cookieFile=$(pwd)/cookies" clone \ + $HTTPD_URL/smart/repo.git clone 2>err && + grep "Cookie:.*Foo=1" err && + grep "Cookie:.*Bar=<redacted>" err && + ! grep "Cookie:.*Bar=2" err +' + +test_expect_success 'GIT_REDACT_COOKIES handles empty values' ' + rm -rf clone && + echo "Set-Cookie: Foo=" >cookies && + GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Foo \ + git -c "http.cookieFile=$(pwd)/cookies" clone \ + $HTTPD_URL/smart/repo.git clone 2>err && + grep "Cookie:.*Foo=<redacted>" err +' + +test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' ' + rm -rf clone && + GIT_TRACE_CURL=true \ + git clone $HTTPD_URL/smart/repo.git clone 2>err && + grep "=> Send data" err && + + rm -rf clone && + GIT_TRACE_CURL=true GIT_TRACE_CURL_NO_DATA=1 \ + git clone $HTTPD_URL/smart/repo.git clone 2>err && + ! 
grep "=> Send data" err +' + stop_httpd test_done diff --git a/t/t5570-git-daemon.sh b/t/t5570-git-daemon.sh index 225a022e8a..755b05a8ae 100755 --- a/t/t5570-git-daemon.sh +++ b/t/t5570-git-daemon.sh @@ -167,23 +167,48 @@ test_expect_success 'access repo via interpolated hostname' ' git init --bare "$repo" && git push "$repo" HEAD && >"$repo"/git-daemon-export-ok && - rm -rf tmp.git && GIT_OVERRIDE_VIRTUAL_HOST=localhost \ - git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git && - rm -rf tmp.git && + git ls-remote "$GIT_DAEMON_URL/interp.git" && GIT_OVERRIDE_VIRTUAL_HOST=LOCALHOST \ - git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git + git ls-remote "$GIT_DAEMON_URL/interp.git" ' test_expect_success 'hostname cannot break out of directory' ' - rm -rf tmp.git && repo="$GIT_DAEMON_DOCUMENT_ROOT_PATH/../escape.git" && git init --bare "$repo" && git push "$repo" HEAD && >"$repo"/git-daemon-export-ok && test_must_fail \ env GIT_OVERRIDE_VIRTUAL_HOST=.. \ - git clone --bare "$GIT_DAEMON_URL/escape.git" tmp.git + git ls-remote "$GIT_DAEMON_URL/escape.git" +' + +test_expect_success 'daemon log records all attributes' ' + cat >expect <<-\EOF && + Extended attribute "host": localhost + Extended attribute "protocol": version=1 + EOF + >daemon.log && + GIT_OVERRIDE_VIRTUAL_HOST=localhost \ + git -c protocol.version=1 \ + ls-remote "$GIT_DAEMON_URL/interp.git" && + grep -i extended.attribute daemon.log | cut -d" " -f2- >actual && + test_cmp expect actual +' + +test_expect_success FAKENC 'hostname interpolation works after LF-stripping' ' + { + printf "git-upload-pack /interp.git\n\0host=localhost" | packetize + printf "0000" + } >input && + fake_nc "$GIT_DAEMON_HOST_PORT" <input >output && + depacketize <output >output.raw && + + # just pick out the value of master, which avoids any protocol + # particulars + perl -lne "print \$1 if m{^(\\S+) refs/heads/master}" <output.raw >actual && + git -C "$repo" rev-parse master >expect && + test_cmp expect actual ' stop_git_daemon diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh index 8c437bf872..0b62037744 100755 --- a/t/t5601-clone.sh +++ b/t/t5601-clone.sh @@ -628,4 +628,105 @@ test_expect_success 'clone on case-insensitive fs' ' ) ' +partial_clone () { + SERVER="$1" && + URL="$2" && + + rm -rf "$SERVER" client && + test_create_repo "$SERVER" && + test_commit -C "$SERVER" one && + HASH1=$(git hash-object "$SERVER/one.t") && + git -C "$SERVER" revert HEAD && + test_commit -C "$SERVER" two && + HASH2=$(git hash-object "$SERVER/two.t") && + test_config -C "$SERVER" uploadpack.allowfilter 1 && + test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 && + + git clone --filter=blob:limit=0 "$URL" client && + + git -C client fsck && + + # Ensure that unneeded blobs are not inadvertently fetched. 
+ test_config -C client extensions.partialclone "not a remote" && + test_must_fail git -C client cat-file -e "$HASH1" && + + # But this blob was fetched, because clone performs an initial checkout + git -C client cat-file -e "$HASH2" +} + +test_expect_success 'partial clone' ' + partial_clone server "file://$(pwd)/server" +' + +test_expect_success 'partial clone: warn if server does not support object filtering' ' + rm -rf server client && + test_create_repo server && + test_commit -C server one && + + git clone --filter=blob:limit=0 "file://$(pwd)/server" client 2> err && + + test_i18ngrep "filtering not recognized by server" err +' + +test_expect_success 'batch missing blob request during checkout' ' + rm -rf server client && + + test_create_repo server && + echo a >server/a && + echo b >server/b && + git -C server add a b && + + git -C server commit -m x && + echo aa >server/a && + echo bb >server/b && + git -C server add a b && + git -C server commit -m x && + + test_config -C server uploadpack.allowfilter 1 && + test_config -C server uploadpack.allowanysha1inwant 1 && + + git clone --filter=blob:limit=0 "file://$(pwd)/server" client && + + # Ensure that there is only one negotiation by checking that there is + # only "done" line sent. ("done" marks the end of negotiation.) + GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ && + grep "git> done" trace >done_lines && + test_line_count = 1 done_lines +' + +test_expect_success 'batch missing blob request does not inadvertently try to fetch gitlinks' ' + rm -rf server client && + + test_create_repo repo_for_submodule && + test_commit -C repo_for_submodule x && + + test_create_repo server && + echo a >server/a && + echo b >server/b && + git -C server add a b && + git -C server commit -m x && + + echo aa >server/a && + echo bb >server/b && + # Also add a gitlink pointing to an arbitrary repository + git -C server submodule add "$(pwd)/repo_for_submodule" c && + git -C server add a b c && + git -C server commit -m x && + + test_config -C server uploadpack.allowfilter 1 && + test_config -C server uploadpack.allowanysha1inwant 1 && + + # Make sure that it succeeds + git clone --filter=blob:limit=0 "file://$(pwd)/server" client +' + +. "$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'partial clone using HTTP' ' + partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server" +' + +stop_httpd + test_done diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh new file mode 100755 index 0000000000..29d8631184 --- /dev/null +++ b/t/t5616-partial-clone.sh @@ -0,0 +1,146 @@ +#!/bin/sh + +test_description='git partial clone' + +. ./test-lib.sh + +# create a normal "src" repo where we can later create new commits. +# expect_1.oids will contain a list of the OIDs of all blobs. +test_expect_success 'setup normal src repo' ' + echo "{print \$1}" >print_1.awk && + echo "{print \$2}" >print_2.awk && + + git init src && + for n in 1 2 3 4 + do + echo "This is file: $n" > src/file.$n.txt + git -C src add file.$n.txt + git -C src commit -m "file $n" + git -C src ls-files -s file.$n.txt >>temp + done && + awk -f print_2.awk <temp | sort >expect_1.oids && + test_line_count = 4 expect_1.oids +' + +# bare clone "src" giving "srv.bare" for use as our server. 
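The next setup step enables the two upload-pack settings that every filter-capable server in these tests depends on. As a stand-alone sketch, assuming a hypothetical bare repository under /tmp that already has some history:

    # Allow clients to request object filtering, e.g. --filter=blob:none:
    git -C /tmp/srv.bare config uploadpack.allowfilter 1
    # Allow clients to later fetch any individual object they turn out to miss:
    git -C /tmp/srv.bare config uploadpack.allowanysha1inwant 1
    # A blob-less partial clone then becomes possible:
    git clone --filter=blob:none "file:///tmp/srv.bare" pc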
+test_expect_success 'setup bare clone for server' ' + git clone --bare "file://$(pwd)/src" srv.bare && + git -C srv.bare config --local uploadpack.allowfilter 1 && + git -C srv.bare config --local uploadpack.allowanysha1inwant 1 +' + +# do basic partial clone from "srv.bare" +# confirm we are missing all of the known blobs. +# confirm partial clone was registered in the local config. +test_expect_success 'do partial clone 1' ' + git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 && + git -C pc1 rev-list HEAD --quiet --objects --missing=print \ + | awk -f print_1.awk \ + | sed "s/?//" \ + | sort >observed.oids && + test_cmp expect_1.oids observed.oids && + test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" && + test "$(git -C pc1 config --local extensions.partialclone)" = "origin" && + test "$(git -C pc1 config --local core.partialclonefilter)" = "blob:none" +' + +# checkout master to force dynamic object fetch of blobs at HEAD. +test_expect_success 'verify checkout with dynamic object fetch' ' + git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed && + test_line_count = 4 observed && + git -C pc1 checkout master && + git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed && + test_line_count = 0 observed +' + +# create new commits in "src" repo to establish a blame history on file.1.txt +# and push to "srv.bare". +test_expect_success 'push new commits to server' ' + git -C src remote add srv "file://$(pwd)/srv.bare" && + for x in a b c d e + do + echo "Mod file.1.txt $x" >>src/file.1.txt + git -C src add file.1.txt + git -C src commit -m "mod $x" + done && + git -C src blame master -- file.1.txt >expect.blame && + git -C src push -u srv master +' + +# (partial) fetch in the partial clone repo from the promisor remote. +# verify that fetch inherited the filter-spec from the config and DOES NOT +# have the new blobs. +test_expect_success 'partial fetch inherits filter settings' ' + git -C pc1 fetch origin && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 5 observed +' + +# force dynamic object fetch using diff. +# we should only get 1 new blob (for the file in origin/master). +test_expect_success 'verify diff causes dynamic object fetch' ' + git -C pc1 diff master..origin/master -- file.1.txt && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 4 observed +' + +# force full dynamic object fetch of the file's history using blame. +# we should get the intermediate blobs for the file. +test_expect_success 'verify blame causes dynamic object fetch' ' + git -C pc1 blame origin/master -- file.1.txt >observed.blame && + test_cmp expect.blame observed.blame && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 0 observed +' + +# create new commits in "src" repo to establish a history on file.2.txt +# and push to "srv.bare". +test_expect_success 'push new commits to server for file.2.txt' ' + for x in a b c d e f + do + echo "Mod file.2.txt $x" >>src/file.2.txt + git -C src add file.2.txt + git -C src commit -m "mod $x" + done && + git -C src push -u srv master +' + +# Do FULL fetch by disabling inherited filter-spec using --no-filter. +# Verify we have all the new blobs. 
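The "inherited filter-spec" mentioned above is simply the value the initial partial clone recorded in the repository's configuration; the sketch below, reusing the pc1 repository set up earlier in this file, summarizes the interaction being verified.

    # The filter recorded at clone time (expected to print "blob:none"):
    git -C pc1 config core.partialclonefilter
    # A plain fetch reuses that filter and keeps omitting the new blobs:
    git -C pc1 fetch origin
    # --no-filter overrides it for this one fetch, transferring everything:
    git -C pc1 fetch --no-filter origin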
+test_expect_success 'override inherited filter-spec using --no-filter' ' + git -C pc1 fetch --no-filter origin && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 0 observed +' + +# create new commits in "src" repo to establish a history on file.3.txt +# and push to "srv.bare". +test_expect_success 'push new commits to server for file.3.txt' ' + for x in a b c d e f + do + echo "Mod file.3.txt $x" >>src/file.3.txt + git -C src add file.3.txt + git -C src commit -m "mod $x" + done && + git -C src push -u srv master +' + +# Do a partial fetch and then try to manually fetch the missing objects. +# This can be used as the basis of a pre-command hook to bulk fetch objects +# perhaps combined with a command in dry-run mode. +test_expect_success 'manual prefetch of missing objects' ' + git -C pc1 fetch --filter=blob:none origin && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \ + | awk -f print_1.awk \ + | sed "s/?//" \ + | sort >observed.oids && + test_line_count = 6 observed.oids && + git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \ + | awk -f print_1.awk \ + | sed "s/?//" \ + | sort >observed.oids && + test_line_count = 0 observed.oids +' + +test_done diff --git a/t/t5812-proto-disable-http.sh b/t/t5812-proto-disable-http.sh index d911afd24c..872788ac8c 100755 --- a/t/t5812-proto-disable-http.sh +++ b/t/t5812-proto-disable-http.sh @@ -20,10 +20,7 @@ test_expect_success 'curl redirects respect whitelist' ' test_must_fail env GIT_ALLOW_PROTOCOL=http:https \ GIT_SMART_HTTP=0 \ git clone "$HTTPD_URL/ftp-redir/repo.git" 2>stderr && - { - test_i18ngrep "ftp.*disabled" stderr || - test_i18ngrep "your curl version is too old" - } + test_i18ngrep -E "(ftp.*disabled|your curl version is too old)" stderr ' test_expect_success 'curl limits redirects' ' diff --git a/t/t6022-merge-rename.sh b/t/t6022-merge-rename.sh index 05ebba7afa..c01f721f13 100755 --- a/t/t6022-merge-rename.sh +++ b/t/t6022-merge-rename.sh @@ -242,10 +242,12 @@ test_expect_success 'merge of identical changes in a renamed file' ' rm -f A M N && git reset --hard && git checkout change+rename && - GIT_MERGE_VERBOSITY=3 git merge change | test_i18ngrep "^Skipped B" && + GIT_MERGE_VERBOSITY=3 git merge change >out && + test_i18ngrep "^Skipped B" out && git reset --hard HEAD^ && git checkout change && - GIT_MERGE_VERBOSITY=3 git merge change+rename | test_i18ngrep "^Skipped B" + GIT_MERGE_VERBOSITY=3 git merge change+rename >out && + test_i18ngrep "^Skipped B" out ' test_expect_success 'setup for rename + d/f conflicts' ' diff --git a/t/t6120-describe.sh b/t/t6120-describe.sh index a5d9015024..bae78c4e89 100755 --- a/t/t6120-describe.sh +++ b/t/t6120-describe.sh @@ -378,4 +378,12 @@ check_describe tags/A --all A check_describe tags/c --all c check_describe heads/branch_A --all --match='branch_*' branch_A +test_expect_success 'describe complains about tree object' ' + test_must_fail git describe HEAD^{tree} +' + +test_expect_success 'describe complains about missing object' ' + test_must_fail git describe $_z40 +' + test_done diff --git a/t/t7001-mv.sh b/t/t7001-mv.sh index 6e5031f56f..d4e6485a26 100755 --- a/t/t7001-mv.sh +++ b/t/t7001-mv.sh @@ -39,6 +39,12 @@ test_expect_success \ grep "^R100..*path1/COPYING..*path0/COPYING"' test_expect_success \ + 'mv --dry-run does not move file' \ + 'git mv -n path0/COPYING MOVED && + test -f path0/COPYING && 
+ test ! -f MOVED' + +test_expect_success \ 'checking -k on non-existing file' \ 'git mv -k idontexist path0' diff --git a/t/t7004-tag.sh b/t/t7004-tag.sh index a9af2de996..2aac77af70 100755 --- a/t/t7004-tag.sh +++ b/t/t7004-tag.sh @@ -452,6 +452,21 @@ test_expect_success \ test_cmp expect actual ' +get_tag_header annotated-tag-edit $commit commit $time >expect +echo "An edited message" >>expect +test_expect_success 'set up editor' ' + write_script fakeeditor <<-\EOF + sed -e "s/A message/An edited message/g" <"$1" >"$1-" + mv "$1-" "$1" + EOF +' +test_expect_success \ + 'creating an annotated tag with -m message --edit should succeed' ' + GIT_EDITOR=./fakeeditor git tag -m "A message" --edit annotated-tag-edit && + get_tag_msg annotated-tag-edit >actual && + test_cmp expect actual +' + cat >msgfile <<EOF Another message in a file. @@ -465,6 +480,21 @@ test_expect_success \ test_cmp expect actual ' +get_tag_header file-annotated-tag-edit $commit commit $time >expect +sed -e "s/Another message/Another edited message/g" msgfile >>expect +test_expect_success 'set up editor' ' + write_script fakeeditor <<-\EOF + sed -e "s/Another message/Another edited message/g" <"$1" >"$1-" + mv "$1-" "$1" + EOF +' +test_expect_success \ + 'creating an annotated tag with -F messagefile --edit should succeed' ' + GIT_EDITOR=./fakeeditor git tag -F msgfile --edit file-annotated-tag-edit && + get_tag_msg file-annotated-tag-edit >actual && + test_cmp expect actual +' + cat >inputmsg <<EOF A message from the standard input diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh index e5fb892f95..46b947824f 100755 --- a/t/t7063-status-untracked-cache.sh +++ b/t/t7063-status-untracked-cache.sh @@ -22,6 +22,12 @@ avoid_racy() { sleep 1 } +status_is_clean() { + >../status.expect && + git status --porcelain >../status.actual && + test_cmp ../status.expect ../status.actual +} + test_lazy_prereq UNTRACKED_CACHE ' { git update-index --test-untracked-cache; ret=$?; } && test $ret -ne 1 @@ -683,4 +689,85 @@ test_expect_success 'untracked cache survives a commit' ' test_cmp ../before ../after ' +test_expect_success 'teardown worktree' ' + cd .. 
+' + +test_expect_success SYMLINKS 'setup worktree for symlink test' ' + git init worktree-symlink && + cd worktree-symlink && + git config core.untrackedCache true && + mkdir one two && + touch one/file two/file && + git add one/file two/file && + git commit -m"first commit" && + git rm -rf one && + ln -s two one && + git add one && + git commit -m"second commit" +' + +test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=true' ' + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + status_is_clean +' + +test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=false' ' + git config core.untrackedCache false && + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + status_is_clean +' + +test_expect_success 'setup worktree for non-symlink test' ' + git init worktree-non-symlink && + cd worktree-non-symlink && + git config core.untrackedCache true && + mkdir one two && + touch one/file two/file && + git add one/file two/file && + git commit -m"first commit" && + git rm -rf one && + cp two/file one && + git add one && + git commit -m"second commit" +' + +test_expect_success '"status" after file replacement should be clean with UC=true' ' + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + test-dump-untracked-cache >../actual && + grep -F "recurse valid" ../actual >../actual.grep && + cat >../expect.grep <<EOF && +/ 0000000000000000000000000000000000000000 recurse valid +/two/ 0000000000000000000000000000000000000000 recurse valid +EOF + status_is_clean && + test_cmp ../expect.grep ../actual.grep +' + +test_expect_success '"status" after file replacement should be clean with UC=false' ' + git config core.untrackedCache false && + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + status_is_clean +' + test_done diff --git a/t/t7409-submodule-detached-work-tree.sh b/t/t7409-submodule-detached-work-tree.sh index c20717181e..fc018e3638 100755 --- a/t/t7409-submodule-detached-work-tree.sh +++ b/t/t7409-submodule-detached-work-tree.sh @@ -6,7 +6,7 @@ test_description='Test submodules on detached working tree This test verifies that "git submodule" initialization, update and addition works -on detahced working trees +on detached working trees ' TEST_NO_CREATE_REPO=1 diff --git a/t/t7505-prepare-commit-msg-hook.sh b/t/t7505-prepare-commit-msg-hook.sh index b13f72975e..1f43b3cd4c 100755 --- a/t/t7505-prepare-commit-msg-hook.sh +++ b/t/t7505-prepare-commit-msg-hook.sh @@ -4,6 +4,38 @@ test_description='prepare-commit-msg hook' . 
./test-lib.sh +test_expect_success 'set up commits for rebasing' ' + test_commit root && + test_commit a a a && + test_commit b b b && + git checkout -b rebase-me root && + test_commit rebase-a a aa && + test_commit rebase-b b bb && + for i in $(test_seq 1 13) + do + test_commit rebase-$i c $i + done && + git checkout master && + + cat >rebase-todo <<-EOF + pick $(git rev-parse rebase-a) + pick $(git rev-parse rebase-b) + fixup $(git rev-parse rebase-1) + fixup $(git rev-parse rebase-2) + pick $(git rev-parse rebase-3) + fixup $(git rev-parse rebase-4) + squash $(git rev-parse rebase-5) + reword $(git rev-parse rebase-6) + squash $(git rev-parse rebase-7) + fixup $(git rev-parse rebase-8) + fixup $(git rev-parse rebase-9) + edit $(git rev-parse rebase-10) + squash $(git rev-parse rebase-11) + squash $(git rev-parse rebase-12) + edit $(git rev-parse rebase-13) + EOF +' + test_expect_success 'with no hook' ' echo "foo" > file && @@ -31,17 +63,41 @@ mkdir -p "$HOOKDIR" echo "#!$SHELL_PATH" > "$HOOK" cat >> "$HOOK" <<'EOF' -if test "$2" = commit; then - source=$(git rev-parse "$3") +GIT_DIR=$(git rev-parse --git-dir) +if test -d "$GIT_DIR/rebase-merge" +then + rebasing=1 else - source=${2-default} + rebasing=0 fi -if test "$GIT_EDITOR" = :; then - sed -e "1s/.*/$source (no editor)/" "$1" > msg.tmp + +get_last_cmd () { + tail -n1 "$GIT_DIR/rebase-merge/done" | { + read cmd id _ + git log --pretty="[$cmd %s]" -n1 $id + } +} + +if test "$2" = commit +then + if test $rebasing = 1 + then + source="$3" + else + source=$(git rev-parse "$3") + fi else - sed -e "1s/.*/$source/" "$1" > msg.tmp + source=${2-default} +fi +test "$GIT_EDITOR" = : && source="$source (no editor)" + +if test $rebasing = 1 +then + echo "$source $(get_last_cmd)" >"$1" +else + sed -e "1s/.*/$source/" "$1" >msg.tmp + mv msg.tmp "$1" fi -mv msg.tmp "$1" exit 0 EOF chmod +x "$HOOK" @@ -156,6 +212,63 @@ test_expect_success 'with hook and editor (merge)' ' test "$(git log -1 --pretty=format:%s)" = "merge" ' +test_rebase () { + expect=$1 && + mode=$2 && + test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase $mode)" ' + test_when_finished "\ + git rebase --abort + git checkout -f master + git branch -D tmp" && + git checkout -b tmp rebase-me && + GIT_SEQUENCE_EDITOR="cp rebase-todo" && + GIT_EDITOR="\"$FAKE_EDITOR\"" && + ( + export GIT_SEQUENCE_EDITOR GIT_EDITOR && + test_must_fail git rebase $mode b && + echo x >a && + git add a && + test_must_fail git rebase --continue && + echo x >b && + git add b && + git commit && + git rebase --continue && + echo y >a && + git add a && + git commit && + git rebase --continue && + echo y >b && + git add b && + git rebase --continue + ) && + if test $mode = -p # reword amended after pick + then + n=18 + else + n=17 + fi && + git log --pretty=%s -g -n$n HEAD@{1} >actual && + test_cmp "$TEST_DIRECTORY/t7505/expected-rebase$mode" actual + ' +} + +test_rebase success -i +test_rebase success -p + +test_expect_success 'with hook (cherry-pick)' ' + test_when_finished "git checkout -f master" && + git checkout -B other b && + git cherry-pick rebase-1 && + test "$(git log -1 --pretty=format:%s)" = "message (no editor)" +' + +test_expect_success 'with hook and editor (cherry-pick)' ' + test_when_finished "git checkout -f master" && + git checkout -B other b && + git cherry-pick -e rebase-1 && + test "$(git log -1 --pretty=format:%s)" = merge +' + cat > "$HOOK" <<'EOF' #!/bin/sh exit 1 @@ -197,4 +310,11 @@ test_expect_success 'with failing hook (merge)' ' ' +test_expect_success C_LOCALE_OUTPUT 'with failing 
hook (cherry-pick)' ' + test_when_finished "git checkout -f master" && + git checkout -B other b && + test_must_fail git cherry-pick rebase-1 2>actual && + test $(grep -c prepare-commit-msg actual) = 1 +' + test_done diff --git a/t/t7505/expected-rebase-i b/t/t7505/expected-rebase-i new file mode 100644 index 0000000000..c514bdbb94 --- /dev/null +++ b/t/t7505/expected-rebase-i @@ -0,0 +1,17 @@ +message [edit rebase-13] +message (no editor) [edit rebase-13] +message [squash rebase-12] +message (no editor) [squash rebase-11] +default [edit rebase-10] +message (no editor) [edit rebase-10] +message [fixup rebase-9] +message (no editor) [fixup rebase-8] +message (no editor) [squash rebase-7] +message [reword rebase-6] +message [squash rebase-5] +message (no editor) [fixup rebase-4] +message (no editor) [pick rebase-3] +message (no editor) [fixup rebase-2] +message (no editor) [fixup rebase-1] +merge [pick rebase-b] +message [pick rebase-a] diff --git a/t/t7505/expected-rebase-p b/t/t7505/expected-rebase-p new file mode 100644 index 0000000000..93bada596e --- /dev/null +++ b/t/t7505/expected-rebase-p @@ -0,0 +1,18 @@ +message [edit rebase-13] +message (no editor) [edit rebase-13] +message [squash rebase-12] +message (no editor) [squash rebase-11] +default [edit rebase-10] +message (no editor) [edit rebase-10] +message [fixup rebase-9] +message (no editor) [fixup rebase-8] +message (no editor) [squash rebase-7] +HEAD [reword rebase-6] +message (no editor) [reword rebase-6] +message [squash rebase-5] +message (no editor) [fixup rebase-4] +message (no editor) [pick rebase-3] +message (no editor) [fixup rebase-2] +message (no editor) [fixup rebase-1] +merge [pick rebase-b] +message [pick rebase-a] diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh index eb2d13bbcf..756beb0d8e 100755 --- a/t/t7519-status-fsmonitor.sh +++ b/t/t7519-status-fsmonitor.sh @@ -314,4 +314,43 @@ test_expect_success 'splitting the index results in the same state' ' test_cmp expect actual ' +test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' ' + test_create_repo dot-git && + ( + cd dot-git && + mkdir -p .git/hooks && + : >tracked && + : >modified && + mkdir dir1 && + : >dir1/tracked && + : >dir1/modified && + mkdir dir2 && + : >dir2/tracked && + : >dir2/modified && + write_integration_script && + git config core.fsmonitor .git/hooks/fsmonitor-test && + git update-index --untracked-cache && + git update-index --fsmonitor && + GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \ + git status && + test-dump-untracked-cache >../before + ) && + cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF && + printf ".git\0" + printf ".git/index\0" + printf "dir1/.git\0" + printf "dir1/.git/index\0" + EOF + ( + cd dot-git && + GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \ + git status && + test-dump-untracked-cache >../after + ) && + grep "directory invalidation" trace-before >>before && + grep "directory invalidation" trace-after >>after && + # UNTR extension unchanged, dir invalidation count unchanged + test_cmp before after +' + test_done diff --git a/t/t7607-merge-overwrite.sh b/t/t7607-merge-overwrite.sh index 9444d6a9b9..9c422bcd7c 100755 --- a/t/t7607-merge-overwrite.sh +++ b/t/t7607-merge-overwrite.sh @@ -97,7 +97,10 @@ test_expect_failure 'will not overwrite unstaged changes in renamed file' ' git mv c1.c other.c && git commit -m rename && cp important other.c && - git merge c1a && + test_must_fail git merge c1a >out && + test_i18ngrep "Refusing to lose dirty file at 
other.c" out && + test_path_is_file other.c~HEAD && + test $(git hash-object other.c~HEAD) = $(git rev-parse c1a:c1.c) && test_cmp important other.c ' diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh index a06e5d7ba5..19601fb546 100755 --- a/t/t9001-send-email.sh +++ b/t/t9001-send-email.sh @@ -6,6 +6,12 @@ test_description='git send-email' # May be altered later in the test PREREQ="PERL" +replace_variable_fields () { + sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \ + -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \ + -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/" +} + test_expect_success $PREREQ 'prepare reference tree' ' echo "1A quick brown fox jumps over the" >file && echo "lazy dog" >>file && @@ -315,10 +321,7 @@ test_expect_success $PREREQ 'Show all headers' ' --bcc=bcc@example.com \ --in-reply-to="<unique-message-id@example.com>" \ --smtp-server relay.example.com \ - $patches | - sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \ - -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \ - -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/" \ + $patches | replace_variable_fields \ >actual-show-all-headers && test_cmp expected-show-all-headers actual-show-all-headers ' @@ -573,12 +576,6 @@ Result: OK EOF " -replace_variable_fields () { - sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \ - -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \ - -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/" -} - test_suppression () { git send-email \ --dry-run \ diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh index 1701fe2a06..8a8a9329ee 100644 --- a/t/test-lib-functions.sh +++ b/t/test-lib-functions.sh @@ -610,6 +610,14 @@ list_contains () { # # Writing this as "! git checkout ../outerspace" is wrong, because # the failure could be due to a segv. We want a controlled failure. +# +# Accepts the following options: +# +# ok=<signal-name>[,<...>]: +# Don't treat an exit caused by the given signal as error. +# Multiple signals can be specified as a comma separated list. +# Currently recognized signal names are: sigpipe, success. +# (Don't use 'success', use 'test_might_fail' instead.) test_must_fail () { case "$1" in @@ -656,6 +664,8 @@ test_must_fail () { # # Writing "git config --unset all.configuration || :" would be wrong, # because we want to notice if it fails due to segv. +# +# Accepts the same options as test_must_fail. test_might_fail () { test_must_fail ok=success "$@" @@ -705,6 +715,60 @@ test_cmp_bin() { cmp "$@" } +# Use this instead of test_cmp to compare files that contain expected and +# actual output from git commands that can be translated. When running +# under GETTEXT_POISON this pretends that the command produced expected +# results. +test_i18ncmp () { + test -n "$GETTEXT_POISON" || test_cmp "$@" +} + +# Use this instead of "grep expected-string actual" to see if the +# output from a git command that can be translated either contains an +# expected string, or does not contain an unwanted one. When running +# under GETTEXT_POISON this pretends that the command produced expected +# results. +test_i18ngrep () { + eval "last_arg=\${$#}" + + test -f "$last_arg" || + error "bug in the test script: test_i18ngrep requires a file" \ + "to read as the last parameter" + + if test $# -lt 2 || + { test "x!" = "x$1" && test $# -lt 3 ; } + then + error "bug in the test script: too few parameters to test_i18ngrep" + fi + + if test -n "$GETTEXT_POISON" + then + # pretend success + return 0 + fi + + if test "x!" = "x$1" + then + shift + ! grep "$@" && return 0 + + echo >&2 "error: '! 
grep $@' did find a match in:" + else + grep "$@" && return 0 + + echo >&2 "error: 'grep $@' didn't find a match in:" + fi + + if test -s "$last_arg" + then + cat >&2 "$last_arg" + else + echo >&2 "<File '$last_arg' is empty>" + fi + + return 1 +} + # Call any command "$@" but be more verbose about its # failure. This is handy for commands like "test" which do # not output anything when they fail. @@ -1020,3 +1084,37 @@ nongit () { "$@" ) } + +# convert stdin to pktline representation; note that empty input becomes an +# empty packet, not a flush packet (for that you can just print 0000 yourself). +packetize() { + cat >packetize.tmp && + len=$(wc -c <packetize.tmp) && + printf '%04x%s' "$(($len + 4))" && + cat packetize.tmp && + rm -f packetize.tmp +} + +# Parse the input as a series of pktlines, writing the result to stdout. +# Sideband markers are removed automatically, and the output is routed to +# stderr if appropriate. +# +# NUL bytes are converted to "\\0" for ease of parsing with text tools. +depacketize () { + perl -e ' + while (read(STDIN, $len, 4) == 4) { + if ($len eq "0000") { + print "FLUSH\n"; + } else { + read(STDIN, $buf, hex($len) - 4); + $buf =~ s/\0/\\0/g; + if ($buf =~ s/^[\x2\x3]//) { + print STDERR $buf; + } else { + $buf =~ s/^\x1//; + print $buf; + } + } + } + ' +} diff --git a/t/test-lib.sh b/t/test-lib.sh index 9a0a21f49a..9535d2e0a9 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -116,6 +116,7 @@ unset VISUAL EMAIL LANGUAGE COLUMNS $("$PERL_PATH" -e ' my @vars = grep(/^GIT_/ && !/^GIT_($ok)/o, @env); print join("\n", @vars); ') +unset XDG_CACHE_HOME unset XDG_CONFIG_HOME unset GITPERLLIB GIT_AUTHOR_EMAIL=author@example.com @@ -939,7 +940,7 @@ then fi fi -GITPERLLIB="$GIT_BUILD_DIR"/perl/blib/lib:"$GIT_BUILD_DIR"/perl/blib/arch/auto/Git +GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib export GITPERLLIB test -d "$GIT_BUILD_DIR"/templates/blt || { error "You haven't built things yet, have you?" @@ -1062,32 +1063,6 @@ else test_set_prereq C_LOCALE_OUTPUT fi -# Use this instead of test_cmp to compare files that contain expected and -# actual output from git commands that can be translated. When running -# under GETTEXT_POISON this pretends that the command produced expected -# results. -test_i18ncmp () { - test -n "$GETTEXT_POISON" || test_cmp "$@" -} - -# Use this instead of "grep expected-string actual" to see if the -# output from a git command that can be translated either contains an -# expected string, or does not contain an unwanted one. When running -# under GETTEXT_POISON this pretends that the command produced expected -# results. -test_i18ngrep () { - if test -n "$GETTEXT_POISON" - then - : # pretend success - elif test "x!" = "x$1" - then - shift - ! 
grep "$@" - else - grep "$@" - fi -} - test_lazy_prereq PIPE ' # test whether the filesystem supports FIFOs test_have_prereq !MINGW,!CYGWIN && @@ -1132,6 +1107,10 @@ test_lazy_prereq EXPENSIVE ' test -n "$GIT_TEST_LONG" ' +test_lazy_prereq EXPENSIVE_ON_WINDOWS ' + test_have_prereq EXPENSIVE || test_have_prereq !MINGW,!CYGWIN +' + test_lazy_prereq USR_BIN_TIME ' test -x /usr/bin/time ' @@ -131,7 +131,6 @@ static void print_trace_line(struct trace_key *key, struct strbuf *buf) { strbuf_complete_line(buf); trace_write(key, buf->buf, buf->len); - strbuf_release(buf); } static void trace_vprintf_fl(const char *file, int line, struct trace_key *key, @@ -144,6 +143,7 @@ static void trace_vprintf_fl(const char *file, int line, struct trace_key *key, strbuf_vaddf(&buf, format, ap); print_trace_line(key, &buf); + strbuf_release(&buf); } static void trace_argv_vprintf_fl(const char *file, int line, @@ -157,8 +157,9 @@ static void trace_argv_vprintf_fl(const char *file, int line, strbuf_vaddf(&buf, format, ap); - sq_quote_argv(&buf, argv, 0); + sq_quote_argv_pretty(&buf, argv); print_trace_line(&trace_default_key, &buf); + strbuf_release(&buf); } void trace_strbuf_fl(const char *file, int line, struct trace_key *key, @@ -171,6 +172,7 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key, strbuf_addbuf(&buf, data); print_trace_line(key, &buf); + strbuf_release(&buf); } static void trace_performance_vprintf_fl(const char *file, int line, @@ -190,6 +192,7 @@ static void trace_performance_vprintf_fl(const char *file, int line, } print_trace_line(&trace_perf_key, &buf); + strbuf_release(&buf); } #ifndef HAVE_VARIADIC_MACROS @@ -426,6 +429,6 @@ void trace_command_performance(const char **argv) atexit(print_command_performance_atexit); strbuf_reset(&command_line); - sq_quote_argv(&command_line, argv, 0); + sq_quote_argv_pretty(&command_line, argv); command_start_time = getnanotime(); } diff --git a/transport-helper.c b/transport-helper.c index 5080150231..3f380d87d9 100644 --- a/transport-helper.c +++ b/transport-helper.c @@ -672,6 +672,11 @@ static int fetch(struct transport *transport, if (data->transport_options.update_shallow) set_helper_option(transport, "update-shallow", "true"); + if (data->transport_options.filter_options.choice) + set_helper_option( + transport, "filter", + data->transport_options.filter_options.filter_spec); + if (data->fetch) return fetch_with_fetch(transport, nr_heads, to_fetch); diff --git a/transport.c b/transport.c index fc802260f6..00d48b5b56 100644 --- a/transport.c +++ b/transport.c @@ -161,6 +161,15 @@ static int set_git_option(struct git_transport_options *opts, } else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) { opts->deepen_relative = !!value; return 0; + } else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) { + opts->from_promisor = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_NO_DEPENDENTS)) { + opts->no_dependents = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) { + parse_list_objects_filter(&opts->filter_options, value); + return 0; } return 1; } @@ -229,6 +238,9 @@ static int fetch_refs_via_pack(struct transport *transport, data->options.check_self_contained_and_connected; args.cloning = transport->cloning; args.update_shallow = data->options.update_shallow; + args.from_promisor = data->options.from_promisor; + args.no_dependents = data->options.no_dependents; + args.filter_options = data->options.filter_options; if (!data->got_remote_heads) { connect_setup(transport, 0); diff --git a/transport.h 
b/transport.h
index 731c78b679..3c68d73b21 100644
--- a/transport.h
+++ b/transport.h
@@ -4,6 +4,7 @@
 #include "cache.h"
 #include "run-command.h"
 #include "remote.h"
+#include "list-objects-filter-options.h"
 
 struct string_list;
 
@@ -15,12 +16,15 @@ struct git_transport_options {
 	unsigned self_contained_and_connected : 1;
 	unsigned update_shallow : 1;
 	unsigned deepen_relative : 1;
+	unsigned from_promisor : 1;
+	unsigned no_dependents : 1;
 	int depth;
 	const char *deepen_since;
 	const struct string_list *deepen_not;
 	const char *uploadpack;
 	const char *receivepack;
 	struct push_cas_option *cas;
+	struct list_objects_filter_options filter_options;
 };
 
 enum transport_family {
@@ -159,6 +163,18 @@ void transport_check_allowed(const char *type);
 /* Send push certificates */
 #define TRANS_OPT_PUSH_CERT "pushcert"
 
+/* Indicate that these objects are being fetched by a promisor */
+#define TRANS_OPT_FROM_PROMISOR "from-promisor"
+
+/*
+ * Indicate that only the objects wanted need to be fetched, not their
+ * dependents
+ */
+#define TRANS_OPT_NO_DEPENDENTS "no-dependents"
+
+/* Filter objects for partial clone and fetch */
+#define TRANS_OPT_LIST_OBJECTS_FILTER "filter"
+
 /**
  * Returns 0 if the option was used, non-zero otherwise. Prints a
  * message to stderr if the option is not used.
diff --git a/unpack-trees.c b/unpack-trees.c
index 96c3327f19..c9f6e314d5 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -15,6 +15,7 @@
 #include "submodule.h"
 #include "submodule-config.h"
 #include "fsmonitor.h"
+#include "fetch-object.h"
 
 /*
  * Error messages expected by scripts out of plumbing commands such as
@@ -370,6 +371,27 @@ static int check_updates(struct unpack_trees_options *o)
 		load_gitmodules_file(index, &state);
 
 	enable_delayed_checkout(&state);
+	if (repository_format_partial_clone && o->update && !o->dry_run) {
+		/*
+		 * Prefetch the objects that are to be checked out in the loop
+		 * below.
+ */ + struct oid_array to_fetch = OID_ARRAY_INIT; + int fetch_if_missing_store = fetch_if_missing; + fetch_if_missing = 0; + for (i = 0; i < index->cache_nr; i++) { + struct cache_entry *ce = index->cache[i]; + if ((ce->ce_flags & CE_UPDATE) && + !S_ISGITLINK(ce->ce_mode)) { + if (!has_object_file(&ce->oid)) + oid_array_append(&to_fetch, &ce->oid); + } + } + if (to_fetch.nr) + fetch_objects(repository_format_partial_clone, + &to_fetch); + fetch_if_missing = fetch_if_missing_store; + } for (i = 0; i < index->cache_nr; i++) { struct cache_entry *ce = index->cache[i]; @@ -1506,7 +1528,7 @@ static void invalidate_ce_path(const struct cache_entry *ce, if (!ce) return; cache_tree_invalidate_path(o->src_index, ce->name); - untracked_cache_invalidate_path(o->src_index, ce->name); + untracked_cache_invalidate_path(o->src_index, ce->name, 1); } /* diff --git a/upload-pack.c b/upload-pack.c index d5de18127c..f51b6cfca9 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -10,6 +10,8 @@ #include "diff.h" #include "revision.h" #include "list-objects.h" +#include "list-objects-filter.h" +#include "list-objects-filter-options.h" #include "run-command.h" #include "connect.h" #include "sigchain.h" @@ -19,6 +21,7 @@ #include "argv-array.h" #include "prio-queue.h" #include "protocol.h" +#include "quote.h" static const char * const upload_pack_usage[] = { N_("git upload-pack [<options>] <dir>"), @@ -65,6 +68,10 @@ static int advertise_refs; static int stateless_rpc; static const char *pack_objects_hook; +static int filter_capability_requested; +static int filter_advertise; +static struct list_objects_filter_options filter_options; + static void reset_timeout(void) { alarm(timeout); @@ -132,6 +139,17 @@ static void create_pack_file(void) argv_array_push(&pack_objects.args, "--delta-base-offset"); if (use_include_tag) argv_array_push(&pack_objects.args, "--include-tag"); + if (filter_options.filter_spec) { + if (pack_objects.use_shell) { + struct strbuf buf = STRBUF_INIT; + sq_quote_buf(&buf, filter_options.filter_spec); + argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf); + strbuf_release(&buf); + } else { + argv_array_pushf(&pack_objects.args, "--filter=%s", + filter_options.filter_spec); + } + } pack_objects.in = -1; pack_objects.out = -1; @@ -795,6 +813,12 @@ static void receive_needs(void) deepen_rev_list = 1; continue; } + if (skip_prefix(line, "filter ", &arg)) { + if (!filter_capability_requested) + die("git upload-pack: filtering capability not negotiated"); + parse_list_objects_filter(&filter_options, arg); + continue; + } if (!skip_prefix(line, "want ", &arg) || get_oid_hex(arg, &oid_buf)) die("git upload-pack: protocol error, " @@ -822,6 +846,8 @@ static void receive_needs(void) no_progress = 1; if (parse_feature_request(features, "include-tag")) use_include_tag = 1; + if (parse_feature_request(features, "filter")) + filter_capability_requested = 1; o = parse_object(&oid_buf); if (!o) { @@ -941,7 +967,7 @@ static int send_ref(const char *refname, const struct object_id *oid, struct strbuf symref_info = STRBUF_INIT; format_symref_info(&symref_info, cb_data); - packet_write_fmt(1, "%s %s%c%s%s%s%s%s agent=%s\n", + packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n", oid_to_hex(oid), refname_nons, 0, capabilities, (allow_unadvertised_object_request & ALLOW_TIP_SHA1) ? @@ -950,6 +976,7 @@ static int send_ref(const char *refname, const struct object_id *oid, " allow-reachable-sha1-in-want" : "", stateless_rpc ? " no-done" : "", symref_info.buf, + filter_advertise ? 
" filter" : "", git_user_agent_sanitized()); strbuf_release(&symref_info); } else { @@ -1028,6 +1055,8 @@ static int upload_pack_config(const char *var, const char *value, void *unused) } else if (current_config_scope() != CONFIG_SCOPE_REPO) { if (!strcmp("uploadpack.packobjectshook", var)) return git_config_string(&pack_objects_hook, var, value); + } else if (!strcmp("uploadpack.allowfilter", var)) { + filter_advertise = git_config_bool(var, value); } return parse_hide_refs_config(var, value, "uploadpack"); } diff --git a/wrap-for-bin.sh b/wrap-for-bin.sh index 22b6e4948f..5842408817 100644 --- a/wrap-for-bin.sh +++ b/wrap-for-bin.sh @@ -14,7 +14,7 @@ else GIT_TEMPLATE_DIR='@@BUILD_DIR@@/templates/blt' export GIT_TEMPLATE_DIR fi -GITPERLLIB='@@BUILD_DIR@@/perl/blib/lib'"${GITPERLLIB:+:$GITPERLLIB}" +GITPERLLIB='@@BUILD_DIR@@/perl/build/lib'"${GITPERLLIB:+:$GITPERLLIB}" GIT_TEXTDOMAINDIR='@@BUILD_DIR@@/po/build/locale' PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH" |